Fetching all wallpapers from 彼岸桌面 (netbian.com) with Python


Part 1: Code

import requests
from bs4 import BeautifulSoup
import os
import time
import random
import UserAgent  # the header helper from Part 2 (saved as UserAgent.py)

index = 'http://www.netbian.com'  # site root URL
interval = 0.1                    # delay (seconds) between image downloads
firstDir = 'E:/彼岸桌面壁纸'       # root directory for all downloads
classificationDict = {}           # info about each category sub-page

# fetch a page and return the elements matched by a CSS selector
def screen(url, select):
    headers = UserAgent.get_headers()  # pick a random request header
    html = requests.get(url=url, headers=headers)
    html.encoding = 'gbk'              # the site is GBK-encoded
    html = html.text
    soup = BeautifulSoup(html, 'lxml')
    return soup.select(select)

# get the last page number of a category
def screenPage(url, select):
    html = requests.get(url=url, headers=UserAgent.get_headers())
    html.encoding = 'gbk'
    html = html.text
    soup = BeautifulSoup(html, 'lxml')
    return soup.select(select)[0].next_sibling.text

# download a single image
def download(src, name, path):
    if isinstance(src, str):
        response = requests.get(src)
        path = path + '/' + name + '.jpg'
        while os.path.exists(path):  # if the file name already exists, append a random digit
            path = path.split('.')[0] + str(random.randint(2, 17)) + '.' + path.split('.')[1]
        with open(path, 'wb') as pic:
            for chunk in response.iter_content(128):
                pic.write(chunk)

# locate the 1920x1080 version of each image and download it
def handleImgs(links, path):
    for link in links:
        href = link.get('href')
        if href == 'http://pic.netbian.com/':  # skip the ad entry
            continue

        # first hop: the image detail page
        if 'http://' in href:  # a few entries carry an absolute URL instead of a relative one
            url = href
        else:
            url = index + href

        select = 'div#main div.endpage div.pic div.pic-down a'
        link = screen(url, select)
        if link == []:
            print(url + ' 无此图片,爬取失败')  # no such image, download failed
            continue
        href = link[0].get('href')

        # second hop: the download page
        url = index + href

        # the image element itself
        select = 'div#main table a img'
        link = screen(url, select)
        if link == []:
            print(url + ' 该图片需要登录才能爬取,爬取失败')  # this image needs a login, download failed
            continue
        # strip characters that are illegal in file names
        name = link[0].get('alt').replace('\t', '').replace('|', '').replace(':', '').replace('\\', '').replace('/', '').replace('*', '').replace('?', '').replace('"', '').replace('<', '').replace('>', '')
        print(name)  # file name being downloaded
        src = link[0].get('src')
        if requests.get(src).status_code == 404:
            print(url + ' 该图片下载链接 404,爬取失败')  # download link returned 404
            print()
            continue
        print()
        download(src, name, path)
        time.sleep(interval)

# download everything under one category sub-page
def select_classification(choice):
    print('---------------------------')
    print('--------------' + choice + '-------------')
    print('---------------------------')
    secondUrl = classificationDict[choice]['url']
    secondDir = classificationDict[choice]['path']

    if not os.path.exists(secondDir):
        os.mkdir(secondDir)  # create the category directory

    select = '#main > div.page > span.slh'
    pageIndex = screenPage(secondUrl, select)
    lastPagenum = int(pageIndex)  # number of the last page
    for i in range(0, lastPagenum):
        if i == 0:
            url = secondUrl
        else:
            url = secondUrl + 'index_%d.htm' % (i + 1)

        print('--------------' + choice + ': ' + str(i + 1) + '-------------')
        path = secondDir + '/' + str(i + 1)
        if not os.path.exists(path):
            os.mkdir(path)  # per-page directory under the category

        select = 'div#main div.list ul li a'
        links = screen(url, select)
        handleImgs(links, path)

# simple console UI: let the user pick a category to download
def ui():
    print('--------------netbian-------------')
    print('全部', end=' ')  # '全部' means "all categories"
    for c in classificationDict.keys():
        print(c, end=' ')
    print()
    choice = input('请输入分类名:')  # prompt for a category name
    if choice == '全部':
        for c in classificationDict.keys():
            select_classification(c)
    elif choice not in classificationDict.keys():
        print('输入错误,请重新输入!')  # invalid input, ask again
        print('----')
        ui()
    else:
        select_classification(choice)

# collect the category sub-page info into a dictionary
def init_classification():
    url = index
    select = '#header > div.head > ul > li:nth-child(1) > div > a'
    classifications = screen(url, select)
    for c in classifications:
        href = c.get('href')  # a relative address
        text = c.string  # category name
        if text == '4k壁纸':  # the 4k category can't be crawled due to permission restrictions, skip it
            continue
        secondDir = firstDir + '/' + text  # category directory
        url = index + href                 # category sub-page url
        global classificationDict
        classificationDict[text] = {       # keyed by category name
            'path': secondDir,
            'url': url
        }

def main():
    if not os.path.exists(firstDir):
        os.mkdir(firstDir)  # create the root directory
    init_classification()
    ui()

if __name__ == '__main__':
    main()

Part 2: Code

Part 1 imports this file as the UserAgent module, so it should be saved as UserAgent.py alongside the crawler script above.

import random

# a pool of User-Agent strings to rotate through
user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0;) Opera/UCWEB7.0.2.37/28/999",
    # iPhone 6
    "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
    # newer mobile UA
    "Mozilla/5.0 (Linux;u;Android 4.2.2;zh-cn;) AppleWebKit/534.46 (KHTML,like Gecko) Version/5.1 Mobile Safari/10600.6.3 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/s…)"
]

# return a randomly chosen request header
def get_headers():
    return {'User-Agent': random.choice(user_agent)}
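As a quick check that the two parts fit together, here is a minimal usage sketch (not part of the original post): it assumes the Part 2 code above has been saved as UserAgent.py next to the Part 1 script, and simply fetches the site's home page with one of the random headers.

# minimal usage sketch, assuming Part 2 is saved as UserAgent.py in the same directory
import requests
import UserAgent

headers = UserAgent.get_headers()  # e.g. {'User-Agent': 'Mozilla/5.0 (...)'}
response = requests.get('http://www.netbian.com', headers=headers)
response.encoding = 'gbk'          # the site is GBK-encoded
print(response.status_code, len(response.text))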

Original article: https://blog.csdn.net/wsad861…
