import requests  # send HTTP requests
from lxml import etree  # parse the HTML
import time  # pause between requests, to avoid an IP ban
import os  # create the download folder
Because the target site has updated its anti-scraping measures, spoofing only the User-Agent is no longer enough, so the entire request header is spoofed:
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': '__gads=undefined; Hm_lvt_aecc9715b0f5d5f7f34fba48a3c511d6=1614145919,1614755756;'
              'UM_distinctid=177d2981b251cd-05097031e2a0a08-4c3f217f-144000-177d2981b2669b;'
              'sctj_uid=ccf8a73d-036c-78e4-6b1d-6035e961b0d3;'
              'CNZZDATA300636=cnzz_eid%3D1737029801-1614143206-%26ntime%3D1614759211;'
              'Hm_lvt_398913ed58c9e7dfe9695953fb7b6799=1614145927,1614755489,1614755737;'
              '__gads=ID=af6dc030f3c0029f-226abe1136c600e4:T=1614760491:RT=1614760491:S=ALNI_MZAA0rXz7uNmNn6qnuj5BPP7heStw;'
              'ASP.NET_SessionId=3qd454mfnwsqufegavxl5lbm; Hm_lpvt_398913ed58c9e7dfe9695953fb7b6799=1614760490;'
              'bbsmax_user=ce24ea68-9f80-42e3-8d4f-53b13b13c719; avatarId=a034b11b-abc9-4bfd-a8b2-bdf7fef644bc-;'
              'Hm_lpvt_aecc9715b0f5d5f7f34fba48a3c511d6=1614756087',
    'Host': 'sc.chinaz.com',
    'If-None-Match': '',
    'Referer': 'https://sc.chinaz.com/jianli/free.html',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0',
}
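Since every request in the script below sends the same spoofed headers, one optional refinement (not in the original code) is a requests.Session, which attaches the headers once and reuses the underlying connection. A minimal sketch, assuming the headers dict above; the URL is just the first free-template list page:

session = requests.Session()
session.headers.update(headers)  # attach the spoofed headers once; every session.get() now sends them
response = session.get('https://sc.chinaz.com/jianli/free.html')  # no headers= argument needed per request
print(response.status_code)  # a 200 here suggests the spoofed headers got past the anti-scraping check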
Create the download folder if it does not already exist:
if not os.path.exists('./moban'):
    os.mkdir('./moban')
for i in range(1, 701):  # expect roughly 700 list pages * 20 resume templates each
    print(f"Preparing to scrape page {i} of resume templates")
    print("Pausing to avoid an IP ban......")  # pause notice
    time.sleep(15)  # pause 15 s per list page; each list page links to 20 resume templates
    url = f'https://sc.chinaz.com/jianli/free_{i}.html'  # build the URL for page i
    try:  # exception handling
        response = requests.get(url=url, headers=headers)  # fetch the list page
    except Exception as e:  # bind the exception to e
        print(e)  # print the exception
        print('Connection failed, skipping!!!')  # if it won't connect, don't push your luck
        print("Pausing the list-page fetch to avoid an IP ban......")  # pause notice
        time.sleep(5)  # pause 5 s after every exception
        continue  # skip this iteration
    response.encoding = 'utf-8'  # decode the Chinese content as utf-8
    page = response.text  # get the response body as text
    tree = etree.HTML(page)  # parse the markup with etree
    a_list = tree.xpath("//div[@class='box col3 ws_block']/a")  # extract the 20 template links on this page
    for a in a_list:
        resume_href = 'https:' + a.xpath('./@href')[0]  # build the detail-page URL from the scraped href
        resume_name = a.xpath('./img/@alt')[0]  # scrape the template name; take the first match from the list
        resume_name = resume_name.strip()  # strip leading/trailing whitespace
        try:
            resume_response = requests.get(url=resume_href, headers=headers)  # open the template detail page
        except Exception as e:
            print(e)
            print('Connection failed, skipping!!!')
            print("Pausing the detail-page fetch to avoid an IP ban......")
            time.sleep(5)
            continue
        resume_response.encoding = 'utf-8'  # decode the Chinese content as utf-8
        resume_page = resume_response.text  # get the response body as text
        resume_tree = etree.HTML(resume_page)  # parse the markup with etree
        resume_link = resume_tree.xpath('//ul[@class="clearfix"]/li/a/@href')[0]  # extract the download link
        try:
            download = requests.get(url=resume_link, headers=headers).content  # fetch the archive as binary data
        except Exception as e:
            print(e)
            print('Connection failed, skipping!!!')
            print("Pausing the template download to avoid an IP ban......")
            time.sleep(5)
            continue
        download_path = './moban/' + resume_name + '.rar'  # build the save path and file name
        with open(download_path, 'wb') as fp:  # open the file in binary write mode
            fp.write(download)  # write the archive to disk
        print(resume_name, 'downloaded successfully!!!')  # success notice
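One caveat worth knowing: resume_name is taken straight from the alt attribute and dropped into the save path unchanged, so a name containing a character such as / or ? would break the open() call. A minimal sketch of a sanitizing step, under the assumption that the naming scheme stays as above; safe_filename is a hypothetical helper, not part of the original script:

import re

def safe_filename(name: str) -> str:
    # Hypothetical helper: replace characters that are illegal in file names with underscores.
    return re.sub(r'[\\/:*?"<>|]', '_', name).strip()

# e.g. build the save path with the sanitized name instead of the raw alt text:
# download_path = './moban/' + safe_filename(resume_name) + '.rar'
print(safe_filename('frontend engineer resume?template'))  # -> frontend engineer resume_template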