
About Python 3.x: Scraping Sogou WeChat Articles

1. Fetch the index page.
Set up the headers and params, then a plain GET request is enough.
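
A minimal sketch of this step (the full script at the end of the post sends a much richer header set; the page number here is just a placeholder):

import requests

params = {'query': '美女', 'type': '2', 'page': 1, 'ie': 'utf8'}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
resp = requests.get('https://weixin.sogou.com/weixin', params=params, headers=headers)
print(resp.status_code)  # 200 means the result list came back
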
2. Extract the detail-page URLs from the index page.

If you open one of these URLs directly in a browser and watch the address bar, you will see the URL change: the page is being redirected. To see what actually happens, capture the request with a packet-sniffing tool such as Fiddler.

You will find that the server returns a snippet of JavaScript, and the URL that this script pieces together is the real article URL (real_url) we want. So we simply match the fragments with a regular expression and concatenate them.
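
For reference, the JavaScript the jump page returns has roughly this shape (an illustrative reconstruction, not a verbatim capture), and the extraction is just a findall plus a join:

import re

# Illustrative response body of the Sogou jump page (shape only, not real data)
js_text = """
<script>
var url = '';
url += 'http://mp.we';
url += 'ixin.qq.com/s';
url += '?src=@example';
window.location.replace(url.replace("@", ""))
</script>
"""

# Grab every fragment assigned with += and glue them back together,
# dropping the '@' marker just as the page's own script does.
fragments = re.findall(r"\+=.*?'(.*?)';", js_text, re.S)
real_url = ''.join(fragments).replace('@', '')
print(real_url)  # -> http://mp.weixin.qq.com/s?src=example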

3. Fetch the detail page.
Now that we have the real URL of the detail page, all that is left is to build the request headers and send the request. I do not recommend adding a Cookie to these headers: make too many requests and that cookie gets banned, forcing you through a captcha.
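
A minimal sketch of this step, written as a hypothetical helper (get_title is my name, not part of the script below); note that no Cookie header is sent:

import requests
from lxml import etree

def get_title(real_url):
    # Fetch the article page itself; deliberately no Cookie in the headers.
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
    }
    resp = requests.get(real_url, headers=headers)
    html = etree.HTML(resp.text)
    # The title sits in the element with id="activity-name"
    nodes = html.xpath('//*[@id="activity-name"]/text()')
    return nodes[0].strip() if nodes else ''
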
4. Summary.
People online say Sogou bans by IP and by cookie, but after crawling a few hundred articles I have not run into either; I will add notes if it happens later. The code is attached below:

import requests
from lxml import etree
import re
import urllib3

# Pitfalls: the redirect jump page, possible IP / cookie bans


urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def get_index(start_url, page):
    # Request one page of Sogou WeChat search results and yield the URL of each result.
    params = {
        'query': '美女',
        '_sug_type_': '',
        'sut': '1828',
        'lkt': '1,1597298703444,1597298703444',
        's_from': 'input',
        '_sug_': 'y',
        'type': '2',
        'sst0': '1597298703546',
        'page': page,
        'ie': 'utf8',
        'w': '01019900',
        'dr': '1',
    }
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': 'ABTEST=5|1597380273|v1; SNUID=59BBCE9AEAEF4626F956F592EA96FE70; IPLOC=CN3709; SUID=B3512470160DA00A000000005F3616B1; JSESSIONID=aaapSleD9Qep4fAnwHYox; SUV=0004E05A702451B35F3616B1A47C7070; SUID=B35124706A13A00A000000005F3616B1',
        'Host': 'weixin.sogou.com',
        'Referer': 'https://weixin.sogou.com/weixin?query=%E7%BE%8E%E5%A5%B3&_sug_type_=&sut=1828&lkt=1%2C1597298703444%2C1597298703444&s_from=input&_sug_=y&type=2&sst0=1597298703546&page=4&ie=utf8&w=01019900&dr=1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-User': '?1',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
    }
    response = requests.get(url=start_url, params=params, headers=headers, verify=False)

    if response.status_code == 200:
        html = etree.HTML(response.text)
        # Each result links to an intermediate Sogou jump page, not the article itself
        urls = html.xpath('//ul[@class="news-list"]/li//h3/a/@href')
        for url in urls:
            url = 'https://weixin.sogou.com/' + url
            print(url)
            yield url
    else:
        print('failed to get index page')


def get_real_url(url):
    # Follow the Sogou jump page and reassemble the real article URL from its JavaScript.
    headers = {
        'Host': 'weixin.sogou.com',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-User': '?1',
        'Sec-Fetch-Dest': 'document',
        'Referer': 'https://weixin.sogou.com/weixin?type=2&s_from=input&query=%E7%BE%8E%E5%A5%B3&ie=utf8&_sug_=n&_sug_type_=',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'Cookie': 'SUID=D85024704A21A00A000000005EFDF58C; SUV=0035E9CD702450D85EFDF58D4E7EB415; weixinIndexVisited=1; LCLKINT=121; LSTMV=468%2C24; pgv_pvi=2192807936; ABTEST=0|1596950375|v1; IPLOC=CN3709; SNUID=C32054007175DCCBC54D7C1071164832; JSESSIONID=aaaMv1RDeodhj5yAwCYox; PHPSESSID=34n87c5popss5ckq1gcinpc9s2',
    }
    response = requests.get(url=url, headers=headers, verify=False)
    # The jump page returns JavaScript that builds the real article URL piece
    # by piece (url += '...';). Collect the fragments, join them, and drop the
    # '@' marker the script inserts.
    pattern = re.compile(r"\+=.*?'(.*?)';", re.S)
    url_list = re.findall(pattern, response.text)
    real_url = ''.join(url_list)
    real_url = real_url.replace('@', '')
    return real_url


def get_detail(url):
    # Fetch the article page itself and extract the title.
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'cache-control': 'max-age=0',
        'if-modified-since': 'Fri, 14 Aug 2020 12:33:50 +0800',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'cross-site',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
    }
    response = requests.get(url, headers=headers, verify=False)

    html = etree.HTML(response.text)
    # The article title lives in the element with id="activity-name"
    title_nodes = html.xpath('//*[@id="activity-name"]/text()')
    title = title_nodes[0].strip() if title_nodes else ''
    result = {'title': title}
    return result


if __name__ == '__main__':
    start_url = 'https://weixin.sogou.com/weixin'
    for page in range(1, 11):
        urls = get_index(start_url=start_url, page=page)
        for url in urls:
            real_url = get_real_url(url)
            result = get_detail(real_url)
            print(result)
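
If the IP or cookie ban does show up, the easiest first step is to throttle the main loop, for example with a short pause between articles (a sketch; the run described above did not need it):

import time

for url in urls:
    real_url = get_real_url(url)
    result = get_detail(real_url)
    print(result)
    time.sleep(2)  # brief pause between articles to reduce the chance of a ban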