# 代码成果演示 (code demo)
# Gitee源码 (Gitee source)

# -*- coding: utf-8 -*-
# Version: Python 3.9.7
# Author: TRIX
# Date: 2021-10-04 17:36:05
# Use: scrape selected fields from a Weibo user's feed into blogs.txt;
#      request headers are parsed from the clipboard once and cached in headers.json.
from urllib.parse import urlencode

import requests
import pyperclip
import json
from dateutil.parser import parse  # parses Weibo's timestamp strings


def getHeaders(noKeys=None, extraStripChars=None, doPrint=False):
    """Build the request-headers dict from the clipboard, or load the cached copy.

    Before the first run, copy the full request headers from the browser
    (F12 -> Network -> F5 -> any entry -> Headers -> Request Headers -> copy all).
    The parsed result is written to headers.json and reused on later runs.

    noKeys: space-separated header names to drop from the parsed headers.
    extraStripChars: extra characters stripped from each parsed key/value
        (defaults to [':']).
    doPrint: print the headers and the dropped pairs; only meaningful on the
        first run, when headers.json is created.
    Returns the headers dict, or None if no cache exists and the clipboard
    does not contain request headers.
    """
    if extraStripChars is None:  # avoid a shared mutable default argument
        extraStripChars = [':']
    strs = pyperclip.paste()  # raw clipboard text
    headers = {}
    if 'Mozilla' not in strs:  # clipboard does not look like request headers
        try:
            with open('headers.json', 'r', encoding='utf-8') as f:  # load cached headers
                headers = json.loads(f.read())
        except FileNotFoundError:
            return print('请至多残缺复制一次申请标头信息\n复制办法:f12-网络-f5-名称-任意一个条目-标头-申请标头-复制所有')
    if not headers:
        # Parse clipboard text into a headers dict and cache it as JSON.
        popKvs = []  # fix: always defined, so doPrint works even without noKeys

        def stripChars(word):  # strip surrounding whitespace plus extra chars
            word = word.strip()
            for char in extraStripChars:
                word = word.strip(char)
            return word

        keysValues = []
        for keyOrValue in strs.split(': '):  # split on ': ' and on line breaks
            for kOrV in keyOrValue.split('\r\n'):
                keysValues.append(stripChars(kOrV))
        # Pair up key/value tokens; the len-1 bound guards an odd-length list.
        for i in range(0, len(keysValues) - 1, 2):
            headers[keysValues[i]] = keysValues[i + 1]
        if noKeys:  # drop the requested keys, ignoring ones that are absent
            for key in noKeys.split():
                key = stripChars(key)
                if key in headers:
                    popKvs.append(f"'{key}':'{headers.pop(key)}'")
        # ensure_ascii=False keeps Chinese readable; indent=2 for a diffable cache file.
        json_data = json.dumps(headers, ensure_ascii=False, indent=2)
        with open('headers.json', 'w', encoding='utf-8') as h:
            h.write(json_data)
        print('headers信息已贮存至headers.json')
        if doPrint:
            print('headers={')
            for k, v in headers.items():
                print(f"'{k}':'{v}',")
            print('}')
            if popKvs:
                print('\n去除headers的键值对如下:')
                for kv in popKvs:
                    print(kv)
                print('\n')
    return headers


def getPage(page, uid=1618051664):
    """Fetch one page of a user's Weibo feed; return parsed JSON or None.

    page: 1-based feed page number.
    uid: Weibo user id (defaults to the original hard-coded account).
    """
    params = {
        'uid': uid,
        'page': page,  # page index drives feed pagination
        'feature': 0,
    }
    url = 'https://weibo.com/ajax/statuses/mymblog?' + urlencode(params)
    try:
        response = requests.get(url, headers=headers)
        print(f'正在尝试爬取数据:{url}响应码:{response.status_code}')
        if response.status_code == 200:
            return response.json()
    except requests.ConnectionError as e:
        print('爬取失败:', e.args)


def getLongText(urlid):
    """Fetch the full text of a long-form post; return parsed JSON or None."""
    url = 'https://weibo.com/ajax/statuses/longtext?id=' + urlid
    try:
        response = requests.get(url, headers=headers)
        print(f'检测到长文本 正在尝试爬取数据:{url},响应码:{response.status_code}')
        if response.status_code == 200:
            return response.json()
    except requests.ConnectionError as e:
        print('爬取失败:', e.args)


def getblogs(json):
    """Extract per-post fields from one feed-page JSON payload.

    json: the dict returned by getPage() (shadows the json module here, but
        the module is not used in this function).
    Returns a list of per-post dicts; empty list when the payload is falsy
    (fix: the original returned None and crashed the caller).
    """
    if not json:
        return []
    blogs = json.get('data').get('list')
    articles = []
    for blog in blogs:
        info = {}  # extracted fields for this post
        info['id'] = blog.get('idstr')
        # Normalize the timestamp and drop the '+0800' timezone suffix.
        info['createTime'] = str(parse(blog.get('created_at'))).split('+')[0]
        if blog.get('isLongText'):  # long posts need a second request for full text
            longText = getLongText(blog.get('mblogid'))
            if longText:
                longText = longText.get('data')
                info['text'] = longText.get('longTextContent')
                topicStruct = longText.get('topic_struct')
                if topicStruct:
                    info['topics'] = ''
                    for topic in topicStruct:
                        info['topics'] += topic.get('topic_title') + ' '
                else:
                    info['topics'] = '无'  # fix: was the typo key 'topic'
            else:
                # Fix: fall back to the preview text when the long-text fetch
                # fails, so info['text'] is always set.
                info['text'] = blog.get('text_raw')
        else:
            info['text'] = blog.get('text_raw')
        if blog.get('retweeted_status'):  # post contains a repost
            info['text'] += '该微博含转载微博 内容略'
        if blog.get('page_info'):  # fix: was info.get(), which is always falsy
            info['text'] += '该微博含视频 或 图片 内容略'
        tagStruct = blog.get('tag_struct')
        if tagStruct:
            info['tags'] = ''
            for tag in tagStruct:
                info['tags'] += tag.get('tag_name') + ' '
        else:
            info['tags'] = '无'
        topicStruct = blog.get('topic_struct')
        if topicStruct:
            info['topics'] = ''
            for topic in topicStruct:
                info['topics'] += topic.get('topic_title') + ' '
        else:
            info['topics'] = '无'
        info['likesCount'] = blog.get('attitudes_count')
        info['commentsCount'] = blog.get('comments_count')
        info['repostsCount'] = blog.get('reposts_count')
        # Decode the escaped '>' the feed HTML-encodes. NOTE(review): the
        # original had the no-op replace('>', '>') and strip('') — presumably
        # a '&gt;' mangled during copy/paste; confirm against live data.
        info['text'] = info['text'].replace('&gt;', '>')
        if 'http' in info['text']:  # split a trailing media link out of the text
            textAndUrl = info['text'].split('http')
            info['text'] = textAndUrl[0]
            info['videoUrl'] = 'http' + textAndUrl[-1]
        else:
            info['videoUrl'] = '无'
        articles.append(info)
    return articles


def getPageblogs(pages):
    """Scrape `pages` feed pages and write the extracted fields to blogs.txt."""
    with open('blogs.txt', 'w', encoding='utf-8') as f:
        # .get() avoids a KeyError when 'referer' was stripped from the headers
        f.write('微博页面链接:{}\n'.format(headers.get('referer')))
        f.write(f'共抓取了{pages}页微博相干信息,抓取信息如下:\n\n')
        for n in range(pages):
            articles = getblogs(getPage(n + 1)) or []  # failed page -> skip its posts
            f.write(f'---第{n+1}页---\n')
            for i, a in enumerate(articles):
                f.write(f'---第{i+1}个微博---\n')
                f.write('微博id:{}\n'.format(a['id']))
                f.write('公布工夫:{}\n'.format(a['createTime']))
                f.write('内容:{}\n'.format(a['text']))
                f.write('视频链接:{}\n'.format(a['videoUrl']))
                f.write('微博标签:{}\n'.format(a['tags']))
                f.write('微博主题:{}\n'.format(a['topics']))
                f.write('点赞数:{}\n'.format(a['likesCount']))
                f.write('评论数:{}\n'.format(a['commentsCount']))
                f.write('转载数:{}\n'.format(a['repostsCount']))
                f.write('\n')


headers = getHeaders(noKeys='path')
getPageblogs(3)