关于python:使用Python同步异步爬取某视频

8次阅读

共计 4536 个字符,预计需要花费 12 分钟才能阅读完成。

某视频是什么想必大家多少都有所理解,最开始接触某视频时,我曾被很多视频打动到,甚至当初,也是同样,正如我始终强调的,“所有的技巧,都抵不上一个实在故事的重量”。那触动人心的感觉,的确让人们在深切体悟着这个世界!理解这个世界!

而人生在世,也不过就是感知世界,适应世界,改变世界。从这一点上,我倒挺喜欢某视频的。因为它拓宽了我们的视野,让我们知道了世界上原来还有这样的事情存在。此外,某视频还能时刻提醒我们,世间百态,世间苦难,让我们觉得自己其实是幸福的也挺好。

综上简略地论述了一下,引出我们今天想要爬取的目标,即爬取某视频上面的视频。分享内容主要分为同步爬取和异步爬取,并且对爬取时间做了对比,因为代码的难度也不是很大,所以不做过多赘述。异步使用的是 asyncio,在前些篇也有做过相关的练习:~ 异步爬取某者荣耀 ~、~ 为什么使用异步来写爬虫 ~,有兴趣的可以再去看看,做做练习。

同步代码如下:


# coding:utf-8
# __auth__ = "maiz"
import os
import re
import random
import requests
from datetime import datetime
from lxml import etree


class Sync(object):
    """Synchronous pearvideo scraper.

    Fetches the category page, resolves the real video URL for every listed
    item via the site's videoStatus.jsp ajax endpoint, and downloads the
    videos one by one into ``download_folder``.
    """

    # Desktop User-Agent so the site serves the regular HTML page.
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
    }

    # Directory the downloaded .mp4 files are written into.
    download_folder = "./videos"

    def run(self):
        """Entry point: fetch the category listing and download each video serially."""
        url = 'https://www.pearvideo.com/category_5'

        if os.path.exists(self.download_folder):  # check whether the folder already exists
            print("文件夹已存在")
        else:
            os.mkdir(self.download_folder)   # create it if missing
            print("文件夹已创立")

        resp = requests.get(url, headers=self.headers)
        # Bug fix: the original did `raise requests.RequestException` (a bare
        # class with no message, and `lis` stayed unbound on that branch).
        # raise_for_status() raises a descriptive HTTPError, which is a
        # RequestException subclass, so existing callers still catch it.
        resp.raise_for_status()

        tree = etree.HTML(resp.text)
        lis = tree.xpath('//ul[@id="categoryList"]/li')

        for li in lis:
            filename, download_url = self.parse_video_url(li)
            # Bug fix: the f-string placeholder was missing, so the literal
            # text "(unknown)" was printed instead of the file name.
            print(f"==> 开始下载 {filename}")
            self.download(filename, download_url)

    def parse_video_url(self, li) -> tuple:
        """Parse one <li> of the category list.

        Returns:
            tuple: ``(filename, download_url)`` — the sanitized "<title>.mp4"
            name and the real, downloadable video URL.
        """
        title = li.xpath('./div/a/div[2]/text()')[0].strip('“”!?').replace("|", "").replace(" | ","")
        # The href looks like "video_<contId>"; the numeric id drives the ajax call.
        page = str(li.xpath('./div/a/@href')[0]).split('_')[1]
        ajax_url = 'https://www.pearvideo.com/videoStatus.jsp?'
        params = {'contId': page, 'mrd': random.random()}
        headers = self.headers.copy()
        # The ajax endpoint validates the Referer against the video page URL.
        headers.update({'Referer': 'https://www.pearvideo.com/video_' + page})
        resp = requests.get(ajax_url, headers=headers, params=params)
        ajax_text = resp.json()
        download_url = ajax_text["videoInfo"]['videos']["srcUrl"]
        # srcUrl embeds a 13-digit millisecond timestamp that must be replaced
        # with "cont-<contId>" to obtain a URL that actually serves the file.
        download_url = re.sub(r"\d{13}", f"cont-{page}", download_url)

        return title + ".mp4", download_url

    def download(self, filename: str, url: str):
        """Download one video and write it into ``download_folder``."""
        resp = requests.get(url, headers=self.headers)
        # Bug fix: descriptive HTTPError instead of a bare RequestException class.
        resp.raise_for_status()
        with open(os.path.join(self.download_folder, filename), "wb") as fb:
            fb.write(resp.content)
        # Bug fix: missing f-string placeholder (printed "(unknown)" before).
        print(f"已下载:{filename}")
        print("-" * 60)


if __name__ == '__main__':
    # Time the whole synchronous run and report the elapsed seconds.
    started = datetime.now()
    Sync().run()
    elapsed = datetime.now() - started
    print(elapsed.total_seconds(), "秒")

小编最近发现一个免费领取代理 ip 的平台,有需要的可以点击领取下 PC 端:http://i0k.cn/4KzbY 移动端 http://i0k.cn/53dbO

异步代码如下:


# coding:utf-8
# __auth__ = "maiz"
import os
import re
import random
import asyncio
import aiofiles
import aiohttp

from datetime import datetime
from lxml import etree


class Spider(object):
    """Asynchronous pearvideo scraper.

    Collects every (filename, download URL) pair from the category page
    concurrently, then downloads all videos concurrently with aiohttp,
    writing them into ``download_folder`` via aiofiles.
    """

    # Desktop User-Agent so the site serves the regular HTML page.
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
    }

    # Directory the downloaded .mp4 files are written into.
    download_folder = "./videos"

    def __init__(self):
        # Bug fix: the original stored (filename, url) pairs in a class-level
        # list, which is shared — and grows — across Spider instances.
        # Keep the queue per instance instead.
        self.urls = []

    async def main(self):
        """Collect all video URLs, then download them all concurrently."""
        await self._get_video_urls()

        downloader = [asyncio.create_task(self._download_video(filename, url))
                      for filename, url in self.urls]
        await asyncio.gather(*downloader)

    async def _get_video_urls(self):
        """Fetch the category page and parse every listed video concurrently."""
        url = 'https://www.pearvideo.com/category_5'

        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with session.get(url) as response:
                # Bug fix: `raise aiohttp.ClientResponseError` instantiated the
                # exception without its required arguments (TypeError at
                # runtime); raise_for_status() constructs it correctly.
                response.raise_for_status()
                text = await response.text()

        tree = etree.HTML(text)
        lis = tree.xpath('//ul[@id="categoryList"]/li')

        # Bug fix: asyncio.wait() no longer accepts bare coroutines
        # (deprecated in 3.8, removed in 3.11); gather() schedules them.
        await asyncio.gather(*(self._parse_video_url(li) for li in lis))

    async def _parse_video_url(self, li):
        """Parse one <li>: resolve the real download URL and queue it in ``self.urls``."""
        title = li.xpath('./div/a/div[2]/text()')[0].strip('“”!?').replace("|", "").replace(" | ","")

        # The href looks like "video_<contId>"; the numeric id drives the ajax call.
        page = str(li.xpath('./div/a/@href')[0]).split('_')[1]

        ajax_url = 'https://www.pearvideo.com/videoStatus.jsp?'
        params = {'contId': page, 'mrd': random.random()}
        headers = self.headers.copy()
        # The ajax endpoint validates the Referer against the video page URL.
        headers.update({'Referer': 'https://www.pearvideo.com/video_' + page})

        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(ajax_url, params=params) as response:
                ajax_text = await response.json()
                download_url = ajax_text["videoInfo"]['videos']["srcUrl"]
                # srcUrl embeds a 13-digit millisecond timestamp that must be
                # replaced with "cont-<contId>" to become downloadable.
                download_url = re.sub(r"\d{13}", f"cont-{page}", download_url)
                self.urls.append((title + ".mp4", download_url))

    async def _download_video(self, filename: str, url: str):
        """Download one video and write it into ``download_folder``."""
        async with aiohttp.ClientSession(headers=self.headers) as session:
            # Bug fix: the f-string placeholder was missing, so the literal
            # text "(unknown)" was printed instead of the file name.
            print(f"开始下载 => {filename}")
            # The session already carries self.headers; no need to pass them again.
            async with session.get(url) as response:
                content = await response.read()

        async with aiofiles.open(os.path.join(self.download_folder, filename), "wb") as fb:
            await fb.write(content)

        # Bug fix: missing placeholder, and the old message appended a second
        # ".mp4" even though filename already ends with it.
        print(f"已下载 => {filename}")

    def run(self):
        """Synchronous entry point: prepare the output folder and drive the event loop."""

        if os.path.exists(self.download_folder):  # check whether the folder already exists
            print("文件夹已存在")
        else:
            os.mkdir(self.download_folder)   # create it if missing
            print("文件夹已创立")

        # Bug fix: get_event_loop()/run_until_complete is deprecated for this
        # pattern; asyncio.run() creates, runs, and closes a fresh loop.
        asyncio.run(self.main())


if __name__ == '__main__':
    # Time the whole asynchronous run and report the elapsed seconds.
    started = datetime.now()
    Spider().run()
    elapsed = datetime.now() - started
    print("=" * 40)
    print(elapsed.total_seconds(), "秒")

右键运行代码,即可在当前文件夹下,生成一个 videos 文件夹,并下载相关的视频文件。快去看看有哪些你感兴趣的内容吧,代码获取后台回复:"某视频下载"。

以上就是今天给大家分享的内容。

正文完
 0