On Python crawlers: finding a site's entry URLs via the Sitemap in robots.txt

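The idea is simple: a site's robots.txt usually advertises its sitemap with a Sitemap: directive, and the sitemap XML then lists every entry URL inside <url><loc>…</loc></url> elements. For example, the declaration the script below extracts looks like this:

Sitemap: https://www.qidian.com/newsitemap2/pcfixedsitemap.xml

The script fetches robots.txt, pulls out that URL, then uses an XPath expression to collect all the <loc> values.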


# -*- coding:utf-8 -*-

import requests
from lxml import etree


def get_sitemapinfo(robots_url):
    """
    Purpose: fetch the Sitemap URL declared in robots.txt.
    Returns a sitemap URL, e.g. https://www.qidian.com/newsitemap2/pcfixedsitemap.xml
    :param robots_url: URL of the site's robots.txt
    :return: https://www.qidian.com/newsitemap2/pcfixedsitemap.xml
    """
    response = requests.get(robots_url).text
    # str.split() never raises when the keyword is absent, so the original
    # try/except could not detect a missing Sitemap; test for it explicitly.
    for line in response.splitlines():
        if line.lower().startswith("sitemap:"):
            return line.split(":", 1)[1].strip()
    print("This site's robots.txt does not declare a Sitemap")

def get_links(sitemap_url, rule):
    """
    Purpose: fetch every entry URL listed in the sitemap.
    Returns links, e.g. ['https://www.qidian.com/all_pub/chanId13700/', 'https://www.qidian.com/all_pub/chanId14100/', 'https://www.qidian.com/all_pub/chanId14400/']
    :param sitemap_url: URL of the sitemap
    :param rule: XPath expression to match against
    :return: list of entry URLs
    """
    response = requests.get(sitemap_url)
    # Parse with lxml's HTML parser: it drops the sitemap's XML namespace,
    # which is what lets the bare XPath "//url/loc/text()" match.
    r = etree.HTML(response.content)
    links = r.xpath(rule)
    return links
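
# Sitemap files are namespaced XML; the HTML parser above sidesteps the
# namespace entirely. For comparison, a namespace-aware sketch
# (get_links_xml is a hypothetical helper, assuming the standard sitemap
# namespace http://www.sitemaps.org/schemas/sitemap/0.9):
def get_links_xml(sitemap_url):
    ns = {"sm": "http://www.sitemaps.org/schemas/sitemap/0.9"}
    response = requests.get(sitemap_url)
    # Parse as real XML and register the namespace for the XPath query
    root = etree.fromstring(response.content)
    return root.xpath("//sm:loc/text()", namespaces=ns)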

if __name__ == "__main__":
    ## Entry point
    # Location of robots.txt
    url = "https://www.qidian.com/robots.txt"
    sitemap_url = get_sitemapinfo(robots_url=url)
    # get_sitemapinfo() returns None when no Sitemap is declared
    if sitemap_url:
        links = get_links(sitemap_url=sitemap_url, rule="//url/loc/text()")
        print(f"links:{links}")
