Python is powerful: with very little code you can scrape all kinds of web resources, such as contact/phone information or paid (VIP) stock-material sites.

The script below scrapes trademark announcement data from the China Trademark Office site (中国商标网). The company names it collects can then be looked up on Tianyancha (天眼查) to get the legal representative's phone number and the company address, which speeds up sales prospecting.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import csv
import json
import time
import requests
import pymysql
import xlwt                      # write .xls files
import xlrd                      # read the existing row count
from xlutils.copy import copy    # convert an xlrd workbook into a writable xlwt one
from lxml import html
etree = html.etree

from db.name.header_api import Headers, HeadersCookie, sb_num, get_user_agent_pc

num_text = sb_num        # announcement issue number
rows = []                # batch of rows to write
sb_num_list = []         # registration numbers, used later to fetch the trademark images
gs_name_list = []        # deduplicated rows of company info
gs_name_list1 = []       # company names, used later to look up phone numbers and addresses
gs_name_list2 = []       # rows with company name, registration number and trademark name
gs_name_list3 = []       # helper list used for deduplication


def str_true(s):
    a1 = re.search('[A-Za-z]', s)   # does the string contain any Latin letters?
    if a1:
        return False                # at least one letter found, reject the name
    else:
        return True                 # no letters, the name can be used
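A quick sanity check of the filter (the sample names below are made up): names written purely in Chinese pass, names containing Latin letters are rejected.

```python
print(str_true('北京某某科技有限公司'))   # True  - no Latin letters, keep
print(str_true('ABC贸易有限公司'))        # False - contains letters, skip
```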
Write the header row into a new .xls file:
def write_excel_xls(path, sheet_name, value):
    index = len(value)                          # number of rows to write
    workbook = xlwt.Workbook()                  # create a new workbook
    sheet = workbook.add_sheet(sheet_name)      # add a sheet to the workbook
    for i in range(0, index):
        for j in range(0, len(value[i])):
            sheet.write(i, j, value[i][j])      # write the cell at row i, column j
    workbook.save(path)                         # save the workbook
    print('xls file written successfully!')
Append rows to an existing .xls file:
def write_excel_xls_append(path, value):
    index = len(value)                               # number of rows to write
    workbook = xlrd.open_workbook(path)              # open the existing workbook
    sheets = workbook.sheet_names()                  # names of all sheets in the workbook
    worksheet = workbook.sheet_by_name(sheets[0])    # take the first sheet
    rows_old = worksheet.nrows                       # number of rows already present
    new_workbook = copy(workbook)                    # convert the xlrd workbook into an xlwt one
    new_worksheet = new_workbook.get_sheet(0)        # first sheet of the converted workbook
    for i in range(0, index):
        for j in range(0, len(value[i])):
            new_worksheet.write(i + rows_old, j, value[i][j])  # append below the existing rows
    new_workbook.save(path)                          # save the workbook
    print('xls data appended successfully!')
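The two helpers are meant to be used together: create the file with a header row once, then keep appending data rows. A minimal usage sketch, assuming the helpers above are defined (the file name and the data row here are made up; the real script builds them from the announcement data):

```python
demo_path = 'demo_companies.xls'                                  # hypothetical file name
header = [["图片编号", "公司名", "注册号", "标名"]]                    # same header row the script uses
data_rows = [["1", "某某科技有限公司", "12345678", "某某商标"]]        # made-up example row

if not os.path.exists(demo_path):
    write_excel_xls(demo_path, '公司名列表', header)                 # create the file and header once
write_excel_xls_append(demo_path, data_rows)                       # append rows on every run
```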
def sb_cs(pages):
    sb_num_list.clear()
    gs_name_list.clear()
    gs_name_list1.clear()
    gs_name_list2.clear()
    gs_name_list3.clear()
    headers2 = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Cookie': 'UM_distinctid=16eac05d31a122-071a188af83977-32365f08-100200-16eac05d3232e2;tmas_cookie=51947.7681.15402.0000',
        'Host': 'wsgg.sbj.cnipa.gov.cn:9080',
        'Origin': 'http://wsgg.sbj.cnipa.gov.cn:9080',
        'Referer': 'http://wsgg.sbj.cnipa.gov.cn:9080/tmann/annInfoView/annSearch.html?annNum=',
        # a fixed desktop User-Agent; get_user_agent_pc() can be swapped in to pick a random one
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
    }
    datas = {
        'page': pages,
        'rows': '100',
        'annNum': num_text,        # announcement issue number
        'annType': 'TMZCSQ',
        'tmType': '',
        'coowner': '',
        'recUserName': '',
        'allowUserName': '',
        'byAllowUserName': '',
        'appId': '',
        'appIdZhiquan': '',
        'bfchangedAgengedName': '',
        'changeLastName': '',
        'transferUserName': '',
        'acceptUserName': '',
        'regNum': '',
        'regName': '',
        'tmName': '',
        'intCls': '',
        'fileType': '',
        'totalYOrN': 'true',
        'appDateBegin': '',        # application date range start
        'appDateEnd': '',          # application date range end
        'agentName': '',           # agent name
    }
    s = requests.session()
    urls1 = 'http://wsgg.sbj.cnipa.gov.cn:9080/tmann/annInfoView/annSearchDG.html'  # search endpoint
    response = s.post(urls1, data=datas, headers=headers2)  # POST the search parameters
    print(response)
    jsons = json.loads(response.text)
    info = jsons['rows']
    print('total records:', len(info))
    for i in range(len(info)):
        # NOTE: the JSON field names below are assumptions - check the keys of the
        # rows actually returned by the endpoint and adjust them accordingly.
        item1 = info[i]['annNum']        # issue number (assumed key)
        item2 = info[i]['annDate']       # publication date (assumed key)
        item3 = info[i]['annTypeName']   # announcement type (assumed key)
        item4 = info[i]['regNum']        # registration number (assumed key)
        item5 = info[i]['regName']       # company name (assumed key)
        item6 = info[i]['tmName']        # trademark name (assumed key)
        item7 = info[i]['page']          # image number (assumed key)
        item8 = info[i]['id']            # record id (assumed key)
        print(item7, type(item7), item4, type(item4), type(str(item7)), item7)
        # reject names that contain dots, digits or letters
        if item3 == '商标初步审定公告' and len(item5) > 11 and str_true(item5):  # filter
            sb_num_list.append(item4)
            item_list = [str(item7), item5, item4, item6]
            gs_name_list1.append(item5)
            gs_name_list2.append(item_list)
            print('record', i + 1, ':', item1, item2, item3, item4, item5, item6, str(item7))
    print('registration numbers collected:', len(sb_num_list))
    print('companies collected:', len(gs_name_list2))
    for index, each in enumerate(gs_name_list1):
        if each not in gs_name_list3:        # keep only the first occurrence of each company
            gs_name_list3.append(each)
            gs_name_list.append(gs_name_list2[index])
            print(index + 1, each, 'page', str(pages))
    book_name_xls = os.getcwd() + '/text' + '/{}.xls'.format(num_text + '公司名' + str(pages))
    value_title = [["图片编号", "公司名", "注册号", "标名"], ]
    if os.path.exists(book_name_xls):
        write_excel_xls_append(book_name_xls, gs_name_list)
    else:
        write_excel_xls(book_name_xls, '公司名列表', value_title)
        write_excel_xls_append(book_name_xls, gs_name_list)
if __name__ == "__main__":
    # Step 1: collect the filtered company information
    for i in range(1, 41):       # pages 1 to 40
        pages = i
        sb_cs(pages)             # fetch one page and fill the lists
        print('page', str(pages), 'done')
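The code above only covers step 1. Step 2, looking up each collected company on Tianyancha for the legal representative's phone number and address, is not shown; below is only a rough, hypothetical sketch of how that lookup could start. The search URL, the logged-in Cookie requirement and the parsing step are assumptions, and Tianyancha changes its page structure and anti-scraping measures frequently.

```python
# Hypothetical sketch of step 2 - the URL pattern and parsing are assumptions.
import requests
from lxml import html

def lookup_company(name, cookie):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
        'Cookie': cookie,                                    # a logged-in session cookie is usually required
    }
    url = 'https://www.tianyancha.com/search?key=' + name    # assumed search URL
    resp = requests.get(url, headers=headers, timeout=10)
    tree = html.fromstring(resp.text)
    # Placeholder parse: collect the result links; the real phone/address fields
    # have to be located by inspecting the live page.
    return tree.xpath('//a/@href')

# for row in gs_name_list:                # rows collected in step 1
#     print(lookup_company(row[1], cookie='PASTE_A_LOGGED_IN_COOKIE'))
```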