import requests as rqs
import bs4
import webbrowser
# Compose the URL for the "dishonest debtor" (失信) search
root_url = "https://shixin.tianyancha.com/"
search_target = "gs_" + "供应链"
divide_sign = "/"
operator = "search"
number_pane = 98  # how many pages to crawl
start_pane = 1  # starting page
url = root_url + operator + divide_sign + search_target + divide_sign + "p" + "{:d}"  # page-URL template
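# With the values above, page 1 resolves to (assuming the site still uses
# this path scheme): https://shixin.tianyancha.com/search/gs_供应链/p1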
# Compose the URL for the regular company search (守信)
# root_url = "https://www.tianyancha.com/"
# search_target = "key=" + "供应链"
# params = "&base=" + "gs"
# divide_sign = "/"
# operator = "search"
# number_pane = 98  # how many pages to crawl
# start_pane = 1  # starting page
# url = root_url + operator + divide_sign + "p" + "{:d}?" + search_target + params  # page-URL template
# print(url)
# File where the scraped data will be saved
save_file_name = "甘肃.txt"
# Set up the request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
}
# Tianyancha requires login, so log in first in a browser, copy the cookie
# value, and save it to a local file; the script loads it from there.
cookie_path = "C:\\Users\\Administrator\\Desktop\\cookie.txt"
with open(cookie_path, "r", encoding="utf-8") as f:  # open the cookie file
    data = f.read().strip()  # read the cookie string; strip the trailing newline
print(data)
headers['Cookie'] = data
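# What cookie.txt is assumed to contain: the raw value of the Cookie request
# header copied from the browser's developer tools after logging in, as a
# single line of "name=value" pairs, e.g.
#   name1=value1; name2=value2; ...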
session = rqs.Session()
print("爬取的网页为:", url)
# Fetch the HTML for the given URL and parse it with bs4
def getHtmlFromUrl(index_url):
    # Send the request and get the response
    response = session.get(index_url, headers=headers)
    soup = bs4.BeautifulSoup(response.text, "lxml")
    result = soup.find_all("a", class_="name")
    print("url: " + index_url)
    if len(result) == 0:
        # If the page comes back with zero matches, the site most likely
        # suspects the client is a bot and is serving a CAPTCHA page
        print("Identified as a bot")
        # Open the page in a browser so the CAPTCHA can be solved manually
        webbrowser.open(index_url)
        return None
    print(len(result))
    return result
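# A minimal sketch (not part of the original script) of an alternative to
# aborting on detection: wait for the CAPTCHA to be solved in the browser,
# then retry the same page. The name getHtmlWithRetry and the max_retries
# parameter are hypothetical.
def getHtmlWithRetry(index_url, max_retries=3):
    for _ in range(max_retries):
        result = getHtmlFromUrl(index_url)
        if result is not None:
            return result
        # getHtmlFromUrl has already opened the page in a browser;
        # block here until the user confirms the CAPTCHA is solved
        input("Solve the CAPTCHA in the browser, then press Enter to retry...")
    return None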
save_path = "C:\\Users\\Administrator\\Desktop\\" + save_file_name
with open(save_path, 'a', encoding="utf-8") as save_file:
    for i in range(start_pane, number_pane + 1):
        result_list = getHtmlFromUrl(url.format(i))
        if result_list is None:
            # After solving the CAPTCHA, resume from this page by updating
            # the parameters at the top of the script
            raise Exception("Set the start_pane parameter to " + str(i))
        for k in result_list:
            # Write each company name to the file
            save_file.write(k.text)
            save_file.write("\n")
        # time.sleep(500)
print("Crawl finished")
Note that you need to log in and obtain a cookie before you can crawl; see the code for details. If you run into problems, feel free to discuss in the comments.
Thanks♪(・ω・)ノ, thanks for reading.
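The commented-out time.sleep(500) above hints at throttling. Here is a minimal sketch of a gentler per-page delay, assuming a random 2-5 second pause is enough to stay under the site's rate limits (the helper name polite_pause is hypothetical):

import random
import time

def polite_pause(low=2.0, high=5.0):
    # Pause for a random interval so request timing looks less bot-like
    time.sleep(random.uniform(low, high))

Calling polite_pause() once per iteration of the page loop, right after getHtmlFromUrl, spreads the 98 requests out instead of firing them back to back.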