百度秒懂百科(V百科)爬虫:通过知识列表接口抓取所有问题与回答,并保存到本地文本文件。
完整代码如下:
import requests
import re
from lxml import etree
import time
headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36 Edg/84.0.522.52'}
def get_url(v_url):
    """Fetch one list page of the vbaike knowledge API and crawl every article on it.

    v_url: full API URL for a single result page. The JSON response carries
    "link" fields pointing at individual article pages; each link is handed
    to get_info() for extraction.
    """
    # timeout so one stalled connection cannot hang the whole crawl
    response = requests.get(url=v_url, headers=headers, timeout=10)
    response.encoding = 'utf-8'
    page_json = response.text
    # pull every article link out of the raw JSON text; the links arrive
    # with escaped slashes (https:\/\/...), hence the backslash strip below
    links = re.findall('"link":"(.*?)"', page_json, re.S)
    for link in links:
        get_info(link.replace('\\', ''))
def get_info(url):
    """Download one article page, extract its title and body text, and persist them.

    url: article page URL produced by get_url(). Pages missing the expected
    title node are skipped instead of crashing the crawl.
    """
    response = requests.get(url=url, headers=headers, timeout=10)
    response.encoding = 'utf-8'
    tree = etree.HTML(response.text)
    # guard: some pages lack the title node — the old [0] index raised
    # IndexError and aborted the entire run on the first such page
    titles = tree.xpath('//*[@id="article"]/div[1]/text()')
    if not titles:
        return
    title = titles[0].replace('\n', '')
    # the article body sits in div[4] or div[3] depending on page layout,
    # so both are collected (presumably only one is non-empty — TODO confirm)
    paragraphs = (tree.xpath('//*[@id="article"]/div[4]/p/text()')
                  + tree.xpath('//*[@id="article"]/div[3]/p/text()'))
    # '@' is the field separator in the output record, so strip it from the
    # body; join() replaces the old quadratic string concatenation loop
    text = ''.join(p.replace('@', '') for p in paragraphs)
    write(title + '@' + text, title)
def write(main, key):
    """Append one scraped record to the output file and report progress.

    main: full "title@body" record line to persist.
    key:  the article title, used only in the progress message.
    """
    # 'with' guarantees the handle is closed even if f.write raises;
    # the original manual open/close leaked the handle on error
    with open("秒懂百科冷知识.txt", 'a', encoding="utf-8") as f:
        f.write(main + "\n")
    # show crawl progress on the console
    print("=======================\n" + key + "++采集并输出完毕" + "\n=======================\n")
    # throttle: be polite to the server between article downloads
    time.sleep(2)
# Entry point: walk list pages 1..39 (8 articles per page) and crawl each.
# The __main__ guard keeps an accidental import from firing 39 network
# fetches; the original also pre-set a dead `a=1` that the for-loop rebound.
if __name__ == '__main__':
    for page in range(1, 40):
        v_url = 'https://baike.baidu.com/api/vbaike/knowledgelist?count=8&page={}&keyWord='.format(page)
        print('=========================================\n' + '正在下载第{}页'.format(page) + '\n=========================================\n')
        get_url(v_url)
        print('=========================================\n' + '下载完成第{}页'.format(page) + '\n=========================================\n')
补充说明:以上代码经本人实际运行验证,可正常采集数据。