- 待查词汇下载地址:https://jhc001.lanzouw.com/iWAtlwcuixa(密码:bxp6)
- 爬虫代码:
import requests
from lxml import etree
import os
def spider(name):
    """Look up *name* on guoxuedashi.net: first in the word dictionary,
    then fall back to the historical-figure index, and dispatch to the
    matching detail-page parser.

    Prints '两者搜索结果均为空' when both searches are empty, and '异常'
    when any step raises.
    """
    try:
        # Word/idiom search endpoint.
        response = requests.get(
            'http://www.guoxuedashi.net/zidian/so.php?sokeyci='
            + name + '&submit=&kz=12&cilen=0')
        tree = etree.HTML(response.text)
        lis = tree.xpath('//div[@class="info_txt2 clearfix"]/a[1]/@href')
        if lis:
            r_lis = 'http://www.guoxuedashi.net' + lis[0]
            detail_page1(name, r_lis)
        else:
            # Fallback: historical-figure search.
            response = requests.get(
                'http://www.guoxuedashi.net/renwu/?sokeylishi=' + name)
            tree = etree.HTML(response.text)
            # BUG FIX: original called tree.xapth (typo) -> AttributeError,
            # which the bare except silently swallowed, so the fallback
            # branch never worked.
            lis = tree.xpath('//dl[@class="clearfix"]/dd[1]/a/@href')
            if lis:
                r_lis = 'http://www.guoxuedashi.net' + lis[0]
                detail_page2(name, r_lis)
            else:
                print('两者搜索结果均为空')
    # Narrowed from a bare `except:` (which also caught KeyboardInterrupt /
    # SystemExit).  Still broad on purpose: any network or parse failure
    # for one word should not abort the whole batch.
    except Exception:
        print('异常')
def detail_page1(name, r_lis):
    """Fetch a dictionary-entry page at *r_lis*, extract a short gloss,
    print it, and append it to the results file via save_data.
    """
    response = requests.get(r_lis)
    tree = etree.HTML(response.text)
    lis = tree.xpath('//div[@class="info_txt2 clearfix"]/p[2]/span/span/text()')
    if lis:
        # Keep only the first sentence (text up to the first Chinese full stop).
        detail = lis[0].split('。')[0]
    else:
        # BUG FIX: the original wrote 'xpath-a'|'xpath-b', which is Python's
        # bitwise-or applied to two str objects and raises TypeError.  An
        # XPath union must be written inside a single expression string.
        lis = tree.xpath(
            '//div[@class="info_txt2 clearfix"]/text()'
            ' | //div[@class="info_txt2 clearfix"]/font/span/text()')
        detail = lis[1] + '\n' + lis[2] + '\n' + lis[3]
    print(name + '\r\n' + detail)
    save_data(name, detail)
def detail_page2(name, r_lis):
    """Fetch a historical-figure page at *r_lis*, take the opening sentence
    of its third paragraph, print it, and persist it via save_data.
    """
    page = requests.get(r_lis)
    doc = etree.HTML(page.text)
    paragraphs = doc.xpath('//div[@class="info_content zj clearfix"]/span/p/text()')
    # First sentence only: cut at the first Chinese full stop.
    detail = paragraphs[2].split('。')[0]
    print(name + detail)
    save_data(name, detail)
def read_word():
    """Read one query word per line from ./words.txt and look each one up."""
    with open('./words.txt', 'r', encoding='utf-8') as fp:
        # Iterating the file object yields the same lines as readlines().
        for line in fp:
            spider(line.replace('\n', ''))
def save_data(name, detail):
    """Append one 'name:detail' line to ./results/results.txt.

    The file is opened in append mode so repeated calls accumulate results.
    """
    record = name + ':' + detail + '\n'
    with open('./results/results.txt', 'a', encoding='utf-8') as out:
        out.write(record)
def _main():
    """Entry point: prepare the output directory, then run the batch lookup."""
    # The results directory must exist before save_data appends to it.
    os.makedirs('./results', exist_ok=True)
    read_word()


if __name__ == '__main__':
    _main()
- 代码目前是纯单线程,效率很低,还有不少可以改进的地方,希望各位大神不吝赐教