# coded by 伊玛目的门徒
import re
import time
from concurrent.futures import ThreadPoolExecutor

import requests
from bs4 import BeautifulSoup

# Accumulators shared by all worker threads: article URLs and their titles.
urllist = []
titlelist = []

# time.clock() was removed in Python 3.8; perf_counter() is the documented
# replacement for measuring elapsed wall-clock intervals.
start = time.perf_counter()  # 计时-开始 (timing: start)

# Pretend to be a desktop browser so the site serves the normal page.
header = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.XXXX.XXX Safari/537.36'}
def do(i):
    """Scrape listing page *i* of hexun.com domestic futures news.

    Appends each article's URL to the global ``urllist`` and its title to
    ``titlelist``, then removes *i* from the global work queue ``list1`` so
    the retry loop in ``multithreading()`` knows the page is done.  On any
    failure *i* stays in ``list1`` and the page is retried on the next pass.
    """
    try:
        resp = requests.get(
            'http://futures.hexun.com/domestic/index-' + str(i) + '.html',
            headers=header,
            timeout=10,  # a hung request would otherwise stall the retry loop forever
        )
        resp.encoding = 'gbk'  # the site serves GBK-encoded Chinese text
        soup = BeautifulSoup(resp.text, "lxml")
        anchors = soup.select('div.temp01 ul li a[target="_blank"]')
        # Anchors come in pairs; the odd-indexed one carries the article link
        # and title (the original kept indices with x % 2 == 1).
        wanted = anchors[1::2]
        print('-----------------')
        # Read href/title straight off the parsed tags instead of running
        # regexes over str()-ified soup — same data, far less fragile.
        hrefs = [a.get('href') for a in wanted]
        titles = [a.get_text() for a in wanted]
        print(hrefs)
        urllist.extend(hrefs)
        print(titles)
        titlelist.extend(titles)
        list1.remove(i)
    except Exception as exc:
        # Keep the original best-effort semantics (failed pages stay queued
        # for retry), but don't silently hide the reason like a bare except.
        print('page {} failed: {}'.format(i, exc))
# 多线程 (multithreading)
def multithreading():
    """Drain the global page queue ``list1`` with a 10-worker thread pool.

    ``do()`` removes a page number from ``list1`` only on success, so the
    outer while-loop keeps re-submitting whatever pages remain until the
    queue is empty.  Returns the total number of task submissions across
    all passes (successes plus retried failures).
    """
    completed = 0  # renamed from ``sum`` to avoid shadowing the builtin
    while list1:
        with ThreadPoolExecutor(max_workers=10) as executor:
            # Map over a snapshot: workers mutate ``list1`` concurrently,
            # and iterating a list while it shrinks can skip elements.
            for _ in executor.map(do, list(list1)):
                completed += 1
    return completed
# Page numbers still to fetch (1..392); do() removes a page on success,
# so this list doubles as the retry queue.
list1 = list(range(1, 393))

total = multithreading()  # renamed from ``sum`` to avoid shadowing the builtin
print('还剩下{}页'.format(list1))

# 计时-结束 (timing: end) — time.clock() was removed in Python 3.8.
end = time.perf_counter()
print("爬取完成 用时:")
print(end - start)
print('总爬取 %d 页 ' % total)

# Failed extractions can leave None entries; filter them out in one pass
# instead of the quadratic ``while None in xs: xs.remove(None)`` loops.
titlelist = [t for t in titlelist if t is not None]
urllist = [u for u in urllist if u is not None]
print(titlelist)
print(urllist)

# Optional: dump the collected URLs to a text file (可作为TXT输出).
# with open("test.txt", "w") as f:
#     for thing in urllist:
#         f.write(thing)
#         f.write('\r\n')
# 演示视频 (demo video):
# https://www.bilibili.com/video/av80003976/
# python多线程爬虫和讯网 标题和正文URL效果展示
# 后续可看