Python爬取百思不得姐段子
利用urllib、random、re等模块
1 构建用户代理池(列表信息)
QQ浏览器:Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6788.400 QQBrowser/10.3.2843.400
谷歌浏览器:Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36
Microsoft Edge:Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362
本程序只利用这三种浏览器作为代理池的浏览器,在实际应用中可以加入更多的浏览器数据到列表中
import urllib
import urllib.request
import random
# Pool of User-Agent strings used to disguise the crawler as different
# browsers (QQ Browser / Chrome / Edge).  Add more entries to enlarge it.
#
# BUG FIX: the original list had NO commas between the string literals, so
# Python's implicit string concatenation merged all three into a single,
# invalid User-Agent value — random.choice() always returned that one
# merged string.  Commas restore the intended three-entry pool.
uapools = [
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6788.400 QQBrowser/10.3.2843.400",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362",
]
def UA(pools=None):
    """Install a global urllib opener with a randomly chosen User-Agent.

    Args:
        pools: Optional sequence of User-Agent strings to pick from.
            Defaults to the module-level ``uapools`` list, so existing
            ``UA()`` callers are unaffected.

    Returns:
        The ``OpenerDirector`` that was installed (useful for testing;
        the original returned ``None``, so this is backward-compatible).
    """
    if pools is None:
        pools = uapools  # module-level pool defined above
    opener = urllib.request.build_opener()
    thisua = random.choice(pools)
    opener.addheaders = [("User-Agent", thisua)]
    # Must be installed globally before urllib.request.urlopen() picks it up.
    urllib.request.install_opener(opener)
    return opener
2 爬取段子内容
其中利用到了正则表达式,正则表达式之后会专门写一篇博文介绍,这里不多介绍了。
import re
# Crawl the first 5 pages and print every joke found on each page.
for i in range(0, 5):
    UA()  # rotate to a fresh random User-Agent before each request
    url = "http://www.budejie.com/" + str(i + 1)  # dropped useless +""
    try:
        data = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
        # Each joke's text is the anchor body inside a j-r-list-c-desc div.
        pat = '<div class="j-r-list-c-desc">.*?<a href=".*?">(.*?)</a>.*?</div>'
        res = re.compile(pat, re.S).findall(data)
        print('第' + str(i + 1) + '页')
        for item in res:  # iterate directly instead of range(len(...))
            print(item)
            print('------')
    except Exception as err:
        # Original silently swallowed ALL errors; at least report which
        # page failed so network/parse problems are visible.
        print("page %d failed: %s" % (i + 1, err))
3 源代码
这是把段子内容保存到文件中的完整源代码。
import urllib
import urllib.request
import random
import re
# User-Agent pool for the crawler (QQ Browser / Chrome / Edge).
# BUG FIX: the original entries were not separated by commas, so implicit
# string concatenation fused them into one bogus User-Agent and the "pool"
# effectively contained a single entry.  Commas restore three real choices.
uapools = [
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6788.400 QQBrowser/10.3.2843.400",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362",
]
def UA(pools=None):
    """Pick a random User-Agent and install it as the global urllib opener.

    Args:
        pools: Optional iterable of User-Agent strings; when omitted the
            module-level ``uapools`` is used (keeps ``UA()`` calls working).

    Returns:
        The installed ``OpenerDirector`` (previously ``None``; returning it
        is backward-compatible and makes the function testable).
    """
    if pools is None:
        pools = uapools
    opener = urllib.request.build_opener()
    thisua = random.choice(pools)
    opener.addheaders = [("User-Agent", thisua)]
    urllib.request.install_opener(opener)  # applies to subsequent urlopen()
    # print("Current User-Agent: " + str(thisua))  # debug aid
    return opener
# Crawl only the first 5 pages and append every joke to a local file.
for i in range(0, 5):
    UA()  # rotate User-Agent before each page request
    url = "http://www.budejie.com/" + str(i + 1)  # dropped useless +""
    try:
        data = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
        # Joke text is the anchor body inside each j-r-list-c-desc div.
        pat = '<div class="j-r-list-c-desc">.*?<a href=".*?">(.*?)</a>.*?</div>'
        res = re.compile(pat, re.S).findall(data)
        print('第' + str(i + 1) + '页')
        # PERF FIX: open the output file once per page; the original
        # re-opened it inside the inner loop for every single joke.
        with open("百思不姐姐.doc", "a+", encoding="utf-8") as f:
            for item in res:
                f.write(item + '\r\n')
    except Exception as err:
        # Original silently discarded all errors; report the failing page.
        print("page %d failed: %s" % (i + 1, err))
欢迎大家参考与交流