1.开发环境:python3.7.3,正则表达式,urllib,urllib.request模块
2.分析:
1.爬取的网站为糗事百科:https://www.qiushibaike.com
2.需要爬取的是糗事百科的内容，分析页面结构: `<div class="content">.*?<span>(.*?)</span>.*?</div>`
3.想爬取多少页:
https://www.qiushibaike.com/hot/page/+页数
4.把爬取的内容存取到本地:io流
3.代码
import urllib
import urllib.request
import re
import sys
# Scrape joke texts from qiushibaike.com "hot" pages 1-13 and append them,
# page by page, to a local UTF-8 text file.
#
# Fixes vs. the original paste:
# - loop indentation restored (the paste had flattened it);
# - removed the redundant homepage fetch whose result was immediately
#   overwritten inside the loop (one wasted HTTP request);
# - removed the unused `non_bmp_map` table (its only use was commented out;
#   the output file is UTF-8, which stores non-BMP characters fine);
# - the file is opened with `with` so it is closed even if a request raises;
# - the regex is compiled once instead of once per page.

# Install a global opener carrying a browser User-Agent; the site rejects
# urllib's default User-Agent.
opener = urllib.request.build_opener()
opener.addheaders = [(
    "User-Agent",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
)]
urllib.request.install_opener(opener)

# Each joke body sits in <div class="content"><span>...</span></div>.
# re.S lets ".*?" match across newlines; compile once, reuse per page.
content_pat = re.compile('<div class="content">.*?<span>(.*?)</span>.*?</div>', re.S)

with open("D:/糗事百科爬取内容.txt", "w", encoding="utf-8") as file:
    for page in range(1, 14):  # pages 1..13, same as the original range(0, 13)+1
        print("爬取第" + str(page) + "页\n")
        file.write("爬取第" + str(page) + "页\n")
        url = "https://www.qiushibaike.com/hot/page/" + str(page)
        data = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
        for item in content_pat.findall(data):
            print("---------------------------------------------------------")
            # The page encodes line breaks as <br/>; restore real newlines.
            file.write(item.replace("<br/>", "\n"))
        # Persist each page as it completes, so a crash mid-run keeps
        # everything scraped so far.
        file.flush()