import os
import re
import urllib.error
import urllib.request
# Crawl the CSDN blog front page and save every linked article as a local HTML file.
url = 'http://blog.csdn.net/'  # use http, not https, for this fetch (per original note)

# Present a browser User-Agent so the site serves the normal page instead of blocking the bot.
headers = ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36')
opener = urllib.request.build_opener()
opener.addheaders = [headers]  # fixed: original referenced undefined name `header`

data = opener.open(url).read()          # fetch the front page through the browser-like opener
data2 = data.decode('utf-8', 'ignore')  # decode, ignoring malformed byte sequences

# Article links look like: https://blog.csdn.net/<user>/.../<numeric id>
pat = r'<a href="(https://blog.csdn.net/.*?/\d+?)"'  # raw string so \d is a regex class, not an escape
allurl = re.compile(pat).findall(data2)

listallurl = list(set(allurl))  # de-duplicate URLs (without shadowing the builtin `set`)

savedir = 'D:/CSDNBLOG/'
os.makedirs(savedir, exist_ok=True)  # ensure the save directory exists before urlretrieve writes into it

for i, currenturl in enumerate(listallurl):  # fixed: original indexed `allurl[i]` after de-duplicating into listallurl
    try:  # per-URL error handling so one bad link doesn't abort the whole crawl
        print('第' + str(i) + '次爬取')
        file = savedir + str(i) + '.html'           # save path: one numbered file per article
        urllib.request.urlretrieve(currenturl, file)  # download and save the article page
    except urllib.error.URLError as e:
        # Report HTTP status or network reason, then continue with the next URL.
        if hasattr(e, 'code'):
            print(e.code)
        if hasattr(e, 'reason'):
            print(e.reason)
# 2019-12-05 — Python 3 assignment: crawl CSDN blog articles
# (latest recommended article published 2020-01-28 14:24:35)