# New to web scraping; this script is my summary after studying several blog posts.
import requests
from bs4 import BeautifulSoup
import os
import urllib.request
# Request headers. NOTE: the 'referer' header is mandatory — the site's
# hotlink protection rejects image requests without it (server errors out).
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
           'referer': 'https://www.5442.com'}

# Local directory that receives one sub-folder per photo album.
BASE_DIR = 'C:/Users/yangsong/Desktop/spider_btf_girl_4'

for i in range(800, 1000):
    # Create one folder per album; path is reused below for saving/cleanup.
    album_dir = '{}/image{}'.format(BASE_DIR, str(i))
    os.mkdir(album_dir)
    for j in range(1, 10):
        # Album pages follow the pattern 84<i>_<j>.html; page count varies per album.
        page_url = 'https://www.5442.com/meinv/20190228/84{}_{}.html'.format(str(i), str(j))
        soup = BeautifulSoup(requests.get(page_url, headers=headers).text, 'lxml')
        img = soup.find(id='contents')
        try:
            content = img.a.img.get('src')
        except AttributeError:
            # Missing page: album lengths differ, so soup.find() returns None
            # (or the node lacks <a><img>) once we run past the last page.
            if j == 1:
                # First page already missing -> the whole album does not exist.
                # Remove the (still empty) folder created above and move on.
                print(i, ' is null')
                os.rmdir(album_dir)
                break
            elif j == 9:
                # Pages 1-8 succeeded; page 9 is absent. Loop ends naturally.
                print(i, ' has 8 page------------')
            else:
                print(i, ' has ', j, ' page-----------')
                break
        else:
            if j == 9:
                print(i, ' has 9 page------------')
            # Download the image itself; the same headers (esp. referer) are
            # required here or the host refuses the request.
            req = urllib.request.Request(url=content, headers=headers)
            with urllib.request.urlopen(req) as response:  # closed deterministically
                image_bytes = response.read()
            with open('{}/{}.jpg'.format(album_dir, str(j)), 'wb') as out_file:
                out_file.write(image_bytes)