import requests
import urllib.request
from bs4 import BeautifulSoup  # hoisted: the original re-ran this import on every loop pass


# CSS path to the thumbnail <img> elements on one gallery listing page.
_IMG_SELECTOR = ('body > div.nav_warp > div.nav_w_left > '
                 'div.zhuti_w_list > ul > li > a > span > img')


def scrape_images(pages=100, save_dir='F:\\py'):
    """Crawl the moe.005.tv gallery listing pages and download every image.

    Walks ``list_2_1.html`` through ``list_2_<pages>.html``, extracts each
    thumbnail's ``src`` URL and saves it as a numbered ``.jpg`` file.

    Args:
        pages: Number of listing pages to crawl (site pages are 1-indexed).
        save_dir: Directory the images are written to; assumed to exist.

    Returns:
        The number of images downloaded.
    """
    saved = 0
    for page in range(1, pages + 1):  # paginate through the listing
        url = f'http://moe.005.tv/moeimg/list_2_{page}.html'
        response = requests.get(url)
        soup = BeautifulSoup(response.text, 'lxml')
        for img in soup.select(_IMG_SELECTOR):
            src = img.get('src')  # image URL from the src attribute
            print(src)
            # BUG FIX: the original path was 'F:\\py\\%s' + str(N) + '.jpg',
            # so the '%s' was never substituted and appeared literally in
            # every saved filename. Build the path properly instead.
            urllib.request.urlretrieve(src, f'{save_dir}\\{saved}.jpg')
            saved += 1
    return saved


if __name__ == '__main__':
    # Reference: "Python 3 Web Crawling: Data Cleaning and Visualization in Practice"
    scrape_images()