import requests
import os
from lxml import etree
from multiprocessing.dummy import Pool
#### Save one image file ####
def save_img(url):
    """Download *url* and write it into the module-global directory
    ``save_dir`` (set under ``__main__``).

    The last 14 characters of the URL serve as the filename.
    """
    name = url[-14:]
    print(name)
    resp = requests.get(url)
    with open(save_dir + '/' + name, 'wb') as f:
        f.write(resp.content)


#### Entry point: crawl listing pages and download every thumbnail ####
if __name__ == '__main__':
    first_page = int(input('输入开始页: '))
    last_page = int(input('输入结束页: '))
    save_dir = input('输入目录名: ')
    os.makedirs(save_dir)
    for page in range(first_page, last_page + 1):
        target = 'http://www.meizitu.org/page/' + str(page) + '/'
        listing = requests.get(url=target)
        # Pull every thumbnail image src from the listing page.
        tree = etree.HTML(listing.text)
        url_list = tree.xpath('//*[@class="thumb"]/img/@src')
        pool = Pool(12)  # 12 download threads per listing page
        pool.map(save_img, url_list)
        pool.close()
        pool.join()
    print('\n' + '....下载完成....')
# 动态加载图,另类爬取方法 — dynamically loaded images, an alternative scraping approach
import requests
import os
import re
from multiprocessing.dummy import Pool
def get_img_url(a, b, c):
    """Collect pin ids from huaban board *a* and download their images.

    a -- board id (string taken from the board URL)
    b -- pin id used as the ``max=`` pagination cursor; it is also
         downloaded itself (appended to the id list below)
    c -- how many additional pins to request (``limit=`` parameter)

    Downloads are fanned out over a 14-thread pool to get_date().
    """
    list_url = ('https://huaban.com/boards/' + a + '/?jxy8h0iv&max=' + b +
                '&limit=' + str(c) + '&wfl=1')
    page = requests.get(list_url).text
    # Narrow to the fragment holding the pin list before extracting ids.
    page = re.findall('category_name([\W\w]*?)app._csr', page)[0]
    url_pins = re.findall('pin_id":(.*?), "user_id', page)
    # The cursor pin itself is not included in the "max=" page; add it back.
    url_pins += [b]
    pool = Pool(14)
    # NOTE(review): imap is lazy about *results*, but the pool's task
    # handler still dispatches every job, and join() waits for the workers.
    pool.imap(get_date, url_pins)
    pool.close()
    pool.join()


def get_date(url):
    """Resolve pin id *url* to its CDN image key and download the image."""
    new_url = 'https://huaban.com/pins/' + url + '/'
    pin_page = requests.get(url=new_url).text
    pin_page = re.findall('page(.*?)type', pin_page)[0]
    # The "key" field is the image's path on the huaban CDN.
    img_key = re.findall('"key":"(.*?)"', pin_page)[0]
    save_img('http://hbimg.huabanimg.com/' + img_key)


def save_img(url):
    """Download *url* into the module-global directory ``d`` as <name>.jpg.

    ``url[-30:-13]`` slices a stable portion of the CDN key to use as
    the filename.
    """
    name = url[-30:-13]
    print(url + '\n')
    resp = requests.get(url)
    with open(d + '/' + name + '.jpg', 'wb') as f:
        f.write(resp.content)


if __name__ == '__main__':
    print('仅支持:https://huaban.com/boards/*/' + '\n')
    a = input('输入要下载的链接码:')
    target = 'https://huaban.com/boards/' + a + '/'
    board_page = requests.get(url=target).text
    board_page = re.findall('category_name([\W\w]*?)app._csr', board_page)[0]
    # First pin id on the board: used below as the pagination cursor.
    b = re.findall('pin_id":(.*?), "user_id', board_page)[0]
    c = int(input('须要下载前多少张图片:')) - 1
    d = input('创建目录名: ')
    os.makedirs(d)
    get_img_url(a, b, c)
    print('\n' + '....下载完成....')