本文使用多线程的生产者-消费者模式爬取斗图网的表情包,并下载保存到本地:get_page() 函数负责抓取页面并解析出图片的 URL 链接,downlod_img() 函数根据图片 URL 将图片下载保存到本地文件夹,开启多线程可以加快爬取速度。代码如下:
import os
import threading
import time
from urllib import request

import requests
from bs4 import BeautifulSoup
# Shared state for the producer/consumer threads, all guarded by gLock:
# producers pop page URLs from BASE_URL_LIST and push image URLs onto
# IMG_URL_LIST; consumers pop from IMG_URL_LIST and download.
gLock = threading.Lock()
IMG_URL_LIST = []
# Listing pages 1..2017 of the gallery.
BASE_URL_LIST = [
    'https://www.doutula.com/photo/list/?page={}'.format(page)
    for page in range(1, 2018)
]
print(BASE_URL_LIST)
def get_page():
    """Producer: pop page URLs from BASE_URL_LIST and collect image URLs.

    Scrapes each listing page and appends every found image URL to the
    shared IMG_URL_LIST. Returns when BASE_URL_LIST is exhausted. Safe to
    run from several threads: all access to the shared lists is guarded
    by gLock.
    """
    while True:
        gLock.acquire()
        if not BASE_URL_LIST:
            gLock.release()
            break
        page_url = BASE_URL_LIST.pop()
        gLock.release()
        try:
            # A timeout keeps a stalled server from hanging this thread
            # forever; without it the original could block indefinitely.
            response = requests.get(page_url, timeout=10)
            response.raise_for_status()
        except requests.RequestException as exc:
            # One bad page must not kill the whole producer thread.
            print('failed to fetch {}: {}'.format(page_url, exc))
            continue
        soup = BeautifulSoup(response.content, "lxml")
        imgs = soup.find_all('img', attrs={'class': 'img-responsive lazy image_dta'})
        gLock.acquire()
        for img in imgs:
            img_url = img['data-original']
            # Scheme-relative URLs ("//img...") need an explicit scheme;
            # both branches of the original appended, so dedupe that here.
            if not (img_url.startswith('http:') or img_url.startswith('https:')):
                img_url = 'https:' + img_url
            IMG_URL_LIST.append(img_url)
            print(img_url)
        gLock.release()
def downlod_img():
    """Consumer: pop image URLs from IMG_URL_LIST and save them to disk.

    NOTE(review): like the original, this loops forever waiting for more
    work — the process is expected to be terminated externally once the
    producers finish and no new URLs appear.
    """
    save_dir = 'E:\\images'
    # urlretrieve fails outright if the target folder is missing.
    os.makedirs(save_dir, exist_ok=True)
    while True:
        gLock.acquire()
        if not IMG_URL_LIST:
            gLock.release()
            # Queue is empty: yield the CPU instead of busy-spinning at
            # 100% (the original `continue` re-acquired the lock in a
            # tight loop).
            time.sleep(0.1)
            continue
        img_url = IMG_URL_LIST.pop()
        gLock.release()
        # Use the last path segment of the URL as the file name.
        img_name = img_url.split('/')[-1]
        path = os.path.join(save_dir, img_name)
        try:
            request.urlretrieve(img_url, filename=path)
        except OSError as exc:
            # urllib.error.URLError subclasses OSError; one failed
            # download must not kill the whole consumer thread.
            print('failed to download {}: {}'.format(img_url, exc))
def main():
    """Launch 4 producer threads and 6 consumer threads."""
    for _ in range(4):
        threading.Thread(target=get_page).start()
    for _ in range(6):
        threading.Thread(target=downlod_img).start()


if __name__ == "__main__":
    main()
运行效果与结果如下: