# 查看网页源代码 (view the page's HTML source to find the XPath targets below)

"""
分析:
进程1. 从主页面当中解析出详情页的url,从详情页中提取到图片的下载地址
进程2. 把拿到的下载地址,进行下载
队列:可以进行进程之间的通信
"""
import os
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process, Queue
from urllib import parse

import requests
from lxml import etree
def get_img_src(q):
    """Producer process: scrape the list page for detail-page links, pull the
    image URL out of each detail page, and push it onto the shared queue.

    A sentinel string ("完事儿了") is enqueued after the last URL so the
    consumer process knows when to stop.

    :param q: multiprocessing.Queue connecting this producer to the downloader
    """
    url = "http://www.591mm.com/mntt/6.html"
    resp = requests.get(url)
    resp.encoding = 'utf-8'
    tree = etree.HTML(resp.text)
    href_list = tree.xpath("//div[@class='MeinvTuPianBox']/ul/li/a[1]/@href")
    for href in href_list:
        # BUG FIX: original wrote parse.urljoin(url.href) — an attribute
        # access on the str `url`, which raises AttributeError. urljoin
        # takes (base, url) as two separate arguments.
        href = parse.urljoin(url, href)
        child_resp = requests.get(href)
        child_resp.encoding = 'utf-8'
        child_tree = etree.HTML(child_resp.text)
        src = child_tree.xpath("//div[@id='picBody']//img/@src")[0]
        q.put(src)
        print(f"{src},被塞进队列")
    q.put("完事儿了")
def download(url):
    """Download one image to the ./img directory (thread-pool worker).

    :param url: absolute URL of the image to fetch
    """
    print("开始下载", url)
    name = url.split("/")[-1]
    # BUG FIX: original opened "./img"+name, which creates files named
    # like "./imgfoo.jpg" next to the script (or fails) instead of saving
    # inside the ./img directory. Join the path properly and make sure
    # the directory exists before writing.
    os.makedirs("./img", exist_ok=True)
    with open(os.path.join("./img", name), mode="wb") as f:
        resp = requests.get(url)
        f.write(resp.content)
    print("下载完毕")
def download_img(q):
    """Consumer process: drain image URLs from the queue and hand each one
    to a 10-thread pool for downloading, stopping at the producer's
    sentinel value ("完事儿了").

    :param q: multiprocessing.Queue fed by get_img_src
    """
    with ThreadPoolExecutor(10) as pool:
        while True:
            img_url = q.get()
            if img_url == "完事儿了":
                break
            pool.submit(download, img_url)
if __name__ == '__main__':
    q = Queue()
    # BUG FIX: the original wrote target=get_img_src() / target=download_img(),
    # which CALLS each function immediately in the parent process and passes
    # its None return value as the Process target. Pass the function objects
    # themselves so the work runs in the child processes.
    p1 = Process(target=get_img_src, args=(q,))
    p2 = Process(target=download_img, args=(q,))
    p1.start()
    p2.start()
    # Wait for both children so the script doesn't exit while work is pending.
    p1.join()
    p2.join()
# 调用函数拼接 (URL joining is done via urljoin above)

# 执行过程 (execution flow: producer parses pages, consumer downloads)

# 运行结果 (run result: images saved under ./img)
