# 1. Synchronous crawler (同步爬虫)
import re
import requests
from lxml import etree
from urllib import request
import os
def parse_url(url):
    """Download every non-gif meme image from one doutula list page.

    Fetches *url*, extracts each ``<img>`` under the page-content div,
    and saves it into the local ``images/`` directory, named after the
    image's sanitized alt text plus its index on the page.
    """
    headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"}
    response=requests.get(url,headers=headers)
    text=response.text
    html=etree.HTML(text)
    imgs=html.xpath("//div[@class='page-content text-center']//img[@class!='gif']")
    # Create the output directory up front: the original version crashed
    # with a "path not found" error unless images/ was created manually.
    os.makedirs("images", exist_ok=True)
    for index,img in enumerate(imgs):
        img_url=img.get("data-original")
        if not img_url:
            # Some <img> tags lack the lazy-load attribute; splitext(None)
            # would raise TypeError, so skip them.
            continue
        # Strip every non-word character so the alt text is filename-safe.
        alt=re.sub(r"\W","",img.get("alt") or "")
        suffix=os.path.splitext(img_url)[-1]
        file_path="images/"+alt+str(index)+suffix
        print(file_path)
        request.urlretrieve(img_url,file_path)
def main():
    """Crawl the meme list pages.

    The ``break`` below deliberately stops after the first page, so only
    page 1 is actually fetched even though the range goes up to 49.
    """
    for page in range(1,50):
        list_url = "https://www.doutula.com/photo/list/?page=%d" % page
        parse_url(list_url)
        break


if __name__=='__main__':
    main()
# 2. Multi-threaded crawler (异步爬虫): producer/consumer threads sharing a Queue — much faster
import os
import re
import threading
from queue import Empty, Queue
from urllib import request

import requests
from lxml import etree
class Producer(threading.Thread):
    """Scraper thread: pulls list-page URLs from the URL queue, parses each
    page, and pushes ``(image_url, file_path)`` pairs onto the image queue
    for the consumer threads to download."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"}

    def __init__(self,urlqueque,imagequeue,*args,**kwargs):
        # NOTE(review): the misspelled parameter name 'urlqueque' is kept
        # so any keyword callers keep working; internally it is 'urlqueue'.
        super(Producer,self).__init__(*args,**kwargs)
        self.urlqueue=urlqueque
        self.imagequeue=imagequeue

    def run(self):
        # Drain the URL queue.  A non-blocking get() replaces the original
        # empty()-then-get() pair, which could hang forever when another
        # producer drained the queue between the check and the get.
        while True:
            try:
                url=self.urlqueue.get(block=False)
            except Empty:
                break
            self.parse_url(url)

    def parse_url(self,url):
        """Parse one list page and enqueue every non-gif image found on it."""
        response=requests.get(url,headers=self.headers)
        text=response.text
        html=etree.HTML(text)
        imgs=html.xpath("//div[@class='page-content text-center']//img[@class!='gif']")
        for index,img in enumerate(imgs):
            img_url=img.get("data-original")
            if not img_url:
                # Missing lazy-load attribute: splitext(None) would raise
                # TypeError downstream, so skip this tag.
                continue
            alt=img.get("alt")
            # Strip non-word characters so the alt text is filename-safe.
            alt=re.sub(r"\W","",alt or "")
            suffix=os.path.splitext(img_url)[-1]
            file_path="images/"+alt+str(index)+suffix
            self.imagequeue.put((img_url,file_path))
class Costumer(threading.Thread):
    """Downloader thread: pops ``(image_url, file_path)`` pairs off the image
    queue and saves each image to disk.  (Class name 'Costumer' — sic — is
    kept so existing callers are unaffected.)"""

    def __init__(self,urlqueque,imagequeue,*args,**kwargs):
        # NOTE(review): misspelled parameter name 'urlqueque' kept for
        # interface compatibility; stored internally as 'urlqueue'.
        super(Costumer,self).__init__(*args,**kwargs)
        self.urlqueue=urlqueque
        self.imagequeue=imagequeue

    def run(self):
        while True:
            try:
                # A timed get() replaces the original empty()-then-blocking-get()
                # pair, which could (a) hang forever when another consumer took
                # the last item between the check and the get, and (b) exit too
                # early while producers were still parsing pages.
                img_url,file_path=self.imagequeue.get(timeout=1)
            except Empty:
                # Nothing arrived within the timeout: stop only once the
                # producers' URL queue is also drained, otherwise keep waiting.
                if self.urlqueue.empty():
                    break
                continue
            request.urlretrieve(img_url,file_path)
            print(file_path+"已经下载完毕")
def main():
    """Seed the URL queue with list pages 1-9, then launch five producer
    threads (page scrapers) and five consumer threads (image downloaders)."""
    urlqueue=Queue(100)
    imagequeue=Queue(10000)
    for page in range(1,10):
        urlqueue.put("https://www.doutula.com/photo/list/?page=%d"%page)
    for _ in range(5):
        Producer(urlqueue,imagequeue).start()
    for _ in range(5):
        Costumer(urlqueue,imagequeue).start()


if __name__=='__main__':
    main()