# --- Version 1: requests + BeautifulSoup ---
from urllib.parse import urlparse
import os
import requests
from bs4 import BeautifulSoup
# Browser User-Agent so the site serves the normal desktop page.
ua = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
      "(KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36")
headers = {"User-Agent": ua}

# Local target directory for the downloaded images.
IMG_DIR = "E:\\知识学习\\2019版-千锋爬虫-源码+笔记+作业\\爬虫\\下厨房"


def collect_image_urls(soup):
    """Return every <img> URL on the page, preferring the lazy-load
    ``data-src`` attribute over ``src`` when present."""
    urls = []
    for img in soup.select("img"):
        if img.has_attr("data-src"):
            urls.append(img.attrs["data-src"])
        else:
            urls.append(img.attrs["src"])
    return urls


def clean_image_url(img_url):
    """Return ``(download_url, relative_filename)`` for an image URL.

    The CDN appends image-transform suffixes after ``@`` in the path
    (e.g. ``/abc.jpg@300w_1e``); stripping it requests the original
    full-size file and yields a clean local filename.  Protocol-relative
    ``//host/...`` URLs fall back to ``http``.
    """
    o = urlparse(img_url)
    filename = o.path[1:].split("@")[0]
    scheme = o.scheme or "http"  # "//host/..." parses with an empty scheme
    return f"{scheme}://{o.netloc}/{filename}", filename


def scrape_with_requests():
    """Download every image linked from the xiachufang front page."""
    r = requests.get('http://www.xiachufang.com', headers=headers)
    r.raise_for_status()
    soup = BeautifulSoup(r.text, "lxml")

    # makedirs with exist_ok handles nested paths and avoids a
    # check-then-create race (os.mkdir would fail on nesting).
    os.makedirs(IMG_DIR, exist_ok=True)

    for img_url in collect_image_urls(soup):
        if not img_url.strip():
            continue  # skip empty src attributes
        url, filename = clean_image_url(img_url)
        filepath = os.path.join(IMG_DIR, filename)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        # stream=True so iter_content actually streams instead of
        # buffering the whole body first.
        resp = requests.get(url, headers=headers, stream=True)
        with open(filepath, "wb") as file:
            for chunk in resp.iter_content(1024):
                file.write(chunk)


if __name__ == "__main__":
    scrape_with_requests()
# --- Version 2: pycurl + re ---
import re
from pycurl import Curl
from urllib.parse import urlparse
from io import BytesIO
import os
# Local target directory for the downloaded images.
PYCURL_IMG_DIR = "E:\\知识学习\\2019版-千锋爬虫-源码+笔记+作业\\爬虫\\下厨房"


def remote_name_and_url(img_url):
    """Return ``(relative_filename, reassembled_absolute_url)``.

    The filename is the URL path without its leading slash; the URL is
    rebuilt from scheme/netloc/path so it round-trips a plain image URL
    unchanged.
    """
    o = urlparse(img_url)
    filename = o.path[1:]
    return filename, f"{o.scheme}://{o.netloc}/{filename}"


def scrape_with_pycurl():
    """Download xiachufang front-page images matched by regex, via pycurl."""
    buffer = BytesIO()
    c = Curl()
    c.setopt(c.URL, 'http://www.xiachufang.com')
    c.setopt(c.WRITEDATA, buffer)
    c.perform()
    c.close()

    text = buffer.getvalue().decode("utf-8")
    # Only absolute i2.chuimg.com .jpg URLs in src attributes.
    img_list = re.findall(r"src=\"(http://i2\.chuimg\.com/\w+\.jpg)", text)

    # makedirs with exist_ok handles nested paths and avoids a
    # check-then-create race (os.mkdir would fail on nesting).
    os.makedirs(PYCURL_IMG_DIR, exist_ok=True)

    for img in img_list:
        if not img.strip():
            continue  # skip empty matches
        filename, url = remote_name_and_url(img)
        filepath = os.path.join(PYCURL_IMG_DIR, filename)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        with open(filepath, "wb") as file:
            # Fresh Curl handle per download, written straight to disk.
            dl = Curl()
            dl.setopt(dl.URL, url)
            dl.setopt(dl.WRITEDATA, file)
            dl.perform()
            dl.close()


if __name__ == "__main__":
    scrape_with_pycurl()