# -*- coding:utf-8 -*-
import os
import urllib
import urllib.parse
import uuid

import requests
from pyquery import PyQuery
# Browser-like User-Agent header sent with every request so the site does
# not reject us as an obvious bot. (The two string literals are implicitly
# concatenated into one UA value.)
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome"
"/75.0.3770.142 Safari/537.36"
}
def href_url_download():
    """Crawl xiachufang.com search results and download recipe images.

    Reads one search keyword per line from ``list.txt``, fetches up to 13
    result pages per keyword, follows each recipe link found on the page
    and hands every image URL to :func:`image_download`.

    Side effects: network requests, console output, and image files
    written under ``./image/<keyword>/`` (via ``image_download``).
    """
    # 1. Read the keywords to crawl from list.txt (one keyword per line).
    #    BUG FIX: the original left the file handle open; use a context manager.
    with open("list.txt", "r", encoding="utf-8") as keyword_list:
        lines = keyword_list.readlines()
    for keyword in lines:
        keyword = keyword.strip()
        if not keyword:  # tolerate blank lines in list.txt
            continue
        print(keyword)
        # 2. Adjust the crawled page range here; pages 1..13 by default.
        for pages in range(1, 14):
            page = str(pages)
            # e.g. https://www.xiachufang.com/search/?keyword=华夫饼&cat=1001&page=2
            # BUG FIX: the original URL had a stray space after "page=" and
            # interpolated the (possibly non-ASCII) keyword without encoding.
            url = (
                "https://www.xiachufang.com/search/?keyword="
                + urllib.parse.quote(keyword)
                + "&cat=1001&page="
                + page
            )
            try:
                txt = requests.get(url, headers=headers, timeout=30).text
                doc = PyQuery(txt)
                # Each search hit's recipe link.
                pages_url = doc(".pure-u .name a").items()
                for page_url in pages_url:
                    img_url = page_url.attr("href")
                    if not img_url:  # anchor without href — skip
                        continue
                    # hrefs are site-relative; urljoin avoids the "//" the
                    # original produced by naive string concatenation.
                    img_urls = urllib.parse.urljoin(
                        "https://www.xiachufang.com/", img_url
                    )
                    txt2 = requests.get(img_urls, headers=headers, timeout=30).text
                    doc2 = PyQuery(txt2)
                    images = doc2(".block-negative-margin img").items()
                    for image in images:
                        image_url = image.attr("src")
                        print(image_url)
                        image_download(keyword, image_url)
            except requests.exceptions.ConnectionError:
                # Best-effort crawl: log and move on to the next page.
                print("requests.exceptions.ConnectionError")
def image_download(keyword, image_url):
    """Download a single image into ``./image/<keyword>/``.

    Args:
        keyword: search keyword; used as the sub-folder name and the
            filename prefix.
        image_url: direct URL of the image to fetch.

    The file is named ``<keyword>_<uuid>.jpg`` to avoid collisions.
    Network/HTTP errors are logged to stdout and swallowed (best-effort
    crawl, consistent with href_url_download).
    """
    # 3. Image storage path: one sub-folder per keyword.
    folder_path = "./image/" + keyword + "/"
    # exist_ok=True replaces the original exists()/makedirs() race.
    os.makedirs(folder_path, exist_ok=True)
    image = keyword + "_" + str(uuid.uuid1()) + ".jpg"  # unique filename
    print(image)
    try:
        content = requests.get(image_url, headers=headers, timeout=30)
        # Surface 4xx/5xx responses instead of silently saving an error page.
        content.raise_for_status()
        with open(folder_path + image, "wb") as f:
            f.write(content.content)
    # BUG FIX: requests never raises urllib.error.HTTPError — the original
    # handler was dead code, and `urllib.error` was not even imported, so
    # evaluating that except clause could itself raise AttributeError.
    except requests.exceptions.HTTPError:
        print("Internal Server Error")
    except requests.exceptions.ConnectionError:
        print("requests.exceptions.ConnectionError")
# Script entry point: crawl every keyword listed in list.txt and download
# the recipe images found for it.
if __name__ == '__main__':
    href_url_download()
# 下厨房图片爬虫 (Xiachufang image scraper)
# NOTE(review): the two lines below were a blog-page footer accidentally
# pasted into the script ("latest recommended article published
# 2023-03-02 16:48:18"); kept as a comment so the file parses.