# -*- coding:utf-8 -*-
import os
import time
import requests
import urllib
import uuid
from pyquery import PyQuery
# Default HTTP headers for all requests: a desktop-Chrome User-Agent so the
# site serves the normal HTML pages instead of blocking the scraper.
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/75.0.3770.142 Safari/537.36"
}
def href_url_download():
    """Scrape benlai.com product images for every keyword in list.txt.

    For each keyword (one per line in ``list.txt``) this searches the site,
    follows every product link on the result page, extracts the image URLs
    from the product detail page, and hands each one to ``image_download``.

    Side effects: network requests, console output, and image files written
    by ``image_download``.
    """
    # 1. Read the keyword list; the context manager guarantees the file is
    #    closed even if reading fails (the original leaked the handle on error).
    with open("list.txt", "r", encoding="utf-8") as keyword_file:
        keywords = [line.strip() for line in keyword_file]

    for keyword in keywords:
        if not keyword:
            # Skip blank lines instead of issuing a useless empty search.
            continue
        print(keyword)
        url = ("https://www.benlai.com/products/search.html?keyword="
               + keyword + "&sType=Search")  # keyword search-result page
        print(url)
        try:
            # timeout prevents a dead server from hanging the whole run.
            search_html = requests.get(url, headers=headers, timeout=30).text
            search_doc = PyQuery(search_html)
            # Each result tile links to a product detail page.
            for link in search_doc(".box .pic a").items():
                html_url = link.attr("href")
                print(html_url)
                detail_html = requests.get(html_url, headers=headers,
                                           timeout=30).text
                detail_doc = PyQuery(detail_html)
                # NOTE(review): the image URL is taken from the element's
                # text (not an src attribute) — presumably the site embeds
                # the URL as text; verify against the live page markup.
                for image in detail_doc("#smallPic div:nth-child(odd)").items():
                    image_url = image.text()
                    print(image_url)
                    image_download(keyword, image_url)  # download the image
        except requests.exceptions.ConnectionError:
            print("requests.exceptions.ConnectionError")
        except requests.exceptions.InvalidURL:
            print("requests.exceptions.InvalidURL")
        except requests.exceptions.Timeout:
            # New, from the added timeout: report and move on to the next
            # keyword rather than crashing the batch.
            print("requests.exceptions.Timeout")
def image_download(keyword, image_url):
    """Download one image into ``./image/<keyword>/`` with a unique name.

    Args:
        keyword: search keyword, used as the folder name and filename prefix.
        image_url: absolute URL of the image to fetch.

    Network and HTTP errors are reported on stdout; the function never raises.
    """
    # 3. Per-keyword storage folder; exist_ok avoids the check-then-create
    #    race of the original exists()/makedirs() pair.
    folder_path = os.path.join(".", "image", keyword)
    os.makedirs(folder_path, exist_ok=True)
    # uuid1 gives a unique name so repeated downloads never overwrite files.
    image_name = keyword + "_" + str(uuid.uuid1()) + ".jpg"
    print(image_name)
    try:
        response = requests.get(image_url, headers=headers, timeout=30)
        # Fail on HTTP error statuses instead of silently saving an error
        # page as a .jpg file.
        response.raise_for_status()
        with open(os.path.join(folder_path, image_name), "wb") as f:
            f.write(response.content)
    # BUG FIX: the original caught urllib.error.HTTPError, which requests
    # never raises (and `import urllib` alone does not bind urllib.error,
    # so the handler itself could fail). Catch the requests hierarchy.
    except requests.exceptions.HTTPError:
        print("Internal Server Error")
    except requests.exceptions.ConnectionError:
        print("requests.exceptions.ConnectionError")
    except requests.exceptions.Timeout:
        print("requests.exceptions.Timeout")
# Script entry point: run the scraper only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    href_url_download()
# 本来生活网图片爬虫 — benlai.com image scraper
# (footer text carried over from the original blog post, published 2024-03-21 19:14:04)