# -*- coding:utf-8 -*-
import os
import requests
import urllib
from pyquery import PyQuery
import uuid
from pypinyin import pinyin
import unicodedata
# Default request headers: spoof a desktop Chrome user agent so that
# 51yuansu.com serves normal pages instead of rejecting the scraper.
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome"
"/75.0.3770.142 Safari/537.36"
}
def href_url_download():
    """Crawl 51yuansu.com search results for every keyword in list.txt
    and hand each found image URL to image_download().

    Reads:  list.txt (UTF-8, one Chinese keyword per line).
    Side effects: HTTP requests; image files written under ./image/ by
    image_download().
    """
    # 1. Read the keyword list. `with` guarantees the file is closed even
    # if reading raises (the original opened/closed it manually).
    with open("list.txt", "r", encoding="utf-8") as keyword_list:
        lines = keyword_list.readlines()
    for keyword in lines:
        keyword = keyword.strip()
        if not keyword:
            # Skip blank lines so we don't build a malformed search URL.
            continue
        # 51yuansu search URLs use pinyin, so romanize the keyword.
        word = pinyin(keyword)
        pin_yin = "".join(str(i[0]) for i in word)
        print(pin_yin)
        # pypinyin emits tone-marked pinyin by default; NFKD-decompose and
        # drop the combining diacritics to get a plain-ASCII slug.
        res = unicodedata.normalize('NFKD', pin_yin).encode('ascii', 'ignore').decode("ascii")
        print(res)
        # 2. Adjust the page range here; default crawls pages 1-9.
        for pages in range(1, 10):
            # Example: http://www.51yuansu.com/search/xiangjiao-40-0-0-0-1/
            url = "http://www.51yuansu.com/search/" + res + "-40-0-0-0-" + str(pages)
            print(url)
            try:
                # timeout so a stalled server cannot hang the crawl forever.
                txt = requests.get(url, headers=headers, timeout=30).text
                doc = PyQuery(txt)
                # Each search hit links to a detail page holding the image.
                pages_url = doc(".img-out-wrap .img-wrap").items()
                for page_url in pages_url:
                    html_url = page_url.attr("href")
                    print(html_url)
                    txt2 = requests.get(html_url, headers=headers, timeout=30).text
                    doc2 = PyQuery(txt2)
                    images = doc2(".img-wrap .show-image").items()
                    for image in images:
                        image_url = image.attr("src")
                        print(image_url)
                        image_download(keyword, image_url)
            except requests.exceptions.ConnectionError:
                # Best-effort: log and move on to the next page/keyword.
                print("requests.exceptions.ConnectionError")
def image_download(keyword, image_url):
    """Download one image into ./image/<keyword>/ under a unique filename.

    Parameters:
        keyword: search keyword; used as the subfolder and filename prefix.
        image_url: direct URL of the image to fetch.

    Side effects: creates the folder on first use and writes one .jpg file.
    """
    # 3. Image storage path; one subfolder per keyword.
    folder_path = "./image/" + keyword + "/"
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    # uuid1 keeps filenames unique across repeated downloads of a keyword.
    image = keyword + "_" + str(uuid.uuid1()) + ".jpg"
    print(image)
    try:
        content = requests.get(image_url, headers=headers, timeout=30)
        # BUG FIX: the original caught urllib.error.HTTPError, which the
        # requests library never raises — HTTP error pages were silently
        # saved to disk as .jpg files. Surface HTTP errors explicitly.
        content.raise_for_status()
        with open(folder_path + image, "wb") as f:
            f.write(content.content)
    except requests.exceptions.HTTPError:
        print("Internal Server Error")
    except requests.exceptions.ConnectionError:
        print("requests.exceptions.ConnectionError")
# Run the crawler only when executed directly, not when imported.
if __name__ == '__main__':
    href_url_download()
# 觅元素网图片爬虫 — 51yuansu.com ("Miyuansu") image scraper.
# Originally published 2021-06-08 21:14:53 (最新推荐文章于 2021-06-08 21:14:53 发布).