# -*- coding:utf-8 -*-
import os
import requests
import urllib
from pyquery import PyQuery
import uuid
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome"
"/75.0.3770.142 Safari/537.36"
}
def href_url_download():
    """Crawl viewstock.com search results for each keyword in list.txt.

    Reads one keyword per line from ``list.txt``, queries the site's search
    page for each keyword, extracts every result image URL and hands it to
    ``image_download()``.
    """
    # 1. Keywords to crawl: one per line in list.txt.
    # ``with`` guarantees the file is closed even if reading raises.
    with open("list.txt", "r", encoding="utf-8") as keyword_list:
        lines = keyword_list.readlines()
    for keyword in lines:
        keyword = keyword.strip()
        if not keyword:
            # Skip blank lines so we don't query the site with an empty key.
            continue
        print(keyword)
        # 2. Adjust the page range here: range(1, 14) crawls 13 pages.
        for pages in range(1, 2):
            page = str(pages)
            # Example of the search URL being built:
            # http://www.viewstock.com/search?opt=&gid=&page=2&key2=苹果&selwr=10&selyc=0&seldj=0&key=苹果&addkey=&order=10&size=100
            url = "http://www.viewstock.com/search?opt=&gid=&page=" + page + "&key2=" + keyword + \
                  "&selwr=10&selyc=0&seldj=0&key=" + keyword + "&addkey=&order=10&size=100"
            try:
                txt = requests.get(url, headers=headers).text
                doc = PyQuery(txt)
                # Each search result is an <img> inside an .item anchor.
                pages_url = doc(".item a img").items()
                for page_url in pages_url:
                    image_url = page_url.attr("src")
                    print(image_url)
                    if image_url:
                        # Guard against <img> tags without a src attribute,
                        # which would otherwise pass None to image_download.
                        image_download(keyword, image_url)
            except requests.exceptions.ConnectionError:
                print("requests.exceptions.ConnectionError")
def image_download(keyword, image_url):
    """Download one image into ./image/<keyword>/ under a unique filename.

    Args:
        keyword: search keyword; used as the sub-folder name and the
            filename prefix.
        image_url: direct URL of the image to fetch.
    """
    # 3. Image storage path: one folder per keyword.
    folder_path = "./image/" + keyword + "/"
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(folder_path, exist_ok=True)
    # uuid1 makes each filename unique, so repeated runs never overwrite.
    image = keyword + "_" + str(uuid.uuid1()) + ".jpg"
    print(image)
    try:
        content = requests.get(image_url, headers=headers)
        with open(folder_path + image, "wb") as f:
            f.write(content.content)
    except requests.exceptions.HTTPError:
        # Bug fix: the original caught urllib.error.HTTPError, which
        # requests never raises — and `import urllib` alone does not load
        # the urllib.error submodule, so merely evaluating that except
        # clause during another exception would raise AttributeError.
        print("Internal Server Error")
    except requests.exceptions.ConnectionError:
        print("requests.exceptions.ConnectionError")
# Script entry point: start the keyword-driven crawl when run directly.
if __name__ == "__main__":
    href_url_download()
# 美好景象网图片爬虫 — image crawler for viewstock.com.
# (Blog-post metadata: latest recommended article published 2020-11-30 13:13:35.)