Crawling Baidu Images with requests
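The script below pages through Baidu's albumsdata JSON endpoint, collects the thumbnailUrl of every image it lists, and then downloads each picture to disk. Fill in your own User-Agent and a save path before running it.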
# -*- coding:utf-8 -*-
"""
@Author: zyc
@CreateTime: 2021/6/15 8:30
@File : Train06_baidu_pic_spyder.py
@content: crawl https://image.baidu.com/ pictures
"""
import os
from datetime import datetime

import requests

header = {'User-Agent': 'your own User-Agent'}  # paste a real browser UA string here

def get_page_url(page_url: str, pic_url_list_: list):
    """
    page_url: str page URL
    pic_url_list_: list picture URLs collected from all previous pages
    Collect every picture URL/link on the page.
    """
    try:
        response = requests.get(page_url, headers=header)
        response.encoding = response.apparent_encoding
        # the thumbnails live under albumdata -> linkData in the JSON reply
        link_data = response.json()['albumdata']['linkData']
        for a_link_data in link_data:
            pic_url = a_link_data["thumbnailUrl"]
            # print(pic_url)
            pic_url_list_.append(pic_url)
    except requests.exceptions.RequestException as e:
        print("!!!!!! ERROR !!!!!!\n", e)
    return pic_url_list_
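
# The fields accessed above imply a response body shaped roughly as follows;
# this is inferred from the code, not an official schema:
# {
#     "albumdata": {
#         "linkData": [
#             {"thumbnailUrl": "https://.../pic1.jpg", ...},
#             {"thumbnailUrl": "https://.../pic2.jpg", ...}
#         ]
#     }
# }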

def get_pages_url(base_url_: str, page_num_: int):
    """
    base_url_: str URL template ({0} = pn offset, {1} = page number)
    page_num_: int total number of pages
    Collect the picture URLs of every page.
    """
    pic_url_list = []
    for i in range(1, page_num_ + 1):
        url = base_url_.format(i * 30, i)
        pic_url_list = get_page_url(url, pic_url_list)
    return pic_url_list
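
# For example, with i == 1 the base_url template from __main__ below expands
# to a request carrying pn=30, rn=30 and curPageNum=1; the percent-encoded
# word/album_tab query values are left untouched by .format().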

def get_pics_save(pic_urls: list, save_dir_: str):
    """
    pic_urls: list picture URL list
    save_dir_: str directory to save into (a path ending with a separator!)
    Download and save the pictures.
    """
    n = 1
    for pic_url in pic_urls:
        try:
            response = requests.get(pic_url)
            # files are named like 2021-06-15-1.jpg, 2021-06-15-2.jpg, ...
            pic_name = save_dir_ + str(datetime.now().date()) + '-%d.jpg' % n
            if not os.path.exists(pic_name):
                with open(pic_name, 'wb') as fp:
                    for data in response.iter_content(256):
                        fp.write(data)
                print(pic_name)
            n += 1
        except requests.exceptions.RequestException as e:
            print("ERROR: ", e)

if __name__ == '__main__':
    # album-detail JSON endpoint; {0} is the pn offset, {1} the page number
    base_url = 'https://image.baidu.com/search/albumsdata?pn={0}&rn=30&tn=albumsdetail&word=%E6%B8%90%E5%8F%98%E9%A3' \
               '%8E%E6%A0%BC%E6%8F%92%E7%94%BB&album_tab=%E8%AE%BE%E8%AE%A1%E7%B4%A0%E6%9D%90&album_id=409&ic=0' \
               '&curPageNum={1}'
    page_num = 10  # total number of pages
    save_dir = 'your save path'  # must end with a path separator, e.g. './pics/'
    pic_urls_list = get_pages_url(base_url, page_num)
    # print(pic_urls_list)
    get_pics_save(pic_urls_list, save_dir)
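
For reference, a more defensive version of the download step might look like the sketch below. It is not part of the original script: the download_with_retry name, the retry count and the plain numeric file names are illustrative choices, while timeout=, stream= and raise_for_status() are standard requests features.

import os
import time

import requests


def download_with_retry(pic_urls, save_dir, retries=3, timeout=10):
    """Download every URL into save_dir, retrying transient failures."""
    os.makedirs(save_dir, exist_ok=True)          # create the folder if missing
    for n, pic_url in enumerate(pic_urls, start=1):
        pic_name = os.path.join(save_dir, '%d.jpg' % n)
        if os.path.exists(pic_name):              # skip files we already have
            continue
        for attempt in range(1, retries + 1):
            try:
                response = requests.get(pic_url, timeout=timeout, stream=True)
                response.raise_for_status()       # turn HTTP errors into exceptions
                with open(pic_name, 'wb') as fp:
                    for chunk in response.iter_content(256):
                        fp.write(chunk)
                print(pic_name)
                break
            except requests.exceptions.RequestException as e:
                print("attempt %d failed for %s: %s" % (attempt, pic_url, e))
                time.sleep(1)                     # brief pause before retrying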