Method 1:
Reposted from: https://zhuanlan.zhihu.com/p/367325899
# -*- coding: utf-8 -*-
"""
Created on 2021/4/19 11:47
Filename : spider_image_baidu.py
Author : Taosy
Zhihu : https://www.zhihu.com/people/1105936347
Github : https://github.com/AFei19911012
Description: Spider - get images from baidu
"""
import requests
import os
import re
def get_images_from_baidu(keyword, page_num, save_dir):
    # UA spoofing: disguise the crawler as a regular browser.
    # The User-Agent string is wrapped in a dict used as request headers.
    # To find yours: [right-click the page → Inspect, or F12] → [Network] → [Ctrl+R] → pick a request on the left, then look under [Request Headers] on the right.
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
    # Request URL
    url = 'https://image.baidu.com/search/acjson?'
    n = 0
    for pn in range(0, 30 * page_num, 30):
        # Request parameters
        param = {'tn': 'resultjson_com',
                 # 'logid': '7603311155072595725',
                 'ipn': 'rj',
                 'ct': 201326592,
                 'is': '',
                 'fp': 'result',
                 'queryWord': keyword,
                 'cl': 2,
                 'lm': -1,
                 'ie': 'utf-8',
                 'oe': 'utf-8',
                 'adpicid': '',
                 'st': -1,
                 'z': '',
                 'ic': '',
                 'hd': '',
                 'latest': '',
                 'copyright': '',
                 'word': keyword,
                 's': '',
                 'se': '',
                 'tab': '',
                 'width': '',
                 'height': '',
                 'face': 0,
                 'istype': 2,
                 'qc': '',
                 'nc': '1',
                 'fr': '',
                 'expermode': '',
                 'force': '',
                 'cg': '',    # undocumented parameter, but required
                 'pn': pn,    # result offset: 30, 60, 90, ...
                 'rn': '30',  # 30 results per page
                 'gsm': '1e',
                 '1618827096642': ''
                 }
        request = requests.get(url=url, headers=header, params=param)
        if request.status_code == 200:
            print('Request success.')
        request.encoding = 'utf-8'
        # Extract the image links with a regular expression
        html = request.text
        image_url_list = re.findall('"thumbURL":"(.*?)",', html, re.S)
        print(image_url_list)
        # # Alternative: parse the JSON response instead
        # request_dict = request.json()
        # info_list = request_dict['data']
        # # The list has an extra trailing element; drop it
        # info_list.pop()
        # image_url_list = []
        # for info in info_list:
        #     image_url_list.append(info['thumbURL'])
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        for image_url in image_url_list:
            image_data = requests.get(url=image_url, headers=header).content
            with open(os.path.join(save_dir, f'{n:06d}.jpg'), 'wb') as fp:
                fp.write(image_data)
                n = n + 1
if __name__ == '__main__':
    keyword = '猫'
    save_dir = keyword
    page_num = 2
    get_images_from_baidu(keyword, page_num, save_dir)
    print('Get images finished.')
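The JSON route sketched in the commented-out lines above can stand on its own. A minimal helper, assuming the response keeps the data/thumbURL layout that the regex also relies on (filtering by key covers the trailing empty element the comments mention):

def extract_thumb_urls(response):
    """Collect thumbnail URLs from Baidu's acjson JSON body."""
    # 'data' usually ends with an empty placeholder dict, so filter by key
    # instead of popping the last element. (Schema assumed, not guaranteed.)
    info_list = response.json().get('data', [])
    return [info['thumbURL'] for info in info_list if 'thumbURL' in info]

It would slot in where the regex call sits: image_url_list = extract_thumb_urls(request).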
Method 2:
import requests
import time
import re
import os
import faker

fake = faker.Faker('zh_CN')


class GetPage:
    def __init__(self, url, path, circulate_time):
        headers = {
            'User-Agent': fake.chrome()
        }
        self.headers = headers
        self.url = url
        self.path = path
        self.circulate_time = circulate_time
    def get_shufa(self):
        url = self.url
        # Precompile the regular expression
        obj_1 = re.compile(r'&pn=(?P<param>\d+)&', re.S)
        # Extract the original pn parameter from the URL
        old_param = int(obj_1.findall(url)[0])
        # Grab the complete '&pn=...&' fragment so it can be replaced as a unit
        old_change_param = f'&pn={old_param}&'
        # Reset pn to 0 so paging starts from the first page
        start_param = '&pn=0&'
        # Perform the replacement
        new_url = url.replace(old_change_param, start_param)
        param_num = 0
        url_list = []
        for i in range(self.circulate_time):
            try:
                resp_json = requests.get(url=new_url, headers=self.headers).json()
                for j in range(30):
                    url_list.append(resp_json['data'][j]['thumbURL'])
                # Advance the pn parameter by 30 for the next page
                old_param = f'&pn={param_num}&'
                param_num += 30
                new_param = f'&pn={param_num}&'
                new_url = new_url.replace(old_param, new_param)
                print(f'Finished requesting page [{i + 1}]')
                time.sleep(1)
            except KeyError:
                print(f'At most [{i}] page(s) are available')
                break
        page_num = 1
        # Deduplicate the collected links
        url_list = list(set(url_list))
        os.makedirs(self.path, exist_ok=True)
        for page_url in url_list:
            resp_content = requests.get(url=page_url, headers=self.headers).content
            with open(f'{self.path}/{page_num}.jpg', 'wb') as file:
                file.write(resp_content)
            print(f'Downloaded image {page_num}/{len(url_list)}')
            page_num += 1
            time.sleep(0.8)
if __name__ == '__main__':
    baidu_url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&logid=10676196996033214004&ipn=rj&ct=201326592&is=&fp=result&fr=&word=%E8%90%BD%E7%9F%B3&queryWord=%E8%90%BD%E7%9F%B3&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&hd=&latest=&copyright=&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&expermode=&nojc=&isAsync=&pn=150&rn=30&gsm=9600000000000096&1666077668271='
    save_path = 'D:/Workspace/local_py/落石'
    circulate_time = int(input('Enter the number of loops to fetch (30 images per loop): '))
    a = GetPage(url=baidu_url, path=save_path, circulate_time=circulate_time)
    a.get_shufa()
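One caveat with the string replacement in get_shufa: it assumes pn is always followed by '&', so it breaks if pn ends up as the last query parameter. A sturdier variant, sketched with only the standard library (the helper name set_pn is my own, not part of the original code):

from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode

def set_pn(url, pn):
    """Return a copy of url with its 'pn' query parameter set to pn."""
    parts = urlsplit(url)
    # keep_blank_values=True preserves the many empty parameters Baidu expects
    query = parse_qs(parts.query, keep_blank_values=True)
    query['pn'] = [str(pn)]
    # doseq=True flattens the {key: [value]} mapping that parse_qs produces
    new_query = urlencode(query, doseq=True)
    return urlunsplit((parts.scheme, parts.netloc, parts.path, new_query, parts.fragment))

Inside the loop, new_url = set_pn(baidu_url, i * 30) would then replace the three-line find-and-replace on '&pn=...&'.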