之前本来是做过这方面的代码,但是不知道弄哪儿去了,百度了一下这次重新记录下来
import requests
import os
# Crawl `pn` pages of Baidu image search JSON results and collect thumbnail URLs.
url = 'https://image.baidu.com/search/acjson?'
headers = {  # Captured from a desktop browser session; Baidu requires a valid Cookie.
    'Cookie': '.......',
    'Referer': 'https://image.baidu.com/search/index?tn=baiduimage&fm=result&ie=utf-8&word=%E5%B0%8F%E7%8B%97',  # search-results link for the keyword
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}
# Query-string template. The four {} slots are: queryWord, word, pn (result
# offset) and rn (page size). NOTE: the original contained the mis-encoded
# "©right=" (HTML-entity mangling); restored to "&copyright=".
key = 'tn=resultjson_com&logid=10902883729278332902&ipn=rj&ct=201326592&is=&fp=result &queryWord={}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=&latest=&copyright=&word={}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={}&rn={}&gsm=1e&1615729530798='
word = '小狗'  # change the search keyword here
pn = 30  # pages to fetch: 30 pages * 30 images = 900 images (eval('30') replaced — eval on a literal is unnecessary and unsafe)
imurls = []
for i in range(1, pn + 1):
    # BUG FIX: the original did `url = (url + key).format(...)`, so from the
    # second iteration on the template was appended to an already-formatted
    # URL, producing garbage requests. Build each page URL in a new variable.
    page_url = (url + key).format(word, word, i * 30, 30)
    r = requests.get(page_url, headers=headers)
    # Each entry's "thumbURL" field holds the image address.
    data = r.json().get('data')
    for im in data:
        if im:  # the JSON list ends with an empty {} sentinel — skip falsy entries
            imurls.append(im.get('thumbURL'))
print(len(imurls))
# Download every collected thumbnail into ./dog/, numbered sequentially.
fpath = os.getcwd() + '/dog/'  # target folder under the current working directory
if not os.path.exists(fpath):  # hoisted out of the loop — the check/creation only needs to run once
    os.mkdir(fpath)
i = 1
for iurl in imurls:
    if iurl:  # skip entries whose thumbURL was missing (im.get returned None)
        print('正在下载第{}张图片...'.format(i))
        ir = requests.get(iurl, headers=headers)
        with open(fpath + str(i) + '.png', mode='wb') as f:
            f.write(ir.content)
        i += 1  # only advance the counter for images actually written
下面这段代码虽然把页面滚动了 10 次,但解析的仍然只是最初加载时的第一批数据,所以只能获取约 30 条结果。
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import re
import requests
import os
# Path to the ChromeDriver executable. Raw string: in a normal string "\s"
# and "\c" are invalid escape sequences (SyntaxWarning on modern Python).
driver_path = r"D:\software_office\chromedriver_win32\chromedriver.exe"  # which chromedriver
# Launch a Chrome browser instance.
# NOTE(review): positional executable paths were removed in Selenium 4 — this
# call assumes Selenium 3; confirm the installed version.
driver = webdriver.Chrome(driver_path)
# Open the Baidu image search results page.
# NOTE: the original URL contained the mis-encoded "©right="; restored to "&copyright=".
url = 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1683774515614_R&pv=&ic=&nc=1&z=&hd=&latest=&copyright=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&dyTabStr=MCw0LDEsNiw1LDMsNyw4LDIsOQ%3D%3D&ie=utf-8&sid=&word=%E5%8E%8C%E6%81%B6%E7%9A%84%E7%9C%9F%E4%BA%BA%E8%A1%A8%E6%83%85'
driver.get(url)
# Scroll to the bottom repeatedly to trigger lazy loading of more results.
for _ in range(10):  # scroll 10 times; adjust as needed
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(1)  # give the page time to load the next batch
# Grab the rendered page source after scrolling.
html = driver.page_source
# Extract full-size image links from the embedded JSON ("objURL" fields).
res = re.findall('"objURL":"(.*?)"', html)
print('-------------', len(res))
# Save the images.
num = 0
pachong_picture_path = './disguestm'
if not os.path.exists(pachong_picture_path):
    os.mkdir(pachong_picture_path)
for i in res:
    num = num + 1
    try:
        picture = requests.get(i)
        file_name = os.path.join(pachong_picture_path, 'dis' + str(num) + ".png")
        with open(file_name, "wb") as f:
            f.write(picture.content)
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit and made the script impossible to stop.
    except Exception:
        print('Failed to download image: {}'.format(i))
driver.quit()