# 导包
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
import re
import time
import requests
# Launch a Chrome browser session.
driver = webdriver.Chrome()
# Open Baidu's home page.
driver.get("http://www.baidu.com")
# driver.get('https://www.selenium.dev/selenium/web/web-form.html')
# Maximize the window so all page elements are visible.
driver.maximize_window()
# NOTE: implicitly_wait() only configures how long find_element() polls for a
# not-yet-present element — it is NOT a pause. Set it once up front instead of
# calling it repeatedly between actions (those extra calls did nothing useful).
driver.implicitly_wait(3)
# Locate the search box and the search button on Baidu's home page.
text_box = driver.find_element(by=By.ID, value='kw')
submit_button = driver.find_element(by=By.ID, value="su")
text_box.send_keys("黑人")  # type the search query
submit_button.click()  # click Baidu's search button
# Follow the "more images" link in the image-results teaser block.
black_name = driver.find_element(by=By.CLASS_NAME, value="foot-more_3ukcZ")
black_name.click()
# Switch to the HD ("高清") filter tab on the image results page.
# NOTE(review): this XPath and the class name above are tied to Baidu's current
# page layout and will break if the site markup changes — verify periodically.
black_gq = driver.find_element(by=By.XPATH, value='//*[@id="typeFilter"]/div[2]/ul/li[2]')
black_gq.click()
time.sleep(2)  # give the image grid time to render before scraping page_source
# Scrape the relative detail-page links out of the results page source.
head = 'https://image.baidu.com'
result = re.findall(r'"imgbox-border"><a href="(.*?)" target="_blank"', driver.page_source)
print(len(result), result)
# Optional: scroll down to lazy-load more results until enough are collected.
# js = "var q=document.documentElement.scrollTop=10000"  # documentElement is the root node
# while len(result) < 100:
#     driver.execute_script(js)
#     time.sleep(2)
#     result = re.findall(r'"imgbox-border"><a href="(.*?)" target="_blank"', driver.page_source)
#     print(len(result), result)
# Build absolute detail-page URLs. Stripping "amp;" undoes the HTML entity
# escaping in the scraped hrefs ("&amp;" -> "&").
https = []
for count, link in enumerate(result, start=1):
    url = (head + link).replace("amp;", "")
    print(count, url)
    https.append(url)
# Visit each detail page and scrape the direct image URL(s) from its source.
photo = []
for page_url in https:
    driver.get(page_url)
    time.sleep(0.5)  # brief pause so the page source is fully populated
    matches = re.findall(r';" src="(.*?)" width=', driver.page_source)
    photo.append(matches)
print(len(photo), photo)
photo = list(filter(None, photo))  # drop pages where no image URL was found
# Download every scraped image to disk, one file per detail page.
for i, urls in enumerate(photo):
    # Defensive: skip entries with no URL. (The original compared
    # photo[i][0] == None, which can never be true for re.findall results
    # and would itself raise IndexError on an empty list.)
    if not urls:
        continue
    print(i, urls)
    resp = requests.get(urls[0])
    resp.raise_for_status()  # abort on HTTP errors before writing anything
    path_img = "E:/Python代码/网络爬虫/image/图{}.jpg".format(i)
    # The with-statement closes the file automatically; no explicit close().
    with open(path_img, 'wb') as f:
        f.write(resp.content)
    print(path_img, "保存成功!")
# Keep the browser open until the user presses Enter.
input('wait...')
# Selenium scrape of Baidu HD image search results
# (original post last recommended/published 2024-07-16 19:26:54)