import os
import re
import time
import urllib
import urllib.request

import requests
from bs4 import BeautifulSoup
# Request headers: send a desktop-browser User-Agent so Bing serves the
# regular HTML results page (presumably to avoid a bot/blank response —
# TODO confirm against Bing's current behavior).
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 UBrowser/6.1.2107.204 Safari/537.36'
}
path = './测试/' # directory the downloaded images are saved into
query = '中华田园猫图片' # search term (Chinese: "Chinese pastoral cat pictures")
# Matches the '"murl":"http…' JSON fragment embedded in each result anchor,
# up to (but excluding) the closing double quote — i.e. the full image URL
# prefixed by the 8 characters '"murl":"'.
rule = re.compile(r"\"murl\"\:\"http\S[^\"]+")
# Walk Bing's paginated async results: `first` is the result offset, and
# stepping by 35 with count=35 visits consecutive, non-overlapping pages.
os.makedirs(path, exist_ok=True)  # ensure the save directory exists up front
for first in range(1, 101, 35):
    # Pass the query via `params` so requests URL-encodes the non-ASCII
    # search term; this also drops the stray '=' the old URL appended
    # after the query ('q={query}=').
    html = requests.get(
        'https://cn.bing.com/images/async',
        params={'q': query, 'first': first, 'count': 35, 'mmasync': 1},
        headers=header,
    ).text
    soup = BeautifulSoup(html, "lxml")
    for idx, link in enumerate(soup.find_all("a", class_="iusc")):
        match = rule.search(str(link))
        if match is None:
            # Anchor without a "murl" payload — previously this crashed
            # with AttributeError outside the try block; skip it instead.
            continue
        img_url = match.group(0)[8:]  # strip the leading '"murl":"'
        try:
            time.sleep(0.5)  # throttle requests to be polite to the host
            # idx is 0-34 per page and first steps by 35, so idx + first
            # yields a unique filename per image across pages.
            urllib.request.urlretrieve(img_url, path + str(idx + first) + '.jpg')
        except Exception as e:
            time.sleep(1)
            print(f"获取异常,跳过... ({e})")  # report the error instead of hiding it
        else:
            print(f"成功保存{idx + first}: {img_url}")