"""Baidu image scraper: download search-result thumbnails for a keyword.

(Original note: just changing your browser's User-Agent to the one your own
browser sends should be enough to make this work.)
"""
import os
import re

# Search endpoint; query keyword is appended via the ``params`` argument.
SEARCH_URL = "https://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&dyTabStr=MCwzLDEsNCw1LDgsNiwyLDcsOQ%3D%3D&"

# Browser-like headers so Baidu serves the normal search page.
HEADERS = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36",
    "cookie": "SUB=_2AkMUSX2of8NxqwFRmPsVymjib4V_ywHEieKiFYxzJRMxHRl-yT9kqm4ptRB6P8lTRy78n4EHx38V-1U-nr9VFyGCRb2l; SUBP=0033WrSXqPxfM72-Ws9jqgMF55529P9D9W5xbkMbBZ5C3Hz54-eKnhTh; _s_tentry=-; Apache=4896943710273.367.1662623945765; SINAGLOBAL=4896943710273.367.1662623945765; ULV=1662623945777:1:1:1:4896943710273.367.1662623945765:",
}


def extension_for_url(url):
    """Return the image file extension found in *url*, or ``None``.

    Checks case-insensitively for jpg / jpeg / png / bmp substrings.

    BUG FIX: the original ``if 'JPG' or "jpg" in detail_url`` was always
    true (a non-empty string literal is truthy), so the jpeg/png/bmp
    branches were unreachable and everything was saved as ``.jpg``.
    """
    lowered = url.lower()
    for ext in ("jpeg", "jpg", "png", "bmp"):
        if ext in lowered:
            return ext
    return None


def main():
    """Prompt for a keyword, search Baidu Images, and save each thumbnail."""
    # Third-party dependency imported locally so the pure helpers above can
    # be imported without requests installed.
    import requests

    pic = input("你想要爬取的图片?\n")

    # Create the output folder (named after the keyword) next to this script.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    folder_address = os.path.join(current_dir, pic)
    os.makedirs(folder_address, exist_ok=True)

    response = requests.get(SEARCH_URL, headers=HEADERS, params={"word": pic})
    content = response.content.decode("utf8")

    # Scrape thumbnail URLs out of the embedded JSON rather than parsing HTML.
    detail_urls = re.findall('"thumbURL":"(.*?)"', content, re.DOTALL)

    i = 0
    for detail_url in detail_urls:
        ext = extension_for_url(detail_url)
        if ext is None:
            # Unrecognized image type — skip it.
            continue
        try:
            # Fetch the raw bytes of this image.
            image_bytes = requests.get(detail_url, headers=HEADERS).content
        except requests.RequestException:
            # Network/HTTP failure for one image should not abort the run;
            # narrower than the original bare ``except:``.
            continue
        # os.path.join instead of the original "{}\{}.jpg" backslash format,
        # which only worked on Windows.
        with open(os.path.join(folder_address, "{}.{}".format(i, ext)), "wb") as f:
            f.write(image_bytes)
        i += 1


if __name__ == "__main__":
    main()
图像抓取，任意图像抓取并自动保存
最新推荐文章于 2023-03-08 23:14:27 发布
![](https://img-home.csdnimg.cn/images/20240711042549.png)