# 网络爬虫,用于从Bing图片搜索结果中下载玫瑰花的图片,下载一页的图片

import json
import os
import re

import requests
from lxml import etree
from tqdm import tqdm
# 1. Send the request: first results page of a Bing image search for "rose"
#    (q= is the URL-encoded query 玫瑰花).
url = 'https://cn.bing.com/images/search?q=%e7%8e%ab%e7%91%b0%e8%8a%b1&form=HDRSC2&first=1'
headers = {'cookie':'MUID=0B7CD3127FB865D43B9AC7A07EFA6428; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=B07F85DE4F054F2D94A5D44A7A9B5DFF&dmnchg=1; MUIDB=0B7CD3127FB865D43B9AC7A07EFA6428; SRCHUSR=DOB=20240704&T=1720073840000; _EDGE_S=SID=0D102C5A2E2A6BE1054238E82F4C6A2E; ABDEF=V=13&ABDV=13&MRNB=1720075314974&MRB=0; _Rwho=u=d&ts=2024-07-04; SNRHOP=I=&TS=; USRLOC=HS=1&ELOC=LAT=22.527973175048828|LON=113.38566589355469|N=%E4%B8%AD%E5%B1%B1%E5%B8%82%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=4|; ipv6=hit=1720079074158; MMCASM=ID=ECB24D4CDF624FC991AFCF1913D6313E; _C_ETH=1; _RwBf=r=0&ilt=5&ihpd=0&ispd=4&rc=15&rb=0&gb=0&rg=200&pc=12&mtu=0&rbb=0&g=0&cid=&clo=0&v=5&l=2024-07-03T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&ard=0001-01-01T00:00:00.0000000&rwdbt=0001-01-01T00:00:00.0000000&rwflt=0001-01-01T00:00:00.0000000&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2024-07-04T06:45:23.5217099+00:00&rwred=0&wls=&wlb=&wle=&ccp=&cpt=&lka=0&lkt=0&aad=0&TH=; _SS=SID=394C94CCB425622D02C9807EB5436316&R=15&RB=0&GB=0&RG=200&RP=12; SRCHHPGUSR=SRCHLANG=zh-Hans&PV=10.0.0&BRW=M&BRH=M&CW=1318&CH=969&SCW=1301&SCH=1996&DPR=1.0&UTC=480&DM=0&EXLTT=4&HV=1720075525&WTS=63855670640&PRVCW=1318&PRVCH=969',
          'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 Edg/109.0.1518.78'}
# timeout: a hung connection would otherwise block the script forever;
# raise_for_status: fail fast instead of silently parsing an error page.
res = requests.get(url, headers=headers, timeout=30)
res.raise_for_status()
# 2/3. Parse the response HTML into an element tree.
dom = etree.HTML(res.text)
# Extract the thumbnail URL of the first result. Guard the lookup: Bing's
# markup changes often, and the original bare [0] raised IndexError then.
first_src = dom.xpath('//*[@id="mmComponent_images_1"]/ul[1]/li[1]/div/div[1]/a/div/img/@src')
if first_src:
    address = first_src[0]
    # Fetch the image itself; .content is the raw binary body.
    pic = requests.get(address, timeout=30).content
    # 4. Save the bytes to disk in binary mode.
    with open('meiguihua.jpg', mode='wb') as p:
        p.write(pic)

# Collect the full-size URL of every result on the page. Each result's <a>
# carries a JSON object in its "m" attribute whose "turl" key is the image
# URL. Parse it with json.loads -- the original used eval(), which executes
# arbitrary code supplied by an untrusted web page (and also chokes on JSON
# literals such as true/false/null).
image_urls = []
for meta in dom.xpath('//*[@id="mmComponent_images_1"]/ul/li/div/div/a/@m'):
    try:
        image_urls.append(json.loads(meta).get('turl'))
    except (ValueError, AttributeError):
        # Skip malformed metadata rather than aborting the whole run.
        continue

# Drop entries without a usable URL (missing key -> None, or empty string).
image_urls = [u for u in image_urls if u]

# Ensure the target directory exists; the original raised FileNotFoundError
# on the first write when ./images was absent.
os.makedirs('images', exist_ok=True)

# Download every image with a progress bar. The loop variable is named
# img_url so it no longer shadows the module-level search `url`.
for i, img_url in enumerate(tqdm(image_urls)):
    pic = requests.get(img_url, timeout=30).content
    with open(f'images/meiguihua_{i+1}.jpg', 'wb') as p:
        p.write(pic)

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值