python-抓取页面的图片

# Author:MG
#! /usr/bin/env python
# -*-coding:utf-8-*-

from bs4 import BeautifulSoup
import requests,time,urllib.request,os



# Request headers: present a desktop-browser User-Agent so the target site
# serves its normal HTML instead of rejecting an obvious script.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0",
}

# Page to scrape; input() already returns str, no str() wrapper needed.
index = input("请输入要爬取图片的网页:")
web_data1 = requests.get(index, headers=headers)
web_data1.encoding = 'utf-8'  # force UTF-8 so Chinese pages decode correctly
soup1 = BeautifulSoup(web_data1.text, 'lxml')

# NOTE(review): `number` is prompted for but never used below — it belonged
# to the pagination code that is commented out above. The prompt is kept so
# the script's interactive behavior is unchanged; wire it up or remove it
# when pagination is restored.
number = int(input("请输入下载多少页图片:"))

download_links = []

folder_path = "./my_pics/"
# makedirs(exist_ok=True) replaces the check-then-create pattern (race-free).
os.makedirs(folder_path, exist_ok=True)

# Collect candidate image URLs from every <img> tag on the page.
for pic_tag in soup1.find_all('img'):
    pic_link = pic_tag.get('src')  # may be None when the tag has no src
    # Lazy-loading sites often use a .gif placeholder in 'src' and keep the
    # real image URL in 'org_src'.
    if pic_link and pic_link.endswith(".gif"):
        pic_link = pic_tag.get('org_src')
    # Explicit None check replaces the old bare try/except-pass, which
    # silently swallowed every error here.
    if pic_link:
        download_links.append(pic_link)

print(len(download_links))

for item in download_links:
    try:
        # Protocol-relative URLs ("//host/x.jpg") need a scheme prepended.
        if not item.startswith("http"):
            item = "http:" + item
        print(item)
        # Bug fix: the original checked os.path.exists on item[-10:] but
        # saved the file as item[-8:], so the skip-if-downloaded check could
        # never match. Derive one filename (the URL basename) and use it for
        # both the check and the save.
        filename = item.rsplit('/', 1)[-1]
        target = os.path.join(folder_path, filename)
        if not os.path.exists(target):
            urllib.request.urlretrieve(item, target)
    except Exception as e:
        # Best-effort download loop: report the failure and continue with
        # the remaining links instead of aborting the whole run.
        print(e)

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值