Python mini-programs: the image edition

Preface

Python makes it easy to get a working program up in the simplest possible way. This installment collects a few Python scripts for batch-downloading images (and, in the first example, videos).


1. 包图网 (ibaotu.com)

import os

import requests
import parsel

# make sure the output folder exists before any file is written
os.makedirs('video', exist_ok=True)

for page in range(1, 10):
    print(f'======= page {page} =======')
    url = f'https://ibaotu.com/shipin/7-0-0-0-0-{page}.html'
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36'
    }
 
    response = requests.get(url=url, headers=headers)
    html = response.text
    #print(html)
 
 
    selector = parsel.Selector(html)
    lis = selector.xpath('//ul[@class="clearfix"]/li')  # all <li> tags, one per video
 
    for li in lis:
        video_title = li.xpath('./@pr-data-title').get()
        video_url = li.xpath('.//video/@src').get()
        #print(video_title, video_url)

        if not video_title or not video_url:    # skip entries without a preview video
            continue

        video_url = 'https:' + video_url        # the src attribute is protocol-relative
 
        video_data = requests.get(url=video_url, headers=headers).content
 
        with open('video\\' + video_title + '.mp4', mode='wb') as f:
            f.write(video_data)
            print('saved:', video_title)
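
One thing the script above takes on faith is that every pr-data-title is a legal Windows file name; if a title ever contains a character such as ? or :, the open() call fails. A minimal sanitizing helper is one way to guard against that (safe_name is an illustrative name, not part of the original script):

import re

def safe_name(title):
    # Strip characters that Windows does not allow in file names.
    return re.sub(r'[\\/:*?"<>|]', '_', title).strip()

# usage inside the loop above:
#     with open('video\\' + safe_name(video_title) + '.mp4', mode='wb') as f: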

2. 彼岸桌面 (netbian.com)

import os
import bs4
import re
import time
import requests
from bs4 import BeautifulSoup
 
 
def getHTMLText(url, headers):
    """Request the target page and return it parsed with BeautifulSoup."""
    try:
        r = requests.get(url=url, headers=headers, timeout=10)
        r.encoding = r.apparent_encoding
        soup = BeautifulSoup(r.text, "html.parser")
        return soup
    except requests.RequestException:
        return ""
 
 
def CreateFolder():
    """Create the folder that will hold the downloaded pictures."""
    while True:
        file = input("Enter a name for the download folder: ")
        if not os.path.exists(file):
            os.mkdir(file)
            break
        print('That folder already exists, please enter another name.')

    # os.path.abspath(file) gives the folder's absolute path
    path = os.path.abspath(file) + "\\"
    return path
 
 
def fillUnivList(ulist, soup):
    """Collect the link to each picture's full-size page."""
    # find_all returns a list; [0] keeps the first <div class="list"> tag
    div = soup.find_all('div', 'list')[0]
    for a in div('a'):
        if isinstance(a, bs4.element.Tag):
            hr = a.attrs['href']
            href = re.findall(r'/desk/[1-9]\d{4}.htm', hr)
            if href:
                ulist.append(href[0])

    return ulist
 
 
def DownloadPicture(left_url, list, path):
    """Open each full-size page, read the image URL and save the file."""
    for right in list:
        url = left_url + right
        r = requests.get(url=url, timeout=10)
        r.encoding = r.apparent_encoding
        soup = BeautifulSoup(r.text, "html.parser")
        tag = soup.find_all("p")
        # the alt attribute of the <img> tag names the saved file
        name = tag[0].a.img.attrs['alt']
        img_name = name + ".jpg"
        # the src attribute holds the actual image URL
        img_src = tag[0].a.img.attrs['src']
        try:
            img_data = requests.get(url=img_src, timeout=10)
        except requests.RequestException:
            continue

        img_path = path + img_name
        with open(img_path, 'wb') as fp:
            fp.write(img_data.content)
        print(img_name, "   ****** downloaded!")
 
 
def PageNumurl(urls):
    """Build the URL of every listing page up to the requested page number."""
    num = int(input("Enter the last page number to crawl: "))
    for i in range(2, num + 1):
        u = "http://www.netbian.com/index_" + str(i) + ".htm"
        urls.append(u)

    return urls
 
 
if __name__ == "__main__":
    uinfo = []
    left_url = "http://www.netbian.com"
    urls = ["http://www.netbian.com/index.htm"]
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"
    }
    start = time.time()
    # 1. create the folder that will hold the downloads
    path = CreateFolder()
    # 2. decide how many pages to crawl and build the link of each listing page
    PageNumurl(urls)
    n = int(input("Start from page: "))
    for i in urls[n - 1:]:
        # 3. fetch and parse the listing page
        soup = getHTMLText(i, headers)
        if not soup:          # request failed, move on to the next page
            continue
        # 4. collect the link to each picture's full-size page
        page_list = fillUnivList(uinfo, soup)
        # 5. download the full-size pictures
        DownloadPicture(left_url, page_list, path)

    print("All downloads finished!", str(len(os.listdir(path))) + " pictures in total")
    end = time.time()
    print("Total time: " + str(end - start) + " seconds")

3. 靓丽图库 (hexuexiao.cn)

import os

import requests
import parsel

# make sure the output folder exists before any file is written
os.makedirs('img', exist_ok=True)

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"
}

for page in range(1, 5):
    print(f'============ page {page} ===========')

    url = f'https://www.hexuexiao.cn/meinv/guzhuang/list-{page}.html'

    response = requests.get(url=url, headers=headers)
    text = response.text
    #print(text)

    selector = parsel.Selector(text)
    # link to the detail page of every picture on the listing page
    href_list = selector.xpath('//div[@class="waterfall_1box"]/dl/dd/a/@href').getall()
    #print(href_list)

    for href in href_list:
        href_data = requests.get(url=href, headers=headers).text

        selector_2 = parsel.Selector(href_data)

        # the "download original" button links to the full-size image
        img_url = selector_2.xpath('//a[@class="btn btn-default btn-xs"]/@href').get()
        #print(img_url)

        if img_url is None:      # skip detail pages with a different layout
            continue

        img_data = requests.get(url=img_url, headers=headers).content

        filename = img_url.split('/')[-1]    # last part of the URL becomes the file name
        #print(filename)

        with open('img\\' + filename, mode='wb') as f:
            f.write(img_data)
            print(filename)
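
Re-running this script downloads every picture again. A small helper (illustrative only, assuming the same img folder) lets the inner loop skip files that already exist on disk:

import os

def already_saved(filename, folder='img'):
    # True if a file with this name was saved on a previous run.
    return os.path.exists(os.path.join(folder, filename))

# usage inside the inner loop above:
#     if already_saved(filename):
#         continue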
 
 

4. 美桌网 (win4000.com)

import os

import requests
import parsel

# make sure the output folder exists before any file is written
os.makedirs('img', exist_ok=True)
 
headers = {
        'Cookie': 't=f8e5c504a4e96b4ddcd519559ac24870; r=1066; XSRF-TOKEN=eyJpdiI6IkdLWWRpRVlmUzk2RVNIWVVENms0V2c9PSIsInZhbHVlIjoiVDcwbjlCNEVnUFZHdG5XREJ1VWIxZWlNN2dPTG5XSEh4UTIycTdaQ2V2aWF1UkpPU2dpeEU0Z3lQaVZJazhscWt1XC80cEt0eVJcL2doaXVlZXZBaWh3U2hGNlU2VXlSQzRrY3lcL1BjdkV3b2dMUFR2MTk1UHhcL0tNY2NORVJ3MGdZIiwibWFjIjoiOTM4YWZmNTAyZDBhY2RkMmQzOWQyMDAzZDAyNzhjMDk5ZWZhZWJjMjAzMDI3MzI3MDI2ZGZjYjQ3MDJhYTZlMCJ9; win4000_session=eyJpdiI6IklKSlVWMlFHTkkzaExpdEVGYlNweUE9PSIsInZhbHVlIjoibVpxYjhHWGhocVBYZFwvQW8rZzFzdG1Td3BuRzVLbzRjTHdQd2J4aTJHWXJcL1hpTk5YNU9GUnNTSHpVU2p3Y3UxcDE3UE1UV3BzT2hJektjM2R6K1l0NGR4RmFiY1NhZlIzR0VzakhEQzdmT3Z4UjR2ZDdjZ2RJYlhcL2xzY1J4TnMiLCJtYWMiOiI2NGUyMWEwOWIxOWM1NWJkNmI1OTJlYTJjYmRjMjI1MDdkNWRmOTc5NWMyN2NlNmM0MzRkZThmMjYxOTQ2MjI0In0%3D',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
}
 
base_url = 'http://www.win4000.com/mobile_2340_0_0_1.html'
 
response = requests.get(url=base_url, headers=headers)
data = response.text
#print(data)
 
html_data = parsel.Selector(data)
data_list = html_data.xpath('//div[@class="Left_bar"]//ul[@class="clearfix"]/li/a/@href').extract()
#print(data_list)
 
for alist in data_list:
    #print(alist)
    response_2 = requests.get(url=alist, headers=headers).text

    html_2 = parsel.Selector(response_2)
    # cover image of the gallery page
    img_url = html_2.xpath('//div[@class="pic-meinv"]/a/img/@src').extract_first()
    #print(img_url)

    if img_url is None:          # skip galleries whose page layout differs
        continue

    img_data = requests.get(url=img_url, headers=headers).content

    file_name = img_url.split('/')[-1]

    with open('img\\' + file_name, 'wb') as f:
        print(file_name)
        f.write(img_data)
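
All four scrapers so far end with the same pattern: fetch the binary content, derive a file name, write it in 'wb' mode. If you reuse these scripts, a shared helper keeps that logic in one place; this is only a sketch, and save_binary is an illustrative name rather than anything from the original post:

import os
import requests

def save_binary(url, folder, filename, headers=None):
    """Download url and write it to folder/filename in binary mode."""
    os.makedirs(folder, exist_ok=True)       # create the folder on first use
    data = requests.get(url=url, headers=headers, timeout=10).content
    path = os.path.join(folder, filename)
    with open(path, mode='wb') as f:
        f.write(data)
    return path

With it, the tail of the loop above reduces to a single save_binary(img_url, 'img', file_name, headers) call.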
 
 

5. League of Legends skin images (英雄联盟)

import requests
import os
url = 'https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js'
 
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
}
 
def save(hero_name, name, img_url):
    """Save one skin image into a folder named after the hero."""
    filename = f'{hero_name}\\'
    if not os.path.exists(filename):
        os.mkdir(filename)

    # some skin names contain "/" (e.g. K/DA), which is not allowed in a file name
    name = name.replace('/', ' ')

    img_content = requests.get(url=img_url).content
    with open(filename + name + '.jpg', mode='wb') as f:
        f.write(img_content)
 
 
response = requests.get(url=url, headers=headers)
heroes = response.json()['hero']
for hero in heroes:
    hero_id = hero['heroId']
    hero_url = f'https://game.gtimg.cn/images/lol/act/img/js/hero/{hero_id}.js'
    response_1 = requests.get(url=hero_url, headers=headers)
    skins = response_1.json()['skins']
    for index in skins:
        title = index['heroTitle']
        hero_name = index['heroName'] + title
        img_name = index['name']
        img_url = index['mainImg']
        if not img_url:                    # some skins only ship a chroma image
            img_url = index['chromaImg']
        if img_url:
            save(hero_name, img_name, img_url)
    print(title)
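
Every skin costs one request against the image CDN, and some champions have a lot of skins. Pausing briefly between downloads is a polite default; the half-second value below is arbitrary and not something the original script uses:

import time

# inside the skins loop, right after a successful save():
time.sleep(0.5)    # small, arbitrary pause so the image CDN is not hit in bursts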

Thanks to everyone who read this article; more to come, so stay tuned.

Parts of this article draw on other sources; if anything infringes your rights, please get in touch and it will be corrected or removed.

 
