微博内容图片 & 下载文件 & while死循环 & python代码精简合并

超级精简合并(看不懂系列)

import requests,os,json
from multiprocessing.dummy import Pool

def save_img(url):
    """Download one image URL and save it under the global directory `c`.

    The filename is the last 36 characters of the URL (Weibo image URLs
    end with a 32-char hash plus an extension).

    NOTE(review): relies on the global `c` set in the __main__ block;
    workers are multiprocessing.dummy threads, so the global is shared.
    """
    # Timeout so one stuck URL cannot hang a pool worker forever.
    resp = requests.get(url, timeout=30)
    with open(os.path.join(c, url[-36:]), 'wb') as f:
        f.write(resp.content)
    print(url[-36:]+'\n')
	
if __name__ == '__main__':
    # Collect every "large" image URL for the requested page range of the
    # hard-coded Weibo account, then download them with a thread pool.
    urls = []  # renamed: the original shadowed the builtin `list`
    a, b, c = int(input('输入开始页 :')), int(input('输入结束页 :')), input('创建目录名 :')
    os.makedirs(c, exist_ok=True)  # idempotent; replaces the racy exists() check
    for page in range(a, b + 1):
        api = ('https://m.weibo.cn/api/container/getIndex'
               '?uid=1669879400&containerid=1076031669879400&page=' + str(page))
        cards = requests.get(url=api, timeout=30).json().get('data').get('cards')
        for card in cards:
            mblog = card.get('mblog')
            # Cards without an mblog entry (e.g. ads) or without pics are skipped.
            if mblog is not None and mblog.get('pics') is not None:
                for pic in mblog.get('pics'):
                    urls.append(pic.get('large').get('url'))
    pool = Pool(44)
    pool.map(save_img, urls)
    pool.close()  # the original never closed/joined the pool (its twin below does)
    pool.join()
    print('\n'+'....下载完成....')

微博内容图片

import requests
import os
import json
from multiprocessing.dummy import Pool

# Target Weibo account: the user id and the "container" id of that user's
# feed, both interpolated into the m.weibo.cn mobile API URL below.
uid = '1669879400'
containerid = '1076031669879400'

# Optional request headers. The endpoint usually answers without them, so
# the User-Agent and cookie entries are left commented out for the reader
# to fill in if the API starts refusing anonymous requests.
headers = {
           #'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
           #'cookie': ''
           }

def save_img(url):
    """Download one image and write it into the directory named by global `c`.

    The filename is the last 36 characters of the URL (hash + extension).

    NOTE(review): depends on the global `c` assigned in __main__; workers
    are multiprocessing.dummy threads, so the global is shared.
    """
    name = url[-36:]
    # `resp` (was misleadingly named `date`); timeout keeps a dead URL from
    # blocking a pool worker indefinitely.
    resp = requests.get(url, timeout=30)
    with open(c+'/'+name, 'wb') as f:
        f.write(resp.content)
    print(name+'\n')

if __name__ == '__main__':
    # Walk the requested page range of the account's feed, gather every
    # "large" image URL, then download them concurrently.
    urls = []  # renamed: the original shadowed the builtin `list`
    a = int(input('输入开始页 :'))
    b = int(input('输入结束页 :'))
    c = input('创建目录名 :')
    os.makedirs(c, exist_ok=True)  # idempotent; replaces the racy exists() check
    for page in range(a, b + 1):
        target = ('https://m.weibo.cn/api/container/getIndex?uid=' + uid +
                  '&containerid=' + containerid + '&page=' + str(page))
        req = requests.get(target, headers=headers, timeout=30)
        items = req.json().get('data').get('cards')
        for card in items:
            # Cards are already dicts; the original's dict(card) copy was redundant.
            mblog = card.get('mblog')
            if mblog is not None and mblog.get('pics') is not None:
                for pic in mblog.get('pics'):
                    urls.append(pic.get('large').get('url'))
    pool = Pool(44)
    pool.map(save_img, urls)
    pool.close()
    pool.join()
    print('\n'+'....下载完成....')

下载文件 & while死循环

import time
import requests

def downloadFile(name, url):
    """Stream `url` into local file `name`, printing progress every ~2 seconds.

    Each report shows percent complete and the average speed (MB/s) over
    the last reporting interval.

    NOTE(review): still assumes the server sends a Content-Length header;
    a chunked response without one raises KeyError, as in the original.
    """
    headers = {'Proxy-Connection': 'keep-alive'}
    # stream=True downloads in chunks instead of buffering the whole body.
    r = requests.get(url, stream=True, headers=headers, timeout=30)
    length = float(r.headers['content-length'])
    count = 0        # total bytes written so far
    count_tmp = 0    # bytes written as of the last progress report
    time1 = time.time()
    # `with` guarantees the file is closed even if the transfer raises —
    # the original leaked the handle on any exception.
    with open(name, 'wb') as f:
        for chunk in r.iter_content(chunk_size=512):
            if chunk:
                f.write(chunk)
                count += len(chunk)
                if time.time() - time1 > 2:
                    p = count / length * 100
                    speed = (count - count_tmp) / 1024 / 1024 / 2
                    count_tmp = count
                    print(name + ': ' + formatFloat(p) + '%' + ' Speed: ' + formatFloat(speed) + 'M/S')
                    time1 = time.time()

def formatFloat(num):
    """Render a number with exactly two digits after the decimal point."""
    return f'{num:.2f}'

if __name__ == '__main__':
	i = 1
	while i ==1:
    	url = input('输入要下载的文件地址:')
    	name = input('输入要下载的文件名字: ')
    	downloadFile(name,url)
    	print('下载完成...')
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值