Download example code:
from bs4 import BeautifulSoup
from urllib.request import urlretrieve  # urlretrieve is used for the actual file download
import requests
import os
import time

if __name__ == '__main__':
    list_url = []
    # Collect "title=link" pairs from each index page (only page 1 here)
    for num in range(1, 2):
        if num == 1:
            url = 'http://www.shuaia.net/index.html'
        else:
            url = 'http://www.shuaia.net/index_%d.html' % num
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
        }
        req = requests.get(url=url, headers=headers)
        req.encoding = 'utf-8'
        html = req.text
        bf = BeautifulSoup(html, 'lxml')
        targets_url = bf.find_all(class_='item-img')
        for each in targets_url:
            # store "image title=detail-page URL" so both parts can be recovered later
            list_url.append(each.img.get('alt') + '=' + each.get('href'))
    print('Link collection finished')

    for each_img in list_url:
        img_info = each_img.split('=')  # split each entry back into [title, detail-page URL]
        target_url = img_info[1]
        filename = img_info[0] + '.jpg'
        print('Downloading: ' + filename)
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
        }
        img_req = requests.get(url=target_url, headers=headers)
        img_req.encoding = 'utf-8'
        img_html = img_req.text
        img_bf_1 = BeautifulSoup(img_html, 'lxml')
        img_url = img_bf_1.find_all('div', class_='wr-single-content-list')
        img_bf_2 = BeautifulSoup(str(img_url), 'lxml')
        img_url = 'http://www.shuaia.net' + img_bf_2.div.img.get('src')
        if 'images' not in os.listdir():
            os.makedirs('images')
        urlretrieve(url=img_url, filename='images/' + filename)  # urlretrieve(URL, local file name)
        time.sleep(1)
    print('Download finished!')
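A minimal alternative sketch: the same download step can also be done with requests itself by fetching the raw bytes and writing them to a file, which makes it easy to reuse the existing headers; the example_url and file name below are placeholders for illustration, not part of the original script:

import os
import requests

example_url = 'http://www.shuaia.net/some-image.jpg'  # placeholder URL for illustration only
headers = {"User-Agent": "Mozilla/5.0"}
resp = requests.get(example_url, headers=headers)
resp.raise_for_status()                       # fail early if the server did not return 200
os.makedirs('images', exist_ok=True)          # create the output folder if it is missing
with open('images/example.jpg', 'wb') as f:
    f.write(resp.content)                     # write the raw image bytes to disk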
Displaying download progress:
import os
from urllib.request import urlretrieve

def cbk(a, b, c):
    '''Report-hook callback used to display download progress.
    a: number of data blocks downloaded so far
    b: size of one data block
    c: total size of the remote file
    '''
    per = 100.0 * a * b / c
    if per > 100:
        per = 100
    print('%.2f%%' % per)

url = 'http://www.python.org/ftp/python/2.7.5/Python-2.7.5.tar.bz2'
dir = os.path.abspath('.')
work_path = os.path.join(dir, 'Python-2.7.5.tar.bz2')
urlretrieve(url, work_path, cbk)  # cbk is called after every block, printing the progress
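If requests is preferred over urlretrieve, progress can be shown in much the same way by streaming the response and counting the bytes of each chunk; a rough sketch, assuming the server returns a Content-Length header (the URL is the same example file as above):

import requests

url = 'http://www.python.org/ftp/python/2.7.5/Python-2.7.5.tar.bz2'
resp = requests.get(url, stream=True)
total = int(resp.headers.get('Content-Length', 0))  # total size; 0 if the header is missing
done = 0
with open('Python-2.7.5.tar.bz2', 'wb') as f:
    for chunk in resp.iter_content(chunk_size=8192):
        f.write(chunk)
        done += len(chunk)
        if total:
            print('%.2f%%' % (100.0 * done / total))  # same percentage format as cbk above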