使用BeautifulSoup4爬取豆瓣电影Top250的图片保存到本地。
1.安装必要的包
pip install beautifulsoup4  # 注意:应安装官方包 beautifulsoup4,而不是非官方的 bs4 占位包
pip install requests
pip install lxml
2.上代码
# --coding:utf-8--
import os
import re
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
from urllib.request import urlretrieve

import requests
from bs4 import BeautifulSoup
# Directory that will hold all downloaded poster images.
download_path = './DouBanTop250'
# exist_ok=True replaces the check-then-create pattern: no TOCTOU race,
# and idempotent when the directory already exists.
os.makedirs(download_path, exist_ok=True)
def download(url):
    """Fetch one Top250 list page and save every poster image on it.

    Args:
        url: URL of a single `movie.douban.com/top250` list page.

    Raises:
        requests.HTTPError: if the list page or an image request fails.
    """
    # Douban rejects requests without a browser-like User-Agent.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/87.0.4280.141 Safari/537.36'}
    rq = requests.get(url, headers=headers)
    # Fail loudly on 4xx/5xx instead of silently parsing an error page.
    # (Also removed the leftover debug `print(rq.text)` that dumped the whole HTML.)
    rq.raise_for_status()
    soup = BeautifulSoup(rq.text, 'lxml')
    content = soup.find(class_='article')  # container div holding the 25 movie entries
    images = content.find_all('img')
    for image in images:
        link = image['src']
        # Movie titles come from untrusted HTML; strip characters that are
        # illegal or dangerous in filenames before building the path.
        name = re.sub(r'[\\/:*?"<>|]', '_', image['alt'])
        # Download via requests so the image CDN also sees our User-Agent
        # (urlretrieve sends a default Python UA that may be blocked).
        img = requests.get(link, headers=headers)
        img.raise_for_status()
        with open(f'{download_path}/{name}.jpg', 'wb') as f:
            f.write(img.content)
    print(f'{url}下载完成')
def main():
    """Build the ten Top250 list-page URLs and download them concurrently."""
    # Pages are paged by 25 entries: start=0, 25, ..., 225.
    page_urls = [f'https://movie.douban.com/top250?start={25 * i}&filter=' for i in range(10)]
    # Fan the pages out over a thread pool; downloading is I/O-bound,
    # so threads overlap the network waits.
    with ThreadPoolExecutor(max_workers=10) as executor:
        pending = [executor.submit(download, page) for page in page_urls]
        # Block until every page worker has finished before returning.
        wait(pending, return_when=ALL_COMPLETED)
# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()