[Scraping Basics] Scraping the Douban Movie Top250


Use BeautifulSoup4 to scrape the poster images of the Douban Movie Top250 and save them to the local disk.
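The approach relies on the page structure that the script below exploits: each list page wraps its 25 entries in a container with class `article`, and every poster is an `<img>` whose `src` holds the image URL and whose `alt` holds the movie title. Here is a minimal sketch of just that parsing step, run against a toy snippet (the HTML is illustrative, not Douban's exact markup; install the packages from step 1 first):

from bs4 import BeautifulSoup

# toy HTML mimicking the structure the scraper relies on
html = '''
<div class="article">
  <img src="https://example.com/poster1.jpg" alt="肖申克的救赎">
  <img src="https://example.com/poster2.jpg" alt="霸王别姬">
</div>
'''

soup = BeautifulSoup(html, 'lxml')
article = soup.find(class_='article')    # container holding the movie list
for img in article.find_all('img'):      # every poster <img> inside it
    print(img['alt'], '->', img['src'])  # title -> image URL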

1. Install the required packages

pip install beautifulsoup4
pip install requests
pip install lxml
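After installing, a quick sanity check that all three packages import (an ImportError here means something did not install):

# verify the three dependencies and print their versions
import requests
import bs4
from lxml import etree

print('requests', requests.__version__)
print('beautifulsoup4', bs4.__version__)
print('lxml', etree.__version__)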

2. The code

# -*- coding: utf-8 -*-
import os
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED

download_path = './DouBanTop250'

if not os.path.exists(download_path):
    os.makedirs(download_path)


def download(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/87.0.4280.141 Safari/537.36'}

    rq = requests.get(url, headers=headers)
    soup = BeautifulSoup(rq.text, 'lxml')

    content = soup.find(class_='article')  # container div holding the movie list

    images = content.find_all('img')  # all poster <img> tags on this page

    link_list = [image['src'] for image in images]
    name_list = [image['alt'] for image in images]

    # save each poster to disk; download with the same headers, since
    # Douban's image CDN may reject clients without a browser User-Agent
    for name, link in zip(name_list, link_list):
        img = requests.get(link, headers=headers)
        with open(f'{download_path}/{name}.jpg', 'wb') as f:
            f.write(img.content)

    print(f'{url} done')


def main():
    # build the URLs of all 10 list pages (25 movies per page)
    url_list = [f'https://movie.douban.com/top250?start={25 * i}&filter=' for i in range(0, 10)]

    # baseline: download the pages one by one, sequentially

    # for url in url_list:
    #     download(url)

    # thread pool: fetch up to 10 list pages concurrently
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = []
        for url in url_list:
            future = executor.submit(download, url)
            futures.append(future)

    # exiting the `with` block above already joins every submitted task,
    # so this explicit wait is redundant but harmless
    wait(futures, return_when=ALL_COMPLETED)


if __name__ == '__main__':
    main()
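A note on robustness: ten concurrent page fetches plus 250 image downloads can trip Douban's rate limiting, and one failed request currently aborts the whole page it belongs to. Below is a hedged variant of the save loop with a timeout, a status check, and a small pause between downloads; it reuses `download_path` and the `headers` dict from the script above, and the 0.5-second delay is an arbitrary politeness value, not something Douban documents:

import time
import requests

def save_posters(name_list, link_list, headers):
    # variant of the save loop: skip failures instead of crashing
    for name, link in zip(name_list, link_list):
        try:
            img = requests.get(link, headers=headers, timeout=10)
            img.raise_for_status()  # turn 4xx/5xx responses into exceptions
        except requests.RequestException as e:
            print(f'skip {name}: {e}')
            continue
        with open(f'{download_path}/{name}.jpg', 'wb') as f:
            f.write(img.content)
        time.sleep(0.5)  # arbitrary delay to be polite to the server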

3. The results

(screenshot of the downloaded posters)
