Douban Scraper: Regular Expressions and bs4

This code example shows how to fetch pages with Python's requests library, parse the HTML with regular expressions to extract the name, rating, review count, release year, country, and genre of each Douban Top 250 movie, and save the results to a CSV file. A BeautifulSoup-based parsing approach is also included.

Douban scraper (requests + regex)

import requests
from csv import writer
from re import findall


def get_one_page(csv_writer, start=0):
    # 1. Fetch the page HTML
    url = f'https://movie.douban.com/top250?start={start}&filter='
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    html = response.text
    # print(html)
    # 2. Parse the data
    # 1) Names of all movies
    names = findall(r'<img width="100" alt="(.+?)"', html)

    # 2) Release year, country, and genre of each movie
    info = findall(r'(?s)<p class="">(.+?)</p>', html)
    # keep only the last line of each <p> block (year / country / genres)
    info = [x.strip().split('\n')[-1].strip() for x in info]

    times = []
    countries = []
    types = []
    for x in info:
        # the three fields are separated by the literal '&nbsp;/&nbsp;' in the raw HTML
        result = x.split('&nbsp;/&nbsp;')
        times.append(result[0])
        countries.append(result[1])
        types.append(result[2])

    # 3) Ratings
    score = findall(r'<span class="rating_num" property="v:average">(.+?)</span>', html)

    # 4) Number of reviewers (the pattern matches the Chinese suffix "人评价" on the page)
    comment = findall(r'<span>(\d+)人评价</span>', html)

    # combine the parallel lists into per-movie rows
    data = zip(names, score, comment, times, countries, types)

    # Save the rows to the CSV file
    csv_writer.writerows(data)
    print('------------------------- one page done -------------------------')

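A quick note on the (?s) inline flag used in the info pattern above: by default, . in a regex does not match newlines, and each <p class=""> block in the page source spans several lines. A minimal demo of the difference; the HTML snippet here is invented for illustration:

from re import findall

snippet = '''<p class="">
    1994&nbsp;/&nbsp;美国&nbsp;/&nbsp;犯罪 剧情
</p>'''

# without (?s), '.' stops at '\n', so the multi-line block never matches
print(findall(r'<p class="">(.+?)</p>', snippet))      # -> []

# with (?s), '.' also matches '\n' and the lazy +? keeps the capture minimal
print(findall(r'(?s)<p class="">(.+?)</p>', snippet))  # -> the whole inner block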

def get_one_page2():
    # Variant: grab name, info block, rating, and review count with one combined regex
    # 1. Fetch the page HTML
    url = 'https://movie.douban.com/top250?start=0&filter='
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    html = response.text
    # print(html)
    # 2. Parse the data with a single combined pattern
    result = findall(r'(?s)<img width="100" alt="(.+?)".+?<p class="">(.+?)</p>.+?<span class="rating_num" property="v:average">(.+?)</span>.+?<span>(\d+)人评价</span>', html)
    print(result)

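get_one_page2 stops at printing the raw 4-tuples of (name, info block, rating, review count). As a hedged sketch (not in the original code), the tuples could be cleaned into the same row shape the CSV writer expects, reusing the string handling from get_one_page:

def parse_combined(result):
    # result: list of (name, raw_info, rating, review_count) tuples from get_one_page2
    rows = []
    for name, raw_info, rating, review_count in result:
        # keep the last line of the info block, then split on the literal separator
        line = raw_info.strip().split('\n')[-1].strip()
        year, country, genres = line.split('&nbsp;/&nbsp;')
        rows.append((name, rating, review_count, year, country, genres))
    return rows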

if __name__ == '__main__':
    # assumes a files/ directory already exists next to the script
    f = open('files/top250.csv', 'w', encoding='utf-8', newline='')
    w2 = writer(f)
    w2.writerow(['Title', 'Rating', 'Review count', 'Release year', 'Country', 'Genre'])

    # start = 0, 25, ..., 225 covers all ten pages of the Top 250
    for x in range(0, 226, 25):
        get_one_page(w2, x)

    f.close()
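
The loop above fires ten requests back-to-back with no timeout or pause. A minimal hardening sketch, assuming the same headers dict; fetch, the retry count, and the sleep values are illustrative choices, not part of the original code:

import time

import requests

def fetch(url, headers, retries=3):
    # retry a few times with a timeout so one flaky request doesn't abort the run
    for _ in range(retries):
        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            return response.text
        except requests.RequestException:
            time.sleep(2)  # brief back-off before retrying (arbitrary value)
    raise RuntimeError(f'failed to fetch {url}')

A time.sleep(1) between pages inside the main loop would also keep the request rate modest.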

Douban scraper (BeautifulSoup version)

import requests
from bs4 import BeautifulSoup

# 1. Fetch the page source
headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}
response = requests.get('https://movie.douban.com/top250', headers=headers)
html = response.text

# 2. Parse the data
# 'lxml' needs the third-party lxml package; the built-in 'html.parser' also works
soup = BeautifulSoup(html, 'lxml')

# Get the div that wraps each movie
div_list = soup.select('.grid_view>li>div')
for x in div_list:
    name = x.select_one('.title').text
    score = x.select_one('.rating_num').text
    # the last span under .star reads like "123456人评价"; [:-3] strips the "人评价" suffix
    comment = x.select('.star>span')[-1].text[:-3]
    print(name, score, comment)
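
The bs4 loop only extracts the name, rating, and review count so far. Continuing with the same div_list, here is a hedged sketch of pulling out the year/country/genre line as well; note that BeautifulSoup decodes &nbsp; into the non-breaking space character '\xa0', so the separator differs from the one used in the regex version:

for x in div_list:
    # the last line of the .bd > p block reads "year / country / genres"
    info_line = x.select_one('.bd > p').text.strip().split('\n')[-1].strip()
    # bs4 turns &nbsp; into '\xa0', so split on '\xa0/\xa0' here
    year, country, genres = info_line.split('\xa0/\xa0')
    print(year, country, genres)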
