# day2 爬虫作业 (Day 2: web-scraping homework)
"""
创建者:卢俊杰
创建时间:2023/1/4,17:16
"""
import csv
from pathlib import Path
from re import fullmatch, search, split, sub

import requests
from bs4 import BeautifulSoup
# Browser-like User-Agent so douban.com does not reject the request outright.
headers = {
    'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.136 "
                  "Safari/537.36 "
}
FIELDNAMES = ['电影名', '评分', '评论人数', '总结', '简介']

# The output directory must exist before opening the CSV in append mode,
# otherwise open() raises FileNotFoundError on the first run.
Path('file').mkdir(exist_ok=True)

# Douban Top250 is paginated 25 movies per page: start = 0, 25, ..., 225.
for start in range(0, 250, 25):
    response = requests.get(
        url=f'https://movie.douban.com/top250?start={start}&filter=',
        headers=headers,
    )
    # Fail loudly on a bad HTTP status instead of silently parsing an error page.
    response.raise_for_status()
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'lxml')

    # Rating score, one per movie.
    rates = [tag.text for tag in soup.select('.star .rating_num')]
    # Movie title comes from the poster image's alt attribute.
    names = [img.attrs['alt'] for img in soup.select('.grid_view .pic img')]

    # '.star span' also matches the star icon and the score itself; keep only
    # the "<N>人评价" ("N people rated") entries and extract the number N.
    comment_nums = [
        m.group(1)
        for m in (fullmatch(r'(\d+)人评价', tag.text) for tag in soup.select('.star span'))
        if m
    ]

    # One-line quote per movie; some movies have none, which becomes ''.
    quote_tags = [info.select_one('.quote span.inq') for info in soup.select('.info')]
    quotes = [tag.text if tag is not None else '' for tag in quote_tags]

    # The third line of each info paragraph carries the year/country/genre
    # summary; strip all whitespace from it.
    # NOTE(review): empty entries are dropped below, which can shift intros out
    # of step with names/rates if any movie lacks one — confirm against the
    # live page markup.
    brief_intros = [
        sub(r'\s', '', split('\n', p.text)[2]) for p in soup.select('.info p')
    ]
    brief_intros = [intro for intro in brief_intros if intro]

    # Assemble one CSV row per movie; zip truncates to the shortest list.
    rows = (
        {'电影名': name, '评分': rate, '评论人数': num, '总结': quote, '简介': intro}
        for name, rate, num, quote, intro in zip(names, rates, comment_nums, quotes, brief_intros)
    )

    with open('file/豆瓣250.csv', 'a', encoding='utf-8', newline='') as fw:
        writer = csv.DictWriter(fw, FIELDNAMES)
        if fw.tell() == 0:  # write the header row only when the file is empty
            writer.writeheader()
        writer.writerows(rows)