import requests
from bs4 import BeautifulSoup
def get_movie_info(url):
    """Fetch one page of the Douban Top 250 list and parse its movies.

    Args:
        url: Page URL, e.g. 'https://movie.douban.com/top250?start=25'.

    Returns:
        A list of dicts with keys 'title', 'country', 'year', 'genre'.
        Returns an empty list when the request fails (non-200 status);
        the original returned None here, which crashed the caller's loop.
    """
    headers = {
        'User-Agent': 'Your User Agent Here',  # use your own User-Agent string
    }
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        return []

    soup = BeautifulSoup(response.text, 'html.parser')
    movies = []
    for movie in soup.find_all('div', class_='item'):
        title_tag = movie.find('span', class_='title')
        if title_tag is None:
            continue  # skip malformed items instead of raising AttributeError
        title = title_tag.text.strip()  # movie title

        year = country = genre = ''
        info_p = movie.find('div', class_='bd').find('p')
        for child in info_p.children:
            if child.name == 'br':
                # The text after <br> looks like "1994 / 美国 / 犯罪 剧情".
                # Keep only string siblings: next_siblings can also yield
                # Tag objects, and str.join raises TypeError on those.
                text = ''.join(
                    s for s in child.next_siblings if isinstance(s, str)
                ).strip()
                parts = [p.strip() for p in text.split('/')]
                # Guard indexing: some entries have fewer than 3 fields.
                if len(parts) > 0:
                    year = parts[0]
                if len(parts) > 1:
                    country = parts[1]
                if len(parts) > 2:
                    genre = parts[2]
        movies.append({"title": title, "country": country, "year": year, "genre": genre})
    return movies
def main():
    """Crawl all 10 pages (250 movies) of the Douban Top 250 and print them."""
    base_url = 'https://movie.douban.com/top250'
    all_movies = []
    for i in range(10):
        print(f'开始抓取第{i + 1}页')
        url = f'{base_url}?start={(i * 25)}'  # each page lists 25 movies
        movies = get_movie_info(url)
        for movie in movies:
            print(f'Title: {movie["title"]}')
            # BUG fix: the original printed the year under the "Country"
            # label and the country under "Year" — keys are now matched.
            print(f'Country: {movie["country"]}')
            print(f'Year: {movie["year"]}')
            print(f'Genre: {movie["genre"]}\n')
        all_movies.extend(movies)
        print(f'抓取第{i + 1}页完成')
    print("抓取数据结果如下")
    # Print all collected movie info (labels matched to keys, as above).
    for movie in all_movies:
        print(f'Title: {movie["title"]}')
        print(f'Country: {movie["country"]}')
        print(f'Year: {movie["year"]}')
        print(f'Genre: {movie["genre"]}\n')
# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()
# Scraping the Douban Top 250 with Python (latest version)
# First published 2024-05-28 14:23:50