#coding:gbk
import requests
import csv
from bs4 import BeautifulSoup
def get_movies():
    """Scrape the Douban Top 250 chart and return all movie titles.

    Fetches the 10 chart pages (25 entries each) with a browser-like
    User-Agent, and extracts each title from the first ``<span>`` inside
    the ``<a>`` of every ``<div class="hd">`` element.

    Returns:
        list[str]: up to 250 movie titles in chart order.

    Raises:
        requests.HTTPError: if any page responds with an error status.
        requests.RequestException: on timeout or connection failure.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36',
        'Host': 'movie.douban.com'
    }
    movie_list = []
    for i in range(10):
        # Pagination: each page starts 25 entries after the previous one.
        link = 'https://movie.douban.com/top250?start=' + str(i * 25)
        r = requests.get(link, headers=headers, timeout=10)
        print(str(i + 1), "页响应状态码:", r.status_code)
        # Fail fast instead of silently parsing an error/captcha page.
        r.raise_for_status()
        soup = BeautifulSoup(r.text, "lxml")
        # Each chart entry's title block lives in a <div class="hd">.
        movie_list.extend(
            div.a.span.text.strip()
            for div in soup.find_all('div', class_='hd')
        )
    return movie_list
# Collect the titles, then persist them as "title,N" rows in a CSV.
movies = get_movies()
print('数据采集完成')
# Pair every title with a default 'N' flag (second CSV column).
Movies = [[movie, 'N'] for movie in movies]
# newline='' is required by the csv module on Windows; an explicit
# encoding avoids UnicodeEncodeError for Chinese titles on non-CJK
# locales, and the utf-8-sig BOM lets Excel auto-detect the encoding.
with open('Top250.csv', 'w', newline='', encoding='utf-8-sig') as F:
    writer = csv.writer(F)
    writer.writerows(Movies)
print('存储完成')
# 爬取豆瓣Top250(电影名称并保存)
# 最新推荐文章于 2024-06-25 03:17:30 发布