# 1. Import packages
import requests
from bs4 import BeautifulSoup
import pandas as pd
# 2. Build the list of page start offsets
page_indexs = list(range(0, 250, 25))
#执行:[0, 25, 50, 75, 100, 125, 150, 175, 200, 225]
page_indexs
def download_all_htmls():
"""
下载所有列表页面的HTML,用于后续的分析
"""
htmls = []
i = 1
for idx in page_indexs:
url = f"https://movie.douban.com/top250?start={idx}&filter="
print("第 {} 页:html: {}".format(i,url))
i += 1
r = requests.get(url,
headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"})
if r.status_code != 200: #请求返回的状态码不为200
raise Exception("error")
htmls.append(r.text)
return htmls
# 3. Run the crawl
htmls = download_all_htmls()
#执行:
# 第 1 页:html: https://movie.douban.com/top250?start=0&filter=
# 第 2 页:html: https://movie.douban.com/top250?start=25&filter=
# 第 3 页:html: https://movie.douban.com/top250?start=50&filter=
# 第 4 页:html: https://movie.douban.com/top250?start=75&filter=
# 第 5 页:html: https://movie.douban.com/top250?start=100&filter=
# 第 6 页:html: https://movie.douban.com/top250?start=125&filter=
# 第 7 页:html: https://movie.douban.com/top250?start=150&filter=
# 第 8 页:html: https://movie.douban.com/top250?start=175&filter=
# 第 9 页 … (captured output truncated here)