Import the required packages
import requests
from bs4 import BeautifulSoup
import pandas as pd
import pprint
import json
① Download the HTML of all 10 pages
# Douban Top 250 lists 25 movies per page, so starts 0, 25, ..., 225 cover all 10 pages
page_indexs = range(0, 250, 25)

def download_all_htmls():
    """Download the HTML of all 10 list pages and return them as strings."""
    htmls = []
    for idx in page_indexs:
        url = f"https://movie.douban.com/top250?start={idx}&filter="
        print("crawl html:", url)
        r = requests.get(url, headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/63.0.3239.132 Safari/537.36'})
        htmls.append(r.text)
    return htmls
htmls = download_all_htmls()  # run the crawl
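
If Douban throttles the crawler or returns an error page, r.text silently stores that error page. A minimal hedged variant, not part of the original tutorial, that checks the HTTP status and pauses between requests (the function name, the delay_seconds parameter, and the shortened User-Agent are all assumptions):

import time

def download_all_htmls_safe(delay_seconds=1.0):
    # Sketch only: same pages as download_all_htmls, with basic error handling
    htmls = []
    for idx in page_indexs:
        url = f"https://movie.douban.com/top250?start={idx}&filter="
        r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})  # shortened UA, an assumption
        r.raise_for_status()        # stop early on 4xx/5xx instead of saving an error page
        htmls.append(r.text)
        time.sleep(delay_seconds)   # polite pause between requests; 1 second is an arbitrary default
    return htmls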
② Parse the downloaded HTML pages into data
def parse_single_html(html):
    """Parse one Top 250 list page and return a list of movie records."""
    soup = BeautifulSoup(html, 'html.parser')
    article_items = soup.find("ol", class_="grid_view").find_all("div", class_="item")
    datas = []
    for article_item in article_items:
        rank = article_item.find("div", class_="pic").find("em").get_text()
        info = article_item.find("div", class_="info")
        title = info.find("div", class_="hd").find("span", class_="title").get_text()
        # The star block holds four <span>s: star class (e.g. "rating45-t"),
        # numeric score, an empty span, and the vote count
        stars = info.find("div", class_="bd").find("div", class_="star").find_all("span")
        rating_star = stars[0]["class"][0]
        rating_num = stars[1].get_text()
        comments = stars[3].get_text()
        datas.append({
            "rank": rank,
            "title": title,
            "rating_star": rating_star.replace("rating", "").replace("-t", ""),  # "rating45-t" -> "45"
            "rating_num": rating_num,
            "comments": comments.replace("人评价", "")  # drop the "people rated" suffix
        })
    return datas
# Parse every downloaded page and collect the records
all_datas = []
for html in htmls:
    all_datas.extend(parse_single_html(html))  # flatten each page's 25 records into one list
print(len(all_datas))  # should print 250
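
The pprint module imported at the top is never called in the original; as a quick sanity check it can pretty-print a few parsed records (slicing to the first three is an arbitrary choice):

pprint.pprint(all_datas[:3])  # inspect the first records: rank, title, rating_star, rating_num, comments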
③ Export the results to Excel
df = pd.DataFrame(all_datas)
df.to_excel("豆瓣TOP250.xlsx")  # writing .xlsx needs an Excel engine such as openpyxl installed
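
The json import likewise goes unused in the original. If no Excel engine is available, the same records can be saved as JSON instead; a minimal sketch, where the output filename is an assumption:

with open("douban_top250.json", "w", encoding="utf-8") as f:
    # ensure_ascii=False keeps the Chinese titles readable in the output file
    json.dump(all_datas, f, ensure_ascii=False, indent=2)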