Scraping the Douban Top 250 with a crawler

Import the required packages

import requests
from bs4 import BeautifulSoup
import pandas as pd
import pprint
import json

① Download the HTML of all 10 pages

# The Top 250 list spans 10 pages with 25 movies each, so start = 0, 25, ..., 225
page_indexs = range(0, 250, 25)

def download_all_htmls():
    """Download the HTML of all 10 listing pages and return them as a list of strings."""
    htmls = []
    for idx in page_indexs:
        url = f"https://movie.douban.com/top250?start={idx}&filter="
        print("crawl html:", url)
        r = requests.get(url, headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
        })
        htmls.append(r.text)
    return htmls

htmls = download_all_htmls()  # run the crawl
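If Douban starts throttling the crawl, a slightly more defensive variant of the loop can fail fast on HTTP errors and pause between requests. This is only a minimal sketch, not part of the original script; the one-second delay and the raise_for_status() check are assumptions.

import time

def download_all_htmls_safely():
    """Like download_all_htmls, but fail fast on HTTP errors and pause between requests."""
    htmls = []
    for idx in page_indexs:
        url = f"https://movie.douban.com/top250?start={idx}&filter="
        r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
        r.raise_for_status()  # raise immediately on 4xx/5xx responses
        htmls.append(r.text)
        time.sleep(1)         # be polite: roughly one request per second
    return htmls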

② Parse the crawled HTML pages into structured data

def parse_single_html(html):
    """Parse one listing page and return a list of dicts, one per movie."""
    soup = BeautifulSoup(html, 'html.parser')
    # Each movie sits in a <div class="item"> inside the <ol class="grid_view"> list
    article_items = soup.find("ol", class_="grid_view").find_all("div", class_="item")
    datas = []
    for article_item in article_items:
        rank = article_item.find("div", class_="pic").find("em").get_text()
        info = article_item.find("div", class_="info")
        title = info.find("div", class_="hd").find("span", class_="title").get_text()
        # The star block holds four <span>s: rating class, score, an empty span, and the vote count
        stars = info.find("div", class_="bd").find("div", class_="star").find_all("span")
        rating_star = stars[0]["class"][0]   # e.g. "rating5-t"
        rating_num = stars[1].get_text()
        comments = stars[3].get_text()

        datas.append({
            "rank": rank,
            "title": title,
            "rating_star": rating_star.replace("rating", "").replace("-t", ""),  # "rating5-t" -> "5"
            "rating_num": rating_num,
            "comments": comments.replace("人评价", "")  # keep only the number of votes
        })
    return datas
all_datas = []  # parse every downloaded HTML page
for html in htmls:
    all_datas.extend(parse_single_html(html))
all_datas
len(all_datas)  # 250 records expected (10 pages × 25 movies)
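Since pprint is already imported, a quick preview of the parsed records helps confirm the structure before exporting; slicing to the first three entries is just an arbitrary choice for readability.

pprint.pprint(all_datas[:3])  # preview the first three parsed movies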

③ Export the results to Excel

df = pd.DataFrame(all_datas)
df.to_excel("豆瓣TOP250.xlsx")
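Note that writing .xlsx files with pandas requires an Excel engine such as openpyxl to be installed. Since json is also imported at the top, the same records can be dumped to a JSON file as well; the file name below is only an example.

with open("豆瓣TOP250.json", "w", encoding="utf-8") as f:
    json.dump(all_datas, f, ensure_ascii=False, indent=2)  # ensure_ascii=False keeps Chinese titles readable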
