Scraping the Douban Top 250 Movie Chart with Python

A Python scraper that fetches the title, poster image link, cast, one-line user impression, rating, and rating count for each of the Douban Top 250 movies, then saves the data to an Excel file.

# -*- coding: utf-8 -*-

import time

import pandas as pd
import requests
from bs4 import BeautifulSoup

# One URL per page of 25 movies: start=0, 25, ..., 225
urls = ['https://movie.douban.com/top250?start={}&filter='.format(i) for i in range(0, 250, 25)]

# Send a browser-like User-Agent (Douban tends to block the default requests one);
# the Cookie below is the author's own session, copied from a logged-in browser
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36',
    'Cookie': 'gr_user_id=1ba0f40c-adb5-4011-9f66-ef641710c42e; viewed="1419678_1786120"; RT=s=1466650874130&r=https%3A%2F%2Fmovie.douban.com%2Ftop250%3Fstart%3D75%26filter%3D; ll="108091"; __utmt_t1=1; __utma=30149280.1565775292.1453707266.1466481018.1466650178.19; __utmb=30149280.15.8.1466650881759; __utmc=30149280; __utmz=30149280.1466650178.19.18.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; bid="0K5aWKIHxEU"'
}

# Accumulators, one list per field, filled page by page
title = []
image = []
actor = []
impression = []
rate = []
evalu_num = []

# Scrape one page of the Top 250: movie names, poster URLs, cast,
# one-line impressions, ratings, and rating counts
def allfilm(web_url):
    web_data = requests.get(web_url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    time.sleep(2)  # pause between pages to stay polite

    titles = soup.select('#content > div > div.article > ol > li > div > div.info > div.hd > a')
    images = soup.select('#content > div > div.article > ol > li > div > div.pic > a > img')
    # p:nth-of-type(1) is the first <p> under div.bd (the director/cast line)
    actors = soup.select('#content > div > div.article > ol > li > div > div.info > div.bd > p:nth-of-type(1)')
    impressions = soup.select('#content > div > div.article > ol > li > div > div.info > div.bd > p:nth-of-type(2)')
    rates = soup.select('#content > div > div.article > ol > li > div > div.info > div.bd > div > span.rating_num')
    # DevTools copies the selector per item, e.g.
    #   #content > div > div.article > ol > li:nth-child(1) > div > div.info > div.bd > div > span:nth-child(4)
    #   #content > div > div.article > ol > li:nth-child(2) > div > div.info > div.bd > div > span:nth-child(4)
    # dropping the li index matches the rating-count <span> of every movie at once
    evalu_nums = soup.select('#content > div > div.article > ol > li > div > div.info > div.bd > div > span:nth-of-type(4)')

    for title1, image1, actor1, impression1, rate1, evalu_num1 in zip(titles, images, actors, impressions, rates, evalu_nums):
        title.append(title1.get_text().replace('\xa0', ' ').strip())
        image.append(image1.get('src'))
        actor.append(actor1.get_text().replace('\xa0', ' ').strip())
        impression.append(impression1.get_text())
        rate.append(rate1.get_text())
        evalu_num.append(evalu_num1.get_text())

for single_url in urls:
    allfilm(single_url)

data = {'title': title,
        'image': image,
        'actor': actor,
        'impression': impression,
        'rate': rate,
        'evalu_num': evalu_num}

print('done!!')

frame = pd.DataFrame(data, columns=['title', 'image', 'actor', 'impression', 'rate', 'evalu_num'])
# Rename the columns to Chinese headers for the exported sheet:
# movie name, image link, cast, impression, rating, rating count
frame.columns = [u'电影名', u'图片链接', u'演员', u'印象', u'评分', u'评价数']

# Write the DataFrame to an .xlsx file
frame.to_excel('C:\\Users\\zhchenjia\\Desktop\\doubanfilm.xlsx', index=True)
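
To sanity-check the export, the sheet can be read straight back with pandas. A minimal sketch, assuming the same output path as above and that an Excel engine such as openpyxl is installed:

import pandas as pd

# Load the exported sheet; index_col=0 restores the index written by to_excel
check = pd.read_excel('C:\\Users\\zhchenjia\\Desktop\\doubanfilm.xlsx', index_col=0)
print(check.shape)  # expect (250, 6) if every page parsed cleanly
print(check.head())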
