Scraping Douban Movie Data for Analysis and Visualization

This post walks through scraping Douban movie data with a crawler and analyzing it. The overall workflow:

1. Scrape the Douban movie data

2. Load the scraped movie data

3. Count the number of comments for each movie

4. Load all comments for a given movie

5. Extract a movie's keywords and generate a word cloud

6. Cross-analyze keywords against ratings and generate a heatmap

Let's get started! Without further ado, straight to the code.

Scraping the Douban movie data

import requests
from bs4 import BeautifulSoup
from collections import OrderedDict
import pandas as pd
# Browser-like headers so Douban does not reject the requests
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
movie_info = OrderedDict()

def detail_handle(url):
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')
    movie_info['movie_rank'] = soup.find_all('span', class_="top250-no")[0].string
    movie_info['movie_name'] = soup.find_all('span', property="v:itemreviewed")[0].string
    soup_div = soup.find(id="info")
    # Director, writer and cast sit at fixed span offsets inside the #info block
    movie_info['movie_director'] = handle_mul_tags(soup_div.find_all('span')[0].find_all('a'))
    movie_info['movie_writer'] = handle_mul_tags(soup_div.find_all('span')[3].find_all('a'))
    movie_info['movie_starring'] = handle_mul_tags(soup_div.find_all('span')[6].find_all('a'))
    movie_info['movie_type'] = handle_mul_tags(soup_div.find_all('span', property="v:genre"))
    movie_info['movie_country'] = soup_div.find(text='制片国家/地区:').next_element.strip()
    movie_info['movie_language'] = soup_div.find(text='语言:').next_element.strip()
    movie_info['movie_release_date'] = handle_mul_tags(soup_div.find_all('span', property="v:initialReleaseDate"))
    movie_info['movie_run_time'] = handle_mul_tags(soup_div.find_all('span', property="v:runtime"))
    movie_second_name = ''
    try:
        movie_info['movie_second_name'] = soup_div.find(text='又名:').next_element.strip()
    except AttributeError:
        # Not every movie has an alternate title
        print('{} has no alternate title'.format(movie_info['movie_name']))
        movie_info['movie_second_name'] = movie_second_name
        
    movie_info['movie_rating'] = soup.find_all('strong', property="v:average")[0].string
    movie_info['movie_comment_users'] = soup.find_all('span', property="v:votes")[0].string
    # The 5-to-1 star rows alternate with other divs, hence the even indices
    soup_div_for_ratings = soup.find('div', class_="ratings-on-weight")
    movie_info['movie_five_star_ratio'] = soup_div_for_ratings.find_all('div')[0].find(class_="rating_per").string
    movie_info['movie_four_star_ratio'] = soup_div_for_ratings.find_all('div')[2].find(class_="rating_per").string
    movie_info['movie_three_star_ratio'] = soup_div_for_ratings.find_all('div')[4].find(class_="rating_per").string
    movie_info['movie_two_star_ratio'] = soup_div_for_ratings.find_all('div')[6].find(class_="rating_per").string
    movie_info['movie_one_star_ratio'] = soup_div_for_ratings.find_all('div')[8].find(class_="rating_per").string
    return movie_info
    
def handle_mul_tags(soup_span):
    # Join the text of multiple tags into a single string, separated by '/'
    return '/'.join(second_span.string for second_span in soup_span)

def crawl():
    urls = ['https://movie.douban.com/top250?start={}&filter='.format(page) for page in range(0, 250, 25)]
    movies = []
    for url in urls:
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, 'lxml')
        movie_htmls = soup.select('.pic')
        for movie_html in movie_htmls:
            detail_url = movie_html.select('a')[0]['href']
            # detail_handle fills the shared movie_info dict, so take a copy per movie
            movies.append(dict(detail_handle(detail_url)))
    return movies
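
The post never persists the scraped records, even though pandas is imported. Below is a minimal, hedged sketch of one way to save them; the filename douban_top250.xlsx is hypothetical, and note that the analysis section below reads a separately prepared douban_movie_data.xlsx whose columns (链接, 电影名, 评分) do not match the keys produced by detail_handle.

# Hedged sketch (not in the original post): run the crawler and save the
# records; writing .xlsx requires openpyxl to be installed
if __name__ == '__main__':
    movies = crawl()
    pd.DataFrame(movies).to_excel('douban_top250.xlsx', index=False)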

Analyzing and visualizing the movie data

Note that this part works from a prepared SQLite database of comments (douban_comment_data.db) and a movie spreadsheet (douban_movie_data.xlsx); neither file is produced by the scraper above.

import sqlite3
import pandas as pd
import jieba
import math
import pyecharts.options as opts
from pyecharts.charts import WordCloud
import os
os.chdir('C:\\Users\\Theo.chen\\Desktop\\数据分析项目\\')

import matplotlib as mpl
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False

conn = sqlite3.connect('douban_comment_data.db')
comment_data = pd.read_sql_query('select * from comment;', conn)
movie_data = pd.read_excel('douban_movie_data.xlsx')
# Stopwords to filter out of the comment keywords (mixed Chinese/English noise)
FILTER_WORDS = ['知道','影评','影片','小编','没有','一个','\n','good','is','thing','这个','就是','什么','真的','of',
                '我们','最后','一部','the','片子','这么','那么','不是','还是','时候','觉得','电影','但是','hope','Hope','best','因为',
                '只是','故事','看过','豆瓣','maybe','这部']

def get_movie_idList(min_comment_count):
    movie_list = comment_data['MOVIEID'].value_counts()
    # Keep only movies with more than min_comment_count comments
    movie_list = movie_list[movie_list.values > min_comment_count]
    return movie_list.index

def get_comment_keywords(movie_id, count):
    # Concatenate all comments for one movie, segment with jieba, then drop
    # single characters and stopwords and keep the `count` most frequent words
    comment_list = comment_data[comment_data['MOVIEID'] == movie_id]['CONTENT']
    comment_str_all = '\n'.join(comment_list)
    seg_list = list(jieba.cut(comment_str_all))
    keywords_counts = pd.Series(seg_list)
    keywords_counts = keywords_counts[keywords_counts.str.len() > 1]
    keywords_counts = keywords_counts[~keywords_counts.str.contains('|'.join(FILTER_WORDS))]
    keywords_counts = keywords_counts.value_counts()[:count]
    return keywords_counts
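
One subtlety worth knowing: str.contains treats the joined FILTER_WORDS as a regex alternation, so any token that merely contains a stopword as a substring is dropped as well. A tiny hypothetical demo (the sample sentence is mine, not from the post):

# Hypothetical demo: the same filtering applied to a single sentence;
# '这部', '电影' and '故事' are stopwords and disappear from the counts
demo = pd.Series(list(jieba.cut('这部电影的故事非常精彩, 演员的表演也很出色')))
demo = demo[demo.str.len() > 1]
demo = demo[~demo.str.contains('|'.join(FILTER_WORDS))]
print(demo.value_counts())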

def get_movie_name_and_score(movie_id):
    movie_link = 'https://movie.douban.com/subject/{}/'.format(movie_id)
    search_result = movie_data[movie_data['链接'] == movie_link].iloc[0]
    movie_name = search_result['电影名']
    movie_score = search_result['评分']
    return (movie_name,movie_score)

def generate_wordcloud(word_list, path_name):
    # word_list is a keyword -> count Series, as returned by get_comment_keywords
    wordcloud = WordCloud()
    wordcloud.add(
        "",
        tuple(zip(word_list.index, word_list)),
        word_size_range=[20, 100])
    wordcloud.render(path_name)
    print(f"Generated word cloud file: {path_name}")

# One bucket per integer score 0-9; each movie's keywords go into the
# bucket at floor(movie_score)
kw_list_by_score = [[] for i in range(10)]
kw_counts_by_score = [[] for i in range(10)]

movie_id_list = get_movie_idList(300)
for movie_id in movie_id_list:
    word_list = get_comment_keywords(movie_id,30)
    movie_name, movie_score = get_movie_name_and_score(movie_id)
    try:
        kw_list_by_score[math.floor(movie_score)].extend(word_list.index)
        kw_counts_by_score[math.floor(movie_score)].extend(word_list.values)
    except Exception as e:
        print('Failed to bucket keywords for {}: {}'.format(movie_name, e))

# For each score band, merge duplicate keywords, keep the top 30 and save a CSV
for i in range(10):
    if kw_list_by_score[i]:
        kw30_with_counts = pd.DataFrame({
            'kw': kw_list_by_score[i],
            'count': kw_counts_by_score[i]
        })
        kw30_with_counts = kw30_with_counts.groupby('kw').sum()
        kw30_with_counts = kw30_with_counts.sort_values(by='count', ascending=False)[:30]
        counts_sum = kw30_with_counts['count'].sum()
        kw30_with_counts['percentage'] = kw30_with_counts['count'] / counts_sum
        kw30_with_counts.to_csv('{}_movie_keywords.csv'.format(i))


from pyecharts.charts import HeatMap

# Reload the per-score keyword CSVs (the post only uses score bands 4-9)
kw_counts_by_score = [[] for _ in range(10)]
for i in range(4, 10):
    kw_counts_by_score[i] = pd.read_csv('{}_movie_keywords.csv'.format(i))

# Use the top 10 keywords of the 9-point movies as the heatmap rows;
# the frame is built once, after all CSVs are loaded
kw_percentage_df = pd.DataFrame([],
                                columns=list(range(4, 10)),
                                index=kw_counts_by_score[9]['kw'][:10])

for i in range(4, 10):
    kw = kw_counts_by_score[i]
    kw = kw[kw['kw'].isin(kw_percentage_df.index)]
    kw_percentage_df[i] = pd.Series(list(kw['percentage']), index=kw['kw'])

kw_percentage_df.fillna(0, inplace=True)

# Flatten the matrix into [x, y, value] triples for pyecharts
data = []
for i, index in enumerate(kw_percentage_df.index):
    for j, column in enumerate(kw_percentage_df.columns):
        data.append([j, i, kw_percentage_df[column][index] * 100])

heatmap = HeatMap()
heatmap.add_xaxis(list(kw_percentage_df.columns))
heatmap.add_yaxis("电影评论关键词热力图", list(kw_percentage_df.index), data)
heatmap.set_global_opts(
    visualmap_opts=opts.VisualMapOpts(
        min_=0,
        max_=10,
        orient='horizontal'
    ),
)
heatmap.render(path="heatmap.html")
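
pyecharts renders an interactive HTML file. If a static image is preferred, the snapshot-selenium add-on documented by pyecharts can export a PNG; this is optional, my addition rather than part of the original post, and requires installing snapshot-selenium plus a local ChromeDriver:

# Optional (not in the original post): export the heatmap as a PNG using
# pyecharts' snapshot-selenium integration
from pyecharts.render import make_snapshot
from snapshot_selenium import snapshot

make_snapshot(snapshot, heatmap.render(), "heatmap.png")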

 
