淘宝商品信息爬取,实现词云,并进行饼状图绘制及效果图

对应json文件地址:https://blog.csdn.net/nicholas_K/article/details/86094530

1. 获取了淘宝手机商品的评论和追评

2. 对淘宝评论进行了词云

3. 实现了淘宝手机商品版本的饼状图绘制

import json
import time
import pygal
import pymysql.cursors
from wordcloud import WordCloud

# Parse the Taobao comment JSON dump and extract the fields we store.
def get_comments(path='tb_comments_1.json'):
    """Load a Taobao rate-detail JSON file and return a list of comment dicts.

    Args:
        path: Path to the JSON file. Defaults to the original hard-coded
            filename so existing callers are unaffected.

    Returns:
        A list of dicts with keys 'id', 'content' (the append-comment text,
        or the original falsy placeholder when there is no append comment),
        'rateContent', 'auctionSku' and 'rateDate'.
    """
    with open(path, encoding='utf-8') as tb:
        comments_dict = json.load(tb)

    # The interesting rows live under rateDetail -> rateList.
    rate_list = comments_dict['rateDetail']['rateList']

    result_list = []
    for comment in rate_list:
        # If there is an append comment (追评), flatten it to its text.
        if comment['appendComment']:
            comment['appendComment'] = comment['appendComment']['content']
        result_list.append({
            'id': comment['id'],
            'content': comment['appendComment'],
            'rateContent': comment['rateContent'],
            'auctionSku': comment['auctionSku'],
            'rateDate': comment['rateDate'],
        })

    return result_list


# Persist comments to MySQL and read back the data used by the charts.
def save_db(comments):
    """Insert new comments into tb.taobao and return chart query results.

    Args:
        comments: List of comment dicts as returned by ``get_comments()``.

    Returns:
        Tuple ``(rs_set, rs_sets)``:
          rs_set  - all (rate, content) rows, for the word cloud.
          rs_sets - per-``auctionint`` counts, for the pie chart.
    """
    connection = pymysql.connect(host='127.0.0.1',
                                 port=3306,
                                 user='root',
                                 password='zhangkai',
                                 db='tb',
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    # NOTE: the original wrapped ping() in a try/except whose handler called
    # ``connection()`` — a TypeError, since the connection object is not
    # callable. ping(reconnect=True) already reconnects on failure.
    try:
        with connection.cursor() as cursor:
            for comment in comments:
                # Skip rows already stored. Parameterized query instead of
                # ``%`` string formatting (SQL-injection safe).
                cursor.execute(
                    "select id from tb.taobao where taobao_id=%s",
                    (comment['id'],))
                if cursor.fetchone():  # dict row if found, None otherwise
                    print('这条评论已存在在数据库中')
                    continue

                # BUG FIX: the original ran ``for n in comments`` here and
                # re-inserted EVERY comment once per outer iteration,
                # duplicating rows N times. Insert only the current one.
                cursor.execute(
                    "INSERT INTO tb.taobao VALUES (%s, %s, %s, %s, %s, %s)",
                    (None, comment['id'], comment['rateContent'],
                     comment['auctionSku'], comment['rateDate'],
                     comment['content']))
                connection.commit()
                print('添加成功')

            # Rows for the word cloud.
            cursor.execute("select rate, content from tb.taobao")
            rs_set = cursor.fetchall()

            # Phone-version counts for the pie chart.
            cursor.execute(
                "SELECT COUNT(*) as num, auctionint FROM tb.taobao "
                "group by auctionint")
            rs_sets = cursor.fetchall()
        return rs_set, rs_sets
    finally:
        # Always release the connection, even if a query raises.
        connection.close()

# Merge every comment's text into one big string for the word cloud.
def jieba_db(comments):
    """Concatenate the 'rate' text of every comment row.

    Args:
        comments: Iterable of dict rows, each containing a 'rate' key
            (as returned by ``save_db``'s first result).

    Returns:
        All 'rate' values joined into a single string.
    """
    # str.join is linear; the original repeated ``+=`` is quadratic.
    return ''.join(row['rate'] for row in comments)


# Render the combined comment text as a word-cloud image.
def word_cloud(string):
    """Generate a word cloud from *string* and save it as '淘宝词云.png'.

    Args:
        string: The full comment text to visualize.

    Returns:
        None; the PNG file is written to disk as a side effect.
    """
    cloud = WordCloud(
        font_path='msyhl.ttc',  # CJK-capable font; required for Chinese text
        background_color="white",
        width=1000,
        height=860,
        max_font_size=30,
        min_font_size=10,
        margin=2,
    )
    cloud.generate(string)
    cloud.to_file('淘宝词云.png')

    return None

# Draw a pie chart of phone-variant purchase percentages.
def pygals(comments):
    """Render each auctionint group's share (in %) to '淘宝.svg'.

    Args:
        comments: Rows with 'num' (count) and 'auctionint' (variant label),
            as returned by ``save_db``'s second result.

    Returns:
        None; the SVG (open it in a browser) is written as a side effect.
    """
    total = sum(row['num'] for row in comments)
    if total == 0:
        # BUG FIX: the original divided by zero on empty input.
        print('没有数据,无法绘图')
        return

    pie_chart = pygal.Pie()
    pie_chart.title = '购买手机颜色比例(in % )'
    for row in comments:
        pie_chart.add(row['auctionint'], row['num'] / total * 100)
    pie_chart.render_to_file('淘宝.svg')
    # Open the SVG file with a browser to view it.

    print('绘图成功')


if __name__ == '__main__':
    # Pipeline: parse JSON -> store in MySQL -> word cloud + pie chart.
    comment = get_comments()
    save, banben = save_db(comments=comment)
    taobao_jieba = jieba_db(comments=save)
    word_cloud(string=taobao_jieba)  # writes 淘宝词云.png; returns None
    # BUG FIX: `banben` (the pie-chart data) was fetched but never plotted;
    # call pygals so the pie chart described in the article is produced.
    pygals(comments=banben)

词云图片效果如下

在这里插入图片描述

饼状图效果如下

在这里插入图片描述
在这里插入图片描述

  • 2
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
由于QQ音乐的反爬虫机制比较强,需要先模拟登录QQ音乐获取cookies,然后再进行爬取评论和生成词云图的操作。 以下是完整代码: ```python import requests import json import time import os from wordcloud import WordCloud import matplotlib.pyplot as plt from PIL import Image import numpy as np # 登录QQ音乐获取cookies def get_cookies(): headers = { 'Referer': 'https://y.qq.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3' } url = 'https://y.qq.com/' response = requests.get(url, headers=headers) cookies = response.cookies.get_dict() return cookies # 爬取评论 def get_comments(song_id, page): headers = { 'Referer': 'https://y.qq.com/n/yqq/song/{}.html'.format(song_id), 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3', 'Cookie': 'pgv_pvid=6021975380; pgv_pvi=3306170880; pt2gguin=o0533414728; RK=J0VJyXs+Ld; ptcz=4e11a4a6e4a6b8d37b42a6b9f9d2b6c8a6b2a6b9f9d2b6c8a6b2a6b9f9d2b6c8; pgv_si=s1046426624; pgv_info=ssid=s7230811040; _qpsvr_localtk=0.7601771490547045; yq_index=0; yq_playschange=0; yq_playdata=; ts_uid=4789989478; player_exist=1; qqmusic_fromtag=66' } url = 'https://c.y.qq.com/base/fcgi-bin/fcg_global_comment_h5.fcg' params = { 'g_tk': '5381', 'loginUin': '0', 'hostUin': '0', 'format': 'json', 'inCharset': 'utf8', 'outCharset': 'utf-8', 'notice': '0', 'platform': 'yqq.json', 'needNewCode': '0', 'cid': '205360772', 'reqtype': '2', 'biztype': '1', 'topid': song_id, 'cmd': '8', 'pagenum': page, 'pagesize': '25', 'lasthotcommentid': '', 'domain': 'qq.com', 'ct': '24', 'cv': '10101010' } response = requests.get(url, headers=headers, params=params) json_data = json.loads(response.text) comment_list = json_data['comment']['commentlist'] comments = [] for comment in comment_list: content = comment['rootcommentcontent'] comments.append(content) return comments # 生成词云图 def generate_wordcloud(text, mask_path): # 读取遮罩图片 mask = np.array(Image.open(mask_path)) # 设置词云图参数 wc = 
WordCloud(background_color="white", max_words=2000, mask=mask, contour_width=1, contour_color='steelblue') # 生成词云图 wc.generate(text) # 显示词云图 plt.imshow(wc, interpolation='bilinear') plt.axis("off") plt.show() if __name__ == '__main__': # 歌曲id song_id = '108119' # 存储评论的文件名 filename = 'comments.txt' # 遮罩图片路径 mask_path = 'jay.jpg' # 获取cookies cookies = get_cookies() # 爬取评论 comments = [] for i in range(1, 11): comments += get_comments(song_id, i) time.sleep(1) # 保存评论到文件中 with open(filename, 'w', encoding='utf-8') as f: for comment in comments: f.write(comment + '\n') # 读取评论文件 with open(filename, 'r', encoding='utf-8') as f: text = f.read() # 生成词云图 generate_wordcloud(text, mask_path) ``` 需要注意的几点: 1. `get_cookies()`函数中的`Cookie`参数需要根据自己的账号进行修改,可以通过浏览器获取; 2. `get_comments()`函数中的`cid`参数是QQ音乐评论的分类id,每个歌曲的分类id不同,需要通过浏览器获取; 3. `generate_wordcloud()`函数中的`mask_path`参数是用于遮罩的图片路径,需要提前准备好; 4. 由于QQ音乐的反爬虫机制比较强,为了避免被封IP,需要在爬取评论时加入适当的延时。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值