Python — 用 jieba + wordcloud 从豆瓣影评生成词云

 

 

import time
from lxml import etree
import wordcloud
 # 词云
import jieba  # 长句切分
# import imageio  # 遮幕
import imageio.v2 as imageio
import requests

class DoubanSpider():
    """Scrape short comments from one Douban movie's comment pages and append them to doubanpl.txt."""

    def __init__(self):
        # Paginated comment-list URL; {} is filled with the start offset (20 comments per page).
        self.url_temp = 'https://movie.douban.com/subject/35290372/comments?start={}&limit=20&status=P&sort=new_score'
        # Browser-like User-Agent so Douban does not reject the request outright.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.5359.95 Safari/537.36"
        }

    def get_url_list(self, pages=10):
        """Build the paginated URLs.

        pages: number of comment pages to crawl (generalizes the former
        hard-coded 10; default keeps the original behavior).
        """
        return [self.url_temp.format(i * 20) for i in range(pages)]

    def parse(self, url):
        """Fetch one page and return the decoded HTML.

        Raises requests.HTTPError on a non-2xx response and
        requests.Timeout after 10s, instead of silently scraping an
        error page as the original did.
        """
        print("获取 url=" + url)
        response = requests.get(url=url, headers=self.headers, timeout=10)
        response.raise_for_status()  # fail loudly rather than save an error page
        return response.content.decode()

    def get_content(self, html_str):
        """Extract the comment text snippets from one page via XPath."""
        html = etree.HTML(html_str)
        # NOTE(review): absolute XPath is brittle — the div[4] index may
        # shift whenever Douban changes its page layout; verify periodically.
        content_list = html.xpath(r'/html/body/div[3]/div[1]/div/div[1]/div[4]//div/div[2]/p/span/text()')
        print(content_list)
        return content_list

    def save(self, content_list):
        """Append one comment per line to doubanpl.txt (UTF-8)."""
        with open('doubanpl.txt', 'a', encoding='utf-8') as f:
            f.writelines(comment + '\n' for comment in content_list)

    def run(self):
        """Crawl every page, extract and save its comments, pausing 1s between requests."""
        for url in self.get_url_list():
            html_str = self.parse(url)
            content_list = self.get_content(html_str)
            self.save(content_list)
            time.sleep(1)  # be polite to the server

if __name__ == '__main__':
    # Crawl the comments first, then render a word cloud from the saved text.
    douban = DoubanSpider()
    douban.run()
    # Mask image: the cloud is drawn only inside the non-white region of guangda.png.
    mk = imageio.imread('guangda.png')
    # Fix: the original opened this file without ever closing it; `with` guarantees closure.
    with open('doubanpl.txt', 'r', encoding='utf-8') as f:
        txt = f.read()
    wmy = wordcloud.WordCloud(
        width=1000,
        height=1000,
        background_color='white',
        mask=mk,
        scale=8,
        # Raw string: the original non-raw path only worked because \W, \F, \m
        # happen not to be escape sequences — raw is the safe form.
        font_path=r"C:\Windows\Fonts\msyhbd.ttc",
        stopwords={"的", "了", "和"},
    )
    # jieba segments the Chinese text; WordCloud expects space-separated tokens.
    txtlist = jieba.lcut(txt)
    print(txtlist)
    string = " ".join(txtlist)
    print(string)
    wmy.generate(string)
    wmy.to_file("commentpng.png")

注意事项:

1. 将 Python 的 Scripts 目录配置到环境变量 PATH 中,例如:C:\Users\user\AppData\Roaming\Python\Python39\Scripts

2. 下载 whl 安装包时要确认与自己的 Python 版本对应。

3. numpy 建议下载带 MKL 的版本。

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值