This post walks through basic use of the Python third-party libraries jieba and wordcloud, together with basic word-frequency statistics. It covers the following six tasks:
- Read the file, split each line with split, and keep only the text; each line is treated as one document. A document may contain some "noise" (such as '[' and ']'), which can be removed.
- Segment every document with jieba and count word frequencies.
- Sort the words by frequency and inspect the high-frequency and low-frequency words.
- Introduce a stop-word list (search online for one) to filter out stop words, then look at the sorted frequencies again.
- Visualize the high-frequency words with wordcloud (a word cloud).
- Analyze parts of speech, observe how often each part of speech appears, and visualize the words of a specific part of speech as a word cloud.
import re
import jieba
import jieba.posseg
from zhon.hanzi import punctuation
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from collections import Counter
# Load the stop-word list (one word per line, UTF-8)
punctuation_str = punctuation
path_stop = "stopwords.txt"
with open(path_stop, encoding="utf-8") as f_stop:
    stopword = set(line.strip() for line in f_stop)  # a set makes membership tests fast
# Data cleaning
def clean(text):
    # Remove retweet/reply markers and @mentions
    text = re.sub(r"(回复)?(//)?\s*@\S*?\s*(:| |$)", " ", text)
    # Remove emoticon tags such as [doge] (must run before the brackets themselves are stripped)
    text = re.sub(r"\[\S+\]", "", text)
    # Remove URLs (must run before ASCII letters and digits are stripped)
    URL_REGEX = re.compile(
        r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
        re.IGNORECASE)
    text = re.sub(URL_REGEX, "", text)
    # Remove Chinese punctuation
    for i in punctuation_str:
        text = text.replace(i, "")
    # Remove ASCII letters, digits, and the remaining punctuation
    r = "[A-Za-z0-9_.!+-=——,$%^,。?、~@#¥%……&*《》<>「」{}【】()/\\\[\]'\"]"
    text = re.sub(r, "", text)
    text = text.replace("转发微博", "")  # drop the meaningless boilerplate phrase
    text = re.sub(r"\s+", " ", text)  # collapse runs of whitespace
    return text.strip()
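A quick sanity check of clean() on a made-up comment (the sample string and the expected result below are illustrative only, not taken from the actual data):

# Hypothetical sample: reply marker, emoticon, URL, and boilerplate should all be stripped
sample = "回复@某用户:今天天气真好[doge] http://t.cn/xxxx 转发微博"
print(clean(sample))  # should leave roughly: 今天天气真好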
# Read the raw file; each line is one Weibo record
with open("weibo.txt", "r", encoding='utf-8', errors='ignore') as f:
    words_lis = f.readlines()
# Collect the cleaned text of every record
comments = []
for i in words_lis:
    # Keep only the text field; drop the coordinates and timestamp
    i = i.split('\t')[1]
    i = clean(i)  # clean the text
    comments.append(i)
# Build a dictionary that counts how many times each word appears
counts = {}
for l in comments:
    words = list(jieba.cut(l))
    for word in words:
        if word not in stopword:
            counts[word] = counts.get(word, 0) + 1
# .items() yields (word, count) pairs; sort by count, ascending, so high-frequency words print last
counts = sorted(counts.items(), key=lambda x: x[1], reverse=False)
for i in counts:
    print("{0:<10}{1:>5}".format(i[0], i[1]))
dic = dict(counts)
mask = np.array(Image.open("labi.png"))  # the mask image shapes the word cloud
# Use a raw string for the Windows font path so the backslashes are not treated as escapes
wd = WordCloud(font_path=r'C:\Windows\Fonts\simfang.ttf', background_color="white", max_words=1000, mask=mask)
wd.fit_words(dic)
plt.imshow(wd, interpolation="bilinear")
plt.axis('off')
plt.show()
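If the figure should also be written to disk rather than only shown in a matplotlib window, WordCloud can export a PNG directly (the file name below is arbitrary):

wd.to_file("wordcloud_all.png")  # save the rendered cloud as an image file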
# Part-of-speech analysis: collect nouns, verbs, adjectives, and adverbs
n = []
v = []
a = []
d = []
for j in comments:
    for word, flag in jieba.posseg.cut(j):
        if flag == "n" and word not in stopword:
            n.append(word)
        elif flag == "a" and word not in stopword:
            a.append(word)
        elif flag == "v" and word not in stopword:
            v.append(word)
        elif flag == "d" and word not in stopword:
            d.append(word)
        else:
            continue
noun = Counter(n)
verb = Counter(v)
adj = Counter(a)
de = Counter(d)
noun = dict(sorted(noun.items(), key=lambda item: item[1],reverse=True))
verb = dict(sorted(verb.items(), key=lambda item: item[1],reverse=True))
adj = dict(sorted(adj.items(), key=lambda item: item[1],reverse=True))
de = dict(sorted(de.items(), key=lambda item: item[1],reverse=True))
print("前十个名词")
print(list(noun.items())[:10])
print("前十个动词")
print(list(verb.items())[:10])
print("前十个形容词")
print(list(adj.items())[:10])
print("前十个副词")
print(list(de.items())[:10])
# Word cloud of the nouns only
wr = WordCloud(font_path=r'C:\Windows\Fonts\simfang.ttf', background_color="white", max_words=1000)
wr.fit_words(noun)
plt.imshow(wr, interpolation="bilinear")
plt.axis('off')
plt.show()
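The same visualization can be repeated for any other part of speech collected above, for example the verbs; a minimal sketch reusing the same settings:

# Word cloud of the verbs (sketch)
wv = WordCloud(font_path=r'C:\Windows\Fonts\simfang.ttf', background_color="white", max_words=1000)
wv.fit_words(verb)
plt.imshow(wv, interpolation="bilinear")
plt.axis('off')
plt.show()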