WeChat Content Analysis

Word Segmentation

```python
import pandas as pd
import jieba
import jieba.analyse
import re

# Load the user dictionary so domain-specific terms are segmented as single words
jieba.load_userdict('./dataset/dictdata/newwords.txt')

# Load the raw data
data1 = pd.read_csv('./dataset/rawdata/total.csv', encoding='utf-8', dtype=str)
data1 = data1.astype(str)
data1["fenci"] = data1["content"]

# Build the stopword set
with open('./dataset/dictdata/stopwords.txt', encoding='utf-8') as f:
    stopwords = set(line.strip() for line in f)

for index, row in data1.iterrows():
    # Strip punctuation, whitespace and digits before segmentation
    tmp = re.sub(r"[\s+\.\!\/_,$%^*(【】:\]\[\-:;+\"\']+|[+——!,。?、~@#¥%……&*()]+|[0-9]+", "", row['content'])
    words = jieba.cut_for_search(tmp, HMM=True)
    w_ = [w for w in words if w not in stopwords]
    # Assign via .at so the result is written back to the DataFrame
    # (assigning to the iterrows() row only changes a copy)
    data1.at[index, 'fenci'] = " ".join(w_)

print(data1.head())
data1.to_csv('./dataset/rawdata/total_fenci_1.csv', columns=['title', 'content', 'fenci'], encoding='utf-8')
data1.to_excel('./dataset/rawdata/total_fenci_1.xlsx', columns=['title', 'content', 'fenci'])
```
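
jieba.analyse is imported above but never used. As a rough sketch of how TF-IDF keywords could also be pulled from each article (the topK value is an arbitrary choice, not part of the original pipeline):

```python
# A minimal sketch: TF-IDF keyword extraction with jieba.analyse
# (topK=10 is an arbitrary cutoff, not from the original pipeline)
for index, row in data1.iterrows():
    keywords = jieba.analyse.extract_tags(str(row['content']), topK=10, withWeight=True)
    print(row['title'], [(w, round(weight, 3)) for w, weight in keywords])
```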

Word Frequency Statistics


```python
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import jieba
import jieba.analyse
import codecs

# Load data: columns are title, content, fenci
data1 = pd.read_csv('./dataset/rawdata/total_fenci_1.csv', header=0, encoding='utf-8', dtype=str)
data1 = data1.astype(str)

segments = []
for index, row in data1.iterrows():
    words = str(row['fenci']).strip('\n').split(' ')
    for word in words:
        # Record every token with a count of 1; counts are aggregated below
        segments.append({'word': word, 'count': 1})
dfSg = pd.DataFrame(segments)

# Aggregate counts per token
dfWord = dfSg.groupby('word')['count'].sum()

# Export the word-frequency table
dfWord.to_csv('./dataset/wordfrequency/word_frequency01.csv', encoding='utf-8')
dfWord.to_excel('./dataset/wordfrequency/word_frequency01.xlsx')
```
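
The exported frequency table is unsorted. A small sketch that sorts it and prints the most frequent tokens (the top-20 cutoff is an arbitrary choice):

```python
# A minimal sketch: show the 20 most frequent tokens (cutoff is arbitrary)
top_words = dfWord.sort_values(ascending=False).head(20)
print(top_words)
```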

Sentiment Analysis

```python
import jieba
from snownlp import SnowNLP
import numpy as np
import pandas as pd

# Quick check of the SnowNLP API: .sentiments returns a score in [0, 1],
# where values near 1 read as positive and values near 0 as negative
# s = SnowNLP(u'这个东西真心赞')
# print(s.tags)
# print(s.sentiments)

# Load data: columns are title, content, fenci
data1 = pd.read_csv('./dataset/rawdata/total_fenci_1.csv', header=0, encoding='utf-8', dtype=str)
data1 = data1.astype(str)
# Add a sentiment column, filled in below
data1["sentiment"] = np.nan

for index, row in data1.iterrows():
    # Assign via .at so the score is written back to the DataFrame
    data1.at[index, "sentiment"] = SnowNLP(str(row['fenci']).strip('\n')).sentiments

data1.to_csv('./dataset/sentiment/sentiment_1.csv', columns=['title', 'content', 'fenci', 'sentiment'], encoding='utf-8')
data1.to_excel('./dataset/sentiment/sentiment1.xlsx', columns=['title', 'content', 'fenci', 'sentiment'])
```
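
To get a quick view of the score distribution, the sentiment values can be bucketed into coarse labels. A minimal sketch, where the 0.4 and 0.6 thresholds are assumptions rather than part of the original pipeline:

```python
# A minimal sketch: bucket SnowNLP scores into coarse labels
# (the 0.4 / 0.6 thresholds are arbitrary, not from the original pipeline)
data1["label"] = pd.cut(data1["sentiment"],
                        bins=[0, 0.4, 0.6, 1],
                        labels=["negative", "neutral", "positive"],
                        include_lowest=True)
print(data1["label"].value_counts())
```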

LDA Topic Model


```python
# -*- coding: utf-8 -*-
import codecs
import jieba
from gensim import corpora
from gensim.models import LdaModel
from gensim.corpora import Dictionary

# Read the segmented text; each line becomes one list of tokens
fr = open('./dataset/rawdata/total_fenci_2.csv', 'r', encoding='utf-8')
train = []
for line in fr:
    train.append(line.strip().split(' '))

# Build the dictionary and convert each document to a bag-of-words vector
dictionary = corpora.Dictionary(train)
corpus = [dictionary.doc2bow(text) for text in train]

# Train a 5-topic LDA model
lda = LdaModel(corpus=corpus, id2word=dictionary, num_topics=5)

# Print the top words of each topic
topic_list = lda.print_topics(5)
for topic in topic_list:
    print(topic)
```
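
Beyond the per-topic word lists, gensim can also report which topic each document leans toward via get_document_topics. A minimal sketch (printing only the first five documents is an arbitrary choice):

```python
# A minimal sketch: dominant topic per document
# (limiting output to the first 5 documents is arbitrary)
for i, bow in enumerate(corpus[:5]):
    doc_topics = lda.get_document_topics(bow)
    dominant = max(doc_topics, key=lambda item: item[1])
    print("doc %d -> topic %d (prob %.3f)" % (i, dominant[0], dominant[1]))
```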


