# 这是当时在琢磨文本挖掘时的小技术，贴出来共享一下
# (A small text-mining technique I worked out at the time, shared here.)
library(Rwordseg)  # Chinese word-segmentation package

# Import the consultation data; keep text columns as character vectors.
data <- read.csv("C:\\Users\\hormy\\Documents\\咨询数据.csv",
                 stringsAsFactors = FALSE)

# Strip ASCII letters, digits and underscores from the content column,
# leaving only the Chinese text to be segmented.
data$内容 <- gsub("[a-z0-9A-Z_]", "", data$内容)

# Segment each document into words. Custom dictionaries for Rwordseg can
# be loaded from files in the working directory beforehand.
words <- segmentCN(data$内容)

# Read the stop-word list (stopwordsCN.txt, saved as UTF-8). Declaring
# the encoding here replaces the original read-then-enc2utf8 round trip.
stopwordsCN <- readLines("stopwordsCN.txt", encoding = "UTF-8")
# Keep only entries whose encoding was actually marked (non-ASCII lines);
# this drops pure-ASCII/garbage lines, matching the original filter.
# NOTE(review): this also discards any ASCII-only stop words — confirm
# that is intended.
stopwordsCN <- stopwordsCN[Encoding(stopwordsCN) != "unknown"]
# Remove stop words from a character vector of segmented tokens.
#
# Args:
#   x:         character vector of words (one segmented document).
#   stopwords: character vector of stop words to drop.
#
# Returns:
#   `x` with every element that appears in `stopwords` removed, in the
#   original order (duplicates of non-stop words are kept).
removeStopWords <- function(x, stopwords) {
  # Vectorized set membership replaces the original O(n^2) while-loop
  # that grew the result one element at a time with c().
  # NOTE(review): NA tokens are kept by %in% unless NA is itself listed
  # as a stop word; the old loop dropped NA whenever `stopwords` was
  # non-empty — confirm NA tokens cannot occur here.
  x[!(x %in% stopwords)]
}
# Apply the stop-word filter to every segmented document.
words <- lapply(words, removeStopWords, stopwordsCN)

# Word-cloud plotting.
library(wordcloud)

# Compute corpus-wide word frequencies.
wordsnum <- table(unlist(words))

# Keep the 250 most frequent words; sorting descending and taking the
# head reads more directly than the original sort() + tail().
wordsnum <- sort(wordsnum, decreasing = TRUE)
wordsnum <- head(wordsnum, 250)

# Draw the word cloud. random.order = FALSE places the most frequent
# words at the centre; colors come from the Dark2 Brewer palette.
# NOTE(review): family = "myFont3" must be a registered font family
# (e.g. via windowsFonts()/showtext), or CJK glyphs will not render —
# confirm it is set up before this script runs.
wordcloud(names(wordsnum), as.vector(wordsnum),
          random.order = FALSE, random.color = FALSE,
          colors = brewer.pal(8, "Dark2"), family = "myFont3")