# -- Load and segment the source text --------------------------------------
# Assumes library(Rwordseg) was loaded earlier (this may be a chunk of a
# larger script -- TODO confirm).
a <- segmentCN("1.txt", returnType = "tm")
b <- read.table("1.segment.txt", fill = TRUE)
# NOTE: the interactive help call `?table` was removed -- it has no place in
# a non-interactive script.
# Read the segmented text; UTF-8 keeps the Chinese characters intact.
lecture <- read.csv("1.segment.txt", sep = ",", header = TRUE,
                    fileEncoding = "UTF-8")
# Peek at the first rows to spot character-encoding problems early.
head(lecture)
# Number of records in the data set.
n <- length(lecture[, 1])
print(n)
# == Text preprocessing
# Drop entries that are a single blank string.
res <- lecture[lecture != " "]
# Strip URLs.  'https?' also catches https links; the original pattern
# matched only plain 'http' URLs.
res <- gsub(pattern = "https?:[a-zA-Z\\/\\.0-9]+", replacement = "", x = res)
# Remove common Chinese stop words.  A plain alternation is used here: the
# original character class [我|你|的|了|是] treated '|' as a literal, so it
# also (unintentionally) deleted pipe characters from the text.
res <- gsub(pattern = "我|你|的|了|是", replacement = "", x = res)
# == Word segmentation + frequency counts
words <- unlist(lapply(X = res, FUN = segmentCN))
word <- lapply(X = words, FUN = strsplit, " ")
v <- table(unlist(word))
# Sort by frequency, descending (clearer than rev(sort(v))).
v <- sort(v, decreasing = TRUE)
# BUG FIX: data.frame(freq = v) on a `table` object expands it into two
# columns (freq.Var1 / freq.Freq), leaving d$freq NULL (two partial matches)
# and breaking the subset below.  Coerce the counts to a plain numeric
# vector instead.
d <- data.frame(word = names(v), freq = as.numeric(v))
# Keep multi-character words with frequency >= 20.
d <- subset(d, nchar(as.character(word)) > 1 & freq >= 20)
# == Word cloud rendering
# Use a CJK-capable font so the Chinese labels display correctly.  Save the
# old par() settings and restore them afterwards so this plot does not leak
# graphics state into later plots.  (Assumes library(wordcloud) and
# library(RColorBrewer) were loaded earlier -- TODO confirm.)
op <- par(family = "STKaiti")
mycolors <- brewer.pal(8, "Dark2")
wordcloud(d$word, d$freq, random.order = FALSE, random.color = FALSE,
          colors = mycolors)
par(op)