Notes on the text-mining code used in my paper.
(1) Remove stopwords and segment the text with jieba
# @Time : 2021/3/8 19:50
# @Author : chao
# Remove stopwords, then segment the text with jieba.
import jieba
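# The paths below point to a stopword list (one word per line) and a custom
# jieba user dictionary that keeps domain-specific terms from being split.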
stopwords_filepath = r"C:\Users\词典\stopword停用词.txt"
jieba.load_userdict(r"C:\Users\\词典\out.txt")
# Build the stopword list from a UTF-8 (BOM-tolerant) file, one word per line.
def stopwordslist(stopwords_filepath):
    with open(stopwords_filepath, 'r', encoding='utf-8-sig', errors='ignore') as f:
        stopwords = [line.strip() for line in f]
    return stopwords

# Load the stopwords once, as a set, instead of re-reading the file for every sentence.
stopwords = set(stopwordslist(stopwords_filepath))

# Segment one sentence and drop stopwords; returns a space-separated string.
def seg_sentence(sentence):
    sentence_seged = jieba.cut(sentence.strip())
    outstr = ''
    for word in sentence_seged:
        if word not in stopwords and word != '\t':
            outstr += word
            outstr += " "
    return outstr
# The corpus was saved in a Windows ANSI code page; Python has no 'ANSI' codec,
# so 'gbk' (the usual code page on Chinese Windows) is assumed here.
with open(r'C:\Users\\代码\数据\预处理后数据\zong_data.txt', 'r',
          encoding='gbk', errors='ignore') as inputs, \
     open(r'C:\Users\\代码\去除停用词并分词\去除停用词并分词结果\zong_fengci_tingyongci2.txt',
          'w', encoding='utf-8') as outputs:
    for line in inputs:
        line_seg = seg_sentence(line)
        outputs.write(line_seg + '\n')
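As a quick sanity check before processing the whole corpus, seg_sentence can be run on a single sentence. The sample text below is invented for illustration, and the exact tokens returned depend on the user dictionary and stopword list that were loaded:
# Hypothetical smoke test for the segmentation pipeline above.
sample = "文本挖掘是自然语言处理的一个重要研究方向"
print(seg_sentence(sample))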