import jieba
import re
import jieba.analyse
import jieba.posseg as pseg
from collections import Counter
def token(file):
    """Segment labeled news lines from *file*, drop stop words, and count term frequencies.

    Each line of *file* is expected to look like ``"label\ttext"``; only the
    text column is segmented.  Also runs corpus-level keyword extraction
    (TF-IDF with a custom IDF table, and TextRank) over the validation file.

    Side effects: prints every POS-tagged token, every segmented token, and
    the final frequency Counter.

    Returns
    -------
    tuple[list[list[str]], Counter]
        The per-line lists of kept tokens, and a Counter over all kept
        tokens.  (The original returned None and — by accident — iterated
        the module-level global ``train`` instead of its own parameter;
        both are fixed here.)
    """
    # Build the stop-word set once; strip CR/LF so entries compare equal to
    # clean tokens.  A set gives O(1) membership tests in the loop below,
    # and the context manager closes the handle (the original leaked it).
    with open(r'E:\BaiduNetdiskDownload\cnews\stop_word.txt', 'r', encoding='utf8') as sw:
        stop_words = {re.sub('[\r\n]', '', line) for line in sw}

    # Read the whole corpus as raw bytes for jieba's keyword extractors.
    with open(r'E:\BaiduNetdiskDownload\cnews\cnews.val.txt', 'rb') as corpus:
        content = corpus.read()
    jieba.analyse.set_idf_path(r'E:\BaiduNetdiskDownload\cnews\idf.txt')       # custom IDF table
    jieba.analyse.set_stop_words(r'E:\BaiduNetdiskDownload\cnews\stop_word.txt')  # filter stop words
    # Top-20 keywords with weights; allowPOS not set, so no POS filtering.
    key_word_tf_idf = jieba.analyse.extract_tags(content, topK=20, withWeight=True)
    # Top-20 keywords via TextRank, for comparison.
    key_word_text_rank = jieba.analyse.textrank(content, topK=20, withWeight=True)
    # print(key_word_text_rank)

    X_train = []      # one list of kept tokens per input line
    count_list = []   # flat list of all kept tokens, feeds the Counter
    for raw in file.readlines():   # BUG FIX: iterate the parameter, not the global `train`
        line = re.sub('[\r\n]', '', raw)
        fields = line.split('\t')
        result = jieba.lcut(fields[1])          # plain segmentation of the text column
        # POS-tagged segmentation, printed for inspection (HMM disabled).
        for word, flag in pseg.lcut(fields[1], HMM=False):
            print(word, flag)
        train_list = []
        for tok in result:
            print(tok)
            if tok not in stop_words:           # keep only non-stop-word tokens
                train_list.append(tok)
                count_list.append(tok)
        X_train.append(train_list)
    count = Counter(count_list)                 # overall term frequencies
    print(count)
    return X_train, count
# Driver: tokenize the validation split of the cnews corpus.
# Guarded so importing this module does not trigger file I/O; the context
# manager guarantees the handle is closed even if token() raises.
if __name__ == '__main__':
    with open(r'E:\BaiduNetdiskDownload\cnews\cnews.val.txt', 'r', encoding='utf8') as train:
        token(train)