from importlib.resources import path
import os
import re
import jieba
import jieba.posseg as psg
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud
import matplotlib.pyplot as plt
"""
Sentence tokenization and stopword removal (句子分词, 去停).
"""
def fclist(sentences,stoplist,fchcxfilepath):
list = ["zg","z","y","x","uv","ul","uj","ug","ud","vi","v","t","tg","rz","rr","r","q","o","mq","m","i","k","h","f","e","a","ad","ag","an"]
outcx = open(fchcxfilepath,encoding='utf-8',mode='w')
# 结巴分词(精准模式)
cutsentence = psg.lcut(sentences)
lastsentences = ""
cx = ""
for word,flag in cutsentence:
# 去停
if
# NOTE(review): the two lines below are web-page residue from the article this
# snippet was scraped from (title and publish date), not code. The function
# `fclist` above is truncated mid-`if` and is incomplete as pasted.
# 特征提取 TF-IDF算法 python代码
# 最新推荐文章于 2023-12-31 22:33:59 发布