# Load the custom user dictionary for jieba segmentation.
# NOTE(review): `I` and `i` are defined earlier in the file — presumably a list
# of dictionary names and a loop index; confirm against the surrounding code.
userdict_path = 'g:/' + I[i] + '.txt'
jieba.load_userdict(userdict_path)

# Load the Chinese stop-word list (one word per line).
# Open with an explicit UTF-8 encoding instead of decoding each line by hand:
# the original called `.decode('utf8')` on a str, which is Python-2-only and
# raises AttributeError under Python 3.
stopwords_path = 'G:/g/data/word/chinese_stopword.txt'
with open(stopwords_path, 'r', encoding='utf-8') as fh:
    # A set gives O(1) membership tests in the per-token filter below.
    # (The only visible use of this name is the `in` test; if later code
    # relies on list ordering/indexing, wrap in list() there.)
    stop_single_words = {line.strip() for line in fh}
# Blank lines in the file would otherwise add '' to the set; jieba never
# emits '' as a token, so discarding it is purely cosmetic.
stop_single_words.discard('')

# Segment the two selected abstracts (rows j and k of the ABSTRACT_ZH column)
# and drop stop words from each token stream.
word_cut = table_x.ABSTRACT_ZH[[j, k]].apply(
    lambda s: [tok for tok in jieba.cut(s) if tok not in stop_single_words]
)