# Code
import pandas as pd

# Load the news data set: a tab-separated file with four unnamed columns
# (category, theme, URL, article body), UTF-8 encoded. Rows with any
# missing value are dropped immediately.
df_news = pd.read_table(
    './data/val.txt',
    names=['category', 'theme', 'URL', 'content'],
    encoding='utf-8',
).dropna()
# print(df_news.shape)
import jieba

# Tokenize every article body with jieba and collect the token lists.
# `tolist()` turns the underlying numpy array into a plain Python list.
content = df_news.content.values.tolist()
# print(content[0])

# content_S holds one token list per article that survives filtering.
content_S = []
for line in content:
    current_segment = jieba.lcut(line)
    # Keep articles that yield more than one token and whose raw text is
    # not just a line terminator. BUG FIX: the original compared the token
    # *list* to the string '\r\n' (always unequal, so the check was dead);
    # the blank-line filter must test the raw input line instead.
    if len(current_segment) > 1 and line != '\r\n':
        content_S.append(current_segment)
# print(content_S[1000])
# Wrap the per-article token lists in a single-column DataFrame so the
# downstream cleaning / stop-word filtering can work on tabular data.
df_content = pd.DataFrame(data={'content_S': content_S})
# print(df_content.head())
# 分完词后数据太乱,对数据进行清洗工作,停用词过滤操作
stopwords = pd.read_csv('stopwords.txt', index_col=False, sep='\t', quoting=3, names=['stopword'], encoding='utf-8