"""
@author: liushuchun
"""
import re
import string
import jieba
# Load the stop word list; strip trailing newlines so membership tests work
with open("dict/stop_words.utf8", encoding="utf8") as f:
    stopword_list = [line.strip() for line in f.readlines()]


def tokenize_text(text):
    """Segment text with jieba and strip surrounding whitespace from each token."""
    tokens = jieba.cut(text)
    tokens = [token.strip() for token in tokens]
    return tokens


def remove_special_characters(text):
    """Remove ASCII punctuation from every token and rejoin with spaces."""
    tokens = tokenize_text(text)
    pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))
    filtered_tokens = filter(None, [pattern.sub('', token) for token in tokens])
    filtered_text = ' '.join(filtered_tokens)
    return filtered_text


def remove_stopwords(text):
    """Drop tokens that appear in the stop word list."""
    tokens = tokenize_text(text)
    filtered_tokens = [token for token in tokens if token not in stopword_list]
    filtered_text = ''.join(filtered_tokens)
    return filtered_text


def normalize_corpus(corpus, tokenize=False):
    """Clean each document; optionally return token lists instead of strings."""
    normalized_corpus = []
    for text in corpus:
        text = remove_special_characters(text)
        text = remove_stopwords(text)
        # Optionally re-tokenize the cleaned text, then collect each document once
        if tokenize:
            text = tokenize_text(text)
        normalized_corpus.append(text)
    return normalized_corpus

This post presented a method for preprocessing Chinese text, covering steps such as removing special characters and filtering out stop words, implemented with the jieba segmentation library and regular expressions. Together, these steps clean the raw text and prepare it for downstream natural language processing tasks.
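For reference, here is a minimal usage sketch of the pipeline above, assuming the stop word file dict/stop_words.utf8 is in place; the sample sentences are illustrative and not taken from the original post.

corpus = [
    "他来到了网易杭研大厦!",
    "小明硕士毕业于中国科学院计算所,后在日本京都大学深造。",
]

# Cleaned strings: punctuation removed, stop words filtered out
print(normalize_corpus(corpus))

# The same documents as token lists, ready for a downstream vectorizer
print(normalize_corpus(corpus, tokenize=True))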
