本文利用20Newsgroups这个数据集作为Corpus(语料库),用户可以通过搜索关键字查询关联度最高的News,从而实现一个简单的文本搜索引擎:
1. 导入数据集
# Load the 20 Newsgroups corpus (downloaded and cached on first use).
from sklearn.datasets import fetch_20newsgroups

newsgroups = fetch_20newsgroups()

# Quick sanity check: corpus size and one raw document.
doc_count = len(newsgroups.data)
print('Number of documents: {}'.format(doc_count))
print('Sample document:\n{}'.format(newsgroups.data[0]))
2. 向量化单词
# Build a bag-of-words vocabulary over the whole corpus.
from sklearn.feature_extraction.text import CountVectorizer


def show_vocabulary(vectorizer):
    """Print the fitted vectorizer's vocabulary, ~10 words per line.

    Args:
        vectorizer: a fitted vectorizer exposing get_feature_names_out().
    """
    words = vectorizer.get_feature_names_out()
    print(f'Vocabulary size: {len(words)} words')
    # we can print ~10 words per line; plain slicing replaces the original
    # np.array_split/math.ceil chunking, whose imports were missing.
    for start in range(0, len(words), 10):
        row = words[start:start + 10]
        print(''.join(f'{word:<15}' for word in row))


count = CountVectorizer()
count.fit(newsgroups.data)

# FIX: show_vocabulary was previously called before it was defined, which
# raises NameError in a top-to-bottom script; the definition now precedes use.
show_vocabulary(count)
print(f'Size of vocabulary: {len(count.get_feature_names_out())}')