# -*-coding:utf-8-*-
import gensim
"""
Tutorial 1: Corpora and Vector Spaces
"""
import logging
# logging.basicConfig() configures the root logger in place and returns None,
# so the original `log = logging.basicConfig(...)` bound None to `log`;
# call it purely for its side effect instead.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
"""
From Strings to Vectors
"""
from gensim import corpora
# Nine short documents: the classic Deerwester et al. LSA example corpus.
documents = [
    "Human machine interface for lab abc computer applications",
    "A survey of user opinion of computer system response time",
    "The EPS user interface management system",
    "System and human system engineering testing of EPS",
    "Relation of user perceived response time to error measurement",
    "The generation of random binary unordered trees",
    "The intersection graph of paths in trees",
    "Graph minors IV Widths of trees and well quasi ordering",
    "Graph minors A survey"
]
# Tokenize (lowercase + whitespace split) and remove stop words.
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in documents]
# Drop tokens that occur only once across the whole corpus.
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
texts = [[token for token in text if frequency[token] > 1] for text in texts]
from pprint import pprint  # pretty-printer: one document per line instead of one long row
pprint(texts)
# Map each remaining token to an integer id.
dictionary = corpora.Dictionary(texts)
# NOTE(review): filename looks like a typo of 'deerwester.dict' (and differs
# from 'deerwaster.mm' below); kept as-is so existing saved artifacts still load.
dictionary.save('deerweater.dict')
print(dictionary)
print(dictionary.token2id)
# Convert a new document to a sparse bag-of-words vector; tokens absent from
# the dictionary (e.g. "interaction") are silently ignored by doc2bow.
new_doc = 'Human computer interaction'
new_vec = dictionary.doc2bow(new_doc.lower().split())
print(new_vec)
# Vectorize the whole training corpus and persist it in Matrix Market format.
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize('deerwaster.mm', corpus)
for c in corpus:
    print(c)
"""
Corpus Streaming -- One Document at a Time
"""
class MyCorpus(object):
    """Memory-friendly corpus: yields one bag-of-words vector per input line,
    so only a single document is ever held in RAM.

    Relies on the module-level ``dictionary`` built earlier in this script.
    """

    def __iter__(self):
        # `with` closes the file deterministically; the original left the
        # handle open until garbage collection.
        # NOTE(review): path is 'dataset/mycorpus.txt' here but
        # 'datasets/mycorpus.txt' later in the script — one of the two is
        # probably a typo; confirm which directory actually exists.
        with open('dataset/mycorpus.txt') as fin:
            for line in fin:
                # One line == one document; unknown tokens are ignored by doc2bow.
                yield dictionary.doc2bow(line.lower().split())
corpus_memory_friendly = MyCorpus()  # just an object — no vectors loaded yet
print(corpus_memory_friendly)
# Iterating streams one sparse vector at a time from disk.
for vector in corpus_memory_friendly:
    print(vector)
from six import iteritems
# Build a dictionary without loading all texts into memory at once.
# NOTE(review): path 'datasets/mycorpus.txt' differs from 'dataset/mycorpus.txt'
# used inside MyCorpus — confirm which one is correct.
dictionary = corpora.Dictionary(line.lower().split() for line in open('datasets/mycorpus.txt'))
# Ids of stop words that made it into the dictionary.
stop_ids = [dictionary.token2id[stopword] for stopword in stoplist
            if stopword in dictionary.token2id]
# Ids of tokens appearing in exactly one document (dfs = document frequencies).
once_ids = [tokenid for tokenid, docfreq in iteritems(dictionary.dfs) if docfreq == 1]
# Remove stop words and words that appear only once.
dictionary.filter_tokens(stop_ids + once_ids)
# Remove gaps in the id sequence left by the removed words.
dictionary.compactify()
print(dictionary)
"""
Corpus Format
"""
# Toy corpus of two documents: one with a single (term_id=1, weight=0.5)
# entry, and one empty document.
corpus = [[(1, 0.5)], []]
# BUG FIX: every serializer was called with an empty filename (''), which
# fails at runtime; write each format to a real file, and save the Matrix
# Market copy as 'corpus.mm' so the load below reads back what was written.
corpora.MmCorpus.serialize('corpus.mm', corpus)              # Matrix Market format
corpora.SvmLightCorpus.serialize('corpus.svmlight', corpus)  # Joachims' SVMlight format
corpora.BleiCorpus.serialize('corpus.lda-c', corpus)         # Blei's LDA-C format
corpora.LowCorpus.serialize('corpus.low', corpus)            # GibbsLDA++ format
# Load the Matrix Market file back; MmCorpus streams documents lazily.
corpus = corpora.MmCorpus('corpus.mm')
print(corpus)        # prints only a summary, not the contents
print(list(corpus))  # materializes the whole corpus into memory
for doc in corpus:   # memory-friendly alternative: one document at a time
    print(doc)
"""
Compatibility with Numpy and Scipy
"""
import gensim
import numpy as np
# 5x2 matrix of small random ints; with Dense2Corpus defaults the rows are
# terms and the columns are documents.
numpy_matrix = np.random.randint(10, size=[5, 2])
corpus = gensim.matutils.Dense2Corpus(numpy_matrix)
# BUG FIX: num_terms must equal the feature (row) count of the source matrix;
# the hard-coded 10 silently zero-padded the round-tripped matrix to 10 rows.
numpy_matrix_dense = gensim.matutils.corpus2dense(corpus, num_terms=numpy_matrix.shape[0])
import scipy.sparse
scipy_sparse_matrix = scipy.sparse.random(5, 2)  # random sparse 5x2 matrix
corpus = gensim.matutils.Sparse2Corpus(scipy_sparse_matrix)
scipy_csc_matrix = gensim.matutils.corpus2csc(corpus)
# 文本分析--Gensim向量空间 (Text analysis -- Gensim vector spaces)
# 最新推荐文章于 2022-10-30 14:50:38 发布 (blog footer: latest article published 2022-10-30 14:50:38)
# NOTE(review): the two lines above are page-footer text from the blog this
# script was copied from; commented out so the file parses as Python.