Natural Language Processing with gensim

I should say up front that everything I write here is meant as review material for my future self.

1. Working with gensim basically comes down to the following three steps

Corpora and Vector Spaces        // corpora and vector spaces

Topics and Transformations       // topic transformations

Similarity Queries               // similarity queries

2. A quick example

# step 1: a toy corpus -- each document is a list of (token_id, count) pairs
corpus =  [[(0, 1.0), (1, 1.0), (2, 1.0)],
           [(2, 1.0), (3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (8, 1.0)],
           [(1, 1.0), (3, 1.0), (4, 1.0), (7, 1.0)],
           [(0, 1.0), (4, 2.0), (7, 1.0)],
           [(3, 1.0), (5, 1.0), (6, 1.0)],
           [(9, 1.0)],
           [(9, 1.0), (10, 1.0)],
           [(9, 1.0), (10, 1.0), (11, 1.0)],
           [(8, 1.0), (10, 1.0), (11, 1.0)]]
# step 2: train a TF-IDF model on the corpus and transform a new sparse vector
from gensim import models
tfidf = models.TfidfModel(corpus)
vec = [(0, 1), (4, 1)]
print(tfidf[vec])
# step 3: index the TF-IDF corpus and query it with the transformed vector
from gensim import similarities
index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=12)
sims = index[tfidf[vec]]
print(list(enumerate(sims)))
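The two weights that step 2 prints come from gensim's default TF-IDF scheme; as far as I can tell the defaults are raw term frequency times log2(N / document frequency), followed by L2 normalization. A minimal sketch that reproduces the numbers by hand for the query vector above, with the document frequencies read straight off the toy corpus:

import math
num_docs = 9                      # documents in the toy corpus above
doc_freq = {0: 2, 4: 3}           # token 0 appears in 2 documents, token 4 in 3
raw = [(term_id, tf * math.log2(num_docs / doc_freq[term_id]))
       for term_id, tf in [(0, 1), (4, 1)]]
norm = math.sqrt(sum(weight ** 2 for _, weight in raw))
print([(term_id, weight / norm) for term_id, weight in raw])   # roughly [(0, 0.81), (4, 0.59)]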

3. Corpora and Vector Spaces

  • In-memory approach

documents = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]

from pprint import pprint
from collections import defaultdict

# remove common words and tokenize
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in documents]

# keep only tokens that appear more than once across the corpus
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
texts = [[token for token in text if frequency[token] > 1] for text in texts]

import gensim.corpora
dictionary = gensim.corpora.Dictionary(texts)   # map each surviving token to an integer id
dictionary.save("C:\\Users\\17768\\Desktop\\deerwester.dict")

corpus = [dictionary.doc2bow(text) for text in texts]   # sparse (token_id, count) vectors
#gensim.corpora.MmCorpus.serialize('C:\\Users\\17768\\Desktop\\deerwester.mm', corpus)  # save
#corpus = gensim.corpora.MmCorpus('C:\\Users\\17768\\Desktop\\deerwester.mm')  # load

pprint(corpus)
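A small sanity-check sketch, using the dictionary built above: token2id shows which integer id each surviving token got, and doc2bow maps any new text into the same sparse (id, count) format; tokens the dictionary has never seen are simply ignored.

print(dictionary.token2id)
new_doc = "Human computer interaction"
new_vec = dictionary.doc2bow(new_doc.lower().split())
print(new_vec)   # "interaction" is not in the dictionary, so it does not show up in the vector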

  • Streaming-from-disk approach

import gensim.corpora

# Build the dictionary without loading the whole file into memory: one line per document.
stoplist = set('for a of the and to in'.split())
dictionary = gensim.corpora.Dictionary(
    line.lower().split() for line in open('C:\\Users\\17768\\Desktop\\mycorpus.txt'))
stop_ids = [dictionary.token2id[stopword] for stopword in stoplist
            if stopword in dictionary.token2id]                    # ids of stopwords that made it in
once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items()
            if docfreq == 1]                                       # ids of tokens seen in only one document
dictionary.filter_tokens(stop_ids + once_ids)                      # drop both groups
dictionary.compactify()                                            # re-assign ids to remove the gaps
print(dictionary)

class MyCorpus(object):
    def __iter__(self):
        # Yield one bag-of-words vector per line; only one document is in memory at a time.
        for line in open('C:\\Users\\17768\\Desktop\\mycorpus.txt'):
            yield dictionary.doc2bow(line.lower().split())

corpus_memory_friendly = MyCorpus()
for vector in corpus_memory_friendly:
    print(vector)
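The streamed corpus can be serialized exactly like the in-memory one; a minimal sketch follows (the output path is just an example, pick whatever fits your layout):

gensim.corpora.MmCorpus.serialize('C:\\Users\\17768\\Desktop\\mycorpus.mm', corpus_memory_friendly)   # writes the stream to disk in Matrix Market format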

4. Topics and Transformations

import gensim
corpus = gensim.corpora.MmCorpus('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.mm')   # the bag-of-words corpus serialized earlier
tfidf = gensim.models.TfidfModel(corpus)   # step 1: initialize (train) the transformation
# tfidf.save('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.tfidf')  # save
# tfidf = gensim.models.TfidfModel.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.tfidf')  # load

corpus_tfidf = tfidf[corpus]   # step 2: apply it to the whole corpus (computed lazily)
for doc in corpus_tfidf:
    print(doc)

# Other transformations are trained and applied the same way
# (dictionary here would be the one saved earlier as deerwester.dict):
#model = gensim.models.TfidfModel(corpus, normalize=True)
#model = gensim.models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=300)
#model = gensim.models.RpModel(corpus_tfidf, num_topics=500)
#model = gensim.models.LdaModel(corpus, id2word=dictionary, num_topics=100)
#model = gensim.models.HdpModel(corpus, id2word=dictionary)
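To make the chaining concrete, here is a short sketch that folds the TF-IDF corpus into a 2-topic LSI space and prints the topics; it assumes the dictionary was saved next to deerwester.mm as deerwester.dict (adjust the path if yours lives elsewhere).

dictionary = gensim.corpora.Dictionary.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.dict')
lsi = gensim.models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=2)   # tfidf -> lsi
for topic in lsi.print_topics(2):   # each topic is a weighted combination of words
    print(topic)
corpus_lsi = lsi[corpus_tfidf]      # transformations compose: bow -> tfidf -> lsi
for doc in corpus_lsi:
    print(doc)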

5. Similarity Queries

import gensim
dictionary = gensim.corpora.Dictionary.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.dict')
corpus = gensim.corpora.MmCorpus('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.mm')
lsi = gensim.models.LsiModel(corpus, id2word=dictionary, num_topics=2)   # 2-dimensional LSI space
index = gensim.similarities.MatrixSimilarity(lsi[corpus])                # index the whole corpus in that space
# index.save('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.index')  # save
# index = gensim.similarities.MatrixSimilarity.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.index')  # load
doc = "Human computer interaction"
vec_bow = dictionary.doc2bow(doc.lower().split())   # query as a bag-of-words vector
vec_lsi = lsi[vec_bow]                              # project the query into LSI space
sims = index[vec_lsi]                               # cosine similarity against every indexed document
print(list(enumerate(sims)))
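The scores come back in corpus order; sorting them gives the usual ranked view, best match first:

sims = sorted(enumerate(sims), key=lambda item: -item[1])
print(sims)   # (document index, cosine similarity) pairs, most similar first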

6. All of the save / load calls in one place

dict:

#dictionary.save("C:\\Users\\17768\\Desktop\\deerwester.dict")  # save

#dictionary = gensim.corpora.Dictionary.load('C:\\Users\\17768\\Desktop\\deerwester.dict')  # load

corpus:

#gensim.corpora.MmCorpus.serialize('C:\\Users\\17768\\Desktop\\deerwester.mm', corpus)  # save

#corpus = gensim.corpora.MmCorpus('C:\\Users\\17768\\Desktop\\deerwester.mm')  # load

model:

# tfidf.save('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.tfidf')  # save

# tfidf = gensim.models.TfidfModel.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.tfidf')  # load

model index:

# index.save('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.index')  # save

# index = gensim.similarities.MatrixSimilarity.load('C:\\Users\\17768\\Desktop\\program\\gensim_\\deerwester.index')  # load
