Code examples: TF-IDF, TextRank, and LDA topic document models

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import pandas as pd
"""
Keyword extraction based on the TF-IDF algorithm

import jieba.analyse

jieba.analyse.extract_tags(sentence, topK=20, withWeight=False, allowPOS=())
    sentence:   the text to extract keywords from
    topK:       how many of the highest-TF-IDF-weighted keywords to return (default 20)
    withWeight: whether to also return each keyword's weight (default False)
    allowPOS:   only include words with the listed part-of-speech tags (default empty, i.e. no filtering)
"""
import jieba.analyse as analyse
import pandas as pd

df = pd.read_csv("technology_news.csv", encoding='utf-8')
df = df.dropna()
lines = df.content.values.tolist()
content = "".join(lines)
print(" ".join(analyse.extract_tags(content, topK=30, withWeight=False, allowPOS=())))
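# A minimal sketch beyond the original listing: with withWeight=True,
# extract_tags returns (keyword, weight) tuples, so the TF-IDF scores
# themselves can be inspected rather than just the ranked words.
for keyword, weight in analyse.extract_tags(content, topK=10, withWeight=True):
    print(keyword, weight)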

"""
Keyword extraction based on the TextRank algorithm

Basic idea:
1. Segment the text whose keywords are to be extracted.
2. Build a graph from word co-occurrence within a fixed-size window
   (default 5, adjustable via the span attribute).
3. Run PageRank over the graph's nodes; note the graph is undirected and weighted.
"""
import jieba.analyse as analyse
import pandas as pd

df = pd.read_csv("military_news.csv", encoding='utf-8')
df = df.dropna()
lines = df.content.values.tolist()
content = "".join(lines)

print(" ".join(analyse.textrank(content, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v'))))
print("--------------------- separator ----------------")
print(" ".join(analyse.textrank(content, topK=20, withWeight=False, allowPOS=('ns', 'n'))))
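# A sketch under one assumption: in jieba's implementation the co-occurrence
# window is the span attribute of a TextRank instance (default 5), so a wider
# window can be tried by instantiating the extractor directly.
tr = analyse.TextRank()
tr.span = 10
print(" ".join(tr.textrank(content, topK=20, withWeight=False, allowPOS=('ns', 'n'))))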

"""
Build an LDA topic model, then use it to assign topics to a new document.
"""

from gensim import corpora, models, similarities
import gensim
import pandas as pd

stopwords = pd.read_csv("stopwords.txt", index_col=False, quoting=3, sep="\t", names=['stopword'], encoding='utf-8')
stopwords = stopwords['stopword'].values
import jieba
import pandas as pd

df = pd.read_csv("technology_news.csv", encoding='utf-8')
df = df.dropna()
lines = df.content.values.tolist()

sentences = []
for line in lines:
    try:
        segs = jieba.lcut(line)
        segs = filter(lambda x: len(x) > 1, segs)          # drop single-character tokens
        segs = filter(lambda x: x not in stopwords, segs)  # drop stopwords
        sentences.append(list(segs))
    except Exception as e:
        print(e)
        continue

# Sanity checks: sentences is a list of token lists.
print(type(sentences[5]))
for ele in sentences[5]:
    print("ele:", ele)

print("------------")
print(sentences[4])
print(type(sentences), type(sentences[4]))
for e in sentences[4]:
    print("e:", e)

dictionary = corpora.Dictionary(sentences)
corpus = [dictionary.doc2bow(sentence) for sentence in sentences]
print(corpus[5])
print(type(corpus))
print(corpus)
print(type(dictionary))
print("dictionary:", dictionary)

lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)
# print(lda.print_topic(3, topn=5))
print("topics:")
for topic in lda.print_topics(num_topics=20, num_words=8):
    print(topic[0], topic[1])
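# A sketch, not part of the original post: gensim's CoherenceModel can score
# the learned topics, which helps when comparing different num_topics values.
from gensim.models import CoherenceModel
cm = CoherenceModel(model=lda, texts=sentences, dictionary=dictionary, coherence='c_v')
print("coherence (c_v):", cm.get_coherence())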
import re

def clean_email_text(text):
    text = text.replace('\n', " ")                        # remove all newlines
    text = re.sub(r"-", " ", text)                        # replace every '-' with a space, splitting hyphenated pairs
    text = re.sub(r"\d+/\d+/\d+", " ", text)              # remove dates such as 26/2/2019
    text = re.sub(r"[0-2]?[0-9]:[0-6][0-9]", " ", text)   # remove times such as 12:40 ('?' = zero or one occurrence)
    text = re.sub(r"[\w]+@[.\w]+", " ", text)             # remove email addresses
    text = re.sub(r"[a-zA-Z]*://[A-Za-z0-9./%&=?-]+", " ", text)  # remove URLs
    pure_text = ""
    for letter in text:
        if letter.isalpha() or letter == " ":             # keep only alphabetic characters and spaces
            pure_text += letter
    text = ' '.join(word for word in pure_text.split() if len(word) > 1)  # drop single-letter leftovers
    return text
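# Quick demonstration on a made-up string (the address and URL below are
# hypothetical). Note that str.isalpha() is also True for Chinese characters,
# which is why the Chinese sample text survives the filter.
print(clean_email_text("Meet at 12:40 on 26/2/2019, mail bob@example.com or see http://example.com/demo 人工智能"))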
text1 = "百度 科技 人工智能 机器学习 自动驾驶 互联网 发展 城市 经济 时代"

# Preprocessing
text1 = clean_email_text(text1)
print("before drop stopwords:", text1)

# Remove stopwords
text1 = [word for word in text1.lower().split() if word not in [".", '&']]

# Convert to numeric (bag-of-words) form
print("after drop stopwords:", text1)
bow = dictionary.doc2bow(text1)
print(lda.get_document_topics(bow))
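# A sketch beyond the original listing: pick the most probable topic for the
# new document and show its top words via print_topic.
doc_topics = lda.get_document_topics(bow)
best_topic, best_prob = max(doc_topics, key=lambda t: t[1])
print("best topic %d (p=%.3f): %s" % (best_topic, best_prob, lda.print_topic(best_topic, topn=8)))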
