naive_bayes: Naive Bayes News Classification (Study Notes)

import pandas as pd
import jieba    # jieba Chinese word segmenter
# Note: if the fields in val.txt are tab-separated, sep='\t' is safer than sep='\s+' (content may contain spaces)
df_news = pd.read_csv('val.txt',names=['category','theme','URL','content'],encoding='utf-8',sep='\s+')
df_news = df_news.dropna()

content = df_news.content.values.tolist()
content_S = []
for line in content:
	current_segment = jieba.lcut(line)  # segment the line into a list of tokens
	if len(current_segment) > 1 and current_segment != ['\r\n']:  # skip empty / newline-only lines
		content_S.append(current_segment)
# print(content_S[1000])
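
For reference, jieba.lcut segments a string and returns the tokens as a plain Python list; a tiny illustration (the exact split depends on jieba's dictionary):

print(jieba.lcut('我爱自然语言处理'))
# likely ['我', '爱', '自然语言', '处理'], though the exact segmentation depends on jieba's dictionary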

df_content = pd.DataFrame({'content_S': content_S}) # build a DataFrame from the segmented corpus
# df_content.head()

# Stop-word list; quoting=3 (csv.QUOTE_NONE) treats quote characters as literal text
stopwords = pd.read_csv('stopwords.txt',index_col=False,sep='\t',quoting=3,names=['stopword'],encoding='utf-8')

# Filter out stopwords
def drop_stopwords(contents, stopwords):
	contents_clean = []
	all_words = []
	for line in contents:
		line_clean = []
		for word in line:
			if word in stopwords:
				continue
			line_clean.append(word)
			all_words.append(str(word)) # collect every kept word for frequency statistics
		contents_clean.append(line_clean)
	return contents_clean, all_words

contents = df_content.content_S.values.tolist()
stopwords = set(stopwords.stopword.values.tolist())  # a set makes the 'word in stopwords' test O(1)
contents_clean, all_words = drop_stopwords(contents, stopwords)

df_content = pd.DataFrame({'contents_clean': contents_clean})
# df_content.head()

df_all_words = pd.DataFrame({'all_words': all_words})
# df_all_words.head()

# Count word frequencies (the old dict-style .agg({'count': np.size}) was removed from pandas)
words_count = df_all_words.groupby('all_words').size().reset_index(name='count')
words_count = words_count.sort_values(by=['count'], ascending=False)
#words_count.head(10)

from wordcloud import WordCloud
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (10,6)
wordcloud = WordCloud(font_path='simhei.ttf',background_color='white',max_font_size=80)
word_frequency = {x[0]: x[1] for x in words_count.head(100).values}  # top-100 words -> counts
wordcloud = wordcloud.fit_words(word_frequency)
plt.imshow(wordcloud)
plt.axis('off')

[Figure: word cloud of the 100 most frequent words]

TF-IDF: keyword extraction

import jieba.analyse
index = 2400
# print(df_news['content'][index])
content_S_str = ''.join(content_S[index])
print(' '.join(jieba.analyse.extract_tags(content_S_str, topK=6, withWeight=False)))

耐克 阿迪达斯 欧洲杯 球衣 西班牙 赞助费
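
Under the hood, jieba ranks candidates by TF-IDF, roughly tf(t, d) × idf(t), with IDF values taken from a corpus bundled with jieba. Passing withWeight=True exposes the scores behind the ranking above; a small sketch reusing the same content_S_str:

# Inspect the TF-IDF weights behind the extracted keywords
for word, weight in jieba.analyse.extract_tags(content_S_str, topK=6, withWeight=True):
    print(word, round(weight, 4))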

LDA: topic modeling

Input format: a list of lists, i.e. the entire corpus with each document already segmented into words.

from gensim import corpora, models, similarities # corpus tools, models, similarity utilities
import gensim
# Map each word to an id: essentially the bag-of-words dictionary
dictionary = corpora.Dictionary(contents_clean)
corpus = [dictionary.doc2bow(sentence) for sentence in contents_clean]
lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)
print(lda.print_topic(1, topn=5))
Out:
0.009*"男人" + 0.007*"女人" + 0.007*"中" + 0.007*"说" + 0.004*"学生"

for topic in lda.print_topics(num_topics=20, num_words=5):
    print(topic[1])

……
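
Beyond printing topics, the trained model can infer the topic mixture of an individual document. A minimal sketch using gensim's get_document_topics, which returns only topics above a minimum-probability threshold:

# Topic distribution of the first document: a list of (topic_id, probability) pairs
doc_topics = lda.get_document_topics(corpus[0])
print(doc_topics)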

# Assumes segmentation dropped no rows; otherwise contents_clean and df_news['category'] would misalign
df_train = pd.DataFrame({'contents_clean':contents_clean, 'label':df_news['category']})
df_train.label.unique()
# array(['汽车', '财经', '科技', '健康', '体育', '教育', '文化', '军事', '娱乐', '时尚'], dtype=object)

label_mapping = {'汽车':1,'财经':2,'科技':3,'健康':4,'体育':5,'教育':6,'文化':7,'军事':8,'娱乐':9,'时尚':0}
df_train['label'] = df_train['label'].map(label_mapping)
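
For decoding predicted ids back into readable category names later, an inverted mapping is handy (inverse_mapping is an illustrative helper, not part of the original notes):

# Illustrative helper: invert the label mapping, e.g. inverse_mapping[5] == '体育'
inverse_mapping = {v: k for k, v in label_mapping.items()}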

from sklearn.model_selection import train_test_split
# default split: 75% train / 25% test
x_train,x_test,y_train,y_test=train_test_split(df_train['contents_clean'].values,df_train['label'].values, random_state=0)
# print(x_train[0][1])

words = []
for line_index in range(len(x_train)):
    try:
        words.append(' '.join(x_train[line_index]))  # join tokens into one space-separated string per document
    except Exception:
        print(line_index)  # report any row that fails to join
# words[0]

# Text feature extraction: raw term-frequency counts (TF)
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(analyzer='word', max_features=4000, lowercase=False)  # cap the vocabulary at 4000 terms
cv.fit(words)
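
A quick sanity check on the fitted vectorizer: the vocabulary is capped at 4000 features, so transformed matrices have 4000 columns (get_feature_names_out requires scikit-learn >= 1.0; older versions use get_feature_names):

# Peek at the learned vocabulary and the transformed matrix shape
print(cv.get_feature_names_out()[:10])
print(cv.transform(words).shape)  # (n_train_docs, 4000)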

from sklearn.naive_bayes import MultinomialNB # multinomial naive Bayes classifier
classifier = MultinomialNB()
classifier.fit(cv.transform(words), y_train)

test_words = []
for line_index in range(len(x_test)):
    try:
        test_words.append(' '.join(x_test[line_index]))
    except Exception:
        print(line_index)  # report any row that fails to join
# test_words[0]

classifier.score(cv.transform(test_words),y_test)

0.8032
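
score reports mean accuracy on the held-out set. To see which categories are confused with each other, a confusion matrix is a natural next step; a minimal sketch reusing the classifier and vectorizer above:

from sklearn.metrics import confusion_matrix
y_pred = classifier.predict(cv.transform(test_words))
print(confusion_matrix(y_test, y_pred))  # rows: true labels; columns: predicted labels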

# Text feature extraction: TF-IDF weights
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(analyzer='word', max_features=4000, lowercase=False)
vectorizer.fit(words)

classifier = MultinomialNB()
classifier.fit(vectorizer.transform(words), y_train)

classifier.score(vectorizer.transform(test_words),y_test)

0.8128
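
TF-IDF features edge out raw term counts here (0.8128 vs. 0.8032), plausibly because down-weighting words that appear in almost every article leaves the classifier with more discriminative features.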
