[Machine Learning] News Classification with the Naive Bayes Algorithm

import pandas as pd
import jieba  # pip install jieba

Data source: http://www.sogou.com/labs/resource/ca.php

df_news = pd.read_table('./data/val.txt',names=['category','theme','URL','content'],encoding='utf-8')
df_news = df_news.dropna()
df_news.head()

[Figure: the first few rows of df_news]
Tokenize the content column with jieba

content = df_news.content.values.tolist()

content_S = []
for line in content:
    current_segment = jieba.lcut(line)
    if len(current_segment) > 1 and current_segment != ['\r\n']:  # skip lines that are only a newline
        content_S.append(current_segment)

df_content=pd.DataFrame({'content_S':content_S})

stopwords=pd.read_csv("stopwords.txt",index_col=False,sep="\t",quoting=3,names=['stopword'], encoding='utf-8')


Remove stop words


def drop_stopwords(contents, stopwords):
    contents_clean = []
    all_words = []
    for line in contents:
        line_clean = []
        for word in line:
            if word in stopwords:
                continue
            line_clean.append(word)
            all_words.append(str(word))
        contents_clean.append(line_clean)
    return contents_clean, all_words

contents = df_content.content_S.values.tolist()    
stopwords = stopwords.stopword.values.tolist()
contents_clean,all_words = drop_stopwords(contents,stopwords)
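
Note that the check "word in stopwords" scans a Python list for every token, which gets slow on a large corpus. A minimal sketch of the same filtering with an O(1) set lookup (stopwords_set and drop_stopwords_fast are names made up for this example):

# Same logic as drop_stopwords above, but with a set for constant-time membership tests
stopwords_set = set(stopwords)

def drop_stopwords_fast(contents, stopwords_set):
    contents_clean = []
    all_words = []
    for line in contents:
        line_clean = [word for word in line if word not in stopwords_set]
        contents_clean.append(line_clean)
        all_words.extend(str(word) for word in line_clean)
    return contents_clean, all_words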

Word cloud visualization

df_content=pd.DataFrame({'contents_clean':contents_clean})

df_all_words=pd.DataFrame({'all_words':all_words})

words_count = df_all_words.groupby('all_words').size().reset_index(name='count')
words_count = words_count.sort_values(by='count', ascending=False)
words_count.head().values

from wordcloud import WordCloud
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)

wordcloud=WordCloud(font_path="./data/simhei.ttf",background_color="white",max_font_size=80)
word_frequence = {x[0]:x[1] for x in words_count.head(100).values}
wordcloud=wordcloud.fit_words(word_frequence)
plt.imshow(wordcloud)

[Figure: word cloud of the 100 most frequent words]

LDA: topic modeling

Input format: the whole tokenized corpus as a list of lists (one list of tokens per document)

from gensim import corpora, models, similarities
import gensim

dictionary = corpora.Dictionary(contents_clean)
corpus = [dictionary.doc2bow(sentence) for sentence in contents_clean]
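
Each entry of corpus is a bag-of-words vector, i.e. a list of (token_id, count) pairs, and dictionary maps the ids back to tokens. A quick sanity check:

# Inspect the first document's bag-of-words form and decode one token id
print(corpus[0][:5])                 # a few (token_id, count) pairs
print(dictionary[corpus[0][0][0]])   # the token behind the first id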


lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)

print (lda.print_topic(1, topn=5))

0.012*"撒" + 0.004*"录制" + 0.003*"航母" + 0.003*"孩子" + 0.002*"说"


for topic in lda.print_topics(num_topics=20, num_words=6):
    print (topic)

(0, '0.008*"万" + 0.005*"号" + 0.004*"N" + 0.004*"转" + 0.004*"V" + 0.003*"L"')
(1, '0.012*"撒" + 0.004*"录制" + 0.003*"航母" + 0.003*"孩子" + 0.002*"说" + 0.002*"中"')
(2, '0.010*"中" + 0.009*"电影" + 0.008*"观众" + 0.006*"说" + 0.005*"导演" + 0.005*"孩子"')
(3, '0.012*"中国" + 0.008*"中" + 0.006*"节目" + 0.004*"卫视" + 0.004*"说" + 0.004*"音乐"')
(4, '0.006*"中国" + 0.004*"中" + 0.004*"发展" + 0.004*"文化" + 0.004*"小说" + 0.003*"市场"')
(5, '0.006*"中" + 0.004*"克" + 0.003*"食物" + 0.003*"万" + 0.003*"P" + 0.003*"维生素"')
(6, '0.004*"万" + 0.003*"号" + 0.003*"装扮" + 0.003*"M" + 0.002*"中" + 0.002*"企业"')
(7, '0.004*"台词" + 0.004*"英格兰" + 0.003*"中" + 0.003*"乌克兰" + 0.003*"球迷" + 0.002*"批"')
(8, '0.006*"说" + 0.004*"中" + 0.004*"麽" + 0.004*"女人" + 0.003*"老公" + 0.003*"离婚"')
(9, '0.026*"e" + 0.025*"a" + 0.017*"o" + 0.017*"n" + 0.017*"i" + 0.016*"r"')
(10, '0.009*"说" + 0.008*"比赛" + 0.006*"网友" + 0.005*"中" + 0.004*"o" + 0.004*"i"')
(11, '0.005*"中" + 0.004*"水" + 0.004*"肌肤" + 0.003*"吃" + 0.003*"说" + 0.003*"喝"')
(12, '0.006*"中" + 0.006*"球队" + 0.005*"选手" + 0.005*"导演" + 0.004*"两人" + 0.004*"V"')
(13, '0.005*"飞行" + 0.004*"葡萄牙" + 0.004*"元" + 0.003*"中" + 0.003*"西班牙" + 0.003*"下半场"')
(14, '0.006*"中" + 0.005*"剧中" + 0.005*"剧组" + 0.004*"剧" + 0.004*"李小璐" + 0.003*"""')
(15, '0.008*"节目" + 0.007*"中" + 0.005*"主持人" + 0.005*"卫视" + 0.004*"说" + 0.004*"中国"')
(16, '0.006*"中" + 0.006*"中国" + 0.005*"T" + 0.005*"S" + 0.003*"I" + 0.003*"比赛"')
(17, '0.008*"该剧" + 0.008*"饰演" + 0.006*"官兵" + 0.006*"中" + 0.004*"部队" + 0.003*"中国"')
(18, '0.006*"男人" + 0.006*"女性" + 0.005*"比基尼" + 0.004*"中" + 0.004*"阿迪达斯" + 0.003*"酸奶"')
(19, '0.013*"男人" + 0.011*"中" + 0.008*"女人" + 0.007*"说" + 0.005*"考生" + 0.004*"工作"')
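
Besides printing topic keywords, the fitted model can report the topic mixture of a single document via get_document_topics; a minimal sketch (the minimum_probability value is an assumption, not from the original code):

# Topic distribution of the first document; near-zero topics are filtered out
doc_topics = lda.get_document_topics(corpus[0], minimum_probability=0.01)
print(doc_topics)   # a list of (topic_id, probability) pairs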

Classification with Naive Bayes

df_train=pd.DataFrame({'contents_clean':contents_clean,'label':df_news['category']})

df_train['label'] = df_train['label'].astype('category').cat.codes
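
cat.codes turns each category name into an integer, and those codes index into the sorted category list, so the mapping back to names can be recovered as below (label_mapping is a name made up for this example):

# Integer code -> original category name; assumes df_news['category'] still holds the string labels
label_mapping = dict(enumerate(df_news['category'].astype('category').cat.categories))
print(label_mapping)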

from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(df_train['contents_clean'].values, df_train['label'].values, random_state=1)

# Convert each tokenized article (a list of words) into a space-separated string, as required for word-count features
words = []
for line_index in range(len(x_train)):
    try:
        words.append(' '.join(x_train[line_index]))
    except Exception:
        print(line_index)
words[0]


from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer(analyzer='word', max_features=4000,  lowercase = False)
vec.fit(words)
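
To sanity-check what the vectorizer learned, you can peek at a few of its 4000 vocabulary entries (get_feature_names_out needs a recent scikit-learn; older versions use get_feature_names):

# A few tokens from the fitted vocabulary, plus its total size
print(vec.get_feature_names_out()[:10])
print(len(vec.vocabulary_))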

from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(vec.transform(words), y_train)


test_words = []
for line_index in range(len(x_test)):
    try:
        test_words.append(' '.join(x_test[line_index]))
    except Exception:
        print(line_index)
test_words[0]

classifier.score(vec.transform(test_words), y_test)

0.804
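
With the fitted vectorizer and classifier, a brand-new article can be scored end to end: cut it with jieba, drop stop words, join with spaces, and call predict. A minimal sketch (new_text is a made-up example sentence):

# Hypothetical new article; apply the same stop-word filtering used on the training data
new_text = "这场比赛的下半场,球队的表现让球迷非常满意"
new_segment = [w for w in jieba.lcut(new_text) if w not in stopwords]
pred = classifier.predict(vec.transform([' '.join(new_segment)]))
print(pred)   # an integer label code; map it back with the label mapping shown earlier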

TfidfVectorizer

from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(analyzer='word', max_features=4000,  lowercase = False)
vectorizer.fit(words)

from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(vectorizer.transform(words), y_train)

classifier.score(vectorizer.transform(test_words), y_test)

0.8152
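
The single train/test split above gives just one point estimate per feature type (0.804 for counts vs. 0.8152 for TF-IDF). A more robust comparison is cross-validation over the whole space-joined corpus; a minimal sketch with a scikit-learn Pipeline (all_texts, all_labels and tfidf_nb are names made up for this example):

from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

# Rebuild the space-joined corpus and labels from the cleaned training frame
all_texts = [' '.join(doc) for doc in df_train['contents_clean'].values]
all_labels = df_train['label'].values

tfidf_nb = Pipeline([
    ('tfidf', TfidfVectorizer(analyzer='word', max_features=4000, lowercase=False)),
    ('nb', MultinomialNB()),
])

# 5-fold cross-validated accuracy of the TF-IDF + Naive Bayes pipeline
scores = cross_val_score(tfidf_nb, all_texts, all_labels, cv=5)
print(scores.mean())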