Machine Learning: Bayesian News Classification


The preprocessing stage mainly relies on jieba word segmentation and LDA topic modeling. The full code follows.

#!/usr/bin/env python
# coding: utf-8

# In[1]:


import pandas as pd
import numpy as np
import jieba
#pip install jieba


# ### Data source: http://www.sogou.com/labs/resource/ca.php ###

# In[2]:


df_news = pd.read_table('./data/val.txt',names=['category','theme','URL','content'],encoding='utf-8')
df_news = df_news.dropna()
df_news.head()


# In[3]:


df_news.shape


# ### Word segmentation: using the jieba tokenizer ###

# In[4]:


content = df_news.content.values.tolist()
print (content[1000])


# In[5]:


content_S = []
for line in content:
    current_segment = jieba.lcut(line)
    # keep lines that segment into more than one token and are not just a newline
    if len(current_segment) > 1 and ''.join(current_segment).strip() != '':
        content_S.append(current_segment)
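
# For reference, jieba.lcut returns a plain Python list of tokens; a tiny
# illustration (the exact segmentation may vary with the jieba dictionary version):

print(jieba.lcut("我来到北京清华大学"))
# typically something like: ['我', '来到', '北京', '清华大学']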


# In[6]:


content_S[1000]


# In[7]:


df_content=pd.DataFrame({'content_S':content_S})
df_content.head()


# # Stopword list

# In[9]:


stopwords=pd.read_csv("stopwords.txt",index_col=False,sep="\t",quoting=3,names=['stopword'], encoding='utf-8')
stopwords.head(10)


# In[10]:


def drop_stopwords(contents,stopwords):
    contents_clean = []
    all_words = []
    for line in contents:
        line_clean = []
        for word in line:
            if word in stopwords:
                continue
            line_clean.append(word)
            all_words.append(str(word))
        contents_clean.append(line_clean)
    return contents_clean,all_words
    #print (contents_clean)
        

contents = df_content.content_S.values.tolist()    
stopwords = stopwords.stopword.values.tolist()
contents_clean,all_words = drop_stopwords(contents,stopwords)

#df_content.content_S.isin(stopwords.stopword)
#df_content=df_content[~df_content.content_S.isin(stopwords.stopword)]
#df_content.head()
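
# A minor efficiency note: `word in stopwords` on a Python list is a linear scan;
# converting the stopword list to a set gives constant-time lookups. A minimal
# sketch with the same logic as drop_stopwords above (drop_stopwords_fast is a
# hypothetical variant, not part of the original notebook):

stopword_set = set(stopwords)

def drop_stopwords_fast(contents, stopword_set):
    contents_clean, all_words = [], []
    for line in contents:
        line_clean = [word for word in line if word not in stopword_set]
        contents_clean.append(line_clean)
        all_words.extend(str(word) for word in line_clean)
    return contents_clean, all_words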


# In[11]:


df_content=pd.DataFrame({'contents_clean':contents_clean})
df_content.head()


# In[12]:


df_all_words=pd.DataFrame({'all_words':all_words})
df_all_words.head()



# In[14]:


# count how many times each word appears across the whole corpus
words_count = (df_all_words.groupby('all_words')
               .agg(count=pd.NamedAgg(column='all_words', aggfunc='size'))
               .reset_index()
               .sort_values(by='count', ascending=False))
words_count.head()


# In[15]:


from wordcloud import WordCloud
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)

wordcloud=WordCloud(font_path="./data/simhei.ttf",background_color="white",max_font_size=80)
word_frequence = {x[0]:x[1] for x in words_count.head(10000).values}
wordcloud=wordcloud.fit_words(word_frequence)
plt.imshow(wordcloud)


# ### TF-IDF: keyword extraction ###

# In[16]:


import jieba.analyse
index = 1000
print (df_news['content'][index])
content_S_str = "".join(content_S[index])  
print ("  ".join(jieba.analyse.extract_tags(content_S_str, topK=10, withWeight=False)))


# ### LDA: topic model ###
# 
# Required input format: a list of lists, i.e. the entire corpus after word segmentation

# In[17]:


from gensim import corpora, models, similarities
import gensim
#http://radimrehurek.com/gensim/


# In[18]:


# build the word-to-id mapping, effectively a bag-of-words dictionary
dictionary = corpora.Dictionary(contents_clean)
corpus = [dictionary.doc2bow(sentence) for sentence in contents_clean]
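
# For intuition, a minimal sketch of what doc2bow produces on a toy corpus
# (the ids in the comments are illustrative; actual ids depend on the dictionary):

toy_docs = [["猫", "狗", "猫"], ["狗", "鱼"]]
toy_dict = corpora.Dictionary(toy_docs)
print(toy_dict.token2id)                         # e.g. {'狗': 0, '猫': 1, '鱼': 2}
print([toy_dict.doc2bow(d) for d in toy_docs])   # e.g. [[(0, 1), (1, 2)], [(0, 1), (2, 1)]]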


# In[19]:


lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)  # like K-means, the number of topics is chosen by hand


# In[20]:


# result for topic No. 1
print (lda.print_topic(1, topn=5))


# In[30]:


for topic in lda.print_topics(num_topics=20, num_words=5):
    print (topic[1])
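
# The topic distribution of an individual document can be inspected as well;
# a minimal sketch on the first document in the corpus (topic ids and
# probabilities vary from run to run):

doc_topics = lda.get_document_topics(corpus[0])
print(sorted(doc_topics, key=lambda x: x[1], reverse=True)[:3])  # top 3 topics of document 0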


# In[31]:


df_train=pd.DataFrame({'contents_clean':contents_clean,'label':df_news['category']})
df_train.tail()


# In[32]:


df_train.label.unique()


# In[33]:


label_mapping = {"汽车": 1, "财经": 2, "科技": 3, "健康": 4, "体育":5, "教育": 6,"文化": 7,"军事": 8,"娱乐": 9,"时尚": 0}
df_train['label'] = df_train['label'].map(label_mapping)
df_train.head()


# In[34]:


from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(df_train['contents_clean'].values, df_train['label'].values, random_state=1)


# In[35]:


#x_train = x_train.flatten()
x_train[0][1]


# In[36]:


words = []
for line_index in range(len(x_train)):
    try:
        # join the token list back into a whitespace-separated string for CountVectorizer
        words.append(' '.join(x_train[line_index]))
    except Exception:
        print(line_index)
words[0]


# In[37]:


print (len(words))


# In[38]:


from sklearn.feature_extraction.text import CountVectorizer
texts=["dog cat fish","dog cat cat","fish bird", 'bird']
cv = CountVectorizer()
cv_fit=cv.fit_transform(texts)

print(cv.get_feature_names_out())   # on scikit-learn < 1.0 this was cv.get_feature_names()
print(cv_fit.toarray())


print(cv_fit.toarray().sum(axis=0))


# In[39]:


from sklearn.feature_extraction.text import CountVectorizer
texts=["dog cat fish","dog cat cat","fish bird", 'bird']
cv = CountVectorizer(ngram_range=(1,4))
cv_fit=cv.fit_transform(texts)

print(cv.get_feature_names_out())   # on scikit-learn < 1.0 this was cv.get_feature_names()
print(cv_fit.toarray())


print(cv_fit.toarray().sum(axis=0))


# In[40]:


from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer(analyzer='word', max_features=4000,  lowercase = False)
vec.fit(words)


# In[41]:


from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(vec.transform(words), y_train)


# In[42]:


test_words = []
for line_index in range(len(x_test)):
    try:
        # same joining step as for the training set
        test_words.append(' '.join(x_test[line_index]))
    except Exception:
        print(line_index)
test_words[0]


# In[43]:


classifier.score(vec.transform(test_words), y_test)
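
# Accuracy alone hides per-class behavior; a hedged sketch using scikit-learn's
# classification_report on the same test split (labels follow label_mapping above):

from sklearn.metrics import classification_report

y_pred = classifier.predict(vec.transform(test_words))
print(classification_report(y_test, y_pred))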


# In[44]:


from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(analyzer='word', max_features=4000,  lowercase = False)
vectorizer.fit(words)


# In[45]:


from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB()
classifier.fit(vectorizer.transform(words), y_train)


# In[46]:


classifier.score(vectorizer.transform(test_words), y_test)
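
# Finally, a minimal sketch of classifying a brand-new piece of text with the
# TF-IDF pipeline above. The sample sentence is made up, and predict_category /
# inverse_label_mapping are helpers introduced here, not part of the original notebook:

inverse_label_mapping = {v: k for k, v in label_mapping.items()}

def predict_category(text):
    # segment, drop stopwords, then join with spaces to match the training format
    segmented = [w for w in jieba.lcut(text) if w not in stopwords]
    features = vectorizer.transform([' '.join(segmented)])
    return inverse_label_mapping[classifier.predict(features)[0]]

print(predict_category("国足在世界杯预选赛中取得胜利"))  # would be expected to map to 体育 (sports)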

