Python Data Analysis and Machine Learning - News Classification Task

Source code download:

http://download.csdn.net/download/adam_zs/10191189

import pandas as pd
import jieba
import numpy

# pandas display options so that wide frames print in full
pd.set_option('display.max_rows', 9999)
pd.set_option('display.max_columns', 9999)
pd.set_option('display.width', 9999)

df_news = pd.read_table("./data/val.txt", names=['category', 'theme', 'URL', 'content'], encoding="utf-8")
df_news = df_news.dropna()  # drop rows with missing values
# print(df_news.shape)  # (5000, 4)

content = df_news["content"].values.tolist()  # raw news articles as a list of strings
# print(content[1000])
content_S = []  # segmented articles: one token list per article
for line in content:
    current_segment = jieba.lcut(line)
    if len(current_segment) > 1 and current_segment != ['\r\n']:  # skip entries that are only a line break
        content_S.append(current_segment)
# print(content_S[1000])
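# Optional sanity check of the segmentation on one made-up sentence (not from
# the dataset), just to see the kind of token list jieba.lcut returns.
print(jieba.lcut("机器学习可以用来做新闻分类"))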

df_content = pd.DataFrame({"content_S": content_S})
# print(df_content.head())

stopwords = pd.read_csv("stopwords.txt", index_col=False, sep="\t", quoting=3, names=['stopword'], encoding='utf-8')


def drop_stopwords(contents, stopwords):
    '''Remove stopwords from the segmented articles.'''
    contents_clean = []  # articles with stopwords removed
    all_words = []  # every remaining word across all articles (stopwords excluded)
    for line in contents:
        line_clean = []
        for word in line:
            if word in stopwords:
                continue
            line_clean.append(word)
            all_words.append(word)
        contents_clean.append(line_clean)
    return contents_clean, all_words
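
# Tiny illustration of drop_stopwords on made-up tokens and a made-up stopword list.
toy_contents = [["今天", "的", "新闻", "很", "精彩"]]
toy_stopwords = ["的", "很"]
toy_clean, toy_words = drop_stopwords(toy_contents, toy_stopwords)
print(toy_clean)  # [['今天', '新闻', '精彩']]
print(toy_words)  # ['今天', '新闻', '精彩']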


contents = df_content["content_S"].values.tolist()
stopwords = stopwords["stopword"].values.tolist()

contents_clean, all_words = drop_stopwords(contents, stopwords)

df_content = pd.DataFrame({"contents_clean": contents_clean})
df_all_words = pd.DataFrame({'all_words': all_words})

words_count = df_all_words.groupby(by=['all_words'])['all_words'].agg(numpy.size).rename("count")
words_count = words_count.reset_index().sort_values(by=["count"], ascending=False)
# print(words_count.head().values)
'''
[['中' 5199]
 ['中国' 3115]
 ['说' 3055]
 ['S' 2646]
 ['万' 2390]]
'''
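# The same per-word counts can also be computed directly with value_counts;
# shown here only as a sketch of the equivalent, for comparison.
words_count_alt = (df_all_words['all_words']
                   .value_counts()
                   .rename_axis('all_words')
                   .reset_index(name='count'))
# print(words_count_alt.head())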
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import matplotlib

matplotlib.rcParams['figure.figsize'] = (10.0, 5.0)

wordcloud = WordCloud(font_path="./data/simhei.ttf", background_color="white", max_font_size=80)
word_frequency = {x[0]: x[1] for x in words_count.head(100).values}
wordcloud = wordcloud.fit_words(word_frequency)
plt.imshow(wordcloud)
plt.show()
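# The rendered cloud can also be written straight to disk; the output path below
# is only an example.
wordcloud.to_file("./data/wordcloud_top100.png")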

'''TF-IDF: keyword extraction'''
import jieba.analyse

index = 2400
print(df_news['content'][index])
content_S_str = "".join(content_S[index])
print("  ".join(jieba.analyse.extract_tags(content_S_str, topK=5, withWeight=False)))

'''LDA: topic model'''
from gensim import corpora, models, similarities
import gensim

# Build the word-to-id mapping; each document then becomes a bag-of-words vector
dictionary = corpora.Dictionary(contents_clean)
corpus = [dictionary.doc2bow(sentence) for sentence in contents_clean]
lda = gensim.models.ldamodel.LdaModel(corpus=corpus, id2word=dictionary, num_topics=20)  # num_topics: number of topics to learn
# Top words of topic #1
print(lda.print_topic(1, topn=5))
for topic in lda.print_topics(num_topics=20, num_words=5):
    print(topic[1])
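
# Topic mixture for a single cleaned document (index 0 is arbitrary);
# get_document_topics returns (topic_id, probability) pairs for that document.
doc_bow = dictionary.doc2bow(contents_clean[0])
print(lda.get_document_topics(doc_bow))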
df_train = pd.DataFrame({'contents_clean': contents_clean, 'label': df_news['category']})
df_train.tail()
df_train.label.unique()
label_mapping = {"汽车": 1, "财经": 2, "科技": 3, "健康": 4, "体育": 5, "教育": 6, "文化": 7, "军事": 8, "娱乐": 9, "时尚": 0}
df_train['label'] = df_train['label'].map(label_mapping)
df_train.head()
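# Rough class-balance check: number of articles per mapped label.
print(df_train['label'].value_counts())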

from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(df_train['contents_clean'].values, df_train['label'].values,
                                                    random_state=1)
x_train[0][1]

# join each training article's token list back into a single space-separated string
words = []
for line_index in range(len(x_train)):
    try:
        words.append(' '.join(x_train[line_index]))
    except:
        print(line_index)
words[0]

print(len(words))
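
# A compact equivalent of the loop above, assuming every row of x_train is a token list.
words_alt = [' '.join(tokens) for tokens in x_train]
# print(len(words_alt))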

from sklearn.feature_extraction.text import CountVectorizer

texts = ["dog cat fish", "dog cat cat", "fish bird", 'bird']
cv = CountVectorizer()
cv_fit = cv.fit_transform(texts)

print(cv.get_feature_names_out())  # the unique vocabulary terms
print(cv_fit.toarray())  # each document as a term-count vector

print(cv_fit.toarray().sum(axis=0))

from sklearn.feature_extraction.text import CountVectorizer

texts = ["dog cat fish", "dog cat cat", "fish bird", 'bird']
cv = CountVectorizer(ngram_range=(1, 4))  # ngram_range builds n-grams from adjacent words; (1, 2) is more typical
cv_fit = cv.fit_transform(texts)

print(cv.get_feature_names_out())
print(cv_fit.toarray())

print(cv_fit.toarray().sum(axis=0))
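
# Pairing each n-gram with its total count across the toy corpus, just for readability
# (get_feature_names_out is the current scikit-learn API).
ngram_counts = pd.DataFrame({"term": cv.get_feature_names_out(),
                             "count": cv_fit.toarray().sum(axis=0)})
print(ngram_counts.sort_values("count", ascending=False))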

from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer(analyzer='word', max_features=4000, lowercase=False)
vec.fit(words)

# Naive Bayes classifier
from sklearn.naive_bayes import MultinomialNB

classifier = MultinomialNB()

test_words = []
for line_index in range(len(x_test)):
    try:
        test_words.append(' '.join(x_test[line_index]))
    except:
        print(line_index)
test_words[0]

classifier.fit(vec.transform(words), y_train)
classifier.score(vec.transform(test_words), y_test)
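
# A more detailed view than plain accuracy: per-class precision and recall of the
# count-based model on the held-out test set.
from sklearn.metrics import classification_report
y_pred = classifier.predict(vec.transform(test_words))
print(classification_report(y_test, y_pred))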

'''TF-IDF features'''
from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(analyzer='word', max_features=4000, lowercase=False)
vectorizer.fit(words)
from sklearn.naive_bayes import MultinomialNB

classifier = MultinomialNB()
classifier.fit(vectorizer.transform(words), y_train)

classifier.score(vectorizer.transform(test_words), y_test)
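
# Classifying a brand-new article with the TF-IDF pipeline; the headline below is made up.
new_doc = "球队在昨晚的比赛中逆转取胜"
new_tokens = [w for w in jieba.lcut(new_doc) if w not in stopwords]
print(classifier.predict(vectorizer.transform([' '.join(new_tokens)])))  # numeric label, see label_mapping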


