sklearn Quick Review: Naive Bayes

# Naive Bayes
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.naive_bayes import MultinomialNB,BernoulliNB,GaussianNB
iris = datasets.load_iris()  # load the iris dataset
x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target)
gnb = GaussianNB()  # Gaussian NB suits continuous features such as the iris measurements
gnb.fit(x_train, y_train)
# classification_report and confusion_matrix expect (y_true, y_pred) in that order
print(classification_report(y_test, gnb.predict(x_test)))
print(confusion_matrix(y_test, gnb.predict(x_test)))
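# Hedged aside (not part of the original walkthrough): the import above also pulls
# in MultinomialNB and BernoulliNB, so here is a quick comparison of the three
# variants on the same split. MultinomialNB expects non-negative counts and
# BernoulliNB binarizes features, so both are a poor fit for iris; GaussianNB,
# which models each feature as a per-class Gaussian, usually wins here.
for clf in (GaussianNB(), MultinomialNB(), BernoulliNB()):
    clf.fit(x_train, y_train)
    print(type(clf).__name__, clf.score(x_test, y_test))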

 

# News classification
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
news = fetch_20newsgroups(subset='all')
print(news.target_names)
print(len(news.data))  # number of articles (18846 in the 'all' subset)
print(len(news.target))  # one integer label (0-19) per article
print(len(news.target_names))  # the 20 class names
news.data[0]  # inspect the raw text of the first article
print(news.target[0])
print(news.target_names[news.target[0]])
x_train,x_test,y_train,y_test = train_test_split(news.data,news.target)
# train = fetch_20newsgroups(subset='train')
# x_train = train.data
# y_train = train.target
# test = fetch_20newsgroups(subset='test')
# x_test = test.data
# y_test = test.target
# CountVectorizer builds a vocabulary over the corpus; each document is converted
# into a feature vector whose entries count how many times each vocabulary word
# occurs in that document.
from sklearn.feature_extraction.text import CountVectorizer
texts=["dog cat fish","dog cat cat","fish bird", 'bird']
cv = CountVectorizer()
cv_fit=cv.fit_transform(texts)
print(cv.get_feature_names_out())  # get_feature_names() was removed in scikit-learn 1.2
print(cv_fit.toarray())
print(cv_fit.toarray().sum(axis=0))
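# Hedged aside: once fitted, the vocabulary is fixed, so transform() on new text
# simply ignores words it has never seen. 'shark' below is an invented example
# word and contributes nothing to the resulting count vector.
print(cv.transform(["dog shark shark"]).toarray())  # only 'dog' is counted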
from sklearn import model_selection 
from sklearn.naive_bayes import MultinomialNB
cv = CountVectorizer()
cv_data = cv.fit_transform(x_train)
mul_nb = MultinomialNB()
scores = model_selection.cross_val_score(mul_nb, cv_data, y_train, cv=3, scoring='accuracy')  
print("Accuracy: %0.3f" % (scores.mean())) 
# Term Frequency-Inverse Document Frequency (TF-IDF) is a statistic that measures
# how important a word is to a document within a corpus. Intuitively, it favors
# words that are frequent in the current document but rare across the corpus as a
# whole. This normalization keeps very common words from dominating a document's
# features even though they characterize it poorly (for example, 'a' and 'and'
# occur constantly in English yet say almost nothing about a text's topic).
from sklearn.feature_extraction.text import TfidfVectorizer
# a list of text documents
text = ["The quick brown fox jumped over the lazy dog.",
        "The dog.",
        "The fox"]
# create the transformer
vectorizer = TfidfVectorizer()
# tokenize and build the vocabulary
vectorizer.fit(text)
# inspect the fitted vocabulary and idf weights
print(vectorizer.vocabulary_)
print(vectorizer.idf_)
# encode the first document
vector = vectorizer.transform([text[0]])
# inspect the encoded document
print(vector.shape)
print(vector.toarray())
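# Hedged aside: with its defaults (smooth_idf=True), scikit-learn computes
# idf(t) = ln((1 + n) / (1 + df(t))) + 1, where n is the number of documents and
# df(t) is how many documents contain term t. A quick check against
# vectorizer.idf_ for this three-document corpus:
n = len(text)
for term, idx in sorted(vectorizer.vocabulary_.items(), key=lambda kv: kv[1]):
    df = sum(term in doc.lower() for doc in text)  # crude substring test; fine for these words
    print(term, np.log((1 + n) / (1 + df)) + 1, vectorizer.idf_[idx])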
# create the transformer
vectorizer = TfidfVectorizer()
# tokenize and build the vocabulary over the training set
tfidf_train = vectorizer.fit_transform(x_train)
scores = model_selection.cross_val_score(mul_nb, tfidf_train, y_train, cv=3, scoring='accuracy') 
print("Accuracy: %0.3f" % (scores.mean())) 
def get_stop_words():
    # read one stop word per line from a local word list
    result = set()
    with open('stopwords_en.txt', 'r') as f:
        for line in f:
            result.add(line.strip())
    return result
# load the stop words
stop_words = get_stop_words()
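# Hedged aside: if stopwords_en.txt is not available, scikit-learn ships a
# built-in English stop-word list that can be used instead:
# vectorizer = TfidfVectorizer(stop_words='english')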
# create the transformer; scikit-learn expects stop_words as a list rather than a set
vectorizer = TfidfVectorizer(stop_words=list(stop_words))
mul_nb = MultinomialNB(alpha=0.01)
# tokenize and build the vocabulary
tfidf_train = vectorizer.fit_transform(x_train)
scores = model_selection.cross_val_score(mul_nb, tfidf_train, y_train, cv=3, scoring='accuracy') 
print("Accuracy: %0.3f" % (scores.mean())) 
# Split the dataset. Note that fitting the vectorizer on all of news.data before
# splitting leaks test-set vocabulary and idf statistics into training; it is kept
# here to mirror the original run, but the Pipeline sketched below avoids it.
tfidf_data = vectorizer.fit_transform(news.data)
x_train,x_test,y_train,y_test = train_test_split(tfidf_data,news.target)
mul_nb.fit(x_train,y_train)
print(mul_nb.score(x_train, y_train))
print(mul_nb.score(x_test, y_test))
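# Hedged sketch: a leakage-free version of the same experiment, fitting the
# vectorizer only on the training split by bundling it with the classifier in a
# Pipeline. Scores will differ slightly from the run above.
from sklearn.pipeline import Pipeline
pipe = Pipeline([
    ('tfidf', TfidfVectorizer(stop_words=list(stop_words))),
    ('nb', MultinomialNB(alpha=0.01)),
])
x_train, x_test, y_train, y_test = train_test_split(news.data, news.target)
pipe.fit(x_train, y_train)
print(pipe.score(x_test, y_test))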



# Spell checker
import re
# read the corpus
text = open('big.txt').read()
# lowercase the text and keep only runs of a-z characters (the words)
text = re.findall('[a-z]+', text.lower())
# build a word -> frequency dictionary over the corpus
dic_words = {}
for t in text:
    dic_words[t] = dic_words.get(t, 0) + 1
print(len(dic_words))  # number of distinct words (printing the whole dict floods the output)
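# Hedged aside: the counting loop above is equivalent to the standard library's
# collections.Counter, which also gives most_common() for free. word_counts is
# my own name for this alternative, kept separate from dic_words.
from collections import Counter
word_counts = Counter(text)
print(word_counts.most_common(10))  # the ten most frequent corpus words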
# the alphabet
alphabet = 'abcdefghijklmnopqrstuvwxyz'
# return the set of all strings at edit distance 1 from word
def edits1(word):
    n = len(word)
    return set([word[0:i]+word[i+1:] for i in range(n)] +                     # deletion
               [word[0:i]+word[i+1]+word[i]+word[i+2:] for i in range(n-1)] + # transposition
               [word[0:i]+c+word[i+1:] for i in range(n) for c in alphabet] + # alteration
               [word[0:i]+c+word[i:] for i in range(n+1) for c in alphabet])  # insertion
# Walk through the deletion case by hand: removing each character of 'apple'
# in turn yields its five distance-1 deletions
apple = 'apple'
for i in range(len(apple)):
    print(apple[:i] + apple[i+1:])  # pple, aple, aple, appe, appl
# return the set of all strings at edit distance 2 from word
# (known() below then keeps only the candidates that are real dictionary words)
def edits2(word):
    return set(e2 for e1 in edits1(word) for e2 in edits1(e1))
e1 = edits1('something')
e2 = edits2('something')
print(len(e1) + len(e2))  # the candidate space grows rapidly with edit distance
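# Hedged sketch: materializing all of edits2 is wasteful, since almost none of
# those strings are real words. Norvig's original corrector interleaves the
# dictionary check with generation; known_edits2 below is that variant.
def known_edits2(word):
    return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in dic_words)
print(known_edits2('something'))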
def known(words):
    # keep only the candidates that actually occur in the corpus dictionary
    w = set()
    for word in words:
        if word in dic_words:
            w.add(word)
    return w

# Generate candidates in order of increasing edit distance, then pick the best match
def correct(word):
    # gather candidate words; `or` short-circuits, so if known([word]) is non-empty
    # it is used directly and the more expensive edit sets are never computed
    candidates = known([word]) or known(edits1(word)) or known(edits2(word)) or word
    # no known word within edit distance 2: return the input unchanged
    if word == candidates:
        return word
    # otherwise return the candidate with the highest corpus frequency
    max_num = 0
    for c in candidates:
        if dic_words[c] >= max_num:
            max_num = dic_words[c]
            candidate = c
    return candidate
print(correct('smoothig'))
print(correct('battl'))
print(correct('learww'))
print(correct('dagsgasdfeg'))  # nothing within distance 2 is a known word, so it comes back as-is
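# Hedged aside: the frequency-argmax loop in correct() can be written in one line
# with max() and a key function. correct_compact is my own variant; it falls back
# to {word} so candidates is always a set, and uses dic_words.get to tolerate
# unknown words. Tie-breaking may differ from the loop above.
def correct_compact(word):
    candidates = known([word]) or known(edits1(word)) or known(edits2(word)) or {word}
    return max(candidates, key=lambda w: dic_words.get(w, 0))
print(correct_compact('smoothig'))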

 
