# encoding:utf-8
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
import math
class NaiveBayes:
    """Gaussian naive Bayes classifier built from scratch.

    ``fit`` estimates, per class label, the mean and (population) standard
    deviation of every feature; ``predict`` picks the label whose product of
    per-feature Gaussian densities is largest.
    """

    def __init__(self):
        # Filled by fit(): {label: [(mean, stdev) per feature]}
        self.model = None

    @staticmethod
    def mean(x):
        """Return the arithmetic mean of the sequence *x*."""
        return sum(x) / float(len(x))

    def stdev(self, x):
        """Return the population standard deviation of *x* (divides by N, not N-1)."""
        avg = self.mean(x)
        return math.sqrt(sum((k - avg) ** 2 for k in x) / float(len(x)))

    def gaussian_probability(self, x, mean, stdev):
        """Return the Gaussian probability density of *x* for N(mean, stdev^2).

        NOTE(review): a zero ``stdev`` (constant feature within a class)
        raises ZeroDivisionError — callers must supply non-degenerate data.
        """
        exponent = math.exp(-((x - mean) ** 2) / (2 * stdev ** 2))
        return exponent / math.sqrt(2 * math.pi * stdev ** 2)

    def summarize(self, train_data):
        """Return [(mean, stdev), ...] — one pair per feature column.

        ``zip(*train_data)`` transposes the row-major samples into columns,
        e.g. zip(*[(1, 4), (2, 5), (3, 6)]) -> [(1, 2, 3), (4, 5, 6)].
        """
        return [(self.mean(col), self.stdev(col)) for col in zip(*train_data)]

    def fit(self, x, y):
        """Estimate per-class feature statistics from samples *x* and labels *y*."""
        labels = list(set(y))  # deduplicate the label values
        data = {label: [] for label in labels}
        # Group the feature rows by their class label.
        for features, label in zip(x, y):
            data[label].append(features)
        self.model = {
            label: self.summarize(rows) for label, rows in data.items()
        }
        return "gaussianNB train done!"

    def calculate_probabilities(self, input_data):
        """Return {label: unnormalized likelihood of *input_data* under that label}."""
        probabilities = {}
        for label, summaries in self.model.items():
            probabilities[label] = 1
            # Naive independence assumption: multiply per-feature densities.
            for i, (mean, stdev) in enumerate(summaries):
                probabilities[label] *= self.gaussian_probability(
                    input_data[i], mean, stdev)
        return probabilities

    def predict(self, x_test):
        """Return the label with the highest likelihood for one sample."""
        probabilities = self.calculate_probabilities(x_test)
        # max over the dict values is O(n); replaces the original sorted(...)[-1].
        return max(probabilities, key=probabilities.get)

    def score(self, x_test, y_test):
        """Return classification accuracy on the given test samples."""
        right = sum(1 for x, y in zip(x_test, y_test) if self.predict(x) == y)
        return right / float(len(x_test))
def create_data():
    """Build a binary iris dataset from the first two classes (rows 0-99).

    Returns:
        A ``(features, labels)`` pair of numpy arrays: ``features`` holds the
        four measurement columns, ``labels`` the class column.
    """
    iris = load_iris()
    frame = pd.DataFrame(iris.data, columns=iris.feature_names)
    frame['label'] = iris.target
    frame.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    subset = np.array(frame.iloc[:100, :])
    features, labels = subset[:, :-1], subset[:, -1]
    return features, labels
def _main():
    """Train and evaluate both the hand-written and scikit-learn Gaussian NB."""
    x, y = create_data()
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

    model = NaiveBayes()
    string = model.fit(x_train, y_train)
    print(string)
    print(model.predict([4.4, 3.2, 1.3, 0.2]))
    print(model.score(x_test, y_test))

    # scikit-learn reference implementation for comparison
    clf = GaussianNB()
    clf.fit(x_train, y_train)
    print(clf.score(x_test, y_test))
    print(clf.predict([[4.4, 3.2, 1.3, 0.2]]))


# Guard the demo so importing this module no longer trains models as a side effect.
if __name__ == "__main__":
    _main()
# Source: 《统计学习方法》 ("Statistical Learning Methods") Chapter 4 — Naive Bayes.
# (Blog-page metadata: latest recommended article published 2022-07-16 16:10:12.)