数据准备
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from collections import Counter
import math
# data
def create_data():
    """Load the first 100 iris samples (labels 0 and 1) as (X, y) arrays.

    Returns:
        X: ndarray of shape (100, 4) — the four iris feature columns.
        y: ndarray of shape (100,) — the class labels (0.0 or 1.0).
    """
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    df['label'] = iris.target
    df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    # The first 100 rows contain only classes 0 and 1 -> a binary problem.
    data = np.array(df.iloc[:100, :])
    # print(data)
    return data[:, :-1], data[:, -1]


X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
核心代码
class NaiveBayes:
    """Gaussian Naive Bayes classifier built with the standard library only.

    After ``fit``, ``self.model`` maps each class label to a list of
    ``(mean, stdev)`` tuples, one tuple per feature column, e.g.::

        {0.0: [(5.0, 0.37), (3.42, 0.40), ...],
         1.0: [(5.8, 0.45), (2.7, 0.27), ...]}
    """

    def __init__(self):
        # label -> [(mean, stdev) per feature]; populated by fit().
        self.model = None

    @staticmethod
    def mean(X):
        """Arithmetic mean of a 1-D sequence of numbers."""
        return sum(X) / float(len(X))

    def stdev(self, X):
        """Population standard deviation of a 1-D sequence of numbers."""
        avg = self.mean(X)
        return math.sqrt(sum([pow(x - avg, 2) for x in X]) / float(len(X)))

    def gaussian_probability(self, x, mean, stdev):
        """Gaussian probability density of ``x`` under N(mean, stdev**2).

        NOTE(review): raises ZeroDivisionError when ``stdev`` is 0 (a feature
        constant within one class) — callers must avoid that case.
        """
        exponent = math.exp(-(math.pow(x - mean, 2) /
                              (2 * math.pow(stdev, 2))))
        return (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent

    def summarize(self, train_data):
        """Per-feature (mean, stdev) summaries of a list of sample rows.

        ``zip(*train_data)`` transposes the sample rows into feature
        columns, so one (mean, stdev) pair is produced per feature.
        """
        summaries = [(self.mean(i), self.stdev(i)) for i in zip(*train_data)]
        return summaries

    def fit(self, X, y):
        """Estimate per-class, per-feature Gaussian parameters from (X, y)."""
        labels = list(set(y))
        # Bucket the sample rows by class label.
        data = {label: [] for label in labels}
        for f, label in zip(X, y):
            data[label].append(f)
        # model: {label: [(mean, stdev) for each feature column]}
        self.model = {
            label: self.summarize(value)
            for label, value in data.items()
        }
        return 'gaussianNB train done!'

    def calculate_probabilities(self, input_data):
        """Unnormalized likelihood of one sample under each class.

        Multiplies the per-feature Gaussian densities (the "naive"
        conditional-independence assumption); class priors are not
        included, so values are comparable but do not sum to 1.
        """
        probabilities = {}
        for label, value in self.model.items():
            probabilities[label] = 1
            for i in range(len(value)):
                mean, stdev = value[i]  # trained parameters for feature i
                probabilities[label] *= self.gaussian_probability(
                    input_data[i], mean, stdev)
        return probabilities

    def predict(self, X_test):
        """Return the label with the highest likelihood for one sample."""
        # Sort (label, probability) pairs ascending by probability and
        # take the label of the last (largest) entry.
        label = sorted(
            self.calculate_probabilities(X_test).items(),
            key=lambda x: x[-1])[-1][0]
        return label

    def score(self, X_test, y_test):
        """Fraction of samples in X_test predicted as their y_test label."""
        right = 0
        for X, y in zip(X_test, y_test):
            label = self.predict(X)
            if label == y:
                right += 1
        return right / float(len(X_test))
for label, value in self.model.items():
probabilities[label] = 1
for i in range(len(value)):
mean, stdev = value[i]
probabilities[label] *= self.gaussian_probability(
input_data[i], mean, stdev)
重点!!:
分label=0遍历一次四元素二元组,和label=1遍历一次四元素二元组。
len(value)=4
>>model.model.items()
>>dict_items(
(0.0, [(4.96875, 0.3786798614925278), (3.328125000000001, 0.37519526166384365), (1.4562499999999996, 0.16189792308735776), (0.2468750000000001, 0.09995115994824672)]),
(1.0, [(5.91578947368421, 0.4676516448616729), (2.78421052631579, 0.30480909969621606), (4.292105263157894, 0.4521187425231565), (1.3447368421052632, 0.19289671141374656)])]
)
>>model.calculate_probabilities([1.11111, 2.2,3,4])
>>{0.0: 0.0, 1.0: 4.691960853801238e-67}
>>import pandas as pd
>>pd.DataFrame(model.model.values())
zip(*a)
- zip(*a)与zip(a)相反,理解为解压
>>> a = list(zip([1,2,3],[4,5,6],[4,5,6,7,8]))
>>> a
[(1, 4, 4), (2, 5, 5), (3, 6, 6)]
>>> c,d,e = zip(*a)
>>> c
(1, 2, 3)
>>> d
(4, 5, 6)
>>> e
(4, 5, 6)
set()
- Python 中 set 基本特点:
(1) 无序性
(2) 确定性
(3) 不重复性 - Python 中 set() 实质:
内部进行 可迭代性的 for 循环 - 创建set:
>>> set([1,2,3])
{1, 2, 3}
>>> set('123')
{'1', '2', '3'}
>>> set()
set() #创建一个空set
>>>set("Hello")
{'H', 'e', 'l', 'o'}
-
增加一个元素:
add()用于增加一个元素值,update():是把要传入的元素拆分,做为个体传入到集合中
update([]),用于增加多个元素值,参数为list,注意如果用add增加多个值,会报参数类型错误。
>>> a = set()
>>> a.add('python')
>>> a
{'python'}
>>> a.add('love')
>>> a
{'love', 'python'}
>>> a.add('i')
>>> a
{'love', 'i', 'python'}
>>> a = set('love')
>>> a
{'l', 'v', 'o', 'e'}
>>> a.update('python')
>>> a
{'h', 'y', 'e', 'n', 'o', 'p', 't', 'l', 'v'}
>>> a.update(['you','me'])
>>> a
{'e', 'h', 'l', 'me', 'n', 'o', 'p', 't', 'v', 'y', 'you'}
列表中 append 字典(注意:append 存入的是同一字典对象的引用,循环结束后列表里三个元素指向同一字典,因此内容相同)
a = (0,0)
b = ('a','b')
d = dict(zip(b,a))
l = []
for i in [1, 2, 3]:
d['a'] = i
d['b'] = i*2
l.append(d)
print (l)
[{'a': 3, 'b': 6}, {'a': 3, 'b': 6}, {'a': 3, 'b': 6}]
items()
字典里对应的一对键和值以元组的形式(键, 值),存储为所生成序列里的单个元素
favorite_languages = {
'jen':[1,2,3],
'sarah':'c',
'edward':'rby',
'phil':'python',
}
test_items = favorite_languages.items()
print(test_items)
print('\n测试items()方法的返回值是不是list类\n')
print(isinstance(test_items,list))
print('\n测试items()方法的返回值是哪种类\n')
print(type(test_items))
print('\n测试dict_items类的每一个元素是哪种类\n')
for i in test_items:
print(i)
print(type(i))
dict_items([('jen', [1, 2, 3]), ('sarah', 'c'), ('edward', 'rby'), ('phil', 'python')])
测试items()方法的返回值是不是list类
False
测试items()方法的返回值是哪种类
<class 'dict_items'>
测试dict_items类的每一个元素是哪种类
('jen', [1, 2, 3])
<class 'tuple'>
('sarah', 'c')
<class 'tuple'>
('edward', 'rby')
<class 'tuple'>
('phil', 'python')
<class 'tuple'>
dict 妙用
probabilities = {}
probabilities[0] = 1
probabilities[1] = 2
probabilities
{0: 1, 1: 2}
高斯模型
- 高斯分布:正态分布,一种连续型变量的概率分布。
- 高斯概率分布是由均值μ和标准差σ唯一确定的。
在这里插入图片描述
- 在贝叶斯分类中,高斯模型就是用来处理连续型特征变量的,当使用此模型时,我们会假定特征属于高斯分布,然后基于训练样本计算特征均值和标准差,这样就可以得到此特征下每一个属性值的先验概率。