# Naive Bayes is a generative model: it learns the joint distribution P(X, Y)
# from data and then derives the posterior P(Y|X). It makes a conditional
# independence assumption on the feature distribution.
# Model: Bayes' theorem
# Strategy: maximize the posterior probability (equivalent to minimizing expected risk)
# Algorithm: omitted
# Naive Bayes can estimate probabilities in two ways: maximum likelihood
# estimation or Bayesian (smoothed) estimation. It can be further extended
# into a Bayesian network.
import numpy as np
def Train(X_train, Y_train, feature):
    """Estimate naive-Bayes parameters by maximum likelihood.

    Args:
        X_train: list of samples, each a pair [x1, x2] (e.g. [2, 'S']).
        Y_train: list of class labels, each 1 or -1, aligned with X_train.
        feature: per-index feature value table; feature[j] = [x1_value, x2_value]
            gives the j-th possible value of each of the two features.

    Returns:
        (prior_probability, conditional_probability) where
        prior_probability[i] = P(Y = label[i]) and
        conditional_probability[i][j][k] = P(Xk = feature[j][k] | Y = label[i]).

    Side effects:
        Sets module globals `class_num` and `label`, which Predict reads.
    """
    # NOTE: kept as globals for backward compatibility — Predict falls back to them.
    global class_num, label
    class_num = 2
    label = [1, -1]
    # BUG FIX: the original overwrote the `feature` argument with a hard-coded
    # copy and hard-coded feature_len = 3; now the caller's table is honored.
    feature_len = len(feature)

    # Class counts: pos = #{Y == 1}, neg = everything else (labels are 1/-1).
    pos = sum(1 for y in Y_train if y == 1)
    neg = len(Y_train) - pos

    # Prior P(Y).
    prior_probability = np.zeros(class_num)
    prior_probability[0] = pos / len(X_train)
    prior_probability[1] = neg / len(X_train)

    # Count occurrences of each feature value per class; with the naive
    # independence assumption P(X1, X2 | Y) = P(X1 | Y) * P(X2 | Y).
    conditional_probability = np.zeros((class_num, feature_len, 2))
    for i in range(class_num):
        for j in range(feature_len):
            for x, y in zip(X_train, Y_train):
                if y == label[i]:
                    if x[0] == feature[j][0]:
                        conditional_probability[i][j][0] += 1
                    if x[1] == feature[j][1]:
                        conditional_probability[i][j][1] += 1

    # Normalize counts into conditional probabilities P(Xk | Y).
    class_label_num = [pos, neg]
    for i in range(class_num):
        conditional_probability[i] /= class_label_num[i]

    return prior_probability, conditional_probability
def Predict(X_test, prior_probability, conditional_probability, feature, labels=None):
    """Score each class for one test sample under the naive-Bayes model.

    Args:
        X_test: one sample [x1, x2] (e.g. [2, 'S']).
        prior_probability: P(Y) array produced by Train.
        conditional_probability: P(Xk | Y) table produced by Train.
        feature: the same feature value table that was passed to Train.
        labels: class labels in the same order as prior_probability.
            Defaults to the module-level `label` set by Train (backward
            compatible with the original global-based behavior).

    Returns:
        A 2 x n_class array: row 0 holds the (unnormalized) posterior scores
        P(X1|Y) * P(X2|Y) * P(Y), row 1 holds the corresponding labels.
    """
    # GENERALIZATION: allow labels to be passed explicitly instead of only
    # reading the global installed by Train.
    if labels is None:
        labels = label
    n_class = len(labels)

    result = np.zeros(n_class)
    for i in range(n_class):
        # Look up the conditional probability of each observed feature value;
        # stays 0 if the value is not in the feature table.
        fea0, fea1 = 0, 0
        for j in range(len(feature)):
            if feature[j][0] == X_test[0]:
                fea0 = conditional_probability[i][j][0]
            if feature[j][1] == X_test[1]:
                fea1 = conditional_probability[i][j][1]
        result[i] = fea0 * fea1 * prior_probability[i]

    # Stack scores over their labels so the caller can read off the argmax.
    return np.vstack([result, labels])
def main():
    """Run the textbook naive-Bayes demo and print the class scores."""
    # Training samples: feature X1 in {1, 2, 3}, feature X2 in {'S', 'M', 'L'}.
    samples = [[1, 'S'], [1, 'M'], [1, 'M'], [1, 'S'], [1, 'S'],
               [2, 'S'], [2, 'M'], [2, 'M'], [2, 'L'], [2, 'L'],
               [3, 'L'], [3, 'M'], [3, 'M'], [3, 'L'], [3, 'L']]
    # Class label (1 or -1) for each sample above.
    targets = [-1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1]
    # Feature value table: column j lists the j-th possible value of X1 and X2.
    value_table = [[1, 'S'],
                   [2, 'M'],
                   [3, 'L']]
    # Query point to classify.
    query = [2, 'S']
    priors, conditionals = Train(samples, targets, value_table)
    scores = Predict(query, priors, conditionals, value_table)
    print(scores)
# Script entry point: run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()