1. Original form of the perceptron learning algorithm
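Given a linearly separable training set, the perceptron searches for a hyperplane w·x + b = 0 that separates the positive and negative points. A sample (x_i, y_i) is misclassified when y_i(w·x_i + b) ≤ 0, and each misclassified sample triggers the update

$$w \leftarrow w + \eta\, y_i x_i, \qquad b \leftarrow b + \eta\, y_i,$$

where $\eta$ is the learning rate (fixed to 1 in the code below).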
# -*- coding: utf-8 -*-
'''
Li Hang, "Statistical Learning Methods"
Algorithm 2.1 (original form of the perceptron learning algorithm)
Example 2.1
'''
def perceptron_primal(x_list, y_list):
    w = [0, 0]
    b = 0
    rate = 1    # learning rate
    n_iter = 0  # number of updates
    while True:
        flag = 0
        for x, y in zip(x_list, y_list):
            if (w[0]*x[0] + w[1]*x[1] + b)*y > 0:  # correctly classified
                flag += 1
                continue
            else:
                n_iter += 1
                # gradient of the loss at a misclassified point: -y*x and -y
                delta_w0 = -x[0]*y
                delta_w1 = -x[1]*y
                delta_b = -y
                w[0] -= rate * delta_w0
                w[1] -= rate * delta_w1
                b -= rate * delta_b
                print('Iteration {}: w = ({},{}), b = {}, misclassified point: ({},{})'.format(
                    n_iter, w[0], w[1], b, x[0], x[1]))
                break
        if flag == len(y_list):  # all points classified correctly; stop updating
            print('Separating hyperplane: {}*x1 + {}*x2 + {} = 0'.format(w[0], w[1], b))
            break
if __name__ == '__main__':
    x_list = [(3, 3), (4, 3), (1, 1)]
    y_list = [1, 1, -1]
    perceptron_primal(x_list, y_list)
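On the data of Example 2.1 this run should end with w = (1, 1) and b = -3, i.e. the separating hyperplane x1 + x2 - 3 = 0. A minimal sanity check (not part of the original listing; check_separation is a hypothetical helper) that the learned parameters classify every training point correctly:

def check_separation(w, b, x_list, y_list):
    # every sample must satisfy y * (w . x + b) > 0
    return all((w[0]*x[0] + w[1]*x[1] + b)*y > 0
               for x, y in zip(x_list, y_list))

assert check_separation([1, 1], -3, [(3, 3), (4, 3), (1, 1)], [1, 1, -1])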
2. Dual form of the perceptron learning algorithm
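In the dual form, the weight vector is expressed as a linear combination of the training points, $w = \sum_i \alpha_i y_i x_i$, so learning updates the coefficients $\alpha_i$ instead of w. A sample x_i is misclassified when

$$y_i\Bigl(\sum_{j} \alpha_j y_j (x_j \cdot x_i) + b\Bigr) \le 0,$$

and the update is $\alpha_i \leftarrow \alpha_i + \eta$, $b \leftarrow b + \eta\, y_i$. Because only inner products of training points appear, they can be precomputed once as the Gram matrix $G_{ij} = x_i \cdot x_j$.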
# -*- coding: utf-8 -*-
'''
Li Hang, "Statistical Learning Methods"
Algorithm 2.2 (dual form of the perceptron learning algorithm)
Example 2.2
'''
import numpy as np

def Gram(x_list):
    '''Compute the Gram matrix: G[i][j] = x_i . x_j'''
    n = len(x_list)
    G = [[0 for j in range(n)] for i in range(n)]
    for i in range(n):
        for j in range(i, n):  # fill the upper triangle, then mirror
            G[i][j] = x_list[i][0]*x_list[j][0] + x_list[i][1]*x_list[j][1]
            G[j][i] = G[i][j]  # the Gram matrix is symmetric
    return G
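# The same matrix can be computed in one line with numpy (a sketch, not part
# of the original listing; gram_np is a hypothetical name):
def gram_np(x_list):
    X = np.array(x_list, dtype=float)
    return X @ X.T  # entry (i, j) is the inner product x_i . x_j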
def perceptron_dual(x_list, y_list):
    alpha = np.zeros(len(y_list))
    b = 0
    rate = 1    # learning rate
    n_iter = 0  # number of updates
    G = Gram(x_list)  # precompute the Gram matrix
    while True:
        flag = 0
        for i in range(len(y_list)):
            temp = 0
            for j in range(len(y_list)):
                temp += alpha[j]*y_list[j]*G[j][i]
            if y_list[i]*(temp + b) > 0:  # correctly classified
                flag += 1
                continue
            else:
                n_iter += 1
                alpha[i] += rate
                b += y_list[i]*rate
                print('Iteration {}: alpha = {}, b = {}, misclassified point: ({},{})'.format(
                    n_iter, alpha.tolist(), b, x_list[i][0], x_list[i][1]))
                break
        if flag == len(y_list):  # all points classified correctly; stop updating
            # recover w = sum_i alpha_i * y_i * x_i from the dual variables
            w = [0, 0]
            for i in range(len(y_list)):
                w[0] += alpha[i]*y_list[i]*x_list[i][0]
                w[1] += alpha[i]*y_list[i]*x_list[i][1]
            print('Separating hyperplane: {}*x1 + {}*x2 + {} = 0'.format(w[0], w[1], b))
            break
if __name__ == '__main__':
    x_list = [(3, 3), (4, 3), (1, 1)]
    y_list = [1, 1, -1]
    perceptron_dual(x_list, y_list)
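Both forms should recover the same hyperplane, x1 + x2 - 3 = 0, on this data. For completeness, a minimal sketch (not in the original; predict_dual is a hypothetical helper) of how the learned dual parameters classify a new point x via f(x) = sign(Σ_j α_j y_j (x_j · x) + b):

def predict_dual(x, alpha, b, x_list, y_list):
    s = b
    for a, xj, yj in zip(alpha, x_list, y_list):
        s += a * yj * (xj[0]*x[0] + xj[1]*x[1])
    return 1 if s >= 0 else -1  # the book defines sign(0) = +1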