方法一:损失函数 L(ω,b) = −sum(yi * (ω·xi + b)),其中求和仅对误分类点(即 yi*(ω·xi+b) <= 0 的样本)进行
import numpy as np
def perceptron(xi, yi):
    """Train a perceptron with the classic mistake-driven update rule.

    Loss: L(w, b) = -sum over *misclassified* samples of yi * (w . xi + b);
    the activation is np.sign, so labels are expected in {-1, +1}.

    Fixes vs. the original:
      * the original made exactly ONE pass over the data — that happens to
        converge for the AND set, but the perceptron algorithm needs
        repeated passes until a mistake-free epoch in general;
      * typo `leaning_rate` -> `learning_rate`;
      * the hard-coded 2-feature weight init is generalized to any width;
      * the learned model is now also returned (callers that ignored the
        old None return are unaffected).

    Args:
        xi: array of shape (n_samples, n_features), feature vectors.
        yi: array of shape (n_samples,), labels in {-1, +1}.

    Returns:
        (w, b, res): learned weight vector, bias, and the list of
        np.sign predictions for each row of xi. Also prints them.
    """
    learning_rate = 0.1
    b = 0.0  # bias term
    w = np.full(np.shape(xi)[1], 0.5)  # same init as the original for 2 features
    for _ in range(1000):  # safety cap on the number of epochs
        mistakes = 0
        for i in range(len(yi)):
            # yi * (w.x + b) <= 0  <=>  sample i is misclassified
            if yi[i] * (np.dot(w, xi[i]) + b) <= 0:
                w = w + learning_rate * yi[i] * xi[i]  # update w
                b = b + learning_rate * yi[i]          # update b
                mistakes += 1
        if mistakes == 0:  # a clean pass means the data is separated
            break
    res = [np.sign(np.dot(w, xi[j]) + b) for j in range(len(xi))]
    print(f"梯度下降后的权重:{w}, 梯度下降后的偏置项:{b}, 梯度下降后的输出值:{res}")
    return w, b, res
def AND():
    """Return the truth table for logical AND, encoding False as -1 and True as +1."""
    inputs = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]])
    labels = np.array([-1, -1, -1, 1])
    return [inputs, labels]
# Train on the AND truth table (AND() is pure, so one call suffices).
features, targets = AND()
perceptron(features, targets)
运行结果:
梯度下降后的权重:[0.5 0.5], 梯度下降后的偏置项:-0.2, 梯度下降后的输出值:[-1.0, -1.0, -1.0, 1.0]
方法二:损失函数 L(ω,b) = 0.5 * sum((ω·xi + b − yi)**2)(平方误差为非负,系数应为 +0.5,与代码中 J = (1/2)*(...)**2 一致)
# 方法二
import numpy as np
def perceptron(xi, yi):
    """Train a linear unit by stochastic gradient descent on squared error.

    Loss: L(w, b) = 0.5 * sum_i (w . xi[i] + b - yi[i])**2.
    Predictions are thresholded with np.sign, so labels are expected
    in {-1, +1}.

    Bug fixes vs. the original:
      * the update applied `+=` with the raw gradient (w.x + b - y) * x,
        i.e. gradient ASCENT; descent requires subtracting the gradient;
      * `count` was never reset between samples, so the first sample
        consumed the entire 1_000_000-iteration budget;
      * convergence compared `(...).all()` — a boolean — against 0;
        replaced by sweeping whole epochs and stopping once the total
        loss stops improving.
    With these fixes the learned model classifies AND correctly
    (w -> ~[0.5, 0.5], b -> ~-0.5), removing the residual error the
    original note reported.

    Args:
        xi: array of shape (n_samples, n_features), feature vectors.
        yi: array of shape (n_samples,), labels in {-1, +1}.

    Returns:
        (w, b, res): learned weight vector, bias, and the list of
        np.sign predictions for each row of xi. Also prints them.
    """
    learning_rate = 0.05
    b = 0.0  # bias term
    w = np.full(np.shape(xi)[1], 0.5)  # same init as the original for 2 features
    prev_loss = float("inf")
    for _ in range(100_000):  # cap on the number of epochs
        for i in range(len(yi)):
            error = np.dot(w, xi[i]) + b - yi[i]   # dL/d(w.x+b) for sample i
            w = w - learning_rate * error * xi[i]  # descend, not ascend
            b = b - learning_rate * error
        loss = 0.5 * sum((np.dot(w, xi[i]) + b - yi[i]) ** 2 for i in range(len(yi)))
        if abs(prev_loss - loss) < 1e-12:  # loss has stabilized -> stop
            break
        prev_loss = loss
    res = [np.sign(np.dot(w, xi[j]) + b) for j in range(len(xi))]
    print(f"梯度下降后的权重:{w}, 梯度下降后的偏置项:{b}, 梯度下降后的输出值:{res}")
    return w, b, res
def AND():
    """Dataset for logical AND: -1 encodes False, +1 encodes True."""
    table = [([-1, -1], -1), ([-1, 1], -1), ([1, -1], -1), ([1, 1], 1)]
    features = np.array([row for row, _ in table])
    targets = np.array([label for _, label in table])
    return [features, targets]
# Fetch the AND dataset once (AND() is pure) and train on it.
samples, labels = AND()
perceptron(samples, labels)
运行结果:
梯度下降后的权重w:[0.38338042, 0.61661958]
梯度下降后的偏置项b:0.11661960682579162
梯度下降后的输出值:[-1.0, 1.0, -1.0, 1.0]----结果依旧存在误差