# 通过仅向计算机展示一些标记样本,教它学会逻辑或(OR)问题。
# 逻辑或(OR):如果一个表达式的一边或另一边为真(或两边都为真),则逻辑或语句的结果为真。
from random import random
import numpy as np
# Truth table inputs for the logical OR problem.
sample_data = [
    [0, 0],  # False, False
    [0, 1],  # False, True
    [1, 0],  # True,  False
    [1, 1],  # True,  True
]
# Target outputs: OR is False only when both inputs are False.
expected_results = [0, 1, 1, 1]

# Activation threshold: the neuron fires (outputs 1) only when its
# weighted sum exceeds this value.
activation_threshold = 0.5

# Start from tiny random weights (0 <= w < .001), so the untrained
# perceptron's first predictions are essentially arbitrary — it gets
# 1 right and 3 wrong on the OR truth table.
weights = np.random.random(2) / 1000
print(weights)

# The bias input gets its own small random weight as well.
bias_weight = np.random.random() / 1000
print(bias_weight)
# Untrained predictions: with near-zero random weights the activation
# level never clears the 0.5 threshold, so every sample is predicted 0.
for idx, sample in enumerate(sample_data):
    input_vector = np.array(sample)
    # Weighted sum of inputs plus the bias (bias input is fixed at 1).
    activation_level = np.dot(input_vector, weights) + (bias_weight * 1)
    perceptron_output = 1 if activation_level > activation_threshold else 0
    print('Predicted {}'.format(perceptron_output))
    print('Expected: {}'.format(expected_results[idx]))
    print()
# Perceptron learning: sweep the four samples repeatedly, nudging each
# weight by (target - prediction) * input so that wrong answers pull the
# decision boundary toward the correct side.
for iteration_num in range(6):  # six passes over the truth table
    correct_answers = 0
    for idx, sample in enumerate(sample_data):
        input_vector = np.array(sample)
        weights = np.array(weights)
        activation_level = np.dot(input_vector, weights) + (bias_weight * 1)
        perceptron_output = 1 if activation_level > activation_threshold else 0
        if perceptron_output == expected_results[idx]:
            correct_answers += 1
        # Each weight is updated in proportion to its own input (x_i):
        # a small or zero input leaves its weight nearly untouched
        # regardless of the error, while a large input moves it a lot.
        error = expected_results[idx] - perceptron_output
        new_weights = [weights[i] + error * x for i, x in enumerate(sample)]
        # The bias weight is updated along with the inputs (its input is 1).
        bias_weight = bias_weight + (error * 1)
        weights = np.array(new_weights)
    # When the error function settles at a minimum (or a stable value) the
    # model has converged; a perceptron may instead oscillate forever while
    # chasing weights that satisfy every sample, and never converge.
    print('{} correct answers out of 4, for iteration {}'.format(correct_answers, iteration_num))