前言:学习机器学习还是要从简单的实践开始吧
原理:
感知机
其中weighted sum就是把输入的元素乘以权重后相加,step function则是一个激活函数,一般用阶跃函数或是sigmoid等
正向计算结果函数:Output(x) = 阶跃函数(x*W + bias)
反向计算梯度得到更新: w = w+rate * x * (labels - output)
bias = bias + rate * (labels - output)
下面给出分类二维平面上的几个点, BUT这里的感知机只能进行直线的二分类划分,不能完成异或等比较复杂的划分
python3例:
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from functools import reduce
class Perceptron:
    """A single-layer perceptron for linearly separable binary classification.

    Forward pass:  output = activator(w . x + bias)
    Update rule:   w    += rate * x * (label - output)
                   bias += rate * (label - output)

    Note: a single perceptron can only learn a linear decision boundary
    (e.g. it cannot represent XOR).
    """

    def __init__(self, input_num, activator):
        """
        input_num -- number of input features (length of each input vector)
        activator -- step/threshold function applied to the weighted sum
        """
        self.activator = activator
        # Weights start at zero; training nudges them toward a separator.
        self.weights = [0.0 for _ in range(input_num)]
        self.bias = 0.0

    def __str__(self):
        # Fixed the original format string: "weiths" typo and a stray "]".
        return "weights\t: {0}\nbias\t: {1}\n".format(self.weights, self.bias)

    def predict(self, input_vec):
        """Return activator(w . x + bias) for one input vector."""
        weighted_sum = sum(w * x for w, x in zip(self.weights, input_vec))
        return self.activator(weighted_sum + self.bias)

    def train(self, input_vecs, labels, iteration, rate):
        """Run `iteration` epochs of the perceptron learning rule.

        input_vecs -- list of input vectors
        labels     -- expected output for each input vector
        iteration  -- number of passes over the whole data set
        rate       -- learning rate
        """
        for _ in range(iteration):
            self._one_iteration(input_vecs, labels, rate)

    def _one_iteration(self, input_vecs, labels, rate):
        # One epoch: predict each sample and apply the delta rule to it.
        for input_vec, label in zip(input_vecs, labels):
            output = self.predict(input_vec)
            self._update_weights(input_vec, output, label, rate)

    def _update_weights(self, input_vec, output, label, rate):
        # delta is 0 for a correct prediction, so correctly classified
        # samples leave the weights and bias untouched.
        delta = label - output
        self.weights = [w + x * rate * delta
                        for w, x in zip(self.weights, input_vec)]
        self.bias += rate * delta
def f(x):
    """Heaviside step activation: 1 for strictly positive input, else 0."""
    if x > 0:
        return 1
    return 0
def get_data_set():
    """Return (input_vecs, labels) for the toy 2-D classification task."""
    samples = [[1, 1], [0, 0], [0, 1], [1, 0]]
    targets = [1, 1, 1, 0]
    return samples, targets
def train_and_perceptron():
    """Build a 2-input perceptron, train it on the toy data, return it."""
    input_vecs, labels = get_data_set()
    perceptron = Perceptron(2, f)
    # 100 epochs at learning rate 0.01, matching the original script.
    perceptron.train(input_vecs, labels, 100, 0.01)
    return perceptron
if __name__ == "__main__":
    # Train on the toy data set, then report the learned parameters and
    # the prediction for each of the four possible inputs.
    print("I am main perceptron!")
    trained = train_and_perceptron()
    print(trained)
    print('1 1 -->{}'.format(trained.predict((1, 1))))
    print('1 0 -->{}'.format(trained.predict((1, 0))))
    print('0 1 -->{}'.format(trained.predict((0, 1))))
    print('0 0 -->{}'.format(trained.predict((0, 0))))