简单说明
相对于感知机,Adaline算法有趣得多,因为在学习Adaline的过程中涉及到机器学习中一个重要的概念:定义并最小化损失函数。学习Adaline为以后学习更复杂高端的算法(比如逻辑斯蒂回归、SVM等)起到抛砖引玉的作用。
Adaline和感知机的一个重要区别是Adaline算法中权重参数更新按照线性激活函数而不是单位阶跃函数。
实现
下面用python来实现简单的感知机模型和adaline模型
感知机
import numpy as np
class Perceptron(object):
    """Rosenblatt perceptron classifier.

    Parameters
    ----------
    eta : float
        Learning rate (between 0.0 and 1.0).
    n_iter : int
        Number of passes (epochs) over the training set.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; w_[0] is the bias unit.
    errors : list
        Number of misclassifications in each epoch.
    """

    def __init__(self, eta=0.1, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit training data.

        X : array-like, shape = [n_samples, n_features]
        y : array-like, shape = [n_samples], class labels in {-1, 1}

        Returns self (allows method chaining, consistent with Adaline.fit).
        """
        self.w_ = np.zeros(1 + X.shape[1])
        self.errors = []
        for _ in range(self.n_iter):
            errors = 0
            for feature, target in zip(X, y):
                # Perceptron rule: scale the raw error by the learning rate.
                # (Original stored self.eta but never used it.)
                update = self.eta * (target - self.predict(feature))
                self.w_[1:] += update * feature
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors.append(errors)
        return self

    def net_input(self, X):
        # Bug fix: the bias must be added OUTSIDE the dot product.
        # Original computed np.dot(X, w_[1:] + w_[0]), which shifts every
        # weight by the bias instead of adding the bias to the activation.
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Return class label (1 or -1) after the unit-step function."""
        return np.where(self.net_input(X) > 0.0, 1, -1)
adaline
import numpy as np
class Adaline(object):
    """ADAptive LInear NEuron classifier trained by batch gradient descent.

    Unlike the perceptron, weights are updated against a linear activation
    (the identity of the net input) by minimizing the sum-of-squared-errors
    cost function.

    Parameters
    ----------
    eta : float
        Learning rate (between 0.0 and 1.0).
    n_iter : int
        Number of passes (epochs) over the training set.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; w_[0] is the bias unit.
    cost_ : list
        Sum-of-squared-errors cost per epoch.
    """

    def __init__(self, eta=0.1, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit training data via full-batch gradient descent; returns self."""
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            output = self.net_input(X)
            errors = y - output
            # Bug fix: accumulate the gradient step (+=) instead of
            # overwriting the weights (=) each epoch — the original
            # discarded all prior learning on every iteration.
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors ** 2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        """Linear activation: X @ w + bias."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    # Backward-compatible alias for the original misspelled method name.
    predit = net_input

    def predict(self, X):
        """Return class label (1 or -1) after the unit-step function."""
        return np.where(self.net_input(X) > 0.0, 1, -1)