刚才介绍的是单样本感知器算法:每次迭代时只用一个训练模式来修正权矢量。实际上,我们也可以把若干个训练模式放在一起考虑,这样既能引入代价函数,又便于对分类误差率进行控制。
本博客所有内容是原创,未经书面许可,严禁任何形式的转载
http://blog.csdn.net/u010255642
我们用 Python 编写代码如下:
[1 1 8]
-1
[ 1 2 15]
-1
[1 1 3]
1
[1 2 3]
1
[1 1 3]
1
[1 2 3]
1
[1 1 3]
1
[1 2 3]
1
[1 1 3]
1
1 and 3 => 1
2 and 3 => 1
1 and 8 => -1
2 and 15 => -1
9 and 19 => 1
3 and 22 => -1
>>>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# Batch-perceptron hyper-parameters and training data.
b=1  # initial bias term (first component of the weight vector)
a=0.5  # learning rate used to scale each batch correction
# Training samples; each row is augmented with a leading 1 for the bias.
x = np.array([[1,1,3],[1,2,3],[1,1,8],[1,2,15]])
d =np.array([1,1,-1,-1])  # desired labels (+1 / -1), one per sample row
w=np.array([b,0,0])  # initial weight vector
wucha=0  # tolerance on the total correction magnitude before stopping
ddcount=50  # iteration cap, in case the data is not linearly separable
def sgn(v):
    """Sign threshold: return 1 when v is positive, otherwise -1."""
    if v > 0:
        return 1
    else:
        return -1

def comy(myw, myx):
    """Classify the augmented sample myx with weights myw: sgn(w . x)."""
    return sgn(np.dot(myw.T, myx))

def tiduxz(myw, myx, mya, myd=None):
    """Batch gradient correction for the perceptron.

    Accumulates d[i]*x[i] over every sample misclassified by myw and
    scales the sum by the learning rate mya; the caller adds the result
    to the weight vector.

    myw -- current weight vector
    myx -- training samples, one augmented sample per row
    mya -- learning rate
    myd -- desired labels; defaults to the module-level d so the
           original call sites keep working unchanged
    """
    if myd is None:
        myd = d  # fall back to the global labels, as the original did
    # Accumulator sized to the feature dimension: works for any width
    # (the original hard-coded np.array([0,0,0])), and a float buffer
    # avoids in-place integer-cast errors on mixed dtypes.
    sum_x = np.zeros(myx.shape[1])
    for i, xn in enumerate(myx):
        if comy(myw, xn) != myd[i]:
            sum_x += myd[i] * xn
            # Log each misclassified sample, as the original script did.
            # Parenthesized print behaves identically under Python 2 and
            # Python 3 for a single argument.
            print(xn)
            print(myd[i])
    return mya * sum_x
# --- Training loop: apply batch corrections until convergence ---------
i = 0
while True:
    tdxz = tiduxz(w, x, a)
    w = w + tdxz  # move the weights by the accumulated correction
    i = i + 1
    # Stop when the total magnitude of the correction is within the
    # tolerance (at convergence the correction is the zero vector), or
    # after ddcount iterations as a safety limit.  Summing absolute
    # values fixes the original abs(tdxz.sum()), where positive and
    # negative components could cancel and stop training prematurely.
    if np.abs(tdxz).sum() <= wucha or i >= ddcount:
        break

# --- Report the learned classifications --------------------------------
# Parenthesized print works identically under Python 2 and Python 3
# for a single argument.
for xn in x:
    print("%d and %d => %d " % (xn[1], xn[2], comy(w, xn)))
test = np.array([1, 9, 19])
print("%d and %d => %d " % (test[1], test[2], comy(w, test)))
test = np.array([1, 3, 22])
print("%d and %d => %d " % (test[1], test[2], comy(w, test)))
-1
[ 1 2 15]
-1
[1 1 3]
1
[1 2 3]
1
[1 1 3]
1
[1 2 3]
1
[1 1 3]
1
[1 2 3]
1
[1 1 3]
1
1 and 3 => 1
2 and 3 => 1
1 and 8 => -1
2 and 15 => -1
9 and 19 => 1
3 and 22 => -1
>>>