Perceptron and Sigmoid Neuron Programming Exercises

Perceptron:

Activation function: the sign/step function (activation-function figure omitted). Perceptron model: f(x) = sign(w·x + b). The implementation below uses the equivalent 0/1 step, firing when w·x exceeds the threshold.


import numpy as np

class Perceptron:
    """
    This class models an artificial neuron with step activation function.
    """
    def __init__(self, weights = np.array([1]), threshold = 0):
        self.weights = weights.astype(float) 
        self.threshold = threshold

    def activate(self, values):
        """Return 1 if the weighted input strength exceeds the threshold, else 0."""
        strength = np.dot(values, self.weights)
        return int(strength > self.threshold)

    def update(self, values, train, eta=.1):
        """Update the weights with the perceptron learning rule:
        w <- w + eta * (y_true - y_pred) * x, one data point at a time."""
        for data_point in range(len(values)):  # range, not Python 2's xrange
            prediction = self.activate(values[data_point])
            error = train[data_point] - prediction

            weight_update = eta * error * values[data_point]
            self.weights += weight_update

def test():

    def sum_almost_equal(array1, array2, tol = 1e-6):
        return sum(abs(array1 - array2)) < tol

    p1 = Perceptron(np.array([1,1,1]),0)
    p1.update(np.array([[2,0,-3]]), np.array([1]))
    assert sum_almost_equal(p1.weights, np.array([1.2, 1, 0.7]))

    p2 = Perceptron(np.array([1,2,3]),0)
    p2.update(np.array([[3,2,1],[4,0,-1]]),np.array([0,0]))
    assert sum_almost_equal(p2.weights, np.array([0.7, 1.8, 2.9]))

    p3 = Perceptron(np.array([3,0,2]),0)
    p3.update(np.array([[2,-2,4],[-1,-3,2],[0,2,1]]),np.array([0,1,0]))
    assert sum_almost_equal(p3.weights, np.array([2.7, -0.3, 1.7]))

if __name__ == "__main__":
    test()
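As a quick usage sketch (my addition, not part of the original exercise), the class above can learn the logical AND function. A constant 1 is appended to each input so the offset of the decision boundary is learned as an ordinary weight:

X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])   # last column is a constant bias input
y = np.array([0, 0, 0, 1])  # targets: AND of the first two columns

p = Perceptron(np.array([0.0, 0.0, 0.0]), 0)
for _ in range(20):         # AND is linearly separable, so this converges
    p.update(X, y)

print([p.activate(x) for x in X])  # expected: [0, 0, 0, 1]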

Sigmoid:

Activation function: sigmoid, σ(x) = 1/(1 + exp(−x)) (activation-function figure omitted).

The only difference between the perceptron and logistic regression is the activation function: the perceptron uses sign, while logistic regression uses the sigmoid. Logistic regression model: f(x) = sigmoid(w·x + b).
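A two-line illustration of that contrast (again my addition, assuming only NumPy): sign makes a hard decision, while the sigmoid squashes the same input strength into a smooth value between 0 and 1:

import numpy as np

z = np.array([-2.0, 0.0, 2.0])   # weighted input strengths w*x + b
print(np.sign(z))                # hard decisions: [-1.  0.  1.]
print(1/(1 + np.exp(-z)))        # smooth outputs: ≈ [0.1192 0.5 0.8808]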


import numpy as np


class Sigmoid:

    def __init__(self, weights = np.array([1])):
        # store as a float array so the in-place update works even when a
        # plain Python list is passed in (as the tests below do)
        self.weights = np.array(weights, dtype=float)
        self.last_input = 0 # strength of last input
        self.delta      = 0 # error signal

    def activate(self, values):
        """Cache the input strength, then squash it through the sigmoid."""
        strength = np.dot(values, self.weights)
        self.last_input = strength

        result = 1/(1+np.exp(-self.last_input))
        return result
    
    def update(self, values, train, eta=.1):
        """Gradient-descent update: w <- w + eta*(y - y_hat)*sigmoid'(z)*x."""
        for X, y_true in zip(values, train):
            y_pred = self.activate(X)

            error = y_true - y_pred
            # sigmoid'(z) = sigmoid(z)*(1 - sigmoid(z)); y_pred is already
            # sigmoid(self.last_input), so reuse it instead of recomputing
            dx = y_pred*(1 - y_pred)

            dw = eta*error*dx*X
            self.weights += dw

def test():
    def sum_almost_equal(array1, array2, tol = 1e-5):
        return sum(abs(array1 - array2)) < tol

    u1 = Sigmoid(weights=[3,-2,1])
    assert abs(u1.activate(np.array([1,2,3])) - 0.880797) < 1e-5
    
    u1.update(np.array([[1,2,3]]),np.array([0]))
    assert sum_almost_equal(u1.weights, np.array([2.990752, -2.018496, 0.972257]))

    u2 = Sigmoid(weights=[0,3,-1])
    u2.update(np.array([[-3,-1,2],[2,1,2]]),np.array([1,0]))
    assert sum_almost_equal(u2.weights, np.array([-0.030739, 2.984961, -1.027437]))

if __name__ == "__main__":
    test()
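To sanity-check the first test case by hand (my arithmetic, tracing the update rule in the code): for u1, the input strength is z = 3·1 + (−2)·2 + 1·3 = 2, so y_pred = sigmoid(2) ≈ 0.880797, which is exactly what the first assert checks. With y_true = 0 the error is ≈ −0.880797, the derivative is sigmoid'(2) ≈ 0.880797 × 0.119203 ≈ 0.104994, and the step is dw = 0.1 × (−0.880797) × 0.104994 × [1, 2, 3] ≈ [−0.009248, −0.018496, −0.027743], giving the expected weights [2.990752, −2.018496, 0.972257].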

Reposted from: https://my.oschina.net/u/3851199/blog/1942031
