# encoding: utf-8
"""Single gradient-descent step for a one-node sigmoid network.

Given a 4-dimensional input x, target y, and weights w, compute the
network output, the output error, the error term (delta), and the
resulting weight update del_w = learnrate * delta * x.
"""
import numpy as np


def sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-x))


def sigmoid_prime(x):
    """Derivative of the sigmoid: sigma(x) * (1 - sigma(x))."""
    s = sigmoid(x)
    return s * (1 - s)


learnrate = 0.5
x = np.array([1, 2, 3, 4])
y = np.array(0.5)

# Initial weights
w = np.array([0.5, -0.5, 0.3, 0.1])

### Calculate one gradient descent step for each weight
### Note: several steps are consolidated here, so there are fewer
### intermediate variable names than in the fully spelled-out version.

# Linear combination of inputs and weights (the node's pre-activation).
h = x @ w

# Network output: sigmoid of the pre-activation.
nn_output = sigmoid(h)

# Output error: target minus prediction.
error = y - nn_output

# Error term (lowercase delta): error scaled by the output gradient
# f'(h), i.e. the derivative of the activation at the pre-activation.
output_grad = sigmoid_prime(h)
error_term = error * output_grad

# Weight update for this step.
del_w = learnrate * error_term * x

print('Neural Network output:')
print(nn_output)
print('Amount of Error:')
print(error)
print('Change in Weights:')
print(del_w)
梯度下降算法简单实现
最新推荐文章于 2024-05-18 20:02:41 发布