Part One: Feedforward Neural Networks
Chapter 3: Coding a feedforward neural network:
# Feedforward neural network for regression
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
import sklearn.metrics
import pylab
# Generate the dataset: 1000 examples with 100 random features and random scalar targets
examples = 1000
features = 100
D = (npr.randn(examples, features), npr.randn(examples))
# Specify the network
layer1_units = 10
layer2_units = 1
w1 = npr.rand(features, layer1_units)      # weights and biases
b1 = npr.rand(layer1_units)
w2 = npr.rand(layer1_units, layer2_units)
b2 = 0.0
theta = (w1, b1, w2, b2)                   # all parameters collected as theta
# Define the loss function
def squared_loss(y, y_hat):                # y_hat is the network's prediction
    return np.dot((y - y_hat), (y - y_hat))    # sum of squared errors via a dot product
# Binary cross-entropy loss for a classification output layer (not used in this regression example)
def binary_cross_entropy(y, y_hat):
    return np.sum(-(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)))
# Wrapper around the neural network
def neural_network(x, theta):
    w1, b1, w2, b2 = theta
    # Forward pass: tanh hidden layer followed by a tanh output layer
    return np.tanh(np.dot(np.tanh(np.dot(x, w1) + b1), w2) + b2)
# Wrapper around the objective function to be optimised
def objective(theta, idx):
    return squared_loss(D[1][idx], neural_network(D[0][idx], theta))
# Update step: plain gradient descent on each parameter
def update_theta(theta, delta, alpha):
    w1, b1, w2, b2 = theta
    w1_delta, b1_delta, w2_delta, b2_delta = delta    # delta holds the gradients
    w1_new = w1 - alpha * w1_delta
    b1_new = b1 - alpha * b1_delta
    w2_new = w2 - alpha * w2_delta
    b2_new = b2 - alpha * b2_delta
    new_theta = (w1_new, b1_new, w2_new, b2_new)       # keep the (w1, b1, w2, b2) ordering
    return new_theta
# Compute the gradient of the objective with respect to theta
grad_objective = grad(objective)
# Train the network with stochastic gradient descent
epochs = 10
print("MSE before training:", sklearn.metrics.mean_squared_error(D[1], neural_network(D[0], theta)))
mse = []
for i in range(0, epochs):
    for j in range(0, examples):
        delta = grad_objective(theta, j)
        theta = update_theta(theta, delta, 0.01)
        mse.append(sklearn.metrics.mean_squared_error(D[1], neural_network(D[0], theta)))   # track full-dataset MSE after every update
print("MSE after training:", sklearn.metrics.mean_squared_error(D[1], neural_network(D[0], theta)))
pylab.plot(mse)
pylab.show()
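Note that sklearn.metrics.mean_squared_error returns the mean squared error itself, not its square root. If an actual RMSE curve is wanted, it is enough to take the square root of the tracked values before plotting; a minimal sketch (the variable name rmse_curve is an illustrative addition):

rmse_curve = np.sqrt(np.array(mse))   # convert the tracked MSE values to RMSE
pylab.plot(rmse_curve)
pylab.ylabel("RMSE")
pylab.show()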
Result:
MSE before training: 1.89717170214
MSE after training: 1.06282114173
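As a quick sanity check on what grad(objective) produces, the sketch below compares autograd's gradient for a single entry of w1 against a central finite-difference estimate. The helper name finite_diff_check, the index (0, 0), and the step size eps are illustrative additions, not part of the original listing; it assumes the code above has already been run.

# Sketch: compare autograd's gradient with a finite-difference estimate for one weight
def finite_diff_check(theta, idx, eps=1e-5):
    w1, b1, w2, b2 = theta
    w1_plus, w1_minus = w1.copy(), w1.copy()
    w1_plus[0, 0] += eps                    # perturb a single entry of w1 up and down
    w1_minus[0, 0] -= eps
    numeric = (objective((w1_plus, b1, w2, b2), idx) -
               objective((w1_minus, b1, w2, b2), idx)) / (2 * eps)
    analytic = grad_objective(theta, idx)[0][0, 0]   # gradient tuple: (dw1, db1, dw2, db2)
    return numeric, analytic

print(finite_diff_check(theta, 0))          # the two numbers should agree closely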