A Python demo illustrating how a neural network works, adapted with minor changes from the video course at http://edu.csdn.net/course/detail/3903.
# SimpleNeuralNet.py
# A simple neural network; Python 3.5, depends only on numpy
# Three-layer network:
#  X                      y
# |-|      |-|      |-|
# |-|  W0  |-|  W1  |-|
# |-| ===> |-| ===> |-|
# |-|      |-|      |-|
# |-|      |-|      |-|
#  l0        l1       l2
# input    hidden   output
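# Shape flow for the batch of 4 samples defined below:
# l0 (4, 5) x w0 (5, 4) -> l1 (4, 4) x w1 (4, 1) -> l2 (4, 1)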
import numpy as np
# Sigmoid nonlinearity; with deriv=True it returns the derivative,
# assuming x has already been passed through the sigmoid
def Nonlinear(x, deriv=False):
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
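# Why deriv=True takes the *activated* value (an illustrative aside, not
# part of the original demo): sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)),
# so given a = Nonlinear(z), the derivative is simply a * (1 - a).
# A quick numeric check of both facts:
a = Nonlinear(0.0)
assert a == 0.5 and Nonlinear(a, deriv=True) == 0.25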
# Samples, shape (4, 5): four samples, five features each
X = np.array([[0, 0, 1, 1, 0],   # sample 1
              [0, 1, 1, 1, 1],   # sample 2
              [1, 0, 1, 0, 0],   # sample 3
              [1, 1, 1, 0, 1]])  # sample 4
# Labels, shape (4, 1)
y = np.array([[0],   # sample 1 is class 0
              [1],   # sample 2 is class 1
              [1],   # sample 3 is class 1
              [0]])  # sample 4 is class 0
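# Note (an observation, not in the original comments): the labels equal
# the XOR of the first two features (0,0 -> 0; 0,1 -> 1; 1,0 -> 1;
# 1,1 -> 0), a pattern that is not linearly separable, which is what
# makes the hidden layer necessary here.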
print("X.shape: ", X.shape, "; y.shape: ", y.shape)
np.random.seed(1)
# Randomly initialize the weights with mean 0:
# 2 * rand[0, 1) - 1 draws uniformly from [-1, 1)
w0 = 2 * np.random.random((5, 4)) - 1  # input -> hidden, shape (5, 4)
w1 = 2 * np.random.random((4, 1)) - 1  # hidden -> output, shape (4, 1)
print("w0: ", w0)
print("w1: ", w1)
print("w0.shape: ", w0.shape, "; w1.shape: ", w1.shape)
for j in range(50005):
    # Forward pass
    l0 = X                          # input layer
    l1 = Nonlinear(np.dot(l0, w0))  # hidden layer
    l2 = Nonlinear(np.dot(l1, w1))  # output layer
    # Output error; its mean absolute value is reported as the loss
    l2_error = l2 - y
    # Print the loss once every 10000 iterations
    if (j % 10000) == 0:
        print("loss: " + str(np.mean(np.abs(l2_error))))
    # Backpropagation
    l2_delta = l2_error * Nonlinear(l2, deriv=True)  # output-layer delta
    l1_error = l2_delta.dot(w1.T)                    # error reaching the hidden layer
    l1_delta = l1_error * Nonlinear(l1, deriv=True)  # hidden-layer delta
    # Gradient-descent update of w1 and w0 (implicit learning rate of 1)
    w1 -= l1.T.dot(l2_delta)
    w0 -= l0.T.dot(l1_delta)
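# A minimal post-training check (an addition, not part of the original
# demo): run one more forward pass with the trained weights and compare
# the predictions with the targets; outputs should sit close to 0 or 1.
l1 = Nonlinear(np.dot(X, w0))
l2 = Nonlinear(np.dot(l1, w1))
print("predictions:", l2.ravel().round(3), "; targets:", y.ravel())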