Building a Neural Network
Below is a simple neural network I built as a note to myself: a fully connected 4-2-1 network (four inputs, one hidden layer of two sigmoid neurons, and a single linear output). The principle is shown in the figure.
import tensorflow as tf
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def sigmoid(x):
    # Activation function between the first and second layers
    return 1 / (1 + np.exp(-x))

def deriv_sigmoid(x):
    # Derivative of the sigmoid: deriv_sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
    fx = sigmoid(x)
    return fx * (1 - fx)

def mse_loss(y_true, y_pred):
    # Mean squared error loss
    return ((y_true - y_pred) ** 2).mean()
class OurNeuralNetwork:
    def __init__(self):
        # Weights from the input layer (4 neurons) to the hidden layer (2 neurons)
        self.w11 = np.random.normal()
        self.w12 = np.random.normal()
        self.w21 = np.random.normal()
        self.w22 = np.random.normal()
        self.w31 = np.random.normal()
        self.w32 = np.random.normal()
        self.w41 = np.random.normal()
        self.w42 = np.random.normal()
        # Weights from the hidden layer (2 neurons) to the output layer (1 neuron)
        self.w1 = np.random.normal()
        self.w2 = np.random.normal()
        # Biases (intercept terms)
        self.b1 = np.random.normal()
        self.b2 = np.random.normal()
        self.b3 = np.random.normal()
        # MSE recorded after each training epoch
        self.Loss = []
    def layer1(self, inputs, in_size, out_size, activation_function=None):
        # First layer: maps the input features to the hidden pre-activations.
        # inputs is one sample with shape (1, 4); in_size is the neuron count
        # of the previous layer and out_size that of this layer (in_size = 4,
        # out_size = 2; both are informational and unused in the computation).
        weights = tf.Variable(np.array([[self.w11, self.w12],
                                        [self.w21, self.w22],
                                        [self.w31, self.w32],
                                        [self.w41, self.w42]]), dtype=tf.float32)
        inputs = tf.Variable(np.array(inputs), dtype=tf.float32)
        # Biases
        biases = tf.Variable(np.array([self.b1, self.b2]), dtype=tf.float32)
        # Affine transform: (1, 4) @ (4, 2) + (2,) -> (1, 2)
        wx_plus_b = tf.matmul(inputs, weights) + biases
        if activation_function is None:
            outputs = wx_plus_b
        else:
            outputs = activation_function(wx_plus_b)
        return outputs
    def layer2(self, inputs, in_size, out_size, activation_function=None):
        # Second layer: applies the activation element-wise to the hidden
        # pre-activations produced by layer1. inputs has shape (1, 2);
        # in_size = 4, out_size = 2.
        if activation_function is None:
            outputs = inputs
        else:
            outputs = activation_function(inputs)
        return outputs
    def layer3(self, inputs, in_size, out_size, activation_function=None):
        # Third layer: maps the hidden activations to the single output.
        # inputs = [output1, output2] with shape (1, 2); in_size = 2, out_size = 1.
        weights = tf.Variable(np.array([[self.w1], [self.w2]]), dtype=tf.float32)
        # Bias
        biases = tf.Variable(np.array([self.b3]), dtype=tf.float32)
        # Affine transform: (1, 2) @ (2, 1) + (1,) -> (1, 1)
        wx_plus_b = tf.matmul(inputs, weights) + biases
        if activation_function is None:
            outputs = wx_plus_b
        else:
            outputs = activation_function(wx_plus_b)
        return outputs
    def feedforward(self, x):
        # Forward pass computed directly from the scalar weights
        h1 = sigmoid(self.w11 * x[0] + self.w21 * x[1] + self.w31 * x[2] + self.w41 * x[3] + self.b1)
        h2 = sigmoid(self.w12 * x[0] + self.w22 * x[1] + self.w32 * x[2] + self.w42 * x[3] + self.b2)
        o1 = self.w1 * h1 + self.w2 * h2 + self.b3
        return o1
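
    # Backpropagation sketch, derived from the definitions above. With
    #   y_pred = w1*h1 + w2*h2 + b3,  h_i = sigmoid(sum_i),
    #   L = (y_true - y_pred)**2,
    # the chain rule gives, for example,
    #   dL/dw1  = (y_pred - y_true) * h1
    #   dL/db3  = (y_pred - y_true)
    #   dL/dw11 = (y_pred - y_true) * w1 * deriv_sigmoid(sum1) * x[0]
    #   dL/db1  = (y_pred - y_true) * w1 * deriv_sigmoid(sum1)
    # and symmetrically for the weights feeding h2 via sum2. The factor of 2
    # from differentiating the square is dropped and absorbed into the
    # learning rate.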
    def train(self, epochs, learn_rate, data, y_true):
        # Stochastic gradient descent: one weight update per sample, with the
        # MSE over the whole dataset appended to self.Loss after each epoch
        for i in range(epochs):
            print("Training epoch %s" % (i + 1))
            preds = []
            for x, y in zip(data, y_true):
                # Forward pass through the three layers
                layer1_y = self.layer1([x], 4, 2)
                layer2_y = self.layer2(layer1_y, 4, 2, sigmoid)
                layer3_y = self.layer3(layer2_y, 2, 1)
                y_pred = np.array(layer3_y).ravel()[0]
                layer1_y = np.array(layer1_y).ravel()
                layer2_y = np.array(layer2_y).ravel()
                sum1 = layer1_y[0]     # pre-activation of hidden neuron 1
                sum2 = layer1_y[1]     # pre-activation of hidden neuron 2
                output1 = layer2_y[0]  # activation of hidden neuron 1
                output2 = layer2_y[1]  # activation of hidden neuron 2
                # dL/dy_pred (the factor of 2 is absorbed into the learning rate)
                loss = y_pred - y
                # Weights feeding hidden neuron 1 (through sum1)
                self.w11 -= learn_rate * loss * self.w1 * deriv_sigmoid(sum1) * x[0]
                self.w21 -= learn_rate * loss * self.w1 * deriv_sigmoid(sum1) * x[1]
                self.w31 -= learn_rate * loss * self.w1 * deriv_sigmoid(sum1) * x[2]
                self.w41 -= learn_rate * loss * self.w1 * deriv_sigmoid(sum1) * x[3]
                # Weights feeding hidden neuron 2 (through sum2)
                self.w12 -= learn_rate * loss * self.w2 * deriv_sigmoid(sum2) * x[0]
                self.w22 -= learn_rate * loss * self.w2 * deriv_sigmoid(sum2) * x[1]
                self.w32 -= learn_rate * loss * self.w2 * deriv_sigmoid(sum2) * x[2]
                self.w42 -= learn_rate * loss * self.w2 * deriv_sigmoid(sum2) * x[3]
                # Hidden-layer biases
                self.b1 -= learn_rate * loss * self.w1 * deriv_sigmoid(sum1)
                self.b2 -= learn_rate * loss * self.w2 * deriv_sigmoid(sum2)
                # Output-layer weights and bias
                self.w1 -= learn_rate * loss * output1
                self.w2 -= learn_rate * loss * output2
                self.b3 -= learn_rate * loss
                preds.append(self.feedforward(x))
            preds = np.array(preds)
            self.Loss.append(mse_loss(y_true, preds))
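
To see the class in action, here is a minimal usage sketch; the data, labels, epoch count, and learning rate below are made up purely for illustration.

# Minimal usage sketch; data and hyperparameters are made up for illustration
data = np.array([
    [0.5, 1.0, -1.5, 0.2],
    [1.0, -0.5, 0.3, -1.2],
    [-0.7, 0.8, 1.1, 0.4],
    [-1.5, -0.3, -0.8, 1.0],
])
y_true = np.array([1, 0, 1, 0])

network = OurNeuralNetwork()
network.train(500, 0.1, data, y_true)  # 500 epochs, learning rate 0.1

# Plot the per-epoch MSE recorded in network.Loss
plt.plot(network.Loss)
plt.xlabel("epoch")
plt.ylabel("MSE loss")
plt.show()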