import numpy as np
# Base activation values; the second row of input1 is the elementwise
# negation of the first, so ReLU will zero out opposite-signed entries.
a1, a2, a3 = -0.9, 0.5, 0.7
input1 = np.array([[a1, a2, a3],
                   [-a1, -a2, -a3]])
# Activation function
def activation_relu(input):
    """Rectified linear unit: clamp every negative entry of *input* to 0."""
    return np.maximum(input, 0)
# Weight-initialisation helper
def create_weights(n_inputs, n_neurons):
    """Return an (n_inputs, n_neurons) matrix of standard-normal draws."""
    # standard_normal((d0, d1)) draws from the same stream as randn(d0, d1).
    return np.random.standard_normal((n_inputs, n_neurons))
def create_bias(n_neurons):
    """Return a length-``n_neurons`` vector of standard-normal draws."""
    # standard_normal(d0) draws from the same stream as randn(d0).
    return np.random.standard_normal(n_neurons)
# A dense (fully connected) layer class.
class Layer():
    """A dense layer with standard-normal-initialised parameters.

    Bug fix: the original ``layer_forward`` called ``create_weights()`` and
    ``create_bias()`` on every invocation, redrawing random parameters each
    forward pass — so the same input produced different outputs every call
    and the layer held no persistent state. Parameters are now sampled
    exactly once in ``__init__`` and reused on every forward pass.
    """

    def __init__(self, n_inputs, n_neurons):
        self.n_inputs = n_inputs
        self.n_neurons = n_neurons
        # Sample the parameters once; layer_forward reuses them.
        self.weights = self.create_weights()
        self.bias = self.create_bias()

    def create_weights(self):
        """Return a fresh (n_inputs, n_neurons) standard-normal weight matrix."""
        return np.random.randn(self.n_inputs, self.n_neurons)

    def create_bias(self):
        """Return a fresh (n_neurons,) standard-normal bias vector."""
        return np.random.randn(self.n_neurons)

    def layer_forward(self, input):
        """Affine transform of *input*: ``input @ weights + bias``.

        ``input`` is expected to have shape (batch, n_inputs); the result
        has shape (batch, n_neurons).
        """
        return np.dot(input, self.weights) + self.bias
# Build a three-layer stack: 3 -> 4 -> 5 -> 3 neurons.
layer1 = Layer(3, 4)
layer2 = Layer(4, 5)
layer3 = Layer(5, 3)

# Forward pass: each layer's ReLU output feeds the next layer.
sum1 = layer1.layer_forward(input1)
output1 = activation_relu(sum1)
sum2 = layer2.layer_forward(output1)
output2 = activation_relu(sum2)
sum3 = layer3.layer_forward(output2)
output3 = activation_relu(sum3)

# Report the pre-activation and activated values of every layer.
for label, pre, post in (
    ("第一层", sum1, output1),
    ("第二层", sum2, output2),
    ("第三层", sum3, output3),
):
    print(label)
    print(pre)
    print(post)
# 03_面向对象的层 (03: object-oriented layers) — title of the original blog post.
# Published 2024-08-10 10:46:00 (scraped page metadata, not code).