import numpy as np
a1 = 0.3
a2 = 0.5
a3 = 0.7
inputs = np.array([[a1, a2, a3],
                   [-a1, -a2, -a3]])
def activation_relu(input):
    return np.maximum(0, input)
def activation_softmax(input):
    max_values = np.max(input, axis=1, keepdims=True)  # row-wise max: one value per sample
    # Shift each row so its max is 0 and everything else is negative. Because exp()
    # turns an equal shift into an equal ratio, the softmax output is unchanged,
    # and this guards against overflow in the exponential.
    slide_inputs = input - max_values
    exp_values = np.exp(slide_inputs)
    exp_sums = np.sum(exp_values, axis=1, keepdims=True)
    return exp_values / exp_sums  # each entry is its share of the row's total
# print(activation_softmax(inputs))
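# A quick sanity check (illustrative, not part of the original net): because the
# shift by the row max cancels in the ratio, activation_softmax should agree with
# a naive softmax on inputs small enough not to overflow. naive_softmax is a
# throwaway helper defined only for this comparison.
def naive_softmax(input):
    exp_values = np.exp(input)
    return exp_values / np.sum(exp_values, axis=1, keepdims=True)
assert np.allclose(activation_softmax(inputs), naive_softmax(inputs))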
# Rescale the data so that gradients neither explode nor vanish
def normalize(array):
    max_number = np.max(np.absolute(array), axis=1, keepdims=True)
    scale_rate = np.where(max_number == 0, 1, 1 / max_number)  # avoid dividing by zero on all-zero rows
    norm = array * scale_rate
    return norm
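# Illustrative check of normalize(): it is per-row max-abs scaling, so every row
# of the result lies in [-1, 1], and all-zero rows pass through unchanged thanks
# to the np.where guard. demo_rows is a throwaway example array.
demo_rows = np.array([[2.0, -4.0, 1.0],
                      [0.0, 0.0, 0.0]])
# print(normalize(demo_rows))  # -> [[ 0.5  -1.    0.25], [ 0.  0.  0. ]]
assert np.allclose(normalize(demo_rows)[0], [0.5, -1.0, 0.25])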
class Layer:
    def __init__(self, n_inputs, n_neurons):
        self.weights = np.random.randn(n_inputs, n_neurons)
        self.biases = np.random.randn(n_neurons)

    def layer_forward(self, inputs):
        # Affine transform only; the activation is applied by the Net
        output = np.dot(inputs, self.weights) + self.biases
        return output
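# Illustrative single-layer check (demo_layer is a throwaway name, not part of the
# net built below): with 3 inputs and 4 neurons the weights have shape (3, 4) and
# the biases shape (4,), so a batch of shape (2, 3) maps to shape (2, 4), with the
# biases broadcast across rows. Left commented out so it does not consume random
# numbers before net1 is created.
# demo_layer = Layer(3, 4)
# print(demo_layer.layer_forward(inputs).shape)  # -> (2, 4)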
class Net:
    def __init__(self, Net_shape):
        self.shape = Net_shape
        self.layers = []
        for i in range(len(self.shape) - 1):
            layer = Layer(self.shape[i], self.shape[i + 1])
            self.layers.append(layer)

    def net_forward(self, inputs):
        outputs = [inputs]
        for i in range(len(self.layers)):
            if i < len(self.layers) - 1:
                # Hidden layers: ReLU, then rescale so activations stay in [-1, 1]
                output = activation_relu(self.layers[i].layer_forward(outputs[i]))
                print(output)
                output = normalize(output)
                print(output)
            else:
                # Output layer: softmax turns the logits into row-wise probabilities
                output = activation_softmax(self.layers[i].layer_forward(outputs[i]))
            outputs.append(output)
        print(outputs)
        return outputs
net1_shape = [3, 4, 5, 3]
net1 = Net(net1_shape)
net1.net_forward(inputs)
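# Sanity check (illustrative): the last entry of the returned outputs list is the
# softmax layer's result, so every row should sum to 1. final_output is a
# throwaway name for this check.
final_output = net1.net_forward(inputs)[-1]
assert np.allclose(np.sum(final_output, axis=1), 1.0)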