Function Implementations
Code for the formulas needed when implementing an artificial neural network: the softmax function, the sigmoid function, gradient computation by numerical differentiation, gradient computation by error backpropagation, mean squared error, cross-entropy error, and a curve-smoothing function.
import numpy as np

def sigmoid(x):
    # Works on arrays as well, thanks to NumPy broadcasting
    return 1 / (1 + np.exp(-x))

def softmax(x):
    if x.ndim == 2:
        # Batched input: work column-wise on the transpose
        x = x.T
        x = x - np.max(x, axis=0)  # overflow countermeasure
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T
    x = x - np.max(x)  # overflow countermeasure
    return np.exp(x) / np.sum(np.exp(x))
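
A quick sanity check (a minimal sketch, not part of the original notes): sigmoid broadcasts over arrays, and each row of a batched softmax output sums to 1 even for large scores, thanks to the max subtraction:

print(sigmoid(np.array([-1.0, 1.0, 2.0])))    # element-wise values in (0, 1)
scores = np.array([[1010.0, 1000.0, 990.0], [0.3, 2.9, 4.0]])
print(softmax(scores).sum(axis=1))            # [1. 1.]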

def mean_squared_error(y, t):
    return 0.5 * np.sum((y - t) ** 2)

def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    # If the targets are one-hot vectors, convert them to class-label indices
    if t.size == y.size:
        t = t.argmax(axis=1)
    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
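
A small worked example (values chosen for illustration): with class 2 as the correct answer, mean_squared_error returns 0.5 * sum((y - t)**2) = 0.1075 and cross_entropy_error returns -log(0.6) ≈ 0.51; one-hot targets are converted to label indices internally:

t = np.array([0, 0, 1, 0, 0])              # one-hot target, correct class is 2
y = np.array([0.1, 0.05, 0.6, 0.2, 0.05])  # network output
print(mean_squared_error(y, t))            # 0.1075
print(cross_entropy_error(y, t))           # ~0.5108, i.e. -log(0.6 + 1e-7)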

def numerical_diff(f, x):
    h = 1e-4
    # Central difference (f(x+h) - f(x-h)) / 2h
    return (f(x + h) - f(x - h)) / (2 * h)
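
For instance (a hypothetical check), the derivative of f(x) = x**2 at x = 5 is 2 * 5 = 10, and the central difference reproduces it almost exactly:

print(numerical_diff(lambda x: x ** 2, 5.0))  # ~10.0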

def numerical_gradient_1(f, x):  # compute the gradient of f at x (1-D x)
    h = 1e-4
    grad = np.zeros_like(x)
    for idx in range(x.size):
        tmp_val = x[idx]
        x[idx] = tmp_val + h
        fxh1 = f(x)
        x[idx] = tmp_val - h
        fxh2 = f(x)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value
    return grad
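
Since numerical_gradient_1 indexes with a flat range(x.size), it only handles 1-D arrays. A quick check against f(x) = x0**2 + x1**2, whose gradient is (2*x0, 2*x1) (pass floats so zeros_like yields a float array):

print(numerical_gradient_1(lambda x: np.sum(x ** 2), np.array([3.0, 4.0])))  # ~[6. 8.]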

# Compute the gradient of f at x, for an array x of any shape
def numerical_gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)
    # The nditer iterator visits every element; multi_index is the element's
    # index, e.g. (0, 1). Taking the partial derivative with respect to each
    # element in turn yields the gradient.
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)
        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value
        it.iternext()
    return grad
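
Because it iterates by multi_index, this version also works on matrices, e.g. a weight array W with f(W) = sum(W**2) and gradient 2*W (an illustrative check, not from the original notes):

W = np.array([[1.0, 2.0], [3.0, 4.0]])
print(numerical_gradient(lambda w: np.sum(w ** 2), W))  # ~[[2. 4.] [6. 8.]]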

# Adjust parameters with gradient descent
def gradient_descent(f, init_x, lr=0.01, step_num=100):
    x = init_x
    for i in range(step_num):
        grad = numerical_gradient(f, x)
        x = x - lr * grad
    return x
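
For example, starting from init_x = (-3, 4) and minimizing f(x) = x0**2 + x1**2 (a sketch; the learning rate and step count here are only illustrative), the result lands essentially at the minimum (0, 0):

def f_sum_sq(x):  # hypothetical test function: f(x) = x0**2 + x1**2
    return np.sum(x ** 2)

print(gradient_descent(f_sum_sq, np.array([-3.0, 4.0]), lr=0.1, step_num=100))
# ~[-6.1e-10  8.1e-10]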

def sigmoid_grad(x):
    # Analytic derivative of the sigmoid: sigmoid(x) * (1 - sigmoid(x)),
    # used when computing gradients by error backpropagation
    return (1.0 - sigmoid(x)) * sigmoid(x)
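
The analytic gradient can be cross-checked against the numerical one (a quick verification sketch); the two should agree to many decimal places:

x = np.array([-1.0, 0.0, 2.0])
print(sigmoid_grad(x))             # analytic, e.g. 0.25 at x = 0
print(numerical_diff(sigmoid, x))  # central difference, element-wise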

def smooth_curve(x):
    """Smooth a curve, used to make the plot of the loss function less jagged."""
    window_len = 11
    # Pad both ends by reflection so the convolution preserves the length
    s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    w = np.kaiser(window_len, 2)  # Kaiser window as the smoothing kernel
    y = np.convolve(w / w.sum(), s, mode='valid')
    return y[5:len(y) - 5]  # trim the padding back off
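
The reflected padding of window_len - 1 = 10 samples per side is trimmed off at the end, so the output has the same length as the input and can stand in for a recorded loss history directly (a hypothetical usage):

losses = 1.0 / np.arange(1, 201) + 0.1 * np.abs(np.random.randn(200))
print(len(losses), len(smooth_curve(losses)))  # 200 200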