# Numerical differentiation
import numpy as np
import matplotlib.pyplot as plt

def _numerical_gradient_no_batch(f, x):
    """Estimate the gradient of f at a single point x (1-D float array)."""
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    for idx in range(x.size):
        # Central difference for the idx-th partial derivative.
        # x must be a float array; on an integer array the offsets would truncate.
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)

        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value

    return grad
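
# Example (sketch): f(x) = x0**2 + x1**2 has analytic gradient (2*x0, 2*x1),
# so the central-difference estimate at (3.0, 4.0) should be close to (6, 8):
#
#   _numerical_gradient_no_batch(lambda v: np.sum(v ** 2), np.array([3.0, 4.0]))
#   # -> array([6., 8.]) (up to floating-point error)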

def numerical_gradient(f, X):
    if X.ndim == 1:
        return _numerical_gradient_no_batch(f, X)
    else:
        # 2-D input: treat each row as one point and differentiate row by row.
        grad = np.zeros_like(X)
        for idx, x in enumerate(X):
            grad[idx] = _numerical_gradient_no_batch(f, x)
        return grad
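
# Example (sketch): a 2-D input is treated as a batch of points, one per row:
#
#   numerical_gradient(lambda v: np.sum(v ** 2),
#                      np.array([[3.0, 4.0], [0.0, 2.0]]))
#   # -> array([[6., 8.], [0., 4.]])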

def function_2(x):
    # f(x) = x0**2 + x1**2 + ...; for a batch, one sum of squares per row
    if x.ndim == 1:
        return np.sum(x ** 2)
    else:
        return np.sum(x ** 2, axis=1)
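
# Example: function_2 returns a scalar for a single point and one value per
# row for a batch:
#
#   function_2(np.array([3.0, 4.0]))                # -> 25.0
#   function_2(np.array([[3.0, 4.0], [1.0, 2.0]]))  # -> array([25., 5.])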

def tangent_line(f, x):
    d = numerical_gradient(f, x)
    print(d)
    y = f(x) - d * x  # intercept chosen so the line touches f at x
    return lambda t: d * t + y
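
# Example (sketch): for f(x) = x**2 at x0 = 5, the gradient is 10, so the
# returned closure is the tangent line y = 10*t - 25:
#
#   tl = tangent_line(lambda v: np.sum(v ** 2), np.array([5.0]))  # prints [10.]
#   tl(5.0)  # -> array([25.]), i.e. the line touches f at x0 = 5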

if __name__ == '__main__':
    x0 = np.arange(-2, 2.5, 0.25)
    x1 = np.arange(-2, 2.5, 0.25)
    X, Y = np.meshgrid(x0, x1)

    X = X.flatten()
    Y = Y.flatten()

    # function_2 is separable, so the row-wise gradients of np.array([X, Y])
    # yield the field (2*X, 2*Y) over the whole grid at once.
    grad = numerical_gradient(function_2, np.array([X, Y]))

    plt.figure()
    # Draw the negative gradient: the arrows point toward the minimum at the origin.
    plt.quiver(X, Y, -grad[0], -grad[1], angles="xy", color="#666666")
    plt.xlim([-2, 2])
    plt.ylim([-2, 2])
    plt.xlabel('x0')
    plt.ylabel('x1')
    plt.grid()
    plt.show()

# Loss functions

# Mean squared error
def mean_squared_error(y, t):
    return 0.5 * np.sum((y - t) ** 2)
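
# Example (assuming one-hot labels): class "2" is correct, and y is a
# softmax-like network output that puts 0.6 on that class:
#
#   t = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
#   y = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
#   mean_squared_error(y, t)  # -> ~0.0975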

# Cross-entropy error (single sample, one-hot t)
# def cross_entropy_error(y, t):
#     delta = 1e-7  # avoid log(0)
#     return -np.sum(t * np.log(y + delta))
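
# Example (assuming one-hot t): only the probability assigned to the correct
# class matters, so the loss is essentially -log(y[correct]):
#
#   # with the same y and t as in the mean_squared_error example above
#   -np.sum(t * np.log(y + 1e-7))  # -> ~0.51, i.e. -log(0.6)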

# Mini-batch cross-entropy error
# y: network output, t: teacher labels (assumed one-hot, as in the
# single-sample version above)
def cross_entropy_error(y, t):
    if y.ndim == 1:  # promote a single sample to a batch of one
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    delta = 1e-7  # avoid log(0)
    return -np.sum(t * np.log(y + delta)) / batch_size
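
# Example (sketch): for a batch, the result is the mean per-sample loss, so a
# batch of two identical samples gives the same value as a single one:
#
#   t_batch = np.array([[0, 0, 1], [0, 0, 1]])
#   y_batch = np.array([[0.1, 0.3, 0.6], [0.1, 0.3, 0.6]])
#   cross_entropy_error(y_batch, t_batch)  # -> ~0.51 (= -log(0.6))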