阶跃函数
$$f(x)=\begin{cases}1, & x\geq 0\\ 0, & x<0\end{cases}$$
import numpy as np
import matplotlib.pylab as plt
def step_function(x):
    """Step activation: elementwise 1 where x > 0, else 0.

    NOTE(review): the formula above defines f(0) = 1, but this (book)
    implementation uses a strict x > 0, so f(0) = 0 — confirm intent.
    """
    # np.int was removed in NumPy 1.24; the builtin int is the fix.
    return np.array(x > 0, dtype=int)
# Plot the step function over [-5.5, 5.0).
# NOTE(review): the start is -5.5 while every other plot below uses
# -5.0 — presumably a typo, but harmless for the demonstration.
X = np.arange(-5.5, 5.0, 0.1)
Y = step_function(X)
plt.plot(X, Y)
plt.ylim(-0.1, 1.1)  # pad the y-axis so the 0 and 1 levels are fully visible
plt.show()
sigmoid函数
$$f(x)=\frac{1}{1+e^{-x}}$$
def sigmoid(x):
    """Logistic sigmoid: maps any real input smoothly into (0, 1)."""
    z = np.exp(-x)
    return 1.0 / (1.0 + z)
# Plot the sigmoid curve over [-5.0, 5.0).
X = np.arange(-5.0, 5.0, 0.1)
Y = sigmoid(X)
plt.plot(X, Y)
plt.ylim(-0.1, 1.1)  # same y-range as the step plot, for visual comparison
plt.show()
relu函数
$$f(x)=\begin{cases}x, & x\geq 0\\ 0, & x<0\end{cases}$$
def relu(x):
    """Rectified linear unit: pass positives through, clamp negatives to 0."""
    return np.maximum(x, 0)
# Plot ReLU over [-5.0, 5.0).
X = np.arange(-5.0, 5.0, 0.1)
Y = relu(X)
plt.plot(X, Y)
plt.ylim(-1.0, 5.5)  # wider y-range: ReLU output grows linearly with x
plt.show()
阶跃函数 VS sigmoid函数
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + exp(-x)), applied elementwise."""
    denominator = 1.0 + np.exp(-x)
    return 1.0 / denominator
def step_function(x):
    """Step activation: elementwise 1 where x > 0, else 0."""
    # np.int was removed in NumPy 1.24; the builtin int is the fix.
    return np.array(x > 0, dtype=int)
# Overlay the sigmoid (solid) and step (black dashed) curves to show
# that the sigmoid is a smooth approximation of the step function.
X = np.arange(-5.0, 5.0, 0.1)
Y1 = sigmoid(X)
Y2 = step_function(X)
plt.plot(X, Y1)
plt.plot(X, Y2, 'k--')  # 'k--' = black dashed line for the step function
plt.ylim(-0.1, 1.1)
plt.show()
softmax函数
def softmax(x):
    """Numerically stable softmax.

    Accepts a 1-D score vector or a 2-D batch with one sample per row;
    in the batch case the softmax is taken along each row. Subtracting
    the per-sample maximum before exponentiating prevents overflow in
    exp() without changing the result.
    """
    # axis=-1 with keepdims handles both 1-D and 2-D inputs in one
    # expression, replacing the original transpose-based ndim branch.
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)
# Demonstrate softmax on a small score vector: outputs are a
# probability distribution (non-negative, summing to 1).
x = np.array([3.0, 1.0, -3.0])
y = softmax(x)
print(y)
np.sum(y)  # sums to 1 (REPL-style check)
# Plot the probabilities just computed. The original called
# plt.plot(X, Y), which plotted the stale X/Y arrays left over from
# the earlier activation plots, not the softmax result.
plt.plot(x, y, 'o')
plt.ylim(0, 1)
plt.show()
交叉熵函数
def cross_entropy_error(y, t):
    """Cross-entropy loss -sum(t * log(y)) for prediction y and target t.

    A tiny epsilon keeps log() away from log(0) = -inf when a predicted
    probability is exactly zero.
    """
    eps = 1e-7
    log_probs = np.log(y + eps)
    return -np.sum(t * log_probs)
t = [1, 0, 0]  # one-hot label: the correct class is index 0
# NOTE(review): this first call reuses `y` from the earlier softmax
# example ([3.0, 1.0, -3.0] scores) — confirm that is the intended input.
cross_entropy_error(np.array(y), np.array(t))
y = [0.9, 0.05, 0.05]  # confident, correct prediction -> small loss
cross_entropy_error(np.array(y), np.array(t))