import numpy as np
from scipy import optimize
from scipy.special import expit, logit
from scipy.stats import logistic
from sklearn import base, metrics
from sklearn.utils.validation import check_X_y
"""expit就是sigmoid函数"""
print(expit([-np.inf, -5.0,-1.0, 0.,1.,5.0,np.inf]))
"""logit is the inverse of expit:"""
print(logit(expit([-np.inf, -5.0,-1.0, 0.,1.,5.0,np.inf])))
sigmoid 函数
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import expit, logit
# Sample 100 evenly spaced points on [-10, 10] and map them through the sigmoid.
A = np.linspace(-10, 10, num=100)
H = expit(A)
代码:
import numpy as np
import matplotlib.pyplot as plt
def MarginFunction(x):
    """Logistic sigmoid: sigma(x) = 1 / (1 + exp(-x)).

    Computed via the identity sigma(x) = (1 + tanh(x/2)) / 2, which never
    overflows: the direct form np.exp(-x) emits a RuntimeWarning and
    overflows to inf for large negative x (roughly x < -709).
    Accepts scalars or numpy arrays; returns the same shape.
    """
    return 0.5 * (1.0 + np.tanh(0.5 * x))
# Plot the sigmoid over [-10, 10], coloring each point by its own value.
x = np.linspace(-10, 10, num=100)
y = MarginFunction(x)
plt.scatter(x, y, c=y, marker=".")
plt.grid()
plt.show()
重要等式
===============================================================
在机器学习中,sigmoid函数经常用于构建损失函数(log_loss)。
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import expit, logit
def log_loss(Z):
    """Numerically stable elementwise logistic loss log(1 + exp(-Z)).

    The computation splits on the sign of Z so that exp() is only ever
    called with a non-positive argument (no overflow), and uses np.log1p
    for precision when exp(...) is tiny.

    Fixes vs. the original: np.log1p replaces np.log(1 + ...), and the
    output buffer is always float — np.zeros_like(Z) silently truncated
    every result to 0 for integer input. Also accepts plain lists.
    """
    Z = np.asarray(Z)
    idx = Z > 0
    out = np.zeros_like(Z, dtype=float)
    out[idx] = np.log1p(np.exp(-Z[idx]))
    out[~idx] = -Z[~idx] + np.log1p(np.exp(Z[~idx]))
    return out
A = np.linspace(-10, 10, 100)
# Use the stable log_loss helper defined above: the previous
# loss = -np.log(expit(A)) duplicates the same quantity in a form that
# loses precision (expit underflows, then log blows up) for large |A|.
loss = log_loss(A)
plt.scatter(A, loss)
plt.show()
代码
import numpy as np
import matplotlib.pyplot as plt
def MarginFunction(x):
    """Logistic sigmoid: sigma(x) = 1 / (1 + exp(-x)).

    Computed via the identity sigma(x) = (1 + tanh(x/2)) / 2, which never
    overflows: the direct form np.exp(-x) emits a RuntimeWarning and
    overflows to inf for large negative x (roughly x < -709).
    Accepts scalars or numpy arrays; returns the same shape.
    """
    return 0.5 * (1.0 + np.tanh(0.5 * x))
# Base-2 logistic loss L(z) = -log2(sigmoid(z)), plotted over [-10, 10].
x = np.linspace(-10, 10, num=100)
y = -np.log2(MarginFunction(x))
plt.scatter(x, y, c=y, marker=".")
plt.ylabel("L(z)")
plt.xlabel("z")
plt.grid()
plt.show()
sigmoid函数求导过程
对数 sigmoid函数求导
log_loss的导数(即负的对数sigmoid函数的导数)
3. logistic loss and zero-one loss
we can see that the natural-log logistic loss -ln σ(z) does not upper bound the zero-one loss: at z = 0 it equals ln 2 ≈ 0.69 < 1. Rescaling by 1/ln 2 — i.e. using the base-2 logarithm, as in the previous plot — restores the upper bound.
import numpy as np
import matplotlib.pyplot as plt
def MarginFunction(x):
    """Logistic sigmoid: sigma(x) = 1 / (1 + exp(-x)).

    Computed via the identity sigma(x) = (1 + tanh(x/2)) / 2, which never
    overflows: the direct form np.exp(-x) emits a RuntimeWarning and
    overflows to inf for large negative x (roughly x < -709).
    Accepts scalars or numpy arrays; returns the same shape.
    """
    return 0.5 * (1.0 + np.tanh(0.5 * x))
x = np.linspace(-3, 3, 100)
# Natural-log logistic loss of the margin.
y = -np.log(MarginFunction(x))
# Zero-one loss: 1 where the margin is negative, 0 otherwise.
# Vectorized comparison replaces the original element-by-element
# Python loop; the result is the same float array.
y2 = (x < 0).astype(float)
plt.scatter(x, y, c=y, marker=".")
plt.scatter(x, y2, c="r", marker=".")
plt.ylabel("L(z)")
plt.xlabel("z")
plt.grid()
plt.show()
4. the squared error with the sigmoid function vs. the zero-one error
import numpy as np
import matplotlib.pyplot as plt
def pi(x):
    """Logistic sigmoid: pi(x) = 1 / (1 + exp(-x)).

    Computed via the identity sigma(x) = (1 + tanh(x/2)) / 2, which never
    overflows: the direct form np.exp(-x) emits a RuntimeWarning and
    overflows to inf for large negative x (roughly x < -709).
    Accepts scalars or numpy arrays; returns the same shape.
    """
    return 0.5 * (1.0 + np.tanh(0.5 * x))
x = np.linspace(-10, 10, 1000)
# Predicted probability of the positive class.
X = pi(x)
# Zero-one error as a function of X: predict class 1 when X >= 0.5,
# so the error is 1 exactly when X < 0.5. Vectorized comparison
# replaces the original element-by-element Python loop.
Y = (X < 0.5).astype(float)
# Squared error of the sigmoid output against the implied label
# (label 1 when X < 0.5, label 0 otherwise); np.where replaces the
# second Python loop with the identical branchwise computation.
Y2 = np.where(X < 0.5, (1.0 - X) ** 2, X ** 2)
plt.scatter(X, Y, c="b", marker=".")
plt.scatter(X, Y2, c="r", marker=".")
plt.grid()
plt.show()