1、梯度
梯度是一个向量,表示某一函数在该点处的方向导数沿着该方向取得最大值,即函数在该点处沿着该方向变化最快,变化率最大(变化率即该梯度向量的模);当函数为一维函数的时候,梯度其实就是导数。
2、原理推导
# Original (objective) function: y = x^2
def f(x):
    """Return the 1-D objective value x squared."""
    return x ** 2
# Derivative of f: dy/dx = 2x
def h(x):
    """Return the derivative of f evaluated at x."""
    return 2 * x
# --- 1-D gradient-descent demo: minimize y = x^2 ---
# Imported here so this cell is self-contained; the notebook otherwise only
# imports numpy/matplotlib in a later cell.
import numpy as np
import matplotlib.pyplot as plt

X = []            # x value at every iteration (descent path, for plotting)
Y = []            # f(x) at every iteration
# Initialize the starting point
x = 2             # initial position
step = 0.8        # step size; x_new = -0.6 * x here, so the iteration converges
f_change = f(x)
f_current = f(x)
X.append(x)
Y.append(f_current)
# Stopping rule: iterate until the height change drops below 1e-10
while f_change > 1e-10:
    x = x - step * h(x)                   # gradient step: x <- x - step * f'(x)
    tmp = f(x)
    f_change = np.abs(f_current - tmp)    # height difference between consecutive points
    f_current = tmp
    X.append(x)
    Y.append(f_current)
X2 = np.arange(-2.1, 2.15, 0.05)
Y2 = X2 ** 2
plt.plot(X2, Y2, linewidth=2)             # the curve y = x^2
plt.plot(X, Y, 'bo--')                    # the descent path
plt.title('$y=x^2$的最小值的坐标为:({},{})'.format(x, f_current))
plt.show()
3、多维更能体现梯度
梯度:函数在该点处沿着该方向变化最快,类比下山,不考虑平衡,当然是越陡峭下山越快。
from mpl_toolkits.mplot3d import Axes3D
# 2-D objective: z = x^2 + y^2 (redefines the 1-D `f` from the earlier cell)
def f(x, y):
    """Return the paraboloid value x^2 + y^2."""
    return x ** 2 + y ** 2
# Partial derivative along either axis: d(t^2)/dt = 2t
def h(t):
    """Return the derivative 2*t (used for both the x and y partials)."""
    return 2 * t
# --- 2-D gradient-descent demo: minimize z = x^2 + y^2 ---
# Imported here so this cell is self-contained (the notebook imports these
# again in a later cell).
import numpy as np
import matplotlib.pyplot as plt

X = []            # x coordinate at every iteration
Y = []            # y coordinate at every iteration
Z = []            # f(x, y) at every iteration
# Initialize the starting point
x = 2             # initial position
y = 2
step = 0.1        # step size
f_change = f(x, y)
f_current = f(x, y)
X.append(x)
Y.append(y)
Z.append(f_current)
# Stopping rule: iterate until the height change drops below 1e-10
while f_change > 1e-10:
    x = x - step * h(x)                   # step along -df/dx
    y = y - step * h(y)                   # step along -df/dy
    tmp = f(x, y)                         # evaluate once, reuse below
    f_change = np.abs(f_current - tmp)    # height difference between consecutive points
    f_current = tmp
    X.append(x)
    Y.append(y)
    Z.append(f_current)
fig = plt.figure()
# NOTE(review): direct Axes3D(fig) construction is deprecated in recent
# matplotlib; fig.add_subplot(projection='3d') is the modern form.
ax = Axes3D(fig)
X2 = np.arange(-2, 2, 0.2)
Y2 = np.arange(-2, 2, 0.2)
X2, Y2 = np.meshgrid(X2, Y2)
Z2 = X2 ** 2 + Y2 ** 2
# rstride/cstride set the row/column sampling of the surface; cmap sets colors
ax.plot_surface(X2, Y2, Z2, rstride=1, cstride=1, cmap='rainbow')
ax.plot(X, Y, Z, 'ro--')                  # the descent path
ax.set_title('$z=x^2+y^2$的最小值的坐标为:({},{},{})'.format(x, y, f_current))
plt.show()
4、标准公式:θ = θ − α·∇J(θ),其中 α 为学习率(步长),∇J(θ) 为目标函数在当前点的梯度;每次沿负梯度方向更新参数,直到前后两次函数值之差小于给定阈值(收敛)。
5、案例
# Configure matplotlib's display backend inside Jupyter.
# NOTE: `%matplotlib tk` is an IPython line magic — valid only in a notebook
# or IPython session, not in a plain .py script.
%matplotlib tk
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import math
# Make matplotlib render Chinese text (SimHei font) and minus signs correctly
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
from mpl_toolkits.mplot3d import Axes3D
# 1-D objective for the case study: a quadratic bowl centred at x = 0.25
def f1(x):
    """Return the objective value 0.5 * (x - 0.25) ** 2."""
    d = x - 0.25
    return 0.5 * d ** 2
# Derivative of f1: dy/dx = (x - 0.25)
def h1(x):
    """Return the first derivative of f1 at x."""
    slope = x - 0.25
    return 0.5 * 2 * slope
# --- Solve the 1-D case study with gradient descent (with iteration cap) ---
GD_X = []        # x after each step (descent path)
GD_Y = []        # f1(x) after each step
x = 4            # starting point
alpha = 0.5      # learning rate
f_change = f1(x)
f_current = f_change
GD_X.append(x)
GD_Y.append(f_current)
iter_num = 0
# Stop when the height change is below 1e-10 OR after 100 iterations (safety cap)
while f_change > 1e-10 and iter_num < 100:
    iter_num += 1
    x = x - alpha * h1(x)                 # gradient step
    tmp = f1(x)
    f_change = np.abs(f_current - tmp)    # height difference between consecutive points
    f_current = tmp
    GD_X.append(x)
    GD_Y.append(f_current)
print(u"最终结果为:(%.5f, %.5f)" % (x, f_current))
print(u"迭代过程中X的取值,迭代次数:%d" % iter_num)
print(GD_X)
# Build data for the reference curve; f1 is plain arithmetic, so it
# vectorizes directly over the numpy array (no per-element map needed).
X = np.arange(-4, 4.5, 0.05)
Y = f1(X)
# Plot the curve and the descent path on top of it
plt.figure(facecolor='w')
plt.plot(X, Y, 'r-', linewidth=2)
plt.plot(GD_X, GD_Y, 'bo--', linewidth=2)
plt.title(u'函数$y=0.5 * (θ - 0.25)^2$; \n学习率:%.3f; 最终解:(%.3f, %.3f);迭代次数:%d' % (alpha, x, f_current, iter_num))
plt.show()
# 2-D objective for the case study: y = 0.6 * (x + y)^2 - x * y
def f2(x, y):
    """Return the objective value 0.6 * (x + y) ** 2 - x * y."""
    s = x + y
    return 0.6 * s ** 2 - x * y
# Partial derivative of f2 with respect to x
def hx2(x, y):
    """Return d(f2)/dx = 0.6 * 2 * (x + y) - y."""
    s = x + y
    return 0.6 * 2 * s - y
# Partial derivative of f2 with respect to y
def hy2(x, y):
    """Return d(f2)/dy = 0.6 * 2 * (x + y) - x."""
    s = x + y
    return 0.6 * 2 * s - x
# --- Solve the 2-D case study with gradient descent (with iteration cap) ---
GD_X1 = []       # x1 after each step
GD_X2 = []       # x2 after each step
GD_Y = []        # f2(x1, x2) after each step
x1 = 4           # starting point
x2 = 4
alpha = 0.5      # learning rate
f_change = f2(x1, x2)
f_current = f_change
GD_X1.append(x1)
GD_X2.append(x2)
GD_Y.append(f_current)
iter_num = 0
# Stop when the height change is below 1e-10 OR after 100 iterations (safety cap)
while f_change > 1e-10 and iter_num < 100:
    iter_num += 1
    # Snapshot the current point so BOTH partials are evaluated at the same
    # (pre-update) coordinates — updating x1 first and then using it for x2
    # would silently turn this into coordinate descent.
    prex1 = x1
    prex2 = x2
    x1 = x1 - alpha * hx2(prex1, prex2)
    x2 = x2 - alpha * hy2(prex1, prex2)
    tmp = f2(x1, x2)
    f_change = np.abs(f_current - tmp)
    f_current = tmp
    GD_X1.append(x1)
    GD_X2.append(x2)
    GD_Y.append(f_current)
print(u"最终结果为:(%.5f, %.5f, %.5f)" % (x1, x2, f_current))
print(u"迭代过程中X的取值,迭代次数:%d" % iter_num)
print(GD_X1)
# Build the surface grid; f2 is plain arithmetic, so it evaluates directly
# on the meshgrid arrays (no flatten/map/reshape round-trip needed).
X1 = np.arange(-4, 4.5, 0.2)
X2 = np.arange(-4, 4.5, 0.2)
X1, X2 = np.meshgrid(X1, X2)
Y = f2(X1, X2)
# Plot the surface and the descent path on top of it
fig = plt.figure(facecolor='w')
# NOTE(review): direct Axes3D(fig) construction is deprecated in recent
# matplotlib; fig.add_subplot(projection='3d') is the modern form.
ax = Axes3D(fig)
ax.plot_surface(X1, X2, Y, rstride=1, cstride=1, cmap=plt.cm.jet)
ax.plot(GD_X1, GD_X2, GD_Y, 'bo--', linewidth=2)
ax.set_title(u'函数$y=0.6 * (θ1 + θ2)^2 - θ1 * θ2$;\n学习率:%.3f; 最终解:(%.3f, %.3f, %.3f);迭代次数:%d' % (alpha, x1, x2, f_current, iter_num))
plt.show()
6、分类
一般用SGD(随机梯度下降):每次迭代只用一个(或一小批)样本计算梯度,相比批量梯度下降(BGD)速度足够快。
学习文档
http://python.usyiyi.cn/
https://docs.scipy.org/doc/
http://pandas.pydata.org/pandas-docs/stable/index.html