airfoil4755 download
Link: https://pan.baidu.com/s/1YEtNjJ0_G9eeH6A6vHXhnA
Extraction code: dwjq
Gradient Descent
%matplotlib inline
import numpy as np
import torch
import time
from torch import nn, optim
import math
import sys
sys.path.append('/home/kesci/input')
import d2lzh1981 as d2l
One-Dimensional Gradient Descent
Proof sketch: moving the variable in the direction opposite to the gradient decreases the function value.

Taylor expansion:

$$f(x+\epsilon)=f(x)+\epsilon f^{\prime}(x)+\mathcal{O}\left(\epsilon^{2}\right)$$

Substituting $\epsilon = -\eta f^{\prime}((x)$ — that is, a step of size $\eta > 0$ against the gradient:

$$f\left(x-\eta f^{\prime}(x)\right)=f(x)-\eta f^{\prime 2}(x)+\mathcal{O}\left(\eta^{2} f^{\prime 2}(x)\right)$$

For sufficiently small $\eta$, the first-order term $-\eta f^{\prime 2}(x) \leq 0$ dominates the remainder, so

$$f\left(x-\eta f^{\prime}(x)\right) \lesssim f(x)$$

This motivates the update rule

$$x \leftarrow x-\eta f^{\prime}(x)$$
Example: $f(x) = x^2$
def f(x):
    return x**2  # Objective function

def gradf(x):
    return 2 * x  # Its derivative

def gd(eta):
    x = 10
    results = [x]
    for i in range(10):
        x -= eta * gradf(x)
        results.append(x)
    print('epoch 10, x:', x)
    return results

res = gd(0.2)
epoch 10, x: 0.06046617599999997
def show_trace(res):
    n = max(abs(min(res)), abs(max(res)))
    f_line = np.arange(-n, n, 0.01)
    d2l.set_figsize((3.5, 2.5))
    d2l.plt.plot(f_line, [f(x) for x in f_line], '-')
    d2l.plt.plot(res, [f(x) for x in res], '-o')
    d2l.plt.xlabel('x')
    d2l.plt.ylabel('f(x)')
show_trace(res)
Learning Rate

The learning rate $\eta$ controls the step size. Too small, and progress is slow; too large, and the iterates overshoot and may diverge, as the two runs below show.
show_trace(gd(0.05))
epoch 10, x: 3.4867844009999995
show_trace(gd(1.1))
epoch 10, x: 61.917364224000096
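Because $f(x) = x^2$ makes each update linear in $x$, the effect of the learning rate can be checked in closed form:

$$x_{k+1} = x_k - \eta \cdot 2x_k = (1 - 2\eta)\,x_k \quad\Rightarrow\quad x_{10} = (1 - 2\eta)^{10} \cdot 10$$

With $\eta = 0.2$ this gives $0.6^{10} \cdot 10 \approx 0.0605$; with $\eta = 0.05$, $0.9^{10} \cdot 10 \approx 3.487$ (slow convergence); with $\eta = 1.1$, $(-1.2)^{10} \cdot 10 \approx 61.92$ (divergence), matching the printed outputs above. The iteration converges exactly when $|1 - 2\eta| < 1$, i.e. $0 < \eta < 1$.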
Local Minima

Example: $f(x) = x\cos(cx)$
c = 0.15 * np.pi

def f(x):
    return x * np.cos(c * x)  # Non-convex objective with multiple local minima

def gradf(x):
    return np.cos(c * x) - c * x * np.sin(c * x)  # Its derivative (product rule)

show_trace(gd(2))
epoch 10, x: -1.528165927635083
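On a non-convex objective, gradient descent can settle in different local minima depending on where it starts. Below is a minimal sketch of that dependence, using a hypothetical gd_from helper (an assumption, not part of the original notebook) that exposes the starting point instead of hardcoding x = 10:

# Hypothetical variant of gd (not in the original notebook) that takes
# the starting point as an argument.
def gd_from(x0, eta, steps=10):
    x = x0
    results = [x]
    for _ in range(steps):
        x -= eta * gradf(x)  # same update rule as gd
        results.append(x)
    return results

# Runs started at different points may settle in different local minima.
show_trace(gd_from(-10, 2))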
Multi-Dimensional Gradient Descent

The gradient collects the partial derivatives:

$$\nabla f(\mathbf{x})=\left[\frac{\partial f(\mathbf{x})}{\partial x_{1}}, \frac{\partial f(\mathbf{x})}{\partial x_{2}}, \dots, \frac{\partial f(\mathbf{x})}{\partial x_{d}}\right]^{\top}$$

The first-order Taylor expansion again shows that a small step against the gradient decreases $f$:

$$f(\mathbf{x}+\boldsymbol{\epsilon})=f(\mathbf{x})+\boldsymbol{\epsilon}^{\top} \nabla f(\mathbf{x})+\mathcal{O}\left(\|\boldsymbol{\epsilon}\|^{2}\right)$$

$$\mathbf{x} \leftarrow \mathbf{x}-\eta \nabla f(\mathbf{x})$$
def train_2d(trainer, steps=20):
    x1, x2 = -5, -2  # Initial point
    results = [(x1, x2)]
    for i in range(steps):
        x1, x2 = trainer(x1, x2)  # One optimizer step
        results.append((x1, x2))
    print('epoch %d, x1 %f, x2 %f' % (i + 1, x1, x2))
    return results
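To exercise train_2d, here is a minimal sketch using the quadratic objective $f(x_1, x_2) = x_1^2 + 2x_2^2$ (an assumed example; the section is cut off before the original defines one), whose gradient is $(2x_1, 4x_2)$:

eta = 0.1  # Assumed learning rate

def f_2d(x1, x2):  # Objective: x1^2 + 2 * x2^2
    return x1 ** 2 + 2 * x2 ** 2

def gd_2d(x1, x2):  # One gradient-descent step: x <- x - eta * grad f(x)
    return (x1 - eta * 2 * x1, x2 - eta * 4 * x2)

res_2d = train_2d(gd_2d)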