Optimization Algorithms: Gradient Descent

There is nothing mysterious about it: the key idea is the mathematical property of the gradient. The gradient points in the direction of steepest ascent, so repeatedly stepping against it decreases the function value fastest.
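Concretely, each iteration applies the standard update rule, where η is the learning rate and ∇f the gradient (stated here in LaTeX for completeness):

$$x_{t+1} = x_t - \eta \, \nabla f(x_t)$$

For a suitably small η, each step moves the iterate downhill, and the sequence settles near a minimum. Both code examples below implement exactly this rule.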

Gradient Descent: 2D Code

import numpy as np
import matplotlib.pyplot as plt
# Objective function
def f(x):
    return (x - 3)**2

# Gradient of the objective function
def grad_f(x):
    return 2 * (x - 3)

# Gradient descent algorithm
def gradient_descent(starting_point, learning_rate, num_iterations):
    x = starting_point
    x_history = [x]
    
    for _ in range(num_iterations):
        gradient = grad_f(x)
        x = x - learning_rate * gradient
        x_history.append(x)
    
    return x, x_history

# Parameter settings
starting_point = 0  # initial point
learning_rate = 0.1  # learning rate
num_iterations = 20  # number of iterations

# Run gradient descent
optimal_x, x_history = gradient_descent(starting_point, learning_rate, num_iterations)

# Plot the objective function
x_values = np.linspace(-1, 5, 400)
y_values = f(x_values)

plt.figure(figsize=(10, 6))
plt.plot(x_values, y_values, label='Objective Function $(x-3)^2$', color='blue')
plt.scatter(x_history, [f(x) for x in x_history], color='red', label='Gradient Descent Steps')
plt.plot(x_history, [f(x) for x in x_history], linestyle='--', color='red')
plt.axvline(x=3, color='green', linestyle='--', label='Minimum x=3')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Gradient Descent Visualization')
plt.legend()
plt.grid()
plt.show()
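A note on the learning rate: for this quadratic, each update shrinks the distance to the minimum by a factor of (1 - 2 * learning_rate), so rates in (0, 1) converge while larger ones diverge. The short sketch below (an illustrative addition that reuses the gradient_descent function above; the rate values are arbitrary) makes this visible:

for lr in [0.05, 0.1, 0.5, 1.1]:
    # Run 20 iterations from x = 0 and report the distance to the minimum at x = 3.
    # lr = 1.1 lies outside (0, 1), so it should visibly diverge.
    final_x, _ = gradient_descent(starting_point=0, learning_rate=lr, num_iterations=20)
    print(f"lr={lr}: final x = {final_x:.6f}, error = {abs(final_x - 3):.2e}")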

Gradient Descent: 3D Code

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the 3D projection (needed on older Matplotlib versions)

# Objective function
def f(x, y):
    return (x - 2)**2 + (y - 3)**2

# Gradient of the objective function
def grad_f(x, y):
    df_dx = 2 * (x - 2)
    df_dy = 2 * (y - 3)
    return df_dx, df_dy

# Gradient descent algorithm
def gradient_descent(starting_point, learning_rate, num_iterations):
    x, y = starting_point
    x_history, y_history = [x], [y]
    
    for _ in range(num_iterations):
        df_dx, df_dy = grad_f(x, y)
        x = x - learning_rate * df_dx
        y = y - learning_rate * df_dy
        x_history.append(x)
        y_history.append(y)
    
    return (x, y), x_history, y_history

# Parameter settings
starting_point = (0, 0)  # initial point
learning_rate = 0.1  # learning rate
num_iterations = 30  # number of iterations

# Run gradient descent
optimal_point, x_history, y_history = gradient_descent(starting_point, learning_rate, num_iterations)

# Plot the objective function as a 3D surface
x = np.linspace(0, 4, 100)
y = np.linspace(0, 6, 100)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)

fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')

# Draw the surface plot of the objective function
ax.plot_surface(X, Y, Z, cmap='viridis', alpha=0.7)

# Plot the gradient descent path
ax.plot(x_history, y_history, f(np.array(x_history), np.array(y_history)), color='red', marker='o')

# Mark the location of the minimum of the objective function
ax.scatter(2, 3, f(2, 3), color='green', s=100, label='Minimum (2, 3)')

ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('f(X, Y)')
ax.set_title('3D Gradient Descent Visualization')
ax.legend()

plt.show()
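In practice, one rarely fixes the number of iterations in advance; a common variant stops once the gradient norm drops below a tolerance. Below is a minimal sketch of that idea (the gradient_descent_tol name, the tol parameter, and the early exit are illustrative additions, not part of the code above; grad_f is the two-variable gradient defined earlier):

import numpy as np

def gradient_descent_tol(starting_point, learning_rate, tol=1e-6, max_iterations=10000):
    # Iterate until ||grad f|| < tol, or until the iteration budget runs out.
    x, y = starting_point
    for i in range(max_iterations):
        df_dx, df_dy = grad_f(x, y)       # gradient at the current point
        if np.hypot(df_dx, df_dy) < tol:  # gradient (nearly) zero: converged
            break
        x -= learning_rate * df_dx
        y -= learning_rate * df_dy
    return (x, y), i

(opt_x, opt_y), iters = gradient_descent_tol((0, 0), 0.1)
print(f"Converged to ({opt_x:.6f}, {opt_y:.6f}) in {iters} iterations")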
