# A Python implementation of gradient descent.
# Gradient descent, reference: https://blog.csdn.net/pengjian444/article/details/71075544
import numpy as np
def cal_rosenbrock(x1, x2):
    """Evaluate the Rosenbrock function at the point (x1, x2).

    The Rosenbrock "banana" function has its global minimum of 0 at (1, 1).

    :param x1: first coordinate
    :param x2: second coordinate
    :return: function value (1 - x1)^2 + 100 * (x2 - x1^2)^2
    """
    linear_term = (1 - x1) ** 2
    valley_term = (x2 - x1 ** 2) ** 2
    return linear_term + 100 * valley_term
def cal_rosenbrock_prax(x1, x2):
    """Partial derivative of the Rosenbrock function with respect to x1.

    d/dx1 [(1 - x1)^2 + 100 * (x2 - x1^2)^2] = -2 + 2*x1 - 400*(x2 - x1^2)*x1

    :param x1: first coordinate
    :param x2: second coordinate
    :return: gradient component along x1
    """
    # Keep the same evaluation order as the analytic expansion so the
    # floating-point result is bit-identical.
    residual = x2 - x1 ** 2
    return -2 + 2 * x1 - 400 * residual * x1
def cal_rosenbrock_pray(x1, x2):
    """Partial derivative of the Rosenbrock function with respect to x2.

    d/dx2 [(1 - x1)^2 + 100 * (x2 - x1^2)^2] = 200 * (x2 - x1^2)

    :param x1: first coordinate
    :param x2: second coordinate
    :return: gradient component along x2
    """
    residual = x2 - x1 ** 2
    return 200 * residual
def for_rosenbrock_func(max_iter_count=100000, step_size=0.001):
pre_x = np.zeros((2,), dtype=np.float32) # 给出x, y初始值,为0,0
loss = 10 # 随便初始化一个数
iter_count = 0
while loss > 0.00001 and iter_count < max_iter_count: # 终止条件 已知loss最小值为0
error = np.zeros((2,), dtype=np.float32)
error[0] = cal_rosenbrock_