% Steepest descent with exact line search for f(x) = x1^2 + 25*x2^2,
% starting from x0 = [3; 2]. Iterates until ||x_{k+1} - x_k|| < tol.
% Result: xx holds the converged minimizer (analytically [0; 0]).
clear; clc
syms x1 x2 a                       % a = symbolic step length for the line search

tol = 0.001;                       % stopping tolerance on the step norm
x0  = [3; 2];                      % starting point
X   = [x1; x2];
f   = x1^2 + 25*x2^2;              % objective function
df  = gradient(f, X);              % symbolic gradient of f

dfx = subs(df, X, x0);             % gradient evaluated at x0
Fx0 = subs(f,  X, x0);             % f(x0)

% First exact line search: minimize h(a) = f(x0 - a*grad f(x0)).
% h is derived from f itself (not hard-coded), so changing f above
% automatically changes the line search.
X1  = x0 - a.*dfx;
h   = subs(f, X, X1);
a_k = solve(diff(h, a), a);        % optimal step length
a_k = a_k(1);                      % guard: solve may return several roots
X1  = subs(x0 - a.*dfx, a, a_k);   % new iterate with the numeric step
FX1 = subs(f, X, X1);
dfX = subs(df, X, X1);             % gradient at the new iterate

k = 0;
% Per-iteration history (preallocated empty; grown in the loop):
Da   = [];    % step length chosen each iteration
DFx0 = [];    % f at the previous point
DFX1 = [];    % f at the new point
Dx   = [];    % previous point, 1st component
Dxx  = [];    % previous point, 2nd component
DX   = [];    % new point, 1st component
DXX  = [];    % new point, 2nd component

while norm(X1 - x0) > tol
    k = k + 1;
    DX(k)   = X1(1);
    Dx(k)   = x0(1);
    Dxx(k)  = x0(2);
    DXX(k)  = X1(2);
    Da(k)   = a_k;                 % record this iteration's step length
    DFx0(k) = Fx0;
    DFX1(k) = FX1;

    % Advance: the new point becomes the current point.
    x0  = X1;
    Fx0 = FX1;
    dfx = dfX;

    % Exact line search at the new point. Because the solved step lives
    % in a_k, the symbolic variable a never needs to be "re-created"
    % (the original code did this via an a = b aliasing trick).
    X1  = x0 - a.*dfx;
    h   = subs(f, X, X1);
    a_k = solve(diff(h, a), a);
    a_k = a_k(1);
    X1  = subs(x0 - a.*dfx, a, a_k);
    FX1 = subs(f, X, X1);
    dfX = subs(df, X, X1);
end
xx = X1;                           % converged minimizer