from numpy import *
import numpy
import numpy as np
def f(A, x, b):
    """Quadratic objective 0.5 * x^T A x + b^T x evaluated at x."""
    Ax = np.dot(A, x)
    return 0.5 * np.dot(x, Ax) + np.dot(b, x)
def g(A, x, b):
    """Gradient of the quadratic objective f at x, namely A x + b."""
    return np.add(np.dot(A, x), b)
#
def minimize_cg(A, b, x0, jac=None, gtol=1e-5, maxiter=None, disp=False):
    """
    Minimize the quadratic 0.5*x^T A x + b^T x with the (Fletcher-Reeves)
    conjugate gradient method.

    Parameters
    ----------
    A : ndarray
        Matrix of the quadratic form (assumed symmetric positive definite
        for convergence -- TODO confirm with callers).
    b : array_like
        Linear term of the objective.
    x0 : array_like
        Initial guess.
    jac : optional
        Unused; kept for signature compatibility with existing callers.
    gtol : float
        Infinity norm of the gradient must drop below `gtol` for
        successful termination.
    maxiter : int, optional
        Maximum number of iterations (defaults to 200 * len(x0)).
    disp : bool
        Set to True to print convergence messages.

    Returns
    -------
    ndarray
        The final iterate xk (the approximate minimizer).
    """
    if maxiter is None:
        maxiter = len(x0) * 200
    xk = np.asarray(x0, dtype=float)
    gfk = np.dot(A, xk) + b          # gradient at the current iterate
    pk = -gfk                        # initial search direction: steepest descent
    gnorm = np.amax(np.abs(gfk))     # infinity norm of the gradient
    k = 0
    warnflag = 0
    while (gnorm > gtol) and (k < maxiter):
        deltak = np.dot(gfk, gfk)
        # Exact line search along pk for a quadratic objective.
        alpha_k = -np.dot(gfk, pk) / np.dot(np.dot(pk, A.T), pk)
        xk = xk + alpha_k * pk
        gfkp1 = np.dot(A, xk) + b
        # Fletcher-Reeves coefficient, clamped at 0 as a safeguard.
        beta_k = max(0, np.dot(gfkp1, gfkp1) / deltak)
        pk = -gfkp1 + beta_k * pk
        gfk = gfkp1
        # BUGFIX: original line used `//` (floor division) followed by an
        # undefined identifier instead of a `#` comment -> NameError.
        gnorm = np.amax(np.abs(gfk))  # max-abs value as the norm
        k += 1
    if gnorm > gtol:
        # BUGFIX: original never set warnflag, so exceeding maxiter was
        # still reported as success.
        warnflag = 1
    if disp:
        if warnflag == 0:
            print('success')
        else:
            print('Warning: maximum number of iterations has been exceeded.')
        # BUGFIX: the original swapped these two labels (printed xk as the
        # "function value" and f(...) as "function evaluations").
        print(" Current solution x: ", xk)
        print(" Iterations: %d" % k)
        print(" Current function value: ", f(A, xk, b))
        print(" actual value x*: ", -np.dot(np.linalg.inv(A), b))
        print(" actual function value f(x*): ",
              f(A, -np.dot(np.linalg.inv(A), b), b))
    return xk
if __name__ == '__main__':
    # Demo: minimize 0.5*x^T A x + b^T x for a small SPD system.
    A_demo = np.array([[4, 1], [1, 3]])
    b_demo = [-1, -2]
    start = [2, 1]
    grad0 = g(A_demo, start, b_demo)
    minimize_cg(A_demo, b_demo, start, grad0, disp=True, maxiter=100)
# Numerical analysis: conjugate gradient method, Python implementation.
# (Blog-page residue, originally published 2023-03-08 11:33:10 — kept as a comment
# because bare text here is a SyntaxError.)