# 共轭梯度法 (Conjugate Gradient) 与 QR 法 (QR decomposition) — demo solvers for A x = b
import numpy as np
import numpy.linalg as nl
def ConjugateGradient(A, b, n, x0, tol=1e-6):
    """Solve A x = b with the conjugate gradient method.

    Args:
        A: (n, n) symmetric positive-definite matrix.
        b: (n, 1) right-hand-side column vector.
        n: maximum number of iterations.
        x0: (n, 1) initial guess.
        tol: relative-residual stopping tolerance (default 1e-6, the
            value previously hard-coded inside the loop).

    Returns:
        (n, 1) approximate solution vector.
    """
    x = x0
    r = b - np.dot(A, x0)  # initial residual b - A x0
    p = r                  # initial search direction
    norm_b = nl.norm(b)    # loop-invariant, hoisted out of the loop
    for _ in range(n):
        # Step length along p: (r^T r) / (p^T A p); both dots yield
        # (1, 1) arrays that broadcast cleanly below.
        a = np.dot(r.transpose(), r) / np.dot(np.dot(p.transpose(), A), p)
        x = x + a * p
        rr = r - np.dot(a * A, p)  # updated residual
        # Stop once the true relative residual ||A x - b|| / ||b|| is small.
        if nl.norm(np.dot(A, x) - b) / norm_b < tol:
            break
        # Fletcher-Reeves coefficient; the new direction stays
        # A-conjugate to the previous ones.
        B = np.dot(rr.transpose(), rr) / np.dot(r.transpose(), r)
        p = rr + B * p
        r = rr
    return x
def QR(A, b, n):
    """Solve A x = b via QR decomposition (modified Gram-Schmidt).

    Args:
        A: (n, n) matrix with linearly independent columns.
        b: (n, 1) right-hand-side column vector.
        n: dimension of the system.

    Returns:
        (n, 1) solution vector.
    """
    Q = np.zeros([n, n])
    for k, col in enumerate(A.transpose()):
        u = np.copy(col)
        # Modified Gram-Schmidt: project out each earlier basis vector
        # from the running remainder u (the original projected the raw
        # column instead, which is numerically less stable).
        for i in range(k):
            u = u - np.dot(Q[:, i], u) * Q[:, i]
        Q[:, k] = u / nl.norm(u)
    R = np.dot(Q.transpose(), A)  # upper triangular since Q is orthonormal
    # Solve R x = Q^T b directly; avoids forming inv(R), which is both
    # slower and worse conditioned than a linear solve.
    x = nl.solve(R, np.dot(Q.transpose(), b))
    return x
if __name__ == "__main__":
A = np.zeros([100, 100])
b = np.zeros([100,1])
x0 = np.zeros([100,1])
for i in range(0, 100):
A[i][i] = 2
b[i][0] = 1
if i == 0:
A[i][i + 1] = -1
elif i == 99:
A[i][i - 1] = -1
else:
A[i][i + 1] = A[i][i - 1] = -1
result1 = ConjugateGradient(A, b, 100, x0)
print("共轭梯度法:")
print(result1)
result2 = QR(A, b, 100)
print("QR:")
print(result2)