# Code:
import numpy as np
from scipy.linalg import toeplitz
from scipy.linalg import svdvals
import time
# Problem setup: an n x m Gaussian matrix A and an m x m Toeplitz matrix B.
n = 200
m = 500

A = np.random.normal(size=(n, m))  # entries drawn i.i.d. from N(0, 1)
print(A)

B = toeplitz(range(m))  # symmetric Toeplitz matrix; first row/column is 0..m-1
print(B)

# 9.1
print('\nEx 9.1')
def f(A, B):
    """Return A @ (B - w0 * I), where w0 is the first eigenvalue of B.

    Parameters
    ----------
    A : ndarray, shape (p, q)
    B : ndarray, shape (q, q)
        Square matrix.

    Returns
    -------
    ndarray, shape (p, q)
    """
    w = np.linalg.eigvals(B)
    # Fix: size the identity from B itself instead of the module-level
    # global ``m``, so the function is not silently coupled to script state
    # and works for any square B.
    C = B - w[0] * np.eye(B.shape[0])
    return np.dot(A, C)
# Ex 9.1: matrix sums and products.
print(A + A)           # elementwise sum A + A
print(np.dot(A, A.T))  # A A^T, shape (n, n)
print(np.dot(A.T, A))  # A^T A, shape (m, m)
print(np.dot(A, B))    # matrix product A B
print(f(A, B))         # A (B - w0 * I), see f above
# 9.2: solve the linear system B x = b for a random right-hand side.
print('\nEx 9.2')
b = np.random.random(m)
solution = np.linalg.solve(B, b)
print(solution)
# 9.3: matrix norms and the extreme singular values of B.
print('\nEx 9.3')
print(np.linalg.norm(A, ord='fro'))   # Frobenius norm of A
print(np.linalg.norm(B, ord=np.inf))  # infinity (max row-sum) norm of B
u, s, vh = np.linalg.svd(B)
print(max(s), min(s))                 # largest and smallest singular value
# 9.4
print('\nEx 9.4')
# The power-iteration implementation below was borrowed from Wikipedia
# and modified.
def power_iteration(A, num_simulations=100, tol=5e-6):
    """Estimate the dominant eigenvalue/eigenvector of A by power iteration.

    Borrowed from Wikipedia and modified.

    Parameters
    ----------
    A : ndarray, shape (k, k)
        Square matrix.
    num_simulations : int, optional
        Maximum number of iterations if the method does not converge.
    tol : float, optional
        Stop once the eigenvalue estimate changes by less than this between
        iterations (default matches the original hard-coded 0.000005).

    Returns
    -------
    (float, ndarray, int)
        The eigenvalue estimate ||A b||, the normalized eigenvector
        estimate, and the number of iterations performed.
    """
    # Ideally choose a random vector, to decrease the chance that our
    # starting vector is orthogonal to the dominant eigenvector.
    b_k = np.random.rand(A.shape[1])
    b_k1_norm = 0.0
    # Fix: initialize the counter so the function is well-defined (no
    # NameError) even when num_simulations == 0.
    iterations = 0
    for iterations in range(1, num_simulations + 1):
        # calculate the matrix-by-vector product A b
        b_k1 = np.dot(A, b_k)
        prev_norm = b_k1_norm  # store the previous norm
        b_k1_norm = np.linalg.norm(b_k1)
        # re-normalize the vector
        b_k = b_k1 / b_k1_norm
        # ||A b_k|| converges to |lambda_max|; stop when it settles.
        if abs(b_k1_norm - prev_norm) < tol:
            break
    return b_k1_norm, b_k, iterations
# Run power iteration on a random n x n Gaussian matrix and time it.
Z = np.random.normal(size=(n, n))
# Fix: time.clock() was removed in Python 3.8; perf_counter() is the
# documented replacement for wall-clock interval timing.
start = time.perf_counter()
val, vec, i = power_iteration(Z)
finish = time.perf_counter()
print("Largest eigenvalue:\n", val)
print("Corresponding eigenvector:\n", vec)
print("Iteration count for convergence: (100 indicates misconvergence)\n", i)
print("Time for iterations:\n", finish - start)
# 9.5: singular values of a random 0/1 matrix.
print('\nEx 9.5')
C = np.random.randint(0, 2, (n, n))
# Fix: the exercise asks for the singular values of C; the original passed
# B to svdvals, leaving C generated but unused.
s = svdvals(C)
print(s)
# 9.6: find the entry of A closest to a random value z.
print('\nEx 9.6')
z = np.random.normal(size=1)
print(z)
flat_index = np.argmin(abs(A - z))  # argmin over the flattened array
print(A[flat_index // m][flat_index % m])
# Summary: functions involved
# NumPy:
#   eye()
#   zeros()
#   dot()
#   argmin()
#   numpy.random:
#     random()
#     normal()
#     rand()
#     randint()
#   numpy.linalg:
#     eig()
#     eigvals()
#     solve()
#     norm()
#     svd()
# SciPy:
#   scipy.linalg:
#     toeplitz()
#     svdvals()