9.1
>>> import numpy as np
>>> import scipy.linalg as la
>>> A=np.random.randn(200,500)
>>> B=la.toeplitz([x for x in range(500)])
>>> C=A+A
>>> C1=np.dot(A,A.T)
>>> C2=np.dot(A.T,A)
>>> C3=np.dot(A,B)
>>> def func(x):
... I=np.eye(500)
... return np.dot(A,B-x*I)
...
(运行结果太大,没有截图)
9.2
>>> b=[x for x in range(500)]
>>> x=la.solve(B,b)
9.3
>>> f=np.linalg.norm(A)
>>> f
318.0199750736099
>>> i=np.linalg.norm(B,ord=np.inf)
>>> i
124750.0
>>> u,s,v=np.linalg.svd(B)
>>> s.max()
86851.66898467169
>>> s.min()
0.5000049348346858
9.4
import numpy as np
import time
def power_iteration(A):
    """Estimate the dominant eigenvalue of ``A`` by power iteration.

    Starting from a random vector, repeatedly multiply by ``A`` and
    renormalize with the infinity norm.  The norm of the product converges
    to the magnitude of the dominant eigenvalue; iteration stops when two
    successive norm estimates differ by less than 1e-5.

    Returns a tuple ``(steps, eigenvalue_estimate, eigenvector_estimate)``.
    """
    vec = np.random.rand(A.shape[1])
    estimate = 0
    previous = 1
    steps = 0
    while abs(estimate - previous) > 0.00001:
        steps += 1
        # one multiplication step: Ab
        product = np.dot(A, vec)
        # shift the old estimate out, take the inf-norm as the new one
        previous, estimate = estimate, np.linalg.norm(product, ord=np.inf)
        # renormalize so the vector stays bounded
        vec = product / estimate
    return (steps, estimate, vec)
def test_(n):
    """Time power iteration on a random n-by-n Gaussian matrix and print stats.

    Parameters
    ----------
    n : int
        Dimension of the test matrix, drawn from N(0, 0.5).

    Prints the size, elapsed wall-clock time, iteration count, and the
    dominant-eigenvalue estimate; returns None.
    """
    Z = np.random.normal(0, 0.5, (n, n))
    # BUGFIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended high-resolution timer for benchmarking.
    t0 = time.perf_counter()
    count, eival, eivec = power_iteration(Z)
    t1 = time.perf_counter()
    print('n:', n)
    print('time:', t1 - t0)
    print('How many iterations:', count)
    print('eigenvalue:', eival)
    #print('eigenvector:', eivec)
# Scaling experiment: run the timing test for n = 100, 130, ..., 280,
# separating the reports with a blank gap.
for size in range(100, 300, 30):
    test_(size)
    print('\n')
运行结果
可见随着n增大,time总体呈上升趋势
9.5
>>> import random as rd
>>> C=[[rd.randint(0,1) for x in range(200)]for y in range(200)]
>>> s=sum([sum(row) for row in C])
>>> p=s/40000
>>> p
0.49785
>>> u,s,v=la.svd(C)
>>> s
array([1.00059150e+02, 1.39855786e+01, 1.36748179e+01, 1.35788346e+01,
1.34128754e+01, 1.30628920e+01, 1.28679471e+01, 1.28084904e+01,
1.25643237e+01, 1.24658129e+01, 1.24275020e+01, 1.21860956e+01,
1.20801190e+01, 1.20358424e+01, 1.18765164e+01, 1.17951672e+01,
...
])
s.max()≈100.06, n=200, p≈0.49785, 可见 s.max()/n ≈ 0.5003, 约等于 p
9.6
>>> def func1(z,A):
... m,n= A.shape
... A=A.reshape(m*n,1)
... a=A-z
... abs_a=list(map(abs,list(map(float,a))))
... a_arr=np.array(abs_a)
... return float(A[a_arr.argmin()])
...
>>> func1(1,A)
1.0000058195241577
即A中最接近1的元素值