唐焕文版本的课后题要求的代码部分
第三章
1.Newton法
from sympy import *
import math
# Newton's method for minimizing f(x) = e**x - 2x - 1 starting from x0 = 5.
# Each step solves f'(x) = 0 via x_{k+1} = x_k - f'(x_k) / f''(x_k).
e = math.e
x = symbols("x")   # symbolic independent variable
x0 = 5             # initial iterate
acc = 0.001        # tolerance on the distance between successive iterates
accCurrent = 9999  # sentinel so the loop body executes at least once
# The objective and its derivatives are loop-invariant: build them once
# instead of re-deriving them symbolically on every iteration (the original
# recomputed diff(f, x, 1) and diff(f, x, 2) inside the loop).
f = e ** x - 2 * x - 1
d1 = diff(f, x, 1)  # first derivative f'(x)
d2 = diff(f, x, 2)  # second derivative f''(x)
while accCurrent > acc:  # stop when the step size meets the tolerance
    xa = x0 - d1.subs('x', x0) / d2.subs('x', x0)  # Newton update
    accCurrent = abs(xa - x0)  # |x_{k+1} - x_k|
    x0 = xa
# Report the minimizer and the minimum value.
print("在x=", end='')
print(float(xa), end='')
print("处")
out = e ** xa - 2 * xa - 1
print("取极小值:", end='')
print(float(out))
2. 0.618法
import math
import numpy as np
# Shared state for the golden-section (0.618) search over the interval [a, b].
a = -2  # left endpoint of the search interval
b = 3  # right endpoint of the search interval
acc = 0.01  # precision: stop when the interval length drops below this
s = 0.618  # golden-section ratio
f0 = 0  # function value at the left interior point m (set by step1)
f1 = 0  # function value at the right interior point n (set by step1)
m = 0  # left interior trial point, a + (1 - s) * (b - a)
n = 0  # right interior trial point, a + s * (b - a)
# Objective evaluated by the golden-section search.
def fx(x):
    """Return e**(-x) + x**2 at x ('**' is Python's power operator)."""
    return math.e ** (-x) + x ** 2
def main():
    """Entry point: kick off the golden-section search at step1."""
    step1()
def step1():
    """Place the two interior trial points in [a, b] and evaluate fx there."""
    global a, b, f0, f1, m, n
    span = b - a
    m = a + (1 - s) * span  # left interior point
    f0 = fx(m)
    n = a + s * span        # right interior point
    f1 = fx(n)
    step2()
def step2():
    """Check convergence; otherwise discard the half-interval without the minimum."""
    global a, b, f0, f1, m, n
    if (b - a) < acc:
        # Converged: report the interval midpoint as the minimizer.
        m = 1 / 2 * (a + b)
        print("极小点为: ", end='')
        print(m)
        print("极小值为: ", end='')
        print(fx(m))
        return
    # Keep the side whose interior point has the smaller function value.
    if f0 > f1:
        step3()
    else:
        step4()
def step3():
    """Shrink the interval to [m, b]: the old n becomes the new m.

    Bug fix: the stored value f0 must be carried over as well (f0 = f1),
    since the new m is the old n whose value is f1. The original left f0
    holding fx(old m), a point no longer inside the interval, so later
    comparisons in step2 used a stale value.
    """
    global a, b, f0, f1, m, n
    a = m
    m = n
    f0 = f1              # carry over fx(new m) = fx(old n)  (bug fix)
    n = a + s * (b - a)  # fresh right interior point
    f1 = fx(n)
    step5()
def step4():
    """Shrink the interval to [a, n]: the old m becomes the new n.

    Bug fix: the stored value f1 must be carried over as well (f1 = f0),
    since the new n is the old m whose value is f0. The original left f1
    holding fx(old n), a point no longer inside the interval, so later
    comparisons in step2 used a stale value.
    """
    global a, b, f0, f1, m, n
    b = n
    n = m
    f1 = f0                    # carry over fx(new n) = fx(old m)  (bug fix)
    m = a + (1 - s) * (b - a)  # fresh left interior point
    f0 = fx(m)
    step5()
def step5():
    """Loop back to the convergence test."""
    step2()
# Run the golden-section search only when executed as a script.
if __name__ == '__main__':
    main()
3.成功失败法
# Success-failure (forward-backward) step method for 1-D minimization.

def fx(x):
    """Objective: x**5 + 2x**4 - 4x**3 + x**2 + x + 2 ('**' is power)."""
    return x ** 5 + 2 * x ** 4 - 4 * x ** 3 + x ** 2 + x + 2


def main():
    """Locate a local minimizer of fx by the success-failure method.

    Starts at x = -0.5 with step h = 0.5; a successful trial moves the base
    point and doubles the step, a failed trial reverses and quarters it.
    Stops when |h| < 0.001. Returns (x*, fx(x*)).

    Bug fixes versus the original:
    - On failure the original never stepped back to the base point nor
      re-evaluated f1, so after the first failure it merely shrank h until
      termination and reported the *failed* trial point as the minimizer.
      Here the base point x is kept explicitly and every iteration evaluates
      a fresh trial x + h.
    - The original's `elif f0 < f1` left the f0 == f1 case matching neither
      branch, an infinite loop; ties are now treated as failures.
    """
    h = 0.5      # current step length (sign encodes search direction)
    x = -0.5     # base point: best point found so far
    acc = 0.001  # tolerance on |h|
    f0 = fx(x)   # value at the base point
    while True:
        trial = x + h
        f1 = fx(trial)
        print("步长:", end='')
        print(h)
        print("f0,f1=", end='')
        print(f0, f1)
        if f1 < f0:
            # Success: accept the trial point and double the step.
            x = trial
            f0 = f1
            h = 2 * h
        else:
            # Failure (ties included): stop if the step is small enough,
            # otherwise reverse direction and shrink the step.
            if abs(h) < acc:
                print("x*=", end='')
                print(x)
                print("f(x)=", end='')
                print(fx(x))
                return x, f0
            h = -h / 4


if __name__ == '__main__':
    main()
4. 最速下降法
import numpy as np
from sympy import *
import math
# Define the sympy symbols: design variables x1, x2 and the line-search step t.
x1, x2, t = symbols('x1, x2, t')
def func():
    """Build the symbolic objective 3*x1**2 + 2*x2**2 - 4*x1 - 6*x2."""
    expr = 3 * x1 ** 2 + 2 * x2 ** 2 - 4 * x1 - 6 * x2
    return expr
def fx(a, b):
    """Evaluate the objective 3*a**2 + 2*b**2 - 4*a - 6*b numerically at (a, b)."""
    value = 3 * a ** 2 + 2 * b ** 2 - 4 * a - 6 * b
    return value
def grad(data):
    """Gradient of func() evaluated at the point data = [d1, d2]."""
    f = func()
    partials = [diff(f, x1), diff(f, x2)]  # symbolic partial derivatives
    return [p.subs(x1, data[0]).subs(x2, data[1]) for p in partials]
def grad_len(grad):
    """Euclidean norm of the 2-component gradient vector."""
    squared = pow(grad[0], 2) + pow(grad[1], 2)
    return math.sqrt(squared)
def zhudian(f):
    """Stationary points of the one-variable function f: roots of f' = 0."""
    return solve(diff(f))
def main(X0, theta):
    """Steepest descent with exact line search for func().

    X0: initial point [x1_0, x2_0]; theta: stopping tolerance on the
    gradient norm. Prints the gradient norm and iterate each step, then
    the final minimizer and minimum value.

    Fixes versus the original:
    - zhudian() returns the *list* produced by sympy's solve(); the original
      multiplied that whole list into the NumPy update and only worked via an
      accidental length-1 broadcast. The single root is now extracted
      explicitly (the line function is quadratic in t, so its derivative has
      exactly one root).
    - Removed dead locals k, data_x, data_y (appended, never used).
    """
    f = func()
    grad_vec = grad(X0)
    grad_length = grad_len(grad_vec)  # gradient norm at the current iterate
    while grad_length > theta:
        p = -np.array(grad_vec)       # steepest-descent direction
        X = np.array(X0) + t * p      # ray from X0, parameterized by symbol t
        t_func = f.subs(x1, X[0]).subs(x2, X[1])  # objective along the ray
        # Exact line search: the quadratic t_func has a unique stationary point.
        t_min = zhudian(t_func)[0]
        X0 = np.array(X0) + t_min * p
        grad_vec = grad(X0)
        grad_length = grad_len(grad_vec)
        print('grad_length', grad_length)
        print('坐标', X0[0], X0[1])
    a = float(X0[0])
    b = float(X0[1])
    print("极小值点为:")
    print(a, b)
    print("极小值为:")
    print(fx(a, b))
if __name__ == '__main__':
    # Initial iterate [0, 0] and gradient-norm tolerance 0.001.
    main([0, 0], 0.001)