import math
from math import log
import numpy as np
import sympy
from sympy import pprint
def lagrange_interpolation(x_known, y_known, x_new):
    """Evaluate the Lagrange interpolating polynomial at ``x_new``.

    Builds the classic Lagrange form: sum over i of
    y_i * prod_{j != i} (x_new - x_j) / (x_i - x_j).

    :param x_known: sequence of distinct x coordinates of the data points
    :param y_known: sequence of y values, same length as ``x_known``
    :param x_new: point at which to evaluate the interpolant
    :return: interpolated value at ``x_new``
    """
    n = len(x_known)
    y_new = 0
    for i in range(n):
        # i-th basis polynomial L_i(x_new), pre-scaled by y_i.
        term = y_known[i]
        for j in range(n):
            if j != i:
                term *= (x_new - x_known[j]) / (x_known[i] - x_known[j])
        y_new += term
    return y_new
# The function being interpolated (symbolically), used for the error bound.
# Symbol used for symbolic differentiation below.
t = sympy.symbols('t')

def S(t):
    """Return ln(t) as a SymPy expression.

    :param t: a SymPy symbol (or any value SymPy can take the log of)
    :return: the symbolic expression ln(t)
    """
    return sympy.ln(t)
# Known data points: x values and the corresponding samples of ln(x).
x_known = np.array([10, 11, 12, 13])
y_known = np.array([2.302585, 2.397895, 2.484907, 2.5649])
# Interpolate at a new point and compare against the true ln value.
x_new = 11.25
y_new = lagrange_interpolation(x_known, y_known, x_new)
print("%d项插值:%f" % (len(x_known), y_new))  # "%d-point interpolation: %f"
print("真实值为:", log(11.25))  # "true value is:"
# Symbolic 4th derivative of S(t) = ln(t); consumed by the error bound below.
f4dx = S(t).diff(t, 4)
# pprint(f4dx)
# Compute the bounds of the interpolation error (Lagrange remainder).
def error(x, n=len(x_known), a=1):
    """Evaluate the Lagrange remainder term at the node-interval endpoints.

    The remainder is R(x) = f^(n)(xi)/n! * prod_i (x - x_i) for some xi in
    [min(x_known), max(x_known)]; substituting the two endpoints for xi
    gives the printed error range.

    :param x: the interpolation point
    :param n: derivative order / number of nodes (default ``len(x_known)``,
              captured once at function-definition time)
    :param a: starting value of the running product (normally leave as 1)
    :return: tuple ``(ermin, ermax)`` — remainder at the two endpoints
    """
    global er  # kept global: code elsewhere may inspect the symbolic remainder
    njie = math.factorial(n)  # n!
    for xi in x_known:
        a *= x - xi  # accumulate prod_i (x - x_i)
    # f4dx is the module-level 4th derivative of ln(t), still symbolic in t.
    er = (f4dx / njie) * a
    ermin = er.subs(t, x_known.min())
    ermax = er.subs(t, x_known.max())
    print("误差范围:[{},{}]".format(ermin, ermax))  # "error range: [..,..]"
    return ermin, ermax
# Print and return the remainder bounds for the interpolation at x_new.
error(x=x_new)
