前面一篇文章中我介绍了滤波反投影,实际中我们的扫描都是分立而非连续的,因此我们通常需要对投影值进行插值之后再进行滤波反投影,这样能够获得更好的效果。我现在先把代码贴上来,具体的数学过程过几天再详细讲。
#作者:CSDN用户:宋体的微软雅黑
#时间:2020年7月12日
#脚本任务:线性插值之后进行滤波反投影
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import ndimage
from cv2 import cv2
import numba as nb
from numba import jit
def DiscreteRadonTransform(image, steps):
    """Compute a discrete Radon transform of a 2-D image.

    The image is rotated to each of `steps` angles covering 180 degrees and
    projected by summing along axis 0 (columns), i.e. one detector reading
    per column.

    image: 2-D array-like (assumed square so the detector count matches the
           image width -- TODO confirm against caller).
    steps: number of projection angles.
    Returns: (width, steps) float64 sinogram, one column per angle.
    """
    width = len(image[0])
    sinogram = np.zeros((width, steps), dtype='float64')
    for angle_index in range(steps):
        # Rotate by the negative angle so a fixed axis-0 sum sweeps the
        # projection direction; reshape=False keeps the frame size constant.
        rotated = ndimage.rotate(image, -angle_index * 180 / steps,
                                 reshape=False).astype('float64')
        sinogram[:, angle_index] = rotated.sum(axis=0)
    return sinogram
@jit("double[:](int64, int64)", nopython=True, fastmath=False, cache=True)
def SLFilter(N, d):
    """Build a Shepp-Logan filter kernel of length N.

    N: kernel length (number of detector samples).
    d: detector spacing.
    Returns: length-N float64 kernel, centred at index N/2.
    """
    kernel = np.zeros(N)
    half = N / 2
    scale = np.pi ** 2.0 * d ** 2.0
    for idx in range(N):
        offset = idx - half
        kernel[idx] = -2.0 / (scale * (4.0 * offset ** 2.0 - 1.0))
    return kernel
@jit("double[:](int64, int64)", nopython=True, fastmath=False, cache=True)
def RLFilter(N, d):
    """Build a Ram-Lak filter kernel of length N.

    N: kernel length (number of detector samples).
    d: detector spacing.
    Returns: length-N float64 kernel, centred at index N/2.
    """
    kernel = np.zeros(N)
    center = N / 2
    for idx in range(N):
        offset = idx - center
        # Even offsets contribute nothing; odd offsets follow -1/(pi*d*offset)^2.
        if np.mod(offset, 2) == 0:
            kernel[idx] = 0
        else:
            kernel[idx] = -1.0 / np.power(offset * np.pi * d, 2.0)
    # The centre sample (offset 0) is set separately after the loop.
    kernel[np.int64(center)] = 1 / (4 * np.power(d, 2.0))
    return kernel
@jit("double[:, :](int64, int64, double[:, :], double)", nopython=True, fastmath=False, cache=True)
def RLIRadonTransform(steps, pictureSize, projectionValue, delta):
    """Filtered back-projection with linear interpolation of projection values.

    steps: number of projection angles.
    pictureSize: side length in pixels of the square output image.
    projectionValue: sinogram with one column per angle; assumes the detector
        count (rows) equals pictureSize -- TODO confirm against caller.
    delta: angular increment between successive projections, in radians.
    Returns: (pictureSize, pictureSize) reconstructed image.

    NOTE(review): despite the "RL" in the name, this applies the Shepp-Logan
    filter (SLFilter), which matches the "SL Filter" plot title in the caller.
    """
    res = np.zeros((pictureSize, pictureSize))
    filter = SLFilter(pictureSize, 1)
    for step in range(steps):
        pm = projectionValue[:, step]
        filterPmWithoutCut = np.convolve(filter, pm)#, "same")
        # numba only supports the first two arguments of np.convolve, so the
        # mode cannot be set to "same"; instead, crop the full convolution
        # down to its central lenCore samples by trimming both sides equally.
        lenFirst = len(filter)
        lenSecond = len(pm)
        lenCore = max(lenFirst, lenSecond)
        lenRes = len(filterPmWithoutCut)
        sideLen = np.int64((lenRes - lenCore) / 2.0)
        firstIndex = np.int64(sideLen)
        lastIndex = np.int64(lenRes - 1 - sideLen)
        #print(sideLen, firstIndex, lastIndex)
        filterPm = filterPmWithoutCut[firstIndex:lastIndex+1]
        # Angle for this projection; the (step - 1) / (col - 1) / (row - 1)
        # offsets look like a MATLAB-style 1-based convention -- verify
        # against the projection geometry used by DiscreteRadonTransform.
        deg = (step - 1) * delta
        # Offset so the rotation is taken about the image centre.
        bias = (pictureSize / 2.0) * (1 - np.cos(deg) - np.sin(deg))
        for row in range(pictureSize):
            for col in range(pictureSize):
                # Fractional detector coordinate of pixel (row, col) at this angle.
                pos = bias + (col - 1) * np.cos(deg) + (row - 1) * np.sin(deg)
                n = np.int64(np.floor(pos))
                t = pos - n
                # Clamp so that both filterPm[n] and filterPm[n+1] are in range.
                n = np.int64(max(0, n))
                n = np.int64(min(n, pictureSize - 2))
                # Linear interpolation between the two adjacent filtered samples.
                p = (1 - t) * filterPm[n] + t * filterPm[n+1]
                # Accumulate into the vertically flipped output row.
                res[np.int64(pictureSize - 1 - row), np.int64(col)] = res[np.int64(pictureSize - 1 - row), np.int64(col)] + p
    return res
# --- Script entry: build a sinogram from a test phantom and reconstruct it. ---
N = 256      # reconstructed image side length (pixels)
step = 256   # number of projection angles over 180 degrees
theta = np.linspace(0, 180, step, endpoint=False)
print(theta)
delta = np.pi / step  # angular increment between projections, in radians

# cv2.imread returns None (instead of raising) when the file is missing or
# unreadable; fail loudly here rather than deep inside the transform.
image = cv2.imread("matlabSheppLogan.png", cv2.IMREAD_GRAYSCALE)
if image is None:
    raise FileNotFoundError("matlabSheppLogan.png not found or unreadable")

P = DiscreteRadonTransform(image, step)
print(P.shape)

# Save the sinogram for offline inspection (filename typo fixed:
# "ProjectioValue" -> "ProjectionValue").
df = pd.DataFrame(P)
df.to_csv("ProjectionValue.csv", header=False, index=False, mode="w")

res = RLIRadonTransform(step, N, P, delta)

plt.figure()
plt.imshow(P, cmap="gray")
plt.figure()
plt.title("SL Filter")
plt.imshow(res, cmap="gray")
plt.savefig("iradonWithoutCorrection.png")
plt.show()
重建结果如下:
可以看到成像的结果略有改善,但是边缘还是有些“伪影”,我推测是因为我自己写的Radon函数的问题,因为我是直接按列求和产生的投影值,相当于探测器宽度和像素边长相等,而MATLAB中的iradon函数得到的投影值数组的列长度大于Phantom的体素的列的数目,这就相当于使用了更小尺寸的探测器。
希望大家批评指正。