Rectifying Images with TPS (Thin Plate Spline) Interpolation: How to Use TPS

# TPS Usage
To use the TPS transformer shipped with opencv_contrib, you need to build the OpenCV sources together with the contrib modules yourself.
The main steps are: first determine the correction points along the top and bottom edges; for curved text, the top-edge points are pulled onto a single horizontal line (and the bottom-edge points onto another).
One caveat: each top-edge point and its corresponding bottom-edge point should preferably share the same x coordinate, otherwise the rectified image may come out distorted.
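Before the full reference code, here is a minimal sketch of the core OpenCV calls. The image path is a placeholder and the control points are just a few values borrowed from the reference code below; in practice they come from your detected top- and bottom-edge points.

```cpp
#include <opencv2/opencv.hpp>
#include <opencv2/shape.hpp>   // cv::createThinPlateSplineShapeTransformer
#include <vector>

int main()
{
    cv::Mat src = cv::imread("curved_text.jpg");   // placeholder path
    if (src.empty()) return -1;

    // Where the control points are now (on the curved text) ...
    std::vector<cv::Point2f> sourcePoints = { {8, 59}, {190, 6}, {443, 72},
                                              {19, 99}, {196, 52}, {429, 106} };
    // ... and where they should end up (top edge on y = 2, bottom edge on y = 60).
    std::vector<cv::Point2f> targetPoints = { {8, 2}, {190, 2}, {443, 2},
                                              {19, 60}, {196, 60}, {429, 60} };

    // One-to-one matches pairing sourcePoints[i] with targetPoints[i].
    std::vector<cv::DMatch> matches;
    for (int i = 0; i < static_cast<int>(sourcePoints.size()); ++i)
        matches.push_back(cv::DMatch(i, i, 0));

    auto tps = cv::createThinPlateSplineShapeTransformer();
    // Argument order as in the reference code: target shape first, source shape second.
    tps->estimateTransformation(targetPoints, sourcePoints, matches);

    cv::Mat rectified;
    tps->warpImage(src, rectified);
    cv::imwrite("rectified.jpg", rectified);
    return 0;
}
```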

Reference code:

```cpp
#include <opencv2/opencv.hpp>
#include <opencv2/shape.hpp>
#include <chrono>
#include <cmath>
#include <iostream>
#include <vector>

using namespace std;

// Warp src_img with a thin plate spline estimated from the control-point pairs.
cv::Mat trans_img(cv::Mat src_img, std::vector<cv::Point2f> sourcePoints, std::vector<cv::Point2f> targetPoints)
{
	auto tps = cv::createThinPlateSplineShapeTransformer();

	// One-to-one correspondence between source and target control points.
	std::vector<cv::DMatch> matches;
	for (unsigned int i = 0; i < sourcePoints.size(); i++)
		matches.push_back(cv::DMatch(i, i, 0));

	// Note the argument order: target points first, source points second,
	// which gives warpImage() the backward mapping it needs.
	tps->estimateTransformation(targetPoints, sourcePoints, matches);

	//std::vector<cv::Point2f> transPoints;
	//tps->applyTransformation(sourcePoints, transPoints);

	cout << "sourcePoints = " << endl << " " << sourcePoints << endl << endl;
	cout << "targetPoints = " << endl << " " << targetPoints << endl << endl;
	//cout << "transPos = " << endl << " " << transPoints << endl << endl;

	// Unused debug buffer (the hard-coded 495x152 ROI assumes a specific input size):
	//cv::Mat res(600, 600, CV_8UC3, cv::Scalar(0, 0, 0));
	//cv::Mat roi = res(cv::Rect(0, 0, 495, 152));
	//src_img.copyTo(roi);

	cv::Mat re;
	tps->warpImage(src_img, re);
	return re;
}

int main(int argc, char** argv)
{
using clock = std::chrono::system_clock;
using ms = std::chrono::milliseconds;

cv::Mat img = cv::imread("F:\\cppcode\\data\\0001682_6.jpg");

if (img.empty())
{
	cout << "read failed" << endl;
	return -1;
}
int imgH = img.rows;
int imgW = img.cols;
int imgC = img.channels();
//int imgC = image.channels;
cout  << imgC << imgH << imgW << endl;

int objH = 20;
int objW = 400;
//cv::Mat outimg(2, 2, CV_8UC3, cv::Scalar(0, 0, 0));
const auto before = clock::now();
std::vector<cv::Point2f> sourcePoints, targetPoints;
/*sourcePoints.push_back(cv::Point2f(88, 20));
sourcePoints.push_back(cv::Point2f(149, 2));
sourcePoints.push_back(cv::Point2f(210, 0));
sourcePoints.push_back(cv::Point2f(264, 7));
sourcePoints.push_back(cv::Point2f(320, 27));
sourcePoints.push_back(cv::Point2f(75, 84));
sourcePoints.push_back(cv::Point2f(113, 61));
sourcePoints.push_back(cv::Point2f(153, 46));
sourcePoints.push_back(cv::Point2f(203, 40));
sourcePoints.push_back(cv::Point2f(257, 57));
sourcePoints.push_back(cv::Point2f(290, 72));*/
sourcePoints.push_back(cv::Point2f(8, 59));
sourcePoints.push_back(cv::Point2f(90, 42));
sourcePoints.push_back(cv::Point2f(190, 6));
sourcePoints.push_back(cv::Point2f(330, 42));
sourcePoints.push_back(cv::Point2f(443, 72));
sourcePoints.push_back(cv::Point2f(19, 99));
sourcePoints.push_back(cv::Point2f(103, 78));
sourcePoints.push_back(cv::Point2f(196, 52));
sourcePoints.push_back(cv::Point2f(317, 80));
sourcePoints.push_back(cv::Point2f(429, 106));
/*targetPoints.push_back(cv::Point2f(0,2));
targetPoints.push_back(cv::Point2f(85, 2));
targetPoints.push_back(cv::Point2f(183, 2));
targetPoints.push_back(cv::Point2f(336, 2));
targetPoints.push_back(cv::Point2f(448, 2));
targetPoints.push_back(cv::Point2f(0, 60));
targetPoints.push_back(cv::Point2f(85, 60));
targetPoints.push_back(cv::Point2f(183, 60));
targetPoints.push_back(cv::Point2f(336, 60));
targetPoints.push_back(cv::Point2f(446, 60));*/


// Flatten the top edge: keep the first point's x, push it to y = 2, and place each
// following point at the preceding source point's x plus the straight-line distance
// to the next source point (a local approximation of unrolling the curved edge).
targetPoints.push_back(cv::Point2f(sourcePoints[0].x, 2));
for (int i = 0; i < sourcePoints.size() / 2 - 1; i++)
{
	float tmpx = sourcePoints[i].x + sqrt((sourcePoints[i + 1].x - sourcePoints[i].x)
		* (sourcePoints[i + 1].x - sourcePoints[i].x) + (sourcePoints[i + 1].y - sourcePoints[i].y)
		* (sourcePoints[i + 1].y - sourcePoints[i].y));
	float tmpy = 2;
	targetPoints.push_back(cv::Point2f(tmpx, tmpy));
}
// Flatten the bottom edge the same way, onto y = 60.
targetPoints.push_back(cv::Point2f(sourcePoints[sourcePoints.size() / 2].x, 60));
for (int i = sourcePoints.size() / 2; i < sourcePoints.size() - 1; i++)
{
	float tmpx = sourcePoints[i].x + sqrt((sourcePoints[i + 1].x - sourcePoints[i].x)
		* (sourcePoints[i + 1].x - sourcePoints[i].x) + (sourcePoints[i + 1].y - sourcePoints[i].y)
		* (sourcePoints[i + 1].y - sourcePoints[i].y));
	float tmpy = 60;
	targetPoints.push_back(cv::Point2f(tmpx, tmpy));
}

cv::Mat res;
res = trans_img(img, sourcePoints, targetPoints);
const auto duration = std::chrono::duration_cast<ms>(clock::now() - before);
std::cout << "It took " << duration.count() / 1000.0 << "s" << std::endl;
// Plot the source control points on the input image (optionally also the target points on the result).
for (int i = 0; i < sourcePoints.size(); i++)
{
	cv::circle(img, sourcePoints[i], 1, cv::Scalar(255, 255, 255), 3);   // cv::Scalar is required; a bare (255, 255, 255) collapses to a single value
	//cv::circle(res, targetPoints[i], 1, cv::Scalar(255, 255, 0), 3);
}

//cv::namedWindow("Display window", cv::WINDOW_AUTOSIZE); // Create a window for display.
cv::Mat roi = res(cv::Rect(0, 0, imgW, 60));  // keep the band that now contains the flattened text (target y runs from 2 to 60)
cv::imshow("warp result_1.jpg", roi);// Show our image inside it.
cv::imwrite("warp result_1.jpg", roi);
cv::waitKey(0); // Wait for a keystroke in the window

return 0;

}
```
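On the target-point construction in main(): each flattened x is the preceding source point's x plus the straight-line distance to the next point, which only locally approximates unrolling the curve. A possible alternative (not what the listing above does) is to accumulate the chord length from the first point of each edge, for example:

```cpp
#include <cmath>
#include <opencv2/core.hpp>
#include <vector>

// Hypothetical helper: flatten one edge (indices rowStart..rowEnd of sourcePoints)
// onto the horizontal line y, spacing the target points by the accumulated
// chord length along the curved edge.
static void flattenEdge(const std::vector<cv::Point2f>& sourcePoints,
                        int rowStart, int rowEnd, float y,
                        std::vector<cv::Point2f>& targetPoints)
{
    float x = sourcePoints[rowStart].x;            // anchor at the first point's x
    targetPoints.push_back(cv::Point2f(x, y));
    for (int i = rowStart; i < rowEnd; ++i)
    {
        cv::Point2f d = sourcePoints[i + 1] - sourcePoints[i];
        x += std::sqrt(d.x * d.x + d.y * d.y);     // add the segment length
        targetPoints.push_back(cv::Point2f(x, y));
    }
}

// Usage with the 10 active points from the listing (5 on the top edge, 5 on the bottom):
//   std::vector<cv::Point2f> targetPoints;
//   flattenEdge(sourcePoints, 0, 4, 2.0f, targetPoints);   // top edge    -> y = 2
//   flattenEdge(sourcePoints, 5, 9, 60.0f, targetPoints);  // bottom edge -> y = 60
```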


