OpenCV实现点对点的透视变换(C++)

从一幅图到透视变换之后的图,如何得到原图中的点在目标图中的位置呢?

用到透视变换到目标图之后得到的透视变换矩阵,同时单独对点坐标进行透视变换矩阵的操作。

透视变换的基本操作其实挺简单,参考我以前的文章:OpenCV4.1.0透视变换的基本操作_LankyBin的博客-CSDN博客

简单示例:从原图中确定四个点,划出一块不规则四边形的区域,把这块区域扩展成规则矩形,长宽保持原图一致

效果图如下:

效果图

代码分别如下:(VS2019+opencv4.1.0)

获得透视变换矩阵:

// Build the 3x3 perspective (homography) matrix that maps the source
// quadrilateral (pt_a, pt_b, pt_c, pt_d) onto the full capture rectangle.
// Corner order expected: a = top-left, b = bottom-left, c = top-right,
// d = bottom-right (it matches the target corners assigned below).
// NOTE(review): depends on the file-scope globals cap_width / cap_height
// being set (done in transformCap) before this is called.
Mat getWarpMatrix(Point2f pt_a, Point2f pt_b, Point2f pt_c, Point2f pt_d) {
	Point2f origin_pt[4] = { pt_a, pt_b, pt_c, pt_d };
	// The target rectangle spans the whole output image, same size as the capture.
	Point2f target_pt[4];
	target_pt[0] = Point(0, 0);
	target_pt[1] = Point(0, cap_height);
	target_pt[2] = Point(cap_width, 0);
	target_pt[3] = Point(cap_width, cap_height);
	// Solves for H such that target = H * origin.  Swapping the two
	// arguments would yield the inverse mapping (the transform is invertible).
	return getPerspectiveTransform(origin_pt, target_pt);
}

单独计算目标点坐标:

// Map a single point through the perspective transform `warpMatrix`
// (the 3x3 CV_64F homography produced by getPerspectiveTransform).
// warpPerspective only remaps whole images, so a lone point must be
// transformed manually in homogeneous coordinates:
//   [x', y', w']^T = H * [x, y, 1]^T,   target = (x'/w', y'/w')
// Returns the target position rounded to the nearest integer pixel.
Point getTargetPoint(const Point& pt_origin, const Mat& warpMatrix) {
	Mat_<double> mat_pt(3, 1);
	mat_pt(0, 0) = pt_origin.x;
	mat_pt(1, 0) = pt_origin.y;
	mat_pt(2, 0) = 1.0;                      // homogeneous coordinate
	Mat mat_pt_view = warpMatrix * mat_pt;   // 3x1 result, double precision
	double x = mat_pt_view.at<double>(0, 0);
	double y = mat_pt_view.at<double>(1, 0);
	double w = mat_pt_view.at<double>(2, 0); // perspective divisor
	// Round to the nearest pixel instead of truncating toward zero
	// (the original Point(double, double) conversion truncated).
	return Point(cvRound(x / w), cvRound(y / w));
}

整体图显示效果:

// Warp OriginImg through the fixed source quadrilateral and visualize the
// result: draws the four source corners (with their coordinates) on the
// original frame, their mapped positions on the warped frame, plus the query
// point pt_origin and its mapped location.  Displays both images in the
// "SrcImg" / "TargetImg" windows.
void Point_aToPoint_b(Mat OriginImg, Point pt_origin) {
	// Hard-coded source quadrilateral: a = top-left, b = bottom-left,
	// c = top-right, d = bottom-right (the order getWarpMatrix expects).
	const Point2f src_corners[4] = {
		Point2f(89, 70),   // a
		Point2f(126, 320), // b
		Point2f(554, 92),  // c
		Point2f(474, 360)  // d
	};
	Mat warpMatrix = getWarpMatrix(src_corners[0], src_corners[1],
	                               src_corners[2], src_corners[3]);

	Mat TargetImg;
	// INTER_CUBIC replaces the legacy CV_INTER_CUBIC alias (same value).
	warpPerspective(OriginImg, TargetImg, warpMatrix, OriginImg.size(), INTER_CUBIC);

	char coordinates[32];
	// Annotate each source corner on the original image and mark its
	// transformed position on the warped image (was four copy-pasted blocks).
	for (const Point2f& pt : src_corners) {
		circle(OriginImg, pt, 9, Scalar(0, 255, 255), 3);
		// snprintf is standard C/C++; sprintf_s is MSVC-only.
		snprintf(coordinates, sizeof(coordinates), "(%.1f, %.1f)", pt.x, pt.y);
		// BUG FIX: the original passed (255, 255, 255) — the comma operator
		// collapses that to a single 255, i.e. Scalar(255) = blue, not white.
		putText(OriginImg, coordinates, pt, 5, 0.6, Scalar(255, 255, 255));
		circle(TargetImg, getTargetPoint(pt, warpMatrix), 9, Scalar(0, 255, 0), 3);
	}

	// Track the single query point through the same homography.
	Point pt_target = getTargetPoint(pt_origin, warpMatrix);
	circle(OriginImg, pt_origin, 1, Scalar(0, 0, 255), 5);
	snprintf(coordinates, sizeof(coordinates), "(%d, %d)", pt_origin.x, pt_origin.y);
	// BUG FIX: (1, 1, 1) also collapsed via the comma operator; use Scalar.
	putText(OriginImg, coordinates, pt_origin, 2, 0.6, Scalar(1, 1, 1));
	circle(TargetImg, pt_target, 1, Scalar(255, 255, 0), 5);
	snprintf(coordinates, sizeof(coordinates), "(%d, %d)", pt_target.x, pt_target.y);
	putText(TargetImg, coordinates, pt_target, 2, 0.6, Scalar(1, 1, 1));

	imshow("SrcImg", OriginImg);
	imshow("TargetImg", TargetImg);
}

主程序打开摄像头调用(用VideoCapture因为后面可以结合定点捕捉来实现动态跟踪和映射):

void transformCap() {
	cap.open(0);
	if (!cap.isOpened()) {
		printf("Open failed\n");
		return;
	}
	cap_width = cap.get(CV_CAP_PROP_FRAME_WIDTH);
	cap_height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
	printf("Cap resolution: %d * %d\tfps = %.2f", cap_width, cap_height, cap.get(CV_CAP_PROP_FPS));
	Mat frame;
	while (true) {
		cap >> frame;
		Point pt_origin = Point(398, 220);
		Point_aToPoint_b(frame, pt_origin);
		waitKey(10);
	}
}

参考:透视变换--点对应变换 - 无左无右 - 博客园

  • 5
    点赞
  • 12
    收藏
    觉得还不错? 一键收藏
  • 2
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值