在日常生活我们经常会看见这样的图片,它不是正对我们的,通过透视校正我们可以得到较好的图片。
处理流程:
1、二值处理
2、形态学操作
3、轮廓检测(通过横纵比去除干扰轮廓)
4、Hough 直线检测
5、计算交点坐标
6、透视变换
函数
getPerspectiveTransform()
根据输入和输出点获得图像透视变换的矩阵
Mat getPerspectiveTransform(const Point2f src[],
                            const Point2f dst[]);
参数描述
- src 输入图像的四边形顶点坐标
- dst 输出图像的相应的四边形顶点坐标。
warpPerspective()
对图像应用透视转换
void warpPerspective(InputArray src,
OutputArray dst,
InputArray M,
Size dsize,
int flags=INTER_LINEAR,
int borderMode=BORDER_CONSTANT,
const Scalar& borderValue=Scalar()
);
参数描述:
- src 输入图像(要进行透视校正的原图)
- dst 输出图像,这个图像的大小和类型和src一致
- M 输入 3*3 的变换矩阵
- flags 插值方法(INTER_LINEAR 或 INTER_NEAREST)与可选标志 WARP_INVERSE_MAP 的组合;设置该标志时,M 被视为反向变换(dst → src)。
- borderMode 像素外推方法(BORDER_CONSTANT 或 BORDER_REPLICATE)。
原图:
处理图:
代码实现:
# include<opencv2\opencv.hpp>
# include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char** argv) {
    // Perspective correction pipeline:
    // binarize -> close -> contours -> Hough lines -> pick 4 border lines
    // -> intersect them for the 4 corners -> warp to a fronto-parallel view.
    Mat src, dst, gray_src, binary;
    src = imread("E:/tuku/case007.jpg");
    if (src.empty()) {
        cout << "can't find this picture...";
        return -1;
    }
    imshow("input", src);

    // 1. Binarize: grayscale + inverted Otsu threshold (object becomes white).
    cvtColor(src, gray_src, COLOR_BGR2GRAY);
    threshold(gray_src, binary, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
    imshow("binary image", binary);

    // 2. Morphological closing (3 iterations) to fill small gaps in the mask.
    Mat kernel = getStructuringElement(MORPH_RECT, Size(5, 5), Point(-1, -1));
    morphologyEx(binary, binary, MORPH_CLOSE, kernel, Point(-1, -1), 3);
    imshow("close image", binary);

    // 3. Contour detection on the inverted mask.
    bitwise_not(binary, dst);
    vector<vector<Point>> contours;
    // FIX: the contour offset must be Point(0, 0); the original Point(-1, -1)
    // shifted every contour point by one pixel and skewed the later line fit.
    findContours(dst, contours, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));

    // 4. Keep only contours plausibly belonging to the document border:
    //    wider than half the image but not spanning the full height
    //    (rejects the image-frame contour and small noise).
    int width = src.cols;
    int height = src.rows;
    Mat drawImage = Mat::zeros(src.size(), CV_8UC3);
    for (size_t t = 0; t < contours.size(); t++) {
        Rect rect = boundingRect(contours[t]);
        if (rect.width > width / 2 && rect.height < height - 10)
            drawContours(drawImage, contours, static_cast<int>(t), Scalar(0, 0, 255), 2, 8, Mat());
    }
    imshow("drawImage", drawImage);

    // 5. Probabilistic Hough transform on the filtered contour image.
    vector<Vec4i> lines;
    Mat contoursImage;
    int accu = min(width / 3, height / 3);  // threshold / min length scale with image size
    cvtColor(drawImage, contoursImage, COLOR_BGR2GRAY);
    HoughLinesP(contoursImage, lines, 1, CV_PI / 180.0, accu, accu, 7);
    Mat lineImage = Mat::zeros(src.size(), CV_8UC3);
    for (size_t t = 0; t < lines.size(); t++) {
        Vec4i ln = lines[t];
        line(lineImage, Point(ln[0], ln[1]), Point(ln[2], ln[3]), Scalar(0, 0, 255), 2, 8, 0);
    }
    // FIX: lines.size() is size_t — print with %zu, not %d.
    printf("number of line:%zu\n", lines.size());
    imshow("lineImage", lineImage);

    // 6. Classify the detected segments into top/bottom/left/right border lines
    //    by which half of the image both endpoints fall in.
    int daltah = 0;
    Vec4i topLine, bottomLine;
    Vec4i leftLine, rightLine;
    bool topFound = false, bottomFound = false, leftFound = false, rightFound = false;
    for (size_t i = 0; i < lines.size(); i++) {
        Vec4i ln = lines[i];
        daltah = ln[3] - ln[1];  // vertical extent: near-horizontal segments have a small daltah
        if (ln[3] < height / 2.0 && ln[1] < height / 2.0 && daltah < accu - 1) {
            topLine = lines[i];
            topFound = true;
        }
        if (ln[3] > height / 2.0 && ln[1] > height / 2.0 && daltah < accu - 1) {
            bottomLine = lines[i];
            bottomFound = true;
        }
        if (ln[0] < width / 2.0 && ln[2] < width / 2.0) {
            leftLine = lines[i];
            leftFound = true;
        }
        if (ln[0] > width / 2.0 && ln[2] > width / 2.0) {
            rightLine = lines[i];
            rightFound = true;
        }
    }
    // FIX: fail fast if any border line is missing instead of intersecting
    // default-initialized (all-zero) lines, which yields garbage corners.
    if (!topFound || !bottomFound || !leftFound || !rightFound) {
        cout << "could not locate all four boundary lines" << endl;
        return -1;
    }
    cout << "topline:p1(x,y)=" << topLine[0] << "," << topLine[1] << "p2(x,y)=" << topLine[2] << "," << topLine[3] << endl;
    cout << "bottomLine:p1(x,y)=" << bottomLine[0] << "," << bottomLine[1] << "p2(x,y)=" << bottomLine[2] << "," << bottomLine[3] << endl;
    cout << "leftLine:p1(x,y)=" << leftLine[0] << "," << leftLine[1] << "p2(x,y)=" << leftLine[2] << "," << leftLine[3] << endl;
    cout << "rightLine:p1(x,y)=" << rightLine[0] << "," << rightLine[1] << "p2(x,y)=" << rightLine[2] << "," << rightLine[3] << endl;

    // 7. Fit each border line as y = k*x + c.
    // NOTE(review): slope-intercept form breaks down for exactly vertical
    // segments (division by zero) — assumes the document edges are tilted;
    // a cross-product intersection would be fully general. TODO confirm.
    double k1, c1;  // top
    k1 = float(topLine[3] - topLine[1]) / float(topLine[2] - topLine[0]);
    c1 = topLine[1] - k1 * topLine[0];
    double k2, c2;  // bottom
    k2 = float(bottomLine[3] - bottomLine[1]) / float(bottomLine[2] - bottomLine[0]);
    c2 = bottomLine[1] - k2 * bottomLine[0];
    double k3, c3;  // left
    k3 = float(leftLine[3] - leftLine[1]) / float(leftLine[2] - leftLine[0]);
    c3 = leftLine[1] - k3 * leftLine[0];
    double k4, c4;  // right
    k4 = float(rightLine[3] - rightLine[1]) / float(rightLine[2] - rightLine[0]);
    c4 = rightLine[1] - k4 * rightLine[0];

    // 8. Corner = intersection of the two adjacent border lines:
    //    x = (c_a - c_b) / (k_b - k_a), y back-substituted.
    Point p1;  // top-left (top ∩ left)
    p1.x = static_cast<int>((c1 - c3) / (k3 - k1));
    p1.y = static_cast<int>(k1 * p1.x + c1);
    Point p2;  // top-right (top ∩ right)
    p2.x = static_cast<int>((c1 - c4) / (k4 - k1));
    p2.y = static_cast<int>(k1 * p2.x + c1);
    Point p3;  // bottom-left (bottom ∩ left)
    p3.x = static_cast<int>((c2 - c3) / (k3 - k2));
    p3.y = static_cast<int>(k2 * p3.x + c2);
    Point p4;  // bottom-right (bottom ∩ right)
    p4.x = static_cast<int>((c2 - c4) / (k4 - k2));
    p4.y = static_cast<int>(k2 * p4.x + c2);
    cout << "p1(x,y)=" << p1.x << "," << p1.y << endl;
    cout << "p2(x,y)=" << p2.x << "," << p2.y << endl;
    cout << "p3(x,y)=" << p3.x << "," << p3.y << endl;
    cout << "p4(x,y)=" << p4.x << "," << p4.y << endl;

    // Visualize the four corners.
    circle(lineImage, p1, 2, Scalar(0, 0, 255), 2, 8, 0);
    circle(lineImage, p2, 2, Scalar(0, 0, 255), 2, 8, 0);
    circle(lineImage, p3, 2, Scalar(0, 0, 255), 2, 8, 0);
    circle(lineImage, p4, 2, Scalar(0, 0, 255), 2, 8, 0);
    imshow("four coner", lineImage);

    // 9. Perspective transform: map the detected quad onto the full image rect.
    //    Source and destination corners are listed in the same order:
    //    top-left, top-right, bottom-left, bottom-right.
    vector<Point2f> src_coners(4);
    src_coners[0] = p1;
    src_coners[1] = p2;
    src_coners[2] = p3;
    src_coners[3] = p4;
    vector<Point2f> dst_coners(4);
    dst_coners[0] = Point(0, 0);
    dst_coners[1] = Point(width, 0);
    dst_coners[2] = Point(0, height);
    dst_coners[3] = Point(width, height);
    Mat ResultImage;
    Mat warpmatrix = getPerspectiveTransform(src_coners, dst_coners);
    // FIX: dsize must be the desired output size; ResultImage was an empty
    // Mat here, so ResultImage.size() was (0,0) and produced no output.
    warpPerspective(src, ResultImage, warpmatrix, src.size(), INTER_LINEAR);
    imshow("Final Result Image", ResultImage);
    waitKey(0);
    return 0;
}
疑问
在使用getPerspectiveTransform()和findHomography()获取透视变换矩阵时,二者有什么区别?
warpPerspective()和 perspectiveTransform()的区别?