相机标定及图片描点测距三维版
一、相机标定
1.棋盘格做相机标定
自制棋盘格如图所示:
如图是一个(7,6)的黑白棋盘格,在这里运用cv::findChessboardCorners方法获取点坐标,如图:
具体代码如下:
std::vector< cv::Point3f > worldPoints;
for (int j = 0; j < colCount; ++j)
{
for (int k = 0; k < rowCount; ++k)
{
worldPoints.push_back(cv::Point3f(k*1.0, j*1.0, 0.0f));
}
}
std::vector< cv::Point2f > corners;
std::vector< std::vector< cv::Point2f > > corners2;
std::vector< std::vector< cv::Point3f > > worldPoints2;
for (int i = 0; i < imageCount; ++i)
{
// 输入图片,图片内角点数(不算棋盘格最外层的角点),输出角点,求解方式
bool_t found = cv::findChessboardCorners(images[i], cv::Size(rowCount, colCount), corners, cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE);
// 将找到的角点放入容器中
corners2.push_back(corners);
//世界坐标系的二维vector 放入三维vector
worldPoints2.push_back(worldPoints);
}
注意这里的rowCount和colCount参数是棋盘格角点的横纵方向的个数,一定要与图片一致,如果想要画出角点,就用cv::drawChessboardCorners(image, Size(7, 6), corners, found);的方法画出图像中的角点。
得到角点后,接下来就是矫正了,接下来使用OpenCV的cv::calibrateCamera方法。
cv::calibrateCamera(worldPoints2, corners2, images[0].size(), cameraMatrix_, distCoeffs_, rvecs, tvecs, cv::CALIB_FIX_PRINCIPAL_POINT);
首先来看一下参数介绍
cameraMatrix为内参数矩阵。输入一个cv::Mat cameraMatrix即可。
distCoeffs为畸变矩阵。输入一个cv::Mat distCoeffs即可。
rvecs为旋转向量;应该输入一个cv::Mat的vector,即
std::vector<cv::Mat> rvecs,因为每张标定图片会得到一个rvec。
tvecs为位移向量;和rvecs一样,也应该为std::vector<cv::Mat> tvecs
得到矫正参数之后,可以通过cv::undistort方法得到矫正后的图像。
cv::undistort(image, realImage, cameraMatrix_, distCoeffs_);
另外,还可以利用cv::initUndistortRectifyMap的方法和remap的方法得到矫正后的图像,但是
initUndistortRectifyMap返回的mapx和mapy具体有什么用我也没搞明白。
如果要获取图像中每个点标定后的修正坐标,还需要使用cv::undistortPoints方法,输入的是以左上角为原点,向右为x轴正方向,向下为y轴正方向的点,得到的点是相对于图像正中心的,如果得到的是一个小于零的数,那说明是得到了偏移量,需要对应乘上图像的行列数,如果要直接得出像素坐标,则需要在最后加上cameraMatrix_参数。
cv::undistortPoints(obj_p, res_p, cameraMatrix_, distCoeffs_, cv::noArray(), cameraMatrix_);
二、中心思想
为了克服摄像头安装的角度问题,在之前的描点测距的基础上,又设计了三维的方法,这个方法不论摄像头与标定纸之间的角度是多少,都可以通过三维的方式将距离换算出,这样也能使方法的适应性更广。
当相机与棋盘格存在角度时,我们可以假设,棋盘格是由一个垂直于相机的棋盘格空间变换得到的,而空间变换的中间关系,我们可以用一个矩阵来表示,
我们已知的有图像上的像素坐标,然后两个旋转的角度,所以设计一个四阶的旋转矩阵。然后就是如何求得这个矩阵了,在这里需要假设有一个垂直于相机的棋盘格,然后人为地为这个棋盘格的点做标注,这里可以随意标注,只要符合规律即可,比如棋盘格检测到的第一个点是(600,400),自己标注垂直点时,可以标注为(1,1)然后因为是四阶的矩阵,所以我将实际棋盘格的点标注为(600,400,600400,1)为什么是600400呢,因为矩阵必须是可逆矩阵,又需要有规律的标注,我只是为了方便,用别的数据也可以,只要可逆即可,垂直点我用的是棋盘格的每个格子的实际距离(x距离,y距离,0, 1)。
在这里还需要提一下四阶矩阵求逆,如果是python,那是有方法的,C++是没有这样的方法的,不过可以自己手写高斯约旦消元法,适用于各阶矩阵求逆。
三、描点测距
测距的话就比较简单了,只要自己在图像中选取合适的四个点,通过矩阵求解得出一个关系矩阵,然后将输入的点坐标做成一个4x1的行矩阵,乘上关系矩阵,即可得实际距离。
另外,由于三维空间的变换并不是绝对的线性,所以还需要考虑到测距的精度,这就需要从选点的角度入手了,选取的四个点距离越近,则结果越准确,然后被测点在所选的那些点的范围内,就用那些点得出的关系矩阵计算,这样可以提高精度。
代码如下:
// cameraCalibration.cpp : 此文件包含 “main” 函数。程序执行将在此处开始并结束。
//
#include <iostream>
#include <string.h>
#include <opencv2/opencv.hpp>
#include <vector>
#include <assert.h>
using namespace std;
using namespace cv;
typedef bool bool_t;
// Calibrates a camera from chessboard images, then maps undistorted pixel
// coordinates to physical board coordinates via per-quad 4x4 transforms whose
// inverses are computed with a hand-rolled Gauss-Jordan elimination.
class CameraCalibration
{
protected:
    cv::Mat cameraMatrix_;  // intrinsic matrix produced by cv::calibrateCamera
    cv::Mat distCoeffs_;    // lens distortion coefficients
    // Chessboard corners re-detected on the undistorted "best" image.
    std::vector< cv::Point2f > imagePoints;
    // For each local quad: its 4 corner pixels (polygon used for containment tests).
    std::vector < std::vector < cv::Point2f >> matPoints;
    // For each local quad: the 4x4 pixel->board transform (parallel to matPoints).
    std::vector < Mat > invMat;
    int matNumber = 0;        // number of quads stored in matPoints / invMat
    double mmPerPixels_;      // physical side length of one chessboard square
    Point2f centerOfPixels_;  // image centre (remapped to board units by getGaussJordan)
    Point2f locationP;        // first corner relative to the image centre; encodes board orientation
public:
    // Calibrates from `imageCount` chessboard views. rowCount/colCount are the
    // square counts per row/column (inner-corner counts are one less, hence the
    // decrements); lengthOfSide is the physical size of one square.
    // Returns false when no usable chessboard could be detected.
    bool_t Init(const cv::Mat* images, int imageCount, int rowCount, int colCount, double lengthOfSide)
    {
        if (images == nullptr || imageCount <= 0)
        {
            return false;
        }
        rowCount--;
        colCount--;
        mmPerPixels_ = lengthOfSide;
        // Ideal board coordinates on the z == 0 plane, one per inner corner.
        std::vector< cv::Point3f > worldPoints;
        for (int j = 0; j < colCount; ++j)
        {
            for (int k = 0; k < rowCount; ++k)
            {
                worldPoints.push_back(cv::Point3f(k * 1.0, j * 1.0, 0.0f));
            }
        }
        std::vector< cv::Mat > rvecs, tvecs;
        std::vector< cv::Point2f > corners;
        std::vector< std::vector< cv::Point2f > > corners2;
        std::vector< std::vector< cv::Point3f > > worldPoints2;
        double pointDis = 1000;
        int pointDisFlag = 0;
        for (int i = 0; i < imageCount; ++i)
        {
            bool_t found = cv::findChessboardCorners(images[i], cv::Size(rowCount, colCount), corners, cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE);
            // BUGFIX: the original indexed corners[0]/corners[1] even when
            // detection failed and `corners` was empty — undefined behaviour.
            if (!found || corners.size() < 2)
            {
                continue;
            }
            // Remember the view whose first two corners lie closest together.
            // NOTE(review): the second test compares a y-spacing against a
            // threshold last set from an x-spacing; kept as in the original —
            // looks like a "most fronto-parallel view" heuristic. TODO confirm.
            if (abs(corners[0].x - corners[1].x) < pointDis) {
                pointDis = abs(corners[0].x - corners[1].x);
                pointDisFlag = i;
            }
            if (abs(corners[0].y - corners[1].y) < pointDis) {
                pointDis = abs(corners[0].y - corners[1].y);
                pointDisFlag = i;
            }
            corners2.push_back(corners);
            worldPoints2.push_back(worldPoints);
        }
        if (corners2.empty())
        {
            return false;  // no view yielded a detectable chessboard
        }
        cv::calibrateCamera(worldPoints2, corners2, images[0].size(), cameraMatrix_, distCoeffs_, rvecs, tvecs, cv::CALIB_FIX_PRINCIPAL_POINT);
        // Undistort the selected view and re-detect its corners; those
        // corrected corners feed the quad-based mapping in getGaussJordan.
        cv::Mat realImage;
        cv::undistort(images[pointDisFlag], realImage, cameraMatrix_, distCoeffs_);
        centerOfPixels_ = Point2f(realImage.cols / 2, realImage.rows / 2);
        cv::imshow("img", realImage);
        bool found = cv::findChessboardCorners(realImage, cv::Size(rowCount, colCount), imagePoints, cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE);
        // BUGFIX: the original ignored `found` and indexed imagePoints[0].
        if (!found || imagePoints.empty())
        {
            return false;
        }
        locationP = Point2f((imagePoints[0].x - centerOfPixels_.x), (imagePoints[0].y - centerOfPixels_.y));
        return true;
    }
    // Fills `newMat` with the 4x4 identity. `m` is unused; the parameter is
    // kept for signature compatibility with the other step functions.
    void step0(double m, double newMat[4][4]) {
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < 4; j++) {
                newMat[i][j] = (i == j) ? 1 : 0;
            }
        }
    }
    // Forward elimination with partial pivoting on mat11 (in place).
    // Records the pivot-row swaps in `swap` and the elimination multipliers in
    // `l` so gaussJordan can replay them on the identity matrix. `m` unused.
    void step1(double m, double swap[4], double l[4][4], double mat11[4][4]) {
        for (int i = 0; i < 4; i++) {
            swap[i] = i;
            for (int j = 0; j < 4; j++) {
                l[i][j] = 0;
            }
        }
        for (int i = 0; i < 4; i++) {
            // Pick the largest entry in column i at or below the diagonal.
            // NOTE(review): compares signed values, not absolute values —
            // kept as in the original. TODO confirm intended pivoting rule.
            double max_row = mat11[i][i];
            int row = i;
            for (int j = i; j < 4; j++) {
                if (mat11[j][i] >= max_row) {
                    max_row = mat11[j][i];
                    row = j;
                }
            }
            swap[i] = row;
            if (row != i) {
                for (int j = 0; j < 4; j++) {
                    double swapk = mat11[i][j];
                    mat11[i][j] = mat11[row][j];
                    mat11[row][j] = swapk;
                }
            }
            // Eliminate the entries below the pivot.
            for (int j = i + 1; j < 4; j++) {
                if (mat11[j][i] != 0) {
                    l[j][i] = mat11[j][i] / mat11[i][i];
                    for (int k = 0; k < 4; k++) {
                        mat11[j][k] = mat11[j][k] - (l[j][i] * mat11[i][k]);
                    }
                }
            }
        }
    }
    // Back elimination: clears the entries above the diagonal of the
    // upper-triangular mat11, recording the multipliers in `l1`.
    void step2(double m, double mat11[4][4], double l1[4][4]) {
        int longM = m - 1;  // index of the last row/column
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < 4; j++) {
                l1[i][j] = 0;
            }
        }
        for (int i = 0; i < 4 - 1; i++) {
            for (int j = 0; j < longM - i; j++) {
                if ((mat11[longM - i - j - 1][longM - i] != 0) && (mat11[longM - i][longM - i] != 0)) {
                    l1[longM - i - j - 1][longM - i] = mat11[longM - i - j - 1][longM - i] / mat11[longM - i][longM - i];
                    for (int k = 0; k < 4; k++) {
                        mat11[longM - i - j - 1][k] = mat11[longM - i - j - 1][k] - l1[longM - i - j - 1][longM - i] * mat11[longM - i][k];
                    }
                }
            }
        }
    }
    // Copies the pivots (diagonal of the now-diagonal mat11) into l2, used as
    // the final row scaling in gaussJordan. `m` unused.
    void step3(double m, double mat11[4][4], double l2[4]) {
        for (int i = 0; i < 4; i++) {
            l2[i] = mat11[i][i];
        }
    }
    // Gauss-Jordan inversion of the 4x4 matrix mat11: runs the elimination on
    // mat11 while replaying every swap/multiplier/scale on the identity, so
    // newMat ends up as mat11's inverse. mat11 is destroyed in the process.
    void gaussJordan(int m, double mat11[4][4], double newMat[4][4]) {
        double swap[4], l[4][4], l1[4][4], l2[4];
        step0(4, newMat);
        step1(4, swap, l, mat11);
        step2(4, mat11, l1);
        step3(4, mat11, l2);
        // Replay the forward pass (swaps + eliminations) on the identity.
        for (int i = 0; i < 4; i++) {
            if (swap[i] != i) {
                for (int j = 0; j < 4; j++) {
                    double swapk1 = newMat[i][j];
                    int k1 = swap[i];
                    newMat[i][j] = newMat[k1][j];
                    newMat[k1][j] = swapk1;
                }
            }
            for (int j = i + 1; j < 4; j++) {
                for (int k = 0; k < 4; k++) {
                    if (l[j][i] != 0) {
                        newMat[j][k] = newMat[j][k] - l[j][i] * newMat[i][k];
                    }
                }
            }
        }
        // Replay the backward pass.
        for (int i = 0; i < 4 - 1; i++) {
            for (int j = 0; j < 4 - i - 1; j++) {
                if (l1[4 - 1 - i - j - 1][4 - 1 - i] != 0) {
                    for (int k = 0; k < 4; k++) {
                        newMat[4 - 1 - i - j - 1][k] = newMat[4 - 1 - i - j - 1][k] - l1[4 - 1 - i - j - 1][4 - i - 1] * newMat[4 - i - 1][k];
                    }
                }
            }
        }
        // Scale each row by its pivot to finish the inversion.
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < 4; j++) {
                newMat[i][j] = newMat[i][j] / l2[i];
            }
        }
    }
    // For every (stepLong.width x stepLong.height) quad of detected corners,
    // builds a 4x4 pixel->board transform (stored in invMat) and the quad's
    // corner polygon (matPoints). Finally remaps the image centre itself into
    // board coordinates. Requires Init to have succeeded.
    void getGaussJordan(int rowCount, int colCount, Size stepLong) {
        std::vector< std::vector< int > > pointRange;
        cv::Mat newB = cv::Mat(4, 4, CV_32FC1);
        cv::Mat newC = cv::Mat(4, 4, CV_32FC1);
        double newMat[4][4];
        rowCount--;  // squares -> inner corners
        colCount--;
        // BUGFIX: `k` was `static int k = 0;` in the original, so a second
        // call would keep counting and index past the end of imagePoints.
        int k = 0;
        // pointRange[col][row] -> flat index into imagePoints.
        for (int i = 0; i < colCount; i++)
        {
            std::vector< int > pointRange1;
            for (int j = 0; j < rowCount; j++)
            {
                pointRange1.push_back(k);
                k++;
            }
            pointRange.push_back(pointRange1);
        }
        for (int i = 0; i < colCount - stepLong.width; i = i + stepLong.width)
        {
            for (int j = 0; j < rowCount - stepLong.height; j = j + stepLong.height)
            {
                // Pixel coordinates of the quad's 4 corners, augmented with the
                // x*y cross term and 1 so the 4x4 system is invertible.
                // NOTE(review): the i-axis offsets use stepLong.height and the
                // j-axis offsets stepLong.width (swapped-looking); harmless
                // when width == height — kept as in the original. TODO confirm.
                double matrix[4][4] = {
                {imagePoints[pointRange[i][j]].x, imagePoints[pointRange[i][j]].y, imagePoints[pointRange[i][j]].x * imagePoints[pointRange[i][j]].y, 1},
                {imagePoints[pointRange[i + stepLong.height][j]].x, imagePoints[pointRange[i + stepLong.height][j]].y, imagePoints[pointRange[i + stepLong.height][j]].x * imagePoints[pointRange[i + stepLong.height][j]].y, 1},
                {imagePoints[pointRange[i][j + stepLong.width]].x, imagePoints[pointRange[i][j + stepLong.width]].y, imagePoints[pointRange[i][j + stepLong.width]].x * imagePoints[pointRange[i][j + stepLong.width]].y, 1},
                {imagePoints[pointRange[i + stepLong.height][j + stepLong.width]].x, imagePoints[pointRange[i + stepLong.height][j + stepLong.width]].y, imagePoints[pointRange[i + stepLong.height][j + stepLong.width]].x * imagePoints[pointRange[i + stepLong.height][j + stepLong.width]].y, 1},
                };
                // Matching board coordinates in physical units.
                double b[4][4] = {
                {(i + 1) * mmPerPixels_, (j + 1) * mmPerPixels_, 0, 1},
                {(i + 1 + stepLong.height) * mmPerPixels_, (j + 1) * mmPerPixels_, 0, 1},
                {(i + 1) * mmPerPixels_, (j + 1 + stepLong.width) * mmPerPixels_, 0, 1},
                {(i + 1 + stepLong.height) * mmPerPixels_, (j + 1 + stepLong.width) * mmPerPixels_, 0, 1},
                };
                // (Loop variables renamed: the originals shadowed i/j above.)
                for (int r = 0; r < 4; r++) {
                    for (int c = 0; c < 4; c++) {
                        newB.at<float>(r, c) = b[r][c];
                    }
                }
                gaussJordan(4, matrix, newMat);
                for (int r = 0; r < 4; r++) {
                    for (int c = 0; c < 4; c++) {
                        newC.at<float>(r, c) = newMat[r][c];
                    }
                }
                // pixel -> board transform for this quad: inverse(pixels) * board.
                Mat newM = newC * newB;
                invMat.push_back(newM);
                std::vector < cv::Point2f > matPoints1;
                matPoints1.push_back(imagePoints[pointRange[i][j]]);
                matPoints1.push_back(imagePoints[pointRange[i + stepLong.height][j]]);
                matPoints1.push_back(imagePoints[pointRange[i][j + stepLong.width]]);
                matPoints1.push_back(imagePoints[pointRange[i + stepLong.height][j + stepLong.width]]);
                matPoints.push_back(matPoints1);
                matNumber++;
            }
        }
        // Remap the image centre with the quad that contains it.
        double x[4] = { centerOfPixels_.x, centerOfPixels_.y, centerOfPixels_.x * centerOfPixels_.y, 1 };
        cv::Mat x1 = cv::Mat(1, 4, CV_32FC1);
        for (int i = 0; i < 4; i++) {
            x1.at<float>(i) = x[i];
        }
        cv::Mat newCenter;
        for (int i = 0; i < matNumber; i++) {
            double tmp = pointPolygonTest(matPoints[i], centerOfPixels_, true);
            if (tmp >= 0) {
                newCenter = x1 * invMat[i];
            }
        }
        // BUGFIX: the original read newCenter unconditionally; when the centre
        // fell inside no quad the Mat stayed empty and .at<float>() was UB.
        if (!newCenter.empty()) {
            centerOfPixels_ = Point2f(newCenter.at<float>(0), newCenter.at<float>(1));
        }
    }
    // Converts an undistorted pixel position to board coordinates in place.
    // Uses the quad containing the point; if the point lies outside every
    // quad, uses the nearest one (largest, i.e. least negative, signed
    // distance from pointPolygonTest).
    void claPoints(Point2f& startPoint) {
        double x[4] = { startPoint.x, startPoint.y, startPoint.x * startPoint.y, 1 };
        cv::Mat x1 = cv::Mat(1, 4, CV_32FC1);
        for (int i = 0; i < 4; i++) {
            x1.at<float>(i) = x[i];
        }
        cv::Mat restMat;
        double outsideTmp = pointPolygonTest(matPoints[0], startPoint, true);
        int outsideFlag = 0;
        bool flag = false;
        for (int i = 0; i < matNumber; i++) {
            double tmp = pointPolygonTest(matPoints[i], startPoint, true);
            if (tmp >= 0) {
                flag = true;
                restMat = x1 * invMat[i];
                break;
            }
            if (outsideTmp < tmp) {
                outsideTmp = tmp;
                outsideFlag = i;
            }
        }
        if (!flag) {
            restMat = x1 * invMat[outsideFlag];
        }
        startPoint = Point2f(restMat.at<float>(0), restMat.at<float>(1));
    }
    // Converts `points` (undistorted pixels) to a signed physical offset from
    // the image centre, flipping axes according to the board's orientation
    // (locationP). `pointCount` is unused; kept for interface compatibility.
    void TranslateCoord(cv::Point2f& points, int pointCount)
    {
        assert(mmPerPixels_ > 0);
        assert(matNumber > 0);
        claPoints(points);
        points.x = points.x - centerOfPixels_.x;
        points.y = points.y - centerOfPixels_.y;
        // Sign correction so that the axes grow away from the first corner.
        if (locationP.x > 0 && locationP.y < 0) {
            points.x = -points.x;
        }
        else if (locationP.x < 0 && locationP.y > 0) {
            points.y = -points.y;
        }
        else if (locationP.x > 0 && locationP.y > 0) {
            points.x = -points.x;
            points.y = -points.y;
        }
    }
};
// Demo entry point: loads five chessboard views, calibrates the camera,
// builds the per-quad pixel->board transforms, and converts one sample pixel
// to a physical offset from the image centre.
int main()
{
    std::cout << "Hello World!\n";
    Mat images[5];
    images[0] = cv::imread("E:\\CPP\\rectImage\\4\\3.bmp");
    images[1] = cv::imread("E:\\CPP\\rectImage\\4\\2.bmp");
    images[2] = cv::imread("E:\\CPP\\rectImage\\4\\1.bmp");
    images[3] = cv::imread("E:\\CPP\\rectImage\\4\\4.bmp");
    images[4] = cv::imread("E:\\CPP\\rectImage\\4\\5.bmp");
    // BUGFIX: cv::imread returns an empty Mat on failure; the original never
    // checked, which crashes downstream in findChessboardCorners.
    for (int i = 0; i < 5; ++i)
    {
        if (images[i].empty())
        {
            std::cerr << "failed to load image " << i << std::endl;
            return 1;
        }
    }
    CameraCalibration newMatrix;
    // BUGFIX: the original ignored Init's return value.
    // Board is 8x7 squares, each 20 units on a side.
    if (!newMatrix.Init(images, 5, 8, 7, 20))
    {
        std::cerr << "camera calibration failed" << std::endl;
        return 1;
    }
    // Build one transform per 2x2-corner quad.
    cv::Size stepLong;
    stepLong.height = 2;
    stepLong.width = 2;
    newMatrix.getGaussJordan(8, 7, stepLong);
    // Convert a sample pixel to a physical offset from the image centre.
    Point2f p1 = Point2f(1800, 1500);
    int p = 0;
    newMatrix.TranslateCoord(p1, p);
    cout << p1 << endl;
    cv::waitKey(0);
    return 0;
}