自拟图像矫正算法
一、算法原理
因为相机本身存在畸变,所以在测距时总会存在误差,张氏标定法虽然方便,但是精度有时候还达不到要求。所以我自拟了一个相机标定的方法,只为做测距。
首先设置两个坐标系,其中一个就是图像的坐标系,直接以像素坐标进行计算,另一个为世界坐标系,用来表示实际距离,这个要自己设定。两个坐标系之间做仿射变换,将图像坐标系中的像素坐标直接映射到世界坐标系的实际距离。
如图,已知棋盘格每个格子的宽度为10毫米,则按照实际距离做一个坐标系,然后将此坐标系与拍摄的图像中的棋盘格按照角点位置一一对应,选取图像中四个角点与实际的四个角点,做两个可逆矩阵,然后通过计算,得出变换矩阵,后面就可以利用这个变换矩阵来求取四点区域内所有点到(0,0)点的实际距离。
二、方法步骤
1.获取棋盘格的角点坐标
2.用角点坐标做成可逆矩阵
3.用高斯-约旦(Gauss-Jordan)消元法计算角点坐标矩阵的逆矩阵
4.用世界坐标的矩阵与逆矩阵计算变换矩阵
5.调用变换矩阵计算每一个区域内每一个点的实际距离
三、代码
#include <assert.h>
#include <cmath>
#include <stdio.h>
#include <string.h>

#include <iostream>
#include <vector>

#include <opencv2/opencv.hpp>
typedef bool bool_t;
class CameraCalibration
{
protected:
std::vector< cv::Point2f > imagePoints_1;
// 四幅图中的棋盘格角点坐标分别保存
std::vector< cv::Point2f > imagePoints1, imagePoints2, imagePoints3, imagePoints4;
// flag的作用是判断每一位置的棋盘格角点坐标是否已经获取
int flag1 = 0, flag2 = 0, flag3 = 0, flag4 = 0;
// invMat保存的是仿射变换的变换矩阵
std::vector < cv::Mat > invMat1, invMat2, invMat3, invMat4;
// 棋盘格格子的大小
double mmPerPixels_;
// 中心点像素坐标
cv::Point2f centerOfPixels_;
// 图像的行列数
int imageShape_cols, imageShape_rows;
// 棋盘格角点的行列数
int new_rowCount, new_colCount;
public:
// 获取每张图中棋盘格的角点坐标,棋盘格格子大小,中心点像素坐标,图像的大小,棋盘格格子的行列数
bool_t getFourImgPointsInit(const cv::Mat image, int rowCount, int colCount, double lengthOfSide)
{
rowCount--;
colCount--;
new_rowCount = rowCount;
new_colCount = colCount;
mmPerPixels_ = lengthOfSide;
std::vector< cv::Point3f > worldPoints;
for (int j = 0; j < colCount; ++j)
{
for (int k = 0; k < rowCount; ++k)
{
worldPoints.push_back(cv::Point3f(k*1.0, j*1.0, 0.0f));
}
}
std::vector< std::vector< cv::Point3f > > worldPoints2;
// 获取棋盘格角点
bool_t found = cv::findChessboardCorners(image, cv::Size(rowCount, colCount), imagePoints_1, cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE);
centerOfPixels_ = cv::Point2f(image.cols / 2, image.rows / 2);
imageShape_cols = image.cols;
imageShape_rows = image.rows;
return true;
}
// 获取每幅图中棋盘格的像素坐标
bool_t getFourImgPoints1(cv::Mat image, int rowCount, int colCount, std::string &strrr, double lengthOfSide) {
// 获取棋盘格中的角点坐标
getFourImgPointsInit(image, rowCount, colCount, lengthOfSide);
// 通过计算棋盘格的平均值获取棋盘格在图中的位置,将角点坐标放入对应的位置
double sum[2] = { 0, 0 };
for (int i = 0; i < imagePoints_1.size(); i++) {
sum[0] = sum[0] + imagePoints_1[i].x;
sum[1] = sum[1] + imagePoints_1[i].y;
}
double argvX = sum[0] / imagePoints_1.size();
double argvY = sum[1] / imagePoints_1.size();
if (argvX < centerOfPixels_.x && argvY < centerOfPixels_.y) {
if (flag1 == 0) {
imagePoints1 = imagePoints_1;
flag1 = 1;
strrr = "位置1图片已加载";
}
else {
strrr = "位置1图片存在请更换位置";
return false;
}
}
if (argvX > centerOfPixels_.x && argvY < centerOfPixels_.y) {
if (flag2 == 0) {
imagePoints2 = imagePoints_1;
flag2 = 1;
strrr = "位置2图片已加载";
}
else {
strrr = "位置2图片存在请更换位置";
return false;
}
}if (argvX < centerOfPixels_.x && argvY > centerOfPixels_.y) {
if (flag3 == 0) {
imagePoints3 = imagePoints_1;
flag3 = 1;
strrr = "位置3图片已加载";
}
else {
strrr = "位置3图片存在请更换位置";
return false;
}
}if (argvX > centerOfPixels_.x && argvY > centerOfPixels_.y) {
if (flag4 == 0) {
imagePoints4 = imagePoints_1;
flag4 = 1;
strrr = "位置4图片已加载";
}
else {
strrr = "位置4图片存在请更换位置";
return false;
}
}
// 图像采集完后返回true
if (flag1 == 1 && flag2 == 1 && flag3 == 1 && flag4 == 1) {
return true;
}
return false;
}
// 以下几个step是高斯约旦求逆矩阵的步骤
void step0(double m, double newMat[4][4]) {
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
if (i == j) {
newMat[i][j] = 1;
}
else {
newMat[i][j] = 0;
}
}
}
}
void step1(double m, double swap[4], double l[4][4], double mat11[4][4]) {
for (int i = 0; i < 4; i++) {
swap[i] = i;
for (int j = 0; j < 4; j++) {
l[i][j] = 0;
}
}
for (int i = 0; i < 4; i++) {
double max_row = mat11[i][i];
int row = i;
for (int j = i; j < 4; j++) {
if (mat11[j][i] >= max_row) {
max_row = mat11[j][i];
row = j;
}
}
swap[i] = row;
if (row != i) {
for (int j = 0; j < 4; j++) {
double swapk = mat11[i][j];
mat11[i][j] = mat11[row][j];
mat11[row][j] = swapk;
}
}
for (int j = i + 1; j < 4; j++) {
if (mat11[j][i] != 0) {
l[j][i] = mat11[j][i] / mat11[i][i];
for (int k = 0; k < 4; k++) {
mat11[j][k] = mat11[j][k] - (l[j][i] * mat11[i][k]);
}
}
}
}
}
void step2(double m, double mat11[4][4], double l1[4][4]) {
int longM = m - 1;
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
l1[i][j] = 0;
}
}
for (int i = 0; i < 4 - 1; i++) {
for (int j = 0; j < longM - i; j++) {
if ((mat11[longM - i - j - 1][longM - i] != 0) && (mat11[longM - i][longM - i] != 0)) {
l1[longM - i - j - 1][longM - i] = mat11[longM - i - j - 1][longM - i] / mat11[longM - i][longM - i];
for (int k = 0; k < 4; k++) {
mat11[longM - i - j - 1][k] = mat11[longM - i - j - 1][k] - l1[longM - i - j - 1][longM - i] * mat11[longM - i][k];
}
}
}
}
}
void step3(double m, double mat11[4][4], double l2[4]) {
for (int i = 0; i < 4; i++) {
l2[i] = mat11[i][i];
}
}
// 高斯约旦求变化矩阵的部分
void gaussJordan(int m, double mat11[4][4], double newMat[4][4]) {
double swap[4], l[4][4], l1[4][4], l2[4];
step0(4, newMat);
step1(4, swap, l, mat11);
step2(4, mat11, l1);
step3(4, mat11, l2);
for (int i = 0; i < 4; i++) {
if (swap[i] != i) {
for (int j = 0; j < 4; j++) {
double swapk1 = newMat[i][j];
int k1 = swap[i];
newMat[i][j] = newMat[k1][j];
newMat[k1][j] = swapk1;
}
}
for (int j = i + 1; j < 4; j++) {
for (int k = 0; k < 4; k++) {
if (l[j][i] != 0) {
newMat[j][k] = newMat[j][k] - l[j][i] * newMat[i][k];
}
}
}
}
for (int i = 0; i < 4 - 1; i++) {
for (int j = 0; j < 4 - i - 1; j++) {
if (l1[4 - 1 - i - j - 1][4 - 1 - i] != 0) {
for (int k = 0; k < 4; k++) {
newMat[4 - 1 - i - j - 1][k] = newMat[4 - 1 - i - j - 1][k] - l1[4 - 1 - i - j - 1][4 - i - 1] * newMat[4 - i - 1][k];
}
}
}
}
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
newMat[i][j] = newMat[i][j] / l2[i];
}
}
}
// 通过高斯约旦消元法算出逆矩阵,并得到变换矩阵
void getGaussJordan(cv::Size stepLong, std::vector< cv::Point2f > imagePoints, std::vector < cv::Mat > &invMat, int &matNumber, std::vector < std::vector < cv::Point2f >> &matPoints, cv::Point2f ¢erOfPixels) {
std::vector< std::vector< int > > pointRange;
cv::Mat newB = cv::Mat(4, 4, CV_32FC1);
cv::Mat newC = cv::Mat(4, 4, CV_32FC1);
double newMat[4][4];
int k = 0;
//将棋盘格的点坐标按照对应的顺序标注为第几个
for (int i = 0; i < new_colCount; i++)
{
std::vector< int > pointRange1;
for (int j = 0; j < new_rowCount; j++)
{
pointRange1.push_back(k);
k++;
}
pointRange.push_back(pointRange1);
}
//按照指定的步长计算变换矩阵
for (int i = 0; i < new_colCount - stepLong.width; i = i + stepLong.width)
{
for (int j = 0; j < new_rowCount - stepLong.height; j = j + stepLong.height)
{
//设置图像坐标的数组
double matrix[4][4] = {
{imagePoints[pointRange[i][j]].x, imagePoints[pointRange[i][j]].y, imagePoints[pointRange[i][j]].x * imagePoints[pointRange[i][j]].y, 1},
{imagePoints[pointRange[i + stepLong.height][j]].x, imagePoints[pointRange[i + stepLong.height][j]].y, imagePoints[pointRange[i + stepLong.height][j]].x * imagePoints[pointRange[i + stepLong.height][j]].y, 1},
{imagePoints[pointRange[i][j + stepLong.width]].x, imagePoints[pointRange[i][j + stepLong.width]].y, imagePoints[pointRange[i][j + stepLong.width]].x * imagePoints[pointRange[i][j + stepLong.width]].y, 1},
{imagePoints[pointRange[i + stepLong.height][j + stepLong.width]].x, imagePoints[pointRange[i + stepLong.height][j + stepLong.width]].y, imagePoints[pointRange[i + stepLong.height][j + stepLong.width]].x * imagePoints[pointRange[i + stepLong.height][j + stepLong.width]].y, 1},
};
//设置世界坐标的矩阵
double b[4][4] = {
{(i + 1) * mmPerPixels_, (j + 1) * mmPerPixels_, 0, 1},
{(i + 1 + stepLong.height) * mmPerPixels_, (j + 1) * mmPerPixels_, 0, 1},
{(i + 1) * mmPerPixels_, (j + 1 + stepLong.width) * mmPerPixels_, 0, 1},
{(i + 1 + stepLong.height) * mmPerPixels_, (j + 1 + stepLong.width) * mmPerPixels_, 0, 1},
};
//将世界坐标数组变成矩阵
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
newB.at<float>(i, j) = b[i][j];
}
}
//通过高斯约旦得到图像坐标的逆矩阵
gaussJordan(4, matrix, newMat);
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
newC.at<float>(i, j) = newMat[i][j];
}
}
//计算得到变换矩阵并保存
cv::Mat newM = newC * newB;
invMat.push_back(newM);
//将每个矩阵所在的区域保存
std::vector < cv::Point2f > matPoints1;
matPoints1.push_back(imagePoints[pointRange[i][j]]);
matPoints1.push_back(imagePoints[pointRange[i + stepLong.height][j]]);
matPoints1.push_back(imagePoints[pointRange[i][j + stepLong.width]]);
matPoints1.push_back(imagePoints[pointRange[i + stepLong.height][j + stepLong.width]]);
matPoints.push_back(matPoints1);
matNumber++;
}
}
//计算中点的世界坐标位置
double x[4] = { centerOfPixels_.x, centerOfPixels_.y, centerOfPixels_.x * centerOfPixels_.y, 1 };
cv::Mat x1 = cv::Mat(1, 4, CV_32FC1);
for (int i = 0; i < 4; i++) {
x1.at<float>(i) = x[i];
}
cv::Mat newCenter;
double tmpKK = -1000;
//判断中点坐标所在的区域
for (int i = 0; i < matNumber; i++) {
double tmp = pointPolygonTest(matPoints[i], centerOfPixels_, true);
if (tmp >= 0) {
newCenter = x1 * invMat[i];
break;
}
else {
if (tmp > tmpKK) {
tmpKK = tmp;
newCenter = x1 * invMat[i];
}
}
}
std::cout << newCenter << std::endl;
//将中点的实际距离坐标位置保存
centerOfPixels = cv::Point2f(newCenter.at<float>(1), newCenter.at<float>(0));
}
//计算输入点的实际坐标
void claPoints(cv::Point2f& startPoint, std::vector < cv::Mat > invMat, int matNumber, std::vector < std::vector < cv::Point2f >> matPoints) {
double x[4] = { startPoint.x, startPoint.y, startPoint.x * startPoint.y, 1 };
cv::Mat x1 = cv::Mat(1, 4, CV_32FC1);
for (int i = 0; i < 4; i++) {
x1.at<float>(i) = x[i];
}
cv::Mat restMat;
//判断点所在的区域,如果在所有区域外则选择最近的区域
double outsideTmp = pointPolygonTest(matPoints[0], startPoint, true);;
int outsideFlag = 0;
bool flag = 0;
for (int i = 0; i < matNumber; i++) {
double tmp = pointPolygonTest(matPoints[i], startPoint, true);
if (tmp >= 0) {
flag = 1;
restMat = x1 * invMat[i];
break;
}
else {
if (outsideTmp > tmp) {
outsideTmp = tmp;
outsideFlag = i;
}
}
}
if (flag == 0) {
restMat = x1 * invMat[outsideFlag];
}
startPoint = cv::Point2f(restMat.at<float>(1), restMat.at<float>(0));
}
//计算以图像中点为中心的笛卡尔坐标系的实际距离并确定象限
void TranslateCoord(cv::Point2f& points, std::vector < cv::Mat > invMat, int matNumber, std::vector < std::vector < cv::Point2f >> matPoints, cv::Point2f centerOfPixels, std::vector< cv::Point2f > imagePoints)
{
float locationP1, locationP2;
locationP1 = points.x;
locationP2 = points.y;
assert(mmPerPixels_ > 0);
assert(matNumber > 0);
//计算实际距离
claPoints(points, invMat, matNumber, matPoints);
points.x = points.x - centerOfPixels.x;
points.y = points.y - centerOfPixels.y;
if (locationP1 < centerOfPixels_.x && locationP2 < centerOfPixels_.y) {
points.x = -abs(points.x);
points.y = abs(points.y);
}
else if (locationP1 < centerOfPixels_.x && locationP2 > centerOfPixels_.y) {
points.x = -abs(points.x);
points.y = -abs(points.y);
}
else if (locationP1 > centerOfPixels_.x && locationP2 < centerOfPixels_.y) {
points.x = abs(points.x);
points.y = abs(points.y);
}
else if (locationP1 > centerOfPixels_.x && locationP2 > centerOfPixels_.y) {
points.x = abs(points.x);
points.y = -abs(points.y);
}
}
//计算矫正后的实际距离并保存
bool_t getFourImgPoints2(cv::Size stepLong, std::string filenameData) {
float* scales;
scales = new float[imageShape_rows * imageShape_cols * 2];
int matNumber1 = 0, matNumber2 = 0, matNumber3 = 0, matNumber4 = 0;
std::vector < std::vector < cv::Point2f >> matPoints1, matPoints2, matPoints3, matPoints4;
cv::Point2f centerOfPixels1, centerOfPixels2, centerOfPixels3, centerOfPixels4;
//分别计算四个位置的实际距离
getGaussJordan(stepLong, imagePoints1, invMat1, matNumber1, matPoints1, centerOfPixels1);
for (int i = 1; i < imageShape_cols / 2; i++) {
for (int j = 1; j < imageShape_rows / 2; j++) {
cv::Point2f p = cv::Point2f(i, j);
TranslateCoord(p, invMat1, matNumber1, matPoints1, centerOfPixels1, imagePoints1);
scales[i * imageShape_rows * 2 + j * 2 + 0] = p.x;
scales[i * imageShape_rows * 2 + j * 2 + 1] = p.y;
}
}
getGaussJordan(stepLong, imagePoints2, invMat2, matNumber2, matPoints2, centerOfPixels2);
for (int i = imageShape_cols / 2; i < imageShape_cols; i++) {
for (int j = 1; j < imageShape_rows / 2; j++) {
cv::Point2f p = cv::Point2f(i, j);
TranslateCoord(p, invMat2, matNumber2, matPoints2, centerOfPixels2, imagePoints2);
scales[i * imageShape_rows * 2 + j * 2 + 0] = p.x;
scales[i * imageShape_rows * 2 + j * 2 + 1] = p.y;
}
}
getGaussJordan(stepLong, imagePoints3, invMat3, matNumber3, matPoints3, centerOfPixels3);
for (int i = 1; i < imageShape_cols / 2; i++) {
for (int j = imageShape_rows / 2; j < imageShape_rows; j++) {
cv::Point2f p = cv::Point2f(i, j);
TranslateCoord(p, invMat3, matNumber3, matPoints3, centerOfPixels3, imagePoints3);
scales[i * imageShape_rows * 2 + j * 2 + 0] = p.x;
scales[i * imageShape_rows * 2 + j * 2 + 1] = p.y;
}
}
getGaussJordan(stepLong, imagePoints4, invMat4, matNumber4, matPoints4, centerOfPixels4);
for (int i = imageShape_cols / 2; i < imageShape_cols; i++) {
for (int j = imageShape_rows / 2; j < imageShape_rows; j++) {
cv::Point2f p = cv::Point2f(i, j);
TranslateCoord(p, invMat4, matNumber4, matPoints4, centerOfPixels4, imagePoints4);
scales[i * imageShape_rows * 2 + j * 2 + 0] = p.x;
scales[i * imageShape_rows * 2 + j * 2 + 1] = p.y;
}
}
/* std::cout << scales[10 * imageShape_rows * 2 + 10 * 2 + 0] << std::endl;
std::cout << scales[10 * imageShape_rows * 2 + 10 * 2 + 1] << std::endl;*/
FILE *file;
file = fopen(filenameData.c_str(), "wb");
fwrite(scales, imageShape_rows * imageShape_cols, sizeof(float) * 2, file);
fclose(file);
return true;
}
//读取保存的数组
cv::Point2f readPoint(cv::Point imageP, std::string filenameData) {
cv::Point2f realP = cv::Point2f(100, 200);
float* scales;
scales = new float[imageShape_rows * imageShape_cols * 2];
FILE *file;
file = fopen(filenameData.c_str(), "rb");
fread(scales, imageShape_cols * imageShape_rows, sizeof(float) * 2, file);
realP.x = scales[imageP.x * imageShape_rows * 2 + imageP.y * 2 + 0];
realP.y = scales[imageP.x * imageShape_rows * 2 + imageP.y * 2 + 1];
fclose(file);
return realP;
}
bool_t getRealImg(cv::Mat image, cv::Mat &realImage, int rowCount, int colCount)
{
rowCount--;
colCount--;
std::vector< cv::Point3f > worldPoints;
cv::Mat cameraMatrix_;
cv::Mat distCoeffs_;
for (int j = 0; j < colCount; ++j)
{
for (int k = 0; k < rowCount; ++k)
{
worldPoints.push_back(cv::Point3f(k*1.0, j*1.0, 0.0f));
}
}
std::vector< cv::Mat > rvecs, tvecs;
std::vector< std::vector< cv::Point2f > > corners2;
std::vector< std::vector< cv::Point3f > > worldPoints2;
double pointDis = 1000;
int pointDisFlag = 0;
for (int i = 0; i < 4; ++i)
{
worldPoints2.push_back(worldPoints);
}
corners2.push_back(imagePoints1);
corners2.push_back(imagePoints2);
corners2.push_back(imagePoints3);
corners2.push_back(imagePoints4);
cv::calibrateCamera(worldPoints2, corners2, image.size(), cameraMatrix_, distCoeffs_, rvecs, tvecs, cv::CALIB_FIX_PRINCIPAL_POINT);
cv::undistort(image, realImage, cameraMatrix_, distCoeffs_);
return true;
}
};
// Demo driver: load four chessboard photos (one per quadrant), build the
// per-patch correction table, write it to disk, read one point back, then
// show the conventionally undistorted image.
int main()
{
    std::cout << "Hello World!\n";
    cv::Mat image;
    // Chessboard size in squares (inner corners are one less each way).
    int long_size = 16;
    int short_size = 12;
    // Step (in corners) between the anchor points of each affine patch.
    cv::Size stepLong;
    stepLong.height = 4;
    stepLong.width = 4;
    // Side length of one square in world units (mm).
    double lengthOfSide = 5;
    std::string strrr;
    std::string filenameData = "E:\\python\\opencv\\008newcal\\test.dat";
    CameraCalibration newMatrix;
    // One photo per board position; the class files each by its quadrant.
    // NOTE(review): 01182.bmp is loaded twice — the second load will be
    // rejected as a duplicate position; confirm whether another file (e.g.
    // a 5th capture) was intended.
    const char* imageFiles[] = {
        "E:\\python\\opencv\\008newcal\\01181.bmp",
        "E:\\python\\opencv\\008newcal\\01182.bmp",
        "E:\\python\\opencv\\008newcal\\01183.bmp",
        "E:\\python\\opencv\\008newcal\\01182.bmp",
        "E:\\python\\opencv\\008newcal\\01184.bmp",
    };
    for (const char* f : imageFiles) {
        image = cv::imread(f);
        // fix: imread failure (missing file) previously went undetected and
        // crashed inside corner detection.
        if (image.empty()) {
            std::cout << "failed to load image: " << f << std::endl;
            return -1;
        }
        newMatrix.getFourImgPoints1(image, long_size, short_size, strrr, lengthOfSide);
        std::cout << strrr << std::endl;
    }
    newMatrix.getFourImgPoints2(stepLong, filenameData);
    cv::Point imageP = cv::Point(100, 200);
    cv::Point2f realP = newMatrix.readPoint(imageP, filenameData);
    cv::Mat realImg;
    // Undistort the last-loaded image with a standard calibration.
    newMatrix.getRealImg(image, realImg, 16, 12);
    cv::imshow("realImg", realImg);
    cv::waitKey(0);
}