OpenCvSharp 3D Reconstruction (SFM) and Image Stitching (Stitch)
For the principles behind SFM reconstruction, see https://blog.csdn.net/lpj822/article/details/82716971
For a C++ implementation of image stitching, see https://blog.csdn.net/qq_34623621/article/details/120783508
This article implements SFM sparse point-cloud reconstruction with OpenCvSharp. Other bloggers explain the underlying theory in great detail, so only the implementation is presented here for your reference. The core C# code is as follows:
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using OpenCvSharp;
using OpenCvSharp.Extensions;
using OpenCvSharp.Features2D;
using OpenCvSharp.XFeatures2D;
namespace SFM
{
public class StructureFromMotion
{
public StructureFromMotion()
{
}
public StructureFromMotion(SURF SURF)
{
_ThisSURF = SURF;
SelectMethod = 0;
}
public StructureFromMotion(SIFT SIFT)
{
_ThisSIFT = SIFT;
SelectMethod = 1;
}
public StructureFromMotion(ORB ORB)
{
_ThisORB = ORB;
SelectMethod = 2;
}
private int SelectMethod;
private SURF _ThisSURF;
public SURF ThisSURF
{
get { return _ThisSURF; }
set { _ThisSURF = value; }
}
private ORB _ThisORB;
public ORB ThisORB
{
get { return _ThisORB; }
set { _ThisORB = value; }
}
private SIFT _ThisSIFT;
public SIFT ThisSIFT
{
get { return _ThisSIFT; }
set { _ThisSIFT = value; }
}
private double _Ratio = 0.6;
/// <summary>
/// Threshold for Lowe's ratio test used in Match_Features
/// </summary>
public double Ratio
{
get => _Ratio;
set => _Ratio = value;
}
private double _Outlier = 0.5;
/// <summary>
/// Minimum inlier ratio required when estimating the essential matrix from the matched points with RANSAC
/// <para>For RANSAC, the result is unreliable once outliers exceed 50%</para>
/// </summary>
public double Outlier
{
get => _Outlier;
set => _Outlier = value;
}
#region Core reconstruction functions
/// <summary>
/// Feature extraction
/// </summary>
/// <param name="image_names">Input image paths</param>
/// <param name="key_points_for_all">Extracted keypoints per image</param>
/// <param name="descriptor_for_all">Feature descriptors per image</param>
/// <param name="colors_for_all">Color of each keypoint per image</param>
private void Extract_Features(List<string> image_names, List<List<KeyPoint>> key_points_for_all, List<Mat> descriptor_for_all, List<List<Vec3b>> colors_for_all)
{
key_points_for_all.Clear();
descriptor_for_all.Clear();
colors_for_all.Clear();
//SURF, SIFT and ORB all derive from Feature2D, so the three per-method branches collapse into one loop
Feature2D ExtractMethod;
switch (SelectMethod)
{
case 0: ExtractMethod = _ThisSURF; break;
case 1: ExtractMethod = _ThisSIFT; break;
default: ExtractMethod = _ThisORB; break;
}
for (int it = 0; it != image_names.Count; ++it)
{
Mat image = Cv2.ImRead(image_names[it]);
if (image.Empty()) continue;
KeyPoint[] key_points;
Mat descriptor = new Mat();
Mat mask = new Mat();
//This call occasionally fails with a memory allocation error
ExtractMethod.DetectAndCompute(image, mask, out key_points, descriptor);
//Skip the image if it yields too few keypoints
if (key_points.Length <= 10) continue;
key_points_for_all.Add(new List<KeyPoint>(key_points));
descriptor_for_all.Add(descriptor);
//Record the (BGR) pixel color at each keypoint location
List<Vec3b> colors = new List<Vec3b>();
for (int i = 0; i < key_points.Length; ++i)
{
Point2f p = key_points[i].Pt;
colors.Add(image.At<Vec3b>((int)p.Y, (int)p.X));
}
colors_for_all.Add(colors);
}
}
/// <summary>
/// Feature matching
/// </summary>
/// <param name="query">Descriptors of the query image</param>
/// <param name="train">Descriptors of the train image</param>
/// <param name="matches">Output list of filtered matches</param>
private void Match_Features(Mat query, Mat train, List<DMatch> matches)
{
//L2 suits float descriptors (SURF/SIFT); for ORB's binary descriptors NormTypes.Hamming would be the appropriate norm
BFMatcher Matcher = new BFMatcher(NormTypes.L2);
//K = 2: return the two nearest neighbors per descriptor; accept a match only when the best one is sufficiently closer than the second best (Lowe's ratio test)
int K = 2;
DMatch[][] Knn_Matches = Matcher.KnnMatch(query, train, K);
//Find the smallest distance among the matches that pass the ratio test
float min_dist = float.MaxValue;
for (int r = 0; r < Knn_Matches.Length; ++r)
{
//Ratio test
if (Knn_Matches[r].Length < 2 || Knn_Matches[r][0].Distance > Ratio * Knn_Matches[r][1].Distance)
continue;
float dist = Knn_Matches[r][0].Distance;
if (dist < min_dist) min_dist = dist;
}
//Lowe recommends a ratio threshold of 0.8, but matching many image pairs with arbitrary scale,
//rotation and brightness changes suggests that values between 0.4 and 0.6 work best: below 0.4
//very few matches survive, above 0.6 many false matches appear. Suggested values:
//ratio = 0.4 when matches must be highly accurate;
//ratio = 0.6 when a larger number of matches is needed;
//ratio = 0.5 for the general case.
matches.Clear();
for (int r = 0; r < Knn_Matches.Length; ++r)
{
//Discard matches that fail the ratio test or whose distance is too large
if (Knn_Matches[r].Length < 2 || Knn_Matches[r][0].Distance > Ratio * Knn_Matches[r][1].Distance || Knn_Matches[r][0].Distance > 5 * Math.Max(min_dist, 10.0f))
continue;
//Keep the match
matches.Add(Knn_Matches[r][0]);
}
}
/// <summary>
/// Match features between every pair of consecutive images
/// </summary>
/// <param name="descriptor_for_all">Feature descriptors per image</param>
/// <param name="matches_for_all">Output matches for each consecutive image pair</param>
private void Match_MultiFeatures(List<Mat> descriptor_for_all, List<List<DMatch>> matches_for_all)
{
matches_for_all.Clear();
//n images give n-1 consecutive pairs:
//1 with 2, 2 with 3, 3 with 4, and so on
for (int i = 0; i < descriptor_for_all.Count - 1; ++i)
{
List<DMatch> matches = new List<DMatch>();
Match_Features(descriptor_for_all[i], descriptor_for_all[i + 1], matches);
matches_for_all.Add(matches);
}
}
//Example intrinsics: K = [2759.48, 0, 1520.69; 0, 2764.16, 1006.81; 0, 0, 1]
/// <summary>
/// Recover the relative rotation and translation between two views
/// </summary>
/// <param name="CameraMatrix">Camera intrinsic matrix</param>
/// <param name="Points1">N 2D pixel points from the first image; coordinates should be floating point (single or double precision)</param>
/// <param name="Points2">N 2D pixel points from the second image, same size and type as Points1</param>
/// <param name="RotationMatrix">Output rotation matrix of the camera pose</param>
/// <param name="MotionsMatrix">Output translation vector of the camera pose</param>
/// <param name="mask">Output array of N elements: 0 marks an outlier, 1 an inlier. Only computed by the RANSAC and LMedS methods</param>
/// <returns></returns>
private bool Find_Transform(Mat CameraMatrix, List<Point2f> Points1, List<Point2f> Points2, Mat RotationMatrix, Mat MotionsMatrix, Mat mask)
{
//Read the focal length and principal point from the intrinsic matrix
double focal_length = 0.5 * (CameraMatrix.At<double>(0, 0) + CameraMatrix.At<double>(1, 1));
Point2d principle_point = new Point2d(CameraMatrix.At<double>(0, 2), CameraMatrix.At<double>(1, 2));
//Estimate the essential matrix from the matched points, using RANSAC to remove further mismatches
Mat E = Cv2.FindEssentialMat(InputArray.Create<Point2f>(Points1), InputArray.Create<Point2f>(Points2), focal_length, principle_point, EssentialMatMethod.Ransac, 0.999, 1.0, mask);
if (E.Empty())
return false;
double feasible_count = Cv2.CountNonZero(mask);
//For RANSAC, the result is unreliable once outliers exceed 50%
if (feasible_count <= 15 || (feasible_count / Points1.Count) < _Outlier)
return false;
//Decompose the essential matrix to obtain the relative transform
int pass_count = Cv2.RecoverPose(E, InputArray.Create<Point2f>(Points1), InputArray.Create<Point2f>(Points2), RotationMatrix, MotionsMatrix, focal_length, principle_point, mask);
//Enough of the points must lie in front of both cameras
if (((double)pass_count) / feasible_count < 0.7)
return false;
return true;
}
/// <summary>
/// Collect the matched point pairs selected by the matcher
/// </summary>
/// <param name="p1">Keypoints of the first image</param>
/// <param name="p2">Keypoints of the second image</param>
/// <param name="matches">Matches between the two images</param>
/// <param name="out_p1">Output matched points from the first image</param>
/// <param name="out_p2">Output matched points from the second image</param>
private void Get_Matched_Points(List<KeyPoint> p1, List<KeyPoint> p2, List<DMatch> matches, List<Point2f> out_p1, List<Point2f> out_p2)
{
out_p1.Clear();
out_p2.Clear();
for (int i = 0; i < matches.Count(); ++i)
{
out_p1.Add(p1[matches[i].QueryIdx].Pt);
out_p2.Add(p2[matches[i].TrainIdx].Pt);
}
}
/// <summary>
/// Collect the colors of the matched point pairs selected by the matcher
/// </summary>
/// <param name="c1">Keypoint colors of the first image</param>
/// <param name="c2">Keypoint colors of the second image</param>
/// <param name="matches">Matches between the two images</param>
/// <param name="out_c1">Output colors of the matched points in the first image</param>
/// <param name="out_c2">Output colors of the matched points in the second image</param>
private void Get_Matched_Colors(List<Vec3b> c1, List<Vec3b> c2, List<DMatch> matches, List<Vec3b> out_c1, List<Vec3b> out_c2)
{
out_c1.Clear();
out_c2.Clear();
for (int i = 0; i < matches.Count(); ++i)
{
out_c1.Add(c1[matches[i].QueryIdx]);
out_c2.Add(c2[matches[i].TrainIdx]);
}
}
/// <summary>
/// Triangulate the 3D point cloud from two views
/// </summary>
/// <param name="CameraMatrix">Camera intrinsic matrix</param>
/// <param name="R1">Rotation matrix of image 1</param>
/// <param name="T1">Translation vector of image 1</param>
/// <param name="R2">Rotation matrix of image 2</param>
/// <param name="T2">Translation vector of image 2</param>
/// <param name="Points1">Matched points of image 1</param>
/// <param name="Points2">Matched points of image 2</param>
/// <param name="Structure3D">Reconstructed 3D points</param>
private void ReconstructRL(Mat CameraMatrix, Mat R1, Mat T1, Mat R2, Mat T2, List<Point2f> Points1, List<Point2f> Points2, List<Point3f> Structure3D)
{
//Projection matrices [R|T] of the two cameras; TriangulatePoints only supports float
Mat proj1 = new Mat(3, 4, MatType.CV_32FC1);
Mat proj2 = new Mat(3, 4, MatType.CV_32FC1);
//Copy R and T into the 3x4 projection matrices; the submatrix shapes must line up.
//Earlier wrong results all came from forgetting the ColRange(0, 3) here.
R1.ConvertTo(proj1.RowRange(0, 3).ColRange(0, 3), MatType.CV_32FC1);
T1.ConvertTo(proj1.Col(3), MatType.CV_32FC1);
R2.ConvertTo(proj2.RowRange(0, 3).ColRange(0, 3), MatType.CV_32FC1);
T2.ConvertTo(proj2.Col(3), MatType.CV_32FC1);
Mat fK = new Mat();
CameraMatrix.ConvertTo(fK, MatType.CV_32FC1);
proj1 = fK * proj1;
proj2 = fK * proj2;
//Triangulation: Points4D receives the points in homogeneous coordinates, one column per point
Mat Points4D = new Mat();
Cv2.TriangulatePoints(proj1, proj2, InputArray.Create<Point2f>(Points1), InputArray.Create<Point2f>(Points2), Points4D);
Structure3D.Clear();
for (int i = 0; i < Points4D.Cols; i++)
{
//Homogeneous coordinates: divide by the last element to get the true coordinate values
float w = Points4D.At<float>(3, i);
Structure3D.Add(new Point3f(Points4D.At<float>(0, i) / w, Points4D.At<float>(1, i) / w, Points4D.At<float>(2, i) / w));
}
}
/// <summary>
/// Filter the feature points, removing outliers
/// </summary>
/// <param name="ListPoint2f">Input feature points</param>
/// <param name="FeaturesMask">Inlier mask for the points</param>
/// <returns></returns>
private List<Point2f> Maskout_Points(List<Point2f> ListPoint2f, Mat FeaturesMask)
{
List<Point2f> Result = new List<Point2f>();
for (int i = 0; i < FeaturesMask.Rows; ++i)
{
//Keep only the points whose mask entry is nonzero (inliers)
if (FeaturesMask.At<byte>(i) > 0)
Result.Add(ListPoint2f[i]);
}
return Result;
}
/// <summary>
/// Filter the feature point colors, removing outliers
/// </summary>
/// <param name="ListVec3b">Input feature point colors</param>
/// <param name="FeaturesMask">Inlier mask for the points</param>
/// <returns></returns>
private List<Vec3b> Maskout_Colors(List<Vec3b> ListVec3b, Mat FeaturesMask)
{
List<Vec3b> Result = new List<Vec3b>();
for (int i = 0; i < FeaturesMask.Rows; ++i)
{
//Keep only the colors whose mask entry is nonzero (inliers)
if (FeaturesMask.At<byte>(i) > 0)
Result.Add(ListVec3b[i]);
}
return Result;
}
/// <summary>
/// Gather 3D-2D correspondences for PnP
/// </summary>
/// <param name="Matches">Matches between image i and image i+1</param>
/// <param name="Struct_Indices">Structure indices of the keypoints in image i</param>
/// <param name="Structure3D">Current 3D structure</param>
/// <param name="Key_Points">Keypoints of image i+1</param>
/// <param name="Object_Points">Output 3D object points</param>
/// <param name="Image_Points">Output 2D image points</param>
private void Get_ObjPoints_And_ImgPoints(List<DMatch> Matches, List<int> Struct_Indices, List<Point3f> Structure3D, List<KeyPoint> Key_Points, List<Point3f> Object_Points, List<Point2f> Image_Points)
{
Object_Points.Clear();
Image_Points.Clear();
for (int i = 0; i < Matches.Count(); ++i)
{
int query_idx = Matches[i].QueryIdx;
int train_idx = Matches[i].TrainIdx;
int struct_idx = Struct_Indices[query_idx];
if (struct_idx < 0) continue; //this keypoint has not been reconstructed in 3D yet
Object_Points.Add(Structure3D[struct_idx]);
Image_Points.Add(Key_Points[train_idx].Pt);
}
}
/// <summary>
/// Fuse a new reconstruction into the existing structure
/// </summary>
/// <param name="Matches">Matches between image i and image i+1</param>
/// <param name="Struct_Indices">Structure indices of the keypoints in image i</param>
/// <param name="Next_Struct_Indices">Structure indices of the keypoints in image i+1</param>
/// <param name="Structure3D">Existing structure</param>
/// <param name="Next_Structure3D">Newly triangulated structure</param>
/// <param name="Colors">Existing color set</param>
/// <param name="Next_Colors">Colors of the new points</param>
private void Fusion_Structure3D(List<DMatch> Matches, List<int> Struct_Indices, List<int> Next_Struct_Indices, List<Point3f> Structure3D, List<Point3f> Next_Structure3D, List<Vec3b> Colors, List<Vec3b> Next_Colors)
{
for (int i = 0; i < Matches.Count(); ++i)
{
int query_idx = Matches[i].QueryIdx;
int train_idx = Matches[i].TrainIdx;
int struct_idx = Struct_Indices[query_idx];
if (struct_idx >= 0) //The point already exists in space, so both keypoints of this match refer to the same 3D point and must share its index
{
Next_Struct_Indices[train_idx] = struct_idx;
continue;
}
//Otherwise the point is new: add it to the structure, and both keypoints of this match index the newly added point
Structure3D.Add(Next_Structure3D[i]);
Colors.Add(Next_Colors[i]);
Struct_Indices[query_idx] = Next_Struct_Indices[train_idx] = Structure3D.Count() - 1;
}
}
/// <summary>
/// Initialize the reconstruction from the first two images
/// </summary>
/// <param name="CameraMatrix">Camera intrinsic matrix</param>
/// <param name="Key_Points_For_All">Keypoints of all images</param>
/// <param name="Colors_For_All">Keypoint colors of all images</param>
/// <param name="Matches_For_All">Matches between all consecutive image pairs</param>
/// <param name="Structure3D">Output structure (3D points)</param>
/// <param name="Correspond_Struct_Index">Output structure index for each keypoint of each image</param>
/// <param name="Colors">Output colors of the structure points</param>
/// <param name="Rotations">Output rotation matrices</param>
/// <param name="Motions">Output translation vectors</param>
private bool Init_Structure(Mat CameraMatrix, List<List<KeyPoint>> Key_Points_For_All, List<List<Vec3b>> Colors_For_All, List<List<DMatch>> Matches_For_All, List<Point3f> Structure3D, List<List<int>> Correspond_Struct_Index, List<Vec3b> Colors, List<Mat> Rotations, List<Mat> Motions)
{
//Compute the transform between the first two images
List<Point2f> p1 = new List<Point2f>();
List<Point2f> p2 = new List<Point2f>();
List<Vec3b> c2 = new List<Vec3b>();
Mat R = new Mat();
Mat T = new Mat(); //rotation matrix and translation vector
Mat mask = new Mat(); //in mask, nonzero entries mark matched points; zero marks mismatches
Get_Matched_Points(Key_Points_For_All[0], Key_Points_For_All[1], Matches_For_All[0], p1, p2);
Get_Matched_Colors(Colors_For_All[0], Colors_For_All[1], Matches_For_All[0], Colors, c2);
if (Find_Transform(CameraMatrix, p1, p2, R, T, mask))
{
//Triangulate the first two images
p1 = Maskout_Points(p1, mask);
p2 = Maskout_Points(p2, mask);
//Filter Colors in place: simply reassigning the parameter would not reach the caller's list
List<Vec3b> MaskedColors = Maskout_Colors(Colors, mask);
Colors.Clear();
Colors.AddRange(MaskedColors);
Mat R0 = Mat.Eye(3, 3, MatType.CV_64FC1);
Mat T0 = Mat.Zeros(3, 1, MatType.CV_64FC1);
ReconstructRL(CameraMatrix, R0, T0, R, T, p1, p2, Structure3D);
//Save the transforms
Rotations.Add(R0);
Rotations.Add(R);
Motions.Add(T0);
Motions.Add(T);
//Initialize Correspond_Struct_Index to the same shape as Key_Points_For_All, filled with -1
Correspond_Struct_Index.Clear();
for (int i = 0; i < Key_Points_For_All.Count; i++)
{
List<int> Temp = new List<int>();
Correspond_Struct_Index.Add(Temp);
for (int j = 0; j < Key_Points_For_All[i].Count; j++)
{
Correspond_Struct_Index[i].Add(-1);
}
}
//Fill in the structure indices for the first two images
int idx = 0;
List<DMatch> matches = Matches_For_All[0];
for (int i = 0; i < matches.Count; ++i)
{
if (mask.At<byte>(i) == 0)
continue;
Correspond_Struct_Index[0][matches[i].QueryIdx] = idx;
Correspond_Struct_Index[1][matches[i].TrainIdx] = idx;
++idx;
}
return true;
}
else
{
System.Windows.Forms.MessageBox.Show("Not enough matched points; initialization failed!");
return false;
}
}
//Collect every file name in the given directory (non-image files will fail ImRead and be skipped later)
private void Get_File_Names(string Dir_Name, List<string> Img_Names)
{
Img_Names.Clear();
System.IO.DirectoryInfo root = new System.IO.DirectoryInfo(Dir_Name);
foreach (System.IO.FileInfo f in root.GetFiles())
{
string fullName = f.FullName;
Img_Names.Add(fullName);
}
}
/// <summary>
/// Save the data with FileStorage. The original attempt never produced a valid file:
/// FileStorage.Write takes a node name plus a value (not the file name), and node names
/// must not contain spaces or colons. The version below is a corrected sketch.
/// </summary>
/// <param name="File_Name">Output path and file name (e.g. a .yml file)</param>
/// <param name="Rotations">Rotation matrices</param>
/// <param name="Motions">Translation vectors</param>
/// <param name="Structure3D">3D point set</param>
/// <param name="Colors">RGB information</param>
private void Save_Structure3D(string File_Name, List<Mat> Rotations, List<Mat> Motions, List<Point3f> Structure3D, List<Vec3b> Colors)
{
int n = Rotations.Count;
FileStorage File = new FileStorage(File_Name, FileStorage.Modes.Write);
File.Write("CameraCount", n);
File.Write("PointCount", Structure3D.Count);
for (int i = 0; i < n; ++i)
{
File.Write("Rotation_" + i, Rotations[i]);
File.Write("Motion_" + i, Motions[i]);
}
//Pack the points and colors into Mats so each collection is stored under a single node
Mat PointsMat = new Mat(Structure3D.Count, 1, MatType.CV_32FC3, Structure3D.ToArray());
Mat ColorsMat = new Mat(Colors.Count, 1, MatType.CV_8UC3, Colors.ToArray());
File.Write("Points", PointsMat);
File.Write("Colors", ColorsMat);
File.Release();
}
private void Save_PointsAndRGB(string File_Name, List<Point3f> Structure3D, List<Vec3b> Colors)
{
//Write one "X Y Z B G R" line per point (ImRead loads images as BGR, so Item0 is blue)
using (System.IO.StreamWriter FileSave = new System.IO.StreamWriter(File_Name, false, Encoding.UTF8))
{
for (int i = 0; i < Structure3D.Count; ++i)
{
FileSave.WriteLine(Structure3D[i].X + " " + Structure3D[i].Y + " " + Structure3D[i].Z + " "
+ Colors[i].Item0 + " " + Colors[i].Item1 + " " + Colors[i].Item2);
}
}
}
#endregion
/// <summary>
/// Run the full pipeline
/// </summary>
/// <param name="Images_Name">Image paths and file names</param>
/// <param name="CameraParamMatrix">Camera intrinsic matrix</param>
/// <param name="Save_Name">Output path and file name</param>
/// <returns>The reconstructed sparse point cloud, or null on failure</returns>
public List<Point3f> RunMain(List<string> Images_Name, Mat CameraParamMatrix, string Save_Name)
{
List<Point3f> structure = new List<Point3f>();
try
{
List<List<KeyPoint>> key_points_for_all = new List<List<KeyPoint>>();
List<Mat> descriptor_for_all = new List<Mat>();
List<List<Vec3b>> colors_for_all = new List<List<Vec3b>>();
List<List<DMatch>> matches_for_all = new List<List<DMatch>>();
//Extract features from all images
Extract_Features(Images_Name, key_points_for_all, descriptor_for_all, colors_for_all);
//Match features between consecutive images
Match_MultiFeatures(descriptor_for_all, matches_for_all);
List<List<int>> correspond_struct_idx = new List<List<int>>(); //index into structure of the 3D point for keypoint j of image i
List<Vec3b> colors = new List<Vec3b>();
List<Mat> rotations = new List<Mat>();
List<Mat> motions = new List<Mat>();
//Initialize the structure (3D point cloud)
if (Init_Structure(CameraParamMatrix, key_points_for_all, colors_for_all, matches_for_all, structure, correspond_struct_idx, colors, rotations, motions))
{
//Incrementally reconstruct the remaining images
for (int i = 1; i < matches_for_all.Count(); ++i)
{
List<Point3f> object_points = new List<Point3f>();
List<Point2f> image_points = new List<Point2f>();
Mat r = new Mat();
Mat R = new Mat();
Mat T = new Mat();
//For image i, get the 3D points of its matched features, plus the corresponding pixels in image i+1
Get_ObjPoints_And_ImgPoints(matches_for_all[i], correspond_struct_idx[i], structure, key_points_for_all[i + 1], object_points, image_points);
//Solve for the camera pose of image i+1 with PnP + RANSAC
//*Parameters:
//* objectPoints: reference points in the world coordinate system; float or double
//* imagePoints: reference points on the camera image plane; float or double
//* cameraMatrix: camera intrinsic matrix
//* distCoeffs: camera distortion coefficients, e.g. cv::Mat::zeros(4, 1, CV_64FC1)
//* rvec: output rotation vector
//* tvec: output translation vector
//* useExtrinsicGuess: when solving PnP iteratively, whether to start from the supplied guess (true) or from an analytic solution (false)
//* iterationsCount: RANSAC iteration count; only an initial value, since the function shrinks it internally from the estimated outlier probability, so a large value is fine to start with
//* reprojectionError: RANSAC inlier/outlier distance threshold, derivable from the estimated inlier probability and the per-point variance (assuming Gaussian error)
//* confidence: governs the number of samples (iterations); the probability that after N draws some sample of s points out of n is all inliers
//* inliers: output sequence of inlier indices, in matrix form
//* flags: model used on the minimal subset:
//* SOLVEPNP_ITERATIVE (EPNP on the minimal set, then an iterative refinement on the inliers)
//* SOLVEPNP_P3P (P3P only on the minimal set, then EPNP on the inliers)
//* SOLVEPNP_AP3P (AP3P only on the minimal set, then EPNP on the inliers)
//* SOLVEPNP_EPNP (EPNP on both the minimal set and the inliers)
Mat DistCoeffs = new Mat(); //empty = zero distortion
Cv2.SolvePnPRansac(InputArray.Create<Point3f>(object_points), InputArray.Create<Point2f>(image_points), CameraParamMatrix, DistCoeffs, r, T);
//Convert the rotation vector into a rotation matrix
Cv2.Rodrigues(r, R);
//Save the transforms
rotations.Add(R);
motions.Add(T);
List<Point2f> p1 = new List<Point2f>(), p2 = new List<Point2f>();
List<Vec3b> c1 = new List<Vec3b>(), c2 = new List<Vec3b>();
Get_Matched_Points(key_points_for_all[i], key_points_for_all[i + 1], matches_for_all[i], p1, p2);
Get_Matched_Colors(colors_for_all[i], colors_for_all[i + 1], matches_for_all[i], c1, c2);
//Triangulate using the poses recovered above
List<Point3f> next_structure = new List<Point3f>();
ReconstructRL(CameraParamMatrix, rotations[i], motions[i], R, T, p1, p2, next_structure);
//Fuse the new reconstruction with the previous one
Fusion_Structure3D(matches_for_all[i], correspond_struct_idx[i], correspond_struct_idx[i + 1], structure, next_structure, colors, c1);
}
//Save the result
Save_PointsAndRGB(Save_Name, structure, colors);
return structure;
//Save_Structure3D(".\\Points.yml", rotations, motions, structure, colors);
}
else
{
System.Windows.Forms.MessageBox.Show("Structure initialization failed!");
return null;
}
}
catch (Exception Err)
{
System.Windows.Forms.MessageBox.Show(Err.Message);
return null;
}
}
private void MatchAlgorithm()
{
//SURF (Speeded-Up Robust Features): scale-invariant features that are also cheap to compute.
//SURF is an accelerated version of SIFT (Scale-Invariant Feature Transform), another well-known scale-invariant detector.
//SURF detects feature points far faster than SIFT, which makes it attractive for matching objects in real-time video streams,
//whereas SIFT's heavy feature computation makes extraction slow, so it is rarely used where speed matters.
//SIFT's advantage over SURF is that it computes features on a floating-point kernel, so its features are generally
//considered more precisely localized in space and scale; prefer SIFT when matching must be extremely accurate and speed is not a concern.
//ORB (Oriented FAST and Rotated BRIEF) is an improved version of the BRIEF descriptor. ORB is reported to be
//about 100x faster than SIFT and 10x faster than SURF, and benchmarks in the computer vision community often
//rank its overall performance among the best of the common feature extractors.
//Since ORB builds on BRIEF, BRIEF's trade-offs are worth noting. Its advantage is speed; its drawbacks are:
// no rotation invariance
// sensitivity to noise
// no scale invariance
}
}
}
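To make the class easier to try out, here is a minimal usage sketch. It is my own addition rather than part of the original tool: it assumes a recent OpenCvSharp build where SIFT lives in OpenCvSharp.Features2D (SURF.Create(400) or ORB.Create() would work the same way with the other constructors), it reuses the example intrinsics from the comment inside the class, and the folder and output paths are hypothetical:
using System.Collections.Generic;
using OpenCvSharp;
using OpenCvSharp.Features2D;
class SfmDemo
{
static void Main()
{
//Pinhole intrinsics K = [fx, 0, cx; 0, fy, cy; 0, 0, 1]; values are the example ones from the post
Mat K = new Mat(3, 3, MatType.CV_64FC1, new double[] { 2759.48, 0, 1520.69, 0, 2764.16, 1006.81, 0, 0, 1 });
//Hypothetical input folder; consecutive matching assumes the images are sorted in shooting order
List<string> images = new List<string>(System.IO.Directory.GetFiles(@"D:\SFM\Images"));
images.Sort();
var sfm = new SFM.StructureFromMotion(SIFT.Create());
//Extracts and matches features, initializes from the first image pair, reconstructs the rest
//incrementally, and writes one "X Y Z B G R" line per point to the output file
List<Point3f> cloud = sfm.RunMain(images, K, @"D:\SFM\Points.txt");
}
}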
After importing the images, set the focal length and principal point and you can start the reconstruction.
You can import a whole folder of images or open a single image. Double-click a file name in the image list on the right to preview it; the context menu removes an item or clears the whole list.
The camera-parameter loader reads the results saved by the calibration assistant under "Tools" on the left.
If you want the source code of this tool, you can download it here.
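Finally, the title also mentions image stitching (Stitch). The post above only covers SFM, so here is a minimal OpenCvSharp stitching sketch in the spirit of the C++ reference linked at the top; it is my own sketch with hypothetical file names, not the referenced post's code:
using System;
using OpenCvSharp;
class StitchDemo
{
static void Main()
{
//Input images with enough overlap, in left-to-right order (hypothetical file names)
Mat[] images = { Cv2.ImRead("left.jpg"), Cv2.ImRead("middle.jpg"), Cv2.ImRead("right.jpg") };
//Stitcher wraps the whole pipeline: feature matching, homography estimation, warping, seam finding and blending
using (var stitcher = Stitcher.Create(Stitcher.Mode.Panorama))
using (var pano = new Mat())
{
Stitcher.Status status = stitcher.Stitch(images, pano);
if (status == Stitcher.Status.OK)
Cv2.ImWrite("pano.jpg", pano);
else
Console.WriteLine("Stitching failed: " + status);
}
}
}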