即之前关于点云三个视角进行截图的讲解,这里通过一个小程序来进行实现。采用 pcl 来进行点云的读取并进行相关处理:往某个轴向进行投影,这样处理后,该轴的坐标会全为 0;处理完成后,再使用 opencv 来进行成图,也就是解决如何把上一步处理好的点云数据映射到一个二维画布 matrix 矩阵中的问题。这里有小伙伴可能会说,这并不是相机成像原理,这只是特殊化处理,并不是任意视角下的成图——确实如此。但通过这种最简单的方式,可以不消耗太多时间就验证 pcl+opencv 这条路线是否行得通。而且后续通过翻阅 opencv 的接口文档发现,这个相机成图过程 opencv 其实已经实现,并且有接口可以直接调用,后边我们也会把接口列出来。直接上代码。感谢小伙伴耐心阅读。
#include <iostream>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/ModelCoefficients.h>
#include <pcl/filters/project_inliers.h>
#include <pcl/range_image/range_image.h> // //关于深度图像的头文件
#include <pcl/visualization/pcl_visualizer.h> // //PCL可视化的头文件
#include <pcl/visualization/range_image_visualizer.h> // //深度图可视化的头文件
#include <pcl/io/png_io.h>
#include <pcl/visualization/common/float_image_utils.h>
#include<opencv2/opencv.hpp>
#include<vector>
#include <cstdlib>
#include <cstdlib>
#include <liblas/liblas.hpp>
#include <pcl/io/io.h>
#include "ImageAxesAdd.h"
using namespace cv;
using namespace std;
// Saves the given RGBA image as a PNG (maximum compression), shows it,
// then composes it onto a larger white canvas with x/y axes and a title
// (via the DrawAxes helper) and shows that as well.
// Blocks until a key is pressed (waitKey(0)).
void ShowPngImage(Mat& mat)
{
vector<int> compression_params;
compression_params.push_back(IMWRITE_PNG_COMPRESSION);
compression_params.push_back(9); // 9 = highest PNG compression level
try {
imwrite("透明Alpha值图.png", mat, compression_params);
imshow("生成的PNG图", mat);
fprintf(stdout, "PNG图片文件的alpha数据保存完成\n");
// Build a white canvas with margins (100 px bottom, 70 px left)
// and let DrawAxes place the image plus axis labels and title on it.
DrawAxes drawAxes;
cv::Mat ImageAddAxes = cv::Mat(mat.rows + 100, mat.cols + 70, CV_8UC3, cv::Scalar(255, 255, 255));
drawAxes.InputFigure(mat, ImageAddAxes);
std::string ylabel = "y";
std::string xlabel = "x";
std::string title_name = "Calibration";
drawAxes.DrawLabel_Y(ylabel, 0, 100, 10, cv::Scalar(0, 0, 0));
drawAxes.DrawLabel_X(xlabel, 0, 100, 10, cv::Scalar(0, 0, 0));
drawAxes.DrawTitle(title_name);
cv::imshow("add axes", ImageAddAxes);
waitKey(0);
}
// BUGFIX: OpenCV reports errors as cv::Exception, which derives from
// std::exception but NOT from std::runtime_error, so the original
// catch (runtime_error&) let imwrite/imshow failures escape uncaught.
// Catch std::exception to cover both cv::Exception and runtime_error.
catch (const std::exception& ex) {
fprintf(stderr, "图像转换成PNG格式发生错误:%s\n", ex.what());
return;
}
}
// Rasterizes a point cloud that was projected onto the x = 0 plane
// into the given CV_8UC4 image: each point's (z, y) pair is linearly
// mapped into (row, column) of `mat` and written as a colored RGBA
// pixel. The finished image is handed to ShowPngImage for display.
//
// mat        - destination RGBA canvas (its size defines the raster grid)
// pointcloud - projected cloud; only the z (-> row) and y (-> column)
//              coordinates carry information after the x = 0 projection
void createAlphaMatByPointCloud(Mat& mat, pcl::PointCloud<pcl::PointXYZ>& pointcloud)
{
std::cerr << "Cloud after projection: " << std::endl;
if (pointcloud.empty() || mat.rows <= 0 || mat.cols <= 0)
return; // nothing to rasterize

// Bounding box of the projected points. Replaces the original full
// sorts of two coordinate vectors (O(n log n)) with a single O(n) scan.
float min_x = pointcloud.points[0].z, max_x = min_x;
float min_y = pointcloud.points[0].y, max_y = min_y;
for (const auto& pt : pointcloud.points)
{
min_x = std::min(min_x, pt.z);
max_x = std::max(max_x, pt.z);
min_y = std::min(min_y, pt.y);
max_y = std::max(max_y, pt.y);
}
const float abs_x = max_x - min_x;
const float abs_y = max_y - min_y;
std::cerr << "max_x:" << max_x << " "
<< "min_x:" << min_x << " "
<< "max_y:" << max_y << " "
<< "min_y:" << min_y << " "
<< "abs_x:" << abs_x << " "
<< "abs_y:" << abs_y << " "
<< std::endl;

// World-units-per-pixel. Generalized from the hard-coded 500 to the
// actual canvas size (identical for the 500x500 mat used by callers).
const float step_x = abs_x / static_cast<float>(mat.rows);
const float step_y = abs_y / static_cast<float>(mat.cols);

for (const auto& pt : pointcloud.points)
{
// BUGFIX: the original floor(value / step) was unclamped, so the
// maximum point produced row == mat.rows / column == mat.cols and
// wrote out of bounds; a zero extent also divided by zero.
int row = (step_x > 0.f) ? static_cast<int>((pt.z - min_x) / step_x) : 0;
int column = (step_y > 0.f) ? static_cast<int>((pt.y - min_y) / step_y) : 0;
row = std::min(std::max(row, 0), mat.rows - 1);
column = std::min(std::max(column, 0), mat.cols - 1);
Vec4b& rgba = mat.at<Vec4b>(row, column);
rgba[0] = UCHAR_MAX; // blue channel saturated
rgba[1] = saturate_cast<uchar>((float(mat.cols - column)) / ((float)mat.cols) * UCHAR_MAX);
// BUGFIX: the row fraction must be normalized by mat.rows, not
// mat.cols (copied typo from the OpenCV alpha-Mat sample).
rgba[2] = saturate_cast<uchar>((float(mat.rows - row)) / ((float)mat.rows) * UCHAR_MAX);
rgba[3] = saturate_cast<uchar>(0.5 * (rgba[1] + rgba[2]));
}
ShowPngImage(mat);
}
/// <summary>
/// Projects the input cloud onto the x = 0 plane (the Y-Z plane),
/// shows the cloud in a PCL 3D viewer, and rasterizes the projected
/// points into a 500x500 RGBA image via createAlphaMatByPointCloud.
/// Blocks until the viewer window is closed.
/// </summary>
void ProjectionPointCloud(pcl::PointCloud<pcl::PointXYZ> cloudcloud )
{
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = cloudcloud.makeShared();
// Alias: the projection filter writes its output back into `cloud`.
pcl::PointCloud<pcl::PointXYZ>& cloud_projected = *cloud;
std::cerr << "Cloud before projection: " << std::endl;
// Plane model ax + by + cz + d = 0 with a = 1, b = c = d = 0,
// i.e. the plane x = 0: every point's x is squashed to zero and the
// cloud ends up on the Y-Z plane. (The original comment claimed the
// X-Y plane / c = 1, which contradicted the coefficients below.)
pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients());
coefficients->values.resize(4);
coefficients->values[0] = 1; // a: x coefficient — selects the x = 0 plane
coefficients->values[1] = 0; // b
coefficients->values[2] = 0; // c
coefficients->values[3] = 0; // d
// Project every point onto the plane defined above.
pcl::ProjectInliers<pcl::PointXYZ> proj;
proj.setModelType(pcl::SACMODEL_PLANE);
proj.setInputCloud(cloud);
proj.setModelCoefficients(coefficients);
proj.filter(cloud_projected);
// --------------------------------------------
// -----Open 3D viewer and add point cloud-----
// --------------------------------------------
pcl::visualization::PCLVisualizer viewer("3D Viewer");
viewer.setBackgroundColor(1, 1, 1);
pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> org_color_handler(cloud, 255, 200, 0);
viewer.addPointCloud(cloud, org_color_handler, "original pointcloud");
// BUGFIX: this call previously used the misspelled id
// "orginal pointcloud", so the point size was applied to a
// nonexistent cloud and silently ignored.
viewer.setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 4, "original pointcloud");
viewer.initCameraParameters();
viewer.addCoordinateSystem(1.0);
// Rasterize the projected cloud onto a 500x500 blue RGBA canvas.
Mat mat(500, 500, CV_8UC4, Scalar(255, 0, 0));
createAlphaMatByPointCloud(mat, cloud_projected);
while (!viewer.wasStopped())
{
viewer.spinOnce();
pcl_sleep(0.01);
}
}
// Fills a CV_8UC4 image with the OpenCV "alpha Mat" demo gradient:
// blue saturated everywhere, green fading left-to-right, red fading
// top-to-bottom, alpha set to the mean of the green and red channels.
void createAlphaMat(Mat& mat)
{
for (int i = 0; i < mat.rows; ++i) {
for (int j = 0; j < mat.cols; ++j) {
Vec4b& rgba = mat.at<Vec4b>(i, j);
rgba[0] = UCHAR_MAX;
rgba[1] = saturate_cast<uchar>((float(mat.cols - j)) / ((float)mat.cols) * UCHAR_MAX);
// BUGFIX: the row fraction was divided by mat.cols; it must be
// normalized by mat.rows (as in the OpenCV imwrite tutorial),
// otherwise non-square images get a skewed red gradient.
rgba[2] = saturate_cast<uchar>((float(mat.rows - i)) / ((float)mat.rows) * UCHAR_MAX);
rgba[3] = saturate_cast<uchar>(0.5 * (rgba[1] + rgba[2]));
}
}
}
/// <summary>
/// Reads a .las file with libLAS and converts it to a pcl XYZ cloud.
/// Any color data in the file is ignored: pcl::PointXYZ carries no RGB
/// field (the original dead color-conversion code is removed; switch
/// to pcl::PointXYZRGB if colors are ever needed).
/// Returns an empty cloud if the file cannot be opened.
/// </summary>
pcl::PointCloud<pcl::PointXYZ> ReadLas(std::string filepath)
{
pcl::PointCloud<pcl::PointXYZ> cloud;
std::ifstream ifs(filepath, std::ios::in | std::ios::binary);
// BUGFIX: the original fed an unchecked stream straight into libLAS,
// which fails confusingly on a bad path.
if (!ifs)
{
std::cerr << "ReadLas: cannot open file: " << filepath << std::endl;
return cloud;
}
liblas::ReaderFactory f;
liblas::Reader reader = f.CreateWithStream(ifs);
// Point count from the LAS header sizes the cloud up front.
const unsigned long int nbPoints = reader.GetHeader().GetPointRecordsCount();
cloud.width = nbPoints;
cloud.height = 1; // unorganized (1-row) cloud
cloud.is_dense = false;
cloud.points.resize(cloud.width * cloud.height);
size_t i = 0;
// BUGFIX: also bound the loop by the allocated size — a header whose
// record count understates the actual number of points would
// otherwise write past the end of cloud.points.
while (i < cloud.points.size() && reader.ReadNextPoint())
{
cloud.points[i].x = (reader.GetPoint().GetX());
cloud.points[i].y = (reader.GetPoint().GetY());
cloud.points[i].z = (reader.GetPoint().GetZ());
i++;
}
return cloud;
}
/// Entry point: read a .las point cloud, project it onto the Y-Z
/// plane, and display both the 3D view and the rasterized 2D image.
/// Usage: app [path-to-las] — falls back to the hard-coded default
/// path when no argument is given (backward compatible).
int main(int argc, char** argv)
{
std::string filepath = "C:\\Users\\Administrator\\Desktop\\vs2019 pcl\\TestOpencv\\x64\\Release\\outputdata\\pointcloud.las";
// Generalization: allow the input file to be passed on the command
// line instead of only using the developer's local path.
if (argc > 1)
filepath = argv[1];
pcl::PointCloud<pcl::PointXYZ> originalPointCloud = ReadLas(filepath);
ProjectionPointCloud(originalPointCloud);
return (0);
}
逻辑讲解:
1. 使用liblas读取点云数据
2. 往某个轴投影点云
3. 投影到一个平面的点云映射到图片矩阵中去
4. 显示图片
5. 显示图片坐标轴,网上找到的源码
Opencv给Mat加坐标轴_绿竹巷人的博客-CSDN博客
这里就不贴出来了。
效果如下:
关于opencv三维点转二维,直接上代码
opencv中3D点根据相机参数投影成2D点(对应接口为 cv::projectPoints),直接上代码:
输入:3D坐标+旋转,平移矩阵(所谓的相机姿态)+相机内参(包括畸变矩阵)
输出:2D坐标
(1.投影函数:根据相机参数(包括畸变矩阵),把3D点投影成2D点
2.搞清楚R和t的具体含义。
R的第i行 表示摄像机坐标系中的第i个坐标轴方向的单位向量在世界坐标系里的坐标;
R的第i列 表示世界坐标系中的第i个坐标轴方向的单位向量在摄像机坐标系里的坐标;
t 表示世界坐标系的原点在摄像机坐标系的坐标;
-R的转置 * t 表示摄像机坐标系的原点在世界坐标系的坐标。)
原谅我这个接口还没有动手去自己实践下,下次试试这个接口。
c# 实现的透视投影可以看这里:
https://blog.csdn.net/cangqiongxiaoye/article/details/120915372