// Joint work by lab members; a theoretical write-up will follow later.
// (原文: 实验室同学一起劳动的成果, 后期进行理论说明)
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
//#include <opencv2/nonfree/nonfree.hpp>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/filters/radius_outlier_removal.h>
#include <pcl/filters/conditional_removal.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/ModelCoefficients.h>
#include <pcl/point_types.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <PeakFinder.h>
#include <math.h> /* sin */
#include "pcl_ros/point_cloud.h"
using namespace std;
using namespace cv;

// Point/cloud aliases used throughout this file.
using PointT = pcl::PointXYZRGB;
using PointCloud = pcl::PointCloud<PointT>;

// Global viewer window; main() pushes the reconstructed cloud into it at the end.
pcl::visualization::CloudViewer viewer ("Simple Cloud Viewer");

// Camera intrinsics (pinhole model) and rig geometry.
// Typed constexpr constants instead of #define macros: identical values,
// but scoped, typed, and visible to the debugger.
constexpr double fx = 1204.696659;   // focal length x [px]
constexpr double fy = 1204.299368;   // focal length y [px]
constexpr double cx = 966.428218;    // principal point x [px]
constexpr double cy = 559.772423;    // principal point y [px]
constexpr int    image_width  = 1920;
constexpr int    image_height = 1080;
constexpr double theta    = 0.00;    // in radians (not referenced in this file)
constexpr double baseline = 0.1000;  // [m] (not referenced in this file)
// Reconstructs a sparse 3-D point cloud of a projected laser stripe from three
// image pairs (stripe image + stripe-free "black" image), using a chessboard in
// the stripe-free image to recover metric scale, then RANSAC-fits a plane to the
// accumulated cloud and displays it.
int main(int argc, char** argv)
{
cv::Mat black;               // stripe-free reference frame
cv::Mat frame;               // frame with the projected stripe
cv::Mat temp;                // NOTE(review): never used below
cv::Mat diff;                // |frame - black|, isolates the stripe
cv::Mat imageGrayblack;
cv::Mat imageGray;
cv::Mat imgundistortedblack;
cv::Mat imgundistorted;
PointCloud::Ptr cloud ( new PointCloud );
vector<Point3f> point3d;     // back-projected chessboard corners (camera frame)
double scale=100000.0;       // normalized-units -> meters divisor, recomputed per image pair
float col[image_width/2]={0.0};   // one row of the right half of `diff`
soundtouch::PeakFinder pf;   // sub-pixel peak detector (SoundTouch library)
double pp=0.0;               // accurate (sub-pixel) peak position within `col`
cv::Matx33f K (fx,0,cx,
0,fy,cy,
0,0,1); //calibration matrix (intrinsics)
vector<float> distCoeffs = {-0.400622, 0.134591, 0.001135, -0.001642, 0.000000};//list-init requires C++11 in CMakeLists.txt
cv::Size patternsize(11,8); //interior number of chessboard corners
vector<Point2f> corners; //filled by the detected corners
bool patternfound=false;
int num=270;//0.25*1080 — first row of the vertical band scanned for the stripe
vector<double> vpp(image_height,0.0);   // NOTE(review): never used below
// Process three stripe/black image pairs, accumulating points into `cloud`.
for(int kk=0;kk<3;kk++)
{
if(kk==0)
{
frame=cv::imread("/home/kylefan/Pictures/Webcam/2017-03-10-140619.jpg");
black=cv::imread("/home/kylefan/Pictures/Webcam/2017-03-10-140626.jpg");
}
if(kk==1)
{
frame=cv::imread("/home/kylefan/Pictures/Webcam/2017-03-10-141006.jpg");
black=cv::imread("/home/kylefan/Pictures/Webcam/2017-03-10-141014.jpg");
}
if(kk==2)
{
frame=cv::imread("/home/kylefan/Pictures/Webcam/2017-03-10-140913.jpg");
black=cv::imread("/home/kylefan/Pictures/Webcam/2017-03-10-140921.jpg");
}
// NOTE(review): imread results are not checked; a bad path yields an empty
// Mat and cvtColor will abort.
cvtColor(frame, imageGray, CV_BGR2GRAY);
cvtColor(black, imageGrayblack, CV_BGR2GRAY);
cv::undistort(imageGray,imgundistorted,K, distCoeffs, K );
cv::undistort(imageGrayblack,imgundistortedblack,K, distCoeffs, K );
// Absolute difference leaves (mostly) only the projected stripe.
cv::absdiff(imgundistorted,imgundistortedblack,diff);
cv::GaussianBlur(imgundistorted,imgundistorted,cv::Size(5,5),2,2);
//1. find chessboard corners in the stripe-free image
//(CALIB_CB_FAST_CHECK would save time on images with no chessboard,
//but only ADAPTIVE_THRESH is used here)
patternfound = findChessboardCorners(imgundistortedblack, patternsize, corners,CALIB_CB_ADAPTIVE_THRESH);
cout<<"patternfound:"<<patternfound<<endl;
if(patternfound)cornerSubPix(imgundistortedblack, corners, Size(11, 11), Size(-1, -1),TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(imgundistortedblack, patternsize, Mat(corners), patternfound);
//for(int i=0;i<corners.size();i++)cout<<"("<<corners[i].x<<","<<corners[i].y<<")"<<endl;
//2. back-project the 2-D corners to normalized camera rays via K^-1.
//The corners must come from the UNDISTORTED image: K^-1 models an ideal
//pinhole camera, which only holds after lens distortion is removed.
for(int i=0;i<corners.size();i++)
{
Point3f p3d;
//cv::Matx31f point (corners[i].x-image_width/2,image_height/2-corners[i].y,1);
cv::Matx31f point (corners[i].x,corners[i].y,1);
// Mat_<double> assignment converts the float product to double.
cv::Mat_<double> point3=cv::Mat(K.inv())*cv::Mat(point);
//cout<<"K.inv()="<<K.inv()<<endl;
p3d.x=point3.at<double>(0,0);
p3d.y=point3.at<double>(1,0);
p3d.z=point3.at<double>(2,0);
point3d.push_back(p3d);
}
//3. recover metric scale: corners 0 and 10 span one 11-corner chessboard row,
//presumably 0.200 m apart physically (10 squares x 20 mm) — TODO confirm.
double distance = cv::norm(point3d[0]-point3d[10]);
//cout<<"distance="<<distance<<endl;
scale=distance/0.200;
for(int i=0;i<corners.size();i++)
{
point3d[i].x/=scale;
point3d[i].y/=scale;
point3d[i].z/=scale;
//cout<<point3d[i].x<<","<<point3d[i].y<<","<<point3d[i].z<<endl;
}
//4. find the stripe peak in each scanned row and back-project it
for(int i=num;i<2*num;i++)
{
// Copy the right half of diff's row i into `col`.
for(int j=image_width/2;j<image_width;j++)
{
col[j-image_width/2]=(float)(diff.at<uchar>(i,j));
}
// Sub-pixel peak position of the stripe within col[0, image_width/2).
pp=pf.detectPeak(col,0,image_width/2);
// Zero the detected pixel — presumably to mark the peak in `diff` for
// debugging/visualization; confirm intent.
diff.at<uchar>(i,(int)pp+image_width/2)=0;
PointT p;
//cv::Matx31f point (pp+(double)image_width/2.0-image_width/2,image_height/2-(i+num),1);
// NOTE(review): the pixel was sampled at row i, but the back-projection
// uses row i+num (rows 2*num..3*num-1). Possible row-index mismatch —
// the commented-out line above used i+num too, so it may be deliberate;
// verify against the rig geometry.
cv::Matx31f point (pp+(double)image_width/2.0,i+num,1);
cv::Mat_<double> point3=cv::Mat(K.inv())*cv::Mat(point);
p.x=point3.at<double>(0,0)/scale;
p.y=point3.at<double>(1,0)/scale;
p.z=point3.at<double>(2,0)/scale;
//cout<<p.x<<","<<p.y<<","<<p.z<<endl;
// Green color tag for all stripe points.
p.r=55;
p.g=250;
p.b=55;
cloud->push_back(p);
}
cout<<"cloud num="<<cloud->width<<endl;
// Reset per-pair state before the next image pair.
point3d.clear();
corners.clear();
patternfound=false;
scale=100000.0;
}
// Finalize cloud metadata as an unorganized (height==1) cloud.
pcl::PCLHeader header;
header.frame_id="map";
cloud->header=header;
cloud->height = 1;
cloud->width = cloud->points.size();
cloud->is_dense = false;
// NOTE(review): redundant — resizes to the size it already has.
cloud->points.resize (cloud->width * cloud->height);
pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
// Create the RANSAC plane-segmentation object
pcl::SACSegmentation<PointT> seg;
// Optional
seg.setOptimizeCoefficients (true);
// Mandatory
seg.setModelType (pcl::SACMODEL_PLANE);
seg.setMethodType (pcl::SAC_RANSAC);
seg.setDistanceThreshold (0.01);   // inlier threshold [same units as cloud, i.e. m]
seg.setInputCloud (cloud);
seg.segment (*inliers, *coefficients);
if (inliers->indices.size () == 0)
{
PCL_ERROR ("Could not estimate a planar model for the given dataset.");
return (-1);
}
// Plane equation ax + by + cz + d = 0.
std::cerr << "Plane : " <<coefficients->values[0] << "x+"<< coefficients->values[1] << "y+"<< coefficients->values[2] << "z+"<< coefficients->values[3] <<"=0"<< std::endl;
viewer.showCloud(cloud);
// Busy-wait until the viewer window is closed.
while (!viewer.wasStopped ());
}
// Reference: http://blog.csdn.net/fk1174/article/details/61201231