从Cnkinect学习了很多kinect入门知识, 这几天 对Kinect 采集到的深度图数据(g_DepthGenerator.GetMetaData(m_DepthMD)采集到的数据存成raw.dat )利用openCV2.2 处理,得到手掌骨架图像。效果如下
分享代码
#include"stdafx.h"
#include<iostream>
#include<fstream>
#include"opencv/cv.h"
#include"opencv/highgui.h"
using namespace cv;  // fixed: the original paste was missing the ';'

// Raw Kinect depth frame dump: one 640x480 frame, 16 bits per pixel,
// saved to disk from the depth generator's metadata.
// NOTE: `ios::binary` must be qualified — there is no `using namespace std` here.
std::ifstream input("sample.dat", std::ios::binary);
// Global working buffers for the OpenCV pipeline (all frames are 640x480).
Mat m_srcdepth16u(480, 640, CV_16UC1);  // raw 16-bit depth as read from disk
Mat m_depth16u(480, 640, CV_16UC1);     // Gaussian-smoothed 16-bit depth
Mat m_middepth8u(480, 640, CV_8UC1);    // binarized hand mask
Mat m_depth8u(480, 640, CV_8UC1);       // mask copy used for drawing contours/rect
Mat m_rgb8u(480, 640, CV_8UC3);         // color buffer (not referenced in main)
Mat m_DepthShow(480, 640, CV_8UC1);     // 8-bit display buffer
Mat m_srcDepthShow(480, 640, CV_8UC1);  // 8-bit display buffer (not referenced in main)
int_tmain(int argc, _TCHAR* argv[])
{
int sizebuffer = 640*480*2;
char * buffer = (char *)malloc(sizebuffer);
input.read(buffer, sizebuffer);
memcpy(m_srcdepth16u.data,buffer,sizebuffer);
// 首先处理深度为 0 的点,这种实际上无法测量的点,
所以将深度为 0 的点设成最大深度
for( int i = 0; i < 480; i++)
for (int j = 0; j<640; j++) {
unsigned short & temp = m_srcdepth16u.at<unsignedshort>(i,j);
if(temp == 0) {
temp = 65535;
}
}
// 高斯滤波平滑
Ptr<FilterEngine> f = createGaussianFilter(m_srcdepth16u.type(), Size(9,9), 0.85, 0.85 );
f->apply(
m_srcdepth16u, m_depth16u);
// 深度图像二值化,
阈值大概是取手掌的厚度, minValue存储的事距离kinect最近的指尖深度
double minValue, maxValue;
minMaxIdx(m_depth16u, &minValue, &maxValue);
for( int i = 0; i < 480; i++)
for (int j = 0; j<640; j++) {
if(m_depth16u.at<unsigned short>(i,j) > minValue +50)
m_middepth8u.at<unsigned char>(i,j) =0;
else
m_middepth8u.at<unsigned char>(i,j) = 255;
}
m_middepth8u.copyTo(m_depth8u);
// 取得手掌轮廓
vector<vector<Point>> contours;
findContours(m_middepth8u, contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE, Point(0,0));
Scalar color( 255, 0, 0 );
drawContours(m_depth8u, contours, -1, color);
vector<Point> contourMerge;
vector<vector<Point>>::iterator it;
// 将轮廓数据合并到一个数组中 , 并获得包含此轮廓的最小矩形
for (it=contours.begin(); it!=contours.end(); it++)
{
vector<Point> ¤tContour = *it;
vector<Point>::iterator itContour;
for (itContour=currentContour.begin();itContour!=currentContour.end(); itContour++)
{
contourMerge.push_back(*itContour);
}
}
RotatedRect minRect = minAreaRect(Mat(contourMerge));
Point2f pt[4];
minRect.points(pt);
line(m_depth8u, pt[0], pt[1], color);
line(m_depth8u, pt[1], pt[2], color);
line(m_depth8u, pt[2], pt[3], color);
line(m_depth8u, pt[3], pt[0], color);
// 将上述最小矩形取出,
并旋转到正方向,
横平竖直
Mat rotate8u = Mat(minRect.boundingRect().size(),CV_8UC1);
Mat after_rotate8u =Mat(minRect.boundingRect().size(),CV_8UC1);
getRectSubPix( m_depth8u, minRect.boundingRect().size(),minRect.center, rotate8u);
Point2f rotateCenter =Point2f(minRect.boundingRect().size().width/2.0,minRect.boundingRect().size().height/2.0);
Mat rotateM = getRotationMatrix2D(rotateCenter,
90+minRect.angle, 1
);
warpAffine(rotate8u,
after_rotate8u, rotateM, minRect.boundingRect().size()
);
Mat scale8u = Mat( Size(30, minRect.size.width),CV_8UC1);
getRectSubPix(after_rotate8u, Size(minRect.size.height,minRect.size.width), rotateCenter, scale8u);
rotate8u.convertTo(m_DepthShow,CV_8U);
//m_depth8u.convertTo(m_DepthShow,CV_8U);
cvNamedWindow("beforerotate");
imshow("beforerotate",
m_DepthShow);
// scale8u.convertTo(m_DepthShow,CV_8U);
//m_depth8u.convertTo(m_DepthShow,CV_8U);
cvNamedWindow("depth");
// 距离变换,获取骨架
Mat
m_outdepth32u(scale8u.rows,scale8u.cols,CV_32FC1);
distanceTransform(scale8u, m_outdepth32u, CV_DIST_L2,CV_DIST_MASK_5);
// 显示距离变换得到结果
int i = 0, j = 0;
float maxDist = 0.0;
for( i = 0; i < scale8u.rows; i++)
for (j = 0; j<scale8u.cols; j++) {
if(maxDist < m_outdepth32u.at<float>(i,j))
maxDist = m_outdepth32u.at<float>(i,j);
}
m_outdepth32u.convertTo(m_DepthShow,CV_8U, 255/maxDist);
imshow("depth",
m_DepthShow);
cvWaitKey(-1);
return 0;
}