#include<opencv2\opencv.hpp>
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<math.h>
#include<Windows.h>
#include<string.h>
using namespace cv;
using namespace std;
// Current camera frame; written by main()'s capture loop and drawn on by detect().
Mat frame;
// Win32 handle to the serial port used to talk to the Arduino (opened in main()).
HANDLE hComm;
// Serial port name; NOTE(review): hard-coded — must match the Arduino's COM port.
LPCWSTR pStr=L"COM4";
// Outgoing serial buffer; only byte [0] is ever set ('a' or 'b') by detect().
char lpOutbuffer[100];
// Byte count passed to WriteFile; also receives the bytes-written result there.
DWORD dwbyte=100;
// Template contours loaded from XML in main(); matched against the live hull
// with Hu-moment matchShapes() to classify the gesture.
vector<Point> five;
vector<Point> zero;
// Skin detection in the YCrCb colour space.
// A pixel is classified as skin when its (Cr, Cb) pair falls inside an
// empirical skin-tone ellipse rasterised into a 256x256 lookup mask.
// Returns a CV_8UC1 mask of input.size(): 255 = skin, 0 = background.
Mat Ycrcb_detect(Mat input)
{
    // Rasterise the skin ellipse once into a (Cr, Cb) lookup table.
    // Centre/axes are truncated to integers by Point/Size (113,155 etc.).
    Mat skinCrCbHist = Mat::zeros(Size(256, 256), CV_8UC1);
    ellipse(skinCrCbHist, Point(113, 155.6), Size(23.4, 15.2), 43.0, 0.0, 360.0, Scalar(255, 255, 255), -1);

    // Convert to YCrCb first. (No need to pre-allocate: cvtColor allocates.)
    Mat ycrcb_image;
    cvtColor(input, ycrcb_image, CV_BGR2YCrCb);

    Mat output = Mat::zeros(input.size(), CV_8UC1);
    // Row-major scan with row pointers: the original iterated columns in the
    // outer loop, which strides across rows on every access (cache-hostile).
    for (int r = 0; r < input.rows; r++)
    {
        const Vec3b* src = ycrcb_image.ptr<Vec3b>(r);
        uchar* dst = output.ptr<uchar>(r);
        for (int c = 0; c < input.cols; c++)
        {
            const Vec3b& ycrcb = src[c]; // [0]=Y, [1]=Cr, [2]=Cb
            if (skinCrCbHist.at<uchar>(ycrcb[1], ycrcb[2]) > 0)
                dst[c] = 255;
        }
    }
    return output;
}
// Segment the hand from `original`, find its largest skin contour, draw the
// contour + convex hull, classify the gesture by Hu-moment matching against
// the `zero`/`five` templates, and send 'a' or 'b' over the serial port.
// Side effects: draws on the global `frame`, writes to the global `hComm`.
// Returns the binary skin mask (with the hull outline drawn on it).
Mat detect(Mat original)
{
    // Skin mask, then morphological clean-up: erode once to cut noise,
    // dilate twice to reconnect the hand blob.
    // FIX: the original built a 7x7 `element_dilate` but accidentally passed
    // the 5x5 erode element to dilate(); it also ran an unused Gaussian blur
    // and built an unused 3x3 element — both removed.
    Mat mask = Ycrcb_detect(original.clone());
    Mat element_erode = getStructuringElement(MORPH_RECT, Size(5, 5), Point(-1, -1));
    Mat element_dilate = getStructuringElement(MORPH_RECT, Size(7, 7), Point(-1, -1));
    cv::erode(mask, mask, element_erode, cv::Point(-1, -1));
    cv::dilate(mask, mask, element_dilate, cv::Point(-1, -1), 2);

    if (!mask.empty())
    {
        // findContours mutates its input, so work on a copy.
        Mat mask_show = mask.clone();
        vector<vector<Point> > contours_hull;
        findContours(mask_show, contours_hull, RETR_LIST, CHAIN_APPROX_SIMPLE, Point(0, 0));
        if (!contours_hull.empty())
        {
            // Pick the contour with the largest AREA.
            // FIX: the original compared contour vertex counts (`size()`),
            // which is not area — CHAIN_APPROX_SIMPLE compresses straight
            // runs, so a big rectangle can have fewer points than noise.
            size_t biggestID = 0;
            double biggestA = 0.0;
            for (size_t i = 0; i < contours_hull.size(); i++)
            {
                double a = contourArea(contours_hull[i]);
                if (a > biggestA)
                {
                    biggestA = a;
                    biggestID = i;
                }
            }

            // Ignore blobs smaller than 1/10 of the frame (noise / no hand).
            RotatedRect box = minAreaRect(contours_hull[biggestID]);
            if (box.size.area() > frame.size().area() / 10)
            {
                vector<Point> hull;
                convexHull(Mat(contours_hull[biggestID]), hull, false);
                drawContours(frame, contours_hull, (int)biggestID, Scalar(255, 255, 255), 2, 8);

                // Draw the closed hull polygon onto the mask.
                Point point0 = hull[hull.size() - 1];
                for (size_t j = 0; j < hull.size(); j++)
                {
                    line(mask, point0, hull[j], Scalar(255, 255, 255), 2, 8);
                    point0 = hull[j];
                }

                // Hu-moment match against both templates; the SMALLER score
                // is the better match, so m1 > m2 means "five" fits better.
                double m1 = matchShapes(zero, hull, CV_CONTOURS_MATCH_I1, 0);
                double m2 = matchShapes(five, hull, CV_CONTOURS_MATCH_I1, 0);

                // Flush any pending output, then send exactly ONE command byte.
                // FIX: the original wrote `dwbyte` (100) bytes with only byte 0
                // set, and passed the same global as both the count and the
                // bytes-written out-param, so the count was clobbered after
                // the first call.
                PurgeComm(hComm, PURGE_TXCLEAR);
                lpOutbuffer[0] = (m1 > m2) ? 'a' : 'b';
                DWORD written = 0;
                if (!WriteFile(hComm, lpOutbuffer, 1, &written, NULL))
                {
                    // Best-effort: keep running even if the serial write fails.
                }
            }
        }
    }
    return mask;
}
int main()
{
COMSTAT Comstat;
DWORD dwError;
BOOL bWritestat;
hComm=CreateFile(pStr,GENERIC_READ | GENERIC_WRITE,0,0,OPEN_EXISTING, 0,NULL);
if (hComm == INVALID_HANDLE_VALUE)
{
cout<<"FLASE";
return -1;
}
else
{
cout<<"TURE";
}
DCB dcb;
GetCommState(hComm,&dcb);
dcb.BaudRate=9600;
dcb.ByteSize=8;
dcb.Parity=NOPARITY;
dcb.StopBits=TWOSTOPBITS;
bool set=SetCommState(hComm,&dcb);
bool sup=SetupComm(hComm,1024,1024);
VideoCapture cap(0); // open the default camera
Mat m_zero,m_five;
FileStorage fs("zero_finger.xml", FileStorage::READ);
fs["zero_finger"] >> m_zero;
fs.release();
zero=Mat_<Point>(m_zero);
FileStorage fs2("five_finger.xml", FileStorage::READ);
fs2["five_finger"] >> m_five;
fs2.release();
five=Mat_<Point>(m_five);
if (!cap.isOpened())
return -1;
while (true){
cap >> frame; // new frame from camera
Mat hand=detect(frame);
cv::imshow("hand", hand);
cv::imshow("Original Video", frame);
waitKey(20);
}
return 0;
}
// The whole program is actually quite simple: first perform skin-colour
// detection, then extract the hand region, recognise the gesture by Hu-moment
// matching, and finally send a different command to the Arduino serial port
// depending on the recognised gesture, achieving gesture control. Future work:
// the gesture stage could use multiple templates with an SVM classifier to
// make recognition more flexible.
// (Original note, kept for reference: 整个程序其实很简单，首先肤色识别，然后提取手的部分，
// 根据Hu矩匹配去识别手势，最后根据识别到的手势发送不同的命令到arduino串口，最终实现手势控制。
// 后期可以增加其他功能，手势部分其实可以通过多个模板利用SVM进行识别，增加手势灵活性。)