Image Processing with a C++ MFC Interface



// MFC_workDlg.cpp: 实现文件
//

#include "pch.h"
#include "framework.h"
#include "MFC_work.h"
#include "MFC_workDlg.h"
#include "afxdialogex.h"
#include "opencv2/opencv.hpp"
#include <opencv2/imgproc/types_c.h>
#include <iostream>
#include <conio.h>
//#include "opencv2\calib3d\calib3d.hpp"
#include "opencv2/calib3d.hpp"

using namespace std;
using namespace cv;

using std::max;
using std::cout;
using std::endl;
using cv::Mat;
using cv::Scalar;
using cv::imshow;
using cv::line;

#ifdef _DEBUG
#define new DEBUG_NEW
#endif


// CAboutDlg dialog used for the application's About menu item

class CAboutDlg : public CDialogEx
{
public:
	CAboutDlg();

// Dialog Data
#ifdef AFX_DESIGN_TIME
	enum { IDD = IDD_ABOUTBOX };
#endif

protected:
	virtual void DoDataExchange(CDataExchange* pDX);    // DDX/DDV support

// Implementation
protected:
	DECLARE_MESSAGE_MAP()
};

CAboutDlg::CAboutDlg() : CDialogEx(IDD_ABOUTBOX)
{
}

void CAboutDlg::DoDataExchange(CDataExchange* pDX)
{
	CDialogEx::DoDataExchange(pDX);
}

BEGIN_MESSAGE_MAP(CAboutDlg, CDialogEx)
END_MESSAGE_MAP()


// CMFCworkDlg dialog



CMFCworkDlg::CMFCworkDlg(CWnd* pParent /*=nullptr*/)
	: CDialogEx(IDD_MFC_WORK_DIALOG, pParent)
{
	m_hIcon = AfxGetApp()->LoadIcon(IDR_MAINFRAME);
}

void CMFCworkDlg::DoDataExchange(CDataExchange* pDX)
{
	CDialogEx::DoDataExchange(pDX);
}

BEGIN_MESSAGE_MAP(CMFCworkDlg, CDialogEx)
	ON_WM_SYSCOMMAND()
	ON_WM_PAINT()
	ON_WM_QUERYDRAGICON()
	ON_BN_CLICKED(IDC_BUTTON1, &CMFCworkDlg::OnBnClickedButton1)
	ON_BN_CLICKED(IDC_BUTTON2, &CMFCworkDlg::OnBnClickedButton2)
	ON_BN_CLICKED(IDC_BUTTON3, &CMFCworkDlg::OnBnClickedButton3)
	ON_BN_CLICKED(IDC_BUTTON4, &CMFCworkDlg::OnBnClickedButton4)
	ON_BN_CLICKED(IDC_BUTTON5, &CMFCworkDlg::OnBnClickedButton5)
	ON_BN_CLICKED(IDC_BUTTON6, &CMFCworkDlg::OnBnClickedButton6)
	ON_BN_CLICKED(IDC_BUTTON7, &CMFCworkDlg::OnBnClickedButton7)
	ON_BN_CLICKED(IDC_BUTTON8, &CMFCworkDlg::OnBnClickedButton8)
	ON_BN_CLICKED(IDC_BUTTON9, &CMFCworkDlg::OnBnClickedButton9)
	ON_BN_CLICKED(IDC_BUTTON10, &CMFCworkDlg::OnBnClickedButton10)
	ON_BN_CLICKED(IDC_BUTTON11, &CMFCworkDlg::OnBnClickedButton11)
	ON_BN_CLICKED(IDC_BUTTON12, &CMFCworkDlg::OnBnClickedButton12)
	ON_BN_CLICKED(IDC_BUTTON13, &CMFCworkDlg::OnBnClickedButton13)
	ON_BN_CLICKED(IDC_BUTTON14, &CMFCworkDlg::OnBnClickedButton14)
	ON_BN_CLICKED(IDC_BUTTON15, &CMFCworkDlg::OnBnClickedButton15)
	ON_BN_CLICKED(IDC_BUTTON16, &CMFCworkDlg::OnBnClickedButton16)
	ON_BN_CLICKED(IDC_BUTTON17, &CMFCworkDlg::OnBnClickedButton17)
	ON_BN_CLICKED(IDC_BUTTON18, &CMFCworkDlg::OnBnClickedButton18)

	ON_BN_CLICKED(IDC_BUTTON20, &CMFCworkDlg::OnBnClickedButton20)
	ON_BN_CLICKED(IDC_BUTTON21, &CMFCworkDlg::OnBnClickedButton21)
	ON_BN_CLICKED(IDC_BUTTON22, &CMFCworkDlg::OnBnClickedButton22)
	ON_BN_CLICKED(IDC_BUTTON23, &CMFCworkDlg::OnBnClickedButton23)
END_MESSAGE_MAP()


// CMFCworkDlg message handlers

BOOL CMFCworkDlg::OnInitDialog()
{
	CDialogEx::OnInitDialog();

	// Add "About..." menu item to system menu.

	// IDM_ABOUTBOX must be in the system command range.
	ASSERT((IDM_ABOUTBOX & 0xFFF0) == IDM_ABOUTBOX);
	ASSERT(IDM_ABOUTBOX < 0xF000);

	CMenu* pSysMenu = GetSystemMenu(FALSE);
	if (pSysMenu != nullptr)
	{
		BOOL bNameValid;
		CString strAboutMenu;
		bNameValid = strAboutMenu.LoadString(IDS_ABOUTBOX);
		ASSERT(bNameValid);
		if (!strAboutMenu.IsEmpty())
		{
			pSysMenu->AppendMenu(MF_SEPARATOR);
			pSysMenu->AppendMenu(MF_STRING, IDM_ABOUTBOX, strAboutMenu);
		}
	}

	// Set the icon for this dialog.  The framework does this automatically
	//  when the application's main window is not a dialog
	SetIcon(m_hIcon, TRUE);			// Set big icon
	SetIcon(m_hIcon, FALSE);		// Set small icon

	// TODO: Add extra initialization here
	AllocConsole();   // open a console window so _cprintf debug output is visible

	return TRUE;  // return TRUE unless you set the focus to a control
}

void CMFCworkDlg::OnSysCommand(UINT nID, LPARAM lParam)
{
	if ((nID & 0xFFF0) == IDM_ABOUTBOX)
	{
		CAboutDlg dlgAbout;
		dlgAbout.DoModal();
	}
	else
	{
		CDialogEx::OnSysCommand(nID, lParam);
	}
}

// If you add a minimize button to your dialog, you will need the code below
//  to draw the icon.  For MFC applications using the document/view model,
//  this is automatically done for you by the framework.

void CMFCworkDlg::OnPaint()
{
	if (IsIconic())
	{
		CPaintDC dc(this); // device context for painting

		SendMessage(WM_ICONERASEBKGND, reinterpret_cast<WPARAM>(dc.GetSafeHdc()), 0);

		// Center icon in client rectangle
		int cxIcon = GetSystemMetrics(SM_CXICON);
		int cyIcon = GetSystemMetrics(SM_CYICON);
		CRect rect;
		GetClientRect(&rect);
		int x = (rect.Width() - cxIcon + 1) / 2;
		int y = (rect.Height() - cyIcon + 1) / 2;

		// Draw the icon
		dc.DrawIcon(x, y, m_hIcon);
	}
	else
	{
		CDialogEx::OnPaint();
	}
}

// The system calls this function to obtain the cursor to display while the user
// drags the minimized window.
HCURSOR CMFCworkDlg::OnQueryDragIcon()
{
	return static_cast<HCURSOR>(m_hIcon);
}


double max_double(double* array, int n)  // array: pointer to a double array; n: number of elements
{
	double max = array[0];
	for (int i = 0; i < n; i++)
	{
		if (max < array[i])
		{
			max = array[i];
		}
	}
	return max;
}
// Convert a BGR image to grayscale with a manual weighted sum (≈0.3R + 0.59G + 0.11B)
Mat gray_img(Mat image)
{
	Mat image_gray = Mat::zeros(image.size(), CV_8UC1);


	for (int i = 0; i < image.rows; i++)
	{
		for (int j = 0; j < image.cols; j++)
		{
			image_gray.at<uchar>(i, j) = 0.11 * image.at<Vec3b>(i, j)[0] + 0.59 * image.at<Vec3b>(i, j)[1] + 0.3 * image.at<Vec3b>(i, j)[2];
		}
	}



	// Show stuff
	//namedWindow("Original Image", 1);
	imshow("gray", image_gray);
	waitKey(0);
	return(image_gray);
}
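
// A minimal alternative sketch, not part of the original post: cv::cvtColor performs the same
// BGR-to-gray conversion in one call, using the standard weights 0.299*R + 0.587*G + 0.114*B.
// The helper name gray_img_builtin is added here purely for illustration.
Mat gray_img_builtin(const Mat& image)
{
	Mat image_gray;
	cvtColor(image, image_gray, COLOR_BGR2GRAY);   // replaces the per-pixel weighted-sum loop above
	return image_gray;
}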

// Per-channel (B, G, R) histogram of a color image
void imagehist(Mat src)
{
	Mat EH = Mat::zeros(src.size(), CV_8UC1);


	double hist[3][256];

	for (int i = 0; i < 3; i++)
	{
		for (int j = 0; j < 256; j++)
		{
			hist[i][j] = 0;
		}
	}


	for (int k = 0; k < 3; k++)
	{
		for (int i = 0; i < src.rows; i++)
		{
			for (int j = 0; j < src.cols; j++)
			{
				hist[k][src.at<Vec3b>(i, j)[k]]++;
			}
		}

	}
	
	char color[3] = { 'B','G','R' };
	for (int k = 0; k < 3; k++)
	{
		Mat frame(257, 256, CV_8U, Scalar(0));
		double max_value = max_double(hist[k], 256);   // tallest bin, used to scale the plot
		for (int i = 0; i < 256; i++)
		{
			int pointend = cvRound(hist[k][i] / max_value * 256 * 0.9);
			line(frame, Point(i, 256), Point(i, 256 - pointend), Scalar(255, 255, 255), 2);
		}
		string window_name = string("Histogram ") + color[k];   // label the window with the channel name
		imshow(window_name, frame);
		waitKey(0);
	}



}

// Plot the histogram of a single-channel (grayscale) image
void grayhist(Mat image_gray)
{
	double hist[256];


	for (int j = 0; j < 256; j++)
	{
		hist[j] = 0;
	}




	for (int i = 0; i < image_gray.rows; i++)
	{
		for (int j = 0; j < image_gray.cols; j++)
		{
			hist[image_gray.at<uchar>(i, j)]++;
		}
	}



	Mat frame(257, 256, CV_8U, Scalar(0));
	double max_value = max_double(hist, 256);   // tallest bin, used to scale the plot
	for (int i = 0; i < 256; i++)
	{
		int pointend = cvRound(hist[i] / max_value * 256 * 0.9);
		line(frame, Point(i, 256), Point(i, 256 - pointend), Scalar(255, 255, 255), 2);   // bars grow upward from the bottom edge
	}

	imshow("灰度直方图", frame);
	waitKey(0);
}
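
// A minimal sketch of the same plot built with cv::calcHist instead of the manual counting loop
// (an illustrative alternative, not code from the original post; grayhist_builtin is a made-up name).
void grayhist_builtin(Mat image_gray)
{
	int histSize = 256;
	float range[] = { 0, 256 };
	const float* histRange = range;
	Mat hist;
	calcHist(&image_gray, 1, 0, Mat(), hist, 1, &histSize, &histRange);   // 1 image, 1-D, 256 bins, no mask

	double max_value = 0;
	minMaxLoc(hist, 0, &max_value);              // tallest bin, used to scale the plot
	Mat frame(257, 256, CV_8U, Scalar(0));
	for (int i = 0; i < 256; i++)
	{
		int pointend = cvRound(hist.at<float>(i) / max_value * 256 * 0.9);
		line(frame, Point(i, 256), Point(i, 256 - pointend), Scalar(255), 2);
	}
	imshow("histogram (calcHist)", frame);
	waitKey(0);
}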



// Histogram equalization implemented by hand (distinct from cv::equalizeHist, which takes two arguments)
Mat equalizeHist(Mat image)
{
	// Convert to grayscale first
	Mat image_gray = Mat::zeros(image.size(), CV_8UC1);


	for (int i = 0; i < image.rows; i++)
	{
		for (int j = 0; j < image.cols; j++)
		{
			image_gray.at<uchar>(i, j) = 0.11 * image.at<Vec3b>(i, j)[0] + 0.59 * image.at<Vec3b>(i, j)[1] + 0.3 * image.at<Vec3b>(i, j)[2];
		}
	}
	// Count the grayscale histogram



	double hist[256];


	for (int j = 0; j < 256; j++)
	{
		hist[j] = 0;
	}




	for (int i = 0; i < image.rows; i++)
	{
		for (int j = 0; j < image.cols; j++)
		{
			hist[image_gray.at<uchar>(i, j)]++;
		}
	}



		Mat frame(257, 256, CV_8U, Scalar(0));
		double max_value = max_double(hist, 256);   // tallest bin, used to scale the plot
		for (int i = 0; i < 256; i++)
		{
			int pointend = cvRound(hist[i] / max_value * 256 * 0.9);
			line(frame, Point(i, 256), Point(i, 256 - pointend), Scalar(255, 255, 255), 2);   // bars grow upward from the bottom edge
		}
		
		imshow("灰度直方图", frame);
		waitKey(0);


		// Normalize counts to probabilities
		double hist_norm[256];
		for (int i = 0; i < 256; i++)
		{
			hist_norm[i] = hist[i] / image.rows / image.cols;
		}

		// Cumulative distribution scaled to [0, 255]: this is the equalization mapping
		double hist_sum[256];
		for (int i = 0; i < 256; i++)
		{
			hist_sum[i] = 0;
		}

		for (int i = 0; i < 256; i++)
		{
			for (int j = 0; j <= i; j++)
			{
				hist_sum[i] = hist_sum[i] + hist_norm[j] * 255;
			}
		}

		Mat dst= Mat::zeros(image.size(), CV_8UC1);
		for (int i = 0; i < image.rows; i++)
		{
			for (int j=0; j < image.cols; j++)
			{
				dst.at<uchar>(i, j) = hist_sum[image_gray.at<uchar>(i, j)];
			}
		}
		imshow("灰度图", image_gray);
		imshow("灰度均衡化", dst);
		

		return(dst);

	}
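
// A minimal sketch, not part of the original post: the library routine cv::equalizeHist applies
// the same cumulative-histogram mapping in one call (equalizeHist_builtin is an illustrative name).
Mat equalizeHist_builtin(const Mat& image)
{
	Mat image_gray, dst;
	cvtColor(image, image_gray, COLOR_BGR2GRAY);
	cv::equalizeHist(image_gray, dst);   // qualified to avoid the hand-written equalizeHist above
	return dst;
}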




void CMFCworkDlg::OnBnClickedButton1()
{
	// Show the per-channel histograms of the input image
	Mat image = imread("ema.jpg", 1);

	imagehist(image);
	//equalizeHist(image);

}


void CMFCworkDlg::OnBnClickedButton2()
{
	// Convert the input image to grayscale
	Mat image = imread("ema.jpg", 1);
	Mat image_gray= gray_img(image);

}



void CMFCworkDlg::OnBnClickedButton3()
{
	// Histogram equalization, then plot the histogram of the equalized image
	Mat image = imread("ema.jpg", 1);
	Mat dst = Mat::zeros(image.size(), CV_8UC1);
	Mat gray_blance = equalizeHist(image);
	//Mat image_gray = gray_img(image);
	grayhist(gray_blance);
}


void CMFCworkDlg::OnBnClickedButton4()
{
	// Gradient computation and gradient-based sharpening
	Mat image = imread("ema.jpg", 1);
	Mat image_gray = gray_img(image);
	Mat result, grad;
	
	result.create(image.size(), CV_8UC1);
	grad.create(image.size(), CV_8UC1);

	// The border rows/columns have no gradient, so initialize the result with the grayscale image first
	for (int i = 0; i < image_gray.rows; i++)
	{
		for (int j = 0; j < image_gray.cols; j++)
		{
			result.at<uchar>(i, j) = image_gray.at<uchar>(i, j);
		}
	}


	for (int i = 1; i < image_gray.rows - 1; i++)
	{
		for (int j = 1; j < image_gray.cols - 1; j++)
		{
			grad.at<uchar>(i, j) = saturate_cast<uchar>(fabs(image_gray.at<uchar>(i, j) - image_gray.at<uchar>(i - 1, j)));//+ fabs(image_gray.at<uchar>(i, j) - image_gray.at<uchar>(i , j-1))
			result.at<uchar>(i, j) = saturate_cast<uchar>( image_gray.at<uchar>(i, j) - fabs(image_gray.at<uchar>(i, j) - image_gray.at<uchar>(i - 1, j)));
			
		}

	}
	imshow("灰度图", image_gray);
	imshow("梯度", grad);
	imshow("梯度锐化",result);
	waitKey(0);
}


void CMFCworkDlg::OnBnClickedButton5()
{
	// Laplacian enhancement (sharpening)
	Mat image = imread("ema.jpg", 1);
	Mat image_gray = gray_img(image);
	Mat result, laplaceimg;

	result.create(image.size(), CV_8UC1);
	laplaceimg.create(image.size(), CV_8UC1);

	for (int i = 1; i < image_gray.rows-1; i++)
	{
		for (int j = 1; j < image_gray.cols-1; j++)
		{
			result.at<uchar>(i, j) = saturate_cast<uchar>(5 * image_gray.at<uchar>(i, j) - image_gray.at<uchar>(i + 1, j) - image_gray.at<uchar>(i - 1, j)- image_gray.at<uchar>(i, j+1)- image_gray.at<uchar>(i, j-1));
			laplaceimg.at<uchar>(i, j) = saturate_cast<uchar>(image_gray.at<uchar>(i+1, j)+ image_gray.at<uchar>(i-1, j)+ image_gray.at<uchar>(i, j+1)+ image_gray.at<uchar>(i, j-1)-4* image_gray.at<uchar>(i, j));
		}

	}
	imshow("laplace增强", result);
	imshow("laplace算子", laplaceimg);
	waitKey(0);
}
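
// A minimal sketch, assuming the same 3x3 sharpening kernel as the loop above (center 5, cross -1):
// cv::filter2D applies it in one call. laplace_sharpen_builtin is an illustrative helper name.
void laplace_sharpen_builtin(const Mat& image_gray)
{
	Mat kernel = (Mat_<float>(3, 3) <<
		 0, -1,  0,
		-1,  5, -1,
		 0, -1,  0);
	Mat result;
	filter2D(image_gray, result, CV_8U, kernel);   // output saturated to uchar, like saturate_cast above
	imshow("laplace sharpening (filter2D)", result);
	waitKey(0);
}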


void CMFCworkDlg::OnBnClickedButton6()
{
	// Edge detection with the Roberts operator
	Mat image = imread("ema.jpg", 1);
	Mat image_gray = gray_img(image);
	Mat grad;

	grad.create(image.size(), CV_8UC1);
	for (int i = 1; i < image_gray.rows - 1; i++)
	{
		for (int j = 1; j < image_gray.cols - 1; j++)
		{
			grad.at<uchar>(i, j) = saturate_cast<uchar>(fabs(image_gray.at<uchar>(i, j) - image_gray.at<uchar>(i - 1, j-1))+ fabs(image_gray.at<uchar>(i, j-1) - image_gray.at<uchar>(i-1 , j )));
		}
	}
	imshow("灰度图", image_gray);
	imshow("Roberts", grad);
	waitKey(0);   // needed so the HighGUI windows actually repaint
}
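
// A minimal sketch, not from the original post: the Roberts cross gradients expressed with
// cv::filter2D and the two 2x2 kernels, then combined as |gx| + |gy| like the loop above.
void roberts_builtin(const Mat& image_gray)
{
	Mat kx = (Mat_<float>(2, 2) << 1, 0, 0, -1);
	Mat ky = (Mat_<float>(2, 2) << 0, 1, -1, 0);
	Mat gx, gy, grad;
	filter2D(image_gray, gx, CV_16S, kx);
	filter2D(image_gray, gy, CV_16S, ky);
	convertScaleAbs(gx, gx);                 // |gx| as 8-bit
	convertScaleAbs(gy, gy);                 // |gy| as 8-bit
	addWeighted(gx, 1, gy, 1, 0, grad);      // |gx| + |gy|
	imshow("Roberts (filter2D)", grad);
	waitKey(0);
}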



void CMFCworkDlg::OnBnClickedButton7()
{
	// Edge detection with the Sobel operator
	Mat image = imread("ema.jpg", 1);
	Mat image_gray = gray_img(image);
	Mat gradx,grady;

	gradx.create(image.size(), CV_8UC1);
	grady.create(image.size(), CV_8UC1);


	for (int i = 1; i < image.rows-1; i++)
	{
		for (int j = 1; j < image.cols-1; j++)
		{
			gradx.at<uchar>(i, j) = saturate_cast<uchar>(fabs(image_gray.at<uchar>(i + 1, j - 1) + 2 * image_gray.at<uchar>(i + 1, j) + image_gray.at<uchar>(i + 1, j +1) -image_gray.at<uchar>(i - 1, j - 1) - 2 * image_gray.at<uchar>(i - 1, j) - image_gray.at<uchar>(i - 1, j + 1)));
			grady.at<uchar>(i, j) = saturate_cast<uchar>(fabs(image_gray.at<uchar>(i - 1, j + 1) + 2* image_gray.at<uchar>(i , j + 1)+ image_gray.at<uchar>(i+1,j+1)-image_gray.at<uchar>(i-1,j-1)-2*image_gray.at<uchar>(i,j-1)-image_gray.at<uchar>(i+1,j-1)));
		}
	}
	imshow("灰度图", image_gray);
	imshow("gradx", gradx);
	imshow("grady", grady);
	waitKey(0);   // needed so the HighGUI windows actually repaint
}
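
// A minimal sketch, not from the original post: cv::Sobel computes the same 3x3 x/y gradients
// as the hand-written loops above (sobel_builtin is an illustrative helper name).
void sobel_builtin(const Mat& image_gray)
{
	Mat gx, gy;
	Sobel(image_gray, gx, CV_16S, 1, 0, 3);  // d/dx
	Sobel(image_gray, gy, CV_16S, 0, 1, 3);  // d/dy
	convertScaleAbs(gx, gx);
	convertScaleAbs(gy, gy);
	imshow("Sobel gradx (built-in)", gx);
	imshow("Sobel grady (built-in)", gy);
	waitKey(0);
}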



void CMFCworkDlg::OnBnClickedButton8()
{
	// Edge detection with the Canny operator
	Mat image = imread("ema.jpg", 1);
	Mat image_gray = gray_img(image);

	Mat gray_src;
	int t1_value  = 50;
	int max_value = 255;

	gray_src.create(image.size(), CV_8UC1);
	namedWindow("input image", WINDOW_AUTOSIZE);
	namedWindow("output image", WINDOW_AUTOSIZE);
	imshow("input image", image_gray);
	cvtColor(image, gray_src, COLOR_BGR2GRAY);

	Mat edge_output;
	blur(gray_src, gray_src, Size(3, 3), Point(-1, -1), BORDER_DEFAULT);
	cv::Canny(gray_src, edge_output, t1_value, t1_value * 2, 3, false);
	imshow("output image", edge_output);

	waitKey(0);
}


void CMFCworkDlg::OnBnClickedButton9()
{
	// 3x3 Gaussian smoothing (1-2-1 kernel, weights normalized by 16)
	Mat image = imread("ema.jpg", 1);
	Mat image_gray = gray_img(image);
	Mat result;

	result.create(image.size(), CV_8UC1);

	for (int i = 1; i < image_gray.rows - 1; i++)
	{
		for (int j = 1; j < image_gray.cols - 1; j++)
		{
			result.at<uchar>(i, j) = saturate_cast<uchar>((image_gray.at<uchar>(i - 1, j - 1) + 2 * image_gray.at<uchar>(i - 1, j) + image_gray.at<uchar>(i - 1, j + 1) + 2 * image_gray.at<uchar>(i, j - 1) + 4 * image_gray.at<uchar>(i, j) + 2 * image_gray.at<uchar>(i, j + 1) + image_gray.at<uchar>(i + 1, j - 1) + 2 * image_gray.at<uchar>(i + 1, j) + image_gray.at<uchar>(i + 1, j + 1)) / 16);

		}

	}
	imshow("高斯", result);
	waitKey(0);

}
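
// A minimal sketch, not from the original post: cv::GaussianBlur with a 3x3 kernel applies
// essentially the same 1-2-1 smoothing as the explicit /16 weighted sum above.
void gaussian_builtin(const Mat& image_gray)
{
	Mat result;
	GaussianBlur(image_gray, result, Size(3, 3), 0);   // sigma derived from the kernel size
	imshow("Gaussian (built-in)", result);
	waitKey(0);
}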


void CMFCworkDlg::OnBnClickedButton10()
{
	// Morphological opening: erosion followed by dilation with the same structuring element
	Mat image = imread("source.jpg", 0);
	Mat structure_element = getStructuringElement(MORPH_RECT, Size(13, 12));
	Mat dst_erode, dst_dilate;
	dst_erode.create(image.size(), CV_8UC1);
	dst_dilate.create(image.size(), CV_8UC1);

	erode(image, dst_erode, structure_element);
	dilate(dst_erode, dst_dilate, structure_element);
	imshow("image", image);
	imshow("erode", dst_erode);
	imshow("dilate", dst_dilate);
	_cprintf("pass\n");
	waitKey(0);

}
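
// A minimal sketch, not from the original post: erosion followed by dilation with the same
// element is a morphological opening, available directly as cv::morphologyEx with MORPH_OPEN.
void open_builtin(const Mat& image)
{
	Mat element = getStructuringElement(MORPH_RECT, Size(13, 12));
	Mat opened;
	morphologyEx(image, opened, MORPH_OPEN, element);
	imshow("open (morphologyEx)", opened);
	waitKey(0);
}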


int cal_otsu(Mat image)
{
	vector<int>hist(256);
	for (int i = 0; i<image.rows; i++)
	{
		for(int j = 0; j < image.cols; j++)
		{
			hist[image.at<uchar>(i, j)]++;
		}
	}
	float u0, u1, w0, w1; int count0, t, maxT = 0; float devi, maxDevi = 0;
	int i, sum = 0;
	sum = image.rows * image.cols;
	_cprintf("sum = %d\n", sum);
	for (t = 0; t < 255; t++)
	{
		u0 = 0; count0 = 0; u1 = 0;

		for (i = 0; i <= t; i++)
		{
			u0 += i * hist[i]; count0 += hist[i];
		}
		if (count0 == 0 || count0 == sum)   // skip thresholds that leave one class empty
			continue;
		u0 = u0 / count0; w0 = (float)count0 / sum;
		for (i = t + 1; i < 256; i++)
		{
			u1 += i * hist[i];
		}
		u1 = u1 / (sum - count0); w1 = 1 - w0;
		devi = w0 * w1 * (u1 - u0) * (u1 - u0);   // between-class variance
		if (devi > maxDevi)                       // keep the largest variance and its threshold
		{
			maxDevi = devi;
			maxT = t;
		}
	}
	return maxT;
}
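
// A minimal sketch, not from the original post: cv::threshold with THRESH_OTSU picks the same
// between-class-variance-maximizing threshold that cal_otsu computes by hand, and returns it.
int otsu_builtin(const Mat& image, Mat& binary)
{
	double KT = threshold(image, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
	return (int)KT;   // the threshold actually used
}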






void CMFCworkDlg::OnBnClickedButton11()
{
	// Otsu thresholding: binarize with the threshold that maximizes the between-class variance
	Mat src = imread("ema.jpg", 0);
	int KT = cal_otsu(src);
	_cprintf("KT=%d\n", KT);
    Mat grayImg;
	grayImg.create(src.rows, src.cols, CV_8UC1);
	for (int i = 0; i < src.rows; i++)
	{
		for (int j = 0; j < src.cols; j++)
		{
			if (src.at<uchar>(i, j) > KT)
				grayImg.at<uchar>(i, j) = 255;
			else
				grayImg.at<uchar>(i, j) = 0;
		}
	}

	imshow("src", src);
	imshow("grayImg", grayImg);
	waitKey(0);



}

int cal_kittler(Mat image)

{

	Mat gradx, grady,grad;

	gradx.create(image.size(), CV_8UC1);
	grady.create(image.size(), CV_8UC1);
	grad.create(image.size(), CV_8UC1);

	float grad_f_multi_sum = 0;
	float grad_sum = 0;

	for (int i = 1; i < image.rows - 1; i++)
	{
		for (int j = 1; j < image.cols - 1; j++)
		{
			gradx.at<uchar>(i, j) = saturate_cast<uchar>(fabs(image.at<uchar>(i, j) - image.at<uchar>(i - 1, j)));
			grady.at<uchar>(i, j) = saturate_cast<uchar>(fabs(image.at<uchar>(i, j) - image.at<uchar>(i, j - 1)));
			grad.at<uchar>(i, j) = max(gradx.at<uchar>(i, j), grady.at<uchar>(i, j));

			grad_f_multi_sum = grad_f_multi_sum + grad.at<uchar>(i, j) * image.at<uchar>(i, j);
			grad_sum = grad_sum + grad.at<uchar>(i, j);
		}
	}
	return grad_f_multi_sum/ grad_sum;
}

void CMFCworkDlg::OnBnClickedButton12()
{
	// Kittler-style threshold: gradient-weighted mean gray level
	Mat src = imread("ema.jpg", 0);
	int KT = cal_kittler(src);

	_cprintf("KT=%d\n", KT);
	Mat grayImg;
	grayImg.create(src.rows, src.cols, CV_8UC1);
	for (int i = 0; i < src.rows; i++)
	{
		for (int j = 0; j < src.cols; j++)
		{
			if (src.at<uchar>(i, j) > KT)
				grayImg.at<uchar>(i, j) = 255;
			else
				grayImg.at<uchar>(i, j) = 0;
		}
	}

	imshow("src", src);
	imshow("grayImg", grayImg);
	waitKey(0);
}



void CMFCworkDlg::OnBnClickedButton13()
{
	// Fixed-threshold binarization (threshold = 100)
	Mat src = imread("ema.jpg", 0);
	Mat dst;
	threshold(src,dst,100,255,THRESH_BINARY);
	imshow("src",src);
	imshow("dst", dst);
	waitKey(0);
}


void CMFCworkDlg::OnBnClickedButton14()
{
	// Background subtraction: compare the KNN and MOG2 subtractors on the same video
	Mat greyimg;
	Mat foreground, foreground2;
	Ptr<BackgroundSubtractorKNN> ptrKNN = createBackgroundSubtractorKNN(100, 400, true);   // history, dist2Threshold, detectShadows
	Ptr<BackgroundSubtractorMOG2> mog2 = createBackgroundSubtractorMOG2(100, 25, true);    // history, varThreshold, detectShadows
	namedWindow("Extracted Foreground");
	VideoCapture pCapture;
	Mat pframe;
	pCapture = VideoCapture("pets2001.avi");

	while (1)
	{
		pCapture >> pframe;
		if (pframe.data == NULL)
			return;
		cvtColor(pframe, greyimg, CV_BGR2GRAY);
		long long t = getTickCount();
		ptrKNN->apply(pframe, foreground, 0.01);
		long long t1 = getTickCount();
		mog2->apply(greyimg, foreground2, -1);
		long long t2 = getTickCount();
		_cprintf("t1 = %f t2 = %f\n", (t1 - t) / getTickFrequency(), (t2 - t1) / getTickFrequency());
		imshow("Extracted Foreground", foreground);
		imshow("Extracted Foreground2", foreground2);
		imshow("video", pframe);
		waitKey(10);
	}
	waitKey();

}


void CMFCworkDlg::OnBnClickedButton15()
{
	// Train and visualize a small SVM classifier on five 2-D sample points
	int iWidth = 512, iheight = 512;
	Mat matImg = Mat::zeros(iheight, iWidth, CV_8UC3);   // 3-channel canvas for visualizing the decision regions
	// 1. Training samples
	int labels[5] = { 1, -1, -1, -1, 1 };                // sample labels
	Mat labelsMat(5, 1, CV_32SC1, labels);
	float trainingData[5][2] = { { 501, 300 },{ 255, 10 },{ 501, 255 },{ 10, 501 },{ 450, 500 } };   // feature vectors
	Mat trainingDataMat(5, 2, CV_32FC1, trainingData);
	// 2. Configure the SVM
	Ptr<ml::SVM> svm = ml::SVM::create();
	svm->setType(ml::SVM::C_SVC);        // C-support vector classification (handles non-separable data)
	svm->setKernel(ml::SVM::POLY);       // polynomial kernel (SVM::LINEAR would give a linear boundary)
	/*svm->setGamma(0.01);
	svm->setC(10.0);*/
	// termination criterion for the training algorithm
	svm->setDegree(1.0);
	svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
	// 3. Train the support vectors
	svm->train(trainingDataMat, ml::SampleTypes::ROW_SAMPLE, labelsMat);
	// 4. Save the trained model
	svm->save("mnist_svm.xml");
	// 5. Reload the trained model (optional)
	//Ptr<SVM> svm1 = StatModel::load<SVM>("mnist_dataset/mnist_svm.xml");

	// Classify every canvas pixel and color it by the predicted class
	Vec3b green(0, 255, 0), blue(255, 0, 0);
	for (int i = 0; i < matImg.rows; i++)
	{
		for (int j = 0; j < matImg.cols; j++)
		{
			Mat sampleMat = (Mat_<float>(1, 2) << j, i);
			float fRespone = svm->predict(sampleMat);
			if (fRespone == 1)
			{
				matImg.at<cv::Vec3b>(i, j) = green;
			}
			else if (fRespone == -1)
			{
				matImg.at<cv::Vec3b>(i, j) = blue;
			}


		}
	}
	// Show the training data  
	int thickness = -1;
	int lineType = 8;
	for (int i = 0; i < trainingDataMat.rows; i++)
	{
		if (labels[i] == 1)
		{
			circle(matImg, Point(trainingData[i][0], trainingData[i][1]), 5, Scalar(0, 0, 0), thickness, lineType);
		}
		else
		{
			circle(matImg, Point(trainingData[i][0], trainingData[i][1]), 5, Scalar(255, 255, 255), thickness, lineType);
		}
	}

	// Mark the support vectors
	thickness = 2;
	lineType = 8;
	Mat vec = svm->getSupportVectors();
	int nVarCount = svm->getVarCount();   // dimensionality of the feature vectors
	_cprintf("vec.rows=%d vec.cols=%d\n", vec.rows, vec.cols);
	for (int i = 0; i < vec.rows; ++i)
	{
		int x = (int)vec.at<float>(i, 0);
		int y = (int)vec.at<float>(i, 1);
		_cprintf("vec.at=%d %f,%f\n", i, vec.at<float>(i, 0), vec.at<float>(i, 1));
		_cprintf("x=%d,y=%d\n", x, y);
		circle(matImg, Point(x, y), 6, Scalar(0, 0, 255), thickness, lineType);
	}


	imshow("circle", matImg); // show it to the user  
	waitKey(0);
}
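
// A minimal sketch, assuming the mnist_svm.xml file written by the handler above: reload the
// trained SVM and classify a single 2-D point (an illustrative helper, not original code).
void svm_load_and_predict_sketch()
{
	Ptr<ml::SVM> svm = ml::SVM::load("mnist_svm.xml");
	Mat sample = (Mat_<float>(1, 2) << 400.f, 400.f);   // arbitrary test point
	float label = svm->predict(sample);
	_cprintf("predicted label = %f\n", label);
}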


void CMFCworkDlg::OnBnClickedButton16()
{
	// SIFT feature detection and brute-force matching between two images
	// Load the images
	Mat src1 = imread("1.1.jpg", 1);
	Mat src2 = imread("1.2.jpg", 1);
	imshow("src1", src1);
	imshow("src2", src2);

	if (!src1.data || !src2.data)
	{
		_cprintf(" --(!) Error reading images \n");
		return;
	}

	//sift feature detect  
	Ptr<SIFT> siftdetector = SIFT::create();
	vector<KeyPoint> kp1, kp2;

	siftdetector->detect(src1, kp1);
	siftdetector->detect(src2, kp2);
	Mat des1, des2;//descriptor  
	siftdetector->compute(src1, kp1, des1);
	siftdetector->compute(src2, kp2, des2);
	Mat res1, res2;

	drawKeypoints(src1, kp1, res1);   // draw the keypoints onto copies of the images
	drawKeypoints(src2, kp2, res2);

	_cprintf("size of description of Img1: %d\n", (int)kp1.size());
	_cprintf("size of description of Img2: %d\n", (int)kp2.size());

	Mat transimg1, transimg2;
	transimg1 = res1.clone();
	transimg2 = res2.clone();

	char str1[20], str2[20];
	sprintf_s(str1, "%d", (int)kp1.size());
	sprintf_s(str2, "%d", (int)kp2.size());

	const char* str = str1;
	putText(transimg1, str1, Point(280, 230), 0, 1.0, Scalar(255, 0, 0), 2);   // draw the keypoint count on the image

	str = str2;
	putText(transimg2, str2, Point(280, 230), 0, 1.0, Scalar(255, 0, 0), 2);

	//imshow("Description 1", res1);
	imshow("descriptor1", transimg1);
	imshow("descriptor2", transimg2);

	BFMatcher matcher(NORM_L2, true);
	vector<DMatch> matches;
	matcher.match(des1, des2, matches);
	Mat img_match;
	drawMatches(src1, kp1, src2, kp2, matches, img_match);//,Scalar::all(-1),Scalar::all(-1),vector<char>(),drawmode);  
	_cprintf("number of matched points: %d\n", (int)matches.size());
	imshow("matches", img_match);
	waitKey(10);
}
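
// A minimal sketch, not from the original post: filtering SIFT matches with Lowe's ratio test
// via knnMatch, which usually discards ambiguous matches that the plain cross-check keeps.
vector<DMatch> ratio_test_sketch(const Mat& des1, const Mat& des2)
{
	BFMatcher matcher(NORM_L2);                  // cross-check must stay off when asking for k = 2
	vector<vector<DMatch>> knn;
	matcher.knnMatch(des1, des2, knn, 2);        // two nearest neighbours per descriptor
	vector<DMatch> good;
	for (size_t i = 0; i < knn.size(); i++)
	{
		if (knn[i].size() == 2 && knn[i][0].distance < 0.75f * knn[i][1].distance)
			good.push_back(knn[i][0]);
	}
	return good;
}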


void CMFCworkDlg::OnBnClickedButton17()
{
	// ORB feature matching between two images
	Mat img1 = imread("1.1.jpg");
	Mat img2 = imread("1.2.jpg");
	// 1. Initialize keypoints and descriptors, ORB
	vector<KeyPoint> keypoints1, keypoints2;
	Mat descriptors1, descriptors2;
	Ptr<ORB> orb = ORB::create();
	// 2. Detect Oriented FAST keypoints
	orb->detect(img1, keypoints1);
	orb->detect(img2, keypoints2);
	// 3. Compute BRIEF descriptors at the keypoint locations
	orb->compute(img1, keypoints1, descriptors1);
	orb->compute(img2, keypoints2, descriptors2);
	// 4. Match the BRIEF descriptors of the two images using the Hamming distance
	vector<DMatch> matches;
	BFMatcher bfmatcher(NORM_HAMMING, true);
	bfmatcher.match(descriptors1, descriptors2, matches);
	// 5. Draw the match result
	Mat img_match;
	drawMatches(img1, keypoints1, img2, keypoints2, matches, img_match);
	imshow("所有匹配点对", img_match);
	waitKey(0);
}


void CMFCworkDlg::OnBnClickedButton18()
{
	// ORB matching + RANSAC homography to locate the object image inside the scene image
	Mat obj = imread("1.1.jpg");     // object (template) image
	Mat scene = imread("1.2.jpg");   // scene image
	if (obj.empty() || scene.empty())
	{
		cout << "Can't open the picture!\n";
		return;
	}
	vector<KeyPoint> obj_keypoints, scene_keypoints;
	Mat obj_descriptors, scene_descriptors;
	Ptr<ORB> detector = ORB::create();

	detector->detect(obj, obj_keypoints);
	detector->detect(scene, scene_keypoints);
	detector->compute(obj, obj_keypoints, obj_descriptors);
	detector->compute(scene, scene_keypoints, scene_descriptors);

	BFMatcher matcher(NORM_HAMMING, true); // Hamming distance as the similarity measure
	vector<DMatch> matches;
	matcher.match(obj_descriptors, scene_descriptors, matches);
	Mat match_img;
	drawMatches(obj, obj_keypoints, scene, scene_keypoints, matches, match_img);
	imshow("滤除误匹配前", match_img);

	// Record the keypoint index of each match
	vector<int> queryIdxs(matches.size()), trainIdxs(matches.size());
	for (size_t i = 0; i < matches.size(); i++)
	{
		queryIdxs[i] = matches[i].queryIdx;
		trainIdxs[i] = matches[i].trainIdx;
	}

	Mat H12;   // homography from the object image to the scene image

	vector<Point2f> points1;
	KeyPoint::convert(obj_keypoints, points1, queryIdxs);
	vector<Point2f> points2;
	KeyPoint::convert(scene_keypoints, points2, trainIdxs);
	int ransacReprojThreshold = 5;  // RANSAC reprojection (rejection) threshold in pixels

	// In OpenCV 4 the flag is RANSAC (the old CV_RANSAC name was dropped)
	//H12 = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold);
	H12 = findHomography(Mat(points1), Mat(points2), RANSAC, ransacReprojThreshold);
	vector<char> matchesMask(matches.size(), 0);
	Mat points1t;
	perspectiveTransform(Mat(points1), points1t, H12);
	for (size_t i1 = 0; i1 < points1.size(); i1++)  // keep the inliers
	{
		if (norm(points2[i1] - points1t.at<Point2f>((int)i1, 0)) <= ransacReprojThreshold) // reprojection error small enough: mark as inlier
		{
			matchesMask[i1] = 1;
		}
	}
	Mat match_img2;   // match visualization after removing the outliers
	drawMatches(obj, obj_keypoints, scene, scene_keypoints, matches, match_img2, Scalar(0, 0, 255), Scalar::all(-1), matchesMask);

	// Draw the detected object outline in the scene
	std::vector<Point2f> obj_corners(4);
	obj_corners[0] = Point(0, 0); obj_corners[1] = Point(obj.cols, 0);
	obj_corners[2] = Point(obj.cols, obj.rows); obj_corners[3] = Point(0, obj.rows);
	std::vector<Point2f> scene_corners(4);
	perspectiveTransform(obj_corners, scene_corners, H12);
	//line( match_img2, scene_corners[0] + Point2f(static_cast<float>(obj.cols), 0),scene_corners[1] + Point2f(static_cast<float>(obj.cols), 0),Scalar(0,0,255),2);
	//line( match_img2, scene_corners[1] + Point2f(static_cast<float>(obj.cols), 0),scene_corners[2] + Point2f(static_cast<float>(obj.cols), 0),Scalar(0,0,255),2);
	//line( match_img2, scene_corners[2] + Point2f(static_cast<float>(obj.cols), 0),scene_corners[3] + Point2f(static_cast<float>(obj.cols), 0),Scalar(0,0,255),2);
	//line( match_img2, scene_corners[3] + Point2f(static_cast<float>(obj.cols), 0),scene_corners[0] + Point2f(static_cast<float>(obj.cols), 0),Scalar(0,0,255),2);
	line(match_img2, Point2f((scene_corners[0].x + static_cast<float>(obj.cols)), (scene_corners[0].y)), Point2f((scene_corners[1].x + static_cast<float>(obj.cols)), (scene_corners[1].y)), Scalar(0, 0, 255), 2);
	line(match_img2, Point2f((scene_corners[1].x + static_cast<float>(obj.cols)), (scene_corners[1].y)), Point2f((scene_corners[2].x + static_cast<float>(obj.cols)), (scene_corners[2].y)), Scalar(0, 0, 255), 2);
	line(match_img2, Point2f((scene_corners[2].x + static_cast<float>(obj.cols)), (scene_corners[2].y)), Point2f((scene_corners[3].x + static_cast<float>(obj.cols)), (scene_corners[3].y)), Scalar(0, 0, 255), 2);
	line(match_img2, Point2f((scene_corners[3].x + static_cast<float>(obj.cols)), (scene_corners[3].y)), Point2f((scene_corners[0].x + static_cast<float>(obj.cols)), (scene_corners[0].y)), Scalar(0, 0, 255), 2);

	float A_th;
	A_th = atan(abs((scene_corners[3].y - scene_corners[0].y) / (scene_corners[3].x - scene_corners[0].x)));
	A_th = 90 - 180 * A_th / CV_PI;   // rotation angle of the detected object, in degrees
	_cprintf("angle=%f\n", A_th);

	imshow("滤除误匹配后", match_img2);

	//line( scene, scene_corners[0],scene_corners[1],Scalar(0,0,255),2);
	//line( scene, scene_corners[1],scene_corners[2],Scalar(0,0,255),2);
	//line( scene, scene_corners[2],scene_corners[3],Scalar(0,0,255),2);
	//line( scene, scene_corners[3],scene_corners[0],Scalar(0,0,255),2);

	imshow("场景图像", scene);

	Mat rotimage;
	Mat rotate = getRotationMatrix2D(Point(scene.cols / 2, scene.rows / 2), A_th, 1);
	warpAffine(scene, rotimage, rotate, scene.size());
	imshow("rotimage", rotimage);


	// Method 3: rectify the detected region with a perspective transform
	Point2f src_point[4];
	Point2f dst_point[4];
	src_point[0].x = scene_corners[0].x;
	src_point[0].y = scene_corners[0].y;
	src_point[1].x = scene_corners[1].x;
	src_point[1].y = scene_corners[1].y;
	src_point[2].x = scene_corners[2].x;
	src_point[2].y = scene_corners[2].y;
	src_point[3].x = scene_corners[3].x;
	src_point[3].y = scene_corners[3].y;


	dst_point[0].x = 0;
	dst_point[0].y = 0;
	dst_point[1].x = obj.cols;
	dst_point[1].y = 0;
	dst_point[2].x = obj.cols;
	dst_point[2].y = obj.rows;
	dst_point[3].x = 0;
	dst_point[3].y = obj.rows;

	Mat newM(3, 3, CV_32FC1);
	newM = getPerspectiveTransform(src_point, dst_point);

	Mat dst = scene.clone();

	warpPerspective(scene, dst, newM, obj.size());

	imshow("obj", obj);
	imshow("dst", dst);

	Mat resultimg = dst.clone();

	absdiff(obj, dst, resultimg);   // absolute difference between the object image and the rectified scene patch

	imshow("result", resultimg);

	imshow("dst", dst);
	imshow("src", obj);
}
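
// A minimal sketch, not from the original post: findHomography can return the RANSAC inlier mask
// directly, which replaces the manual reprojection check used in the handler above.
Mat homography_with_mask_sketch(const vector<Point2f>& pts1, const vector<Point2f>& pts2,
	vector<uchar>& inlierMask)
{
	// inlierMask[i] is 1 for matches consistent with the estimated homography
	return findHomography(pts1, pts2, RANSAC, 5.0, inlierMask);
}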


Mat image;
bool backprojMode = false;
bool selectObject = false;
int trackObject = 0;
bool showHist = true;
Point origin;
Rect selection;
int vmin = 10, vmax = 65, smin = 30;
// User draws box around object to track. This triggers CAMShift to start tracking
static void onMouse(int event, int x, int y, int, void*)
{
	if (selectObject)
	{
		selection.x = MIN(x, origin.x);
		selection.y = MIN(y, origin.y);
		selection.width = std::abs(x - origin.x);
		selection.height = std::abs(y - origin.y);

		selection &= Rect(0, 0, image.cols, image.rows);
	}

	switch (event)
	{
	case EVENT_LBUTTONDOWN:
		origin = Point(x, y);
		selection = Rect(x, y, 0, 0);
		selectObject = true;
		break;
	case EVENT_LBUTTONUP:
		selectObject = false;
		if (selection.width > 0 && selection.height > 0)
			trackObject = -1;   // Set up CAMShift properties in main() loop
		break;
	}
}

void CMFCworkDlg::OnBnClickedButton20()
{
	// CamShift color-histogram tracking on a video file; select the target with the mouse in the paused first frame
	int nFrmNum;

	Mat pframe;
	VideoCapture pCapture;
	pCapture = VideoCapture("big_2.avi");

	Rect trackWindow;
	int hsize = 16;
	float hranges[] = { 0,180 };
	const float* phranges = hranges;

	namedWindow("Histogram", 0);
	namedWindow("CamShift Demo", 0);

	setMouseCallback("CamShift Demo", onMouse, 0);

	createTrackbar("Vmin", "CamShift Demo", &vmin, 256, 0);
	createTrackbar("Vmax", "CamShift Demo", &vmax, 256, 0);
	createTrackbar("Smin", "CamShift Demo", &smin, 256, 0);


	Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
	bool paused = true;
	pCapture >> frame;
	imshow("CamShift Demo", frame);
	for (;;)
	{
		if (!paused)
		{
			pCapture >> frame;
			if (frame.empty())
				break;
		}

		frame.copyTo(image);

		if (!paused)
		{
			cvtColor(image, hsv, COLOR_BGR2HSV);
			if (trackObject)
			{
				int _vmin = vmin, _vmax = vmax;

				inRange(hsv, Scalar(0, smin, MIN(_vmin, _vmax)),
					Scalar(180, 256, MAX(_vmin, _vmax)), mask);
				int ch[] = { 0, 0 };
				hue.create(hsv.size(), hsv.depth());
				mixChannels(&hsv, 1, &hue, 1, ch, 1);

				if (trackObject < 0)
				{
					// Object has been selected by user, set up CAMShift search properties once
					Mat roi(hue, selection), maskroi(mask, selection);
					calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
					normalize(hist, hist, 0, 255, NORM_MINMAX);

					trackWindow = selection;
					trackObject = 1; // Don't set up again, unless user selects new ROI

					histimg = Scalar::all(0);
					int binW = histimg.cols / hsize;
					Mat buf(1, hsize, CV_8UC3);
					for (int i = 0; i < hsize; i++)
						buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i * 180. / hsize), 255, 255);
					cvtColor(buf, buf, COLOR_HSV2BGR);

					for (int i = 0; i < hsize; i++)
					{
						int val = saturate_cast<int>(hist.at<float>(i) * histimg.rows / 255);
						rectangle(histimg, Point(i * binW, histimg.rows),
							Point((i + 1) * binW, histimg.rows - val),
							Scalar(buf.at<Vec3b>(i)), -1, 8);
					}
				}

				// Perform CAMShift
				calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
				backproj &= mask;
				RotatedRect trackBox = CamShift(backproj, trackWindow,
					TermCriteria(TermCriteria::EPS | TermCriteria::COUNT, 10, 1));
				if (trackWindow.area() <= 1)
				{
					int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5) / 6;
					trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
						trackWindow.x + r, trackWindow.y + r) &
						Rect(0, 0, cols, rows);
				}

				if (backprojMode)
					cvtColor(backproj, image, COLOR_GRAY2BGR);
				ellipse(image, trackBox, Scalar(0, 0, 255), 3, LINE_AA);
			}
		}
		else if (trackObject < 0)
			paused = false;

		if (selectObject && selection.width > 0 && selection.height > 0)
		{
			Mat roi(image, selection);
			bitwise_not(roi, roi);
		}

		imshow("CamShift Demo", image);
		imshow("Histogram", histimg);

		char c = (char)waitKey(10);
		if (c == 27)
			break;

		switch (c)
		{
		case 'b':
			backprojMode = !backprojMode;
			break;
		case 'c':
			trackObject = 0;
			histimg = Scalar::all(0);
			break;
		case 'h':
			showHist = !showHist;
			if (!showHist)
				destroyWindow("Histogram");
			else
				namedWindow("Histogram", 1);
			break;
		case 'p':
			paused = !paused;
			break;
		default:
			;
		}
	}



}

// Haar cascade face detection from the camera
void CMFCworkDlg::OnBnClickedButton21()
{
	CascadeClassifier faceCascade;
	faceCascade.load("haarcascade_frontalface_alt2.xml");   // load the face classifier
	VideoCapture capture;
	capture.open(0);   // open the default camera
	//	capture.open("video.avi");    // or open a video file instead
	if (!capture.isOpened())
	{
		_cprintf("open camera failed. \n");
		return;
	}
	Mat img, imgGray;
	vector<Rect> faces;
	while (1)
	{
		capture >> img;   // grab a frame into img
		if (img.empty()) continue;
		if (img.channels() == 3)
			cvtColor(img, imgGray, COLOR_BGR2GRAY);   // camera frames are BGR, not RGB
		else
		{
			imgGray = img;
		}
		double start = GetTickCount();
		faceCascade.detectMultiScale(imgGray, faces, 1.2, 6, 0, Size(0, 0));   // detect faces
		double end = GetTickCount();
		_cprintf("run time: %f ms\n", (end - start));
		if (faces.size() > 0)
		{
			for (int i = 0; i < faces.size(); i++)
			{
				rectangle(img, Point(faces[i].x, faces[i].y), Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height), Scalar(0, 255, 0), 1, 8);
			}
		}
		imshow("CamerFace", img);   // show the annotated frame
		if (waitKey(1) != -1)
			break;   // any key exits
	}
}
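
// A minimal sketch, not from the original post: the OpenCV face-detection samples typically
// equalize the grayscale frame before detectMultiScale; a small preprocessing helper for that.
void detect_faces_sketch(CascadeClassifier& cascade, const Mat& frame, vector<Rect>& faces)
{
	Mat gray;
	cvtColor(frame, gray, COLOR_BGR2GRAY);
	cv::equalizeHist(gray, gray);          // boost contrast to help the Haar cascade
	cascade.detectMultiScale(gray, faces, 1.2, 6, 0, Size(30, 30));
}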


void CMFCworkDlg::OnBnClickedButton22()
{
	// Cascade-based vehicle detection on a video file
	VideoCapture pCapture;
	Mat pframe;
	pCapture = VideoCapture("car.avi");
	CascadeClassifier carCascade;
	carCascade.load("ar-svm-model.xml");   // load the vehicle cascade classifier
	vector<Rect> cars;
	
	while (1)
	{
		pCapture >> pframe;
		if (pframe.data == NULL)
			break;
		 
		double start = GetTickCount();
		carCascade.detectMultiScale(pframe, cars, 1.3, 4, 0, Size(20, 20), Size(90, 90));   // detect vehicles
		if (cars.size() > 0)
		{
			for (int i = 0; i < cars.size(); i++)
			{
				rectangle(pframe, Point(cars[i].x, cars[i].y), Point(cars[i].x + cars[i].width, cars[i].y + cars[i].height), Scalar(0, 255, 0), 1, 8);
			}
		}
		double  end = GetTickCount();
		
		_cprintf("end - start=%f ms \n", end - start);
		imshow("result", pframe);

		
		//char c = cvWaitKey(10);
		//if (c == 27) break;
		if (waitKey(1) != -1)
			break;   // any key exits

	}
	
}


void CMFCworkDlg::OnBnClickedButton23()
{
	// DNN face detection with the OpenCV SSD face detector (Caffe model)
	size_t inWidth = 300;
	size_t inHeight = 300;
	double inScaleFactor = 1.0;
	Scalar meanVal(104.0, 177.0, 123.0);
	float min_confidence = 0.5;
	String modelConfiguration = "deploy.prototxt";
	String modelBinary = "res10_300x300_ssd_iter_140000.caffemodel";
	//! [Initialize network]
	dnn::Net net = dnn::readNetFromCaffe(modelConfiguration, modelBinary);
	//! [Initialize network]
	if (net.empty())
	{
		cerr << "Can't load network by using the following files: " << endl;
		cerr << "prototxt:   " << modelConfiguration << endl;
		cerr << "caffemodel: " << modelBinary << endl;
		cerr << "Models are available here:" << endl;
		cerr << "<OPENCV_SRC_DIR>/samples/dnn/face_detector" << endl;
		cerr << "or here:" << endl;
		cerr << "https://github.com/opencv/opencv/tree/master/samples/dnn/face_detector" << endl;
		exit(-1);
	}
	VideoCapture cap(0);//must be -1
	if (!cap.isOpened())
	{
		_cprintf("Couldn't open camera : \n");
		return;
	}
	for (;;)//while(1)
	{
		Mat frame;
		cap >> frame; // get a new frame from camera/video or read image
		if (frame.empty())break;
		if (frame.channels() == 4)cvtColor(frame, frame, COLOR_BGRA2BGR);
		//! [Prepare blob]
		Mat inputBlob = dnn::blobFromImage(frame, inScaleFactor,
			Size(inWidth, inHeight),
			meanVal, false, false); //Convert Mat to batch of images
									//! [Set input blob]
		net.setInput(inputBlob, "data"); //set the network input
										 //! [Make forward pass]
		Mat detection = net.forward("detection_out"); //compute output

		vector<double> layersTimings;
		double freq = getTickFrequency() / 1000;
		double time = net.getPerfProfile(layersTimings) / freq;
		Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
		ostringstream ss;
		ss << "FPS: " << 1000 / time << " ; time: " << time << " ms";
		putText(frame, ss.str(), Point(20, 20), 0, 0.5, Scalar(255, 0, 255));

		float confidenceThreshold = min_confidence;
		for (int i = 0; i < detectionMat.rows; i++)
		{
			float confidence = detectionMat.at<float>(i, 2);
			if (confidence > confidenceThreshold)
			{
				auto xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
				auto yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
				auto xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
				auto yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);
				Rect object(xLeftBottom, yLeftBottom,
					(xRightTop - xLeftBottom),
					(yRightTop - yLeftBottom));
				rectangle(frame, object, Scalar(0, 255, 0));
				ss.str("");
				ss << confidence;
				String conf(ss.str());
				String label = "Face: " + conf;
				int baseLine = 0;
				Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
				rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom - labelSize.height),
					Size(labelSize.width, labelSize.height + baseLine)),
					//Scalar(255, 255, 255), CV_FILLED);
					Scalar(255, 255, 255), FILLED);
				putText(frame, label, Point(xLeftBottom, yLeftBottom),
					FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));
			}
		}
		imshow("detections", frame);
		if (waitKey(1) != -1) break;
	}
}
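
// A minimal sketch, not from the original post: explicitly pinning the DNN to the default
// OpenCV CPU backend, which can be useful when the build offers several backends.
void configure_dnn_backend_sketch(dnn::Net& net)
{
	net.setPreferableBackend(dnn::DNN_BACKEND_OPENCV);
	net.setPreferableTarget(dnn::DNN_TARGET_CPU);
}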
