Learning OpenCV with Xiao Qiongqiong~

While organizing my notes recently, I found the code and short write-ups from when I was learning OpenCV, so I'm posting them here~ The main calling code is at the very bottom. Please credit the source when reposting~

#include <opencv2/xfeatures2d/nonfree.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/ml/ml.hpp>
#include <iostream>
#include <fstream>
#include <omp.h>
#include <time.h>
#include <string.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/objdetect/objdetect.hpp>


using namespace cv;
using namespace std;
using namespace cv::ml;

#define WINDOW_WIDTH 400

Mat img1 = imread("1.jpg", IMREAD_ANYCOLOR);
Mat img2 = imread("2.jpg");

/* Read and display an image */
void desply(string img_name)
{
    Mat img = imread(img_name, IMREAD_ANYCOLOR);
    if (img.empty())
    {
        return;
    }
    Mat gray, res;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    blur(gray, res, Size(7, 7)); // mean filter: blurs the image
//  Mat ele = getStructuringElement(MORPH_RECT, Size(15, 15));
//  erode(img, res, ele);  // erosion
//  dilate(img, res, ele); // dilation -- erosion and dilation both operate on the white (highlighted) parts
    Canny(res, res, 3, 3); // Canny operator; it accepts only single-channel input. Of threshold1 and threshold2, the smaller controls edge linking and the larger controls the initial segmentation of strong edges
//  Mat M(256, 256, CV_8UC3, Scalar(0, 0, 255));
//  Mat roi = img(Rect(10, 10, 900, 800)); // region of interest
//  Mat roi = img(Range(10, 900), Range(10, 800));
//  imwrite("imge.jpg", res); // write the image
//  namedWindow("Example", WINDOW_AUTOSIZE);
    imshow("Example", res);
    waitKey(0);
//  destroyWindow("Example");

}
////////////////////////////////////////////////////////////////////////////////
/* Write a video */
void writevid()
{
    Size s = Size(320, 240);
    VideoWriter wrt("w.avi", VideoWriter::fourcc('M', 'J', 'P', 'G'), 25, s);
    if (!wrt.isOpened())
        return;
    Mat fr(s, CV_8UC3);
    for (int i = 0; i < 100; i++)
    {
        fr = Scalar::all(0);
        char text[128];
        snprintf(text, sizeof(text), "%d", i);
        putText(fr, text, Point(s.width / 3, s.height / 2), FONT_HERSHEY_SCRIPT_SIMPLEX, 3, Scalar(0, 0, 255), 1, LINE_8);
        wrt << fr;
    }

}
/////////////////////////////////////////////////////////////////////////////////////
/* Read a video */
void readvid()
{
    VideoCapture cap("w.avi");
    if (!cap.isOpened())
        return;
    namedWindow("edgs", 1);
    for (;;)
    {
        Mat fra;
        cap >> fra;
        if (fra.empty())
            return;
        imshow("edgs", fra);
        if (waitKey(30) > 0)
            break;
    }
}
///////////////////////////////////////////////////////////////////////////////////
/* Image blending */
void merge()
{
    Mat img2 = imread("3.jpg", IMREAD_ANYCOLOR);
    if (img2.empty())
    {
        return;
    }
    Mat roil;
    roil = img1(Rect(200, 200, img2.cols, img2.rows));
    //addWeighted(roil, 0.5, img2, 0.3, 0, roil);
    Mat mask = imread("3.jpg", 0); // load the mask: copyTo copies only the pixels where the mask is non-zero
    img2.copyTo(roil, mask);
}
////////////////////////////////////////////////////////////////////
/* Create a trackbar */

const int g_max = 100;
int g_slid;

void ontrack(int, void*)
{
    double g_alphav1 = (double) g_slid / g_max;
    double g_alphav2 = 1.0 - g_alphav1;
    Mat dst;
    addWeighted(img1, g_alphav1, img2, g_alphav2, 0.0, dst);
    imshow("res", dst);
}

void track(Mat &img1, Mat &img2)
{
    namedWindow("res", 1);
    char TrackbarName[] = "hello"; // must be null-terminated to be usable as a C string
    g_slid = 70;
    createTrackbar(TrackbarName, "res", &g_slid, g_max, ontrack);
    ontrack(g_slid, 0);
    cout << getTrackbarPos(TrackbarName, "res") << endl;
    waitKey();
}
////////////////////////////////////////////////////////////////////////////////////////
/* Mouse interaction */

Rect g_rectangle;
bool g_bDrawingBox = false;
RNG g_rng(12345); // random number generator

void DrawRectangle(Mat &tempImage, Rect g_rectangle)
{
    rectangle(tempImage, g_rectangle.tl(), g_rectangle.br(), Scalar(g_rng.uniform(0, 255)));
}

void on_MouseHandle(int event, int x, int y, int flags, void* userdata)
{
    Mat& img = *(Mat*)userdata;
    switch (event)
    {
    case EVENT_MOUSEMOVE:
    {
        if (g_bDrawingBox)
        {
            g_rectangle.width = x - g_rectangle.x;
            g_rectangle.height = y - g_rectangle.y;
        }
        break;
    }
    case EVENT_LBUTTONDOWN:
    {
        g_bDrawingBox = true;
        g_rectangle = Rect(x, y, 0, 0);
    }
    break;
    case EVENT_LBUTTONUP:
    {
        g_bDrawingBox = false;
        if (g_rectangle.width < 0)
        {
            g_rectangle.x += g_rectangle.width;
            g_rectangle.width *= -1;
        }
        if (g_rectangle.height < 0)
        {
            g_rectangle.y += g_rectangle.height;
            g_rectangle.height *= -1;
        }
        DrawRectangle(img, g_rectangle);
    }
    }
}


void Mousetrack()
{
    g_rectangle = Rect(-1, -1, 0, 0);
    Mat srcImage(600, 800, CV_8UC3), tempImage;
    srcImage.copyTo(tempImage);
    srcImage = Scalar::all(0);
    namedWindow("Mouse", 1);
    setMouseCallback("Mouse", on_MouseHandle, (void*) &srcImage);
    while (1)
    {
        srcImage.copyTo(tempImage);
        if (g_bDrawingBox)
            DrawRectangle(tempImage, g_rectangle);
        imshow("Mouse", tempImage);
        if (waitKey(10) == 27) break;
    }
}
///////////////////////////////////////////////////////////////////////////

/* Test printing a vector */
void tstVec()
{
    vector<Point2f> points(20);
    for (int i = 0; i < points.size(); ++i)
    {
        points[i] = Point2f((float)(i * 5), (float)(i % 7));
    }
    cout << points << endl; // ??? you can actually print it directly like this ~~~ ^_*
}
/////////////////////////////////////////////////////////////////////////////
/* Draw an ellipse */
void DrawEllipse(Mat img, double angle)
{
    ellipse(img,Point(200,200),Size(200,200),angle,0,360,Scalar(255,129,0),2,8);
    imshow("res",img);
}
/////////////////////////////////////////////////////////////////////////////////
/* Draw a filled circle */
void DrawFilledCircle()
{
    int thickness = -1;
    int lineType = 8;
    Point center = Point2f(WINDOW_WIDTH / 2, WINDOW_WIDTH / 2);
    circle(img1,center,WINDOW_WIDTH/32,Scalar(0,0,255),thickness,lineType);

}
//////////////////////////////////////////////////////////////////////////////
/* Custom drawing function: polygon */
void DrawPolygon()
{
    Point rookPoints[1][20];
    rookPoints[0][0] = Point(WINDOW_WIDTH / 4, 7 * WINDOW_WIDTH / 8);
    rookPoints[0][1] = Point(3 * WINDOW_WIDTH / 4, 7 * WINDOW_WIDTH / 8);
    rookPoints[0][2] = Point(3 * WINDOW_WIDTH / 4, 13 * WINDOW_WIDTH / 16);
    rookPoints[0][3] = Point(11 * WINDOW_WIDTH / 16, 13 * WINDOW_WIDTH / 16);
    rookPoints[0][4] = Point(19 * WINDOW_WIDTH / 32, 3 * WINDOW_WIDTH / 8);
    rookPoints[0][5] = Point(3 * WINDOW_WIDTH / 4, 3 * WINDOW_WIDTH / 8);
    rookPoints[0][6] = Point(3 * WINDOW_WIDTH / 4, WINDOW_WIDTH / 8);
    rookPoints[0][7] = Point(26 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 8);
    rookPoints[0][8] = Point(26 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 4);
    rookPoints[0][9] = Point(22 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 4);
    rookPoints[0][10] = Point(22 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 8);
    rookPoints[0][11] = Point(18 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 8);
    rookPoints[0][12] = Point(18 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 4);
    rookPoints[0][13] = Point(14 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 4);
    rookPoints[0][14] = Point(14 * WINDOW_WIDTH / 40, WINDOW_WIDTH / 8);
    rookPoints[0][15] = Point(WINDOW_WIDTH / 4, WINDOW_WIDTH / 8);
    rookPoints[0][16] = Point(WINDOW_WIDTH / 4, 3 * WINDOW_WIDTH / 8);
    rookPoints[0][17] = Point(13 * WINDOW_WIDTH / 32, 3 * WINDOW_WIDTH / 8);
    rookPoints[0][18] = Point(5 * WINDOW_WIDTH / 16, 13 * WINDOW_WIDTH / 16);
    rookPoints[0][19] = Point(WINDOW_WIDTH / 4, 13 * WINDOW_WIDTH / 16);

    const Point *pts[1] = { rookPoints[0] };
    int npts[] = { 20 };
    fillPoly(img1, pts, npts, 1, Scalar(0, 0, 255), 8);

}
///////////////////////////////////////////////////////////////////////////////
/* Custom drawing function: line */
void DrawLine()
{
    Point pt1 = Point2f(1.0, 1.0);
    Point pt2 = Point2f(100.0, 100.0);
    line(img1,pt1,pt2,Scalar(0,0,255),1,8);
}
/////////////////////////////////////////////////////////////////////////////////////
/* Color space reduction -- pointer access */
void colorReduce()
{
    int coefficient = 32;
//  Mat resImg = img1.clone();
    int Rows = img1.rows;
    int Cols = img1.cols*img1.channels();
    for (int i = 0; i < Rows; i++)
    {
        uchar *p = img1.ptr<uchar>(i);
        for (int j = 0; j < Cols; j++)
        {
            p[j] = p[j] / coefficient*coefficient + coefficient / 2;
        }
    }
}
///////////////////////////////////////////////////////////////////////////////////////
/* Color space reduction -- iterator access */
void colorReduce1()
{
    int coefficient = 32;
    //  Mat resImg = img1.clone();
    Mat_<Vec3b>::iterator it = img1.begin<Vec3b>();
    Mat_<Vec3b>::iterator itend = img1.end<Vec3b>();
    for (; it != itend; ++it)
    {
        (*it)[0]= (*it)[0] / coefficient*coefficient + coefficient / 2;
        (*it)[1] = (*it)[1] / coefficient*coefficient + coefficient / 2;
        (*it)[2] = (*it)[2] / coefficient*coefficient + coefficient / 2;
    }

}
/////////////////////////////////////////////////////////////////////////////////////////////
/* Color space reduction -- dynamic address access with at() */
void colorReduce2()
{
    int coefficient = 32;
    //  Mat resImg = img1.clone();
    int Rows = img1.rows;
    int Cols = img1.cols;
    for (int i = 0; i < Rows; i++)
    {
        for (int j = 0; j < Cols; j++)
        {
            img1.at<Vec3b>(i,j)[0]= img1.at<Vec3b>(i, j)[0] / coefficient*coefficient + coefficient / 2;
            img1.at<Vec3b>(i, j)[1] = img1.at<Vec3b>(i, j)[1] / coefficient*coefficient + coefficient / 2;
            img1.at<Vec3b>(i, j)[2] = img1.at<Vec3b>(i, j)[2] / coefficient*coefficient + coefficient / 2;
        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////

/* Split color channels and blend within a multi-channel image */
void spli_merge_Image()
{
    Mat img3 = imread("3.jpg",0);
    vector<Mat> channels;
    Mat imageRoi;
    Mat roi;
    Mat resl;
    split(img1, channels);
    imageRoi = channels.at(1);
    roi = imageRoi(Rect(200,200,img3.cols,img3.rows));
    addWeighted(roi,1.0,img3,0.3,0,roi);
    merge(channels,resl);
    imshow("res", resl);
    waitKey();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Image contrast and brightness adjustment */
int Max_contrast = 200;
int Max_bright = 200;
int bright;
int contrast;
void BC_callback(int, void*)
{
    Mat dst = img1.clone(); // work on a copy so repeated callbacks don't compound the adjustment
    for (int i = 0; i < img1.rows; i++)
    {
        for (int j = 0; j < img1.cols; j++)
        {
            dst.at<Vec3b>(i, j)(0) = saturate_cast<uchar>((contrast*0.01)*(img1.at<Vec3b>(i, j)(0)) + bright);
            dst.at<Vec3b>(i, j)(1) = saturate_cast<uchar>((contrast*0.01)*(img1.at<Vec3b>(i, j)(1)) + bright);
            dst.at<Vec3b>(i, j)(2) = saturate_cast<uchar>((contrast*0.01)*(img1.at<Vec3b>(i, j)(2)) + bright);
        }
    }
    imshow("res", dst);
}
void bright_contrast_adjust()
{
    namedWindow("res", 1);
    bright = 0;
    contrast = 100;
    createTrackbar("c", "res", &contrast, Max_contrast, BC_callback);
    createTrackbar("b", "res", &bright, Max_bright, BC_callback);
    BC_callback(0, 0);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/* Regenerate images from saved pixel info */
void Make_and_save_new_pic()
{
    int count = 0; // number of images
    vector<string> img_names; // unordered image names
    ifstream in("main.txt");
    string s;
    while (getline(in, s))
    {
        count++;
        img_names.push_back(s);
    }
    vector<vector<vector<int>>> info(count);  // per-pixel info read from the txt file
    vector<Mat> imVec(img_names.size());
    char name[16] = { 0 }; // large enough for names like "10.jpg" (5 bytes would overflow)
    int idx = 0;
    ifstream fin("f.txt");
    string ss;
    string item;
    int i = 0;
    imVec[i] = imread(img_names[i], IMREAD_ANYCOLOR);
    //  imVec[i].rows = 4;
    //  imVec[i].cols = 2;
    info[i].resize(imVec[i].rows);
    int j = 0;
    int k = 0;
    while (getline(fin, ss, ' '))
    {
        istringstream items(ss);
        while (items >> item) {
            if (j<imVec[i].rows){
                if (info[i][j].size()< imVec[i].cols){
                    info[i][j].push_back(atoi(item.c_str()));
                    k++;
                }
                else{
                    k = 1;
                    j++;
                    if (j < imVec[i].rows){
                        info[i][j].push_back(atoi(item.c_str()));
                    }
                    else{
                        i++;
                        j = 0;
                        k = 1;
                        imVec[i] = imread(img_names[i], IMREAD_ANYCOLOR);
                        info[i].resize(imVec[i].rows);
                        info[i][j].push_back(atoi(item.c_str()));
                    }
                }
            }
        }
    }
    for (int k = 0; k < count; k++){
        for (int i = 0; i < imVec[k].rows; i++){
            for (int j = 0; j < imVec[k].cols; j++){
                if (info[k][i][j] == 0){
                    imVec[k].at<Vec3b>(i, j)[0] = 255;
                    imVec[k].at<Vec3b>(i, j)[1] = 255;
                    imVec[k].at<Vec3b>(i, j)[2] = 255;
                }
            }
        }
        imshow("res", imVec[k]);
        snprintf(name, sizeof(name), "%d.jpg", idx++); // save several images in a row
        imwrite(name, imVec[k]); // replaces the old IplImage/cvSaveImage pair
    }
}
///////////////////////////////////////////////////////////

/* Discrete Fourier transform */
/* A Fourier series is periodic and continuous in the time domain, and aperiodic and discrete in the frequency domain */
/* The Fourier transform turns a signal that is aperiodic and continuous in the time domain into one that is aperiodic and continuous in the frequency domain */
/* In the frequency domain of an image, the high-frequency part carries detail and texture, while the low-frequency part carries the overall contours */
/* In image processing, the Fourier transform supports image enhancement and denoising, segmentation and edge detection, feature extraction, compression, and more */
/* Convolve two 2-D real matrices */
void convolveDFT()
{
    Mat InputA = (Mat_<float>(2, 2) <<
        1, 2,
        3, 4);
    Mat InputB = (Mat_<float>(2, 2) <<
        -1, 1,
        -2, 2);
    /* When A and B have the same size, the result is just the single value at the center point (the difference between this filtering and true convolution is that the kernel must be rotated 180 degrees about its center) */
    Mat OutputC((abs(InputA.rows - InputB.rows) + 1), (abs(InputA.cols - InputB.cols) + 1), InputA.type()); // initialize the output matrix
    Size dftsize;
    dftsize.width = getOptimalDFTSize(InputA.cols + InputB.cols - 1); // the DFT is fastest when the size factors into 2s, 3s and 5s; this call pads the size to meet that
    dftsize.height = getOptimalDFTSize(InputA.rows + InputB.rows - 1);
    Mat tempA(dftsize, InputA.type(), Scalar::all(0));
    Mat tempB(dftsize, InputB.type(), Scalar::all(0));
    Mat roiA(tempA, Rect(0, 0, InputA.cols, InputA.rows));
    InputA.copyTo(roiA);
    Mat roiB(tempB, Rect(0, 0, InputB.cols, InputB.rows));
    InputB.copyTo(roiB);
    //cout << tempA << endl;
    dft(tempA, tempA, 0, InputA.rows);
    //cout << tempA << endl;
    dft(tempB, tempB, 0, InputB.rows);
    mulSpectrums(tempA, tempB, tempA, 0); // multiply the spectra; the result is stored in A
    //cout << tempA << endl;
    dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, OutputC.rows); // DFT_INVERSE (not DCT_INVERSE): transform back to the spatial domain
    tempA(Rect(0, 0, OutputC.cols, OutputC.rows)).copyTo(OutputC);
    //cout << OutputC << endl;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////

/* Discrete Fourier transform of an image */
void DFT()
{
    Mat imgt = imread("a.jpg", 0);
    Size optSize;
    optSize.width = getOptimalDFTSize(imgt.cols);
    optSize.height = getOptimalDFTSize(imgt.rows);
    Mat padded;
    copyMakeBorder(imgt,padded,0,optSize.height-imgt.rows,0,optSize.width-imgt.cols,BORDER_CONSTANT,Scalar::all(0));
    // allocate storage for the real and imaginary parts of the Fourier transform
    Mat planes[] = { Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F) };
    Mat complexI;
    merge(planes, 2, complexI);
    // in-place discrete Fourier transform
    dft(complexI, complexI);
    split(complexI, planes);
    magnitude(planes[0], planes[1], planes[0]); // compute the magnitude
    Mat magnitudeImg = planes[0];
    magnitudeImg += Scalar::all(1);
    log(magnitudeImg, magnitudeImg); // logarithmic scale: M1 = log(1 + M)
    magnitudeImg = magnitudeImg(Rect(0, 0, magnitudeImg.cols & -2, magnitudeImg.rows & -2)); // crop to even dimensions, discarding an odd row/column if present
    int cx = magnitudeImg.cols / 2;
    int cy = magnitudeImg.rows / 2;
    Mat q0(magnitudeImg, Rect(0, 0, cx, cy));
    Mat q1(magnitudeImg, Rect(cx, 0, cx, cy));
    Mat q2(magnitudeImg, Rect(0, cy, cx, cy));
    Mat q3(magnitudeImg, Rect(cx, cy, cx, cy));
    // swap quadrants 1<->3 and 2<->4 so that DC and the low frequencies sit in the middle, i.e. (0,0) moves to the center for display
    Mat tmp;
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);
    // normalize for display
    normalize(magnitudeImg, magnitudeImg, 0, 1, NORM_MINMAX);
    imshow("dft", magnitudeImg);
}
////////////////////////////////////////////////////////////////////////////////////////

/* Write XML/YAML files */
void wr_Xml()
{
    FileStorage fs("test.yaml", FileStorage::WRITE);
    fs << "frameCount" << 5;
    time_t rawtime;
    time(&rawtime);
    fs << "calibrationDate" << asctime(localtime(&rawtime));
    Mat cameraMatrix = (Mat_<double>(3, 3) << 1000, 0, 320, 0, 1000, 240, 0, 0, 1);
    fs << "cameraMatrix" << cameraMatrix;
    fs << "features" << "[";
    for (int i = 0; i < 3; i++)
    {
        int x = rand() % 640;
        int y = rand() % 480;
        fs << "{:" << "x" << x << "y" << y;  //{: 就不换行了 注意语法规则 注意"["和"[:"是不一样的语法规则~
        fs << "}";
    }
    fs << "]";
    fs.release();
}
//////////////////////////////////////////////////////////////////
/* Read XML/YAML files */
void re_Xml()
{
    system("color 6F");
    FileStorage fs2("test.yaml", FileStorage::READ);
    // method 1: operate on the FileNode
    int frameCount = (int)fs2["frameCount"];
    string data;
    // method 2: use the FileNode >> operator
    fs2["calibrationDate"] >> data;
    Mat cameraMatrix;
    fs2["cameraMatrix"] >> cameraMatrix;
    FileNode feature = fs2["features"];
    //vector<uchar> feature1;
    //fs2["features"] >> feature1; // this form works too, but not here: the node holds mappings, not uchars
    FileNodeIterator it = feature.begin(), it_end = feature.end();
    for (; it != it_end; ++it)
    {
        cout << (int)(*it)["x"] << " " << (int)(*it)["y"] << endl;
    }
    /*for (int i = 0; i < feature1.size(); i++)
    {
        cout << (int)feature1[i]["x"] << " " << (int)feature1[i]["y"] << endl;
    }*/
}
////////////////////////////////////////////////////////////////
/* Filtering */
/* Gaussian filtering means filtering with a Gaussian function as the kernel;
Gaussian blur is Gaussian low-pass filtering;
low-pass blurs, high-pass sharpens */
/* Three linear filters: box filter boxFilter, mean filter blur, Gaussian filter GaussianBlur */
/* Non-linear filters: median filter and bilateral filter. The bilateral filter suits shot noise rather than Gaussian noise, i.e. occasional very large pixel values that a Gaussian filter cannot remove */
/* Median filter: replace each pixel with the median gray value of its neighborhood -- removes impulse and salt-and-pepper noise while preserving edge detail */
/* Bilateral filter: a Gaussian filter weighted by spatial distribution; it preserves edge pixels well, but because it keeps so much high-frequency content it cannot cleanly remove high-frequency noise in color images and only filters low-frequency content well */
void M_mediablur()
{
    Mat out;
    medianBlur(img1, out, 7);
    imshow("res", out);
}
void M_bilaterblur()
{
    Mat out;
    bilateralFilter(img1, out, 25, 25 * 25, 25 / 2);
    imshow("res", out);
}
void M_blur()
{
    Mat out;
    Mat out1;
    Mat out2;
    boxFilter(img1, out, -1, Size(5, 5));
    blur(img1, out1, Size(5, 5));
    GaussianBlur(img1, out2, Size(5, 5), 0, 0);
    imshow("res", out);;
    imshow("res1", out1);
    imshow("res2", out2);
}
int g_first_value;
int max_g_value = 50;
Mat out;
void G_blur(int,void *)
{
    GaussianBlur(img1, out, Size(g_first_value*2+1, g_first_value*2+1), 0, 0); // the Gaussian kernel size must be odd
    imshow("res", out);
}
void blur_track()
{
    namedWindow("res");
    g_first_value = 4;
    createTrackbar("blur", "res", &g_first_value, max_g_value,G_blur);
    G_blur(g_first_value,0);
}

///////////////////////////////////////////////////////////////////
/* Morphological filtering */
/* Built from the two basic operations, erosion and dilation: opening, closing, morphological gradient, top hat and black hat */
/* Opening -- erode then dilate -- removes small objects, separates objects at thin necks, and smooths the boundary of larger objects without noticeably changing their area */
/* Closing -- dilate then erode -- removes small dark holes */
/* Morphological gradient -- the difference between the dilated and the eroded image; it highlights blob edges, so it can be used to keep an object's edge contour */
/* Top hat -- the difference between the source image and its opening; it highlights regions brighter than the area around the original contours, and (per these notes) this effect does not depend on the chosen kernel size.
Often used to isolate patches slightly brighter than their neighborhood; with a large uniform background and small, regular objects it can be used for background extraction */
/* Black hat -- the difference between the closing and the source image; it highlights regions darker than the area around the original contours, and this effect depends on the chosen kernel size.
Used to isolate patches slightly darker than their neighborhood; the result shows very clean contours */
/********************* core function: morphologyEx() *********************/
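
/* The block above names morphologyEx() as the core function but nothing below demonstrates it,
so here is a minimal sketch (assuming the global img1 is loaded): each of the five compound
operations applied with the same 15x15 rectangular kernel. */
void M_morphologyEx()
{
    Mat out;
    Mat kernel = getStructuringElement(MORPH_RECT, Size(15, 15));
    morphologyEx(img1, out, MORPH_OPEN, kernel);     // opening: erode then dilate
    imshow("open", out);
    morphologyEx(img1, out, MORPH_CLOSE, kernel);    // closing: dilate then erode
    imshow("close", out);
    morphologyEx(img1, out, MORPH_GRADIENT, kernel); // gradient: dilation minus erosion
    imshow("gradient", out);
    morphologyEx(img1, out, MORPH_TOPHAT, kernel);   // top hat: source minus opening
    imshow("tophat", out);
    morphologyEx(img1, out, MORPH_BLACKHAT, kernel); // black hat: closing minus source
    imshow("blackhat", out);
}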

/* Flood fill -- the magic wand tool */
Mat g_srcImage = imread("4.jpg");
Mat g_dstImage;
Mat g_grayImage;
Mat g_maskImage;
int g_nFillMode = 1; // flood-fill mode
int g_nLowDIF = 20;
int g_nHighDIF = 20;
int g_nConnect = 4; // connectivity value in the low 8 bits of the floodFill flags
bool g_bIsColor = true; // whether we operate on the color image
bool g_UseMask = false; // whether to show the mask window
int g_nNewMaskVal = 255; // value painted into the mask (was declared bool, which truncated 255 to 1)

void onMouse(int event, int x, int y, int flags, void* userdata)
{
    if (event != EVENT_LBUTTONDOWN)
        return;
    Point seed = Point(x, y);
    int LowDIf = g_nFillMode == 0 ? 0 : g_nLowDIF; // zero-range flood fill or not
    int UpDif = g_nFillMode == 0 ? 0 : g_nHighDIF;
    int flag = g_nConnect + (g_nNewMaskVal << 8) + (g_nFillMode == 1 ? FLOODFILL_FIXED_RANGE : 0); // bits 8-15 of the flags hold the value written into the mask, hence the shift by 8
    int b = (unsigned)theRNG() & 255;
    int g = (unsigned)theRNG() & 255;
    int r = (unsigned)theRNG() & 255;
    Rect ccomp;
    Scalar newVal = g_bIsColor ? Scalar(b, g, r) : Scalar(r*0.299 + g*0.587 + b*0.114);
    Mat dst = g_bIsColor ? g_dstImage : g_grayImage;
    int area;
    if (g_UseMask)
    {
        /* Thresholding is a pixel-level operation: every pixel of a grayscale image has a gray value, and comparing each pixel against a threshold lets us filter out larger or smaller noise */
        threshold(g_maskImage, g_maskImage, 1, 128, THRESH_BINARY);
        area = floodFill(dst, g_maskImage, seed, newVal, &ccomp, Scalar(LowDIf, LowDIf, LowDIf), Scalar(UpDif, UpDif, UpDif), flag); // the mask must actually be passed to this overload
        imshow("mask", g_maskImage);
    }
    else
    {
        area = floodFill(dst, seed, newVal, &ccomp, Scalar(LowDIf, LowDIf, LowDIf), Scalar(UpDif, UpDif, UpDif), flag);
    }
    imshow("result", dst);
    cout << area << " pixels repainted" << endl;
}

void m_flood_full()
{
    g_dstImage = g_srcImage.clone();
    cvtColor(g_srcImage, g_grayImage, COLOR_BGR2GRAY);
    g_maskImage.create(g_srcImage.rows + 2, g_srcImage.cols + 2, CV_8UC1); // floodFill requires the mask to be 2 pixels wider and taller than the image
    namedWindow("result", 1);
    createTrackbar("max lower diff", "result", &g_nLowDIF, 255, 0); // no callback needed: floodFill reads the current value on every click
    createTrackbar("max upper diff", "result", &g_nHighDIF, 255, 0);
    setMouseCallback("result", onMouse, 0);
    while (1)
    {
        imshow("result", g_bIsColor ? g_dstImage : g_grayImage);
        int c = waitKey(0);
        if ((c & 255) == 27)
        {
            break;
        }
        switch ((char)c)
        {
            // key "1": toggle the result between the grayscale and the color image
        case '1':
            if (g_bIsColor)
            {
                cvtColor(g_srcImage, g_grayImage, COLOR_BGR2GRAY);
                g_maskImage = Scalar::all(0);
                g_bIsColor = 0;
            }
            else
            {
                g_srcImage.copyTo(g_dstImage);
                g_maskImage = Scalar::all(0);
                g_bIsColor = 1;
            }
            break;
            // key "2": show/hide the mask window
        case '2':
            if (g_UseMask)
            {
                destroyWindow("mask");
                g_UseMask = 0;
            }
            else
            {
                namedWindow("mask", 0);
                g_maskImage = Scalar::all(0);
                imshow("mask", g_maskImage);
                g_UseMask = 1;
            }
            break;
            // key "3": restore the original image
        case '3':
            g_srcImage.copyTo(g_dstImage);
            cvtColor(g_srcImage, g_grayImage, COLOR_BGR2GRAY);
            g_maskImage = Scalar::all(0);
            break;
            // key "4": zero-range flood fill
        case '4':
            g_nFillMode = 0;
            break;
            // key "5": gradient, fixed-range flood fill
        case '5':
            g_nFillMode = 1;
            break;
            // key "6": gradient, floating-range flood fill
        case '6':
            g_nFillMode = 2;
            break;
            // key "7": 4-connectivity in the low 8 bits of the flags
        case '7':
            g_nConnect = 4;
            break;
            // key "8": 8-connectivity in the low 8 bits of the flags
        case '8':
            g_nConnect = 8;
            break;
        }
    }
}
///////////////////////////////////////////////////////////////////////////////////
/* Image pyramids -- pyrUp up-samples (doubles the image size; named after the image size, which runs opposite to the pyramid direction), pyrDown down-samples; plus resize */
/* Gaussian pyramids are used to down-sample; Laplacian pyramids are used to up-sample from the bottom layers and can reconstruct the image to the largest possible extent, used together with the Gaussian pyramid */
/* The data the Gaussian pyramid loses is exactly what forms the Laplacian pyramid (see the sketch below) */
/* An important application of image pyramids is image segmentation: build a pyramid, establish "parent-child" relations between the corresponding pixels of Gi and G(i+1), do a fast initial segmentation on a low-resolution high layer, and then refine the segmentation layer by layer */
/* resize(): if an ROI is set, it resizes the ROI of the source image, or resizes the source and writes it into the ROI of the destination */
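
/* A minimal sketch of the claim above that the Laplacian pyramid is exactly what the Gaussian
pyramid loses: one Laplacian layer is the source minus the expanded version of its own
down-sampled image. (On 8-bit data subtract() saturates negatives at 0; use CV_16S for a faithful layer.) */
void M_laplacianLayer()
{
    Mat down, up, lap;
    pyrDown(img1, down);          // Gaussian pyramid step: G(i) -> G(i+1)
    pyrUp(down, up, img1.size()); // expand G(i+1) back to the size of G(i)
    subtract(img1, up, lap);      // L(i) = G(i) - up(G(i+1))
    imshow("laplacian layer", lap);
}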

void Pyramid()
{
    Mat dst_Imag;
    //resize(img1, dst_Imag, Size(img1.cols / 2, img1.rows / 2), (0, 0), (0, 0), 3);
    //resize(dst_Imag, dst_Imag, Size(dst_Imag.cols * 2, dst_Imag.rows * 2), (0, 0), (0, 0), 3);
    //pyrUp(img1, dst_Imag, Size(img1.cols * 2, img1.rows * 2));
    pyrDown(img1, dst_Imag, Size(img1.cols / 2, img1.rows / 2));
    imshow("res", dst_Imag);
}
/////////////////////////////////////////////////////////////////////////////////////
/* Thresholding */
Mat thre_dst;
Mat gray_Image;
int g_value = 50;
int g_th_mode = 3;
void on_THreshod(int, void*)
{
    //threshold(gray_Image, thre_dst, g_value, 255, g_th_mode);
    adaptiveThreshold(gray_Image, thre_dst, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, g_th_mode * 2 + 3, g_value); // blockSize must stay odd and greater than 1 even when the trackbar is at 0
    imshow("res", thre_dst);
}
void M_threshod()
{
    cvtColor(img1, gray_Image, COLOR_BGR2GRAY); // OpenCV stores images in BGR order
    namedWindow("res");
    createTrackbar("mode", "res", &g_th_mode, 4, on_THreshod);
    createTrackbar("value", "res", &g_value, 255, on_THreshod);
    on_THreshod(0, 0);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Edge detection */
/* Step 1: filtering (edge detection algorithms rely on the first and second derivatives of the image, which are sensitive to noise, so a filter is needed to improve the detector's behavior under noise) */
/* Step 2: enhancement (determine how strongly the intensity changes in each point's neighborhood so that points with significant change stand out, typically via the gradient magnitude) */
/* Step 3: detection (after enhancement many points have a large gradient; keep only some of them, typically by thresholding) */

/* Image masks -- in digital image processing, masks are mainly used for: (1) extracting a region of interest: multiply a pre-made ROI mask with the image; inside the ROI the values are unchanged, outside they become 0 (see the sketch below);
(2) shielding: mask out regions of the image so they are excluded from processing or from parameter computation, or, conversely, process or measure only the masked region;
(3) structural feature extraction: detect and extract features similar to the mask using similarity measures or image matching;
(4) producing images of special shapes. */
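
/* A minimal sketch of use (1) above (the circular mask is just an example shape): pixels
outside the mask are zeroed, pixels inside keep their values. */
void M_maskDemo()
{
    Mat mask = Mat::zeros(img1.size(), CV_8UC1);
    circle(mask, Point(img1.cols / 2, img1.rows / 2), 100, Scalar(255), FILLED);
    Mat roiOnly;
    img1.copyTo(roiOnly, mask); // only pixels where the mask is non-zero are copied
    imshow("masked", roiOnly);
}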

/* Canny edge detection, p. 249 */
/* 1: remove noise with a size-5 Gaussian kernel */
/* 2: compute gradient magnitude and direction with the Sobel filter (two 3x3 kernels, one horizontal and one vertical, convolved with the image plane to approximate the horizontal and vertical brightness differences) */

int c_value;
Mat c_dst;
Mat c_edge;
Mat c_gray;
void oncanny(int, void*)
{
    blur(c_gray, c_edge, Size(3, 3));
    Canny(c_edge, c_edge, c_value, c_value * 3, 3);
    c_dst = Scalar::all(0);
    img1.copyTo(c_dst, c_edge);
    imshow("s", c_dst);
}
void M_canny()
{
    namedWindow("Canny", WINDOW_AUTOSIZE);
    cvtColor(img1, c_gray, COLOR_BGR2GRAY);
    img1.copyTo(c_dst);
    createTrackbar("canny", "Canny", &c_value, 120, oncanny);
    oncanny(0, 0);
}
/* Sobel and Scharr operators */
/* With a 3x3 kernel the Sobel approximation error is fairly large; use the Scharr function in that case */
/* The Sobel operator combines Gaussian smoothing and differentiation, so the result resists noise better */
void M_sobel()
{
    Mat So_gray;
    Mat So_x;
    Mat So_y;
    Mat abs_So_x;
    Mat abs_So_y;
    Mat So_dst;
    cvtColor(img1, So_gray, COLOR_BGR2GRAY);
    Sobel(So_gray, So_x, CV_16S, 1, 0, 3, 1, 1, BORDER_DEFAULT); // gradient in the x direction
    //Scharr(So_gray, So_x, CV_16S, 1, 0, 1, 0, BORDER_DEFAULT);
    convertScaleAbs(So_x, abs_So_x);
    imshow("x", abs_So_x);
    Sobel(So_gray, So_y, CV_16S, 0, 1, 3, 1, 1, BORDER_DEFAULT); // gradient in the y direction
    //Scharr(So_gray, So_y, CV_16S, 0, 1, 1, 0, BORDER_DEFAULT);
    convertScaleAbs(So_y, abs_So_y);
    imshow("y", abs_So_y);
    // combine the gradients
    addWeighted(abs_So_x, 0.5, abs_So_y, 0.5, 0, So_dst);
    imshow("z", So_dst);
}

/* Laplacian operator */
/* Subtracting its Laplacian from an image enhances contrast */
void M_Laplacian()
{
    Mat lp_blur;
    Mat lp_gray_dst;
    Mat lp_dst;
    Mat abs_lp_dst;
    GaussianBlur(img1, lp_blur, Size(3, 3), 0, 0, BORDER_DEFAULT);
    cvtColor(lp_blur, lp_gray_dst, COLOR_BGR2GRAY); // convert the blurred image (the blur result was previously discarded)
    Laplacian(lp_gray_dst, lp_dst, CV_16S, 3, 1, 0, BORDER_DEFAULT);
    convertScaleAbs(lp_dst, abs_lp_dst);
    imshow("z", abs_lp_dst);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/* Hough transform -- in its most basic form it detects straight lines (segments) in a black-and-white image */
/* Track, for every point, the intersections of its corresponding curve in parameter space; if the number of curves passing through one intersection exceeds a threshold, the parameters at that intersection represent a line in the original image */
/* It is one of the basic techniques for recognizing geometric shapes in image processing */
/* Standard Hough transform (SHT -- HoughLines)
Multi-scale Hough transform (MSHT -- HoughLines)
Progressive probabilistic Hough transform (PPHT -- HoughLinesP) */
void M_Hough()
{
    Mat midImage;
    Mat dstImage;
    Canny(img1, midImage, 50, 200, 3);
    cvtColor(midImage, dstImage, COLOR_GRAY2BGR); // CV_GRAY2BGR in the C API

    vector<Vec2f> lines; // (rho, theta) pairs from the standard Hough transform; the loop below assumes this form
    HoughLines(midImage, lines, 1, CV_PI / 180, 150, 0, 0);
    //vector<Vec4i> lines; // HoughLinesP returns segment endpoints (x1,y1,x2,y2) instead, which would be drawn directly
    //HoughLinesP(midImage, lines, 1, CV_PI / 180, 80, 50, 10);
    for (int i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0];
        float theta = lines[i][1];
        Point pt1, pt2;
        double a = cos(theta);
        double b = sin(theta);
        double x0 = a*rho; // A = (x0, y0) is the foot of the perpendicular from the origin to the line (cvCeil rounds up, cvFloor rounds down, cvRound rounds to nearest)
        double y0 = b*rho;
        // walk 1000 pixels from A in both directions along the line's direction vector (-b, a) to get two drawable endpoints
        pt1.x = cvRound(x0 + 1000 * (-b)); // 1000 controls how far apart the two points are; pt1 lies on one side of A
        pt1.y = cvRound(y0 + 1000 * (a));
        pt2.x = cvRound(x0 - 1000 * (-b)); // pt2 lies on the other side of A
        pt2.y = cvRound(y0 - 1000 * (a));
        line(dstImage, pt1, pt2, Scalar(0, 0, 195), 1, LINE_AA);
    }
    imshow("b", dstImage);
}
/* Hough circle transform */
void M_HoughC()
{
    Mat img11 = imread("0.jpg");
    imshow("a", img11);
    Mat hmidimg;
    Mat hdstimg;
    cvtColor(img11, hmidimg, COLOR_BGR2GRAY);
    GaussianBlur(hmidimg, hmidimg, Size(9, 9), 2, 2);
    vector<Vec3f> circles;
    HoughCircles(hmidimg, circles, HOUGH_GRADIENT, 1.5, 10, 200, 100, 0, 0);
    for (int i = 0; i < circles.size(); i++)
    {
        Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        circle(img11, center, 3, Scalar(255, 255, 255), -1, 8, 0); // mark the center
        circle(img11, center, radius, Scalar(0, 255, 50), 3, 8, 0); // draw the outline (filled with -1 it would cover the content)
    }
    imshow("e", img11);
}
/* Remapping */
/* Remapping places the pixel at one position in an image at a specified position in another image. To complete the mapping, interpolation at non-integer pixel coordinates is needed,
because the pixel coordinates of the source and destination do not correspond one-to-one; in general we express the position of every pixel through the remap */
void M_remap()
{
    Mat redstimg;
    Mat redstimgx;
    Mat redstimgy;
    redstimg.create(img1.size(), img1.type());
    redstimgx.create(img1.size(), CV_32FC1);
    redstimgy.create(img1.size(), CV_32FC1);
    for (int i = 0; i < img1.rows; i++)
    {
        for (int j = 0; j < img1.cols; j++)
        {
            redstimgx.at<float>(i, j) = static_cast<float>(j);
            redstimgy.at<float>(i, j) = static_cast<float>(img1.rows - i);
        }
    }
    remap(img1, redstimg, redstimgx, redstimgy, INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));
    imshow("e", redstimg);
}
////////////////////////////////////////////////////////////////////////////////////////////
/* Affine transform */
/* Linear transform + translation: any affine transform can be expressed as multiplication by a matrix (the linear part) followed by adding a vector (the translation) */
/* warpAffine (expensive, can keep part of the image) vs. cvGetQuadrangleSubPix (cheap, always rewrites the whole output image);
getRotationMatrix2D builds a 2-D rotation matrix whose rotation center maps to itself */
void M_affine()
{
    Mat adddst;
    Mat rotMat(2, 3, CV_32FC1);
    Mat warpMat(2, 3, CV_32FC1);
    Point2f srcTriangle[3];
    Point2f dstTriangle[3];
    adddst.create(img1.size(), img1.type());
    srcTriangle[0] = Point2f(0, 0);
    srcTriangle[1] = Point2f(static_cast<float>(img1.cols-1),0);
    srcTriangle[2] = Point2f(0, static_cast<float>(img1.rows - 1));
    dstTriangle[0]= Point2f(static_cast<float>(img1.cols*0.0), static_cast<float>(img1.rows*0.33));
    dstTriangle[1] = Point2f(static_cast<float>(img1.cols*0.65), static_cast<float>(img1.rows*0.35));
    dstTriangle[2] = Point2f(static_cast<float>(img1.cols*0.15), static_cast<float>(img1.rows*0.6));
    warpMat = getAffineTransform(srcTriangle, dstTriangle);
    warpAffine(img1, adddst, warpMat, adddst.size());
    Point center = Point(adddst.cols / 2, adddst.rows / 2);
    double angle = -30.0;
    double scale = 0.8;
    rotMat = getRotationMatrix2D(center, angle, scale);
    warpAffine(adddst, adddst, rotMat, adddst.size());
    imshow("r", adddst);
}
/////////////////////////////////////////////////////////
/* Histogram equalization -- works on grayscale images */
/* Histogram equalization enhances contrast by stretching the distribution of pixel intensities; in essence it widens the quantization intervals while reducing the number of quantization levels */
/* If the original image already has high contrast, equalization flattens the gray levels and contrast drops; in a washed-out image, equalization merges some gray levels and increases contrast */
void M_equalizeHist()
{
    Mat equdst;
    cvtColor(img1, equdst, COLOR_BGR2GRAY);
    equalizeHist(equdst, equdst); // equalizeHist needs the single-channel image, not the color source
    imshow("e", equdst);
}
/////////////////////////////////////////////////////////////////////////////
/* Image contours, segmentation and inpainting */
/* Segmentation algorithms (chapter 6: morphology, thresholding and pyramid segmentation); this chapter covers the watershed algorithm and image inpainting */
/* Find contours */
void M_findContours()
{
    Mat contours_src = imread("1.jpg", 0); // load as grayscale; it is binarized below
    Mat contours_dst = Mat::zeros(contours_src.rows, contours_src.cols, CV_8UC3);
    //threshold(contours_src, contours_src, 119, 255, 0);
    contours_src = contours_src >= 150; // same result as the line above (note its last argument is 0, THRESH_BINARY)
    vector<vector<Point>> contours; // the contours
    vector<Vec4i> hierarchy; // four hierarchy entries per contour: hierarchy[i][0]
    // next contour, hierarchy[i][1] previous contour, hierarchy[i][2] first child, hierarchy[i][3] parent
    findContours(contours_src, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
    // iterate over all top-level contours and draw each connected component in a random color
    int index = 0;
    for (; index >= 0; index = hierarchy[index][0])
    {
        Scalar color(rand() & 255, rand() & 255, rand() & 255);
        drawContours(contours_dst, contours, index, color, 2, 8, hierarchy); // a negative thickness would fill the contour interior
    }
    imshow("r", contours_dst);
}
//////////////////////////////////////////////////////////////////////
/* Find the convex hull of an object */
/* Given a set of 2-D points, the convex hull is the convex polygon formed by connecting the outermost points; it contains every point of the set.
A useful way to understand an object's shape or contour is to compute its convex hull and then compute its convexity defects */
void M_covexHull()
{
    Mat image(600, 600, CV_8UC3); // 3 channels so the random colors actually show
    RNG& rng = theRNG(); // random number generator
    // loop: ESC, Q or q exits; any other key regenerates the points
    while (1)
    {
        char covekey;
        int count = (unsigned)rng % 100 + 3;
        vector<Point> points;
        for (int i = 0; i < count; i++)
        {
            Point pointmp;
            pointmp.x = rng.uniform(image.cols / 4, image.cols * 3 / 4); // random value within a range
            pointmp.y = rng.uniform(image.rows / 4, image.rows * 3 / 4);
            points.push_back(pointmp);
        }
        image = Scalar::all(0);
        for (int i = 0; i < count; i++)
        {
            circle(image, points[i], 3, Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), FILLED, 8);
        }
        vector<int> Hull;
        convexHull(points, Hull);
        int count_Hull = (int)Hull.size(); // number of hull vertices (size() - 1 would drop one edge)
        Point point0 = points[Hull[count_Hull - 1]];
        for (int i = 0; i < count_Hull; i++)
        {
            Point point1 = points[Hull[i]];
            line(image, point0, point1, Scalar(255, 255, 255), 1, 8);
            point0 = point1;
        }
        imshow("r", image);
        covekey = char(waitKey());
        if (covekey == 27 || covekey == 'q' || covekey == 'Q')
            break;
    }
}
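
/* The note above mentions convexity defects. convexHull() with an index output (as used above)
produces exactly what convexityDefects() expects; a minimal sketch: each defect is a Vec4i
(start index, end index, farthest-point index, fixed-point depth with 8 fractional bits). */
void M_convexityDefects(const vector<Point>& points)
{
    vector<int> hull;
    convexHull(points, hull);
    vector<Vec4i> defects;
    convexityDefects(points, hull, defects);
    for (int i = 0; i < defects.size(); i++)
    {
        Point farthest = points[defects[i][2]];
        cout << "defect depth " << defects[i][3] / 256.0 << " at " << farthest << endl;
    }
}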
///////////////////////////////////////////////////////////////////////////
/* Enclosing a contour with polygons */
/* How to represent a contour with a polygon, or how to extract a polygon from a contour */
/* The last basic type is a special rectangle called RotatedRect, which represents a rotated rectangle by its center point, width, height and rotation angle (see the sketch after this function) */
void M_Contain_Contours()
{
    Mat image(600, 600, CV_8UC3); // 3 channels so the random colors actually show
    RNG& rng = theRNG(); // random number generator
    int count = (unsigned)rng % 100 + 3;
    vector<Point> points;
    for (int i = 0; i < count; i++)
    {
        Point pointmp;
        pointmp.x = rng.uniform(image.cols / 4, image.cols * 3 / 4); // random value within a range
        pointmp.y = rng.uniform(image.rows / 4, image.rows * 3 / 4);
        points.push_back(pointmp);
    }
    image = Scalar::all(0);
    for (int i = 0; i < count; i++)
    {
        circle(image, points[i], 3, Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255)), FILLED, 8);
    }
    boundingRect(points); // the upright bounding rectangle
    minAreaRect(points); // the minimum-area (rotated) bounding rectangle
    Point2f center;
    float radius = 0;
    minEnclosingCircle(points, center, radius); // the minimum enclosing circle; center and radius are output parameters
    vector<Point> approxcure;
    fitEllipse(points); // fit an ellipse to the 2-D point set
    approxPolyDP(points, approxcure, 0.3, 0); // approximate the polygonal curve with the given precision
}
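
/* A minimal sketch of the RotatedRect mentioned above: draw the minimum-area rectangle
returned by minAreaRect() by recovering its four corners from center/size/angle. */
void M_drawRotatedRect(Mat& image, const vector<Point>& points)
{
    RotatedRect box = minAreaRect(points);
    Point2f vertices[4];
    box.points(vertices); // the four corners, computed from center, size and angle
    for (int i = 0; i < 4; i++)
        line(image, vertices[i], vertices[(i + 1) % 4], Scalar(255, 255, 255), 1, 8);
}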
///////////////////////////////////////////////////////////////////////////////////////
/* Image moments */
/* moments() computes all moments up to third order of a polygon or rasterized shape */
/* contourArea() computes the area of a whole contour or part of one */
/* arcLength() computes the perimeter of a closed contour or the length of a curve */
/* Moment functions are widely used in image analysis: pattern recognition, object classification, object recognition and pose estimation, image coding and reconstruction, etc.
A moment computed from a digital image usually describes a global feature of the image's shape and provides a lot of information about its different geometric properties,
such as size, position and orientation. Image moments are widely used for object recognition and orientation estimation in computer vision and robotics.
The zeroth-order moment gives the area, the first-order moments locate the centroid, and the second-order moments give the principal direction.
First-order moments relate to shape, second-order moments measure how far the curve spreads around its mean line, and third-order moments measure the symmetry about the mean.
From the second- and third-order moments one can derive seven invariant moments, which are statistical properties of the image invariant to translation, scaling and rotation (see the sketch below). */
// http://blog.csdn.net/keith_bb/article/details/70197104
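
/* A minimal sketch of the seven invariant (Hu) moments mentioned above: HuMoments() derives
them from the ordinary moments, and they are invariant to translation, scale and rotation. */
void M_huMoments(const vector<Point>& contour)
{
    Moments m = moments(contour);
    double hu[7];
    HuMoments(m, hu);
    for (int i = 0; i < 7; i++)
        cout << "hu[" << i << "] = " << hu[i] << endl;
}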
Mat moment_src;
Mat moment_gray;
Mat canny_moment;
int g_nThresh = 100;
int g_nMaxThresh = 255;
RNG moment_rng(12345);
vector<vector<Point>> moment_contours;
vector<Vec4i> moment_Hierarchy;
void on_ThresgChang(int,void*)
{
    Canny(moment_gray, canny_moment, g_nThresh, g_nThresh * 2, 3);
    findContours(canny_moment, moment_contours, moment_Hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
    // compute the contour moments
    vector<Moments> mu(moment_contours.size());
    for (int i = 0; i < moment_contours.size(); i++)
    {
        mu[i]=(moments(moment_contours[i]));
    }
    // compute the centroid of each contour
    vector<Point2f> mc(moment_contours.size());
    for (int i = 0; i < moment_contours.size(); i++)
    {
        mc[i]=(Point2f(static_cast<float>(mu[i].m10 / mu[i].m00), static_cast<float>(mu[i].m01 / mu[i].m00)));
    }
    // draw the contours
    Mat drawing = Mat::zeros(canny_moment.size(), CV_8UC3);
    for (int i = 0; i < moment_contours.size(); i++)
    {
        Scalar color = Scalar(moment_rng.uniform(0, 255), moment_rng.uniform(0, 255), moment_rng.uniform(0, 255));
        drawContours(drawing, moment_contours, i, color, 2, 8, moment_Hierarchy, 0, Point());
        circle(drawing, mc[i], 4, color, -1, 8, 0);
    }
    imshow("res", drawing);
    printf("\t Info: Area and Contour Length \n");
    for (int i = 0; i < moment_contours.size(); i++)
    {
        printf("* Contour[%d] - Area(M_00)=%.2f-Area OpenCV:%.2f - Length:%.2f\n", i, mu[i].m00, contourArea(moment_contours[i]), arcLength(moment_contours[i], true));
        /*Scalar color = Scalar(moment_rng.uniform(0, 255), moment_rng.uniform(0, 255), moment_rng.uniform(0, 255));
        drawContours(drawing, moment_contours, i, color, 2, 8, moment_Hierarchy, 0, Point());
        circle(drawing, mc[i], 4, color, -1, 8, 0);*/
    }
}
void M_moment()
{
    moment_src = imread("1.jpg");
    cvtColor(moment_src, moment_gray, COLOR_BGR2GRAY);
    blur(moment_gray, moment_gray, Size(3, 3)); // a 1x1 blur would be a no-op
    namedWindow("res", WINDOW_AUTOSIZE);
    createTrackbar("therd", "res", &g_nThresh, g_nMaxThresh, on_ThresgChang);
    on_ThresgChang(0, 0);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/* Watershed algorithm */
/* The idea of a watershed can be explained by simulating immersion: pierce a small hole at every local minimum of the surface, then slowly immerse the whole model in water;
as the water rises, the influence zone of each local minimum expands outwards, and a dam is built where two catchment basins meet -- the dams form the watershed */
/* The watershed algorithm responds well to weak edges, so image noise and subtle gray-level changes on object surfaces cause over-segmentation.
At the same time, that sensitivity to weak edges is exactly what guarantees closed, connected edges.
The closed catchment basins it produces also make it possible to analyze regional features of the image.
To remove the over-segmentation produced by the watershed there are two usual approaches: use prior knowledge to discard irrelevant edge information,
or modify the gradient function so that the basins respond only to the targets we want to detect.
To reduce over-segmentation the gradient function is usually modified; a simple method is to threshold the gradient image,
removing the over-segmentation caused by tiny gray-level variations (see the sketch below) */
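
/* A minimal sketch of the last idea above -- thresholding the gradient image to suppress
over-segmentation. The morphological gradient and the threshold value 20 are illustrative
choices, not fixed rules. */
void M_gradientThreshold(const Mat& gray, Mat& result)
{
    Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
    Mat grad;
    morphologyEx(gray, grad, MORPH_GRADIENT, kernel); // gradient image
    threshold(grad, result, 20, 255, THRESH_TOZERO);  // zero out small gray-level variations
}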
Mat g_maskwatershedImage, g_srcwatershedImage;
Point prevPt(-1, -1);
static void on_Mouse(int event, int x, int y, int flags, void*)
{
    // ignore positions outside the window
    if (x < 0 || x >= g_srcwatershedImage.cols || y < 0 || y >= g_srcwatershedImage.rows)
        return;

    // handle left-button messages
    if (event == EVENT_LBUTTONUP || !(flags & EVENT_FLAG_LBUTTON))
        prevPt = Point(-1, -1);
    else if (event == EVENT_LBUTTONDOWN)
        prevPt = Point(x, y);

    // left button held down and moving: draw white strokes
    else if (event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON))
    {
        Point pt(x, y);
        if (prevPt.x < 0)
            prevPt = pt;
        line(g_maskwatershedImage, prevPt, pt, Scalar::all(255), 5, 8, 0);
        line(g_srcwatershedImage, prevPt, pt, Scalar::all(255), 5, 8, 0);
        prevPt = pt;
        imshow("res", g_srcwatershedImage);
    }
}
void M_watershed()
{
    // the watershed implemented by M_watershed is a marker-based segmentation algorithm
    g_srcwatershedImage = imread("1.jpg", 1);
    Mat srcImage;
    g_srcwatershedImage.copyTo(srcImage);
    imshow("res", g_srcwatershedImage);
    Mat graywatershedImage;
    cvtColor(g_srcwatershedImage, g_maskwatershedImage, COLOR_BGR2GRAY);
    cvtColor(g_maskwatershedImage, graywatershedImage, COLOR_GRAY2BGR);
    g_maskwatershedImage = Scalar::all(0);
    setMouseCallback("res", on_Mouse, 0);
    while (1)
    {
        int c = waitKey(0);
        if ((char)c == '1' || (char)c == ' ') {
            int compCount = 0;
            vector<vector<Point> > contours;
            vector<Vec4i> hierarchy;
            //寻找轮廓

            findContours(g_maskwatershedImage, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);//?为什么用掩码图寻找轮廓

            // prepare the marker image
            Mat maskImage(g_maskwatershedImage.size(), CV_32S);
            maskImage = Scalar::all(0);

            // draw every contour as a distinct marker label
            for (int index = 0; index >= 0; index = hierarchy[index][0], compCount++)
                drawContours(maskImage, contours, index, Scalar::all(compCount + 1), -1, 8, hierarchy, INT_MAX);
            // generate random colors
            vector<Vec3b> colorTab;
            for (int i = 0; i < compCount; i++)
            {
                int b = theRNG().uniform(0, 255);
                int g = theRNG().uniform(0, 255);
                int r = theRNG().uniform(0, 255);
                colorTab.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
            }
            watershed(srcImage, maskImage); // run the watershed on the marker image
            // copy the watershed result into watershedImage with a double loop
            Mat watershedImage(maskImage.size(), CV_8UC3);
            for (int i = 0; i < maskImage.rows; i++)
                for (int j = 0; j < maskImage.cols; j++)
                {
                    int index = maskImage.at<int>(i, j);
                    if (index == -1)
                        watershedImage.at<Vec3b>(i, j) = Vec3b(255, 255, 255);
                    else if (index <= 0 || index > compCount)
                        watershedImage.at<Vec3b>(i, j) = Vec3b(0, 0, 0);
                    else
                        watershedImage.at<Vec3b>(i, j) = colorTab[index - 1];
                }
            watershedImage = watershedImage*0.5 + graywatershedImage*0.5;
            imshow("s", watershedImage);

        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////
/* Image inpainting */
/* Repair noise in a picture caused by dust, water drops, etc. */
void M_inpaint()
{
    g_srcwatershedImage = imread("1.jpg", 1);
    g_maskwatershedImage = Mat::zeros(g_srcwatershedImage.size(), CV_8U);
    imshow("res", g_srcwatershedImage);
    setMouseCallback("res", on_Mouse, 0);
    while (1) {
        char c = (char)waitKey();
        if (c == 27)
            break; // exit on ESC
        if (c == '1' || c == ' ')
        {
            Mat inpaintedImage;
            inpaint(g_srcwatershedImage, g_maskwatershedImage, inpaintedImage, 3, INPAINT_TELEA);
            imshow("r", inpaintedImage);
        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////
/* Histograms and matching */
/* A histogram is a way of collecting statistics over data, organizing the counts into a series of pre-defined bins */
/* A histogram is the graphical representation of the intensity distribution of an image; it counts the number of pixels at each intensity (computer vision often uses histograms to binarize images) */
/* --dims: the number of features to collect statistics for */
/* --bins: the number of sub-intervals per feature */
/* --range: the value range of each feature */
/* The HSV color model */ /* Its components are hue (H), saturation (S) and value (V) */
  /* Hue H: measured as an angle over 0-360 degrees, counted counterclockwise starting from red; red is 0, green 120, blue 240.
Their complements: yellow 60, cyan 180, magenta 300 */
  /* Saturation S: how close the color is to a pure spectral color. A color can be seen as a mix of some spectral color with white:
the larger the spectral component, the closer the color is to the spectral color and the higher its saturation. High saturation gives a deep, vivid color.
A pure spectral color has a white component of 0 and the highest saturation. Usually 0%-100%; the larger the value, the more saturated the color */
  /* Value V: the brightness of the color. For a light-source color, value relates to the luminance of the emitter;
for an object color it relates to the object's transmittance or reflectance. Usually 0% (black) to 100% (white) */
void M_HS()
{
    Mat HS_src = imread("1.jpg");
    Mat HS_dst;
    cvtColor(HS_src, HS_dst, COLOR_BGR2HSV);
    int hueBinNum = 30; // number of hue bins
    int saturationBinNum = 32; // number of saturation bins
    int histSize[] = { hueBinNum, saturationBinNum };
    // hue varies from 0 to 179
    float hueRanges[] = { 0, 180 };
    // saturation varies from 0 (black/white/gray) to 255 (pure spectral color)
    float saturationRanges[] = { 0, 256 };
    const float* ranges[] = { hueRanges, saturationRanges };
    MatND dstHist; // the output histogram, a data structure for storing histograms (a multi-dimensional matrix)
    int channels[] = { 0, 1 };
    // [3] call calcHist to compute the histogram
    calcHist(&HS_dst, // input arrays
        1, // number of arrays
        channels, // channel indices
        Mat(), // no mask
        dstHist, // output histogram
        2, // histogram dimensionality
        histSize, // array of histogram sizes per dimension
        ranges, // array of value ranges per dimension
        true, // the histogram is uniform
        false); // no accumulation: the histogram is cleared at the start
    // [4] prepare parameters for drawing
    double maxValue = 0; // maximum bin value
    minMaxLoc(dstHist, 0, &maxValue, 0, 0); // find the global minimum and maximum; only the maximum is kept
    int scale = 10;
    Mat histImg = Mat::zeros(saturationBinNum*scale, hueBinNum * scale, CV_8UC3);

    // [5] draw the histogram with a double loop
    for (int hue = 0; hue < hueBinNum; hue++)
        for (int saturation = 0; saturation < saturationBinNum; saturation++)
        {
            float binValue = dstHist.at<float>(hue, saturation); // bin value
            int intensity = cvRound(binValue * 255 / maxValue); // intensity

            // draw
            rectangle(histImg, Point(hue*scale, saturation*scale),
                Point((hue + 1)*scale - 1, (saturation + 1)*scale - 1),
                Scalar::all(intensity), FILLED);
        }

    // [6] show the results
    imshow("source", HS_src);
    imshow("H-S histogram", histImg);
}
/* Drawing a one-dimensional histogram */
void M_one_dim()
{
    Mat srcone_dim = imread("1.jpg", 0);
    imshow("source", srcone_dim);
    // [2] define the variables
    MatND dstHist;       // the C API used CvHistogram *hist = cvCreateHist
    int dims = 1;
    float hranges[] = { 0, 256 }; // upper bound is exclusive, so 256 covers gray level 255
    const float *ranges[] = { hranges };   // must be const here
    int size = 256;
    int channels = 0;

    // [3] compute the histogram of the image
    calcHist(&srcone_dim, 1, &channels, Mat(), dstHist, dims, &size, ranges);    // cvCalcHist in the C API
    int scale = 1;

    Mat dstImage(size * scale, size, CV_8U, Scalar(0));
    // [4] get the minimum and maximum bin values
    double minValue = 0;
    double maxValue = 0;
    minMaxLoc(dstHist, &minValue, &maxValue, 0, 0);  // cvGetMinMaxHistValue in the C API

    // [5] draw the histogram
    int hpt = saturate_cast<int>(0.9 * size);
    for (int i = 0; i < 256; i++)
    {
        float binValue = dstHist.at<float>(i);           // note the bins are float (cvQueryHistValue_1D in OpenCV 1.0)
        int realValue = saturate_cast<int>(binValue * hpt / maxValue);
        rectangle(dstImage, Point(i*scale, size - 1), Point((i + 1)*scale - 1, size - realValue), Scalar(255));
    }
    imshow("1-D histogram", dstImage);
}
/* Drawing the histograms of the three RGB components */
void M_RGB()
{
    Mat srcRGB;
    srcRGB = imread("1.jpg");
    // [2] prepare the parameters
    int bins = 256;
    int hist_size[] = { bins };
    float range[] = { 0, 256 };
    const float* ranges[] = { range };
    MatND redHist, grayHist, blueHist;
    int channels_r[] = { 0 }; // note: imread gives BGR order, so channel 0 is actually blue; the variable names follow the original notes, and the drawing colors below do match the data

    // [3] compute the histogram ("red" component)
    calcHist(&srcRGB, 1, channels_r, Mat(), // do not use mask
        redHist, 1, hist_size, ranges,
        true, false);

    // [4] compute the histogram (green component)
    int channels_g[] = { 1 };
    calcHist(&srcRGB, 1, channels_g, Mat(), // do not use mask
        grayHist, 1, hist_size, ranges,
        true, // the histogram is uniform
        false);

    // [5] compute the histogram ("blue" component)
    int channels_b[] = { 2 };
    calcHist(&srcRGB, 1, channels_b, Mat(), // do not use mask
        blueHist, 1, hist_size, ranges,
        true, // the histogram is uniform
        false);
    double maxValue_red, maxValue_green, maxValue_blue;
    minMaxLoc(redHist, 0, &maxValue_red, 0, 0);
    minMaxLoc(grayHist, 0, &maxValue_green, 0, 0);
    minMaxLoc(blueHist, 0, &maxValue_blue, 0, 0);
    int scale = 1;
    int histHeight = 256;
    Mat histImage = Mat::zeros(histHeight, bins * 3, CV_8UC3);
    for (int i = 0; i < bins; i++)
    {
        // prepare the values
        float binValue_red = redHist.at<float>(i);
        float binValue_green = grayHist.at<float>(i);
        float binValue_blue = blueHist.at<float>(i);
        int intensity_red = cvRound(binValue_red*histHeight / maxValue_red);  // height to draw
        int intensity_green = cvRound(binValue_green*histHeight / maxValue_green);  // height to draw
        int intensity_blue = cvRound(binValue_blue*histHeight / maxValue_blue);  // height to draw

        // draw the first component's histogram
        rectangle(histImage, Point(i*scale, histHeight - 1),
            Point((i + 1)*scale - 1, histHeight - intensity_red),
            Scalar(255, 0, 0));

        // draw the green component's histogram
        rectangle(histImage, Point((i + bins)*scale, histHeight - 1),
            Point((i + bins + 1)*scale - 1, histHeight - intensity_green),
            Scalar(0, 255, 0));

        // draw the third component's histogram
        rectangle(histImage, Point((i + bins * 2)*scale, histHeight - 1),
            Point((i + bins * 2 + 1)*scale - 1, histHeight - intensity_blue),
            Scalar(0, 0, 255));

    }

    // show the drawn histograms in a window
    imshow("RGB histograms", histImage);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
/* Histogram comparison */
void M_calcHist()
{
    // [1] matrices for the base image and the two test images (RGB and HSV)
    Mat srcImage_base, hsvImage_base;
    Mat srcImage_test1, hsvImage_test1;
    Mat srcImage_test2, hsvImage_test2;
    Mat hsvImage_halfDown;

    // [2] load the base image (srcImage_base) and the two test images srcImage_test1 and srcImage_test2, and show them
    srcImage_base = imread("1.jpg", 1);
    srcImage_test1 = imread("2.jpg", 1);
    srcImage_test2 = imread("3.jpg", 1);
    // show the three loaded images
    imshow("base image", srcImage_base);
    imshow("test image 1", srcImage_test1);
    imshow("test image 2", srcImage_test2);

    // [3] convert the images from the BGR color space to HSV
    cvtColor(srcImage_base, hsvImage_base, COLOR_BGR2HSV);
    cvtColor(srcImage_test1, hsvImage_test1, COLOR_BGR2HSV);
    cvtColor(srcImage_test2, hsvImage_test2, COLOR_BGR2HSV);

    // [4] create a half-body image from the lower half of the base image (HSV)
    hsvImage_halfDown = hsvImage_base(Range(hsvImage_base.rows / 2, hsvImage_base.rows - 1), Range(0, hsvImage_base.cols - 1));

    // [5] initialize the arguments for the histogram computation
    // use 50 bins for the hue channel and 60 for the saturation channel
    int h_bins = 50; int s_bins = 60;
    int histSize[] = { h_bins, s_bins };
    // hue ranges over 0-179 and saturation over 0-255 (the original comment and code had the two ranges swapped)
    float h_ranges[] = { 0, 180 };
    float s_ranges[] = { 0, 256 };
    const float* ranges[] = { h_ranges, s_ranges };
    // use channels 0 and 1
    int channels[] = { 0, 1 };

    // [6] create the MatND instances that store the histograms:
    MatND baseHist;
    MatND halfDownHist;
    MatND testHist1;
    MatND testHist2;

    // [7] compute the HSV histograms of the base image, the two test images and the half-body base image:
    calcHist(&hsvImage_base, 1, channels, Mat(), baseHist, 2, histSize, ranges, true, false);
    normalize(baseHist, baseHist, 0, 1, NORM_MINMAX, -1, Mat());

    calcHist(&hsvImage_halfDown, 1, channels, Mat(), halfDownHist, 2, histSize, ranges, true, false);
    normalize(halfDownHist, halfDownHist, 0, 1, NORM_MINMAX, -1, Mat());

    calcHist(&hsvImage_test1, 1, channels, Mat(), testHist1, 2, histSize, ranges, true, false);
    normalize(testHist1, testHist1, 0, 1, NORM_MINMAX, -1, Mat());

    calcHist(&hsvImage_test2, 1, channels, Mat(), testHist2, 2, histSize, ranges, true, false);
    normalize(testHist2, testHist2, 0, 1, NORM_MINMAX, -1, Mat());


    // [8] compare the base histogram against each of the others with the 4 comparison methods in turn:
    for (int i = 0; i < 4; i++)
    {
        // compare the histograms
        int compare_method = i;
        double base_base = compareHist(baseHist, baseHist, compare_method);
        double base_half = compareHist(baseHist, halfDownHist, compare_method);
        double base_test1 = compareHist(baseHist, testHist1, compare_method);
        double base_test2 = compareHist(baseHist, testHist2, compare_method);
        // print the results
        printf(" Method [%d] matching results:\n\n [base - base]: %f, [base - half]: %f, [base - test1]: %f, [base - test2]: %f \n-----------------------------------------------------------------\n", i, base_base, base_half, base_test1, base_test2);
    }
}
/* Back projection */
/* Back projection: first compute the histogram model of some feature, then use that model to find the feature in an image */
/* It is used to locate, inside a (usually larger) input image, the point or region that best matches a particular (usually smaller, possibly single-pixel) template image,
i.e. to locate where the template image appears in the input image */
void M_calcBackProject()
{
    Mat cal_src = imread("1.jpg");
    Mat cal_hsv;
    Mat cal_g_hsv;
    cvtColor(cal_src, cal_hsv, COLOR_BGR2HSV);
    cal_g_hsv.create(cal_hsv.size(), cal_hsv.depth());
    int ch[] = { 0, 0 };
    mixChannels(&cal_hsv, 1, &cal_g_hsv, 1, ch, 1); // keep only the hue channel
    MatND hist;
    int histSize = 2;
    float hue_range[] = { 0, 180 };
    const float* ranges = { hue_range };
    calcHist(&cal_g_hsv, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false);
    normalize(hist, hist, 0, 255, NORM_MINMAX, -1, Mat());
    MatND backproj;
    calcBackProject(&cal_g_hsv, 1, 0, hist, backproj, &ranges, 1, true);
    imshow("back projection", backproj);
}
/* Template matching */
Mat g_mat_srcImage;
Mat g_mat_templateImage;
Mat g_mat_resultImage;
void M_matchTemplate()
{
    g_mat_srcImage = imread("1.jpg", 1);
    g_mat_templateImage = imread("2.jpg", 1);
    namedWindow("r", WINDOW_AUTOSIZE);
    namedWindow("e", WINDOW_AUTOSIZE);
    int resultImage_rows = g_mat_srcImage.rows - g_mat_templateImage.rows + 1;
    int resultImage_cols = g_mat_srcImage.cols - g_mat_templateImage.cols + 1;
    g_mat_resultImage.create(resultImage_rows, resultImage_cols, CV_32FC1);

    matchTemplate(g_mat_srcImage, g_mat_templateImage, g_mat_resultImage, TM_SQDIFF);
    normalize(g_mat_resultImage, g_mat_resultImage, 0, 1, NORM_MINMAX, -1, Mat());

    double minValue; double maxValue; Point minLocation; Point maxLocation;
    Point matchLocation;
    minMaxLoc(g_mat_resultImage, &minValue, &maxValue, &minLocation, &maxLocation, Mat());
    // for SQDIFF and SQDIFF_NORMED smaller values mean better matches; for the other methods larger is better
    matchLocation = minLocation;
    //matchLocation = maxLocation;
    rectangle(g_mat_srcImage, matchLocation, Point(matchLocation.x + g_mat_templateImage.cols, matchLocation.y + g_mat_templateImage.rows), Scalar(0, 0, 255), 2, 8, 0);
    rectangle(g_mat_resultImage, matchLocation, Point(matchLocation.x + g_mat_templateImage.cols, matchLocation.y + g_mat_templateImage.rows), Scalar(0, 0, 255), 2, 8, 0);
    imshow("r", g_mat_srcImage);
    imshow("e", g_mat_resultImage); // show in the window created above (was "s", which matched no namedWindow)
}
////////////////////////////////////////////////////////////////////////////////////////
/*-----------------------------Into the feature2d module---------------------------------------*/
/* Corner detection -- Harris */
void M_Harries()
{
    // load the image in grayscale mode and show it
    Mat srcImage = imread("1.jpg", 0);
    imshow("source", srcImage);
    // run Harris corner detection to find the corners
    Mat cornerStrength;
    cornerHarris(srcImage, cornerStrength, 2, 3, 0.04, BORDER_DEFAULT);
    imshow("r", cornerStrength);
    // normalize the response map and threshold it to mark the corners
    Mat harrisCorner;
    Mat h;
    //threshold(cornerStrength, harrisCorner, 0.000001, 255, THRESH_BINARY);
    normalize(cornerStrength, harrisCorner, 0, 255, NORM_MINMAX, CV_32FC1, Mat()); // normalize to 0-255
    convertScaleAbs(harrisCorner, h);
    for (int i = 0; i < harrisCorner.rows; i++)
    {
        for (int j = 0; j < harrisCorner.cols; j++)
        {

            if ((int)harrisCorner.at<float>(i, j) > 93)
            {

                circle(srcImage, Point(j, i), 5, Scalar(0, 0, 255), -1, 8, 0); // why Point(j,i) and not (i,j)? Point stores x and y relative to the axes, so x corresponds to the column and y to the row
            }
        }
    }
    imshow("Harris corners", srcImage);
    imshow("Harris response (8-bit)", h); // show the 8-bit version; the raw float map in the 0-255 range would render mostly white
}
/* Corner detection -- Shi-Tomasi */ // detect strong corners in an image
void M_goodFeaturesToTrack()
{
    Mat srcImage = imread("1.jpg", 1);
    Mat dstImage;
    RNG h_rng;
    cvtColor(srcImage, dstImage, COLOR_BGR2GRAY);
    vector<Point2f> corners;
    int g_maxCornerNumber = 33;
    double qualityLevel = 0.01;
    double minDistance = 10;
    int blockSize = 3; // neighborhood size for the derivative autocorrelation matrix
    double k = 0.04; // weight coefficient
    goodFeaturesToTrack(dstImage, // input image
        corners, // output vector of detected corners
        g_maxCornerNumber, // maximum number of corners
        qualityLevel, // minimal accepted corner quality
        minDistance, // minimum distance between corners
        Mat(), // region of interest
        blockSize, // neighborhood size for the derivative autocorrelation matrix
        false, // do not use Harris corner detection
        k); // weight coefficient
    for (int i = 0; i < corners.size(); i++)
    {
        // draw the corners in random colors
        circle(srcImage, corners[i], 3, Scalar(h_rng.uniform(0, 255), h_rng.uniform(0, 255),
            h_rng.uniform(0, 255)), -1, 8, 0);
    }
    imshow("detected corners", srcImage);
}
/* Corner detection -- sub-pixel refinement */ // when the goal is geometric measurement rather than recognition, whole-pixel feature positions are not accurate enough
/* Sub-pixel corners have real-valued coordinates; they are a basic measurement when calibrating a camera, tracking and reconstructing the camera trajectory, or reconstructing the 3-D structure of a tracked target */
void M_cornerSubPix()
{
    Mat srcImage = imread("1.jpg", 1);
    Mat dstImage;
    RNG h_rng;
    cvtColor(srcImage, dstImage, COLOR_BGR2GRAY);
    vector<Point2f> corners;
    int g_maxCornerNumber = 33;
    double qualityLevel = 0.01;
    double minDistance = 10;
    int blockSize = 3; // neighborhood size for the derivative autocorrelation matrix
    double k = 0.04; // weight coefficient
    goodFeaturesToTrack(dstImage, // input image
        corners, // output vector of detected corners
        g_maxCornerNumber, // maximum number of corners
        qualityLevel, // minimal accepted corner quality
        minDistance, // minimum distance between corners
        Mat(), // region of interest
        blockSize, // neighborhood size for the derivative autocorrelation matrix
        false, // do not use Harris corner detection
        k); // weight coefficient
    for (int i = 0; i < corners.size(); i++)
    {
        // draw the corners in random colors
        circle(srcImage, corners[i], 3, Scalar(h_rng.uniform(0, 255), h_rng.uniform(0, 255),
            h_rng.uniform(0, 255)), -1, 8, 0);
    }
    imshow("detected corners", srcImage);
    Size winSize = Size(5, 5);
    Size zeroZone = Size(-1, -1);
    TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 40, 0.001); // EPS: stop once the accuracy threshold is reached
    cornerSubPix(dstImage, corners, winSize, zeroZone, criteria); // corners holds the initial coordinates on input and the refined ones on output
    for (int i = 0; i < corners.size(); i++)
    {
        cout << " \t>> refined corner [" << i << "]  (" << corners[i].x << "," << corners[i].y << ")" << endl;
    }
}
///////////////////////////////////////////////////////////////////////////////////
/*----------------------------Feature detection and matching---------------------------*/
/*OpenCV offers the following 10 feature detection methods
  (a short creation sketch follows this list):*/
/*---FAST built for speed, typically used in real-time video processing
-----STAR
-----SIFT
-----SURF Speeded-Up Robust Features, an accelerated version of SIFT; its distinguishing ideas are Haar-like features and the integral image, and the Hessian matrix is the core of the algorithm.
          A Haar-like feature captures local gray-level changes by taking block-wise differences of pixel sums.
-----ORB
-----MSER
-----GFTT
-----HARRIS
-----Dense
-----SimpleBlob*/
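/*A minimal creation sketch (my own addition, assuming OpenCV 3.x: SIFT, SURF and
STAR live in the xfeatures2d contrib module, the others in the core features2d
module). All of these share the Feature2D interface, so they are interchangeable.*/
void detector_creation_sketch()
{
    Mat img = imread("1.jpg", 0);
    if (img.empty()) return;
    vector<KeyPoint> kps;
    Ptr<FastFeatureDetector> fast = FastFeatureDetector::create(); //FAST
    Ptr<ORB> orb = ORB::create();                                  //ORB
    Ptr<MSER> mser = MSER::create();                               //MSER
    Ptr<GFTTDetector> gftt = GFTTDetector::create();               //GFTT (Shi-Tomasi)
    Ptr<SimpleBlobDetector> blob = SimpleBlobDetector::create();   //SimpleBlob
    fast->detect(img, kps); //every detector exposes the same detect() call
    cout << "FAST found " << kps.size() << " keypoints" << endl;
}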
void M_features()
{
    Mat src1 = imread("1.jpg", 1);
    Mat src2 = imread("2.jpg", 1);
    Ptr<Feature2D> surf = xfeatures2d::SURF::create(100, 4, 3, false, false); //(hessianThreshold, nOctaves, nOctaveLayers, extended, upright); requires the opencv_contrib xfeatures2d module
    vector<KeyPoint> keypoint1;
    vector<KeyPoint> keypoint2;
    Mat descriptor1;
    Mat descriptor2;
    surf->detectAndCompute(src1, Mat(), keypoint1, descriptor1);
    surf->detectAndCompute(src2, Mat(), keypoint2, descriptor2);
    vector<DMatch> match12;
    FlannBasedMatcher matcher;
    matcher.match(descriptor1, descriptor2, match12);
    Mat imagematch;
    drawMatches(src1, keypoint1, src2, keypoint2, match12, imagematch, Scalar(0, 255, 255), Scalar(0, 0, 255), Mat());
    imshow("res", imagematch);
}
/*Locating a known object*/
void M_findHomography_perspectiveTransform()
{
    Mat src1 = imread("1.jpg", 1);
    Mat src2 = imread("2.jpg", 1);
    Ptr<Feature2D> surf = xfeatures2d::SURF::create(100, 4, 3, false, false);
    vector<KeyPoint> keypoint1;
    vector<KeyPoint> keypoint2;
    Mat descriptor1;
    Mat descriptor2;
    surf->detectAndCompute(src1, Mat(), keypoint1, descriptor1);
    surf->detectAndCompute(src2, Mat(), keypoint2, descriptor2);
    vector<DMatch> match12;
    FlannBasedMatcher matcher;
    matcher.match(descriptor1, descriptor2, match12);
    vector<Point2f> src1_matchkeypoint_loc;
    vector<Point2f> src2_matchkeypoint_loc;
    for (int i = 0; i < match12.size(); i++)
    {
        src1_matchkeypoint_loc.push_back(keypoint1[match12[i].queryIdx].pt);
        src2_matchkeypoint_loc.push_back(keypoint2[match12[i].trainIdx].pt);
    }
    Mat H = findHomography(src1_matchkeypoint_loc, src2_matchkeypoint_loc, CV_RANSAC); //estimate the perspective transform, rejecting outlier matches with RANSAC
    vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f(0, 0); obj_corners[1] = Point2f((float)src1.cols, 0);
    obj_corners[2] = Point2f((float)src1.cols, (float)src1.rows); obj_corners[3] = Point2f(0, (float)src1.rows);
    vector<Point2f> scene_corners(4);
    perspectiveTransform(obj_corners, scene_corners, H);
    Mat imagematch;
    drawMatches(src1, keypoint1, src2, keypoint2, match12, imagematch, Scalar(0, 255, 255), Scalar(0, 0, 255), Mat());
    line(imagematch, scene_corners[0] + Point2f(static_cast<float>(src1.cols), 0), scene_corners[1] + Point2f(static_cast<float>(src1.cols), 0), Scalar(255, 0, 123), 4); //offset by src1.cols because drawMatches places the two images side by side
    line(imagematch, scene_corners[1] + Point2f(static_cast<float>(src1.cols), 0), scene_corners[2] + Point2f(static_cast<float>(src1.cols), 0), Scalar(255, 0, 123), 4);
    line(imagematch, scene_corners[2] + Point2f(static_cast<float>(src1.cols), 0), scene_corners[3] + Point2f(static_cast<float>(src1.cols), 0), Scalar(255, 0, 123), 4);
    line(imagematch, scene_corners[3] + Point2f(static_cast<float>(src1.cols), 0), scene_corners[0] + Point2f(static_cast<float>(src1.cols), 0), Scalar(255, 0, 123), 4);
    imshow("res", imagematch);
}
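/*A hedged refinement sketch (my own addition, not part of the original flow):
findHomography copes much better when obviously bad matches are dropped first.
One common heuristic, also used in the official OpenCV tutorials, keeps only
matches whose descriptor distance is close to the observed minimum:*/
vector<DMatch> filter_matches_sketch(const vector<DMatch>& matches)
{
    double min_dist = 100; //running minimum of the descriptor distances
    for (size_t i = 0; i < matches.size(); i++)
        min_dist = min(min_dist, (double)matches[i].distance);
    vector<DMatch> good;
    for (size_t i = 0; i < matches.size(); i++)
        if (matches[i].distance < max(3 * min_dist, 0.02)) //3*min_dist is a heuristic threshold, not a fixed rule
            good.push_back(matches[i]);
    return good;
}
/*calling match12 = filter_matches_sketch(match12) before filling the point vectors
above would feed only the better matches into findHomography*/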
/*ORB feature extraction*/
/*About the BRIEF descriptor: sample a number of random point pairs around the feature point, compare the gray values within each pair, and pack the comparison results into a binary string that serves as the descriptor.
BRIEF is not rotation invariant, is fairly sensitive to noise, and is not scale invariant; ORB fixes the rotation and noise problems but is still not scale invariant. (A short Hamming-distance sketch follows this function.)*/
void M_ORB()
{
    Mat src1 = imread("1.jpg", 0);
    Mat src2 = imread("2.jpg", 0);
    Ptr<ORB> orb = ORB::create();
    vector<KeyPoint> keypoint1;
    vector<KeyPoint> keypoint2;
    Mat descriptor1;
    Mat descriptor2;
    orb->detectAndCompute(src1, Mat(), keypoint1, descriptor1);
    orb->detectAndCompute(src2, Mat(), keypoint2, descriptor2);
    vector<DMatch> match12;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming"); //ORB descriptors are binary, so match with Hamming distance; the magic number 2 selected the L2 brute-force matcher, which is the wrong metric here
    matcher->match(descriptor1, descriptor2, match12);
    Mat imagematch;
    drawMatches(src1, keypoint1, src2, keypoint2, match12, imagematch, Scalar(0, 255, 255), Scalar(0, 0, 255), Mat());
    imshow("res", imagematch);
}
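/*A minimal sketch of what "Hamming distance between binary descriptors" means
(my own illustration): cv::norm with NORM_HAMMING counts the differing bits.*/
void hamming_sketch(const Mat& descriptor1, const Mat& descriptor2)
{
    if (descriptor1.rows == 0 || descriptor2.rows == 0) return;
    //number of bits that differ between the first descriptor of each image
    double d = norm(descriptor1.row(0), descriptor2.row(0), NORM_HAMMING);
    cout << "Hamming distance between the first descriptors: " << d << endl;
}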
/////////////////////////////////////-----------------Sample programs------------------------///////////////////////////////
/*Support vector machine*/
void supportVector()
{
    int width = 512, height = 512;
    Mat image = Mat::zeros(height, width, CV_8UC3);
    //Set up the training data
    int labels[10] = { 1, -1, 1, 1,-1,1,-1,1,-1,-1 };
    Mat labelsMat(10, 1, CV_32SC1, labels);
    float trainingData[10][2] = { { 501, 150 },{ 255, 10 },{ 501, 255 },{ 10, 501 },{ 25, 80 },
    { 150, 300 },{ 77, 200 } ,{ 300, 300 } ,{ 45, 250 } ,{ 200, 200 } };
    Mat trainingDataMat(10, 2, CV_32FC1, trainingData);

    //Set the SVM parameters
    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    svm->setType(cv::ml::SVM::Types::C_SVC); //C_SVC: n-class classification with a penalty parameter C
    svm->setKernel(cv::ml::SVM::KernelTypes::LINEAR); //LINEAR: no kernel mapping, linear discrimination in the original feature space
    svm->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, 100, 1e-6)); //termination criteria for the iterative training algorithm
    //Train the SVM
    svm->train(trainingDataMat, cv::ml::SampleTypes::ROW_SAMPLE, labelsMat);
    //Show the decision regions produced by the SVM
    Vec3b green(0, 255, 0), blue(255, 0, 0);
    for (int i = 0; i < image.rows; ++i)
    {
        for (int j = 0; j < image.cols; j++)
        {
            Mat sampleMat = (Mat_<float>(1, 2) << j, i);
            float response = svm->predict(sampleMat);

            if (response == 1)
                image.at<Vec3b>(i, j) = green;
            else if (response == -1)
                image.at<Vec3b>(i, j) = blue;
        }
    }
    int thickness = -1;
    int lineType = 8;
    Scalar c1 = Scalar::all(0); //samples labeled 1 drawn as black dots
    Scalar c2 = Scalar::all(255); //samples labeled -1 drawn as white dots
                                  //when drawing, Point takes (x, y): width (column) first, then height (row)
    for (int i = 0; i < labelsMat.rows; i++)
    {
        const float* v = trainingDataMat.ptr<float>(i); //pointer to the start of row i
        Point pt = Point((int)v[0], (int)v[1]);
        if (labels[i] == 1)
            circle(image, pt, 5, c1, thickness, lineType);
        else
            circle(image, pt, 5, c2, thickness, lineType);

    }

    thickness = 2;
    lineType = 8;
    //Show the support vectors
    Mat sv = svm->getUncompressedSupportVectors();
    cout << sv << endl;
    for (int i = 0; i < sv.rows; ++i)
    {
        const float* v = sv.ptr<float>(i);
        circle(image, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
    }
    imshow("SVM Simple Example", image);
}
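/*A minimal persistence sketch (my own addition; the file name "svm.xml" is an
arbitrary example, assuming OpenCV 3.x): a trained model can be written to disk
and loaded back later instead of retraining.*/
void svm_persistence_sketch(const cv::Ptr<cv::ml::SVM>& svm)
{
    svm->save("svm.xml"); //serialize the trained model to XML
    cv::Ptr<cv::ml::SVM> loaded = cv::Algorithm::load<cv::ml::SVM>("svm.xml");
    //predict one hypothetical sample with the reloaded model
    Mat sample = (cv::Mat_<float>(1, 2) << 300, 300);
    float r = loaded->predict(sample);
    cout << "reloaded SVM predicts label " << r << endl;
}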
void SVM_fornonliner()
{
    const int WIDTH = 512, HEIGHT = 512;
    Mat I = Mat::zeros(HEIGHT, WIDTH, CV_8UC3);

    //---------------------[1] Randomly build the training data---------------------------------------
    Mat trainData(200, 2, CV_32FC1);
    Mat labels(200, 1, CV_32SC1);
    RNG rng(100);
    //Linearly separable part of the training data
    int nLinearSamples = 90;
    //Generate random points for class 1
    Mat trainClass = trainData.rowRange(0, nLinearSamples); //rowRange returns a header that shares data with trainData, so filling it fills trainData
    //x coordinates of the points
    Mat c = trainClass.colRange(0, 1);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4*WIDTH));
    //y coordinates of the points
    c = trainClass.colRange(1, 2);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
    //Generate random points for class 2
    trainClass = trainData.rowRange(2 * 100- nLinearSamples, 2 * 100);
    //x coordinates of the points
    c = trainClass.colRange(0, 1);
    rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(WIDTH));
    //y coordinates of the points
    c = trainClass.colRange(1, 2);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
    //------------------Build the non-linearly-separable part of the training data---------------
    // Generate overlapping points for class 1 and class 2
    trainClass = trainData.rowRange(nLinearSamples, 2 * 100 - nLinearSamples);
    // x coordinates in [0.4*WIDTH, 0.6*WIDTH)
    c = trainClass.colRange(0, 1);
    rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
    // y coordinates span the full height
    c = trainClass.colRange(1, 2);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
    //------------------------- Assign the class labels ---------------------------------
    labels.rowRange(0, 100).setTo(1);
    labels.rowRange(100, 200).setTo(2);
    //------------------------ [2] Set the SVM parameters --------------------
    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    svm->setType(cv::ml::SVM::Types::C_SVC); //C_SVC: classification with a penalty parameter C
    svm->setC(1);
    svm->setKernel(cv::ml::SVM::KernelTypes::LINEAR); //a LINEAR kernel on purpose: with a finite C the soft margin tolerates the overlapping points
    svm->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, (int)1e7, 1e-6)); //termination criteria for the iterative training algorithm
                                                                                   //Train the SVM
    svm->train(trainData, cv::ml::SampleTypes::ROW_SAMPLE, labels);
    Vec3b green(0, 100, 0), blue(100, 0, 0);
    for (int i = 0; i < I.rows; ++i)
        for (int j = 0; j < I.cols; ++j)
        {
            Mat sampleMat = (Mat_<float>(1, 2) << i, j); //the sample is interpreted as (x = i, y = j)
            float response = svm->predict(sampleMat);

            if (response == 1)    I.at<Vec3b>(j, i) = green; //written at row j, column i, consistent with the sample above
            else if (response == 2)    I.at<Vec3b>(j, i) = blue;
        }
    //----------------------- [5] Show the training data --------------------------------------------
    int thick = -1;
    int lineType = 8;
    float px, py;
    // Class 1
    for (int i = 0; i < 100; ++i)
    {
        px = trainData.at<float>(i, 0);
        py = trainData.at<float>(i, 1);
        circle(I, Point((int)px, (int)py), 3, Scalar(0, 255, 0), thick, lineType);
    }
    // Class 2
    for (int i = 100; i <2 * 100; ++i)
    {
        px = trainData.at<float>(i, 0);
        py = trainData.at<float>(i, 1);
        circle(I, Point((int)px, (int)py), 3, Scalar(255, 0, 0), thick, lineType);
    }
    thick = 2;
    lineType = 8;
    Mat sv = svm->getUncompressedSupportVectors();

    for (int i = 0; i < sv.rows; ++i)
    {
        const float* v = sv.ptr<float>(i);
        circle(I, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), thick, lineType);
    }
    imshow("SVM for Non-Linear Training Data", I);
}
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
/*Face detection*/
void M_face_cascade_detectMultiScale()
{
    if (!face_cascade.load(face_cascade_name)) { printf("--(!)Error loading\n"); return; };
    if (!eyes_cascade.load(eyes_cascade_name)) { printf("--(!)Error loading\n"); return; };
    vector<Rect> faces;
    Mat face = imread("1.jpg", 1);
    Mat face_gray;
    cvtColor(face, face_gray, COLOR_BGR2GRAY);
    equalizeHist(face_gray, face_gray);
    face_cascade.detectMultiScale(face_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    for (size_t i = 0; i < faces.size(); i++)
    {
        Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
        ellipse(face, center, Size(faces[i].width / 2, faces[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 2, 8, 0);

        Mat faceROI = face_gray(faces[i]);
        std::vector<Rect> eyes;
        eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));

        for (size_t j = 0; j < eyes.size(); j++)
        {
            Point eye_center(faces[i].x + eyes[j].x + eyes[j].width / 2, faces[i].y + eyes[j].y + eyes[j].height / 2);
            int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
            circle(face, eye_center, radius, Scalar(255, 0, 0), 3, 8, 0);
        }
    }
    //-- show the final result
    imshow("res", face);
}
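/*A hedged sketch (my own addition) of running the same cascades on a live video
stream; it assumes the cascade files loaded above and that camera index 0 exists.*/
void face_detect_video_sketch()
{
    VideoCapture cap(0); //open the default camera
    if (!cap.isOpened()) return;
    for (;;)
    {
        Mat frame, gray;
        cap >> frame;
        if (frame.empty()) break;
        cvtColor(frame, gray, COLOR_BGR2GRAY);
        equalizeHist(gray, gray);
        vector<Rect> faces;
        face_cascade.detectMultiScale(gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
        for (size_t i = 0; i < faces.size(); i++)
            rectangle(frame, faces[i], Scalar(255, 0, 255), 2); //one box per detected face
        imshow("video faces", frame);
        if (waitKey(30) >= 0) break; //any key stops the loop
    }
}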
void test()
{
    int i = 1;
    i = i++; //undefined behavior before C++17: i is modified twice without sequencing, so the printed value depends on the compiler
    cout << i << endl;
}
int main()
{
    //Make_and_save_new_pic(); /*regenerate the image*/
    //desply("1.jpg"); /*read an image*/
    //writevid();  /*write a video*/
    //readvid(); /*read a video*/
    //merge(); /*image blending*/
    //track(img1, img2); /*create a trackbar*/
    //Mousetrack(); /*mouse events*/
    //tstVec(); /*test vector output*/
    //DrawEllipse(img1, 20.0); /*draw an ellipse*/
    //DrawFilledCircle(); /*draw a filled circle*/
    //DrawPolygon(); /*custom drawing function: concave polygon*/
    //DrawLine(); /*custom drawing function: lines*/
    //colorReduce(); /*color space reduction -- pointer access*/
    //colorReduce1(); /*color space reduction -- iterator access*/
    //colorReduce2(); /*color space reduction -- dynamic address access*/
    //spli_merge_Image(); /*split color channels; multi-channel blending*/
    //bright_contrast_adjust();/*adjust image contrast and brightness*/
    //convolveDFT(); /*discrete Fourier transform: convolve two 2-D real matrices*/ // ? not fully understood yet
    //DFT();/*discrete Fourier transform*/
    //wr_Xml();/*write XML/YAML files*/
    //re_Xml();/*read XML/YAML files*/
    //M_blur();/*filtering*/
    //blur_track();/*filtering with a trackbar*/
    //m_flood_full(); /*flood fill -- magic wand*/
    //Pyramid(); /*image pyramids*/
    //M_threshod(); /*thresholding*/ //each source pixel is compared with the threshold thresh; depending on the result, the destination pixel is set to 0, the original value, or the configured maximum maxValue
    //M_canny();/*Canny edge detection*/
    //M_sobel();/*Sobel operator / Scharr*/
    //M_Laplacian();/*Laplacian operator*/
    //M_Hough();/*Hough line transform*/
    //M_HoughC();/*Hough circle transform*/
    //M_remap();/*remapping*/
    //M_affine();/*affine transform*/
    //M_equalizeHist();/*histogram equalization*/
    //M_findContours();/*find contours*/
    //M_covexHull();/*convex hull detection*/
    //M_Contain_Contours(); /*enclose contours with polygons*/
    //M_moment();/*image moments*/
    //M_watershed();/*watershed algorithm*/
    //M_inpaint();/*image inpainting*/
    //M_HS();/*draw an H-S histogram*/
    //M_one_dim();/*draw a one-dimensional histogram*/
    //M_RGB();/*draw histograms of the R, G and B components*/
    //M_calcHist();/*histogram comparison*/
    //M_calcBackProject();/*back projection*/
    //M_matchTemplate();/*template matching*/
    //M_Harries();/*corner detection - Harris*/
    //M_goodFeaturesToTrack();/*corner detection - Shi-Tomasi*/ //a region of interest may be supplied to restrict the detection
    //M_cornerSubPix();/*corner detection - sub-pixel refinement*/
    //M_features();/*SURF keypoints matched with FlannBasedMatcher*/
    //M_findHomography_perspectiveTransform();//locate a known object: estimate the perspective transform matrix and apply it
    //M_ORB();/*ORB feature extraction*/
    //supportVector();//SVM   //note: fetch the support vectors with getUncompressedSupportVectors()
    //SVM_fornonliner();//SVM on non-linearly-separable data
    //M_face_cascade_detectMultiScale(); //face detection
    test();
    waitKey(0);
}