OpenCV图像基础操作一

Mat类分为了两个部分:

1.矩阵头:包含矩阵的尺寸、存储方法、存储地址等信息。

2.data就是指向矩阵数据的指针。

下面是VS调试显示属性:

 

flags:

flags是int类型,共占32位,

从低位到高位:

0-2位代表depth即数据类型(如CV_8U),OpenCV的数据类型共7类,故只需3位即可全部表示。

3-11位代表通道数channels,因为OpenCV默认最大通道数为512,故只需要9位即可全部表示,可参照下面求通道数的部分。

0-11位共同代表type即通道数和数据类型(如CV_8UC3),命名格式为CV_(位数)+(数据类型)+(通道数),其中数据类型有 U:无符号整数、S:有符号整数、F:浮点型。

12-13位

14位代表Mat的内存是否连续,一般由create创建的mat均是连续的,如果是连续,将加快对数据的访问。

15位代表该Mat是否为某一个Mat的submatrix,一般通过ROI以及row()、col()、rowRange()、colRange()等得到的mat均为submatrix。

16-31位代表magic signature,暂理解为用来区分数据结构的类型,例如区分Mat和SparseMat。

 

下面是一些图像的基本操作代码和注释,具体注释会陆续更新。

下面是基于QT的C++代码,用的OpenCV库:

opencv.pro文件如下:

# 2021.1.15 xuxiaopeng

QT       += core gui

greaterThan(QT_MAJOR_VERSION, 4): QT += widgets

CONFIG += c++11


DEFINES += QT_DEPRECATED_WARNINGS

#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000    # disables all the APIs deprecated before Qt 6.0.0

SOURCES += \
    main.cpp \
    mainwindow.cpp

HEADERS += \
    mainwindow.h

FORMS += \
    mainwindow.ui

# OpenCV headers: path where the prebuilt OpenCV package was extracted.
INCLUDEPATH += C:/opencv/build/include/ # (OpenCV extraction path + chosen include folder)

# Link the matching OpenCV "world" library: the d-suffixed one for debug
# builds, the plain one for release builds.
CONFIG(debug, debug|release): {
LIBS += -LC:/opencv/build/x64/vc15/lib/ -lopencv_world451d
} else:CONFIG(release, debug|release): {
LIBS += -LC:/opencv/build/x64/vc15/lib/ -lopencv_world451
}


# Default rules for deployment.
qnx: target.path = /tmp/$${TARGET}/bin
else: unix:!android: target.path = /opt/$${TARGET}/bin
!isEmpty(target.path): INSTALLS += target

头文件mainwindow.h:

#ifndef MAINWINDOW_H
#define MAINWINDOW_H

#include <QMainWindow>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>

// NOTE(review): using-directives in a header leak into every translation unit
// that includes it; kept only because the demo's .cpp files rely on them.
using namespace cv;
using namespace std;

QT_BEGIN_NAMESPACE
namespace Ui { class MainWindow; }
QT_END_NAMESPACE

// Demo window: each private slot demonstrates one basic OpenCV image operation.
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    MainWindow(QWidget *parent = nullptr);
    ~MainWindow();


private slots:
    Mat readimage();                  // load the primary test image
    Mat readimage2();                 // load the secondary test image
    void on_imagemask_clicked();      // sharpening mask
    void on_gray_clicked();           // grayscale conversion
    void on_equalization_clicked();   // histogram equalization (manual vs built-in)
    void on_createTrackbar_clicked(); // change brightness with a trackbar
    void on_mouse_clicked();          // mouse event handling
    void on_houghlines_clicked();     // straight-line detection
    void on_equalizehist_clicked();   // histogram equalization + histogram display
    void on_histmatch_clicked();      // histogram matching (specification)
    void on_inpaint_clicked();        // image inpainting
    void on_pushButton_clicked();     // background removal
    void on_mycvcolor_clicked();      // color-model conversion
    void on_splitandmerge_clicked();  // channel split and merge
    void on_minmaxloc_clicked();      // matrix min/max lookup
    void on_meanandstddev_clicked();  // mean and standard deviation
    void on_twomaxandMin_clicked();   // image comparison
    void on_threshold_clicked();      // image binarization
    void on_LuT_clicked();            // look-up-table mapping

private:
    Ui::MainWindow *ui;  // widget tree generated from mainwindow.ui
    Mat image;           // last image loaded by readimage()/readimage2()
};
#endif // MAINWINDOW_H

main.cpp

#include "mainwindow.h"
#include <QApplication>

int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    MainWindow w;
    w.show();
    return a.exec();
}

mainwindow.ui

 

 

 

 

mainwindow.cpp

#include "mainwindow.h"
#include "ui_mainwindow.h"

// Construct the main window and instantiate the widget tree that Qt's uic
// generated from mainwindow.ui.
MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);

}

// Release the generated UI object owned by this window.
MainWindow::~MainWindow()
{
    delete ui;
}

/********************************读取图像************************************/
Mat MainWindow::readimage()
{
    // Load the primary test image into the member `image`.  On failure a
    // diagnostic is printed and the (empty) Mat is returned unchanged, so
    // callers are expected to check for emptiness themselves.
    image = imread("E:/opencv4-c++/opencv-mask/picture/lena.png");
    if (image.empty())
        cout << "请确认图像文件名称是否正确" << endl;
    return image;
}

Mat MainWindow::readimage2()
{
    // Load the secondary test image into the member `image`.  Same contract
    // as readimage(): prints a diagnostic and returns an empty Mat on failure.
    image = imread("E:/opencv4-c++/opencv-mask/picture/bai.jpg");
    if (image.empty())
        cout << "请确认图像文件名称是否正确" << endl;
    return image;
}

/*********************************图像掩膜***********************************/

///
///
///图像掩膜就是中心像素乘5在减去左右上下的像素,达到增强图像的效果。
///
///

// Sharpen the test image with a hand-written 4-neighbour mask
// (dst = 5*center - left - right - up - down) and show source/result
// on the img1/img2 labels.  Also times the pixel loop.
void MainWindow::on_imagemask_clicked()
{
    Mat src,dst,temp;
    src=readimage();
    cout<<src.rows<<endl;                      // row count
    cout<<src.cols<<endl;                      // column count
//    cout<<src<<endl;                         // dump the whole pixel matrix (very verbose)
    Mat b(src,Range(10,600),Range(10,600));    // ROI view into src; assumes the image is at least 600x600 -- TODO confirm
    cv::namedWindow("截取图像",WINDOW_AUTOSIZE);
    cv::imshow("截取图像",b);

    ////
    ///
    /// QPixmap is hardware dependent; QImage is not.  QPixmap is optimized for
    /// on-screen painting, QImage for image I/O and direct pixel access.
    /// Loading a large image straight into a QPixmap scales it badly, so the
    /// usual pattern is: load into a QImage, then convert to QPixmap to paint.
    ///
    /// ////

    cvtColor(src, temp, COLOR_BGR2RGB);//BGR convert to RGB
    QImage Qtemp1 = QImage((const unsigned char*)(temp.data), temp.cols, temp.rows, temp.step, QImage::Format_RGB888);
    Qtemp1 = Qtemp1.scaled(ui->img1->size(), Qt::KeepAspectRatio, Qt::SmoothTransformation);// scale to the label size
    ui->img1->setPixmap(QPixmap::fromImage(Qtemp1));
    ui->img1->resize(Qtemp1.size());
    ui->img1->show();

    ////
    /// data:   uchar pointer to the first element of the Mat's pixel buffer.
    /// dims:   dimensionality of the Mat (2 for an ordinary image).
    /// size(): size structure; image.size().width==image.cols and
    ///         image.size().height==image.rows.
    ///
    /// depth:  per-channel element precision, independent of the channel
    ///         count.  Mat::depth() returns 0..6:
    ///         enum{CV_8U=0,CV_8S=1,CV_16U=2,CV_16S=3,CV_32S=4,CV_32F=5,CV_64F=6}
    ///
    /// elemSize:  size in bytes of one matrix element, all channels together:
    ///            CV_8UC1 -> 1, CV_8UC3/CV_8SC3 -> 3, CV_16UC3/CV_16SC3 -> 6.
    /// elemSize1: size in bytes of a single channel of one element, so
    ///            elemSize1 == elemSize / channels().
    /// step:    bytes per row (all elements and all channels of one row).
    /// step1(): step expressed in elemSize1 units: step1 == step / elemSize1.
    /// type:    element type + channel count, named CV_(bits)(type)C(channels).
    /// empty(): whether the Mat holds no data (e.g. a failed imread).
    /// rows / cols:  image height / width.
    /// channels():   number of channels.
    /// depth():      per-channel bit-depth code.
    ///
    ///

    double timeStart = (double)getTickCount();               // start of the timing window
    int    offsetx   = src.channels();                       // bytes per pixel (channel count)
    int    cols      = (src.cols - 1)*offsetx;               // last usable column index in BYTES (channel-interleaved layout)
    int    rows      = src.rows;                             // row count
           dst       = Mat::zeros(src.size(), src.type());   // output image: same size/type as src, initialized to black

    // Interior pixels only (rows/cols 1..n-2): the mask needs all 4 neighbours.
    for (int row = 1; row < (rows - 1); row++)
    {
        // Mat::ptr<uchar>(i) returns a pointer to row i (0-based); a value is
        // then addressed as ptr[col] in byte (per-channel) units.

        const uchar *previous = src.ptr<uchar>(row - 1);// row above
        const uchar *current  = src.ptr<uchar>(row);    // current row
        const uchar *next     = src.ptr<uchar>(row + 1);// row below
              uchar *output   = dst.ptr<uchar>(row);    // destination row

        for (int col = offsetx; col < cols; col++)
        {
            // current[col-offsetx] / current[col+offsetx]: same channel of the
            // left / right neighbour pixel (offsetx bytes per pixel).
            // previous[col] / next[col]: vertical neighbours.
            // saturate_cast clamps the result into the valid uchar range.
            output[col] = saturate_cast<uchar>(5 * current[col] - (current[col - offsetx] + current[col + offsetx] + previous[col] + next[col]));
        }
    }
    // Equivalent one-call version ///

    // OpenCV's filter2D performs the same mask operation:
    //Mat kernel = (Mat_<char>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    // call filter2D:
    //filter2D(src, dst, src.depth(), kernel);

    ///

    double timeconsume = ((double)getTickCount() - timeStart) / getTickFrequency();
    cout << "运行上面程序共耗时: " << timeconsume << endl;
    cvtColor(dst, temp, COLOR_BGR2RGB);//BGR convert to RGB
    QImage Qtemp = QImage((const unsigned char*)(temp.data), temp.cols, temp.rows, temp.step, QImage::Format_RGB888);
    Qtemp = Qtemp.scaled(ui->img2->size(), Qt::KeepAspectRatio, Qt::SmoothTransformation);
    ui->img2->setPixmap(QPixmap::fromImage(Qtemp));
    ui->img2->resize(Qtemp.size());
    ui->img2->show();
}

/**********************图像灰度化*************************/

void MainWindow::on_gray_clicked()
{
    Mat src,temp,gray;
    src = readimage();

    cvtColor(src,  temp, COLOR_BGR2GRAY);
    cvtColor(temp, temp, COLOR_GRAY2RGB); //BGR convert to RGB
    QImage Qtemp = QImage((const unsigned char*)(temp.data), temp.cols, temp.rows, temp.step, QImage::Format_RGB888);
    Qtemp = Qtemp.scaled(ui->img3->size(), Qt::KeepAspectRatio, Qt::SmoothTransformation);

    ui->img3->setPixmap(QPixmap::fromImage(Qtemp));
    ui->img3->resize(Qtemp.size());
    ui->img3->show();
}

/*******************图像均值化**************************/

void MainWindow::on_equalization_clicked()
{
    const int grayMax=255;
    vector<vector<int>>graylevel(grayMax+1);//二维数组
    Mat image=imread("../picture/112.jpg",0);
    //cout<<image<<endl;
    Mat img,src;
    image.copyTo(img);

    //Mat.ptr<uchar>(int i=0) 获取像素矩阵的指针,索引i表示第几行,从0开始计行数。
    //获取当前像素点P(row, col)的像素值 p(row, col) =current[col]
    //Mat.ptr<uchar>(row):获取第row行的图像像素指针。图像的行数从0开始计数
    //获取点P(row, col)的像素值:P(row.col) = Mat.ptr<uchar>(row)[col]

    for (int i = 0; i < image.rows-1; i++)
    {
        uchar* ptr=image.ptr<uchar>(i);//()表示行
        for (int j = 0; j < image.cols-1; j++)
        {
            int  x=ptr[j];//[]表示列
            cout<<x<<endl;
            graylevel[x].push_back(0);//这个地方写的不好,引入二维数组只是为了记录每一个灰度值的像素个数
            //push_back()在vector末尾加入0
        }
    }
    for (int i = 0; i < img.rows-1; i++)
    {
        uchar* imgptr=img.ptr<uchar>(i);
        uchar* imageptr=image.ptr<uchar>(i);
        for (int j = 0; j < img.cols-1; j++)
        {
            int sumpiexl=0;
            for (int k = 0; k < imageptr[j]; k++)
            {
                sumpiexl=graylevel[k].size()+sumpiexl;
            }
            imgptr[j]=(grayMax*sumpiexl/(image.rows*image.cols));
        }
    }

    equalizeHist(image,src);

    QImage Qtemp = QImage((const unsigned char*)(image.data), image.cols, image.rows, image.step, QImage::Format_Indexed8);
    Qtemp = Qtemp.scaled(ui->img1->size(), Qt::KeepAspectRatio, Qt::SmoothTransformation);
    ui->img1->setPixmap(QPixmap::fromImage(Qtemp));
    ui->img1->resize(Qtemp.size());
    ui->img1->show();

    QImage Qtemp2 = QImage((const unsigned char*)(src.data), src.cols, src.rows, src.step, QImage::Format_Indexed8);
    Qtemp2 = Qtemp2.scaled(ui->img2->size(), Qt::KeepAspectRatio, Qt::SmoothTransformation);
    ui->img2->setPixmap(QPixmap::fromImage(Qtemp2));
    ui->img2->resize(Qtemp2.size());
    ui->img2->show();//opencv自带

    QImage Qtemp3 = QImage((const unsigned char*)(img.data), img.cols, img.rows, img.step, QImage::Format_Indexed8);
    Qtemp3 = Qtemp3.scaled(ui->img3->size(), Qt::KeepAspectRatio, Qt::SmoothTransformation);
    ui->img3->setPixmap(QPixmap::fromImage(Qtemp3));
    ui->img3->resize(Qtemp3.size());
    ui->img3->show();//自己实现

//    imshow("原图",image);
//    imshow("opencv自带",src);
//    imshow("自己实现",img);
    waitKey(0);
}

/*************用滑动条来改变图像的亮度*************/

int value;           // trackbar position (0..600), read as a brightness percentage
Mat img1, img2;      // source image / brightness-adjusted copy, shared with the callback

// Trackbar callback: scale the source image by value/100 and redisplay it.
// The position is delivered through the global `value` bound in createTrackbar.
void callBack(int, void*)
{
    float a = value / 100.0;
    img2 = img1 * a;
    imshow("滑动条改变图像亮度", img2);
}

void MainWindow::on_createTrackbar_clicked()
{
    void callBack(int, void*);  //滑动条回调函数
    img1 = readimage();
    namedWindow("滑动条改变图像亮度");
    imshow("滑动条改变图像亮度", img1);
    createTrackbar("亮度值百分比", "滑动条改变图像亮度", &value, 600, callBack, 0);//创建滑动条
}

/******************鼠标响应事件***************************/

Mat img3, imgPoint;     // line-trail window image / pixel-trail window image
Point prePoint;         // previous mouse position, used to draw connected line segments
void mouse(int event, int x, int y, int flags, void*);  // mouse callback, defined below

void MainWindow::on_mouse_clicked()
{
    // Load the source image, keep an untouched copy for the pixel-trail
    // window, and register the mouse callback on window 1.
    img3 = readimage();
    if (!img3.data)
    {
        cout << "请确认输入图像名称是否正确! " << endl;
    }

    imgPoint = img3.clone();
    imshow("图像窗口 1", img3);
    imshow("图像窗口 2", imgPoint);
    setMouseCallback("图像窗口 1", mouse, nullptr);  // `mouse` handles the events
    waitKey(0);
}

// Mouse callback: while the left button is held and dragged, mark the trail
// as recolored pixels in window 2 and as connected line segments in window 1.
void mouse(int event, int x, int y, int flags, void*)
{
    if (event == EVENT_RBUTTONDOWN) // right click: trails are drawn with the left button
    {
        cout << "点击鼠标左键才可以绘制轨迹" << endl;
    }
    if (event == EVENT_LBUTTONDOWN) // left button down: remember the trail start point
    {
        prePoint = Point(x, y);
        cout << "轨迹起始坐标" << prePoint << endl;
    }

    if (event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON)) // drag with left button held
    {
        // Recolor the pixel under the cursor and its 4-neighbours.  Bounds are
        // checked so dragging along the image border no longer writes outside
        // the Mat (the original indexed x-1/x+1/y+1 unguarded), and the
        // original wrote (y+1, x) twice where (y-1, x) was clearly intended.
        auto paint = [](int row, int col) {
            if (row >= 0 && row < imgPoint.rows && col >= 0 && col < imgPoint.cols)
                imgPoint.at<Vec3b>(row, col) = Vec3b(0, 0, 255);
        };
        paint(y, x);
        paint(y, x - 1);
        paint(y, x + 1);
        paint(y + 1, x);
        paint(y - 1, x);  // was duplicated as (y+1, x) in the original
        imshow("图像窗口 2", imgPoint);

        // line(img, pt1, pt2, color, thickness, lineType, shift);
        // lineType 16 (LINE_AA) draws an anti-aliased segment.
        Point pt(x, y);
        line(img3, prePoint, pt, Scalar(100, 12, 255), 2, 16, 0);
        prePoint = pt;
        imshow("图像窗口 1", img3);
    }
}


/**********************检测直线并绘制直线*************************************/


void drawLine(Mat &img,    //要标记直线的图像
    vector<Vec2f> lines,   //检测的直线数据
    double rows,           //原图像的行数(高)
    double cols,           //原图像的列数(宽)
    Scalar scalar,         //绘制直线的颜色
    int n                  //绘制直线的线宽
)
{
    Point pt1, pt2;
    for (size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0];    //直线距离坐标原点的距离
        float theta = lines[i][1];  //直线过坐标原点垂线与x轴夹角
        double a = cos(theta);      //夹角的余弦值
        double b = sin(theta);      //夹角的正弦值
        double x0 = a*rho, y0 = b*rho;    //直线与过坐标原点的垂线的交点
        double length = max(rows, cols);  //图像高宽的最大值
                                          //计算直线上的一点
        pt1.x = cvRound(x0 + length  * (-b));
        pt1.y = cvRound(y0 + length  * (a));
        //计算直线上另一点
        pt2.x = cvRound(x0 - length  * (-b));
        pt2.y = cvRound(y0 - length  * (a));
        //两点绘制一条直线
        line(img, pt1, pt2, scalar, n);
    }
}
// Detect straight lines with the standard Hough transform at two accumulator
// thresholds and draw them on copies of the source image.
void MainWindow::on_houghlines_clicked()
{
    Mat img = imread("../picture/111.png", IMREAD_GRAYSCALE);
    if (img.empty())
    {
        cout << "请确认图像文件名称是否正确" << endl;
        return;  // the original fell through and crashed in Canny()
    }
    Mat edge;

    // Edge map, then binarize it.
    Canny(img, edge, 80, 180, 3, false);
    threshold(edge, edge, 170, 255, THRESH_BINARY);

    // Two detections with different accumulator thresholds (90 vs 120 votes).
    vector<Vec2f> lines1, lines2;
    HoughLines(edge, lines1, 1, CV_PI / 180, 90, 0, 0);
    HoughLines(edge, lines2, 1, CV_PI / 180, 120, 0, 0);

    // Draw the detected lines on copies of the original image.
    Mat img1, img2;
    img.copyTo(img1);
    img.copyTo(img2);
    drawLine(img1, lines1, edge.rows, edge.cols, Scalar(255), 2);
    drawLine(img2, lines2, edge.rows, edge.cols, Scalar(255), 2);

    imshow("edge", edge);
    imshow("img", img);
    imshow("img1", img1);
    imshow("img2", img2);
    waitKey(0);
}
/***************************************************************************/


// Normalize a 256-bin histogram in place (using the given norm type) and
// render it as a bar chart in its own window.
void drawHist(Mat &hist, int type, string name)
{
    const int hist_w = 512;  // canvas width
    const int hist_h = 400;  // canvas height
    const int width  = 2;    // width of one bin's bar
    Mat histImage = Mat::zeros(hist_h, hist_w, CV_8UC3);
    normalize(hist, hist, 1, 0, type, -1, Mat());
    for (int bin = 0; bin < hist.rows; ++bin)
    {
        const int barTop = hist_h - cvRound(hist_h * hist.at<float>(bin)) - 1;
        rectangle(histImage,
                  Point(width * bin, hist_h - 1),
                  Point(width * (bin + 1) - 1, barTop),
                  Scalar(255, 255, 255), -1);
    }
    imshow(name, histImage);
}
// Equalize a grayscale image with cv::equalizeHist and compare the
// before/after histograms via drawHist.
void MainWindow::on_equalizehist_clicked()
{
    Mat img = readimage();
    if (img.empty())
    {
        cout << "请确认图像文件名称是否正确" << endl;
        return;  // the original fell through and crashed in cvtColor()
    }
    Mat gray, hist, hist2;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    Mat equalImg;
    equalizeHist(gray, equalImg);  // histogram equalization
    // calcHist setup: one channel, 256 bins over [0, 255].
    const int channels[1] = { 0 };
    float inRanges[2] = { 0,255 };
    const float* ranges[1] = { inRanges };
    const int bins[1] = { 256 };
    calcHist(&gray, 1, channels, Mat(), hist, 1, bins, ranges);
    calcHist(&equalImg, 1, channels, Mat(), hist2, 1, bins, ranges);
    drawHist(hist, NORM_INF, "hist");
    drawHist(hist2, NORM_INF, "hist2");
    imshow("原图", gray);
    imshow("均衡化后的图像", equalImg);
    waitKey(0);
}

/**************************************************************/

// Histogram specification: remap img1's gray levels through a LUT so that its
// histogram approximates img2's.
void MainWindow::on_histmatch_clicked()
{
    Mat img1 = imread("../picture/112.jpg");
    Mat img2 = imread("../picture/110.jpg");
    if (img1.empty() || img2.empty())
    {
        cout << "请确认图像文件名称是否正确" << endl;
        return;  // the original fell through and crashed in calcHist()
    }
    Mat hist1, hist2;
    // Histograms of channel 0, 256 bins over [0, 255].
    const int channels[1] = { 0 };
    float inRanges[2] = { 0,255 };
    const float* ranges[1] = { inRanges };
    const int bins[1] = { 256 };
    calcHist(&img1, 1, channels, Mat(), hist1, 1, bins, ranges);
    calcHist(&img2, 1, channels, Mat(), hist2, 1, bins, ranges);
    // Normalize (in place) and display both histograms.
    drawHist(hist1, NORM_L2, "hist1");
    drawHist(hist2, NORM_L2, "hist2");
    // Cumulative distributions of the normalized histograms.
    float hist1_cdf[256] = { hist1.at<float>(0) };
    float hist2_cdf[256] = { hist2.at<float>(0) };
    for (int i = 1; i < 256; i++)
    {
        hist1_cdf[i] = hist1_cdf[i - 1] + hist1.at<float>(i);
        hist2_cdf[i] = hist2_cdf[i - 1] + hist2.at<float>(i);
    }
    // |CDF1(i) - CDF2(j)| for every source/target gray-level pair.
    // (256*256 floats = 256 KiB on the stack; within default limits.)
    float diff_cdf[256][256];
    for (int i = 0; i < 256; i++)
    {
        for (int j = 0; j < 256; j++)
        {
            diff_cdf[i][j] = fabs(hist1_cdf[i] - hist2_cdf[j]);
        }
    }

    // LUT: each source level i maps to the target level whose cumulative
    // probability is closest (row-wise minimum of the error matrix).
    Mat lut(1, 256, CV_8U);
    for (int i = 0; i < 256; i++)
    {
        float min = diff_cdf[i][0];
        int index = 0;
        for (int j = 1; j < 256; j++)
        {
            if (min > diff_cdf[i][j])
            {
                min = diff_cdf[i][j];
                index = j;
            }
        }
        lut.at<uchar>(i) = (uchar)index;
    }
    Mat result, hist3;
    LUT(img1, lut, result);
    imshow("待匹配图像", img1);
    imshow("匹配的模板图像", img2);
    imshow("直方图匹配结果", result);
    calcHist(&result, 1, channels, Mat(), hist3, 1, bins, ranges);
    drawHist(hist3, NORM_L2, "hist3");  // histogram after matching
    waitKey(0);
}

// Repair bright (near-white) defects in two images via mask-based inpainting.
void MainWindow::on_inpaint_clicked()
{
    Mat img1 = imread("../picture/2213.png");
    Mat img2 = imread("../picture/2214.png");
    if (img1.empty() || img2.empty())
    {
        cout << "请确认图像文件名称是否正确" << endl;
        return;  // the original fell through and crashed in cvtColor()
    }
    imshow("img1", img1);
    imshow("img2", img2);

    // Grayscale for thresholding.
    // NOTE(review): imread returns BGR, so COLOR_BGR2GRAY would be the
    // strictly correct conversion; RGB2GRAY only swaps the R/B weights and is
    // kept here to preserve the original mask exactly.
    Mat img1Gray, img2Gray;
    cvtColor(img1, img1Gray, COLOR_RGB2GRAY, 0);
    cvtColor(img2, img2Gray, COLOR_RGB2GRAY, 0);

    // Pixels brighter than 245 form the damage mask.
    Mat img1Mask, img2Mask;
    threshold(img1Gray, img1Mask, 245, 255, THRESH_BINARY);
    threshold(img2Gray, img2Mask, 245, 255, THRESH_BINARY);

    // Dilate the masks so they fully cover the damaged strokes.
    Mat Kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
    dilate(img1Mask, img1Mask, Kernel);
    dilate(img2Mask, img2Mask, Kernel);

    // Navier-Stokes based inpainting with a 5-pixel neighbourhood radius.
    Mat img1Inpaint, img2Inpaint;
    inpaint(img1, img1Mask, img1Inpaint, 5, INPAINT_NS);
    inpaint(img2, img2Mask, img2Inpaint, 5, INPAINT_NS);

    imshow("img1Mask", img1Mask);
    imshow("img1修复后", img1Inpaint);
    imshow("img2Mask", img2Mask);
    imshow("img2修复后", img2Inpaint);
    waitKey();
}

///去除背景/




// HSV window used by inRange() in on_pushButton_clicked:
// lower/upper bounds for hue, saturation and value.
int m_h_up = 107;
int m_h_down = 90;
int m_s_up = 153;
int m_s_down = 115;
int m_v_up = 255;
int m_v_down = 18;


// approxPolyDP tolerance, expressed as a fraction of the contour perimeter.
float EpsilonThd = 0.0008;

void SaveToDxf(vector<Point> approx);                               // defined elsewhere (not in this chunk)
void on_mouse(int EVENT, int x, int y, int flags, void* userdata);  // defined elsewhere (not in this chunk)

// Remove the background: segment a colored region via HSV thresholding, pick
// the largest contour and outline its polygon approximation on the source.
void MainWindow::on_pushButton_clicked()
{
    Mat image_source = imread("../picture/2213.png", IMREAD_COLOR);
    if (!image_source.data)
    {
        cout << "图像加载失败" << endl;
        return;  // the original fell through and crashed on the empty Mat
    }

    GaussianBlur(image_source, image_source, Size(3, 3), 0, 0);  // denoise

    Mat image_hsv, image_result;
    vector<Mat> hsvSplit;

    cvtColor(image_source, image_hsv, COLOR_BGR2HSV);  // BGR -> HSV

    split(image_hsv, hsvSplit);              // separate H, S, V
    equalizeHist(hsvSplit[2], hsvSplit[2]);  // equalize V to normalize contrast
    merge(hsvSplit, image_hsv);              // recombine

    // Keep only pixels inside the configured HSV window.
    inRange(image_hsv, Scalar(m_h_down, m_s_down, m_v_down), Scalar(m_h_up, m_s_up, m_v_up), image_result);

    // Morphological opening removes small speckles from the mask.
    Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(5, 5));
    morphologyEx(image_result, image_result, MORPH_OPEN, kernel, Point(-1, -1));

    vector<vector<Point>> contours;
    findContours(image_result, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
    if (contours.empty())
    {
        cout << "未检测到轮廓" << endl;  // the original indexed contours[imax] unchecked
        return;
    }

    int imax = 0;          // index of the largest contour
    int imaxcontour = -1;  // its (truncated) area

    for (size_t i = 0; i < contours.size(); i++)
    {
        if (contourArea(contours[i]) > imaxcontour)
        {
            imax = (int)i;
            imaxcontour = (int)contourArea(contours[i]);
        }
    }

    Mat image_display;
    vector<Point> approx;

    image_source.copyTo(image_display);
    // Polygon approximation; tolerance scales with the contour perimeter.
    approxPolyDP(Mat(contours[imax]), approx, arcLength(Mat(contours[imax]), true)*EpsilonThd, true);
    //drawContours(image_display, contours, imax, Scalar(0, 0, 255), 3);
    const Point* p = &approx[0];
    int m = (int)approx.size();

    polylines(image_display, &p, &m, 1, true, Scalar(0, 0, 255), 3);

    //SaveToDxf(approx);

    imshow("HSVDetect", image_display);
    namedWindow("Result", WINDOW_NORMAL);
    imshow("Result", image_result);
    namedWindow("Display", WINDOW_NORMAL);
    imshow("Display", image_display);
    namedWindow("image_hsv", WINDOW_NORMAL);
    imshow("image_hsv", image_hsv);
    imwrite("8.jpg", image_display);
    waitKey(40);

    waitKey();
}




//void MainWindow::on_pushButton_clicked()
//{
//    cv::Mat frame;
//    cv::Mat back;
//    cv::Mat fore;
//    cv::Ptr<BackgroundSubtractorMOG2> bg = createBackgroundSubtractorMOG2();//第一个参数为参考历史帧数
//    char path[100];
//    for (int i=3;i<242;)
//    {
//        sprintf_s(path, "../picture/110.jpg", i++);//读入图像
//        frame = imread(path,0);
//        int begin = clock();
//        bg->apply(frame, fore, 0.001);
//        //cv::erode(fore, fore, cv::Mat());//腐蚀
//        //cv::dilate(fore, fore, cv::Mat());//膨胀
//        //threshold(fore, fore, 80, 250, CV_THRESH_BINARY_INV);
//        printf("花费时间:%dms\n", clock() - begin);
//        cv::imshow("", fore);
//        waitKey(30);
//    }
//}

// 颜色模型转换 //
void MainWindow::on_mycvcolor_clicked()
{
    Mat img = readimage();
    Mat gray, HSV, YUV, Lab, img32;

    img.convertTo(img32, CV_32F, 1.0 / 255);  //将CV_8U类型转换成CV_32F类型
                                              //img32.convertTo(img, CV_8U, 255);  //将CV_32F类型转换成CV_8U类型
    cvtColor(img32, HSV, COLOR_BGR2HSV);
    cvtColor(img32, YUV, COLOR_BGR2YUV);
    cvtColor(img32, Lab, COLOR_BGR2Lab);
    cvtColor(img32, gray, COLOR_BGR2GRAY);
    imshow("原图", img32);
    imshow("HSV", HSV);
    imshow("YUV", YUV);
    imshow("Lab", Lab);
    imshow("gray", gray);
    waitKey(0);
}




// 图像分离与合并 //
// Demonstrate cv::split / cv::merge with both a C array of Mats and a
// vector<Mat> as the destination.
void MainWindow::on_splitandmerge_clicked()
{
    Mat img = readimage();
    Mat HSV;
    // NOTE(review): img comes from imread and is BGR; COLOR_RGB2HSV therefore
    // treats B as R.  Kept as in the original demo -- confirm if correctness matters.
    cvtColor(img, HSV, COLOR_RGB2HSV);
    Mat imgs0,   imgs1,   imgs2;            // results of the array-based split
    Mat imgv0,   imgv1,   imgv2;            // results of the vector-based split
    Mat result0, result1, result2;          // merged images

                                            // split/merge with a C array of Mats
    Mat imgs[3];
    split(img, imgs);
    imgs0 = imgs[0];
    imgs1 = imgs[1];
    imgs2 = imgs[2];
    imshow("RGB-B通道", imgs0);  // B channel after the split
    imshow("RGB-G通道", imgs1);  // G channel after the split
    imshow("RGB-R通道", imgs2);  // R channel after the split
    imgs[2] = img;              // make the channel counts deliberately inconsistent (1,1,3)
    merge(imgs, 3, result0);    // merge: result0 ends up with 5 channels
                                // imshow displays at most 4 channels -- inspect result0 in Image Watch
    Mat zero = cv::Mat::zeros(img.rows, img.cols, CV_8UC1);
    imgs[0] = zero;
    imgs[2] = zero;
    merge(imgs, 3, result1);     // only the G channel survives, so the merge renders green
    imshow("result1", result1);  // show the merged result

                                 // split/merge with a vector<Mat>
    vector<Mat> imgv;
    split(HSV, imgv);
    imgv0 = imgv.at(0);
    imgv1 = imgv.at(1);
    imgv2 = imgv.at(2);
    imshow("HSV-H通道", imgv0);  // H channel after the split
    imshow("HSV-S通道", imgv1);  // S channel after the split
    imshow("HSV-V通道", imgv2);  // V channel after the split
    imgv.push_back(HSV);  // make the channel counts deliberately inconsistent (1,1,1,3)
    merge(imgv, result2);  // merge: result2 ends up with 6 channels
                           // imshow displays at most 4 channels -- inspect result2 in Image Watch
    waitKey(0);
}


//像素最大最小
void MainWindow::on_minmaxloc_clicked()
{

    system("color F0");  //更改输出界面颜色
    float a[12] = { 1, 2, 3, 4, 5, 10, 6, 7, 8, 9, 10, 0 };
    Mat img = Mat(3, 4, CV_32FC1, a);  //单通道矩阵
    Mat imgs = Mat(2, 3, CV_32FC2, a);  //多通道矩阵
    cout<<imgs<<endl;
    double minVal, maxVal;  //用于存放矩阵中的最大值和最小值
    Point minIdx, maxIdx;  用于存放矩阵中的最大值和最小值在矩阵中的位置

    /*寻找单通道矩阵中的最值*/
    minMaxLoc(img, &minVal, &maxVal, &minIdx, &maxIdx);
    cout << "img中最大值是:" << maxVal << "  " << "在矩阵中的位置:" << maxIdx << endl;
    cout << "img中最小值是:" << minVal << "  " << "在矩阵中的位置:" << minIdx << endl;

    /*寻找多通道矩阵中的最值*/
    Mat imgs_re = imgs.reshape(1, 4);  //将多通道矩阵变成单通道矩阵
    cout<<imgs_re<<endl;
    minMaxLoc(imgs_re, &minVal, &maxVal, &minIdx, &maxIdx);
    cout << "imgs中最大值是:" << maxVal << "  " << "在矩阵中的位置:" << maxIdx << endl;
    cout << "imgs中最小值是:" << minVal << "  " << "在矩阵中的位置:" << minIdx << endl;

}


// 平均值与标准差 //
// Compute per-channel means with cv::mean and mean + standard deviation with
// cv::meanStdDev on small test matrices.
void MainWindow::on_meanandstddev_clicked()
{
    system("color F0");  // switch the console to a light color scheme
    float a[12] = { 1, 2, 3, 4, 5, 10, 6, 7, 8, 9, 10, 0 };
    Mat img = Mat(3, 4, CV_32FC1, a);   // single-channel matrix over `a`
    Mat imgs = Mat(2, 3, CV_32FC2, a);  // two-channel matrix over the same data

    // cv::mean: per-channel mean only.  (The original banner wrongly said
    // "meanStdDev" for this section too.)
    cout << "/* 用mean求取图像的均值 */" << endl;
    Scalar myMean;
    myMean = mean(imgs);
    cout << "imgs均值=" << myMean << endl;
    cout << "imgs第一个通道的均值=" << myMean[0] << "    "
        << "imgs第二个通道的均值=" << myMean[1] << endl << endl;

    // cv::meanStdDev: mean and standard deviation in one call.
    cout << "/* 用meanStdDev同时求取图像的均值和标准差 */" << endl;
    Mat myMeanMat, myStddevMat;

    meanStdDev(img, myMeanMat, myStddevMat);
    cout << "img均值=" << myMeanMat << "    " << endl;
    cout << "img标准差=" << myStddevMat << endl << endl;
    meanStdDev(imgs, myMeanMat, myStddevMat);
    cout << "imgs均值=" << myMeanMat << "    " << endl << endl;
    cout << "imgs标准差=" << myStddevMat << endl;
}

///图像比较/

void MainWindow::on_twomaxandMin_clicked()
{
    float a[12] = { 1, 2, 3.3, 4, 5, 9, 5, 7, 8.2, 9, 10, 2 };
        float b[12] = { 1, 2.2, 3, 1, 3, 10, 6, 7, 8, 9.3, 10, 1 };
        Mat imga = Mat(3, 4, CV_32FC1, a);
        Mat imgb = Mat(3, 4, CV_32FC1, b);
        Mat imgas = Mat(2, 3, CV_32FC2, a);
        Mat imgbs = Mat(2, 3, CV_32FC2, b);

        //对两个单通道矩阵进行比较运算
        Mat myMax, myMin;
        max(imga, imgb, myMax);
        min(imga, imgb, myMin);

        //对两个多通道矩阵进行比较运算
        Mat myMaxs, myMins;
        max(imgas, imgbs, myMaxs);
        min(imgas, imgbs, myMins);

        //对两张彩色图像进行比较运算
        Mat img0 = readimage();
        Mat img1 = readimage2();

        if (img0.empty() || img1.empty())
        {
            cout << "请确认图像文件名称是否正确" << endl;

        }
        Mat comMin, comMax;
        max(img0, img1, comMax);
        min(img0, img1, comMin);
        imshow("comMin", comMin);
        imshow("comMax", comMax);

        //与掩模进行比较运算
        Mat src1 = Mat::zeros(Size(512, 512), CV_8UC3);
        Rect rect(100, 100, 300, 300);
        src1(rect) = Scalar(255, 255, 255);  //生成一个低通300*300的掩模
        Mat comsrc1, comsrc2;
        min(img0, src1, comsrc1);
        imshow("comsrc1", comsrc1);

        Mat src2 = Mat(512, 512, CV_8UC3, Scalar(0, 0, 255));  //生成一个显示红色通道的低通掩模
        min(img0, src2, comsrc2);
        imshow("comsrc2", comsrc2);

        //对两张灰度图像进行比较运算
        Mat img0G, img1G, comMinG, comMaxG;
        cvtColor(img0, img0G, COLOR_BGR2GRAY);
        cvtColor(img1, img1G, COLOR_BGR2GRAY);
        max(img0G, img1G, comMaxG);
        min(img0G, img1G, comMinG);
        imshow("comMinG", comMinG);
        imshow("comMaxG", comMaxG);
        waitKey(0);
}


// 图像二值化 //
void MainWindow::on_threshold_clicked()
{
        Mat img = readimage();
        Mat gray;
        cvtColor(img, gray, COLOR_BGR2GRAY);
        Mat img_B, img_B_V, gray_B, gray_B_V, gray_T, gray_T_V, gray_TRUNC;

        //彩色图像二值化
        threshold(img, img_B, 125, 255, THRESH_BINARY);
        threshold(img, img_B_V, 125, 255, THRESH_BINARY_INV);
        imshow("img_B", img_B);
        imshow("img_B_V", img_B_V);

        //灰度图BINARY二值化
        threshold(gray, gray_B, 125, 255, THRESH_BINARY);
        threshold(gray, gray_B_V, 125, 255, THRESH_BINARY_INV);
        imshow("gray_B", gray_B);
        imshow("gray_B_V", gray_B_V);

        //灰度图像TOZERO变换
        threshold(gray, gray_T, 125, 255, THRESH_TOZERO);
        threshold(gray, gray_T_V, 125, 255, THRESH_TOZERO_INV);
        imshow("gray_T", gray_T);
        imshow("gray_T_V", gray_T_V);

        //灰度图像TRUNC变换
        threshold(gray, gray_TRUNC, 125, 255, THRESH_TRUNC);
        imshow("gray_TRUNC", gray_TRUNC);

        //灰度图像大津法和三角形法二值化
        Mat img_Thr = imread("E:/opencv4-c++/opencv-mask/picture/threshold.png", IMREAD_GRAYSCALE);
        Mat img_Thr_O, img_Thr_T;
        threshold(img_Thr, img_Thr_O, 100, 255, THRESH_BINARY | THRESH_OTSU);
        threshold(img_Thr, img_Thr_T, 125, 255, THRESH_BINARY | THRESH_TRIANGLE);
        imshow("img_Thr", img_Thr);
        imshow("img_Thr_O", img_Thr_O);
        imshow("img_Thr_T", img_Thr_T);

        //灰度图像自适应二值化
        Mat adaptive_mean, adaptive_gauss;
        adaptiveThreshold(img_Thr, adaptive_mean, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 55, 0);
        adaptiveThreshold(img_Thr, adaptive_gauss, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 55, 0);

        imshow("adaptive_mean", adaptive_mean);
        imshow("adaptive_gauss", adaptive_gauss);
        waitKey(0);
}



// 图像查找映射表 //
void MainWindow::on_LuT_clicked()
{
    // Build three single-channel look-up tables, merge them into one
    // 3-channel LUT, and apply them with cv::LUT.

    // First LUT layer: three-step quantization.
    uchar lutFirst[256];
    for (int level = 0; level < 256; ++level)
    {
        if (level <= 100)
            lutFirst[level] = 0;
        else if (level <= 200)
            lutFirst[level] = 100;
        else
            lutFirst[level] = 255;
    }
    Mat lutOne(1, 256, CV_8UC1, lutFirst);

    // Second LUT layer: four-step quantization.
    uchar lutSecond[256];
    for (int level = 0; level < 256; ++level)
    {
        if (level <= 100)
            lutSecond[level] = 0;
        else if (level <= 150)
            lutSecond[level] = 100;
        else if (level <= 200)
            lutSecond[level] = 150;
        else
            lutSecond[level] = 255;
    }
    Mat lutTwo(1, 256, CV_8UC1, lutSecond);

    // Third LUT layer: three-step quantization with a raised floor.
    uchar lutThird[256];
    for (int level = 0; level < 256; ++level)
    {
        if (level <= 100)
            lutThird[level] = 100;
        else if (level <= 200)
            lutThird[level] = 200;
        else
            lutThird[level] = 255;
    }
    Mat lutThree(1, 256, CV_8UC1, lutThird);

    // Merge the three tables into a single 3-channel LUT.
    vector<Mat> mergeMats;
    mergeMats.push_back(lutOne);
    mergeMats.push_back(lutTwo);
    mergeMats.push_back(lutThree);
    Mat LutTree;
    merge(mergeMats, LutTree);

    // Apply: gray image + 1-ch LUT, color image + 1-ch LUT (same table for
    // every channel), color image + 3-ch LUT (one table per channel).
    Mat img = readimage();
    Mat gray, out0, out1, out2;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    LUT(gray, lutOne, out0);
    LUT(img, lutOne, out1);
    LUT(img, LutTree, out2);
    imshow("out0", out0);
    imshow("out1", out1);
    imshow("out2", out2);
    waitKey(0);
}

 

 

 

 

 

 

 

 

 

 

  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值