opencv学习系列:实例练习,含多个工程实例

/*-----------------------------------------------------------------------------------------------------------*/
//****16.opencv来采集USB摄像头图像,显示彩色、灰度图像时,可录制彩色和灰度图像和截灰度图****//
/*-----------------------------------------------------------------------------------------------------------*/
//注意:录制的时候只能选择不同的编解码器!!!
//操作:按“Esc”退出,“c”截图,“v”开始录制视频
#include<opencv2/opencv.hpp>

#include <iomanip>


using namespace std;
using namespace cv;

// Capture a USB camera, display color + gray views, optionally record both
// streams to AVI files, and snapshot the gray view.
// Keys: Esc = quit, 'c' = snapshot, 'v' = start recording, 's' = stop.
int main()
{
    VideoCapture cap(4); // lab camera index
    if (!cap.isOpened()) // bug fix: fail fast when the camera cannot be opened
    {
        cout << "Failed to open camera!" << endl;
        return -1;
    }
    VideoWriter wri;  // color recording -> libo_output
    VideoWriter wri2; // gray recording  -> libo_output
    // Query the frame geometry and report it on the console.
    int frameWidth, frameHeight;
    frameWidth = static_cast<int>(cap.get(CV_CAP_PROP_FRAME_WIDTH));
    frameHeight = static_cast<int>(cap.get(CV_CAP_PROP_FRAME_HEIGHT));
    cout << "帧宽:" << frameWidth << "像素" << endl;
    cout << "帧高:" << frameHeight << "像素" << endl;
    Size frameSize(frameWidth, frameHeight);
    // Destination files for the two recordings.
    string outFile = "../libo_output/outputColor.avi";
    string outFile2 = "../libo_output/outputGray.avi";
    // fourcc == -1 pops up a codec-selection dialog at run time (Windows only).
    // 10.0 is the FPS written into the container and controls playback speed.
    wri.open(outFile, -1, 10.0, frameSize, true);    // true  -> color output
    wri2.open(outFile2, -1, 10.0, frameSize, false); // false -> gray output
    int count = 0; // number of frames written to the output files
    if (!wri.isOpened())
    {
        cout << "写视频对象文件预打开操作失败" << endl; // bug fix: typo 问件 -> 文件
        return -2;
    }
    if (!wri2.isOpened())
    {
        cout << "写视频对象文件预打开操作失败" << endl; // bug fix: typo 问件 -> 文件
        return -3;
    }
    Mat frame, grayImage;
    int i = 0;         // snapshot counter used in the output file name
    bool flag = false; // true while recording
    while (true)
    {
        if (!cap.read(frame)) // stop when no more frames can be read
            break;

        cvtColor(frame, grayImage, CV_BGR2GRAY); // single-channel gray view

        imshow("【摄像头彩色图】", frame);
        imshow("【摄像头灰度图】", grayImage);
        // Bug fix: the original polled both waitKey(30) and cvWaitKey(1), so a
        // keypress could be consumed by the wrong call; poll exactly once.
        char key = static_cast<char>(waitKey(30));
        if (key == 27) // Esc
            break;
        if (key == 'c')
        {
            cout << "提取图像成功!………………" << endl;
            std::stringstream str;
            str << "F:\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
            std::cout << "提取的图像保存路径及文件名" << str.str() << endl;
            imwrite(str.str(), grayImage);
            imshow("截取图像显示", grayImage);
            i = i + 1;
        }
        if (key == 'v')
        {
            flag = true;
            cout << "开始录制视频" << endl;
        }
        if (key == 's')
        {
            flag = false;
            cout << "停止视频录制" << endl;
        }
        if (flag == true)
        {
            wri << frame;      // append the color frame
            wri2 << grayImage; // append the gray frame
            count++;
        }
    }
    cout << "写入输出的视频文件总帧数:" << count << endl;
    // Bug fix: release the device and finalize the output files explicitly.
    cap.release();
    wri.release();
    wri2.release();
    return 0;
}


/*-----------------------------------------------------------------------------------------------------------*/
//****17. opencv2采集免驱UVC设备图像-截图单通道灰度图像****//
/*-----------------------------------------------------------------------------------------------------------*/


#include<opencv2/opencv.hpp>

#include <iomanip>


using namespace std;
using namespace cv;

// Capture a driverless UVC camera with the C++ API and snapshot gray frames.
// Keys: Esc = quit, 'c' = save the current gray frame as F:\imgNN.jpg.
int main()
{
    VideoCapture capture(0);
    if (!capture.isOpened()) // bug fix: the device may be absent
    {
        cout << "Failed to open camera!" << endl;
        return -1;
    }
    Mat frame, grayImage;
    int i = 0; // snapshot counter used in the output file name
    while (true)
    {
        capture >> frame;
        if (frame.empty()) // bug fix: cvtColor asserts on an empty frame at stream end
            break;

        cvtColor(frame, grayImage, CV_BGR2GRAY); // single-channel gray view

        imshow("【摄像头彩色图】", frame);
        imshow("【摄像头灰度图】", grayImage);
        // Bug fix: poll the keyboard exactly once per iteration (the original
        // mixed waitKey(30) and cvWaitKey(1), which could swallow keypresses).
        char key = static_cast<char>(waitKey(30));
        if (key == 27) // Esc
            break;
        if (key == 'c')
        {
            cout << "提取图像成功!………………" << endl;
            std::stringstream str;
            str << "F:\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
            std::cout << "提取的图像保存路径及文件名" << str.str() << endl;
            imwrite(str.str(), grayImage);
            i = i + 1;
        }
    }

    return 0;
}


/*-----------------------------------------------------------------------------------------------------------*/
//****18. opencv1.0采集免驱UVC设备图像-截图单通道灰度图像  ****//
/*-----------------------------------------------------------------------------------------------------------*/
#include <opencv2/opencv.hpp>

#include <iomanip>//setfill,setw,setbase,setprecision等等,I/O流控制头文件,就像C里面的格式化输出一样,setw( n ) 设域宽为n个字符,setfill( 'c' ) 设填充字符为c。
//#include <opencv2/opencv.hpp>包含以下:
//#include <stdio.h> 
//#include <iostream>
//#include <sstream>//继承自<iostream>可用stringstream str;

using namespace std;
using namespace cv;

int main()
{
    IplImage* colorImg = NULL;
    IplImage* grayImg = NULL;
    int i = 0;
    CvCapture* pCapture = cvCreateCameraCapture(0);//初始化摄像头,参数可以用0  
    if (NULL == pCapture)
    {
        fprintf(stderr, "Can't init Camera!\n");
        return -1;
    }
    //cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_WIDTH, 640);//设置图像属性 宽和高  
    //cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_HEIGHT, 480);
    cvNamedWindow("colorTest", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("grayTest", CV_WINDOW_AUTOSIZE);
    int frameWidth, frameHeight;
    frameWidth = static_cast<int>(cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_WIDTH));
    frameHeight = static_cast<int>(cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_HEIGHT));
    cout << "设备默认输出帧宽:" << frameWidth << "像素" << endl;
    cout << "设备默认输出帧高:" << frameHeight << "像素" << endl;
    //double frameRate = cvGetCaptureProperty(pCapture, CV_CAP_PROP_FPS);//视频帧率
    //cout << "视频帧率:" << frameRate << "fps" << endl;

    grayImg = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1);//注意:不分配确定通道和大小,cvCvtColor()会出错!
    while (1)
    {
        colorImg = cvQueryFrame(pCapture);//获取下一帧
        if (!colorImg)
        {
            fprintf(stderr, "Can't get a frame\n");
            return -2;
        }

        cvCvtColor(colorImg, grayImg, CV_BGR2GRAY);
        cvShowImage("colorTest", colorImg);
        cvShowImage("grayTest", grayImg);
        char key = cvWaitKey(33);
        if (key == 27)
            break;
        if (key == 'c')
        {
            cout << "提取图像成功!………………" << endl;
            std::stringstream str;
            str << "F:\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
            std::cout << "提取的图像保存路径及文件名" << str.str() << endl;//循环一次自动析构
            Mat cameraPicture;
            //读入图像
            Mat frame(colorImg, false);//将C的IplImage结构转化为Mat结构,变量用于存储当前帧图像,false表示共用数据缓冲区
            cvtColor(frame, cameraPicture, CV_BGR2GRAY);
            imwrite(str.str(), cameraPicture);//保存的是从硬件得到的源格式图像
            imshow("截取图像显示", cameraPicture);
            i = i + 1;
        }
    }

    cvWaitKey(0);
    cvReleaseImage(&colorImg);
    cvReleaseImage(&grayImg);
    cvDestroyWindow("colorTest");
    cvDestroyWindow("grayTest");
    return 0;
}

/*-----------------------------------------------------------------------------------------------------------*/
//****19. 高度差检测测试1,这个程序有点乱****//
/*-----------------------------------------------------------------------------------------------------------*/

#include<opencv2/opencv.hpp>

#include <iomanip>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <vector>

using namespace std;
using namespace cv;

//Filter2D只对单通道进行滤波操作,如果对多通道进行滤波,可以先用Split将图像分解到单通道分别操作
// Prewitt edge detector. filter2D works on single-channel images; split a
// multi-channel image first if needed. Writes an 8-bit edge map into dst.
void prewitt(Mat &src, Mat &dst)
{
    dst.create(src.rows, src.cols, src.type());

    // 3x3 Prewitt kernels: horizontal-edge (y) and vertical-edge (x) responses.
    Mat kernelY = (Mat_<double>(3, 3) << 1, 1, 1, 0, 0, 0, -1, -1, -1);
    Mat kernelX = (Mat_<double>(3, 3) << -1, 0, 1, -1, 0, 1, -1, 0, 1);

    // Convolve in 16-bit signed space so negative responses survive.
    Mat gx, gy;
    filter2D(src, gx, CV_16S, kernelX, Point(-1, -1));
    filter2D(src, gy, CV_16S, kernelY, Point(-1, -1));

    // Back to 8-bit absolute values.
    Mat ax, ay;
    convertScaleAbs(gx, ax);
    convertScaleAbs(gy, ay);

    // Approximate the gradient magnitude with an equal-weight blend.
    addWeighted(ax, 0.5, ay, 0.5, 0, dst);
}

//Sobel核中已经融合进去了高斯平滑!核只可以为1 3 5 或7,其中1代表3*1的内核
// Sobel edge detector; the kernel already embeds Gaussian smoothing.
// Kernel size must be 1, 3, 5 or 7 (1 means a 3x1 kernel).
// Shows intermediate X/Y maps and the merged result, which is written to dst.
void sobelImage(Mat &src, Mat &dst)
{
    dst.create(src.rows, src.cols, src.type());

    Mat gx, gy, ax, ay;

    Sobel(src, gx, CV_16S, 1, 0, 3, 1, 0, BORDER_DEFAULT); // d/dx
    Sobel(src, gy, CV_16S, 0, 1, 3, 1, 0, BORDER_DEFAULT); // d/dy

    convertScaleAbs(gx, ax); // |d/dx| as 8-bit
    imshow("X方向Sobel效果图", ax);
    convertScaleAbs(gy, ay); // |d/dy| as 8-bit
    imshow("Y方向Sobel效果图", ay);

    // Merge the gradients with an equal-weight approximation.
    addWeighted(ax, 0.5, ay, 0.5, 0, dst);
    imshow("合并梯度后的Sobel效果图", dst);
}

//Scharr滤波器基本同Sobel,但是比Sobel算子更精确计算图像差分梯度,但是核只能为3*3
// Scharr edge detector: like Sobel but with more accurate derivative
// coefficients; the kernel is fixed at 3x3.
// Shows intermediate X/Y maps and the merged result, which is written to dst.
void scharrImage(Mat &src, Mat &dst)
{
    dst.create(src.rows, src.cols, src.type());

    Mat gx, gy, ax, ay;

    Scharr(src, gx, CV_16S, 1, 0, 1, 0, BORDER_DEFAULT); // d/dx
    Scharr(src, gy, CV_16S, 0, 1, 1, 0, BORDER_DEFAULT); // d/dy

    convertScaleAbs(gx, ax); // |d/dx| as 8-bit
    imshow("X方向Scharr效果图", ax);
    convertScaleAbs(gy, ay); // |d/dy| as 8-bit
    imshow("Y方向Scharr效果图", ay);

    // Merge the gradients with an equal-weight approximation.
    addWeighted(ax, 0.5, ay, 0.5, 0, dst);
    imshow("合并梯度后的Scharr效果图", dst);
}

//laplace二阶微分边缘检测,输入须为单通道8位灰度图像
// Laplacian (second-derivative) edge detector; input must be a single-channel
// 8-bit gray image. Shows and writes the 8-bit result into dst.
void laplaceImage(Mat &src, Mat &dst)
{
    dst.create(src.rows, src.cols, src.type());

    // 16-bit signed intermediate so negative responses are not clipped.
    Mat lap;
    Laplacian(src, lap, CV_16S, 3, 1, 0, BORDER_DEFAULT);

    // Absolute value, converted back to 8-bit.
    convertScaleAbs(lap, dst);
    imshow("图像Laplace变换检测边缘效果图", dst);
}

// Morphological thinning (skeletonisation) of a binary image in the style of
// the Zhang-Suen algorithm: each pass runs two sub-iterations that first mark,
// then delete boundary pixels whose removal does not break connectivity.
//
// src           : CV_8UC1 image whose foreground pixels are 1 (not 255!) and
//                 background pixels are 0 (see `if (p1 != 1) continue;`).
// maxIterations : -1 iterates until no pixel changes; otherwise an upper bound
//                 on the number of passes.
// returns       : a thinned copy of src; src itself is not modified.
cv::Mat thinImage(const cv::Mat & src, const int maxIterations = -1)
{
    assert(src.type() == CV_8UC1);
    cv::Mat dst;
    int width = src.cols;
    int height = src.rows;
    src.copyTo(dst);
    int count = 0;  // pass counter
    while (true)
    {
        count++;
        if (maxIterations != -1 && count > maxIterations) // pass budget exhausted
            break;
        std::vector<uchar *> mFlag; // pixels marked for deletion in this sub-iteration
        // First sub-iteration: mark deletable pixels.
        for (int i = 0; i < height; ++i)
        {
            uchar * p = dst.ptr<uchar>(i);
            for (int j = 0; j < width; ++j)
            {
                // Neighborhood layout around the candidate p1:
                //  p9 p2 p3
                //  p8 p1 p4
                //  p7 p6 p5
                uchar p1 = p[j];
                if (p1 != 1) continue;
                // Out-of-image neighbors read as 0. `p - dst.step`/`p + dst.step`
                // address the previous/next row (p points at row i; step = bytes/row).
                uchar p4 = (j == width - 1) ? 0 : *(p + j + 1);
                uchar p8 = (j == 0) ? 0 : *(p + j - 1);
                uchar p2 = (i == 0) ? 0 : *(p - dst.step + j);
                uchar p3 = (i == 0 || j == width - 1) ? 0 : *(p - dst.step + j + 1);
                uchar p9 = (i == 0 || j == 0) ? 0 : *(p - dst.step + j - 1);
                uchar p6 = (i == height - 1) ? 0 : *(p + dst.step + j);
                uchar p5 = (i == height - 1 || j == width - 1) ? 0 : *(p + dst.step + j + 1);
                uchar p7 = (i == height - 1 || j == 0) ? 0 : *(p + dst.step + j - 1);
                // Condition 1: between 2 and 6 foreground neighbors (not isolated, not interior).
                if ((p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9) >= 2 && (p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9) <= 6)
                {
                    // ap counts 0->1 transitions walking p2..p9 in circular order.
                    int ap = 0;
                    if (p2 == 0 && p3 == 1) ++ap;
                    if (p3 == 0 && p4 == 1) ++ap;
                    if (p4 == 0 && p5 == 1) ++ap;
                    if (p5 == 0 && p6 == 1) ++ap;
                    if (p6 == 0 && p7 == 1) ++ap;
                    if (p7 == 0 && p8 == 1) ++ap;
                    if (p8 == 0 && p9 == 1) ++ap;
                    if (p9 == 0 && p2 == 1) ++ap;

                    // Exactly one transition plus the first-sub-iteration edge tests.
                    if (ap == 1 && p2 * p4 * p6 == 0 && p4 * p6 * p8 == 0)
                    {
                        // Mark for deferred deletion (deleting now would bias the scan).
                        mFlag.push_back(p + j);
                    }
                }
            }
        }

        // Delete all marked pixels.
        for (std::vector<uchar *>::iterator i = mFlag.begin(); i != mFlag.end(); ++i)
        {
            **i = 0;
        }

        // If nothing was marked, the image is stable: done.
        if (mFlag.empty())
        {
            break;
        }
        else
        {
            mFlag.clear();// reuse the list for the second sub-iteration
        }

        // Second sub-iteration: mark deletable pixels (mirrored edge tests).
        for (int i = 0; i < height; ++i)
        {
            uchar * p = dst.ptr<uchar>(i);
            for (int j = 0; j < width; ++j)
            {
                // Same neighborhood layout:
                //  p9 p2 p3
                //  p8 p1 p4
                //  p7 p6 p5
                uchar p1 = p[j];
                if (p1 != 1) continue;
                uchar p4 = (j == width - 1) ? 0 : *(p + j + 1);
                uchar p8 = (j == 0) ? 0 : *(p + j - 1);
                uchar p2 = (i == 0) ? 0 : *(p - dst.step + j);
                uchar p3 = (i == 0 || j == width - 1) ? 0 : *(p - dst.step + j + 1);
                uchar p9 = (i == 0 || j == 0) ? 0 : *(p - dst.step + j - 1);
                uchar p6 = (i == height - 1) ? 0 : *(p + dst.step + j);
                uchar p5 = (i == height - 1 || j == width - 1) ? 0 : *(p + dst.step + j + 1);
                uchar p7 = (i == height - 1 || j == 0) ? 0 : *(p + dst.step + j - 1);

                if ((p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9) >= 2 && (p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9) <= 6)
                {
                    int ap = 0;
                    if (p2 == 0 && p3 == 1) ++ap;
                    if (p3 == 0 && p4 == 1) ++ap;
                    if (p4 == 0 && p5 == 1) ++ap;
                    if (p5 == 0 && p6 == 1) ++ap;
                    if (p6 == 0 && p7 == 1) ++ap;
                    if (p7 == 0 && p8 == 1) ++ap;
                    if (p8 == 0 && p9 == 1) ++ap;
                    if (p9 == 0 && p2 == 1) ++ap;

                    // Second-sub-iteration edge tests (p8/p2 variants).
                    if (ap == 1 && p2 * p4 * p8 == 0 && p2 * p6 * p8 == 0)
                    {
                        // Mark for deferred deletion.
                        mFlag.push_back(p + j);
                    }
                }
            }
        }

        // Delete all marked pixels.
        for (std::vector<uchar *>::iterator i = mFlag.begin(); i != mFlag.end(); ++i)
        {
            **i = 0;
        }

        // If nothing was marked, the image is stable: done.
        if (mFlag.empty())
        {
            break;
        }
        else
        {
            mFlag.clear();// reset for the next pass
        }
    }
    return dst;
}


// Height-difference detection experiment on a single gray test image:
// smooth, Prewitt edges, dilate, binarize, thin, then report a probe point.
int main() // bug fix: main must return int; void main() is non-standard
{
    Mat src = imread("../libo_resource/22.png", 0); // load as 8-bit gray
    if (src.empty()) // bug fix: imshow/blur would throw on a missing file
    {
        cout << "Failed to load ../libo_resource/22.png" << endl;
        return -1;
    }

    imshow("原图灰度图", src);

    Mat dst;

    // Time the processing of this one frame.
    double time = static_cast<double>(getTickCount());
    int middley = src.cols / 2; // probe column: the image centre
    int middlex = 0;

    blur(src, src, Size(3, 3)); // 3x3 mean filter for noise suppression
    imshow("滤波后的处理图像", src);

    // Edge detection on the single-channel gray image.
    prewitt(src, dst);
    imshow("边缘检测后的图像", dst);

    // Thicken the edges so thinning produces a connected skeleton.
    Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
    dilate(dst, dst, element);
    imshow("膨胀后的图像", dst);

    threshold(dst, dst, 30, 1, CV_THRESH_BINARY); // thinImage expects {0,1} pixels
    dst = thinImage(dst);
    dst = dst * 255; // rescale {0,1} to {0,255} for display

    // Bug fix: the original nested scan over every pixel only ever set
    // middlex to the last row index (whenever the probe column exists),
    // in O(rows*cols); compute that directly.
    if (src.rows > 1 && middley < src.cols)
        middlex = src.rows - 1;

    cout << "焊缝中心坐标=(" << middlex << "," << middley << ")" << endl;

    time = ((double)getTickCount() - time) / getTickFrequency();
    cout << "处理一帧的秒数cost time=" << time << endl;

    imshow("细化后的边缘检测图像", dst);

    waitKey();
    return 0;
}

/*-----------------------------------------------------------------------------------------------------------*/
//****20. 高度差检测测试 结果凑合能用****//
/*-----------------------------------------------------------------------------------------------------------*/

#include<opencv2/opencv.hpp>

#include <iomanip>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <vector>

using namespace std;
using namespace cv;

void prewittProcess(Mat &src, Mat &dst);
void sobelProcess(Mat &src, Mat &dst);
void scharrProcess(Mat &src, Mat &dst);
void laplaceProcess(Mat &src, Mat &dst);
void cannyProcess(Mat &src, Mat &dst);

// Play a video file (or a camera stream), smooth a fixed ROI of each gray
// frame, and show its Canny edge map. Any keypress stops playback.
int main()
{
    // Swap in VideoCapture capture(0) to read from a live camera instead.
    VideoCapture capture("../libo_resource/test.avi");
    if (!capture.isOpened())
    {
        std::cout << "No Video Input!" << std::endl;
        system("pause");
        return -1;
    }
    // Report the stream geometry and timing on the console.
    int frameWidth, frameHeight;
    frameWidth = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
    frameHeight = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
    cout << "帧宽:" << frameWidth << "像素" << endl;
    cout << "帧高:" << frameHeight << "像素" << endl;
    int originalFrameSum = capture.get(CV_CAP_PROP_FRAME_COUNT);
    cout << "原始视频文件的总帧数为:" << originalFrameSum << endl;
    double originalFPS = capture.get(CV_CAP_PROP_FPS);
    cout << "原始视频文件的帧率为:" << originalFPS << endl;

    Mat frame;
    Mat frameGray;
    Mat grayROI;
    Mat outPut;

    // Inter-frame delay in ms. Bug fix: some sources report FPS == 0, which
    // previously caused an integer division by zero; fall back to ~30 fps.
    int delay = (originalFPS > 0) ? static_cast<int>(1000 / originalFPS) : 33;
    bool stop(false); // loop control flag

    // Process every frame until the stream ends or a key is pressed.
    while (!stop)
    {
        if (!capture.read(frame)) // end of stream
            break;
        // Convert each frame to a single-channel gray image first.
        cv::cvtColor(frame, frameGray, CV_BGR2GRAY);
        imshow("Extracted grayFrame", frameGray);
        // Region of interest; shares pixel data with frameGray.
        // NOTE(review): assumes frames are at least 640x440 - confirm for new sources.
        grayROI = frameGray(Range(240, 440), Range(0, 640));

        // Smoothing: mean filter then a 9x9 median filter (slower but robust).
        blur(grayROI, grayROI, Size(5, 5));
        medianBlur(grayROI, grayROI, 9);
        imshow("平滑后的grayROI", grayROI);

        // Edge detection on the smoothed single-channel ROI.
        cannyProcess(grayROI, outPut);
        imshow("边缘检测后的grayROI", outPut);

        // Wait between frames; any keypress stops playback.
        if (cv::waitKey(delay) >= 0)
            stop = true;
    }
    waitKey(0);
    return 0;
}

//Filter2D只对单通道进行滤波操作,如果对多通道进行滤波,可以先用Split将图像分解到单通道分别操作
void prewittProcess(Mat &src, Mat &dst)
{
    //定义prewitt算子的模板  
    dst.create(src.rows, src.cols, src.type());

    Mat Kernelx, Kernely;
    Kernely = (Mat_<double>(3, 3) << 1, 1, 1, 0, 0, 0, -1, -1, -1);
    Kernelx = (Mat_<double>(3, 3) << -1, 0, 1, -1, 0, 1, -1, 0, 1);   //建立内核

    Mat grad_x, grad_y;
    Mat abs_grad_x, abs_grad_y, grad;

    filter2D(src, grad_x, CV_16S, Kernelx, Point(-1, -1));
    filter2D(src, grad_y, CV_16S, Kernely, Point(-1, -1));
    convertScaleAbs(grad_x, abs_grad_x);
    convertScaleAbs(grad_y, abs_grad_y);


    addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dst);

}

//Sobel核中已经融合进去了高斯平滑!核只可以为1 3 5 或7,其中1代表3*1的内核
// Sobel edge detector (kernel embeds Gaussian smoothing; size 1/3/5/7).
// Only the vertical (d/dy) gradient is computed here; dst gets its 8-bit
// absolute value.
void sobelProcess(Mat &src, Mat &dst)
{
    // 16-bit signed intermediate so negative responses are not clipped.
    Mat gy;
    Sobel(src, gy, CV_16S, 0, 1, 3, 1, 0, BORDER_DEFAULT); // d/dy only
    // Absolute value, converted back to 8-bit.
    convertScaleAbs(gy, dst);
}

//Scharr滤波器基本同Sobel,但是比Sobel算子更精确计算图像差分梯度,但是核只能为3*3
// Scharr edge detector: like Sobel but with more accurate derivative
// coefficients; the kernel is fixed at 3x3. Writes an 8-bit edge map into dst.
void scharrProcess(Mat &src, Mat &dst)
{
    dst.create(src.rows, src.cols, src.type());

    Mat gx, gy, ax, ay;

    Scharr(src, gx, CV_16S, 1, 0, 1, 0, BORDER_DEFAULT); // d/dx
    Scharr(src, gy, CV_16S, 0, 1, 1, 0, BORDER_DEFAULT); // d/dy

    convertScaleAbs(gx, ax); // |d/dx| as 8-bit
    convertScaleAbs(gy, ay); // |d/dy| as 8-bit

    // Merge the gradients with an equal-weight approximation.
    addWeighted(ax, 0.5, ay, 0.5, 0, dst);
}

//laplace二阶微分边缘检测,输入须为单通道8位灰度图像
// Laplacian (second-derivative) edge detector; input must be a single-channel
// 8-bit gray image. Writes the 8-bit result into dst.
void laplaceProcess(Mat &src, Mat &dst)
{
    dst.create(src.rows, src.cols, src.type());

    // 16-bit signed intermediate so negative responses are not clipped.
    Mat lap;
    Laplacian(src, lap, CV_16S, 3, 1, 0, BORDER_DEFAULT);

    // Absolute value, converted back to 8-bit.
    convertScaleAbs(lap, dst);
}

// Canny edge detector: Gaussian smoothing + Sobel gradients + hysteresis.
// 80 is the low threshold (edge linking), 240 the high threshold (strong
// edges), 3 the Sobel aperture. Writes the binary edge map into dst.
void cannyProcess(cv::Mat& img, cv::Mat& dst)
{
    cv::Canny(img, dst, 80, 240, 3);
}

/*-----------------------------------------------------------------------------------------------------------*/
//****21. OpenCV1.0播放视频并控制位置****//
/*--

#include <cv.h>  
#include <highgui.h>  

CvCapture* capture = NULL;  
int pos=0;  //视频位置  
void ON_Change(int n)  
{  
    cvSetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES,n);     //设置视频走到pos位置  
}  
int _tmain(int argc, _TCHAR* argv[])  
{  
    cvNamedWindow("show");  //新建一个窗口  
    capture  =cvCreateFileCapture("F:\\TDDOWNLOAD\\10.Little.Asians.16\\fle-10la16a.avi");//创建一个视频  
    int frames = (int)cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_COUNT);        //返回视频帧的总数  

    cvCreateTrackbar("frame","show",&pos,frames,ON_Change);//创建滚动条  
    IplImage * frame;//声明视频帧  
    while(1)  
    {  
        frame = cvQueryFrame(capture);  //获取下一帧图像  
        if (!frame)  
        {  
            break;          //如果不存在退出  
        }  
        pos++;              //播放一帧位置加1  
        //cvSetTrackbarPos("frame","show",pos);//设置进度条位置 加入此语句后视频会变卡  
        cvShowImage("show",frame);      //在窗口显示图像  
        char c = cvWaitKey(33);         //间隔33ms  
        if (c == 27)                    //如果按下Esc键中断   
         break;  

    }  
    cvReleaseCapture(&capture); //释放视频空间  
    cvDestroyWindow("show");    //销毁窗口  
}

/*-----------------------------------------------------------------------------------------------------------*/
//****21. 高度差检测测试 比上一个显示好一些****//
/*-----------------------------------------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------------------------------------*/
//****20. 高度差检测测试****//
/*-----------------------------------------------------------------------------------------------------------*/

#include<opencv2/opencv.hpp>

#include <iomanip>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <vector>
#include <iostream>

using namespace std;
using namespace cv;

void prewittProcess(Mat &src, Mat &dst);
void sobelProcess(Mat &src, Mat &dst);
void scharrProcess(Mat &src, Mat &dst);
void laplaceProcess(Mat &src, Mat &dst);
void cannyProcess(Mat &src, Mat &dst);

// Height-difference measurement on a video: for each frame, smooth a fixed
// ROI, take its Canny edge map, find the top and bottom edge pixels on a
// probe column, and overlay/report the pixel height difference.
int main()
{
    // Swap in VideoCapture capture(0) to read from a live camera instead.
    VideoCapture capture("../libo_resource/test.avi");
    if (!capture.isOpened())
    {
        std::cout << "No Video Input!" << std::endl;
        system("pause");
        return -1;
    }
    // Report the stream geometry and timing on the console.
    int frameWidth, frameHeight;
    frameWidth = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
    frameHeight = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
    cout << "帧宽:" << frameWidth << "像素" << endl;
    cout << "帧高:" << frameHeight << "像素" << endl;
    int originalFrameSum = capture.get(CV_CAP_PROP_FRAME_COUNT);
    cout << "原始视频文件的总帧数为:" << originalFrameSum << endl;
    double originalFPS = capture.get(CV_CAP_PROP_FPS);
    cout << "原始视频文件的帧率为:" << originalFPS << endl;

    Mat frame;
    Mat frameGray;
    Mat grayROI;
    Mat outPut;
    int framNumber = 0;          // processed-frame counter
    double time = 0;             // per-frame timer
    int labelX = frameWidth / 2; // probe column: half the original frame width
    int minHeight = 100;         // running minimum of the height difference
    int maxHeight = 0;           // running maximum of the height difference
    int heightDiff = 0;
    Point point[2];              // top [0] and bottom [1] edge points on the probe column

    char fpsString[30];          // overlay text buffer (frame counter + heightDiff)

    namedWindow("Extracted grayFrame", 1);
    namedWindow("平滑后的grayROI", 1);
    namedWindow("边缘检测后的grayROI", 1);

    // Inter-frame delay in ms. Bug fix: some sources report FPS == 0, which
    // previously caused an integer division by zero; fall back to ~30 fps.
    int delay = (originalFPS > 0) ? static_cast<int>(1000 / originalFPS) : 33;
    bool stop(false); // loop control flag

    // Process every frame until the stream ends or a key is pressed.
    while (!stop)
    {
        time = static_cast<double>(getTickCount()); // start the per-frame timer
        if (!capture.read(frame))
            break;
        // Convert each frame to a single-channel gray image first.
        cv::cvtColor(frame, frameGray, CV_BGR2GRAY);

        // Region of interest; shares pixel data with frameGray.
        // NOTE(review): assumes frames are at least 640x440 - confirm for new sources.
        grayROI = frameGray(Range(240, 440), Range(0, 640));

        // Smoothing: mean filter then an 11x11 median filter (slower but robust).
        blur(grayROI, grayROI, Size(5, 5));
        medianBlur(grayROI, grayROI, 11);
        imshow("平滑后的grayROI", grayROI);

        // Edge detection on the smoothed single-channel ROI.
        cannyProcess(grayROI, outPut);

        // Scan the probe column top-down and bottom-up for the first edge
        // pixels; note at<>() is (row, col).
        for (int i = 0; i < outPut.rows; i++)
        {
            if (outPut.at<uchar>(i, labelX) == 255)
            {
                point[0] = Point(labelX, i);
                break;
            }
        }
        for (int j = outPut.rows - 1; j > 0; j--)
        {
            if (outPut.at<uchar>(j, labelX) == 255)
            {
                point[1] = Point(labelX, j);
                break;
            }
        }

        // Bug fix: compute the height difference before drawing the overlay so
        // the on-screen number describes the current frame, not the previous one.
        heightDiff = point[1].y - point[0].y;
        if (heightDiff >= maxHeight)
            maxHeight = heightDiff;
        if (heightDiff <= minHeight)
            minHeight = heightDiff;

        // Mark the two edge points and connect them on the ROI views.
        circle(outPut, point[0], 5, Scalar(255, 255, 255), -1, 8);
        circle(outPut, point[1], 5, Scalar(255, 255, 255), -1, 8);
        circle(grayROI, point[0], 5, Scalar(255, 255, 255), -1, 8);
        circle(grayROI, point[1], 5, Scalar(255, 255, 255), -1, 8);
        line(outPut, point[0], point[1], cv::Scalar(255, 255, 255), 5, 8);

        // Draw on a BGR version of the full frame (ROI coords offset by +240 rows).
        cvtColor(frameGray, frameGray, CV_GRAY2BGR);
        circle(frameGray, point[0] + Point(0, 240), 5, Scalar(255, 0, 255), -1, 8);
        circle(frameGray, point[1] + Point(0, 240), 5, Scalar(255, 0, 255), -1, 8);
        line(frameGray,
            point[0] + Point(0, 240),
            point[1] + Point(0, 240),
            cv::Scalar(255, 0, 255),
            5,
            8);

        framNumber++;
        // Bug fix: snprintf bounds the write; sprintf could overrun fpsString.
        snprintf(fpsString, sizeof(fpsString), "%d heightDiff: %d", framNumber, heightDiff);
        std::string fpsString1("frameCounter: "); // rebuilt every frame on purpose
        fpsString1 += fpsString;

        // Write the counter/heightDiff overlay onto the output frame.
        putText(frameGray,
            fpsString1,
            Point(labelX / 5, 240),
            cv::FONT_HERSHEY_SIMPLEX,
            1,
            cv::Scalar(0, 0, 255),
            4);

        imshow("边缘检测后的grayROI", outPut);
        imshow("平滑后的grayROI", grayROI);
        imshow("Extracted grayFrame", frameGray);

        time = ((double)getTickCount() - time) / getTickFrequency();

        std::stringstream str;
        str << "处理第" << framNumber << "帧图像的";
        cout << str.str() << "heightDiff=" << heightDiff << "个像素" << endl;
        cout << str.str() << "frameROI cost time=" << time << endl;

        // Wait between frames; any keypress stops playback.
        if (cv::waitKey(delay) >= 0)
            stop = true;
    }

    cout << "最大高度差maxHeight=" << maxHeight << "个像素" << endl;
    cout << "最大高度差minHeight=" << minHeight << "个像素" << endl;
    waitKey(0);
    return 0;

}

//Filter2D只对单通道进行滤波操作,如果对多通道进行滤波,可以先用Split将图像分解到单通道分别操作
void prewittProcess(Mat &src, Mat &dst)
{
    //定义prewitt算子的模板  
    dst.create(src.rows, src.cols, src.type());

    Mat Kernelx, Kernely;
    Kernely = (Mat_<double>(3, 3) << 1, 1, 1, 0, 0, 0, -1, -1, -1);
    Kernelx = (Mat_<double>(3, 3) << -1, 0, 1, -1, 0, 1, -1, 0, 1);   //建立内核

    Mat grad_x, grad_y;
    Mat abs_grad_x, abs_grad_y, grad;

    filter2D(src, grad_x, CV_16S, Kernelx, Point(-1, -1));
    filter2D(src, grad_y, CV_16S, Kernely, Point(-1, -1));
    convertScaleAbs(grad_x, abs_grad_x);
    convertScaleAbs(grad_y, abs_grad_y);


    addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dst);

}

//Sobel核中已经融合进去了高斯平滑!核只可以为1 3 5 或7,其中1代表3*1的内核
// Sobel edge detector (kernel embeds Gaussian smoothing; size 1/3/5/7).
// Computes only the vertical (d/dy) gradient; dst receives its 8-bit
// absolute value.
void sobelProcess(Mat &src, Mat &dst)
{
    Mat dy; // 16-bit signed so negative responses survive
    Sobel(src, dy, CV_16S, 0, 1, 3, 1, 0, BORDER_DEFAULT);
    convertScaleAbs(dy, dst); // back to 8-bit absolute values
}

//Scharr滤波器基本同Sobel,但是比Sobel算子更精确计算图像差分梯度,但是核只能为3*3
// Scharr edge detector: Sobel-like but with more accurate derivative
// coefficients; kernel fixed at 3x3. Produces an 8-bit edge map in dst.
void scharrProcess(Mat &src, Mat &dst)
{
    dst.create(src.rows, src.cols, src.type());

    // 16-bit signed gradients in each direction.
    Mat dx, dy;
    Scharr(src, dx, CV_16S, 1, 0, 1, 0, BORDER_DEFAULT);
    Scharr(src, dy, CV_16S, 0, 1, 1, 0, BORDER_DEFAULT);

    Mat adx, ady;
    convertScaleAbs(dx, adx);
    convertScaleAbs(dy, ady);

    // Equal-weight blend approximates the gradient magnitude.
    addWeighted(adx, 0.5, ady, 0.5, 0, dst);
}

//laplace二阶微分边缘检测,输入须为单通道8位灰度图像
// Laplacian (second-derivative) edge detector; input must be a single-channel
// 8-bit gray image. dst receives the 8-bit absolute response.
void laplaceProcess(Mat &src, Mat &dst)
{
    dst.create(src.rows, src.cols, src.type());

    Mat response; // 16-bit signed so negative responses are not clipped
    Laplacian(src, response, CV_16S, 3, 1, 0, BORDER_DEFAULT);

    convertScaleAbs(response, dst); // absolute value, back to 8-bit
}

// Canny edge detector: Gaussian smoothing + Sobel gradients + hysteresis
// thresholding. Low threshold 80 links weak edges, high threshold 240 keeps
// strong ones, aperture 3 is the Sobel kernel size. Result goes to dst.
void cannyProcess(cv::Mat& img, cv::Mat& dst)
{
    cv::Canny(img, dst, 80, 240, 3);
}
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值