****Reference examples of OpenCV programming (self-written + adapted from other sources)****
/*-----------------------------------------------------------------------------------------------------------*/
//****1. Read an image from a given path, display it, and save it to another path****//
/*-----------------------------------------------------------------------------------------------------------*/
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include<iostream>
using namespace std;
using namespace cv;
int main()
{
Mat img;
img = imread("../libo_resource/test.bmp", CV_LOAD_IMAGE_COLOR);//CV_LOAD_IMAGE_COLOR equals 1 (the default) and loads 3 channels; CV_LOAD_IMAGE_GRAYSCALE equals 0 and loads a single grayscale channel
//check that the image was actually loaded into memory
if (!img.data)
{
cout << "No Image Data:图像数据未成功读入,请核实读入路径及图像信息" << endl;
return -1;
}
namedWindow("display window", CV_WINDOW_AUTOSIZE);//CV_WINDOW_AUTOSIZE=1默认,可以全屏,但是图像大小不变,不会插值适应全屏,CV_WINDOW_NORMAL = 0x00000000, 即可以全屏(自动插值)
imshow("display window", img);
imwrite("../libo_output/output.bmp", img);//资源保存在.cpp源文件所在目录的上一级目录的一个文件夹
waitKey(0);//waiting forever
return 0;
}
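//Supplementary sketch (an assumption, not part of the example above): imwrite() returns a bool that is worth checking,
//because it fails when the output directory does not exist; an optional third parameter passes format options such as
//the JPEG quality. The function name main_imwriteCheck() is hypothetical so it does not clash with the main() above.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
int main_imwriteCheck()
{
    cv::Mat img = cv::imread("../libo_resource/test.bmp", CV_LOAD_IMAGE_GRAYSCALE);//load as grayscale this time
    if (!img.data)
        return -1;
    std::vector<int> params;
    params.push_back(CV_IMWRITE_JPEG_QUALITY);//JPEG quality flag of the OpenCV 2.x API
    params.push_back(95);                     //quality value in the range 0-100
    if (!cv::imwrite("../libo_output/output_gray.jpg", img, params))
        std::cout << "imwrite failed - check that ../libo_output/ exists" << std::endl;
    return 0;
}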
/*-----------------------------------------------------------------------------------------------------------*/
//****2. OpenCV2: grab frames from a camera and write them to a video file****//
/*-----------------------------------------------------------------------------------------------------------*/
//Note: the directory of the output video file must already exist, e.g. the folder in D:libo_output/output.avi has to be there.
//To open a video file correctly, the matching codec must be installed on the computer; otherwise VideoCapture cannot understand the video format.
//Testing shows that OpenCV cannot reliably read the total frame count and frame rate of MP4 files, while it reads both correctly for AVI files.
//waitKey in the display loop both provides a way to exit the loop and gives imshow enough time between calls so that the images actually appear.
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include<iostream>
#include <string>
using namespace std;
using namespace cv;
int main()
{
VideoCapture cap(0);
VideoWriter wri;
//frames grabbed from the device or file will be written into this video file
string outFile = "../libo_output/output.avi";
//get the frame width and height and print them to the console
int frameWidth, frameHeight;
frameWidth = static_cast<int>(cap.get(CV_CAP_PROP_FRAME_WIDTH));
frameHeight = static_cast<int>(cap.get(CV_CAP_PROP_FRAME_HEIGHT));
//cout << "总帧数:" << cap.get(CV_CAP_PROP_FRAME_COUNT)<<endl;//若读入为视频文件,可以输出视频文件总帧数
cout << "帧宽:" << frameWidth << "像素" << endl;
cout << "帧高:" << frameHeight << "像素" << endl;
Size frameSize(frameWidth, frameHeight); //Size(x,y)型,或用Size frameSize = Size((int)cap.get(CV_CAP_PROP_FRAME_WIDTH),(int)cap.get(CV_CAP_PROP_FRAME_HEIGHT));
//获得设备或文件原始帧率,并在控制台显示
double frameRate = cap.get(CV_CAP_PROP_FPS);
cout << "原始帧率:" << frameRate << "fps" << endl;//****注意***:当读出帧率为0时,最好不作为wri.open()的帧率设置参数,可以手动设置一个double值
//打开要写入的视频文件,准备写入!编解码方式参数设置为-1,表示代码运行时会弹出对话框,手动选择编解码方式
//当读出帧率为0时,可改为wri.open(outFile, -1, 25.0, frameSize, true);25.0对应的形参影响生产的文件的播放速度
wri.open(outFile, -1, 10.0, frameSize, true);
//wri.open(outFile, -1, frameRate, frameSize, true);//true means color output!
if (!wri.isOpened())
{
cout << "failed to open the output video file for writing" << endl;
return -2;
}
//open the camera or a video file (the constructor above already opened camera 0; this call simply reopens it)
cap.open(0);//open the camera with ID 0; to open a video file instead, use something like cap.open("test_02.wmv");
//this check is essential!
if (!cap.isOpened())
{
cout << "open failed! please check the device ID or the input file path";
return -1;
}
Mat frame;
namedWindow("USB设备图像采集", 1);//自适应设备图像大小
//在窗口显示图像和写入图像到之前打开的新文件!注意:显示帧率不同于设备帧率或者读入的视频文件原始帧率
bool stopDisplay = false;//是否继续显示的控制标志位,也可用if加break控制
int count = 0;
while (!stopDisplay)
{
if (!cap.read(frame))//try to read the next frame
break;//confirms that image data was really read into the Mat buffer; if (!frame.empty()) works too
imshow("USB camera capture", frame);
//write this frame to the output video file
wri << frame;//or wri.write(frame);
count++;
//switch for leaving the while loop
if (waitKey(40) > 0)//click the GUI window to give it focus, then press any key to satisfy the condition and stop the loop
stopDisplay = true;
}
cout << "写入输出的视频文件总帧数:" << count << endl;
//释放对象
waitKey(0);
cap.release();
wri.release();
return 0;
}
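//Supplementary sketch (an assumption, not part of the example above): instead of passing -1, which pops up a
//codec-selection dialog on Windows, an explicit FOURCC such as MJPG can be given to VideoWriter::open(); the
//hypothetical helper below also falls back to a manually chosen frame rate when the device reports 0 fps, as the
//comments above recommend.
#include <opencv2/opencv.hpp>
#include <string>
bool openMjpgWriter(cv::VideoWriter& wri, const std::string& outFile, cv::VideoCapture& cap, double fallbackFps = 25.0)
{
    double fps = cap.get(CV_CAP_PROP_FPS);
    if (fps <= 0)//many USB cameras report 0 here
        fps = fallbackFps;
    cv::Size frameSize(static_cast<int>(cap.get(CV_CAP_PROP_FRAME_WIDTH)),
                       static_cast<int>(cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
    return wri.open(outFile, CV_FOURCC('M', 'J', 'P', 'G'), fps, frameSize, true);//true = color output
}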
/*-----------------------------------------------------------------------------------------------------------*/
//****3. OpenCV 1.0: grab frames from a camera, write them to a video file, and save every frame as an image****//
/*-----------------------------------------------------------------------------------------------------------*/
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;
#define OUTPUT_VIDEO 1 //whether to save the captured video to the specified folder
#define OUTPUT_FRAME_IMAGES 1 //whether to additionally save every frame as an image to the specified folder
int main()
{
cvNamedWindow("图像Display", 1);//显示视频每帧图像窗口
CvCapture* pCapture = cvCreateCameraCapture(0);//-1表示采用默认ID号,从视频文件读入用CvCapture* pCapture = cvCaptureFromAVI("../libo_resource/test.avi");
//CvCapture* pCapture = cvCaptureFromCAM(-1);
//CvCapture* pCapture =cvCreateFileCapture("../libo_resource/test.avi");
//get the frame width and height and print them to the console
int frameWidth, frameHeight;
frameWidth = static_cast<int>(cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_WIDTH));
frameHeight = static_cast<int>(cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_HEIGHT));
cout << "帧宽:" << frameWidth << "像素" << endl;
cout << "帧高:" << frameHeight << "像素" << endl;
CvSize frameSize=cvSize(frameWidth,frameHeight); //Size(x,y)型,或用如下:
//Size frameSize = cvSize((int)cvGetCaptureProperty(pCapture,CV_CAP_PROP_FRAME_WIDTH),(int)cvGetCaptureProperty(pCapture,CV_CAP_PROP_FRAME_HEIGHT));
#if OUTPUT_VIDEO
CvVideoWriter* pWri = NULL;
pWri = cvCreateVideoWriter("../libo_output/output1.avi", -1, 25.0, frameSize,1);
#endif
double frameRate = cvGetCaptureProperty(pCapture, CV_CAP_PROP_FPS);//video frame rate
cout << "video frame rate: " << frameRate << " fps" << endl;
//cvSetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_WIDTH, 640);
//cvSetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_HEIGHT,480);
IplImage* pFrame = NULL;
int count = 0;
int count1 = 0;
#if OUTPUT_FRAME_IMAGES
char pathAndFileName[40];
#endif
while (1)
{
pFrame = cvQueryFrame(pCapture);
if (!pFrame)
break;
cvShowImage("图像Display", pFrame);
char c = cvWaitKey(60);
if (c >= 0)
break;
#if OUTPUT_VIDEO
cvWriteFrame(pWri, pFrame);//或者
count++;
#endif
#if OUTPUT_FRAME_IMAGES
count1++;
sprintf(pathAndFileName, "../libo_output/%05d.jpg", count1);//usage example: sprintf(image_name, "%s%d%s", "image", i++, ".bmp");//name of the saved image
cvSaveImage(pathAndFileName, pFrame);
#endif
//if (count == frameSum) break;//when reading from a video file instead of a device, this line can be used to leave the loop
}
#if OUTPUT_VIDEO
cout << "写入输出的视频文件总帧数:" << count << endl;
#endif
cvWaitKey(0);
//释放对象
cvReleaseCapture(&pCapture);
#if OUTPUT_VIDEO
cvReleaseVideoWriter(&pWri);
#endif
//cvReleaseImage(&pFrame);
cvDestroyWindow("图像Display");
return 0;
}
/*-----------------------------------------------------------------------------------------------------------*/
//****4. Modular functions: grab every frame of a video file, process it with the Sobel operator (OpenCV 2.0-style code), and save each result****//
/*-----------------------------------------------------------------------------------------------------------*/
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
#include <string>
using namespace std;
using namespace cv;
int frameSum;//total number of frames in the video
int fps;//frames per second of the video
int frameW;//video width
int frameH;//video height
//the input must be a 3-channel color image; each call processes one image and writes the result to the given location
int ImageToSobel(IplImage* img, string filePathName)
{
Mat src(img, 0);//0 means the data buffer is shared, not copied
char* window_name = "Sobel Demo - Simple Edge Detector";
int scale = 1;
int delta = 0;
int ddepth = CV_16S;//prevents overflow of the gradient values
if (!src.data){ return -1; }
GaussianBlur(src, src, Size(3, 3), 0, 0, BORDER_DEFAULT);
/// convert to grayscale
Mat src_gray;
cvtColor(src, src_gray, CV_BGR2GRAY);//OpenCV stores frames as BGR, so CV_BGR2GRAY is the matching conversion code
/// create the display window
namedWindow(window_name, CV_WINDOW_AUTOSIZE);
/// create the grad_x and grad_y matrices
Mat grad_x, grad_y;
Mat abs_grad_x, abs_grad_y;
//use a 3x3 kernel; the results are scaled back into the 0-255 range
/// gradient in the X direction
//Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
Sobel(src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_x, abs_grad_x);
/// gradient in the Y direction
//Scharr( src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT );
Sobel(src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_y, abs_grad_y);
/// combine the gradients (approximation)
Mat gradImage;
addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, gradImage);
//show the image
imshow(window_name, gradImage);
//save the image
imwrite(filePathName, gradImage);
return 0;
}
void VideoToImage(char* filename, string filePathName)//filename is the path of the input AVI file, filePathName is the output directory
{
string filePathName1 = filePathName;//back up the output directory
printf("------------- video to image ... ----------------\n");
//initialize a capture object for the video file
CvCapture* capture = cvCaptureFromAVI(filename);
//read the video properties
cvQueryFrame(capture);
frameH = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
frameW = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
fps = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
frameSum = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
printf("\tvideo height : %d\n\tvideo width : %d \n\tfps : %d\n\tframe numbers : %d\n", frameH, frameW, fps, frameSum);
//define and initialize variables
int i = 1;
IplImage* img = 0;
cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE);
//read and display
while (1)
{
img = cvQueryFrame(capture); //grab one frame
cvShowImage("mainWin", img); //display it
if (cvWaitKey(20) >= 0) break;
char imageName1[30];
sprintf(imageName1, "%s%05d%s", "image", i++, ".bmp");//name of the saved image
string imageName = imageName1;
filePathName = filePathName + imageName;
// cvSaveImage( image_name, img); //save one frame directly
ImageToSobel(img, filePathName);
filePathName = filePathName1;
if (i == frameSum) break;
}
cvReleaseCapture(&capture);
cvDestroyWindow("mainWin");
}
int main()
{
char *fileName="../libo_resource/test.avi";
string filePathName = "../libo_output/";
VideoToImage(fileName, filePathName);
waitKey(0);
return 0;
}
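//Supplementary sketch (an assumption): the example above is titled "OpenCV 2.0" but still grabs frames through the C
//CvCapture API; the same loop written with cv::VideoCapture, feeding the ImageToSobel() function defined above, could
//look like this (it reuses the includes and using-directives of this example).
int VideoToSobelCpp(const string& videoFile, const string& outDir)
{
    VideoCapture capture(videoFile);
    if (!capture.isOpened())
        return -1;
    Mat frame;
    int i = 1;
    char imageName[30];
    while (capture.read(frame))
    {
        sprintf(imageName, "image%05d.bmp", i++);//numbered name of the output image
        IplImage ipl = frame;                    //header only, shares the frame's data buffer
        ImageToSobel(&ipl, outDir + imageName);  //reuse the Sobel routine from this example
        if (waitKey(20) >= 0)                    //any key stops early
            break;
    }
    return 0;
}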
/*-----------------------------------------------------------------------------------------------------------*/
//****5. OpenCV 1.0: capture images from a driver-free UVC device and snapshot single-channel grayscale images ****//
/*-----------------------------------------------------------------------------------------------------------*/
#include <opencv2/opencv.hpp>
#include <iomanip>//setfill, setw, setbase, setprecision, etc.; I/O stream manipulators similar to C-style formatted output: setw(n) sets the field width to n characters, setfill('c') sets the fill character to c.
//#include <opencv2/opencv.hpp> already pulls in the following:
//#include <stdio.h>
//#include <iostream>
//#include <sstream>//works on top of <iostream>; enables stringstream str;
using namespace std;
using namespace cv;
int main()
{
IplImage* colorImg = NULL;
IplImage* grayImg = NULL;
int i = 0;
CvCapture* pCapture = cvCreateCameraCapture(0);//initialize the camera; 0 selects the first device
if (NULL == pCapture)
{
fprintf(stderr, "Can't init Camera!\n");
return -1;
}
//cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_WIDTH, 640);//set the image width and height
//cvSetCaptureProperty(cam, CV_CAP_PROP_FRAME_HEIGHT, 480);
cvNamedWindow("colorTest", CV_WINDOW_AUTOSIZE);
cvNamedWindow("grayTest", CV_WINDOW_AUTOSIZE);
int frameWidth, frameHeight;
frameWidth = static_cast<int>(cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_WIDTH));
frameHeight = static_cast<int>(cvGetCaptureProperty(pCapture, CV_CAP_PROP_FRAME_HEIGHT));
cout << "设备默认输出帧宽:" << frameWidth << "像素" << endl;
cout << "设备默认输出帧高:" << frameHeight << "像素" << endl;
//double frameRate = cvGetCaptureProperty(pCapture, CV_CAP_PROP_FPS);//视频帧率
//cout << "视频帧率:" << frameRate << "fps" << endl;
grayImg = cvCreateImage(cvSize(frameWidth, frameHeight), IPL_DEPTH_8U, 1);//注意:不分配确定通道和大小,cvCvtColor()会出错!
while (1)
{
colorImg = cvQueryFrame(pCapture);//获取下一帧
if (!colorImg)
{
fprintf(stderr, "Can't get a frame\n");
return -2;
}
cvCvtColor(colorImg, grayImg, CV_BGR2GRAY);
cvShowImage("colorTest", colorImg);
cvShowImage("grayTest", grayImg);
char key = cvWaitKey(33);
if (key == 27)
break;
if (key == 'c')
{
cout << "提取图像成功!………………" << endl;
std::stringstream str;
str << "F:\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
std::cout << "提取的图像保存路径及文件名" << str.str() << endl;//循环一次自动析构
Mat cameraPicture;
//读入图像
Mat frame(colorImg, false);//将C的IplImage结构转化为Mat结构,变量用于存储当前帧图像,false表示共用数据缓冲区
cvtColor(frame, cameraPicture, CV_BGR2GRAY);
imwrite(str.str(), cameraPicture);//保存的是从硬件得到的源格式图像
imshow("截取图像显示", cameraPicture);
i = i + 1;
}
}
cvWaitKey(0);
//cvReleaseImage(&colorImg);//do not release this: the frame returned by cvQueryFrame() is owned by the capture object
cvReleaseImage(&grayImg);
cvDestroyWindow("colorTest");
cvDestroyWindow("grayTest");
return 0;
}
/*-----------------------------------------------------------------------------------------------------------*/
//****6. OpenCV2: capture images from a driver-free UVC device and snapshot single-channel grayscale images****//
/*-----------------------------------------------------------------------------------------------------------*/
#include<opencv2/opencv.hpp>
#include <iomanip>
using namespace std;
using namespace cv;
int main()
{
VideoCapture capture(0);
Mat frame, grayImage;
int i = 0;
int key = 0;
while ((key = waitKey(30)) != 27)//press ESC to leave the loop
{
capture >> frame;
if (frame.empty())//stop when no frame could be grabbed
break;
cvtColor(frame, grayImage, CV_BGR2GRAY);//grayImage is a single-channel grayscale image
imshow("camera color image", frame);
imshow("camera grayscale image", grayImage);
//reuse the key returned by the waitKey above; a second cvWaitKey(1) here would usually miss the key press
if (key == 'c')
{
cout << "提取图像成功!………………" << endl;
std::stringstream str;
str << "F:\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
std::cout << "提取的图像保存路径及文件名" << str.str() << endl;
imwrite(str.str(), grayImage);//
i = i + 1;
}
}
return 0;
}
/*-----------------------------------------------------------------------------------------------------------*/
//****7. OpenCV 1.0 plus the videoInput library: capture camera images, with saving of single-channel grayscale snapshots (the videoInput library must be configured)****//
/*-----------------------------------------------------------------------------------------------------------*/
/*
Note:
If the capture card is a Tianmin card and the video source is an S-VIDEO signal, the card output should be set up with: VI.setupDevice(device1, 320, 240, VI_S_VIDEO);
What this example demonstrates:
it prints the default output image size of the capture device and allows changing it;
it can set the video standard, e.g. PAL, NTSC, SECAM (grayscale image);
it can set the frame rate: videoInput defaults to 30 fps, here it is set to 25 fps;
*/
/*
videoInput ships with more detailed information on its member functions and simple usage examples.
The public member functions of the videoInput class are summarized here; a related class is videoDevice.
setVerbose: switch controlling whether messages are printed to the console
listDevices: prints the available video devices
getDeviceName: returns the name of a device (after listDevices)
setUseCallback: sets the callback used for video capture
setIdealFramerate: adjusts the capture frame rate (default 30 fps; it can be changed but is not guaranteed -- DirectShow tries a nearby rate),
setAutoReconnectOnFreeze: reconnects a device automatically so it does not freeze or go to sleep
setupDevice: opens the device; the video standard can then be set
by calling setFormat (in the code below setFormat is called after setupDevice)
isFrameNew: checks whether a new frame has arrived
isDeviceSetup: checks whether the device has been opened
getPixels: returns the image data (note: the data comes back as an unsigned char pointer)
showSettingsWindow: shows the device settings dialog
the video settings are controlled by setVideoSettingFilter, setVideoSettingFilterPct, getVideoSettingFilter, setVideoSettingCamera, setVideoSettingCameraPct, getVideoSettingCamera,
the frame width, height and buffer size are returned by getWidth, getHeight, getSize,
stopDevice: stops the device
restartDevice: restarts the device
*/
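//Supplementary sketch (an assumption -- it only uses the member functions listed above; the fully annotated program
//follows below): the minimal call order is listDevices -> setupDevice -> isFrameNew / getPixels in a loop -> stopDevice.
#include "videoInput.h"
void minimalVideoInputLoop()
{
    videoInput VI;
    if (VI.listDevices() == 0)                         //prints the available capture devices
        return;
    int device1 = 0;
    VI.setIdealFramerate(device1, 25);                 //requested, not guaranteed
    VI.setupDevice(device1, 640, 480);                 //open the first device at 640x480
    unsigned char* buffer = new unsigned char[VI.getSize(device1)];
    int grabbed = 0;
    while (grabbed < 100)                              //stop after 100 new frames
    {
        if (VI.isFrameNew(device1))
        {
            VI.getPixels(device1, buffer, false, true);//BGR order for OpenCV, flipped vertically
            ++grabbed;                                 //a real program would display or save the frame here
        }
    }
    VI.stopDevice(device1);
    delete[] buffer;
}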
/*
Notes on configuring the videoInput library:
This example is written against an older OpenCV version. The videoInput.h and videoInput.cpp used here come from the author's project files on GitHub,
because the latest release, videoInput0.1995, only ships videoInput.h, and the videoInput.cpp of its library-build sample depends on "qedit.h" and produces errors;
the GitHub project files already place the definitions that depended on "qedit.h" inside videoInput.cpp.
*/
#include<opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <vector>
#include <iomanip>
#include "videoInput.h"
using namespace cv;
using namespace std;
int main(int argc, char ** argv)
{
// allocate a 640x480 image buffer for the frames that will be read out
IplImage *p_iplFrame = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
Mat cameraPicture;//important: holds the grayscale image as a Mat for the newer OpenCV API
// create the video capture object
videoInput VI;
int i = 0;
int numDevices = VI.listDevices();//also prints the available capture devices
cout << "number of capture devices found: " << numDevices << endl;
//VI.getDeviceName(int deviceID);//use this when the device name is needed; it returns the name string for the given device ID
/*
Printing the names of all cameras:
There is no need to write this yourself -- the library already contains this code and the console shows the device names.
for (int i = 0; i < numDevices; i++)
{
char *camera_name = NULL;
camera_name = (char*)videoInput::getDeviceName(i);//getDeviceName is a static member function, so no instance is needed
if (camera_name > 0)
printf("Camera %d's name is '%s'.\n", i, camera_name);
else
printf("Can not get Camera #%d's name.\n", i);
}
*/
if(numDevices==0)
return -1;
system("pause");//停止程序运行,查看DOS窗口输出显示有多少个捕获设备,deviceID从0,1,2。。
//设置要捕获的设备参数
int device1 = 0; //this could be any deviceID that shows up in listDevices
//turns off console messages - default is to print messages
//VI.setVerbose(bool _verbose);//可设置为不输出调试信息
//if you want to capture at a different frame rate (default is 30)
//设置帧率!!!specify it here, you are not guaranteed to get this fps though.
VI.setIdealFramerate(device1, 25);
//setup the device - there are a number of options:
//VI.setupDevice(device1); //setup the first device with the default settings
//VI.setupDevice(device1, VI_COMPOSITE); //or setup device with specific connection type
//VI.setupDevice(device1, 320, 240); //or setup device with specified video size
//VI.setupDevice(device1, 320, 240, VI_COMPOSITE); //or setup device with video size and connection type
/*connection types for the signal coming from the capture card
#define VI_COMPOSITE 0
#define VI_S_VIDEO 1
#define VI_TUNER 2
#define VI_USB 3
#define VI_1394 4
*/
//VI.setupDevice(device1, 320, 240, VI_S_VIDEO);//needed like this when the video source is an S-VIDEO signal; a CVBS USB capture device will not produce an image with this setting
VI.setupDevice(device1, 640, 480, VI_COMPOSITE);//use this setting for a CVBS USB capture device fed through the composite AV input
//defines for formats: some of these video standards display in color and some in grayscale; choose freely
/*#define VI_NTSC_M 0
#define VI_PAL_B 1
#define VI_PAL_D 2
#define VI_PAL_G 3
#define VI_PAL_H 4
#define VI_PAL_I 5
#define VI_PAL_M 6
#define VI_PAL_N 7
#define VI_PAL_NC 8
#define VI_SECAM_B 9
#define VI_SECAM_D 10
#define VI_SECAM_G 11
#define VI_SECAM_H 12
#define VI_SECAM_K 13
#define VI_SECAM_K1 14
#define VI_SECAM_L 15
#define VI_NTSC_M_J 16
#define VI_NTSC_433 17*/
VI.setFormat(device1, VI_PAL_D); //if your card doesn't remember what format it should be
//call this with the appropriate format listed above
//NOTE: must be called after setupDevice! The requested width and height cannot always be accommodated,
//make sure to check the size once the device is setup
//to get a settings dialog for the device; this call shows the video settings window and can be removed
VI.showSettingsWindow(device1);
int width = VI.getWidth(device1);
int height = VI.getHeight(device1);
int size = VI.getSize(device1);
//check the console output for the capture resolution and the memory needed for the data
cout << "device output after setup -- width: " << width << " height: " << height << endl;
cout << "buffer size needed for one frame after setup (uchar bytes): " << size << endl;
system("pause");
//***************************************************************************************************
unsigned char * yourBuffer1 = new unsigned char[size];
while (1)
{
if (VI.isFrameNew(device1))//a new frame has arrived, so fetch it
{
//Returns the pixels - flipRedAndBlue toggles RGB/BGR flipping - and you can flip the image too
//类成员函数unsigned char * getPixels(int deviceID, bool flipRedAndBlue = true, bool flipImage = false);
//Or pass in a buffer for getPixels to fill returns true if successful.
//bool getPixels(int id, unsigned char * pixels, bool flipRedAndBlue = true, bool flipImage = false);
VI.getPixels(device1, yourBuffer1, false, true); //false fills the pixels as a BGR (for OpenCV) unsigned char array, true flips the image vertically
p_iplFrame->imageData = (char *)yourBuffer1;
//cvFlip(p_iplFrame, NULL, 1); //flip_mode = 0 flips around the X axis (top-bottom), flip_mode > 0 (e.g. 1) flips around the Y axis (left-right), flip_mode < 0 (e.g. -1) flips around both axes (point reflection about the origin).
//wrap the buffer as an image
Mat frame(p_iplFrame, false);//convert the C IplImage structure into a Mat holding the current frame; false means the data buffer is shared
cvtColor(frame, cameraPicture, CV_BGR2GRAY);
imshow("original image (Mat)", frame);
imshow("grayscale version", cameraPicture);
}
char key = cvWaitKey(33);
if (key == 27)
break;
if (key == 'c')
{
cout << "提取图像成功!………………" << endl;
std::stringstream str;
str << "F:\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
std::cout << "提取的图像保存路径及文件名" << str.str() << endl;
imwrite(str.str(), cameraPicture);//
i = i + 1;
}
}
//Shut down devices properly
cvReleaseImage(&p_iplFrame);
VI.stopDevice(device1);
return 0;
}
/*-----------------------------------------------------------------------------------------------------------*/
//****8. Reading and processing a video file stream, using a wrapped Canny detector as an example ****//
/*-----------------------------------------------------------------------------------------------------------*/
//Note: to open a video file correctly, the matching codec must be installed on the computer; otherwise cv::VideoCapture cannot understand the video format!
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace cv;
using namespace std;
void frameCanny(cv::Mat& img, cv::Mat& out)//the input image must be a color image!
{
// first convert the frame to grayscale
cv::cvtColor(img, out, CV_BGR2GRAY);
// run the Canny detector
cv::Canny(out, out, 100, 200);
// invert the pixels
cv::threshold(out, out, 128, 255, cv::THRESH_BINARY_INV);
}
int main()
{
// open the video stream
cv::VideoCapture capture("../libo_resource/test.avi");
// check that the video was opened successfully
if (!capture.isOpened())
{
cout<< " Open Video,Failed!"<<endl;
return -1;
}
// get the frame rate
double rate = capture.get(CV_CAP_PROP_FPS);
cout << "original video frame rate: " << rate << endl;
Mat frame; // current video frame
//Mat frameGray;
Mat outPut;
cv::namedWindow("Extracted Frame",1);
// delay between two consecutive frames
int delay = rate > 0 ? static_cast<int>(1000 / rate) : 40;//fall back to 40 ms when the frame rate cannot be read
bool stop(false);//loop control flag
// iterate over every frame
while (!stop)
{
// try to read the next frame
if (!capture.read(frame))
break;
frameCanny(frame, outPut);
imshow("Extracted Frame", frame);
imshow("Processed Frame", outPut);
// introduce the delay
if (cv::waitKey(delay) >= 0)
stop = true;
}
waitKey(0);
return 0;
}
/*-----------------------------------------------------------------------------------------------------------*/
//****9. A self-written C++ class that wraps OpenCV to read and process a video file stream, again using Canny as the example. ****//
/*-----------------------------------------------------------------------------------------------------------*/
//Note: this program needs the self-written videoProcessor.h, videoProcessor.cpp and frameProcessor.h to be added to the project!
//Note: to open a video file correctly, the matching codec must be installed on the computer; otherwise cv::VideoCapture cannot understand the video format!
//To process every frame of a video, a custom VideoProcessor class is created here; it wraps OpenCV's video capture framework, lets us specify a function to be called on each frame, and can work on a device, a video file or a custom image sequence.
//Adapted from: http://blog.csdn.net/liyuefeilong/article/details/44066097
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
#include "videoProcessor.h"
using namespace cv;
using namespace std;
void frameCanny(cv::Mat& img, cv::Mat& out)//the input image must be a color image!
{
// first convert the frame to grayscale
cv::cvtColor(img, out, CV_BGR2GRAY);
// run the Canny detector
cv::Canny(out, out, 100, 200);
// invert the pixels
cv::threshold(out, out, 128, 255, cv::THRESH_BINARY_INV);
}
int main()
{
// first create the instance
VideoProcessor processor;
// open the video file
processor.setInput("../libo_resource/test.avi");//here a video file is used as the input
// declare the display windows
// one for the input video and one for the output video
processor.displayInput("Input Video");
processor.displayOutput("Output Video");
// play the video at its original frame rate
processor.setDelay(1000. / processor.getFrameRate());//this expression relies on getFrameRate(), so it only makes sense for video files; when processing frames from a device or an image sequence, getFrameRate() should not be used
// set the frame-processing callback
processor.setFrameProcessor(frameCanny);//uses the Canny-wrapping function defined above in this file; a frameProcessor-based member-function approach could be used instead, as long as the frame-processing algorithm is defined
// start the frame-processing loop
processor.run();//run() of this class only displays the results and does not save them; add the corresponding code if saving is needed
cv::waitKey();
return 0;
}
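//Supplementary sketch (an assumption -- the real videoProcessor.h from the referenced blog may differ): a minimal class,
//named VideoProcessorSketch here to avoid clashing with the real header, that provides only the members used in the
//example above.
#include <opencv2/opencv.hpp>
#include <string>
class VideoProcessorSketch
{
public:
    VideoProcessorSketch() : process(0), delay(0) {}
    bool setInput(const std::string& filename) { return capture.open(filename); }
    void displayInput(const std::string& wn) { windowNameInput = wn; cv::namedWindow(wn); }
    void displayOutput(const std::string& wn) { windowNameOutput = wn; cv::namedWindow(wn); }
    void setDelay(double d) { delay = static_cast<int>(d); }
    double getFrameRate() { return capture.get(CV_CAP_PROP_FPS); }
    void setFrameProcessor(void (*frameProcessingCallback)(cv::Mat&, cv::Mat&)) { process = frameProcessingCallback; }
    void run()
    {
        cv::Mat frame, output;
        while (capture.read(frame))
        {
            if (process)
                process(frame, output);                //call the per-frame callback
            else
                output = frame;
            if (!windowNameInput.empty()) cv::imshow(windowNameInput, frame);
            if (!windowNameOutput.empty()) cv::imshow(windowNameOutput, output);
            if (cv::waitKey(delay) >= 0)               //any key stops the loop
                break;
        }
    }
private:
    cv::VideoCapture capture;
    void (*process)(cv::Mat&, cv::Mat&);               //frame-processing callback
    int delay;                                         //delay between frames in milliseconds
    std::string windowNameInput, windowNameOutput;
};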
/*-----------------------------------------------------------------------------------------------------------*/
//****10. OpenCV 2.0: display the frame rate of a video stream ****//
/*-----------------------------------------------------------------------------------------------------------*/
//Note: this program gives a rough measurement of the time consumed by video processing. It first measures the time between frames and then uses putText to draw the FPS value onto the image.
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat frame;
//the video stream can come from a camera or directly from a video file
//VideoCapture capture(0);
VideoCapture capture("../libo_resource/test.avi");
if (!capture.isOpened())
{
std::cout << "No Video Input!" << std::endl;
system("pause");
return -1;
}
//get the frame width and height and print them to the console
int frameWidth, frameHeight;
frameWidth = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
frameHeight = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
//cout << "总帧数:" << cap.get(CV_CAP_PROP_FRAME_COUNT)<<endl;//若读入为视频文件,可以输出视频文件总帧数
cout << "帧宽:" << frameWidth << "像素" << endl;
cout << "帧高:" << frameHeight << "像素" << endl;
int originalFrameSum = capture.get(CV_CAP_PROP_FRAME_COUNT);
cout << "原始视频文件的总帧数为:" << originalFrameSum << endl;
double originalFPS = capture.get(CV_CAP_PROP_FPS);
cout << "原始视频文件的帧率为:" << originalFPS << endl;
double fps;
char fpsString[10];//buffer holding the frame rate as text
namedWindow("Video FPS estimate", 1);
double t = 0;
int delay = originalFPS > 0 ? static_cast<int>(1000 / originalFPS) : 40;//fall back to 40 ms when the frame rate cannot be read
while (true)
{
t = (double)getTickCount();
if (!capture.read(frame))//try to read the next frame
break;//confirms that image data was really read into the Mat buffer; if (!frame.empty()) works too
if (waitKey(delay) >= 0)
break;
// getTickCount(): returns the number of clock ticks since the operating system was started
// getTickFrequency(): returns the number of ticks per second
// t is the time spent by this piece of code in seconds; fps is its reciprocal
t = ((double)getTickCount() - t) / getTickFrequency();
fps = 1.0 / t;
//updating the string this way is clumsier than using a stringstream strStream (see the supplementary sketch after this example)
sprintf(fpsString, "%.2f", fps); // keep two decimal places of the frame rate
std::string fpsString1("Video FPS:");//this statement must stay inside the loop body!
fpsString1 += fpsString; // append the numeric frame rate after "FPS:"
// draw the frame rate onto the output frame
putText(frame, // image matrix
fpsString1, // text content (std::string)
cv::Point(50, 50), // text position (the bottom-left corner of the text)
cv::FONT_HERSHEY_SIMPLEX, // font type
1, // font scale
cv::Scalar(0, 0, 255), // text color: red (BGR order)
4); // thickness
cv::imshow("Video FPS estimate", frame);//note: imshow must come after putText for the text to appear on the image
//fpsString1.clear();
}
waitKey(0);
return 0;
}
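//Supplementary sketch (an assumption): the stringstream-based alternative hinted at in the comment above, which avoids
//the fixed-size char buffer used with sprintf.
#include <sstream>
#include <string>
std::string makeFpsLabel(double fps)
{
    std::stringstream strStream;
    strStream.setf(std::ios::fixed);
    strStream.precision(2);                 //keep two decimal places, like "%.2f"
    strStream << "Video FPS:" << fps;
    return strStream.str();
}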
/*-----------------------------------------------------------------------------------------------------------*/
//****11. OpenCV keyboard handling example ****//
/*-----------------------------------------------------------------------------------------------------------*/
//Note: waitKey() reads key presses from the active HighGUI window and drives the switch below.
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
int c;
int exitFlag = 0;
namedWindow("激活waitKey", 1);
while (!exitFlag)
{
c = waitKey(0);//OpenCV function: blocks until a key is pressed in the active window
switch (c)
{
case 'w':
printf("w \n");
break;
case 'a':
printf("a \n");
break;
case 's':
printf("s \n");
break;
case 'd':
printf("d \n");
break;
case 'q':
printf("exit\n");
exitFlag = 1;
break;
default:
printf("c:%d\n", c);
}
}
return 0;
}
/*-----------------------------------------------------------------------------------------------------------*/
//****12. OpenCV mouse handling example ****//
/*-----------------------------------------------------------------------------------------------------------*/
//Note: event carries a value that depends on the mouse action; the meaning of each value is listed below:
/*
CV_EVENT_MOUSEMOVE =0, //mouse move
CV_EVENT_LBUTTONDOWN =1, //left button pressed
CV_EVENT_RBUTTONDOWN =2, //right button pressed
CV_EVENT_MBUTTONDOWN =3, //middle button pressed
CV_EVENT_LBUTTONUP =4, //left button released
CV_EVENT_RBUTTONUP =5, //right button released
CV_EVENT_MBUTTONUP =6, //middle button released
CV_EVENT_LBUTTONDBLCLK =7, //left button double-clicked
CV_EVENT_RBUTTONDBLCLK =8, //right button double-clicked
CV_EVENT_MBUTTONDBLCLK =9 //middle button double-clicked
*/
//x and y are the current mouse coordinates.
//flags describes mouse drag events, as follows:
/*Note that values combine: dragging with the left button while CTRL is held gives flags = 9 = 1 + 8, and CTRL plus the right button gives 10 = 8 + 2 (a bit-mask check is shown in the supplementary sketch after this example).
CV_EVENT_FLAG_LBUTTON =1, //left-button drag (press the left button and move the mouse inside the window)
CV_EVENT_FLAG_RBUTTON =2, //right-button drag
CV_EVENT_FLAG_MBUTTON =4, //middle-button drag
CV_EVENT_FLAG_CTRLKEY =8, //CTRL key held down
CV_EVENT_FLAG_SHIFTKEY =16, //SHIFT key held down
CV_EVENT_FLAG_ALTKEY =32 //ALT key held down
*/
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
using namespace cv;
//the mouse callback function
void on_mouse(int event, int x, int y, int flags, void* ustc)
{
char *temp = (char *)ustc;
printf("temp:%s, x:%d, y:%d, event:%d, flags:%d\n", temp, x, y, event, flags);
}
int main(){
char str[4] = "tao";
cv::Mat src = cv::imread("../libo_resource/test.bmp", 0);//load as a single-channel grayscale image
cv::imshow("src", src);
cvSetMouseCallback("src", on_mouse, str);
cv::waitKey(0);
cvDestroyAllWindows();
return 0;
}
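//Supplementary sketch (an assumption): decoding the combined flags values described in the notes above with bit masks,
//e.g. CTRL held down while dragging with the left button (flags = 9 = 1 + 8); the callback name is hypothetical.
#include <opencv2/opencv.hpp>
#include <stdio.h>
void on_mouse_flags(int event, int x, int y, int flags, void* ustc)
{
    if ((flags & CV_EVENT_FLAG_CTRLKEY) && (flags & CV_EVENT_FLAG_LBUTTON))
        printf("CTRL + left-button drag at (%d, %d)\n", x, y);
    else if (flags & CV_EVENT_FLAG_SHIFTKEY)
        printf("SHIFT is held down, event:%d\n", event);
}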
/*-----------------------------------------------------------------------------------------------------------*/
//****13. A concrete OpenCV mouse-callback example ****//
/*-----------------------------------------------------------------------------------------------------------*/
//Draw a rectangle on the image that follows the mouse drag; after pressing 'c', the region covered by the rectangle is copied and shown as a new image; pressing 'q' exits the program.
//Note: event carries a value that depends on the mouse action; the meaning of each value is listed below:
/*
CV_EVENT_MOUSEMOVE =0, //mouse move
CV_EVENT_LBUTTONDOWN =1, //left button pressed
CV_EVENT_RBUTTONDOWN =2, //right button pressed
CV_EVENT_MBUTTONDOWN =3, //middle button pressed
CV_EVENT_LBUTTONUP =4, //left button released
CV_EVENT_RBUTTONUP =5, //right button released
CV_EVENT_MBUTTONUP =6, //middle button released
CV_EVENT_LBUTTONDBLCLK =7, //left button double-clicked
CV_EVENT_RBUTTONDBLCLK =8, //right button double-clicked
CV_EVENT_MBUTTONDBLCLK =9 //middle button double-clicked
*/
//x and y are the current mouse coordinates.
//flags describes mouse drag events, as follows:
/*Note that values combine: dragging with the left button while CTRL is held gives flags = 9 = 1 + 8, and CTRL plus the right button gives 10 = 8 + 2.
CV_EVENT_FLAG_LBUTTON =1, //left-button drag (press the left button and move the mouse inside the window)
CV_EVENT_FLAG_RBUTTON =2, //right-button drag
CV_EVENT_FLAG_MBUTTON =4, //middle-button drag
CV_EVENT_FLAG_CTRLKEY =8, //CTRL key held down
CV_EVENT_FLAG_SHIFTKEY =16, //SHIFT key held down
CV_EVENT_FLAG_ALTKEY =32 //ALT key held down
*/
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
using namespace cv;
Mat srcProcessed;
Mat srcImage;
int pic_info[4];
//the mouse callback function
void on_mouse(int event, int x, int y, int flags, void* ustc)
{
if (event == CV_EVENT_LBUTTONDOWN)
{
pic_info[0] = x; /*width1*/
pic_info[1] = y; /*height1*/
pic_info[2] = 0; /*width2*/
pic_info[3] = 0; /*height2*/
}
else if (event == CV_EVENT_LBUTTONUP)
{
pic_info[2] = x;
pic_info[3] = y;
//src = cv::imread(".. / libo_resource / test.bmp", 1);
rectangle(srcProcessed , cvPoint(pic_info[0], pic_info[1]), cvPoint(pic_info[2], pic_info[3]), cvScalar(255, 0, 0), 2);
imshow("src", srcProcessed);
}
if (flags == CV_EVENT_FLAG_LBUTTON)
{
pic_info[2] = x;
pic_info[3] = y;
rectangle(srcProcessed, cvPoint(pic_info[0], pic_info[1]), cvPoint(pic_info[2], pic_info[3]), cvScalar(255, 0, 0), 2);
imshow("src", srcProcessed);
}
printf("%d %d %d %d\n", pic_info[0], pic_info[1], pic_info[2], pic_info[3]);
}
int main()
{
bool exitFlag = false;
char c;
cv::Mat imageROI;
srcProcessed = imread("../libo_resource/test.bmp", 1);
srcImage = srcProcessed.clone();
imshow("src", srcProcessed);
cvSetMouseCallback("src", on_mouse, NULL);
namedWindow("ROI", 1);
while (!exitFlag)
{
c = waitKey(0);
if (c == 'c')
{
if ((pic_info[0] != 0) && (pic_info[1] != 0) && (pic_info[2] != 0) && (pic_info[3] != 0))
{
resizeWindow("ROI", pic_info[2] - pic_info[0], pic_info[3] - pic_info[1]);
imageROI = srcImage(Rect(pic_info[0], pic_info[1], pic_info[2] - pic_info[0], pic_info[3] - pic_info[1]));
cv::imshow("ROI", imageROI);
}
}
else if (c == 'q')
{
exitFlag = true;
}
}
cvDestroyAllWindows();
return 0;
}
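//Supplementary sketch (an assumption): the example above assumes the drag goes from the top-left towards the
//bottom-right corner; a hypothetical helper such as makeSafeRect() would also accept a drag in the opposite direction
//and clip the rectangle to the image border before it is used as an ROI.
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cstdlib>
cv::Rect makeSafeRect(int x1, int y1, int x2, int y2, const cv::Mat& img)
{
    cv::Rect r(std::min(x1, x2), std::min(y1, y2), std::abs(x2 - x1), std::abs(y2 - y1));
    return r & cv::Rect(0, 0, img.cols, img.rows);//the intersection clips the rectangle to the image
}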
/*-----------------------------------------------------------------------------------------------------------*/
//****14. OpenCV video processing: draw a line with the mouse for line-crossing (tripwire) detection -- a simple example with two methods ****//
/*-----------------------------------------------------------------------------------------------------------*/
//In video processing it is common to detect pedestrians, vehicles or other moving objects crossing a line or a boundary. Two methods:
//Method 1: freeze the first frame (or pause the video) and draw the line on that fixed frame
//Method 2: draw the line while the video keeps playing
//
//Note: event carries a value that depends on the mouse action; the meaning of each value is listed below:
/*
CV_EVENT_MOUSEMOVE =0, //mouse move
CV_EVENT_LBUTTONDOWN =1, //left button pressed
CV_EVENT_RBUTTONDOWN =2, //right button pressed
CV_EVENT_MBUTTONDOWN =3, //middle button pressed
CV_EVENT_LBUTTONUP =4, //left button released
CV_EVENT_RBUTTONUP =5, //right button released
CV_EVENT_MBUTTONUP =6, //middle button released
CV_EVENT_LBUTTONDBLCLK =7, //left button double-clicked
CV_EVENT_RBUTTONDBLCLK =8, //right button double-clicked
CV_EVENT_MBUTTONDBLCLK =9 //middle button double-clicked
*/
//x and y are the current mouse coordinates.
//flags describes mouse drag events, as follows:
/*Note that values combine: dragging with the left button while CTRL is held gives flags = 9 = 1 + 8, and CTRL plus the right button gives 10 = 8 + 2.
CV_EVENT_FLAG_LBUTTON =1, //left-button drag (press the left button and move the mouse inside the window)
CV_EVENT_FLAG_RBUTTON =2, //right-button drag
CV_EVENT_FLAG_MBUTTON =4, //middle-button drag
CV_EVENT_FLAG_CTRLKEY =8, //CTRL key held down
CV_EVENT_FLAG_SHIFTKEY =16, //SHIFT key held down
CV_EVENT_FLAG_ALTKEY =32 //ALT key held down
*/
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
using namespace cv;
/*--------------------------------global variables--------------------------*/
bool got_line = false;//flag controlling the line-drawing step; it becomes true when the line has been drawn
//global variables
Point beginPoint(0, 0);//initializing a cv::Point from NULL does not compile, so (0, 0) is used
Point endPoint(0, 0);
/*--------------------------------mouse callback -- draw a straight line--------------------------*/
void mouseLineHandler(int event, int x, int y, int flags, void *param)
{
switch (event)
{
case CV_EVENT_LBUTTONDOWN:
beginPoint = Point(x, y);
break;
case CV_EVENT_LBUTTONUP:
got_line = true;
endPoint = Point(x, y);
break;
default:
break;
}
switch (flags)
{
case CV_EVENT_FLAG_LBUTTON:
endPoint = Point(x, y);
break;
default:
break;
}
}
int main( )
{
//open the video
VideoCapture videoCapture("../libo_resource/test.avi");
//check that the video was opened
if (!videoCapture.isOpened())
return 0;
//the first frame of the video
Mat firstFrame;
Mat frame;
//read the first frame of the video
videoCapture >> frame;
//copy it into firstFrame
frame.copyTo(firstFrame);
//register
namedWindow("video", 1);
setMouseCallback("video", mouseLineHandler, NULL);
//draw the line ------ method 1
while (!got_line)
{
firstFrame.copyTo(frame);
line(frame, beginPoint, endPoint, Scalar(255, 0, 0), 2);
imshow("video", frame);
if (waitKey(50) == 'q')//---------important
break;
}
//For method 2, replace the while loop above with:
/*
for(;;)
{
videoCapture>>frame;
if (frame.empty())
break;
line(frame,beginPoint,endPoint,Scalar(0,255,255),2);
imshow("video",frame);
if(got_line)
break;
if(waitKey(50)=='q')
break;
}
*/
//remove callback
setMouseCallback("video", NULL, NULL);//注销鼠标事件回调的响应操作
//视频继续
for (;;)//也可以用while(1)
{
videoCapture >> frame;
if (frame.empty())
break;
line(frame, beginPoint, endPoint, Scalar(255, 255, 0), 2);
imshow("video", frame);
if (waitKey(33) == 'q')
break;
}
waitKey(0);
return 0;
}
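//Supplementary sketch (an assumption -- the example above only draws the tripwire, it does not detect a crossing): a
//common test uses the sign of the 2-D cross product to decide on which side of the segment beginPoint->endPoint a
//tracked point lies; a sign change between consecutive frames means the object has crossed the line.
#include <opencv2/opencv.hpp>
int sideOfLine(const cv::Point& lineBegin, const cv::Point& lineEnd, const cv::Point& p)
{
    //cross product of (lineEnd - lineBegin) x (p - lineBegin): > 0 on one side, < 0 on the other, 0 exactly on the line
    long long cross = (long long)(lineEnd.x - lineBegin.x) * (p.y - lineBegin.y)
                    - (long long)(lineEnd.y - lineBegin.y) * (p.x - lineBegin.x);
    return (cross > 0) - (cross < 0);
}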
/*-----------------------------------------------------------------------------------------------------------*/
//****15. Simulating depth of field with OpenCV, using a mouse callback and ROI operations ****//
/*-----------------------------------------------------------------------------------------------------------*/
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
using namespace cv;
int width, height;
int pic_info[3];
char pic_name[20];
Mat mat1;
IplImage src, *res, *roi;
void mySmooth(Mat mat1, Mat mat2, int flag, int width, int height){
IplImage pI1 = mat1;
IplImage pI2 = mat2;
cvSmooth(&pI1, &pI2, flag, width, height);
}
void mypic_merge(IplImage* src, IplImage* res, IplImage* dst){
CvScalar s;
int height = src->height;
int width = src->width;
int i, j;
for (i = 0; i<height; i++){
for (j = 0; j<width; j++){
s = cvGet2D(res, i, j);
if ((s.val[0] == 0) && (s.val[1] == 0) && (s.val[2] == 0)){
s = cvGet2D(src, i, j);
}
cvSet2D(dst, i, j, s);
}
}
}
void on_mouse(int event, int x, int y, int flags, void* ustc)
{
if (event == CV_EVENT_LBUTTONDOWN){
pic_info[0] = x; /*width1*/
pic_info[1] = y; /*height1*/
pic_info[2] = 0; /*width2*/
//mat1 = cv::imread(pic_name, 1);
//cv::imshow("1", mat1);
}
if (flags == CV_EVENT_FLAG_LBUTTON){
x = abs(x - pic_info[0]);
y = abs(y - pic_info[1]);
pic_info[2] = (int)sqrt((x*x + y*y));
//mat1 = cv::imread(pic_name, 1);
cout << "x:" << x << " ,y:" << y << endl;
}
if (event == CV_EVENT_LBUTTONUP){
src = mat1;
res = cvCreateImage(cvGetSize(&src), 8, 3);
roi = cvCreateImage(cvGetSize(&src), 8, 1);
/**************extract the circular region of interest**********/
cvZero(roi);
cvZero(res);
cvCircle(roi, cvPoint(pic_info[0], pic_info[1]), pic_info[2], CV_RGB(255, 255, 255), -1, 8, 0);
cvAnd(&src, &src, res, roi);//take the region of src covered by roi and copy those pixels into the same positions in res
/******************************************/
mySmooth(mat1, mat1, CV_GAUSSIAN, 23, 23);
mypic_merge(&src, res, res);//merge the blurred image with the unblurred ROI region
cvNamedWindow("2", 1);
cvShowImage("2", res);
printf("circle:%d\n", pic_info[2]);
}
}
int main(){
mat1 = imread("../libo_resource/test.bmp", 1);
width = mat1.cols;//cols is the image width
height = mat1.rows;//rows is the image height
imshow("1", mat1);
cvSetMouseCallback("1", on_mouse, NULL);
cv::waitKey(0);
return 0;
}
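//Supplementary sketch (an assumption, not the author's code): the same depth-of-field effect written with the C++ Mat
//API only -- blur the whole image, then copy the sharp original back inside a circular mask.
#include <opencv2/opencv.hpp>
cv::Mat fakeDepthOfField(const cv::Mat& src, const cv::Point& center, int radius)
{
    cv::Mat blurred;
    cv::GaussianBlur(src, blurred, cv::Size(23, 23), 0);     //same 23x23 kernel as the example above
    cv::Mat mask = cv::Mat::zeros(src.size(), CV_8UC1);
    cv::circle(mask, center, radius, cv::Scalar(255), -1, 8);//filled circle marks the "in focus" region
    src.copyTo(blurred, mask);                               //paste the sharp pixels back inside the circle
    return blurred;
}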