帧差法是背景减图法中的一种,只不过是帧差法不需要建模,因为它的背景模型就是上一帧的图,所以速度非常快,另外帧差法对缓慢变换的光照不是很敏感,所以其用途还是有的,有不少学者对其做出了出色的改进。
其基本原理可以用下面公式看出:
|i(t)-i(t-1)|<T 背景
|i(t)-i(t-1)|>=T 前景
其中i(t),i(t-1)分别为t,t-1时刻对应像素点的像素值,T为阈值。
当然其缺点也不少,容易出现“双影”和“空洞”现象。
// frame_diff.cpp : 定义控制台应用程序的入口点。
//
#include "stdafx.h"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#define threshold_diff 20 //threshold for the simple frame-difference method
using namespace cv;
using namespace std;
int main(int argc,unsigned char* argv[])
{
Mat img_src1,img_src2,img_dst,gray1,gray2,gray_diff;
bool pause=false;
VideoCapture vido_file("IndoorGTTest1.avi");//在这里改相应的文件名
namedWindow("foreground",0);
for (;;)
{
if(!pause)
{
vido_file >>img_src1; //因为视频文件帧数已经固定了,所以每次到这句语句都是读取相邻的帧数,没到时间视频并不向前走
cvtColor(img_src1,gray1,CV_BGR2GRAY);
imshow("video_src",img_src1);//可以事先不用新建一个窗口
waitKey(5);
vido_file >>img_src2;
cvtColor(img_src2,gray2,CV_BGR2GRAY);
imshow("video_src",img_src2);//可以事先不用新建一个窗口
waitKey(5);
subtract(gray1,gray2,gray_diff);
for(int i=0;i<gray_diff.rows;i++)
for(int j=0;j<gray_diff.cols;j++)
if(abs(gray_diff.at<unsigned char>(i,j))>=threshold_diff)//这里模板参数一定要用unsigned char,否则就一直报错
gray_diff.at<unsigned char>(i,j)=255;
else gray_diff.at<unsigned char>(i,j)=0;
imshow("foreground",gray_diff);
}
char c=(char)waitKey(10);
if (c==27)
{
break;
}
if(c==' ')
pause=!pause;
}
return 0;
}
三帧差法的具体算法如下。
提取连续的三帧图像,I(k-1),I(k),I(k+1) 。
(1) d(k,k-1) [x,y] = | I(k)[x,y] - I(k-1)[x,y] |;
d(k,k+1)[x,y] = | I(k+1)[x,y] - I(k)[x,y] |;
(2) b(k,k-1)[x,y] = 1, if d(k,k-1)[x,y] >= T;
b(k,k-1)[x,y] = 0, if d(k,k-1)[x,y] < T;
b(k,k+1)[x,y] = 1, if d(k,k+1)[x,y] >= T;
b(k,k+1)[x,y] = 0, if d(k,k+1)[x,y] < T;
(3) B(k)[x,y] = 1, if b(k,k-1)[x,y] == 1 且 b(k,k+1)[x,y] == 1;
B(k)[x,y] = 0, 否则(其余情况).
可以看出其“双影”和”空洞”比较明显。双影是由于帧差法有2个影子,在该试验中就是轮廓变得很粗,”空洞”是由于物体内部颜色相近,检测不出来。当然帧差法还有个致命的缺点那就是阈值T需要人工设定。
对于帧差法的”双影”现象,有人提出来了三帧差法。其原理如下所示:
1. 由i(t)-i(t-1)得到前景图 F1
2. 由i(t+1)-i(t)得到前景图 F2
3. F1 ∩ F2得到前景图 F3
4. 形态学处理
也就是利用2次相邻帧的差,然后去与操作,就得到了真正的那个影子了。
这个在一定程度上可以解决”双影”现象。
// frame_3diff.cpp : 定义控制台应用程序的入口点。
//
#include "stdafx.h"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#define threshold_diff1 10 //threshold for the first pairwise frame difference
#define threshold_diff2 10 //threshold for the second pairwise frame difference
using namespace cv;
using namespace std;
int main(int argc,unsigned char* argv[])
{
Mat img_src1,img_src2,img_src3;//3帧法需要3帧图片
Mat img_dst,gray1,gray2,gray3;
Mat gray_diff1,gray_diff2;//存储2次相减的图片
Mat gray;//用来显示前景的
bool pause=false;
VideoCapture vido_file("IndoorGTTest1.avi");//在这里改相应的文件名
namedWindow("foreground",0);
for (;;)
{
if(!false)
{
vido_file >>img_src1;
cvtColor(img_src1,gray1,CV_BGR2GRAY);
waitKey(5);
vido_file >>img_src2;
cvtColor(img_src2,gray2,CV_BGR2GRAY);
imshow("video_src",img_src2);//
waitKey(5);
vido_file >>img_src3;
cvtColor(img_src3,gray3,CV_BGR2GRAY);
subtract(gray2,gray1,gray_diff1);//第二帧减第一帧
subtract(gray3,gray2,gray_diff2);//第三帧减第二帧
for(int i=0;i<gray_diff1.rows;i++)
for(int j=0;j<gray_diff1.cols;j++)
{
if(abs(gray_diff1.at<unsigned char>(i,j))>=threshold_diff1)//这里模板参数一定要用unsigned char,否则就一直报错
gray_diff1.at<unsigned char>(i,j)=255; //第一次相减阈值处理
else gray_diff1.at<unsigned char>(i,j)=0;
if(abs(gray_diff2.at<unsigned char>(i,j))>=threshold_diff2)//第二次相减阈值处理
gray_diff2.at<unsigned char>(i,j)=255;
else gray_diff2.at<unsigned char>(i,j)=0;
}
bitwise_and(gray_diff1,gray_diff2,gray);
imshow("foreground",gray);
}
char c=(char)waitKey(10);
if (c==27)
{
break;
}
if(c==' ')
pause=!pause;//为什么暂停不了??
}
return 0;
}
帧差法也可以如下,得到的效果更好!
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <sstream>
#include <string>
using namespace std;
using namespace cv;
//our sensitivity value to be used in the absdiff() function
const static int SENSITIVITY_VALUE = 20;
//size of blur used to smooth the intensity image output from absdiff() function
const static int BLUR_SIZE = 10;
//we'll have just one object to search for
//and keep track of its position.
//NOTE(review): theObject and objectBoundingRectangle are never read or written
//in the code visible here — presumably leftovers from a tracking variant; confirm.
int theObject[2] = {0,0};
//bounding rectangle of the object, we will use the center of this as its position.
Rect objectBoundingRectangle = Rect(0,0,0,0);
//int to string helper function
// Convert an integer to its decimal string representation.
// Fully qualified so it no longer depends on `using namespace std;` or on
// <sstream>/<string> being pulled in transitively by the OpenCV headers.
std::string intToString(int number){
	std::ostringstream ss;
	ss << number;
	return ss.str();
}
// Frame differencing with absdiff + threshold + blur + re-threshold.
// Keys: 'd'/'t' toggle the debug windows, 'p'/space toggle pause, ESC quits.
int main(){
	// 'd'/'t' toggle debug windows; 'p'/space toggle pause (handled below)
	bool debugMode = true;
	bool pause = false;
	//the two frames we will be comparing
	Mat frame1,frame2;
	//their grayscale images (needed for absdiff() function)
	Mat grayImage1,grayImage2;
	//resulting difference image
	Mat differenceImage;
	//thresholded difference image (for use in findContours() function)
	Mat thresholdImage;
	//video capture object.
	VideoCapture capture;
	while(1){
		//loop the video by re-opening the capture every time it reaches its last frame
		capture.open("video.avi");
		if(!capture.isOpened()){
			cout<<"ERROR ACQUIRING VIDEO FEED\n";
			getchar();
			return -1;
		}
		//stop one frame early ('-1') because we read two frames per iteration;
		//without it the second read at the end of the video fails.
		while(capture.get(CV_CAP_PROP_POS_FRAMES)<capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
			if(!pause){
				//read two consecutive frames and convert to grayscale
				capture.read(frame1);
				cv::cvtColor(frame1,grayImage1,COLOR_BGR2GRAY);
				capture.read(frame2);
				cv::cvtColor(frame2,grayImage2,COLOR_BGR2GRAY);
				//frame differencing outputs an "intensity image" — not yet a
				//binary mask, so threshold it afterwards.
				cv::absdiff(grayImage1,grayImage2,differenceImage);
				cv::threshold(differenceImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
				if(debugMode){
					cv::imshow("Difference Image",differenceImage);
					cv::imshow("Threshold Image", thresholdImage);
				}else{
					//hide the debug windows when debug mode is off
					cv::destroyWindow("Difference Image");
					cv::destroyWindow("Threshold Image");
				}
				//blur to suppress noise, then re-threshold back to a binary image
				cv::blur(thresholdImage,thresholdImage,cv::Size(BLUR_SIZE,BLUR_SIZE));
				cv::threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
				if(debugMode){
					imshow("Final Threshold Image",thresholdImage);
				}else{
					cv::destroyWindow("Final Threshold Image");
				}
				//show the captured frame
				imshow("Frame1",frame1);
			}
			//BUG FIX: the original discarded waitKey()'s return value, so the
			//toggles promised by its comments never worked. The short delay is
			//still required for the HighGUI windows to refresh.
			switch(waitKey(10)){
			case 27:          //ESC: quit
				return 0;
			case 'd':         //toggle debug windows
			case 't':
				debugMode = !debugMode;
				break;
			case 'p':         //toggle pause
			case ' ':
				pause = !pause;
				break;
			}
		}
		//release the capture before re-opening and looping again.
		capture.release();
	}
	return 0;
}
最后本文提供一个完整的源代码,来自BGSLibrary库,实现帧差法。
具体如下。源码
效果如下:
三帧差法:
#include "highgui.h"
#include "cv.h"
void main()
{
int chen=0;
CvCapture* capture;
capture=cvCaptureFromFile("1.avi");//获取视频
//capture=cvCaptureFromFile("three.avi");//获取视频
//capture=cvCaptureFromFile("1.avi");//获取视频
cvNamedWindow("camera",CV_WINDOW_AUTOSIZE);
cvNamedWindow("moving area",CV_WINDOW_AUTOSIZE);
IplImage* tempFrame;//用于遍历capture中的帧,通道数为3,需要转化为单通道才可以处理
IplImage* tempFrame1;//保存差分结果
IplImage* tempFrame2;
IplImage* tempFrame3;//保存与的结果
IplImage* currentFrame;//当前帧
IplImage* previousFrame;//上一帧
IplImage* nextFrame;//上一帧
/*
CvMat结构,本质上和IplImage差不多,但是因为IplImage里的数据只能用uchar的形式存放,当需要这些图像数据看作数据矩阵来运算时,
0~255的精度显然满足不了要求; 然而CvMat里却可以存放任意通道数、任意格式的数据
*/
CvMat* tempFrameMat1;
CvMat* tempFrameMat2;
CvMat* tempFrameMat;
CvMat* currentFrameMat; //IplImage要转成CvMat进行处理
CvMat* previousFrameMat;
CvMat* nextFrameMat;
int frameNum=0;
while(tempFrame=cvQueryFrame(capture)) //开始读AVI文件 用来将下一帧视频文件载入内存呢
{
chen++;
if(chen==145)
{
chen=20;
}
frameNum++;
if(frameNum==1)
{
//第一帧先初始化各个结构,为它们分配空间
previousFrame=cvCreateImage(cvSize(tempFrame->width,tempFrame->height),IPL_DEPTH_8U,1); //前一帧
currentFrame=cvCreateImage(cvSize(tempFrame->width,tempFrame->height),IPL_DEPTH_8U,1); //当前帧
nextFrame=cvCreateImage(cvSize(tempFrame->width,tempFrame->height),IPL_DEPTH_8U,1);
tempFrame1=cvCreateImage(cvSize(tempFrame->width,tempFrame->height),IPL_DEPTH_8U,1);
tempFrame2=cvCreateImage(cvSize(tempFrame->width,tempFrame->height),IPL_DEPTH_8U,1);
currentFrameMat=cvCreateMat(tempFrame->height, tempFrame->width, CV_32FC1);
tempFrame3=cvCreateImage(cvSize(tempFrame->width,tempFrame->height),IPL_DEPTH_8U,1);
previousFrameMat=cvCreateMat(tempFrame->height, tempFrame->width, CV_32FC1);
tempFrameMat=cvCreateMat(tempFrame->height, tempFrame->width, CV_32FC1);
tempFrameMat1=cvCreateMat(tempFrame->height, tempFrame->width, CV_32FC1);
tempFrameMat2=cvCreateMat(tempFrame->height, tempFrame->width, CV_32FC1);
nextFrameMat=cvCreateMat(tempFrame->height, tempFrame->width, CV_32FC1);
//此时这些IplImage和CvMat都是空的,没有存有数据
cvCvtColor(tempFrame, previousFrame, CV_BGR2GRAY);
}
if(frameNum==2)
{
cvCvtColor(tempFrame, currentFrame, CV_BGR2GRAY);
}
if(frameNum>2)
{
if(frameNum == 3)
{
cvCvtColor(tempFrame, nextFrame,CV_BGR2GRAY); //转化为单通道灰度图,此时currentFrame已经存了tempFrame的内容
}
if(frameNum > 3)
{
cvCopy(currentFrame,previousFrame);
cvCopy(nextFrame,currentFrame);
cvCvtColor(tempFrame,nextFrame,CV_BGR2GRAY);
}
/*用cvConvert将IplImage转为CvMat,接下来用cvAbsDiff对它们处理
经过转换后,currentFrame没有改变,但是tempFrameMat已经存了currentFrame的内容
*/
cvConvert(currentFrame,currentFrameMat);
cvConvert(previousFrame,previousFrameMat);
cvConvert(nextFrame,nextFrameMat);
cvAbsDiff(currentFrameMat,previousFrameMat,tempFrameMat1);//做差求绝对值
cvAbsDiff(nextFrameMat,currentFrameMat,tempFrameMat2);//做差求绝对值
/*
在currentFrameMat中找大于50(阈值)的像素点,把currentFrame中对应的点设为255
此处阈值可以帮助把车辆的阴影消除掉
*/
cvThreshold(tempFrameMat1,tempFrame1,50,255.0,CV_THRESH_BINARY);
cvDilate(tempFrame1,tempFrame1);
cvErode(tempFrame1,tempFrame1);
cvThreshold(tempFrameMat2,tempFrame2,50,255.0,CV_THRESH_BINARY);
cvDilate(tempFrame2,tempFrame2);
cvErode(tempFrame2,tempFrame2);
cvAnd(tempFrame1,tempFrame2,tempFrame3);
//显示图像
cvShowImage("camera",tempFrame);
cvFlip(tempFrame3); //将图像显示为正
cvShowImage("moving area",tempFrame3);
}
//把当前帧保存作为下一次处理的前一帧
cvWaitKey(100);
}//end while
//释放资源
cvReleaseImage(&tempFrame);
//cvReleaseImage(&tempFrame1);
//cvReleaseImage(&tempFrame2);
//cvReleaseImage(&tempFrame3);
//cvReleaseImage(&previousFrame);
//cvReleaseImage(¤tFrame);
//cvReleaseImage(&nextFrame);
//tempFrame不用在这里释放
cvReleaseCapture(&capture);
/*
cvReleaseMat(&previousFrameMat);
cvReleaseMat(¤tFrameMat);
cvReleaseMat(&nextFrameMat);
cvReleaseMat(&tempFrameMat1);
cvReleaseMat(&tempFrameMat2);
cvReleaseMat(&tempFrameMat);
*/
cvDestroyWindow("camera");
cvDestroyWindow("moving area");
}