updateMotionHistory
void updateMotionHistory( InputArray silhouette, InputOutputArray mhi,
double timestamp, double duration );
参数:
silhouette - 具有运动发生的非零像素的轮廓蒙版,非零像素的位置代表有运动发生。
mhi - 由函数更新的运动历史图像(单通道,32位浮点)。
timestamp - 以毫秒或其他单位表示的当前时间。
duration - 运动轨迹的最大持续时间,设置运动历史像素保留在mhi中的时间长度。换句话说,mhi 中比 (timestamp - duration) 更早的像素会被置为 0。
calcMotionGradient:
void calcMotionGradient( InputArray mhi, OutputArray mask,
OutputArray orientation,
double delta1, double delta2,
int apertureSize=3 );
mhi:是updateMotionHistory函数的输出结果;
OutputArray mask:单通道8位图像,非零项表示发现有效梯度的位置;
OutputArray orientation:浮点图像,给出每个点的梯度方向角度,以度为单位,范围0到360.
double delta1, double delta2,表示允许的最小,最大梯度幅度
int apertureSize=3,梯度运算符的宽度和高度。
segmentMotion
void segmentMotion(InputArray mhi, OutputArray segmask,
CV_OUT vector<Rect>& boundingRects,
double timestamp, double segThresh);
mhi:updateMotionHistory函数的输出
OutputArray segmask:单通道32位浮点图像,各“片段”被标记在图上,其中每个片段被赋予不同的非零标记符(1,2,3,……),0 表示“无运动”。
opencv2中的segmentMotion内部调用的还是cvSegmentMotion这个函数
CvSeq* cvSegmentMotion( const CvArr* mhi, CvArr* seg_mask, CvMemStorage* storage,
double timestamp, double seg_thresh );
mhi 运动历史图像
seg_mask 用于存放检测到的运动分割结果的图像,单通道,32 位,浮点数。
storage 包含运动连通域序列的内存存储仓
timestamp 当前时间,毫秒单位
seg_thresh 分割阈值,推荐等于或大于运动历史“每步”之间的间隔。
函数 cvSegmentMotion 寻找所有的运动分割,并且在seg_mask 用不同的单独数字(1,2,...)标识它们。
返回一个具有 CvConnectedComp 结构的序列,其中每个结构对应一个运动部件。在这之后,每个运动部件的运动方向就可以被函数 cvCalcGlobalOrientation 利用提取的特定部件的掩模(mask)计算出来
calcGlobalOrientation:找到整体运动方向作为有效梯度方向的向量和
double calcGlobalOrientation( InputArray orientation, InputArray mask,
InputArray mhi, double timestamp,
double duration );
InputArray orientation:calcMotionGradient的输出
InputArray mask:calcMotionGradient的输出
InputArray mhi,:updateMotionHistory函数的输出结果
double timestamp:当前时间;
double duration :持续时间。
函数返回目标运动的方向。
实例:
#include<opencv2\video\tracking.hpp>
#include<opencv2\video\video.hpp>
#include<opencv2\highgui.hpp>
#include <opencv2\imgproc.hpp>
#include <time.h>
#include <stdio.h>
#include <ctype.h>
using namespace cv;
using namespace std;
// Print a short usage banner for the motion-templates demo.
static void help(void)
{
	static const char* const usage =
		"\nThis program demonstrated the use of motion templates -- basically using the gradients\n"
		"of thresholded layers of decaying frame differencing. New movements are stamped on top with floating system\n"
		"time code and motions too old are thresholded away. This is the 'motion history file'. The program reads from the camera of your choice or from\n"
		"a file. Gradients of motion history are used to detect direction of motion etc\n"
		"Usage :\n"
		"./motempl [camera number 0-n or file name, default is camera 0]\n";
	printf("%s", usage);
}
// various tracking parameters (in seconds)
const double MHI_DURATION = 5;     // how long a motion trace persists in the MHI before fading to 0
const double MAX_TIME_DELTA = 0.5; // passed as delta1 to calcMotionGradient (max accepted delta)
const double MIN_TIME_DELTA = 0.05; // passed as delta2 to calcMotionGradient (min accepted delta)
// number of cyclic frame buffer used for motion detection
// (should, probably, depend on FPS)
// ring image buffer
vector<Mat> buf;  // 2-slot ring of grayscale frames; resized to 2 in main()
int last = 0;     // index of the most recently written slot in buf
// temporary images
Mat mhi, orient, mask, segmask, zplane;  // motion history, gradient orientation, valid-gradient mask, segmentation labels, all-zero plane
vector<Rect> regions;  // bounding rects of motion components produced by segmentMotion
// Update the motion history image (MHI) from a new frame and render the
// blue-tinted MHI plus per-component motion-direction "clocks" into dst.
//
// parameters:
// img - input video frame (BGR)
// dst - resultant motion picture (written by this function)
// diff_threshold - threshold applied to the inter-frame difference when
//                  building the motion silhouette
static void update_mhi(const Mat& img, Mat& dst, int diff_threshold)
{
	double timestamp = (double)clock() / CLOCKS_PER_SEC; // get current time in seconds
	Size size = img.size();
	int i, idx1 = last;
	Rect comp_rect;
	double count;
	double angle;
	Point center;
	double magnitude;
	Scalar color;
	// allocate images at the beginning or
	// reallocate them if the frame size is changed
	if (mhi.size() != size)
	{
		mhi = Mat::zeros(size, CV_32F);
		zplane = Mat::zeros(size, CV_8U);
		buf[0] = Mat::zeros(size, CV_8U);
		buf[1] = Mat::zeros(size, CV_8U);
	}
	cvtColor(img, buf[last], COLOR_BGR2GRAY); // convert frame to grayscale
	int idx2 = (last + 1) % 2; // index of (last - (N-1))th frame
	last = idx2;
	Mat silh = buf[idx2];
	absdiff(buf[idx1], buf[idx2], silh); // get difference between frames
	threshold(silh, silh, diff_threshold, 1, THRESH_BINARY); // and threshold it
	updateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // update MHI
	// convert MHI to blue 8u image
	mhi.convertTo(mask, CV_8U, 255. / MHI_DURATION, (MHI_DURATION - timestamp)*255. / MHI_DURATION);
	Mat planes[] = { mask, zplane, zplane };
	merge(planes, 3, dst);
	// calculate motion gradient orientation and valid orientation mask
	calcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);
	// segment motion: get sequence of motion components
	// segmask is marked motion components map. It is not used further
	regions.clear();
	segmentMotion(mhi, segmask, regions, timestamp, MAX_TIME_DELTA);
	// iterate through the motion components,
	// One more iteration (i == -1) corresponds to the whole image (global motion)
	for (i = -1; i < (int)regions.size(); i++) {
		if (i < 0) { // case of the whole image
			comp_rect = Rect(0, 0, size.width, size.height);
			color = Scalar(255, 255, 255);
			magnitude = 100;
		}
		else { // i-th motion component
			comp_rect = regions[i];
			if (comp_rect.width + comp_rect.height < 100) // reject very small components
				continue;
			color = Scalar(0, 0, 255);
			magnitude = 30;
		}
		// select component ROI
		Mat silh_roi = silh(comp_rect);
		Mat mhi_roi = mhi(comp_rect);
		Mat orient_roi = orient(comp_rect);
		Mat mask_roi = mask(comp_rect);
		// calculate orientation
		angle = calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION);
		angle = 360.0 - angle; // adjust for images with top-left origin
		count = norm(silh_roi, NORM_L1); // calculate number of points within silhouette ROI
		// check for the case of little motion
		if (count < comp_rect.width*comp_rect.height * 0.05)
			continue;
		// draw a clock with arrow indicating the direction
		center = Point((comp_rect.x + comp_rect.width / 2),
			(comp_rect.y + comp_rect.height / 2));
		// BUGFIX: the original declared `cv::Mat img = img;` here -- a local
		// that shadows the const parameter and is initialized from itself
		// (undefined behavior; the drawing went to a bogus Mat). Draw the
		// indicators on the output image `dst` instead.
		circle(dst, center, cvRound(magnitude*1.2), color, 3, 16, 0);
		line(dst, center, Point(cvRound(center.x + magnitude*cos(angle*CV_PI / 180)),
			cvRound(center.y - magnitude*sin(angle*CV_PI / 180))), color, 3, 16, 0);
	}
}
// Entry point: opens a camera (a single-digit argument selects the camera
// index; any other argument is treated as a video file name; no argument
// means camera 0), then runs the motion-template demo until the stream
// ends or any key is pressed.
int main(int argc, char** argv)
{
	VideoCapture cap;
	help();
	// single-digit argument -> camera index; otherwise -> file name
	if (argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
		cap.open(argc == 2 ? argv[1][0] - '0' : 0);
	else if (argc == 2)
		cap.open(argv[1]);
	if (!cap.isOpened())
	{
		printf("Could not initialize video capture\n");
		return 1; // BUGFIX: signal failure to the shell (was `return 0`)
	}
	buf.resize(2); // ring buffer holds the current and the previous grayscale frame
	Mat image, motion;
	for (;;)
	{
		cap >> image;
		if (image.empty()) // end of stream / read failure
			break;
		update_mhi(image, motion, 30);
		imshow("Image", image);
		imshow("Motion", motion);
		if (waitKey(10) >= 0) // any key quits
			break;
	}
	return 0;
}