// Portions of this code adapted from http://blog.csdn.net/zouxy09/article/details/8683859
// ===== flows.h =====
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/types_c.h>
#include <iostream>
#include <cstdio>
using namespace std;
using namespace cv;
// Optical-flow post-processing helpers: flow-field visualization, two-cluster
// k-means foreground segmentation, polarity normalization, and small-blob removal.
class FlowDetect
{
public:
// Fills `colorwheel` with a 55-entry (15+6+4+11+13+6) hue-transition color wheel.
void MakeColorWheel(vector<Scalar> &colorwheel);
// Renders the 2-channel float flow field `flow` as a BGR image into `color`.
void MotionToColor(Mat flow, Mat &color);
// Segments a grayscale image into 2 clusters via k-means, then cleans up the mask.
Mat KmeansClusion(Mat flow);
// Normalizes foreground/background polarity and erases small contour blobs.
Mat ImageNormalization(Mat flow);
// Fills `rect` inside `input_img` with white (255); modifies the shared buffer.
Mat DeleteSmallRect(Rect rect, Mat input_img);
private:
// Contours found by ImageNormalization (scratch storage, overwritten per call).
vector<vector<Point>> object_contours;
};
// ===== flows.cpp =====
#include "flows.h"
#define UNKNOWN_FLOW_THRESH 1e9
void FlowDetect::MakeColorWheel(vector<Scalar> &colorwheel)
{
    // Builds the flow color wheel as six hue transitions. Each segment keeps
    // two channels fixed and linearly ramps the third up or down across
    // `steps` entries, producing 15+6+4+11+13+6 = 55 colors in total.
    struct Segment { int steps; int base[3]; int ramp_channel; bool rising; };
    const Segment segments[6] = {
        { 15, { 255,   0,   0 }, 1, true  },  // red -> yellow  (G rises)
        {  6, { 255, 255,   0 }, 0, false },  // yellow -> green (R falls)
        {  4, {   0, 255,   0 }, 2, true  },  // green -> cyan   (B rises)
        { 11, {   0, 255, 255 }, 1, false },  // cyan -> blue    (G falls)
        { 13, {   0,   0, 255 }, 0, true  },  // blue -> magenta (R rises)
        {  6, { 255,   0, 255 }, 2, false }   // magenta -> red  (B falls)
    };
    for (int s = 0; s < 6; s++)
    {
        const Segment &seg = segments[s];
        for (int i = 0; i < seg.steps; i++)
        {
            int c[3] = { seg.base[0], seg.base[1], seg.base[2] };
            c[seg.ramp_channel] = seg.rising ? 255 * i / seg.steps
                                             : 255 - 255 * i / seg.steps;
            colorwheel.push_back(Scalar(c[0], c[1], c[2]));
        }
    }
}
void FlowDetect::MotionToColor(Mat flow, Mat &color)
{
    // Renders a dense 2-channel float flow field (fx, fy per pixel) as a BGR
    // image: direction selects a hue from the color wheel, magnitude
    // (relative to the largest motion in the frame) sets the saturation.
    if (color.empty())
        color.create(flow.rows, flow.cols, CV_8UC3);
    static vector<Scalar> colorwheel; // Scalar r,g,b — built once, reused across calls
    if (colorwheel.empty())
        MakeColorWheel(colorwheel);
    // Pass 1: find the maximum flow magnitude so fx/fy can be normalized,
    // skipping "unknown flow" vectors.
    float maxrad = -1;
    for (int i = 0; i < flow.rows; ++i)
    {
        for (int j = 0; j < flow.cols; ++j)
        {
            Vec2f flow_at_point = flow.at<Vec2f>(i, j);
            float fx = flow_at_point[0];
            float fy = flow_at_point[1];
            if ((fabs(fx) > UNKNOWN_FLOW_THRESH) || (fabs(fy) > UNKNOWN_FLOW_THRESH))
                continue;
            float rad = sqrt(fx * fx + fy * fy);
            maxrad = maxrad > rad ? maxrad : rad;
        }
    }
    // BUG FIX: a frame with no valid motion leaves maxrad at -1 (or 0),
    // and the divisions below would produce inf/NaN garbage.
    if (maxrad <= 0)
        maxrad = 1;
    // Pass 2: map each normalized vector onto the color wheel.
    for (int i = 0; i < flow.rows; ++i)
    {
        for (int j = 0; j < flow.cols; ++j)
        {
            uchar *data = color.data + color.step[0] * i + color.step[1] * j;
            Vec2f flow_at_point = flow.at<Vec2f>(i, j);
            // BUG FIX: test the RAW flow against the unknown-flow threshold
            // BEFORE normalizing; the original divided by maxrad first, so
            // the check could never trigger.
            if ((fabs(flow_at_point[0]) > UNKNOWN_FLOW_THRESH) ||
                (fabs(flow_at_point[1]) > UNKNOWN_FLOW_THRESH))
            {
                data[0] = data[1] = data[2] = 0; // unknown flow -> black
                continue;
            }
            float fx = flow_at_point[0] / maxrad;
            float fy = flow_at_point[1] / maxrad;
            float rad = sqrt(fx * fx + fy * fy);
            float angle = atan2(-fy, -fx) / CV_PI;                     // in [-1, 1]
            float fk = (angle + 1.0) / 2.0 * (colorwheel.size() - 1); // fractional wheel index
            int k0 = (int)fk;
            int k1 = (k0 + 1) % colorwheel.size();
            float f = fk - k0;
            //f = 0; // uncomment to see original color wheel
            for (int b = 0; b < 3; b++)
            {
                float col0 = colorwheel[k0][b] / 255.0;
                float col1 = colorwheel[k1][b] / 255.0;
                float col = (1 - f) * col0 + f * col1; // interpolate between wheel entries
                if (rad <= 1)
                    col = 1 - rad * (1 - col); // increase saturation with radius
                else
                    col *= .75; // out of range
                data[2 - b] = (int)(255.0 * col); // wheel is RGB; Mat stores BGR
            }
        }
    }
}
Mat FlowDetect::KmeansClusion(Mat flow)
{
    // Segments an image into foreground/background by running k-means (k = 2)
    // on the raw pixel intensities, rendering each cluster as a gray level,
    // then cleaning the mask with erode/dilate and ImageNormalization().
    // NOTE(review): indexing assumes `flow` is CV_8UC1 — confirm against callers.
    Mat samples(flow.cols * flow.rows, 1, CV_32FC1); // one float sample per pixel
    Mat labels(flow.cols * flow.rows, 1, CV_32SC1);  // per-sample cluster id (32-bit int)
    uchar* p;
    int i, j, k = 0;
    for (i = 0; i < flow.rows; i++)
    {
        p = flow.ptr<uchar>(i);
        for (j = 0; j < flow.cols; j++)
        {
            // BUG FIX: samples is CV_32FC1 (one float per row); the original
            // wrote through at<Vec3f>, which writes 3 floats per element and
            // runs past the row's storage (heap corruption).
            samples.at<float>(k, 0) = float(p[j]);
            k++;
        }
    }
    int clusterCount = 2;
    Mat centers(clusterCount, 1, samples.type());
    kmeans(samples, clusterCount, labels,
        TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 1.0),
        2, KMEANS_PP_CENTERS, centers);
    // Render the 2 clusters as distinct gray levels: cluster 0 -> 255, 1 -> 0.
    Mat img1(flow.rows, flow.cols, CV_8UC1);
    float step = 255 / (clusterCount - 1);
    k = 0;
    for (i = 0; i < img1.rows; i++)
    {
        p = img1.ptr<uchar>(i);
        for (j = 0; j < img1.cols; j++)
        {
            int tt = labels.at<int>(k, 0);
            k++;
            p[j] = 255 - tt * step;
        }
    }
    // Morphological cleanup: small erode drops speckle noise, the larger
    // dilate reconnects the surviving blobs.
    Mat img2 = img1.clone();
    Mat element_erode = getStructuringElement(MORPH_RECT, Size(3, 3));
    Mat element_dilate = getStructuringElement(MORPH_RECT, Size(9, 9));
    erode(img2, img2, element_erode);
    dilate(img2, img2, element_dilate);
    img1 = ImageNormalization(img2);
    return img1;
}
Mat FlowDetect::ImageNormalization(Mat flow)
{
    // Normalizes the binary mask's polarity (by majority vote between 0 and
    // 255 pixels) and whitens the bounding boxes of small contours.
    // NOTE(review): assumes `flow` is a binary CV_8UC1 image whose pixels are
    // only 0 or 255 (the output of KmeansClusion's morphology stage).
    // BUG FIX(cleanup): the original computed a 256-bin histogram with
    // calcHist whose result was never read — dead work, removed.
    int zeros = 0;  // count of 0-valued pixels
    int whites = 0; // count of 255-valued pixels
    for (int j = 0; j < flow.rows; j++)
    {
        uchar* data = flow.ptr<uchar>(j);
        for (int i = 0; i < flow.cols; i++)
        {
            if (data[i] == 0)
            {
                zeros = zeros + 1;
            }
            if (data[i] == 255)
            {
                whites = whites + 1;
            }
        }
    }
    // If 255 dominates, the clustering assigned the majority (background)
    // class to white — invert the whole mask to flip the polarity.
    if (zeros < whites)
    {
        for (int j = 0; j < flow.rows; j++)
        {
            uchar* data = flow.ptr<uchar>(j);
            for (int i = 0; i < flow.cols; i++)
            {
                data[i] = 255 - data[i];
            }
        }
    }
    // findContours modifies its input, so run it on a copy.
    Mat flow_copy = flow.clone();
    findContours(flow_copy, object_contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    Rect img_background_rect;
    for (unsigned int num_bounding = 0; num_bounding < object_contours.size(); num_bounding++)
    {
        img_background_rect = boundingRect(object_contours[num_bounding]);
        // Treat contours with a bounding box under 1000 px^2 as noise.
        // NOTE(review): DeleteSmallRect paints them white (255); verify the
        // intended foreground/background polarity with the caller.
        if (img_background_rect.width * img_background_rect.height < 1000)
        {
            DeleteSmallRect(img_background_rect, flow);
        }
    }
    return flow;
}
Mat FlowDetect::DeleteSmallRect(Rect rect, Mat input_img)
{
    // Paints every pixel inside `rect` white (255). cv::Mat shares its pixel
    // buffer on copy, so the caller's image is modified in place as well.
    const int row_end = rect.y + rect.height;
    const int col_end = rect.x + rect.width;
    for (int row = rect.y; row < row_end; ++row)
    {
        uchar* scanline = input_img.ptr<uchar>(row);
        for (int col = rect.x; col < col_end; ++col)
            scanline[col] = 255;
    }
    return input_img;
}
// ===== main.cpp =====
#include<iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
#include <opencv/cvaux.h>
#include <vector>
#include <cv.h>
#include <math.h>
#include "Saliency.h"
#include "flows.h"
//#include "deleteerrorobject.h"
#pragma warning(disable:4996)
using namespace std;
using namespace cv;
int main()
{
FlowDetect flowdetect;
VideoCapture video("C:\\Users\\ThinkCentre\\Desktop\\数学建模\\附件2-典型视频\\有晃动\\people2\\input.avi");
string filenames = "C:\\Users\\ThinkCentre\\Desktop\\数学建模\\附件2-典型视频\\input.avi";
VideoWriter outputVideo(filenames, CV_FOURCC('M', 'J', 'P', 'G'), 25.0, Size(320, 240), false);
int frame_number,total_frame_number;
Mat frame, mask, thresholdImage, output, saliencymap, bg;
Mat pre_img;
Mat flow_img;
Mat motion2color;
Mat detect_bgSubtractor;
BackgroundSubtractorMOG bgSubtractor(10, 20, 0.8, false);
Mat element_erode = getStructuringElement(MORPH_RECT, Size(3, 3));
Mat element_dilate = getStructuringElement(MORPH_RECT, Size(9, 9));
Mat new_mask_img;
if (!video.isOpened())
{
cout << "please try again!" << endl;
return (-1);
}
else
{
total_frame_number = int(video.get(CV_CAP_PROP_FRAME_COUNT));
for (frame_number = 0; frame_number < total_frame_number-1; frame_number++)
{
video >> frame;
if (frame_number == 0)
{
pre_img = frame.clone();
cvtColor(pre_img, pre_img, CV_RGB2GRAY);
}
else
{
cvtColor(frame, frame, CV_RGB2GRAY);
calcOpticalFlowFarneback(pre_img, frame, flow_img, 0.6, 3, 5, 1, 1, 1.2, 1);
flowdetect.MotionToColor(flow_img, motion2color);
cvtColor(motion2color, detect_bgSubtractor, CV_BGR2GRAY);
pre_img = frame.clone();
outputVideo << detect_bgSubtractor;
namedWindow("detect_bgSubtractor");
imshow("detect_bgSubtractor", detect_bgSubtractor);
waitKey(600);
}
}
}
return 0;
}