// 此代码为学习记录用,无其他任何用途 (for learning/record-keeping purposes only)
#include<iostream>
#include<opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main()
{
VideoCapture cap(1);//读取摄像头
//此处为设置相机参数,需要根据使用摄像头修改,笔记本电脑的摄像头可注释掉
cap.set(CV_CAP_PROP_FRAME_WIDTH, 960);//宽度
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 640);//高度
Mat previous_frame, Previous_gray_frame;//定义前一帧图像
cap.read(previous_frame);//先读取前一帧图像进cap
cvtColor(previous_frame, Previous_gray_frame, COLOR_BGR2GRAY);//将前一帧图像变成灰度图像
Mat hsv = Mat::zeros(previous_frame.size(), previous_frame.type());//创建和前一帧图像大小、类型一样的图像
Mat frame, gray_frame;
Mat_<Point2f>flow;//存储图像中移动点
vector<Mat>video;//存储通道分离后的图像
split(hsv, video);//分离空白图像通道
Mat mag = Mat::zeros(hsv.size(), CV_32FC1);//振幅
Mat ang = Mat::zeros(hsv.size(), CV_32FC1);//角度
Mat xpts = Mat::zeros(hsv.size(), CV_32FC1);//x坐标
Mat ypts = Mat::zeros(hsv.size(), CV_32FC1);//y坐标
while (true)
{
bool ret = cap.read(frame);
if (!ret)break;
imshow("frame", frame);
cvtColor(frame, gray_frame, COLOR_BGR2GRAY);
//稠密光流函数计算出光流移动的点
calcOpticalFlowFarneback(Previous_gray_frame, gray_frame, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
//从笛卡尔坐标转换到极坐标系
for (int row = 0; row < flow.rows; row++)
{
for (int col = 0; col < flow.cols; col++)
{
//将图像坐标系转化成像素坐标系
const Point2f& flow_xy = flow.at<Point2f>(row, col);
xpts.at<float>(row, col) = flow_xy.x;
ypts.at<float>(row, col) = flow_xy.y;
}
}
cartToPolar(xpts, ypts, mag, ang);
ang = ang*180.0 / CV_PI / 2.0;//将角度值换成弧度制
normalize(mag, mag, 0, 255, NORM_MINMAX);//归一化
convertScaleAbs(mag, mag);//实现图像增强
convertScaleAbs(ang, ang);
video[0] = ang;
video[1] = Scalar(255);
video[2] = mag;
merge(video, hsv);//合并通道
Mat bgr;
cvtColor(hsv, bgr, COLOR_HSV2BGR);//从HSV色彩空间转化成BGR色彩空间
imshow("result", bgr);
int c = waitKey(60);
if (c == '27')
{
break;
}
}
waitKey(0);
return 0;
}