// OpenCV CamShift object-tracking example (detailed walkthrough):
//   Step 1: Calibrate the camera to obtain its intrinsic and extrinsic parameters.
//   Step 2: Establish the transform between the pixel and world coordinate systems.
//   Step 3: Manually select a region of interest (ROI) in the video.
//   Step 4: Take the center of the ROI as the target pixel to track.
//   Step 5: Compute the pixel's spatial pose in real time while displaying the tracking result.
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <math.h>
#include <stdio.h>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <chrono>
#include <fstream>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include "opencv2/calib3d/calib3d.hpp"
#include <cctype>
#include <string.h>
#include <time.h>
#include <windows.h>
#include <cstdio>
#include<windows.h>
#include"glew.h"
#include<GL/GL.h>
#include<GL/GLU.h>
//#include <sl/Camera.hpp>
using namespace std;
using namespace cv;
int smin = 15;
int vmin = 40;
int vmax = 256;
int bins = 16;
Point3f getWorldPoints(Point2f &inPoints, cv::Mat &rvec, cv::Mat &tvec, cv::Mat &cameraMatrix)
{
double zConst = 0;//实际坐标系的距离,若工作平面与相机距离固定可设置为0
double s;
//获取图像坐标(2*1转为3*1)
cv::Mat imagePoint = (Mat_<double>(3, 1) << double(inPoints.x), double(inPoints.y), 1);
//计算比例参数S
cv::Mat tempMat, tempMat2;
tempMat = rvec.inv() * cameraMatrix.inv() * imagePoint;
tempMat2 = rvec.inv() * tvec;
s = zConst + tempMat2.at<double>(2, 0);
s /= tempMat.at<double>(2, 0);
//计算世界坐标
cv::Mat wcPoint = rvec.inv() * (s * cameraMatrix.inv() * imagePoint - tvec);
Point3f worldPoint(wcPoint.at<double>(0, 0), wcPoint.at<double>(1, 0), wcPoint.at<double>(2, 0));
cout << "世界坐标系 x = " << worldPoint.x << " y = " << worldPoint.y << " z = " << worldPoint.z << endl;
return worldPoint;
}
int main(int argc, char** argv) {
VideoCapture capture;
//capture.open("E:/图片/data/4.mp4");
capture.open(0);
capture.set(CAP_PROP_FRAME_WIDTH, 1920.0);
capture.set(CAP_PROP_FRAME_HEIGHT, 1440.0);
if (!capture.isOpened()) {
printf("could not find video data file...\n");
return -1;
}
namedWindow("CAMShift Tracking", CV_WINDOW_AUTOSIZE);
namedWindow("ROI Histogram", CV_WINDOW_AUTOSIZE);
//设置全屏
//setWindowProperty("CAMShift Tracking", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
bool firstRead = true;//确认是否为第一帧
float hrange[] = { 0, 180 };
const float* hranges = hrange;
Rect selection;
Mat frame,hsv, hue, mask, hist, backprojection;
Mat grayImage;
//直方图
Mat drawImg = Mat::zeros(300, 300, CV_8UC3);
Mat cameraMatrix = (Mat_<double>(3, 3) <<
1056.6000, 0, 978.9200,
0, 1055.6600, 534.1640,
0, 0, 1);
Mat rvec = (Mat_<double>(3, 3) <<
0.9967, -0.0158, 0.0793,
0.01889, 0.9991, -0.0381,
-0.0787, 0.0396, 0.9961);
Mat tvec = (Mat_<double>(3, 1) <<
-128.5678,
-12.7116,
-0.6481);
while (capture.read(frame))
{
if (firstRead)
{
//first为第一帧选择的ROI区域 frame为第一帧
Rect2d first = selectROI("CAMShift Tracking", frame);//选择ROI区域
//Rect2d first = selectROI("CAMShift Tracking", hsv);//选择ROI区域
//初始化 将鼠标选择的ROI区域参数赋予程序定义的Rect selection
selection.x = first.x;
selection.y = first.y;
selection.width = first.width;
selection.height = first.height;
printf("ROI.x= %d, ROI.y= %d, width = %d, height= %d", selection.x, selection.y, selection.width, selection.height);
}
// convert to HSV
cvtColor(frame, hsv, COLOR_BGR2HSV);
//inRange检查数组元素是否在另外两个数组元素值之间
//参数:(输入要处理的图像(可以是单通道也可以是多通道),包含下边界数组标量(即H,S,V最小值)
//包含下边界数组标量(即H,S,V最大值),输出图像)
inRange(hsv, Scalar(0, smin, vmin), Scalar(180, vmax, vmax), mask);
//提取H通道
hue = Mat(hsv.size(), hsv.depth());
//HSV对应通道0,1,2,H通道对应0
//因此将输入图像HSV的0通道 复制到 H的0通道
int channels[] = { 0, 0 };
//mixChannels()函数用于将输入数组的指定通道复制到输出数组的指定通道
//参数(输入数组或向量矩阵,矩阵数量,输出数组或矩形向量,矩阵数量
//指定被复制通道与要复制到的位置组成的索引对,索引对数目)
//例如:将一个4通道BGRA图像分割成3通道BGR和一通道A图像
mixChannels(&hsv, 1, &hue, 1, channels, 1);
if (firstRead)
{
// ROI 直方图计算
//通过selection(鼠标)选择的hue和mask的区域分别命名为roi和maskroi
Mat roi(hue, selection);
Mat maskroi(mask, selection);
//calcHist计算直方图
//参数:(输入图像,输入图像的个数,需要统计直方图的第几个通道
//掩膜(计算掩膜内的直方图),输出直方图数组,需要统计直方图通道的个数
//直方图分成多少个区间,统计像素值的区间)
//掩膜:用选定的图像对处理的图像进行遮挡,来控制图像处理的区域
calcHist(&roi, 1, 0, maskroi, hist, 1, &bins, &hranges);
//归一化
normalize(hist, hist, 0, 255, NORM_MINMAX);
// show histogram直方图
//一个有16个bin,每个bin的宽度binw
int binw = drawImg.cols / bins;
//颜色索引 一个图片分成16个bin,每个bin对应一种颜色
Mat colorIndex = Mat(1, bins, CV_8UC3);
for (int i = 0; i < bins; i++)
{
colorIndex.at<Vec3b>(0, i) = Vec3b(saturate_cast<uchar>(i * 180 / bins), 255, 255);
}
//原来色彩取值是0-180,现在转换RGB的0-255
cvtColor(colorIndex, colorIndex, COLOR_HSV2BGR);
//绘制直方图
for (int i = 0; i < bins; i++)
{
int val = saturate_cast<int>(hist.at<float>(i)*drawImg.rows / 255);
//把直方图用矩形表示
//参数(输入图像,矩形的一个顶点,矩形对角线上的另一个顶点,线条颜色,粗细....)
rectangle(drawImg, Point(i*binw, drawImg.rows), Point((i + 1)*binw,
drawImg.rows - val), Scalar(colorIndex.at<Vec3b>(0, i)), -1, 8, 0);
}
}
// back projection----计算直方图反向映射
//反投射直方图原理:直方图可以作为图像的一个描述特征,计算出A的直方图
//且假设计算出来的值可以有效地代表A,之后遍历图像B,使用B的每一个像素点对应A
//的直方图中的统计值来替代原来的像素值
//参数(输入图像(必须是CV_8U,CV_16U,CV_32U的一种),输入图像的数量
//计算方向投影的通道(这里是第0通道),输入的直方图,目标反向投影输出图像
//直方图每个维度bin的取值范围)
calcBackProject(&hue, 1, 0, hist, backprojection, &hranges);
// CAMShift tracking
backprojection &= mask;
//CamShift参数:(色彩概率分布图像,选择的窗口初始值,用来判断搜寻是否停止的一个标准
//保存运行结果,包含被跟踪物体的最小矩形)
//TermCriteria::COUNT 和 TermCriteria::EPS 为终止条件类型
RotatedRect trackBox = CamShift(backprojection, selection,
TermCriteria((TermCriteria::COUNT | TermCriteria::EPS), 10, 1));
// draw location on frame;
//参数(图像,绘制椭圆圆弧所需要的外接矩形,颜色,粗细,线条类型
//ellipse(frame, trackBox, Scalar(0, 0, 255), 3, 8);
ellipse(hsv, trackBox, Scalar(0, 0, 255), 3, 8);
//定义椭圆的中点
Point2f center; //定义变量
center = trackBox.center;//读取椭圆中心
//circle(frame, center, 1, Scalar(0, 255, 0), 2, 8, 0);
circle(hsv, center, 1, Scalar(0, 255, 0), 2, 8, 0);
cout <<"像素坐标系 x = "<< center.x << " y = " << center.y << endl;
//将像素坐标转为世界坐标
getWorldPoints(center, rvec, tvec, cameraMatrix);
if (firstRead)
{
firstRead = false;
}
//imshow("CAMShift Tracking", frame);
imshow("CAMShift Tracking", hsv);
imshow("ROI Histogram", drawImg);
char c = waitKey(50);// ESC
if (c == 27)
{
break;
}
waitKey(10);
}
capture.release();
waitKey(0);
return 0;
}