#include "VideoDemo.h"
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
VideoDemo::VideoDemo()
{
}
VideoDemo::~VideoDemo()
{
}
/* Open a video file */
void VideoDemo::test1()
{
VideoCapture vcHandle = VideoCapture("../x64/Debug/video/10.mp4");
Mat frame;
if(!vcHandle.isOpened()) {
return;
}
int i = 1;
while(1) {
vcHandle.read(frame);
if(frame.empty()) {
break;
}
//namedWindow("1", WINDOW_NORMAL);
imshow("1", frame);
string str = "../x64/Debug/picture/test1/" + to_string(i) + ".png";
imwrite(str, frame);
char c = waitKey(5);
if(c == '2') { //keyboard event: press '2' to quit
break;
}
i++;
}
vcHandle.release();
}
//https://blog.csdn.net/Day_upon/article/details/85991445
//All of the FOURCC codes below work under OpenCV 4
//CV_FOURCC('M','J','P','G')  note: video saved with this code could not be opened (under OpenCV 3)
//CV_FOURCC('P', 'I', 'M', '1') = MPEG-1 codec (could not be opened) (under OpenCV 3)
//CV_FOURCC('M', 'J', 'P', 'G') = motion-jpeg codec (could not be opened) (under OpenCV 3)
//CV_FOURCC('M', 'P', '4', '2') = MPEG-4.2 codec (saved avi opens fine) (under OpenCV 3)
//CV_FOURCC('D', 'I', 'V', '3') = MPEG-4.3 codec (saved avi opens fine) (under OpenCV 3)
//CV_FOURCC('D', 'I', 'V', 'X') = MPEG-4 codec (opens fine) (under OpenCV 3)
//CV_FOURCC('U', '2', '6', '3') = H263 codec (not tested)
//CV_FOURCC('I', '2', '6', '3') = H263I codec (not tested)
//CV_FOURCC('F', 'L', 'V', '1') = FLV1 codec (could not be opened)
/* Save a video */
void VideoDemo::test2()
{
VideoCapture vcHandle = VideoCapture("../x64/Debug/video/cork.mp4");
Mat frame;
if(!vcHandle.isOpened()) {
return;
}
int frameWidth = vcHandle.get(CAP_PROP_FRAME_WIDTH);
int frameHeight = vcHandle.get(CAP_PROP_FRAME_HEIGHT);
int frameCount = vcHandle.get(CAP_PROP_FRAME_COUNT);
double fps = vcHandle.get(CAP_PROP_FPS);
int fourcc = vcHandle.get(CAP_PROP_FOURCC);
std::cout << "[" << frameWithd << "]-" << "[" << frameHeight << "]-"<< "[" << frameCount << "]-"<< "[" << fps << "]-" << std::endl;
VideoWriter write;
int fourcc1 = write.fourcc('M', 'J', 'P', 'G'); //('D', 'I', 'V', 'X') also saves mp4 correctly
bool ret = write.open("../x64/Debug/video/cork_1.avi", fourcc1, fps, Size(frameWidth, frameHeight), true);
while(1) {
vcHandle >> frame; //operator>> is equivalent to read()
if(frame.empty()) {
break;
}
write.write(frame);
//write << (frame);
char c = waitKey(1);
if(c == '2') {
break;
}
}
vcHandle.release();
write.release();
std::cout.flush();
}
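// A small sketch (not part of the original demo): the value returned by
// get(CAP_PROP_FOURCC) above is the four character code packed into an int,
// one character per byte. This hypothetical helper unpacks it for printing.
static string fourccToString(int fourcc)
{
char code[5] = {
(char)(fourcc & 0xFF),          //first character in the lowest byte
(char)((fourcc >> 8) & 0xFF),
(char)((fourcc >> 16) & 0xFF),
(char)((fourcc >> 24) & 0xFF),
'\0'
};
return string(code);
}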
VideoCapture g_cap;
void CallbackTest3(int pos, void *data)
{
g_cap.set(CAP_PROP_POS_FRAMES, pos);
}
/* Drag the video position with a trackbar */
void VideoDemo::test3()
{
std::string strWin = "video";
namedWindow(strWin, WINDOW_NORMAL);
g_cap = VideoCapture("../x64/Debug/video/112.mp4");
int frames = g_cap.get(CAP_PROP_FRAME_COUNT);
int tmpW = g_cap.get(CAP_PROP_FRAME_WIDTH);
int tmpH = g_cap.get(CAP_PROP_FRAME_HEIGHT);
int CurPos = 0;
createTrackbar("pos", strWin, &CurPos, frames, CallbackTest3);
Mat frame;
while(1) {
g_cap >> frame;
if(frame.empty()) {
char c = waitKey(33);
continue; //do not exit when the video ends; keep polling
}
int curPos = g_cap.get(CAP_PROP_POS_FRAMES);
setTrackbarPos("pos", strWin, curPos);
imshow(strWin, frame);
char c = waitKey(50);
if(c == '2') {
break;
}
}
}
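// Sketch (assumption): besides CAP_PROP_POS_FRAMES used in the trackbar callback
// above, a capture can also be positioned by timestamp. This hypothetical helper
// seeks to a given second with CAP_PROP_POS_MSEC.
static void seekToSecond(VideoCapture &cap, double sec)
{
cap.set(CAP_PROP_POS_MSEC, sec * 1000.0); //position in milliseconds
}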
/* Open a video using grab()/retrieve() */
void VideoDemo::test4()
{
VideoCapture vcHandle = VideoCapture("../x64/Debug/video/112.mp4");
Mat frame;
if(!vcHandle.isOpened()) {
return;
}
while(1) {
vcHandle.grab();
vcHandle.retrieve(frame);
if(frame.empty()) {
break;
}
//namedWindow("1", WINDOW_NORMAL);
imshow("1", frame);
char c = waitKey(50);
if(c == '2') { //keyboard event: press '2' to quit
break;
}
}
vcHandle.release();
}
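// Sketch (assumption): grab()/retrieve() mainly pays off with several cameras.
// Grabbing both frames first keeps the exposures close in time; decoding happens
// afterwards with retrieve(). The helper below is hypothetical.
static bool readSyncedPair(VideoCapture &capL, VideoCapture &capR, Mat &left, Mat &right)
{
if (!capL.grab() || !capR.grab()) { //grab both frames as close together as possible
return false;
}
return capL.retrieve(left) && capR.retrieve(right); //decode after both grabs
}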
/* Frame differencing */
void VideoDemo::test5()
{
VideoCapture vcHandle = VideoCapture("../x64/Debug/video/222.mp4");
Mat frame, preFrame;
if(!vcHandle.isOpened()) {
return;
}
int i=0;
while(1) {
vcHandle.read(frame);
if(frame.empty()) {
break;
}
Point a(100, 100);
Point b(200, 300);
LineIterator it(frame, a, b);
for(int j=0; j<it.count;j++) {
(*it)[1] = 255;
(*it)[0] = 255;
(*it)[2] = 255;
it++;
}
imshow("当前帧", frame);
if(i > 0) {
imshow("差", frame - preFrame); //simple difference (moving foreground)
imshow("上一帧", preFrame); //previous frame
Mat foreground;
absdiff(preFrame, frame, foreground);
threshold(foreground, foreground, 15, 255, THRESH_BINARY);
imshow("帧间差", foreground); //binarized frame difference (moving foreground)
}
preFrame = frame.clone();
char c = waitKey(50);
if(c == '2') { //keyboard event: press '2' to quit
break;
}
i++;
}
vcHandle.release();
}
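// Sketch (assumption): frame differencing is usually done on grayscale images,
// followed by a small morphological open to suppress isolated noise pixels.
// This helper is illustrative only; the threshold value 15 matches test5.
static Mat frameDiffMask(const Mat &prevBGR, const Mat &currBGR, double thresh = 15)
{
Mat prevGray, currGray, diff, mask;
cvtColor(prevBGR, prevGray, COLOR_BGR2GRAY);
cvtColor(currBGR, currGray, COLOR_BGR2GRAY);
absdiff(prevGray, currGray, diff); //absolute frame difference
threshold(diff, mask, thresh, 255, THRESH_BINARY);
morphologyEx(mask, mask, MORPH_OPEN, getStructuringElement(MORPH_RECT, Size(3, 3))); //remove speckles
return mask;
}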
/* Background averaging method */
void VideoDemo::test6()
{
VideoCapture vcHandle = VideoCapture("../x64/Debug/video/2.mp4");
Mat frame;
vcHandle.read(frame);
Mat accumulateBg, accumulateDiff, preFrame, frontground, background; //accumulateBg: running sum of frames (mean background), accumulateDiff: running sum of absolute frame differences, preFrame: previous frame
Mat tmp, tmp2; //scratch images (current frame as float, absolute difference)
vector<Mat> vBGR(3), vBackground(3), vFrontground(3); //single-channel float planes
Mat maskt; //mask
int iCount;
//allocate all of the accumulation images up front (these are local Mats; their pixel buffers live on the heap)
Size sz = frame.size();
accumulateBg = Mat::zeros(sz, CV_32FC3);
accumulateDiff = Mat::zeros(sz, CV_32FC3);
frontground = Mat::zeros(sz, CV_32FC3);
background = Mat::zeros(sz, CV_32FC3);
preFrame = Mat::zeros(sz, CV_32FC3);
iCount = 0;
tmp = Mat::zeros(sz, CV_32FC3);
tmp2 = Mat::zeros(sz, CV_32FC3);
maskt = Mat::zeros(sz, CV_32FC1);
int i = 0;
while(i < 500) { //learn from the first 500 frames, so the video should be longer than that
vcHandle.read(frame);
if(frame.empty()) {
break;
}
//accumulate the sum of frames and the sum of absolute frame-to-frame differences (a faster stand-in for learning the per-pixel standard deviation)
static int first = 1;
frame.convertTo(tmp, CV_32F);
if (!first) {
//accumulateBg += tmp; //accumulate the background sum
accumulate(tmp, accumulateBg);
absdiff(tmp, preFrame, tmp2);
accumulateDiff += tmp2; //accumulate the absolute frame differences
iCount += 1;
}
first = 0;
preFrame = tmp;
imshow("原图", frame);
char c = waitKey(20);
if(c == '2') { //keyboard event: press '2' to quit
break;
}
i++;
}
//compute the per-pixel mean and spread (mean absolute difference)
accumulateBg = accumulateBg / (float)iCount; //mean of the accumulated frames
accumulateDiff = accumulateDiff / (float)iCount; //mean of the accumulated absolute differences
accumulateDiff += Scalar(1.0, 1.0, 1.0); //add 1 to every element so the mean absolute difference is never zero
frontground = accumulateBg + (accumulateDiff * 7.0); //pixels more than n times the mean absolute difference above the mean are treated as foreground (upper threshold)
split(frontground, vFrontground);
background = accumulateBg - (accumulateDiff * 6.0); //pixels more than n times the mean absolute difference below the mean are treated as background (lower threshold)
split(background, vBackground);
Mat mask;
while (true) {
vcHandle >> frame;
if(frame.empty()) {
break;
}
imshow("原图", frame);
//segment foreground from background: the upper/lower images computed above act as per-pixel thresholds
frame.convertTo(tmp, CV_32F);
split(tmp, vBGR);
//channel 1
inRange(vBGR[0], vBackground[0], vFrontground[0], mask);
//channel 2
inRange(vBGR[1], vBackground[1], vFrontground[1], maskt);
mask = min(mask, maskt);
//channel 3
inRange(vBGR[2], vBackground[2], vFrontground[2], maskt);
mask = min(mask, maskt);
mask = 255 - mask;
split(frame, vBGR);
vBGR[0] = max(mask, vBGR[0]);
vBGR[1] = max(mask, vBGR[1]);
vBGR[2] = max(mask, vBGR[2]);
merge(vBGR, frame);
imshow("frame", frame);
char c = waitKey(30);
if(c == '2') { //keyboard event: press '2' to quit
break;
}
}
vcHandle.release();
}
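// Sketch (assumption): OpenCV's accumulateWeighted() gives a lighter, incremental
// alternative to the batch mean/diff model in test6: the background is an
// exponentially weighted running average of the frames. Function name and alpha
// value below are illustrative only.
static void runningAverageBackground(VideoCapture &cap, double alpha = 0.02)
{
Mat frame, bg32, bg8;
while (cap.read(frame)) {
if (bg32.empty()) {
frame.convertTo(bg32, CV_32FC3); //initialize with the first frame
}
accumulateWeighted(frame, bg32, alpha); //bg = (1 - alpha) * bg + alpha * frame
bg32.convertTo(bg8, CV_8UC3);
imshow("running average background", bg8);
if (waitKey(20) == '2') {
break;
}
}
}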
#define CHANNELS 3 //number of image channels to process; must not exceed the number of channels of the image itself
//data structure of a code element (codeword)
typedef struct ce {
uchar learnHigh[CHANNELS]; //upper learning threshold for each channel of this codeword
uchar learnLow[CHANNELS]; //lower learning threshold for each channel; during learning, a new pixel whose channel values x[i] all satisfy learnLow[i] <= x[i] <= learnHigh[i] is merged into this codeword
uchar max[CHANNELS]; //per-channel maximum of the pixels belonging to this codeword
uchar min[CHANNELS]; //per-channel minimum of the pixels belonging to this codeword
int t_last_update; //time of the last update of this codeword (one frame = one time unit); used to compute stale
int stale; //longest negative run (longest period of inactivity)
} code_element;
//data structure of a codebook
typedef struct code_book {
code_element **cb; //array of pointers to codewords; adding a codeword then only needs a pointer assignment instead of copying codewords around
int numEntries; //number of codewords in this codebook
int t; //current time of this codebook, incremented on every access; one frame = one time unit
} codeBook;
// Update a codebook with a new data point
// pYuv        pointer to a YUV pixel
// c           codebook for this pixel
// cbBounds    learning bounds for the codebook
// numChannels number of colour channels being learned
// NOTE: cbBounds must have size cbBounds[numChannels]
// RETURN: index of the matched or newly created codeword
int updateCodeBook(uchar *pYuv, codeBook &c, unsigned *cbBounds, int numChannels)
{
if (c.numEntries == 0) {
c.t = 0; //initialize the time to 0 while the codebook is still empty
}
c.t += 1; //incremented once per call, i.e. once per frame
//set the upper and lower learning bounds
int n;
int high[3], low[3];
for(n = 0; n<numChannels; n++) {
high[n] = *(pYuv + n) + (int)*(cbBounds + n); //*(pYuv+n) is equivalent to pYuv[n]; *(pYuv+n) measured slightly faster
if (high[n] > 255) {
high[n] = 255;
}
low[n] = *(pYuv + n) - (int)*(cbBounds + n);
if (low[n] < 0) {
low[n] = 0; //the pixel's channel values plus/minus cbBounds, clamped to [0, 255], give the learning bounds for this pixel
}
}
//see whether this fits an existing codeword
int matchChannel;
int i;
for(i = 0; i<c.numEntries; i++) { //iterate over every codeword in this codebook and test whether the pixel matches one of them
matchChannel = 0;
for(n = 0; n<numChannels; n++) { //iterate over every channel
if ((c.cb[i]->learnLow[n] <= *(pYuv + n)) && (*(pYuv + n) <= c.cb[i]->learnHigh[n])) { //the channel value lies within this codeword's learning bounds
matchChannel++;
}
}
if (matchChannel == numChannels) { //every channel matched, so the pixel belongs to this codeword
c.cb[i]->t_last_update = c.t;
//update this codeword's time to the current time, then adjust its per-channel extremes
for(n = 0; n<numChannels; n++){ //adjust this codeword's per-channel max/min
if (c.cb[i]->max[n] < *(pYuv + n)) {
c.cb[i]->max[n] = *(pYuv + n);
}
else if (c.cb[i]->min[n] > *(pYuv + n)) {
c.cb[i]->min[n] = *(pYuv + n);
}
}
break;
}
}
if (i == c.numEntries) { //no existing codeword matched, so create a new one
code_element **foo = new code_element*[c.numEntries + 1]; //pointer array with one extra slot
for(int ii = 0; ii<c.numEntries; ii++) { //copy the pointers to the existing codewords
foo[ii] = c.cb[ii]; //the first c.numEntries slots point at the existing codewords
}
foo[c.numEntries] = new code_element; //allocate the new codeword
if (c.numEntries) {
delete[] c.cb; //delete the old pointer array (the codewords themselves are kept)
}
c.cb = foo; //swap in the new pointer array
for(n = 0; n<numChannels; n++) { //fill in the new codeword's per-channel data
c.cb[c.numEntries]->learnHigh[n] = high[n];
c.cb[c.numEntries]->learnLow[n] = low[n];
c.cb[c.numEntries]->max[n] = *(pYuv + n);
c.cb[c.numEntries]->min[n] = *(pYuv + n);
}
c.cb[c.numEntries]->t_last_update = c.t;
c.cb[c.numEntries]->stale = 0;
c.numEntries += 1;
}
//book-keeping to track potentially stale entries
for(int s = 0; s<c.numEntries; s++) {
int negRun = c.t - c.cb[s]->t_last_update; //how long this codebook entry has gone without an update
if (c.cb[s]->stale < negRun) { //keep the longest run without an update
c.cb[s]->stale = negRun;
}
}
//slowly adjust the learning bounds
for(n = 0; n<numChannels; n++) { //if the channel value is inside the high/low bounds but outside the codeword's learning bounds, grow the learning bounds slowly
if (c.cb[i]->learnHigh[n] < high[n]) {
c.cb[i]->learnHigh[n] += 1;
}
if (c.cb[i]->learnLow[n] > low[n]) {
c.cb[i]->learnLow[n] -= 1;
}
}
return(i);
}
// Given a pixel and a codebook, decide whether the pixel is covered by the codebook
// pYuv        pointer to a YUV pixel
// c           codebook for this pixel
// numChannels number of channels being tested
// maxMod      added (may be negative) to the codeword max when deciding whether a new pixel is foreground
// minMod      subtracted (may be negative) from the codeword min when deciding whether a pixel is foreground
// NOTE: minMod and maxMod must have length numChannels, e.g. 3 channels => minMod[3], maxMod[3]
// Return: 0 => background, 255 => foreground
uchar backgroundDiff(uchar *pYuv, codeBook &c, int numChannels, int *minMod, int *maxMod)
{
//the lookup below mirrors the codeword search used during background learning
int matchChannel;
//see whether the pixel fits an existing codeword
int i;
for(i = 0; i<c.numEntries; i++) {
matchChannel = 0;
for(int n = 0; n<numChannels; n++) {
if ((c.cb[i]->min[n] - minMod[n] <= *(pYuv + n)) && (*(pYuv + n) <= c.cb[i]->max[n] + maxMod[n])) {
matchChannel++; //entry found for this channel
}
else {
break;
}
}
if (matchChannel == numChannels) {
break; //found an entry that matches every channel
}
}
if (i == c.numEntries) {
return(255); //no codeword matched the pixel in every channel, so it is treated as foreground (white)
}
return(0);
}
// After some learning time, call this periodically to clear stale codebook entries
// c       codebook to clean
// Return: number of entries cleared
int clearStaleEntries(codeBook &c)
{
int staleThresh = c.t >> 1; //staleness threshold: half the codebook's running time
int *keep = new int[c.numEntries]; //marker array
int keepCnt = 0; //number of codewords to keep
//see which codebook entries are too stale
for(int i = 0; i<c.numEntries; i++) { //iterate over every codeword in the codebook
if (c.cb[i]->stale > staleThresh) { //if the codeword has not been updated for longer than the threshold, mark it for deletion
keep[i] = 0; //mark for destruction
}
else {
keep[i] = 1; //mark to keep
keepCnt += 1;
}
}
//keep only the good ones
c.t = 0; //full reset of the stale tracking; the codebook time restarts at zero
code_element **foo = new code_element*[keepCnt]; //pointer array of size keepCnt
int k = 0;
for(int ii = 0; ii<c.numEntries; ii++) {
if (keep[ii]) {
foo[k] = c.cb[ii];
foo[k]->stale = 0; //these entries must be refreshed before the next clearing round
foo[k]->t_last_update = 0;
k++;
}
else {
delete c.cb[ii]; //free the stale codeword itself, not just its pointer slot
}
}
delete[] keep;
delete[] c.cb;
c.cb = foo; //swap in the new pointer array
int numCleared = c.numEntries - keepCnt; //number of codewords cleared
c.numEntries = keepCnt; //number of codewords remaining
return(numCleared);
}
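// Sketch (assumption): test7 below only does `delete[] cB`, which frees the
// codebook array but not the individual code elements that updateCodeBook()
// allocated with `new`. A hypothetical cleanup helper would look like this.
static void freeCodeBook(codeBook &c)
{
for (int i = 0; i < c.numEntries; i++) {
delete c.cb[i]; //free each codeword
}
delete[] c.cb; //free the pointer array
c.cb = NULL;
c.numEntries = 0;
}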
/* Background subtraction using a codebook */
void VideoDemo::test7()
{
/* The codebook model works on colour video sequences: based on the colour distortion
and brightness range of the consecutive samples at each pixel, the background pixel is
represented by a codebook, and every new input pixel is compared against its codebook
(background differencing) to extract foreground pixels. One codebook is built for each
background pixel and is updated as its codewords are updated; the codebooks of all
pixels together form the background model. In short, a pixel p is described by one
codebook (cb). */
VideoCapture capture = VideoCapture("../x64/Debug/video/2.mp4");
Mat frame;
codeBook* cB; //one codebook per pixel
unsigned cbBounds[CHANNELS]; //learning bounds for the codewords
uchar* pColor; //pointer into the frame's pixel data (the original algorithm uses YUV; here the BGR frame is used directly)
int imageLen;
int nChannels = CHANNELS;
int minMod[CHANNELS];
int maxMod[CHANNELS];
capture >> frame;
Mat ImaskCodeBook(frame.rows, frame.cols, CV_8UC1, Scalar(255)); //8-bit single-channel mask with the same size as the frame, initialized to white
imageLen = frame.cols * frame.rows;
cB = new codeBook[imageLen]; //one codebook per pixel so every pixel can be processed
for(int i = 0; i<imageLen; i++) { //initialize every codebook with zero codewords
cB[i].numEntries = 0;
}
for(int i = 0; i<nChannels; i++) {
cbBounds[i] = 10; //used to derive the per-channel learning bounds of each codeword
minMod[i] = 20; //used in the background differencing function
maxMod[i] = 20; //tune these values for the best segmentation
}
// process every frame of the video
for(int i = 0; ; i++) {
capture >> frame;
if (frame.empty()) {
break;
}
if (i <= 30) { //learn the background during the first 30 frames
pColor = (uchar *)(frame.ptr(0)); //ptr(0) points at the first element of the first row, i.e. the start of the frame's channel data
for(int c = 0; c<imageLen; c++) { //visit every pixel of the frame and build its codebook
updateCodeBook(pColor, cB[c], cbBounds, nChannels); //capture the background variation of this pixel
pColor += 3; //3-channel image: advance to the next pixel
}
if (i == 30) { //at frame 30, clear the stale codewords from every codebook
for(int c = 0; c<imageLen; c++) { //visit every pixel's codebook
clearStaleEntries(cB[c]);
}
}
}
else {
uchar maskPixelCodeBook;
pColor = (uchar *)(frame.ptr<uchar>(0));
uchar *pMask = (uchar *)(ImaskCodeBook.ptr(0)); //single-channel image: pointer to the start of ImaskCodeBook's data
for(int c = 0; c<imageLen; c++) {
maskPixelCodeBook = backgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);
*pMask++ = maskPixelCodeBook;
pColor += 3; //pColor walks a 3-channel image
}
}
imshow("Raw", frame);
imshow("CodeBook", ImaskCodeBook);
char c = waitKey(20);
if(c == '2') {
break;
}
}
delete[] cB;
}
void refineSegments(const Mat& img, Mat& mask, Mat& dst)
{
int niters = 3;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Mat temp;
dilate(mask, temp, Mat(), Point(-1,-1), niters);
erode(temp, temp, Mat(), Point(-1,-1), niters*2);
dilate(temp, temp, Mat(), Point(-1,-1), niters);
findContours(temp, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
if( contours.size() == 0 ) {
return;
}
dst = Mat::zeros(img.size(), CV_8UC3);
int idx = 0, largestComp = 0;
double maxArea = 0;
for(; idx < hierarchy.size(); idx++) {
const vector<Point>& c = contours[idx];
double area = fabs(contourArea(Mat(c)));
if(area > maxArea) { //find the contour with the largest area
maxArea = area;
largestComp = idx;
}
}
drawContours(dst, contours, largestComp, Scalar(255, 255, 255), FILLED, LINE_8, hierarchy);
}
/* MOG2 */
void VideoDemo::test8()
{
VideoCapture cap;
bool update_bg_model = true;
cap = VideoCapture("../x64/Debug/video/2.mp4");
if(!cap.isOpened()) {
printf("\nCan not open camera or video file\n");
return;
}
Mat frame, bgmask, out_frame;
cap >> frame;
if(frame.empty()) {
printf("can not read data from the video source\n");
return;
}
Ptr<BackgroundSubtractorMOG2> bgsubtractor = createBackgroundSubtractorMOG2(500, 9, true);
bgsubtractor->setVarThreshold(10);
for(;;) {
cap >> frame;
if(frame.empty()) {
break;
}
bgsubtractor->apply(frame, bgmask, -1); //raw foreground mask: close to the moving foreground but noisy ("dirty"); -1 = automatic learning rate
imshow("video", bgmask);
refineSegments(frame, bgmask, out_frame);
imshow("segmented", out_frame);
char c = waitKey(20);
if(c == '2') {
break;
}
}
}
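// Sketch (assumption): createBackgroundSubtractorKNN() is a drop-in alternative
// to the MOG2 subtractor used in test8 and often gives a cleaner mask on noisy
// footage. The history/threshold values below are simply the OpenCV defaults.
static void knnBackgroundDemo(VideoCapture &cap)
{
Ptr<BackgroundSubtractor> knn = createBackgroundSubtractorKNN(500, 400.0, true);
Mat frame, mask;
while (cap.read(frame)) {
knn->apply(frame, mask); //learning rate defaults to automatic
imshow("KNN mask", mask);
if (waitKey(20) == '2') {
break;
}
}
}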
/* Dense optical flow */
void VideoDemo::test9()
{
Mat frame, preframe, gray, pregray, flowdata;
VideoCapture vcHandle = VideoCapture("../x64/Debug/video/2.mp4");
vcHandle.read(frame);
cvtColor(frame, pregray, COLOR_BGR2GRAY);
while(1) {
vcHandle.read(frame);
if(frame.empty()) {
break;
}
imshow("input", frame);
cvtColor(frame, gray, COLOR_BGR2GRAY);
calcOpticalFlowFarneback(pregray, gray, flowdata, 0.5, 3, 15, 3, 5, 1.5, 0);
cvtColor(pregray, preframe, COLOR_GRAY2BGR);
for(int row = 0; row < preframe.rows; row++) {
for(int col = 0; col < preframe.cols; col++) {
const Point2f fxy = flowdata.at<Point2f>(row, col);
if (fxy.x > 2 || fxy.y > 2) {
circle(preframe, Point(col, row), 2, Scalar(0, 255, 0), 2);
}
}
}
imshow("光流", preframe);
char c = waitKey(2);
if(c == '2') {
break;
}
}
vcHandle.release();
}
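// Sketch (assumption): the dense flow field from calcOpticalFlowFarneback() is
// often visualised by mapping flow angle to hue and flow magnitude to value.
// This hypothetical helper converts the CV_32FC2 flow matrix to a BGR image.
static Mat flowToColor(const Mat &flow)
{
Mat xy[2], mag, ang;
split(flow, xy); //xy[0] = dx, xy[1] = dy
cartToPolar(xy[0], xy[1], mag, ang, true); //angle in degrees
normalize(mag, mag, 0, 255, NORM_MINMAX);
Mat hsv[3], hsvImg, bgr;
hsv[0] = ang * 0.5; //hue: 0-360 degrees mapped to 0-180
hsv[1] = Mat::ones(ang.size(), CV_32F) * 255; //full saturation
hsv[2] = mag; //value: normalized magnitude
merge(hsv, 3, hsvImg);
hsvImg.convertTo(hsvImg, CV_8U);
cvtColor(hsvImg, bgr, COLOR_HSV2BGR);
return bgr;
}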
/* Sparse optical flow (Lucas-Kanade) */
void VideoDemo::test10()
{
VideoCapture vcHandle = VideoCapture("../x64/Debug/video/2.mp4");
Mat old_frame, old_gray;
vcHandle.read(old_frame);
cvtColor(old_frame, old_gray, COLOR_BGR2GRAY);
//initialize the corner features to track
vector<Point2f> feature_pts;
goodFeaturesToTrack(old_gray, feature_pts, 50, 0.01, 50, Mat(), 3, false);
vector<Point2f> pts[2];
pts[0].insert(pts[0].end(), feature_pts.begin(), feature_pts.end());
vector<Point2f> initial_points;
initial_points.insert(initial_points.end(), feature_pts.begin(), feature_pts.end());
Mat frame, gray;
vector<uchar> status;
vector<float> err;
//termination criteria: stop after 10 iterations, or earlier once the result changes by less than 0.01 between iterations
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 10, 0.01);
while (true) {
vcHandle.read(frame);
if(frame.empty()) {
break;
}
Mat frame1 = frame.clone();
for(int i = 0; i < feature_pts.size(); i++) { //draw the detected corners on a copy of the frame
circle(frame1, feature_pts[i], 5, Scalar(0, 0, 255), 2, 8, 0);
}
imshow("角", frame1);
cvtColor(frame, gray, COLOR_BGR2GRAY);
calcOpticalFlowPyrLK(old_gray, gray, pts[0], pts[1], status, err, Size(21, 21), 3, criteria, 0); //track between the previous and current grayscale frames
//check for tracking errors
size_t i = 0, k = 0;
RNG rng(12345);
for(i = 0; i < pts[1].size(); ++i) {
//distance and status check
double dist = abs(pts[0][i].x - pts[1][i].x) + abs(pts[0][i].y - pts[1][i].y);
if (status[i] && dist>2) {
pts[0][k] = pts[0][i];
initial_points[k] = initial_points[i]; //copy before k is incremented so all three vectors stay aligned
pts[1][k++] = pts[1][i];
circle(frame, pts[1][i], 3, Scalar(255, 0, 0), 3, 8);
line(frame, pts[0][i], pts[1][i], Scalar(0, 0, 255), 3, 8);
}
}
//shrink the point vectors to the number of valid tracks
pts[0].resize(k);
pts[1].resize(k);
initial_points.resize(k);
imshow("frame", frame);
char c = waitKey(2);
if(c == '2') {
break;
}
//swap in the current frame and its corners for the next iteration
pts[0] = pts[1];
old_gray = gray.clone();
//the sparse method needs re-initialization: when fewer than 40 corners remain, detect new ones
if (pts[0].size() < 40) {
goodFeaturesToTrack(old_gray, feature_pts, 50, 0.01, 50, Mat(), 3, false);
pts[0].insert(pts[0].end(), feature_pts.begin(), feature_pts.end());
initial_points.insert(initial_points.end(), feature_pts.begin(), feature_pts.end());
}
}
vcHandle.release();
}
Mat image;
bool leftButtonDownFlag = false; //set while the left mouse button is held down; the video is paused during that time
Point cStartPoint; //rectangle start point
Point cEndPoint; //rectangle end point
int histSize = 200;
float histR[] = {0,255};
const float *histRange = histR;
int channels[] = {0,1};
Mat dstHist;
Rect rect;
void onMouse(int event, int x, int y, int flags, void *ustc)
{
Mat rectImage;
Mat imageCopy; //copy of the current frame used while drawing the rectangle
if(event == EVENT_LBUTTONDOWN) {
leftButtonDownFlag = true; //start of a drag
cStartPoint = Point(x,y); //rectangle start point where the left button went down
cEndPoint = cStartPoint;
}
if(event==EVENT_MOUSEMOVE && leftButtonDownFlag) {
imageCopy = image.clone();
cEndPoint = Point(x,y);
if(cStartPoint != cEndPoint) {
rectangle(imageCopy, cStartPoint, cEndPoint, Scalar(255,0,0), 2); //draw the rectangle on the copy
}
imshow("跟踪木头人", imageCopy);
}
if(event == EVENT_LBUTTONUP) {
Mat ImageHSV;
leftButtonDownFlag = false;
rect = Rect(cStartPoint, cEndPoint);
rectImage = image(rect); //show the selected sub-image
imshow("截取图像", rectImage);
cvtColor(rectImage, ImageHSV, COLOR_BGR2HSV); //the frame is BGR, so convert with COLOR_BGR2HSV
imshow("截取图像转HSV", ImageHSV);
calcHist(&ImageHSV, 2, channels, Mat(), dstHist, 1, &histSize, &histRange, true, false);
normalize(dstHist, dstHist, 0, 255, NORM_MINMAX);
}
}
/* meanShift (mean shift) tracking */
void VideoDemo::test11()
{
VideoCapture video = VideoCapture("../x64/Debug/video/7.mp4"); //works best when the target has a clear outline and does not move too fast
double fps = video.get(CAP_PROP_FPS); //video frame rate
double pauseTime = 1000 / fps; //delay between two frames in milliseconds
vector<Point> pt; //target trajectory
namedWindow("跟踪木头人", WINDOW_AUTOSIZE);
setMouseCallback("跟踪木头人", onMouse);
while(true) {
if(!leftButtonDownFlag) { //play the video while the left mouse button is not held down; pause otherwise
video>>image;
}
if(!image.data ) {
break;
}
if(cStartPoint!=cEndPoint && !leftButtonDownFlag){
Mat imageHSV;
Mat calcBackImage;
cvtColor(image, imageHSV, COLOR_BGR2HSV); //the frame is BGR, so convert with COLOR_BGR2HSV
calcBackProject(&imageHSV, 2, channels, dstHist, calcBackImage, &histRange); //back projection
TermCriteria criteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.001);
//meanShift(calcBackImage, rect, criteria);
CamShift(calcBackImage, rect, criteria); //similar to meanShift, but the window size adapts automatically
rectangle(image, rect, Scalar(255, 0, 0), 3); //draw the target
pt.push_back(Point(rect.x+rect.width/2, rect.y+rect.height/2));
for(int i=0; i<pt.size()-1; i++) {
line(image, pt[i], pt[i+1], Scalar(0,255,0), 2.5); //motion trajectory
}
}
imshow("跟踪木头人", image);
char c = waitKey(pauseTime);
if(c == '2') {
break;
}
}
}
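// Sketch (assumption): CamShift() actually returns a RotatedRect describing the
// adapted size and orientation of the target, while `rect` only holds the final
// search window. Drawing the rotated box is often more informative.
static void drawCamShiftBox(Mat &img, const Mat &backProj, Rect &window)
{
TermCriteria crit(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1.0);
RotatedRect box = CamShift(backProj, window, crit);
ellipse(img, box, Scalar(0, 0, 255), 2); //ellipse fitted to the rotated box
}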
bool ChessboardStable(vector<Point2f> corners_l, vector<Point2f> corners_r)
{
static vector<vector<Point2f>> vCornersLeft, vCornersRight;
static int iPicIndex = 0;
if (vCornersLeft.size() < 10) {
vCornersLeft.push_back(corners_l);
vCornersRight.push_back(corners_r);
return false;
}
else{
vCornersLeft[iPicIndex % 10] = corners_l;
vCornersRight[iPicIndex % 10] = corners_r;
iPicIndex++;
double error = 0.0;
for(int i = 0; i < vCornersLeft.size(); i++) {
for(int j = 0; j < vCornersLeft[i].size(); j++) {
error += abs(corners_l[j].x - vCornersLeft[i][j].x) + abs(corners_l[j].y - vCornersLeft[i][j].y);
error += abs(corners_r[j].x - vCornersRight[i][j].x) + abs(corners_r[j].y - vCornersRight[i][j].y);
}
}
if (error < 1000) {
vCornersLeft.clear();
vCornersRight.clear();
iPicIndex = 0;
return true;
}
else {
return false;
}
}
}
/* Stereo matching (stereo calibration) */
void VideoDemo::test12()
{
VideoCapture camera_l, camera_r;
Mat frame_l, frame_r;
int cont = 0;
camera_l.open("../x64/Debug/video/9_L.mp4");
camera_r.open("../x64/Debug/video/9_R.mp4");
//a stereo camera pair is required
/*while (frame_l.rows < 2){
camera_l.open("../x64/Debug/video/7.mp4");
camera_l.set(CAP_PROP_FOURCC, 'GPJM');
camera_l.set(CAP_PROP_FRAME_WIDTH, 320);
camera_l.set(CAP_PROP_FRAME_HEIGHT, 240);
cont = 0;
while (frame_l.rows < 2 && cont < 5){
camera_l >> frame_l;
cont++;
}
}
while (frame_r.rows < 2){
camera_r.open("../x64/Debug/video/7.mp4");
camera_r.set(CAP_PROP_FOURCC, 'GPJM');
camera_r.set(CAP_PROP_FRAME_WIDTH, 320);
camera_r.set(CAP_PROP_FRAME_HEIGHT, 240);
cont = 0;
while (frame_r.rows < 2 && cont < 5){
camera_r >> frame_r;
cont++;
}
}*/
Size boardSize(7, 7);
const float squareSize = 26.f; //set this to the actual chessboard square size
vector<vector<Point2f>> imagePoints_l;
vector<vector<Point2f>> imagePoints_r;
vector<vector<Point3f> > objectPoints;
int nimages = 0;
while (true) {
camera_l >> frame_l;
camera_r >> frame_r;
/*frame_l = imread("../x64/Debug/picture/37.jpg", IMREAD_COLOR); //a single chessboard image also works if there is no video
frame_r = imread("../x64/Debug/picture/37_1.jpg", IMREAD_COLOR);*/
if (frame_l.empty() || frame_r.empty()) {
continue;
}
bool found_l = false, found_r = false;
vector<Point2f> corners_l, corners_r;
found_l = findChessboardCorners(frame_l, boardSize, corners_l, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
found_r = findChessboardCorners(frame_r, boardSize, corners_r, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
if (found_l && found_r && ChessboardStable(corners_l, corners_r)) {
Mat viewGray;
cvtColor(frame_l, viewGray, COLOR_BGR2GRAY);
cornerSubPix(viewGray, corners_l, Size(11, 11), Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
cvtColor(frame_r, viewGray, COLOR_BGR2GRAY);
cornerSubPix(viewGray, corners_r, Size(11, 11), Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
imagePoints_l.push_back(corners_l);
imagePoints_r.push_back(corners_r);
++nimages;
frame_l += 100;
frame_r += 100;
if (nimages >= 30) {
break;
}
}
drawChessboardCorners(frame_l, boardSize, corners_l, found_l);
drawChessboardCorners(frame_r, boardSize, corners_r, found_r);
putText(frame_l, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
putText(frame_r, to_string(nimages), Point(20, 20), 1, 1, Scalar(0, 0, 255));
imshow("左相机", frame_l);
imshow("右相机", frame_r);
char c = waitKey(20);
if(c == '2') {
break;
}
}
if (nimages < 20) {
cout << "Not enough" << endl;
return;
}
vector<vector<Point2f>> imagePoints[2] = {imagePoints_l, imagePoints_r};
objectPoints.resize(nimages);
for(int i = 0; i < nimages; i++) {
for(int j = 0; j < boardSize.height; j++) {
for(int k = 0; k < boardSize.width; k++) {
objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0));
}
}
}
cout << "运行立体校准 ..." << endl;
Size imageSize(320, 240);
Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = initCameraMatrix2D(objectPoints, imagePoints_l, imageSize, 0);
cameraMatrix[1] = initCameraMatrix2D(objectPoints, imagePoints_r, imageSize, 0);
Mat R, T, E, F; //rotation matrix, translation vector, essential matrix, fundamental matrix
int flags = CALIB_FIX_ASPECT_RATIO + CALIB_ZERO_TANGENT_DIST + CALIB_USE_INTRINSIC_GUESS +
CALIB_SAME_FOCAL_LENGTH + CALIB_RATIONAL_MODEL + CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5;
auto pTerm = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-5);
double rms = stereoCalibrate(objectPoints, imagePoints_l, imagePoints_r, cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1], imageSize, R, T, E, F, flags, pTerm); //stereo calibration on the sub-pixel corners
cout << "完成了RMS error:" << rms << endl;
double err = 0;
int npoints = 0;
//compute the epipolar lines
vector<Vec3f> lines[2]; //epipolar lines
for(int i = 0; i < nimages; i++) {
//number of corners in the i-th left image
int npt = (int)imagePoints_l[i].size();
Mat imgpt[2];
imgpt[0] = Mat(imagePoints_l[i]);
undistortPoints(imgpt[0], imgpt[0], cameraMatrix[0], distCoeffs[0], Mat(), cameraMatrix[0]);
computeCorrespondEpilines(imgpt[0], 0 + 1, F, lines[0]);
imgpt[1] = Mat(imagePoints_r[i]); //corner matrix of this image
undistortPoints(imgpt[1], imgpt[1], cameraMatrix[1], distCoeffs[1], Mat(), cameraMatrix[1]); //undistort the corner coordinates
computeCorrespondEpilines(imgpt[1], 1 + 1, F, lines[1]); //compute the epipolar lines
for(int j = 0; j < npt; j++) {
double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] + imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
fabs(imagePoints[1][i][j].x*lines[0][j][0] + imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
err += errij;
}
npoints += npt;
}
cout << "平均极差 : " << err / npoints << endl;
cout << "M1" << cameraMatrix[0] << endl << "D1" << distCoeffs[0] << endl << "M2" << cameraMatrix[1] << endl << "D2" << distCoeffs[1] << endl;
Mat R1, R2, P1, P2, Q; //rectification rotations, projection matrices and the disparity-to-depth matrix (extrinsic outputs)
Rect validRoi[2];
stereoRectify(cameraMatrix[0], distCoeffs[0], cameraMatrix[1], distCoeffs[1], imageSize,
R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
cout << "R" << R << endl << "T" << T << endl << "R1" << R1 << endl << "R2" << R2 << endl << "P1" << P1 << endl << "P2" << P2 << endl << "Q" << Q << endl;
}
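// Sketch (assumption): once the calibrated pair has been rectified (remapped with
// maps derived from R1/R2/P1/P2 above), a disparity map can be computed with plain
// block matching. The parameter values below are illustrative only.
static Mat computeDisparity(const Mat &rectLeftGray, const Mat &rectRightGray)
{
Ptr<StereoBM> bm = StereoBM::create(64, 21); //numDisparities, blockSize
Mat disp16, disp8;
bm->compute(rectLeftGray, rectRightGray, disp16); //fixed-point disparity (scaled by 16)
disp16.convertTo(disp8, CV_8U, 255.0 / (64 * 16.0)); //scale to 0-255 for display
return disp8;
}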
/* Hartley's algorithm */
void VideoDemo::test13()
{
Size boardSize(7, 7);
string imagelistfn;
bool showRectified = true;
vector<string> imagelist;
for(int i=1; i<=32; i++) {
imagelist.push_back("../x64/Debug/picture/test/" + to_string(i) + ".png");
}
if( imagelist.size() % 2 != 0 ) {
cout << "错误:图像列表包含奇数(非偶数)个元素\n";
return;
}
const int maxScale = 2;
const float squareSize = 1.f; //set this to the actual chessboard square size
vector<vector<Point2f>> imagePoints[2];
vector<vector<Point3f>> objectPoints;
Size imageSize;
int i, j, k, nimages = imagelist.size() / 2;
imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
vector<string> goodImageList;
for(i = j = 0; i < nimages; i++) {
for(k = 0; k < 2; k++) { //images come in pairs: two per group
auto strName = imagelist[i * 2 + k];
Mat img = imread(strName, IMREAD_COLOR);
if(img.empty()) {
break;
}
imageSize = img.size();
bool found = false;
vector<Point2f> &corners = imagePoints[k][j];
for(int scale = 1; scale <= maxScale; scale++) {
Mat timg;
timg = img.clone();
found = findChessboardCorners(timg, boardSize, corners, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
if(found) {
break;
}
}
cout << strName << endl;
Mat cimg;
cimg = img.clone();
drawChessboardCorners(cimg, boardSize, corners, found);
imshow("corners", cimg);
char c = waitKey(20);
if(c == '2') {
break;
}
auto pTerm = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 30, 0.01);
cvtColor(img, img, COLOR_BGR2GRAY);
cornerSubPix(img, corners, Size(11,11), Size(-1,-1), pTerm);
}
if(k == 2) {
goodImageList.push_back(imagelist[i*2]);
goodImageList.push_back(imagelist[i*2+1]);
j++;
}
}
nimages = j;
if(nimages < 2) {
cout << "错误:配对太少,无法运行校准\n";
return;
}
imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
objectPoints.resize(nimages);
for(i = 0; i < nimages; i++) {
for(j = 0; j < boardSize.height; j++)
for(k = 0; k < boardSize.width; k++)
objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0));
}
cout << "运行立体校准 ...\n";
Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
Mat R, T, E, F;
int flags = CALIB_FIX_ASPECT_RATIO + CALIB_ZERO_TANGENT_DIST + CALIB_SAME_FOCAL_LENGTH +
CALIB_RATIONAL_MODEL + CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5;
auto pTerm = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 100, 1e-5);
double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1], cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1], imageSize, R, T, E, F, flags, pTerm);
cout << "完成了RMS error=" << rms << endl;
//calibration quality check: since the fundamental matrix implicitly contains all of the output information, the epipolar constraint m2^T*F*m1 = 0 can be used to check the calibration quality
double err = 0;
int npoints = 0;
vector<Vec3f> lines[2];
for(i = 0; i < nimages; i++) {
int npt = (int)imagePoints[0][i].size();
Mat imgpt[2];
for(k = 0; k < 2; k++) {
imgpt[k] = Mat(imagePoints[k][i]);
undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
}
for(j = 0; j < npt; j++) {
double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] + imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
fabs(imagePoints[1][i][j].x*lines[0][j][0] + imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
err += errij;
}
npoints += npt;
}
cout << "平均重投影误差 = " << err/npoints << endl;
cout << "M1" << cameraMatrix[0] << endl << "D1" << distCoeffs[0] << endl << "M2" << cameraMatrix[1] << endl << "D2" << distCoeffs[1] << endl;
Mat R1, R2, P1, P2, Q;
Rect validRoi[2];
stereoRectify(cameraMatrix[0], distCoeffs[0], cameraMatrix[1], distCoeffs[1], imageSize,
R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
cout << "R" << R << endl << "T" << T << endl << "R1" << R1 << endl << "R2" << R2 << endl << "P1" << P1 << endl << "P2" << P2 << endl << "Q" << Q << endl;
//OpenCV can handle left-right or top-bottom camera arrangements
bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));
//compute and display the rectification
if(!showRectified)
return;
//Hartley's algorithm: use the intrinsic parameters of each camera, but compute the rectification transforms directly from the fundamental matrix
vector<Point2f> allimgpt[2];
for(k = 0; k < 2; k++) {
for(i = 0; i < nimages; i++)
std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
}
F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
Mat H1, H2;
stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3); //without this call the rectified images are badly warped
R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
P1 = cameraMatrix[0];
P2 = cameraMatrix[1];
Mat rmap[2][2];
initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]); //precompute the maps for cv::remap()
initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
Mat canvas;
double sf;
int w, h;
if(!isVerticalStereo) { //keep the display small enough to fit on the screen
sf = 600.0 / MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width * sf);
h = cvRound(imageSize.height * sf);
canvas.create(h, w*2, CV_8UC3);
}
else {
sf = 300.0 / MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width * sf);
h = cvRound(imageSize.height * sf);
canvas.create(h*2, w, CV_8UC3);
}
for(i = 0; i < nimages; i++) {
for(k = 0; k < 2; k++) {
Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
imshow("单目相机校正", rimg);
waitKey(1000);
cvtColor(rimg, cimg, COLOR_GRAY2BGR);
Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
resize(cimg, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
}
if(!isVerticalStereo) {
for(j = 0; j < canvas.rows; j += 16)
line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
}
else {
for(j = 0; j < canvas.cols; j += 16)
line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
}
imshow("双目相机校正对齐", canvas);
char c = waitKey(20);
if(c == '2') {
break;
}
}
}
/* Face detection (Haar cascades) */
void VideoDemo::test14()
{
string face_cascade_name = "E:/opencv/sources/data/haarcascades/haarcascade_frontalface_alt.xml";
string eyes_cascade_name = "E:/opencv/sources/data/haarcascades/haarcascade_eye.xml";
string mouth_cascade_name = "E:/opencv/sources/data/haarcascades/haarcascade_mcs_mouth.xml";
VideoCapture videoCap("../x64/Debug/video/12.mp4");
CascadeClassifier faceCascade;
CascadeClassifier eyesCascade;
CascadeClassifier mouthCascade;
// load the face cascade file
if (!faceCascade.load(face_cascade_name)) {
cout << "load face_cascade_name failed. " << endl;
return;
}
// load the eye cascade file
if (!eyesCascade.load(eyes_cascade_name)) {
cout << "load eyes_cascade_name failed. " << endl;
return;
}
// load the mouth cascade file
if (!mouthCascade.load(mouth_cascade_name)) {
cout << "load mouth_cascade_name failed. " << endl;
return;
}
while(1){
Mat frame;
videoCap.read(frame);
if (frame.empty()) {
videoCap.release();
return;
}
vector<Rect> faces;
Mat gray;
cvtColor(frame, gray, COLOR_BGR2GRAY);
equalizeHist(gray, gray);
// detect faces
faceCascade.detectMultiScale(gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
for (int i = 0; i < faces.size(); i++) {
// draw an ellipse around the face
Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
ellipse(frame, center, Size(faces[i].width / 2, faces[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
Mat faceROI = frame(faces[i]);
vector<Rect> eyes;
// detect eyes
eyesCascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
for (int j = 0; j < eyes.size(); j++) {
// draw a circle around each eye
Point eye_center(faces[i].x + eyes[j].x + eyes[j].width / 2, faces[i].y + eyes[j].y + eyes[j].height / 2);
int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
circle(frame, eye_center, radius, Scalar(255, 0, 0), 4, 8, 0);
}
Mat mouthROI = frame(faces[i]);
vector<Rect> mouth;
// detect the mouth
mouthCascade.detectMultiScale(mouthROI, mouth, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
for (int k = 0; k < mouth.size(); k++) {
//draw a rectangle around the mouth
Rect rect(faces[i].x + mouth[k].x, faces[i].y + mouth[k].y, mouth[k].width, mouth[k].height);
rectangle(frame, rect, Scalar(0, 255, 0), 2, 8, 0);
}
// two eyes and one mouth detected: treat it as a valid face
if (eyes.size() == 2 && mouth.size() == 1) {
// write a label above the face
Point centerText(faces[i].x + faces[i].width / 2 - 40, faces[i].y - 20);
putText(frame, "correct", centerText, FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255), 2);
}
else {
Point centerText(faces[i].x + faces[i].width / 2 - 40, faces[i].y - 20);
putText(frame, "mistake", centerText, FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255), 2);
}
}
imshow("face", frame);
//press Enter to exit the program
if (waitKey(5) == 13) {
destroyAllWindows();
return;
}
}
}
/* Motion information struct */
struct TransformParam
{
TransformParam() {}
//x offset, y offset, rotation angle
TransformParam(double _dx, double _dy, double _da) {
dx = _dx;
dy = _dy;
da = _da;
}
double dx;
double dy;
double da; //angle
void getTransform(Mat &T)
{
//rebuild the transform matrix from the new values
T.at<double>(0, 0) = cos(da);
T.at<double>(0, 1) = -sin(da);
T.at<double>(1, 0) = sin(da);
T.at<double>(1, 1) = cos(da);
T.at<double>(0, 2) = dx;
T.at<double>(1, 2) = dy;
}
};
/* Trajectory struct */
struct Trajectory
{
Trajectory() {}
Trajectory(double _x, double _y, double _a)
{
x = _x;
y = _y;
a = _a;
}
double x;
double y;
double a; //angle
};
/* Accumulate the trajectory
 * transforms               per-frame motion parameters
 * return vector<Trajectory>  accumulated trajectory */
vector<Trajectory> cumsum(vector<TransformParam> &transforms)
{
//motion trajectory over all frames
vector<Trajectory> trajectory;
//accumulate the frame-to-frame transforms: running sums of x, y and a (angle)
double a = 0;
double x = 0;
double y = 0;
//accumulate
for (size_t i = 0; i < transforms.size(); i++) {
x += transforms[i].dx;
y += transforms[i].dy;
a += transforms[i].da;
trajectory.push_back(Trajectory(x, y, a));
}
return trajectory;
}
/** Smooth the motion trajectory
 * @param trajectory motion trajectory
 * @param radius     window radius
 * @return vector<Trajectory> */
vector<Trajectory> smooth(vector<Trajectory> &trajectory, int radius)
{
//smoothed motion trajectory
vector<Trajectory> smoothed_trajectory;
//slide the averaging window over the trajectory
for (size_t i = 0; i < trajectory.size(); i++) {
double sum_x = 0;
double sum_y = 0;
double sum_a = 0;
int count = 0;
for (int j = -radius; j <= radius; j++) {
if ((int)i + j >= 0 && (int)i + j < (int)trajectory.size()) { //stay inside the trajectory (the casts avoid unsigned wrap-around)
sum_x += trajectory[i + j].x;
sum_y += trajectory[i + j].y;
sum_a += trajectory[i + j].a;
count++;
}
}
double avg_a = sum_a / count;
double avg_x = sum_x / count;
double avg_y = sum_y / count;
smoothed_trajectory.push_back(Trajectory(avg_x, avg_y, avg_a));
}
return smoothed_trajectory;
}
/* Video stabilization based on feature-point matching (results are mediocre) */
void VideoDemo::test15()
{
VideoCapture cap("../x64/Debug/video/16.mp4");
int n_frames = int(cap.get(CAP_PROP_FRAME_COUNT));
int w = int(cap.get(CAP_PROP_FRAME_WIDTH));
int h = int(cap.get(CAP_PROP_FRAME_HEIGHT));
double fps = cap.get(CAP_PROP_FPS);
//frame storage
Mat curr, curr_gray; //current frame (BGR) and its grayscale version
Mat prev, prev_gray; //previous frame (BGR) and its grayscale version
cap >> prev;
cvtColor(prev, prev_gray, COLOR_BGR2GRAY);
vector<TransformParam> transforms; //per-frame affine motion parameters
Mat last_T; //affine matrix of the previous frame
for (int i = 1; i < n_frames; i++) {
vector<Point2f> prev_pts, curr_pts; //corners of the previous frame, corners of the current frame
goodFeaturesToTrack(prev_gray, prev_pts, 200, 0.01, 30); //detect corners in the previous frame
bool success = cap.read(curr);
if (!success) {
break;
}
cvtColor(curr, curr_gray, COLOR_BGR2GRAY);
//track the feature points with optical flow
vector<uchar> status; //output status vector (uchar): 1 if the corner from the previous frame was found in the current frame, otherwise 0
vector<float> err; //output error vector
calcOpticalFlowPyrLK(prev_gray, curr_gray, prev_pts, curr_pts, status, err);
//keep only the corners that were tracked successfully
auto prev_it = prev_pts.begin();
auto curr_it = curr_pts.begin();
for (size_t k = 0; k < status.size(); k++) {
if (status[k]) {
prev_it++;
curr_it++;
}
else { //drop corners that were not tracked
prev_it = prev_pts.erase(prev_it);
curr_it = curr_pts.erase(curr_it);
}
}
//estimate the transform matrix
Mat T = estimateRigidTransform(prev_pts, curr_pts, false); //false = rigid transform (translation, rotation, uniform scale); true = full affine; T is the 2x3 transform matrix
//in rare cases no transform is found; reuse the previous one. If this already happens on the very first frame the algorithm breaks, but that is very unlikely
if (T.data == NULL) {
last_T.copyTo(T);
}
T.copyTo(last_T);
double dx = T.at<double>(0, 2); //extract the translation
double dy = T.at<double>(1, 2);
double da = atan2(T.at<double>(1, 0), T.at<double>(0, 0)); //extract the rotation angle
transforms.push_back(TransformParam(dx, dy, da)); //store the motion parameters
curr_gray.copyTo(prev_gray); //prepare for the next iteration
cout << "Frame: " << i << "/" << n_frames << " - Tracked points : " << prev_pts.size() << endl;
}
vector<Trajectory> trajectory = cumsum(transforms); //accumulate the transforms to obtain the trajectory
int SMOOTHING_RADIUS = 50;
vector<Trajectory> smoothed_trajectory = smooth(trajectory, SMOOTHING_RADIUS); //smoothed trajectory
vector<TransformParam> transforms_smooth; //smoothed motion parameters
for (size_t i = 0; i < transforms.size(); i++) { //original motion parameters
//difference between the smoothed trajectory and the original trajectory
double diff_x = smoothed_trajectory[i].x - trajectory[i].x;
double diff_y = smoothed_trajectory[i].y - trajectory[i].y;
double diff_a = smoothed_trajectory[i].a - trajectory[i].a;
//apply the difference to the per-frame motion parameters
double dx = transforms[i].dx + diff_x;
double dy = transforms[i].dy + diff_y;
double da = transforms[i].da + diff_a;
transforms_smooth.push_back(TransformParam(dx, dy, da));
}
cap.set(CAP_PROP_POS_FRAMES, 0); //seek back to the first frame
Mat T(2, 3, CV_64F); //smoothed transform matrix
Mat frame, frame_stabilized, frame_out;
cap.read(frame); //skip the first frame
for (int i = 0; i < n_frames - 1; i++) { //transform every frame to get the stabilized result
bool success = cap.read(frame);
if (!success) {
break;
}
transforms_smooth[i].getTransform(T); //rebuild the smoothed affine matrix
warpAffine(frame, frame_stabilized, T, frame.size()); //apply the affine transform
//remove the black borders
Mat T2 = getRotationMatrix2D(Point2f(frame_stabilized.cols / 2, frame_stabilized.rows / 2), 0, 1.04); //scale the image up by 1.04 and keep a region of the original size
warpAffine(frame_stabilized, frame_stabilized, T2, frame_stabilized.size()); //apply the crop transform
hconcat(frame, frame_stabilized, frame_out); //put the original and the stabilized frame side by side in the output
if (frame_out.cols > 1920) { //resize if the combined image is too large
resize(frame_out, frame_out, Size(frame_out.cols / 2, frame_out.rows / 2));
}
imshow("Before and After", frame_out);
cout << "out frame:" << i << endl;
waitKey(10);
}
cap.release();
}
/* Simple background estimation in a video */
void VideoDemo::test16()
{
string video_file = "../x64/Debug/video/7.mp4";
VideoCapture cap(video_file);
//pick 25 random frame indices
default_random_engine generator;
uniform_int_distribution<int> distribution(0, cap.get(CAP_PROP_FRAME_COUNT));
vector<Mat> vFrames;
Mat frame;
//randomly sample 25 frames from the video
for (int i = 0; i < 25; i++) {
int fid = distribution(generator); //random frame index
cap.set(CAP_PROP_POS_FRAMES, fid);
Mat frame;
cap >> frame;
if (frame.empty()) {
continue;
}
vFrames.push_back(frame);
}
//median image
Mat medianImg(vFrames[0].rows, vFrames[0].cols, CV_8UC3, Scalar(0, 0, 0));
for (int row = 0; row < vFrames[0].rows; row++) {
for (int col = 0; col < vFrames[0].cols; col++) {
vector<int> elements_B;
vector<int> elements_G;
vector<int> elements_R;
for (int i = 0; i < vFrames.size(); i++) { //collect the BGR values at this pixel across the sampled frames
int B = vFrames[i].at<Vec3b>(row, col)[0];
int G = vFrames[i].at<Vec3b>(row, col)[1];
int R = vFrames[i].at<Vec3b>(row, col)[2];
elements_B.push_back(B);
elements_G.push_back(G);
elements_R.push_back(R);
}
//compute the median of each channel
nth_element(elements_B.begin(), elements_B.begin() + elements_B.size() / 2, elements_B.end());
medianImg.at<cv::Vec3b>(row, col)[0] = elements_B[elements_B.size() / 2];
nth_element(elements_G.begin(), elements_G.begin() + elements_G.size() / 2, elements_G.end());
medianImg.at<cv::Vec3b>(row, col)[1] = elements_G[elements_G.size() / 2];
nth_element(elements_R.begin(), elements_R.begin() + elements_R.size() / 2, elements_R.end());
medianImg.at<cv::Vec3b>(row, col)[2] = elements_R[elements_R.size() / 2];
}
}
//show the median frame
imshow("中值图", medianImg);
cap.set(CAP_PROP_POS_FRAMES, 0);
Mat grayMedianFrame;
cvtColor(medianImg, grayMedianFrame, COLOR_BGR2GRAY);
while (1) {
cap >> frame;
if (frame.empty())
{
break;
}
cvtColor(frame, frame, COLOR_BGR2GRAY);
Mat dframe;
absdiff(frame, grayMedianFrame, dframe); //difference from the median background
threshold(dframe, dframe, 30, 255, THRESH_BINARY); //binarize
imshow("frame", dframe);
waitKey(20);
}
cap.release();
}