由于疫情的影响,一系列非接触设备得到应用,如无人消毒机器人、红外测温。除了体温,心率也是衡量人体状况的重要指标。心率可以通过颈动脉、桡动脉还有皮肤颜色来进行提取。2012年,Wu等人基于欧拉视频放大(EVM)技术设计了一个非接触式心率检测系统。在相机和目标人物本身没有明显晃动、且光照条件良好时,该系统测得的心率能够达到临床级别的准确度。之后又有学者提出了基于相位的微小运动放大算法。近年来传统的运动放大技术仍以EVM和基于相位的放大(PVM)为主,自动识别和视觉增强方面的发展和探索空间还比较大。本文运用欧拉运动放大技术的原理,通过增强胸骨上窝的运动来获取心率:先非接触式拍摄一段视频,运用EVM放大运动,再提取心跳信号并分析得出心率。
先看一下效果吧
心率图
放大胸骨上窝
本程序完全开源,网盘链接:https://pan.baidu.com/s/1XU0IhnPIKWYzQQN6bPG8pg
提取码:wj4k
如果对大家有用,希望可以给我的码云点个star~(github又被墙了)项目地址
那么什么是欧拉运动放大呢?简单的来说是在图像金字塔上进行操作,求出相邻两帧的图像变化,然后进行IIR滤波,还原通道,再进行放大和衰减。
由于处理域为YIQ,所以需要先将图像由RGB变换到YIQ,函数如下
// NTSC RGB -> YIQ conversion matrix (rows produce Y, I, Q respectively).
Matx33f rgb2yiq_mat(0.299f, 0.587f, 0.114f,
0.596f, -0.274f, -0.322f,
0.211f, -0.523f, 0.312f);
// Inverse transform: YIQ -> RGB.
Matx33f yiq2rgb_mat(1.0f, 0.956f, 0.621f,
1.0f, -0.272f, -0.647f,
1.0f, -1.106f, 1.703f);
//RGB2YIQ
// Convert an RGB image (float samples, stored B-G-R interleaved) to YIQ.
// The pixel data is addressed as a flat float array (3 samples per pixel),
// so the column index runs over cols*3. Assumes the input is already CV_32F.
Mat rgb2yiq(const Mat& img) {
    // clone() already allocates the destination; the original additionally
    // default-constructed an identically sized Mat that was thrown away.
    Mat img_out = img.clone();
    for (int j = 0; j < img.rows; j++) {
        for (int i = 0; i < img.cols * 3; i += 3) {
            // samples are stored B,G,R -> build the vector as (R,G,B)
            Vec3f pixel(img_out.at<float>(j, i + 2), img_out.at<float>(j, i + 1), img_out.at<float>(j, i));
            pixel = rgb2yiq_mat * pixel;
            for (int k = 0; k < 3; k++) {
                // no clamping: I and Q components may legitimately be negative
                img_out.at<float>(j, i + 2 - k) = pixel[k];
            }
        }
    }
    return img_out;
}
//YIQ2RGB
// Convert a YIQ float image back to RGB (stored B-G-R interleaved).
// RGB results are clamped to [0, 255]; harmless for the 0..1 normalized
// floats used elsewhere in this project (only the lower clamp can trigger).
Mat yiq2rgb(const Mat& img) {
    // Single allocation via clone(); the original also default-constructed an
    // identically sized Mat first, which was immediately discarded.
    Mat img_out = img.clone();
    for (int j = 0; j < img.rows; j++) {
        for (int i = 0; i < img.cols * 3; i += 3) {
            Vec3f pixel(img_out.at<float>(j, i + 2), img_out.at<float>(j, i + 1), img_out.at<float>(j, i));
            pixel = yiq2rgb_mat * pixel;
            for (int k = 0; k < 3; k++) {
                if (pixel[k] > 255.0f) pixel[k] = 255.0f;
                else if (pixel[k] < 0.0f) pixel[k] = 0.0f;
                img_out.at<float>(j, i + 2 - k) = pixel[k];
            }
        }
    }
    return img_out;
}
然后构建拉普拉斯金字塔
// Build a Laplacian pyramid with `levels` levels.
// pyramid[0..levels-2] hold the band-pass (detail) images from fine to
// coarse; the last entry is the low-pass residual.
// Returns false (after printing an error) if `levels` is invalid.
bool laplacianpyramid(Mat img, int levels, vector<Mat_<Vec3f>>& pyramid)
{
    if (levels < 1)
    {
        cout << "level设置错误" << endl;
        return false;
    }
    Mat img1 = img.clone();
    for (int i = 0; i < levels - 1; i++)
    {
        Mat up, down;
        pyrDown(img1, down);
        pyrUp(down, up, img1.size()); // upsample back to the current size
        pyramid.push_back(img1 - up); // detail = current - reconstruction
        img1 = down;
    }
    pyramid.push_back(img1); // low-pass residual
    return true; // BUG FIX: original fell off the end of a bool function (UB)
}
然后进行IIR滤波。注意代码中有一系列的调试用文件写入操作,实际使用时需要将这些文件操作相关的代码注释掉(当时是由于vector的copy导致数据出错,我靠这些文件输出来定位bug的)。
// Temporal band-pass via two first-order IIR low-pass filters, applied
// per pyramid level:
//   low  = (1-fl)*low  + fl*src   (slow low-pass, cutoff fl)
//   high = (1-fh)*high + fh*src   (faster low-pass, cutoff fh)
//   dst  = high - low             (band between fl and fh)
// NOTE(review): dst is computed from the PREVIOUS frame's low/high state
// (before the update), so the band-pass lags one frame — preserved as-is.
void IIR( vector<Mat_<Vec3f>>&src, vector<Mat_<Vec3f>>& dst, vector<Mat_<Vec3f>>& low, vector<Mat_<Vec3f>>& high)
{
    double fl = 0.05;
    double fh = 0.4;
    vector<Mat_<Vec3f>> low1, high1, dst1;
    for (size_t i = 0; i < src.size(); i++)
    {
        low1.push_back((1 - fl) * low[i] + fl * src[i]);
        high1.push_back((1 - fh) * high[i] + fh * src[i]);
        dst1.push_back(high[i] - low[i]);
    }
    low.assign(low1.begin(), low1.end());
    high.assign(high1.begin(), high1.end());
    dst.assign(dst1.begin(), dst1.end());
    // The original dumped level 3 of src/dst/low/high to "dst.txt",
    // "src.txt", "low.txt" and "high.txt" on EVERY call. That was leftover
    // debugging (the accompanying text says to comment it out); it clobbered
    // the files each frame and dominated runtime, so it has been removed.
}
进行完这些操作后,就需要恢复图像并进行放大处理
// Amplify each pyramid level with a wavelength-dependent gain (EVM-style)
// and collapse the pyramid back into a single YIQ image.
//  src      - Laplacian pyramid of the band-passed signal (modified in place!)
//  dst      - output: sum of the upsampled, amplified levels
//  alpha    - maximum magnification factor
//  lambda_c - spatial wavelength cutoff
void returnsrc(vector<Mat_<Vec3f>>& src, Mat& dst,double alpha,double lambda_c)
{
// representative spatial wavelength at the finest level (image diagonal / 3)
double lambda = sqrtf(src[0].cols * src[0].cols + src[0].rows * src[0].rows) / 3;
int ext = 2; // extra exaggeration factor on top of the paper's formula
double delta;
delta = lambda_c / 8 / (1 + alpha);
vector<Mat_<Vec3f>>temp; // NOTE(review): unused
for (int i = 0; i < src.size(); i++)
{
// per-level gain from the EVM paper: alpha_l = lambda/(8*delta) - 1, capped at alpha
double curralpha = lambda / delta / 8 - 1;
curralpha = curralpha * ext;
if ((i == 0) || (i == src.size() - 1))
{
// finest level and the low-pass residual are not amplified
src[i] = 0;
}
else if (curralpha > alpha)
{
src[i] = src[i]*alpha;
}
else
{
src[i] = src[i] *curralpha;
}
// NOTE(review): lambda is halved while iterating from fine to coarse; the
// reference EVM code halves it going from coarse to fine — verify direction.
lambda = lambda / 2;
}
// collapse: start from the coarsest level, repeatedly upsample and add
Mat up= src[src.size()-1];
for (int i = 0; i < src.size()-1; i++)
{
pyrUp(up, up);
up = up + src[src.size() - 2-i];
}
// attenuate two of the three channels by 10x. NOTE(review): in the packed
// YIQ order planes[0]/planes[1] are Y and I; the usual EVM trick attenuates
// the chroma channels (I, Q) — confirm which channels were intended.
Mat planes[3];
split(up, planes);
planes[0] = planes[0] * 0.1;
planes[1] = planes[1] * 0.1;
merge(planes, 3, up);
//Mat result = yiq2rgb(up);
//result.convertTo(result, CV_8UC3, 255.0, 1.0 / 255.0);
dst = up.clone();
}
这样通过两帧就可以得到一帧放大后的图像
但是怎么得到心率呢?
一种简单的方法是获取ROI,并求响应;另外一种是独立成分分析ICA,这种我现在还没有搞明白,先不用。下面介绍ROI的响应并求出周期
我对输入图像进行了裁剪,大小为512*512,30fps
获取ROI 代码中的cost就是代价
// Split the 512x512 frame into a 16x16 grid of 32x32 ROIs and record each
// ROI's mean intensity ("cost") for this frame; `flag` counts frames from 1.
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < 16; j++)
{
Mat imgrio = img(Rect(i*32,j*32,32,32));
cost[flag - 1][j + i*16] = count(imgrio);
}
}
// Mean gray level of an ROI.
// Generalized: divides by the actual ROI area instead of the hard-coded
// 32*32, so it now works for any ROI size (identical result for the 32x32
// tiles used by the caller).
// NOTE(review): CV_RGB2GRAY applies RGB-ordered weights, but OpenCV frames
// are BGR — the gray weighting is swapped; confirm intent.
float count(Mat& src)
{
    Mat a;
    cvtColor(src, a, CV_RGB2GRAY);
    a.convertTo(a, CV_32FC1);
    float sum = 0;
    for (int i = 0; i < a.rows; i++)
    {
        for (int j = 0; j < a.cols; j++)
        {
            sum += a.at<float>(i, j);
        }
    }
    return sum / (a.rows * a.cols); // was sum/32/32 — hard-coded ROI size
}
然后需要找到响应值最大的ROI。可以采用排序(下面的BubbleSort为升序);find函数用于在所有ROI中找到随时间变化响应(方差)最大的那个。
// Sort p[0..length) ascending while tracking original positions:
// afterwards ind_diff[k] is the index the k-th smallest value came from.
// Rewritten from an O(n^2) bubble sort to std::stable_sort on an index
// array; stability preserves the original's tie ordering. (Name kept for
// compatibility with existing callers.)
void BubbleSort(float* p, int length, int* ind_diff)
{
    std::vector<int> order(length);
    for (int m = 0; m < length; m++) order[m] = m;
    std::stable_sort(order.begin(), order.end(),
        [p](int a, int b) { return p[a] < p[b]; });
    std::vector<float> sorted(length);
    for (int k = 0; k < length; k++) sorted[k] = p[order[k]];
    for (int k = 0; k < length; k++)
    {
        p[k] = sorted[k];
        ind_diff[k] = order[k];
    }
}
// Pick the ROI whose brightness varies the most over time.
// X[frame][roi] holds 210 frames x 256 ROI mean intensities; an ROI's score
// is the (unnormalized) variance of its column. Returns the ROI index with
// the largest variance.
// Rewritten to scan for the maximum directly instead of bubble-sorting all
// 256 scores and printing them (O(n) instead of O(n^2), no debug spam, and
// no dependency on the sort helper).
int find(float X[210][256])
{
    int best = 0;
    float best_s = -1.0f;
    for (int j = 0; j < 256; j++)
    {
        float average = 0;
        for (int i = 0; i < 210; i++)
        {
            average += X[i][j];
        }
        average /= 210;
        float s = 0;
        for (int i = 0; i < 210; i++)
        {
            s += (X[i][j] - average) * (X[i][j] - average);
        }
        // ">=" keeps the LAST index on ties, matching the original's stable
        // ascending sort followed by `return ind[255]`.
        if (s >= best_s)
        {
            best_s = s;
            best = j;
        }
    }
    return best;
}
这样我们的任务基本上结束了 全部代码如下,其中需要自建pdi文件
pdi.hpp(注意:代码中通过 #include "pdi.hpp" 引用,头文件名需与之保持一致)
#ifndef PDI_H
#define PDI_H
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <algorithm>
// NOTE(review): a using-directive in a header leaks `cv` into every
// includer; kept because pdi.cpp and main.cpp rely on it.
using namespace cv;
// Spatial filtering (mask convolution)
Mat filter(const Mat& img, const Mat& mask);
Mat filterRGB(const Mat& img, const Mat& mask);
Mat filterGray(const Mat& img, const Mat& mask);
// Color-space conversions (RGB <-> YIQ, float pixel domain)
Mat rgb2yiq2rgb(const Mat& img);
Mat rgb2yiq(const Mat& img);
Mat yiq2rgb(const Mat& img);
// Channel extraction / interleaving
Mat split(const Mat& img, int channel, bool mono = false);
Mat merge(const Mat& img_R, const Mat& img_G, const Mat& img_B);
// Image statistics
uchar mean_y_uchar(const Mat& img);
float mean_y_float(const Mat& img);
// Point operations
Mat negative(const Mat& img, bool rgb = true);
Mat negative_rgb(const Mat& img);
Mat negative_y(const Mat& img);
Mat addBrightness(const Mat& img, int c);
Mat mulBrightness(const Mat& img, float c, bool rgb = true);
Mat mulBrightness_rgb(const Mat& img, float c);
Mat mulBrightness_y(const Mat& img, float c);
// Thresholding
Mat thresholding(const Mat& img, uchar threshold, int type, bool mean = true);
Mat thresholdingGray(const Mat& img, uchar threshold, int type, bool mean = true);
Mat thresholdingYIQ(const Mat& img, float threshold, int type, bool mean = true);
// Edge filters
Mat sobelFilterVer(const Mat& img);
Mat sobelFilterHor(const Mat& img);
Mat sobelFilter(const Mat& img);
Mat laplacianFilterVer(const Mat& img);
Mat laplacianFilterHor(const Mat& img);
Mat laplacianFilter(const Mat& img);
// Noise generation
Mat noiseSaltPepper(const Mat& img, float density);
Mat noiseSaltPepperMono(const Mat& img, float density);
// BUG FIX: the original declared noiseSaltPepperMono twice and never
// declared the RGB overload that pdi.cpp defines.
Mat noiseSaltPepperRGB(const Mat& img, float density);
#endif
pdi.cpp
#include "pdi.hpp"
#include <cstdlib> // rand() for salt & pepper noise generation
// NTSC RGB -> YIQ conversion matrix (rows produce Y, I, Q respectively).
Matx33f rgb2yiq_mat(0.299f, 0.587f, 0.114f,
0.596f, -0.274f, -0.322f,
0.211f, -0.523f, 0.312f);
// Inverse transform: YIQ -> RGB.
Matx33f yiq2rgb_mat(1.0f, 0.956f, 0.621f,
1.0f, -0.272f, -0.647f,
1.0f, -1.106f, 1.703f);
// 1.1
// Round-trip RGB -> YIQ -> RGB (exercises both conversions).
Mat rgb2yiq2rgb(const Mat& img) {
    Mat yiq = rgb2yiq(img);
    return yiq2rgb(yiq);
}
// Convert an RGB image (float samples, stored B-G-R interleaved) to YIQ.
// The pixel data is addressed as a flat float array (3 samples per pixel),
// so the column index runs over cols*3. Assumes the input is already CV_32F.
Mat rgb2yiq(const Mat& img) {
    // clone() already allocates the destination; the original additionally
    // default-constructed an identically sized Mat that was thrown away.
    Mat img_out = img.clone();
    for (int j = 0; j < img.rows; j++) {
        for (int i = 0; i < img.cols * 3; i += 3) {
            // samples are stored B,G,R -> build the vector as (R,G,B)
            Vec3f pixel(img_out.at<float>(j, i + 2), img_out.at<float>(j, i + 1), img_out.at<float>(j, i));
            pixel = rgb2yiq_mat * pixel;
            for (int k = 0; k < 3; k++) {
                // no clamping: I and Q components may legitimately be negative
                img_out.at<float>(j, i + 2 - k) = pixel[k];
            }
        }
    }
    return img_out;
}
// Convert a YIQ float image back to RGB (stored B-G-R interleaved).
// RGB results are clamped to [0, 255]; harmless for the 0..1 normalized
// floats used elsewhere in this project (only the lower clamp can trigger).
Mat yiq2rgb(const Mat& img) {
    // Single allocation via clone(); the original also default-constructed an
    // identically sized Mat first, which was immediately discarded.
    Mat img_out = img.clone();
    for (int j = 0; j < img.rows; j++) {
        for (int i = 0; i < img.cols * 3; i += 3) {
            Vec3f pixel(img_out.at<float>(j, i + 2), img_out.at<float>(j, i + 1), img_out.at<float>(j, i));
            pixel = yiq2rgb_mat * pixel;
            for (int k = 0; k < 3; k++) {
                if (pixel[k] > 255.0f) pixel[k] = 255.0f;
                else if (pixel[k] < 0.0f) pixel[k] = 0.0f;
                img_out.at<float>(j, i + 2 - k) = pixel[k];
            }
        }
    }
    return img_out;
}
// 2.1
// Extract one channel from an interleaved 8-bit image.
//  channel: 0..2 — reads the sample at offset 2-channel inside each pixel.
//  mono = true  -> returns a single-channel plane (CV_8UC1).
//  mono = false -> returns a 3-channel image with the value written back at
//                  its original channel offset; the other two channels are
//                  left uninitialized (the Mat constructor does not zero).
// The destination index expression collapses to `i` when mono is true and
// to `3*i + 2 - channel` when mono is false.
Mat split(const Mat& img, int channel, bool mono) {
int type;
if (mono)
type = CV_8UC1;
else
type = CV_8UC3;
Mat img_out(img.size(), type);
for (int j = 0; j < img.rows; j++) {
for (int i = 0; i < img.cols; i++) {
img_out.at<uchar>(j, i + 2 - channel + !mono * 2 * i + mono * (channel - 2)) = img.at<uchar>(j, 3 * i + 2 - channel);
}
}
return img_out;
}
// Interleave three single-channel 8-bit planes into one 3-channel image
// (samples stored B, G, R within each pixel).
Mat merge(const Mat& img_R, const Mat& img_G, const Mat& img_B) {
    Mat out(img_R.size(), CV_8UC3);
    for (int row = 0; row < img_R.rows; row++) {
        for (int col = 0; col < img_R.cols; col++) {
            Vec3b& px = out.at<Vec3b>(row, col);
            px[0] = img_B.at<uchar>(row, col);
            px[1] = img_G.at<uchar>(row, col);
            px[2] = img_R.at<uchar>(row, col);
        }
    }
    return out;
}
// 1.3
// Dispatch to the RGB-domain or luminance-only negative implementation.
Mat negative(const Mat& img, bool rgb) {
    return rgb ? negative_rgb(img) : negative_y(img);
}
// 1.3a
// Per-sample negative in the RGB domain: out = 255 - in for every uchar
// sample (cols*3 samples per row for 3-channel images).
// NOTE(review): CV_32FC1 inputs take the "not color" branch but are still
// addressed with at<uchar> — only 8-bit images are really supported here.
Mat negative_rgb(const Mat& img) {
bool color = true;
if (img.type() == CV_32FC1 || img.type() == CV_8UC1)
color = false;
Mat img_out(img.size(), img.type());
for (int j = 0; j < img.rows; j++) {
for (int i = 0; i < img.cols + color * img.cols * 2; i++) {
img_out.at<uchar>(j, i) = 255 - img.at<uchar>(j, i);
}
}
return img_out;
}
// 1.3b
// Negative on the luminance channel only: convert to YIQ, invert Y
// (255 - Y), convert back. Y sits at flat index 2 (mod 3) in the packed
// layout written by rgb2yiq.
// NOTE(review): assumes pixel values on a 0..255 scale — with the 0..1
// normalized floats used elsewhere in this project this would blow out.
Mat negative_y(const Mat& img) {
Mat img_out = rgb2yiq(img);
for (int j = 0; j < img.rows; j++) {
for (int i = 2; i < 3 * img.cols; i += 3) {
img_out.at<float>(j, i) = 255 - img_out.at<float>(j, i);
}
}
return yiq2rgb(img_out);
}
// 1.4
// Add the constant c to every uchar sample, clamping the result to
// [0, 255]. For 3-channel images the inner loop walks all cols*3 samples.
Mat addBrightness(const Mat& img, int c) {
    const bool color = !(img.type() == CV_32FC1 || img.type() == CV_8UC1);
    const int samples = color ? img.cols * 3 : img.cols;
    Mat img_out(img.size(), img.type());
    for (int row = 0; row < img.rows; row++) {
        for (int s = 0; s < samples; s++) {
            int v = img.at<uchar>(row, s) + c;
            v = v > 255 ? 255 : (v < 0 ? 0 : v);
            img_out.at<uchar>(row, s) = v;
        }
    }
    return img_out;
}
//1.5
// Dispatch between the RGB-domain and Y-channel brightness scaling.
Mat mulBrightness(const Mat& img, float c, bool rgb) {
    return rgb ? mulBrightness_rgb(img, c) : mulBrightness_y(img, c);
}
// 1.5a
// Multiply every uchar sample by c; negative c returns the input unchanged.
// Results above 255 are clamped; the float->uchar store truncates toward 0.
Mat mulBrightness_rgb(const Mat& img, float c) {
if (c < 0.0f) return img;
bool color = true;
if (img.type() == CV_32FC1 || img.type() == CV_8UC1)
color = false;
Mat img_out(img.size(), img.type());
for (int j = 0; j < img.rows; j++) {
// cols*3 samples per row for 3-channel images
for (int i = 0; i < img.cols + color * img.cols * 2; i++) {
float pixel = img.at<uchar>(j, i) * c;
if (pixel > 255.0f) pixel = 255.0f;
img_out.at<uchar>(j, i) = pixel;
}
}
return img_out;
}
// 1.5b
// Scale only the luminance: convert to YIQ, multiply Y by c (clamped at
// 255), convert back. Negative c returns the input unchanged.
Mat mulBrightness_y(const Mat& img, float c) {
if (c < 0.0f) return img;
Mat img_out = rgb2yiq(img);
for (int j = 0; j < img.rows; j++) {
// Y channel lives at flat index 2 (mod 3) in the packed layout
for (int i = 2; i < img.cols * 3; i += 3) {
float pixel = img_out.at<float>(j, i) * c;
if (pixel > 255.0f) pixel = 255.0f;
img_out.at<float>(j, i) = pixel;
}
}
return yiq2rgb(img_out);
}
// 1.6
// Threshold an image: gray images directly, color images on the Y channel
// after an RGB -> YIQ conversion.
Mat thresholding(const Mat& img, uchar threshold, int type, bool mean) {
    if (img.type() == CV_32FC1 || img.type() == CV_8UC1) {
        return thresholdingGray(img, threshold, type, mean);
    }
    Mat yiq = rgb2yiq(img);
    return thresholdingYIQ(yiq, float(threshold), type, mean);
}
// Threshold a single-channel 8-bit image.
//  type 0: binary (255/0); type 1: values above threshold become 255;
//  otherwise: values above threshold are capped at threshold.
//  mean = true replaces `threshold` with the image mean.
Mat thresholdingGray(const Mat& img, uchar threshold, int type, bool mean) {
    // BUG FIX: `Mat img_out = img;` only copied the Mat header, so the loop
    // mutated the caller's supposedly-const image. Deep-copy instead.
    Mat img_out = img.clone();
    if (mean) threshold = mean_y_uchar(img_out);
    for (int j = 0; j < img.rows; j++) {
        for (int i = 0; i < img.cols; i++) {
            if (type == 0)
                img_out.at<uchar>(j, i) = img_out.at<uchar>(j, i) > threshold ? 255 : 0;
            else if (type == 1)
                img_out.at<uchar>(j, i) = img_out.at<uchar>(j, i) > threshold ? 255 : img_out.at<uchar>(j, i);
            else
                img_out.at<uchar>(j, i) = img_out.at<uchar>(j, i) > threshold ? threshold : img_out.at<uchar>(j, i);
        }
    }
    return img_out;
}
// Threshold the Y channel of a packed YIQ float image (same `type`
// semantics as thresholdingGray), then convert the result back to RGB.
Mat thresholdingYIQ(const Mat& img, float threshold, int type, bool mean) {
    // BUG FIX: `Mat img_out = img;` shared data with the const& input, so
    // the loop mutated the caller's image. Deep-copy instead.
    Mat img_out = img.clone();
    if (mean) threshold = mean_y_float(img_out);
    for (int j = 0; j < img.rows; j++) {
        // Y channel lives at flat index 2 (mod 3) in the packed layout
        for (int i = 2; i < img.cols * 3; i += 3) {
            if (type == 0)
                img_out.at<float>(j, i) = img_out.at<float>(j, i) > threshold ? 255 : 0;
            else if (type == 1)
                img_out.at<float>(j, i) = img_out.at<float>(j, i) > threshold ? 255 : img_out.at<float>(j, i);
            else
                img_out.at<float>(j, i) = img_out.at<float>(j, i) > threshold ? threshold : img_out.at<float>(j, i);
        }
    }
    return yiq2rgb(img_out);
}
// Mean of all uchar samples in a single-channel image (integer
// accumulation; truncating division, result narrowed to uchar).
uchar mean_y_uchar(const Mat& img) {
    long total = 0;
    for (int row = 0; row < img.rows; row++) {
        for (int col = 0; col < img.cols; col++) {
            total += img.at<uchar>(row, col);
        }
    }
    return total / (img.rows * img.cols);
}
// Mean of the Y channel of a packed YIQ float image (Y is every 3rd
// sample, starting at flat index 2 within each row).
float mean_y_float(const Mat& img) {
    float total = 0;
    for (int row = 0; row < img.rows; row++) {
        for (int idx = 2; idx < img.cols * 3; idx += 3) {
            total += img.at<float>(row, idx);
        }
    }
    return total / (img.rows * img.cols);
}
// Convolve img with mask, dispatching on image type.
Mat filter(const Mat& img, const Mat& mask) {
    if (img.type() == CV_32FC3 || img.type() == CV_8UC3)
        return filterRGB(img, mask);
    if (img.type() == CV_32FC1 || img.type() == CV_8UC1)
        return filterGray(img, mask);
    // BUG FIX: the original fell off the end (UB) for any other type;
    // return an untouched copy as a safe fallback.
    return img.clone();
}
// Spatial convolution of a single-channel 8-bit image with `mask`.
// The image is reflect-padded by the mask radius so the output keeps the
// input size; results are clamped to [0, 255].
Mat filterGray(const Mat& img, const Mat& mask) {
Mat img_out(img.size(), img.type());
int raio = (mask.rows - 1) / 2; // mask radius ("raio" = radius)
Mat img_pad;
copyMakeBorder(img, img_pad, raio, raio, raio, raio, BORDER_REFLECT101);
for (int j = raio; j < img.rows + raio; j++) {
for (int i = raio; i < img.cols + raio; i++) {
// NOTE(review): Rect takes (x, y, width, height); passing mask.rows as
// width and mask.cols as height is transposed for non-square masks.
// Harmless here because every mask in this file is 3x3 — confirm before
// using rectangular kernels.
Rect region(i - raio, j - raio, mask.rows, mask.cols);
Mat img_region = img_pad(region);
img_region.convertTo(img_region, CV_32F);
img_region = img_region.mul(mask);
float pixel = sum(img_region)[0];
if (pixel > 255) { pixel = 255; }
if (pixel < 0) { pixel = 0; }
img_out.at<uchar>(j - raio, i - raio) = pixel;
}
}
return img_out;
}
// Filter each channel plane independently, then reinterleave.
Mat filterRGB(const Mat& img, const Mat& mask) {
    Mat c0 = filterGray(split(img, 0, true), mask);
    Mat c1 = filterGray(split(img, 1, true), mask);
    Mat c2 = filterGray(split(img, 2, true), mask);
    return merge(c0, c1, c2);
}
// Horizontal-gradient Sobel kernel (responds to vertical edges).
Mat sobelFilterHor(const Mat& img) {
Mat mask = (Mat_<float>(3, 3) <<
-1, 0, 1,
-2, 0, 2,
-1, 0, 1);
return filter(img, mask);
}
// Vertical-gradient Sobel kernel (responds to horizontal edges).
Mat sobelFilterVer(const Mat& img) {
Mat mask = (Mat_<float>(3, 3) <<
1, 2, 1,
0, 0, 0,
-1, -2, -1);
return filter(img, mask);
}
// Combined Sobel response: sum of the vertical and horizontal filters.
// cv::Mat operator+ already saturates 8-bit results, and the original's
// follow-up "clamp" loop compared a uchar against >255 / <0 — conditions
// that can never be true — so that dead loop is removed; output unchanged.
Mat sobelFilter(const Mat& img) {
    return sobelFilterVer(img) + sobelFilterHor(img);
}
// 4-neighbour Laplacian kernel. NOTE(review): despite the "Hor" suffix
// this is the isotropic 4-connected Laplacian, not a directional filter.
Mat laplacianFilterHor(const Mat& img) {
Mat mask = (Mat_<float>(3, 3) <<
0, 1, 0,
1, -4, 1,
0, 1, 0);
return filter(img, mask);
}
// 8-neighbour Laplacian kernel (includes diagonals). NOTE(review): the
// "Ver" suffix is a misnomer — this is also isotropic, not directional.
Mat laplacianFilterVer(const Mat& img) {
Mat mask = (Mat_<float>(3, 3) <<
1, 1, 1,
1, -8, 1,
1, 1, 1);
return filter(img, mask);
}
// Default Laplacian filter. The kernel used here was byte-identical to
// laplacianFilterHor's 4-neighbour kernel, so delegate instead of
// duplicating it; the large commented-out clamp/sum experiment that used
// to live here was dead code and has been dropped. Behavior is unchanged.
Mat laplacianFilter(const Mat& img) {
    return laplacianFilterHor(img);
}
// Apply salt & pepper noise to a single-channel 8-bit image: each pixel
// becomes 0 (pepper) or 255 (salt) with probability ~density/2 each,
// otherwise the source value is copied.
// BUG FIX: the entire noise loop was commented out in the original, so the
// function returned an UNINITIALIZED Mat (garbage). The commented-out
// implementation is restored below.
Mat noiseSaltPepperMono(const Mat& img, float density) {
    Mat out_img(img.size(), img.type());
    for (int j = 0; j < img.rows; j++) {
        for (int i = 0; i < img.cols; i++) {
            uchar pixel = uchar(rand() % 256);
            out_img.at<uchar>(j, i) = pixel < uint(127 * density) ? 0 : img.at<uchar>(j, i);
            out_img.at<uchar>(j, i) = pixel > uint(127 * (2.0f - density)) ? 255 : out_img.at<uchar>(j, i);
        }
    }
    return out_img;
}
// Apply salt & pepper noise independently to each channel plane, then
// reinterleave the planes.
Mat noiseSaltPepperRGB(const Mat& img, float density) {
    Mat c0 = noiseSaltPepperMono(split(img, 0, true), density);
    Mat c1 = noiseSaltPepperMono(split(img, 1, true), density);
    Mat c2 = noiseSaltPepperMono(split(img, 2, true), density);
    return merge(c0, c1, c2);
}
// Dispatch salt & pepper noise on image type.
Mat noiseSaltPepper(const Mat& img, float density) {
    if (img.type() == CV_32FC3 || img.type() == CV_8UC3)
        return noiseSaltPepperRGB(img, density);
    if (img.type() == CV_32FC1 || img.type() == CV_8UC1)
        return noiseSaltPepperMono(img, density);
    // BUG FIX: unsupported types used to fall off the end (UB); return an
    // untouched copy instead.
    return img.clone();
}
main.cpp
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <fstream>
#include<algorithm>
#include <iterator>
#include <valarray>
#include "pdi.hpp"
#include <graphics.h> // 引用图形库头文件
#include <conio.h>
using namespace std;
using namespace cv;
//空域滤波--构建拉普拉斯金字塔
// Build a Laplacian pyramid with `levels` levels.
// pyramid[0..levels-2] hold the band-pass (detail) images from fine to
// coarse; the last entry is the low-pass residual.
// Returns false (after printing an error) if `levels` is invalid.
bool laplacianpyramid(Mat img, int levels, vector<Mat_<Vec3f>>& pyramid)
{
    if (levels < 1)
    {
        cout << "level设置错误" << endl;
        return false;
    }
    Mat img1 = img.clone();
    for (int i = 0; i < levels - 1; i++)
    {
        Mat up, down;
        pyrDown(img1, down);
        pyrUp(down, up, img1.size()); // upsample back to the current size
        pyramid.push_back(img1 - up); // detail = current - reconstruction
        img1 = down;
    }
    pyramid.push_back(img1); // low-pass residual
    return true; // BUG FIX: original fell off the end of a bool function (UB)
}
//时域滤波--二阶IIR
// Temporal band-pass via two first-order IIR low-pass filters, applied
// per pyramid level:
//   low  = (1-fl)*low  + fl*src   (slow low-pass, cutoff fl)
//   high = (1-fh)*high + fh*src   (faster low-pass, cutoff fh)
//   dst  = high - low             (band between fl and fh)
// NOTE(review): dst is computed from the PREVIOUS frame's low/high state
// (before the update), so the band-pass lags one frame — preserved as-is.
void IIR( vector<Mat_<Vec3f>>&src, vector<Mat_<Vec3f>>& dst, vector<Mat_<Vec3f>>& low, vector<Mat_<Vec3f>>& high)
{
    double fl = 0.05;
    double fh = 0.4;
    vector<Mat_<Vec3f>> low1, high1, dst1;
    for (size_t i = 0; i < src.size(); i++)
    {
        low1.push_back((1 - fl) * low[i] + fl * src[i]);
        high1.push_back((1 - fh) * high[i] + fh * src[i]);
        dst1.push_back(high[i] - low[i]);
    }
    low.assign(low1.begin(), low1.end());
    high.assign(high1.begin(), high1.end());
    dst.assign(dst1.begin(), dst1.end());
    // The original dumped level 3 of src/dst/low/high to "dst.txt",
    // "src.txt", "low.txt" and "high.txt" on EVERY call. That was leftover
    // debugging (the accompanying text says to comment it out); it clobbered
    // the files each frame and dominated runtime, so it has been removed.
}
// Amplify each pyramid level with a wavelength-dependent gain (EVM-style)
// and collapse the pyramid back into a single YIQ image.
//  src      - Laplacian pyramid of the band-passed signal (modified in place!)
//  dst      - output: sum of the upsampled, amplified levels
//  alpha    - maximum magnification factor
//  lambda_c - spatial wavelength cutoff
void returnsrc(vector<Mat_<Vec3f>>& src, Mat& dst,double alpha,double lambda_c)
{
// representative spatial wavelength at the finest level (image diagonal / 3)
double lambda = sqrtf(src[0].cols * src[0].cols + src[0].rows * src[0].rows) / 3;
int ext = 2; // extra exaggeration factor on top of the paper's formula
double delta;
delta = lambda_c / 8 / (1 + alpha);
vector<Mat_<Vec3f>>temp; // NOTE(review): unused
for (int i = 0; i < src.size(); i++)
{
// per-level gain from the EVM paper: alpha_l = lambda/(8*delta) - 1, capped at alpha
double curralpha = lambda / delta / 8 - 1;
curralpha = curralpha * ext;
if ((i == 0) || (i == src.size() - 1))
{
// finest level and the low-pass residual are not amplified
src[i] = 0;
}
else if (curralpha > alpha)
{
src[i] = src[i]*alpha;
}
else
{
src[i] = src[i] *curralpha;
}
// NOTE(review): lambda is halved while iterating from fine to coarse; the
// reference EVM code halves it going from coarse to fine — verify direction.
lambda = lambda / 2;
}
// collapse: start from the coarsest level, repeatedly upsample and add
Mat up= src[src.size()-1];
for (int i = 0; i < src.size()-1; i++)
{
pyrUp(up, up);
up = up + src[src.size() - 2-i];
}
// attenuate two of the three channels by 10x. NOTE(review): in the packed
// YIQ order planes[0]/planes[1] are Y and I; the usual EVM trick attenuates
// the chroma channels (I, Q) — confirm which channels were intended.
Mat planes[3];
split(up, planes);
planes[0] = planes[0] * 0.1;
planes[1] = planes[1] * 0.1;
merge(planes, 3, up);
//Mat result = yiq2rgb(up);
//result.convertTo(result, CV_8UC3, 255.0, 1.0 / 255.0);
dst = up.clone();
}
//void returnsrc(vector<Mat_<Vec3f>>& src, Mat& dst)
//{
// vector<Mat_<Vec3f>>temp;
// for (int i = 0; i < src.size(); i++)
// {
// Mat a = src[i];
// Mat p = yiq2rgb(a);
// Mat planes[3];
// split(p, planes);
// planes[1] = planes[1] * 100;
// planes[2] = planes[2] * 100;
// merge(planes, 3, p);
// temp.push_back(p);
// }
// Mat up = temp[src.size() - 1];
// for (int i = 0; i < src.size() - 1; i++)
// {
// pyrUp(up, up);
// up = up + temp[src.size() - 2 - i];
// }
// up.convertTo(up, CV_8UC3, 255.0, 1.0 / 255.0);
// dst = up.clone();
//}
// Mean gray level of an ROI.
// Generalized: divides by the actual ROI area instead of the hard-coded
// 32*32, so it now works for any ROI size (identical result for the 32x32
// tiles used by the caller).
// NOTE(review): CV_RGB2GRAY applies RGB-ordered weights, but OpenCV frames
// are BGR — the gray weighting is swapped; confirm intent.
float count(Mat& src)
{
    Mat a;
    cvtColor(src, a, CV_RGB2GRAY);
    a.convertTo(a, CV_32FC1);
    float sum = 0;
    for (int i = 0; i < a.rows; i++)
    {
        for (int j = 0; j < a.cols; j++)
        {
            sum += a.at<float>(i, j);
        }
    }
    return sum / (a.rows * a.cols); // was sum/32/32 — hard-coded ROI size
}
// Sort p[0..length) ascending while tracking original positions:
// afterwards ind_diff[k] is the index the k-th smallest value came from.
// Rewritten from an O(n^2) bubble sort to std::stable_sort on an index
// array; stability preserves the original's tie ordering. (Name kept for
// compatibility with existing callers.)
void BubbleSort(float* p, int length, int* ind_diff)
{
    std::vector<int> order(length);
    for (int m = 0; m < length; m++) order[m] = m;
    std::stable_sort(order.begin(), order.end(),
        [p](int a, int b) { return p[a] < p[b]; });
    std::vector<float> sorted(length);
    for (int k = 0; k < length; k++) sorted[k] = p[order[k]];
    for (int k = 0; k < length; k++)
    {
        p[k] = sorted[k];
        ind_diff[k] = order[k];
    }
}
// Pick the ROI whose brightness varies the most over time.
// X[frame][roi] holds 210 frames x 256 ROI mean intensities; an ROI's score
// is the (unnormalized) variance of its column. Returns the ROI index with
// the largest variance.
// Rewritten to scan for the maximum directly instead of bubble-sorting all
// 256 scores and printing them (O(n) instead of O(n^2), no debug spam, and
// no dependency on the sort helper).
int find(float X[210][256])
{
    int best = 0;
    float best_s = -1.0f;
    for (int j = 0; j < 256; j++)
    {
        float average = 0;
        for (int i = 0; i < 210; i++)
        {
            average += X[i][j];
        }
        average /= 210;
        float s = 0;
        for (int i = 0; i < 210; i++)
        {
            s += (X[i][j] - average) * (X[i][j] - average);
        }
        // ">=" keeps the LAST index on ties, matching the original's stable
        // ascending sort followed by `return ind[255]`.
        if (s >= best_s)
        {
            best_s = s;
            best = j;
        }
    }
    return best;
}
// Driver. Two mutually exclusive passes, selected by the #define below:
//  creat     - read the input video, Eulerian-magnify each frame, write the
//              amplified video to result_ht_1.avi.
//  find_plot - re-read the magnified video, score 16x16 = 256 ROIs per frame
//              for 210 frames, pick the most time-varying ROI, and dump its
//              amplitude trace to zhenfu.txt for plotting.
int main()
{
//#define find_plot
#define creat
#ifdef creat
VideoCapture frame("04032.mp4");
VideoWriter writer("result_ht_1.avi", CV_FOURCC('M', 'J', 'P', 'G'), 30, Size(512, 512));
Mat img,out;
// per-level temporal filter state (one Mat per pyramid level)
vector<Mat_<Vec3f>>result;
vector<Mat_<Vec3f>>low;
vector<Mat_<Vec3f>>high;
int number = 0; // frame counter
#define level 7
while (1)
{
vector<Mat_<Vec3f>>pyramid;
frame >> img;
// NOTE(review): the frame is written to <n>.jpg BEFORE the empty() check,
// so the final (empty) grab is also passed to imwrite — confirm intended.
string a = to_string(number+1)+".jpg";
imwrite(a, img);
if (img.empty())
{
break;
}
else
{
// normalize to [0,1] floats, move to the YIQ domain, build the pyramid
img.convertTo(img, CV_32FC3, 1.0 / 255.0f);
Mat imgyiq;
imgyiq = rgb2yiq(img);
Mat imgba = imgyiq.clone();
laplacianpyramid(imgyiq, level, pyramid);
if (number == 0)
{
// first frame only seeds the temporal filter state
low.assign(pyramid.begin(), pyramid.end());
high.assign(pyramid.begin(), pyramid.end());
result.assign(pyramid.begin(), pyramid.end());
}
else
{
// band-pass over time, amplify + collapse, add back onto the frame
IIR(pyramid, result, low, high);
returnsrc(result, out,15,24);
//imshow("", out);
//imshow("s", imgba);
out = (out + imgba);
Mat finnal = yiq2rgb(out);
finnal.convertTo(finnal, CV_8UC3, 255.0);
writer << finnal;
imshow("", finnal);
waitKey(1);
}
number++;
}
}
#endif
#ifdef find_plot
VideoCapture frame("result_ht_1.avi");
Mat img;
int flag = 0; // frame counter (1-based after the ++ below)
float cost[210][256]; // cost[frame][roi] = mean ROI intensity
while (true)
{
frame >> img;
//img.convertTo(img, CV_32FC3, 1.0 / 255.0f);
//Mat imgyiq = rgb2yiq(img);
//Mat planes[3];
//split(imgyiq, planes);
flag++;
if (img.empty())
{
break;
}
if (flag > 210)
{
break;
}
// 32x32 tiles over the 512x512 frame
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < 16; j++)
{
Mat imgrio = img(Rect(i*32,j*32,32,32));
cost[flag - 1][j + i*16] = count(imgrio);
}
}
}
// ROI with the largest temporal variance; dump its trace (CSV) for plotting
int loc = find(cost);
cout << loc << endl;
ofstream file;
file.open("zhenfu.txt");
for (int i = 0; i < 210; i++)
{
file << float(cost[i][loc]) << ",";
}
file.close();
/*initgraph(420,200);
setbkcolor(WHITE);
cleardevice();
for (int i = 0; i < 210; i++)
{
putpixel(2 * i, 200-int(float(cost[i][loc])), BLACK);
}
saveimage(_T("test.bmp"));
_getch();
closegraph();*/
#endif
}