如何使用 VideoCapture 读取视频输入并对两段视频进行相似度对比?
基本上,视频操作所需的所有功能都集成在 cv::VideoCapture 这个 C++ 类中。它本身构建在 FFmpeg 开源库之上,而 FFmpeg 是 OpenCV 的基本依赖,所以你不必为此担心。视频由连续的图像组成,文献中将这些图像称为帧。对于视频文件,存在指定两帧之间间隔多长时间的帧速率;而对于摄像机,帧速率通常受每秒能够数字化的帧数限制。不过对摄像机来说这个属性不太重要,因为相机拍到的总是当前时刻的世界快照。
代码:
double getPSNR(const Mat& I1, const Mat& I2)
{
Mat s1;
absdiff(I1, I2, s1); // |I1 - I2|
s1.convertTo(s1, CV_32F); // cannot make a square on 8 bits
s1 = s1.mul(s1); // |I1 - I2|^2
Scalar s = sum(s1); // sum elements per channel
double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
if( sse <= 1e-10) // for small values return zero
return 0;
else
{
double mse = sse / (double)(I1.channels() * I1.total());
double psnr = 10.0 * log10((255 * 255) / mse);
return psnr;
}
}
// Mean Structural Similarity (SSIM) between two images.
// Returns a Scalar holding the average SSIM of each channel (range ~[0, 1]).
Scalar getMSSIM( const Mat& i1, const Mat& i2)
{
    const double C1 = 6.5025, C2 = 58.5225;   // stabilizers for the 8-bit value range
    const Size win(11, 11);                   // Gaussian window used for all local stats
    const double sigma = 1.5;

    // Work in float: products of 8-bit values cannot be represented in one byte.
    Mat a, b;
    i1.convertTo(a, CV_32F);
    i2.convertTo(b, CV_32F);

    Mat a2 = a.mul(a);                        // I1^2
    Mat b2 = b.mul(b);                        // I2^2
    Mat ab = a.mul(b);                        // I1 * I2

    // Local means.
    Mat muA, muB;
    GaussianBlur(a, muA, win, sigma);
    GaussianBlur(b, muB, win, sigma);

    Mat muA2  = muA.mul(muA);
    Mat muB2  = muB.mul(muB);
    Mat muAB  = muA.mul(muB);

    // Local variances and covariance: E[x^2] - E[x]^2 (and E[xy] - E[x]E[y]).
    Mat varA, varB, covAB;
    GaussianBlur(a2, varA, win, sigma);
    varA -= muA2;
    GaussianBlur(b2, varB, win, sigma);
    varB -= muB2;
    GaussianBlur(ab, covAB, win, sigma);
    covAB -= muAB;

    // SSIM map = ((2*muA*muB + C1) .* (2*cov + C2))
    //          / ((muA^2 + muB^2 + C1) .* (varA + varB + C2))
    Mat n1 = 2 * muAB + C1;
    Mat n2 = 2 * covAB + C2;
    Mat numerator = n1.mul(n2);

    Mat d1 = muA2 + muB2 + C1;
    Mat d2 = varA + varB + C2;
    Mat denominator = d1.mul(d2);

    Mat ssimMap;
    divide(numerator, denominator, ssimMap);  // element-wise ratio
    return mean(ssimMap);                     // average SSIM per channel
}
// One-time initialization flag for the two capture streams below
// (0 = captures not yet opened, 1 = both opened successfully).
int isInit=0;
// NOTE(review): raw owning pointers allocated with `new` and never deleted —
// they live for the whole process; consider cv::Ptr/unique_ptr if lifetime
// ever needs to be managed.
VideoCapture *onevideo=NULL;
VideoCapture *twovideo=NULL;
/**
 * JNI entry point called per camera frame: on the first call it opens two
 * reference video files; on every call it reads one frame from each, computes
 * PSNR and MSSIM between them, blits the reference frame into the top-left
 * corner of the RGBA preview, and logs the metrics.
 *
 * @param addrGray native address of the gray preview Mat (not used here)
 * @param addrRgba native address of the RGBA preview Mat (drawn into)
 */
JNIEXPORT void JNICALL
Java_org_opencv_samples_tutorial2_Tutorial2Activity_FindFeatures(JNIEnv *, jobject, jlong addrGray,
                                                                 jlong addrRgba) {
    (void) addrGray;                    // gray image is not needed for this demo
    Mat &mRgb = *(Mat *) addrRgba;

    // Frame-counting state used elsewhere in the sample.
    looperAddNum++;
    if (looperAddNum > 15) {
        looperAddNum = 0;
        if (looperIndexNum < 100)
            looperIndexNum += 1;
    }
    // code start-----------------------------------------------------------------------------
    if (!isInit){
        const char* filename="/data/data/org.opencv.samples.tutorial2/cache/Megamind.avi";
        const char* filename2="/data/data/org.opencv.samples.tutorial2/cache/Megamind_bugy.avi";
        onevideo=new VideoCapture(filename);
        twovideo=new VideoCapture(filename2);
        // On failure, release BOTH captures before returning: the original code
        // leaked two heap objects on every failed retry, since the next call
        // re-allocates without deleting.
        if (!onevideo->isOpened()){
            LOGI("index one open error");
            delete onevideo; onevideo = NULL;
            delete twovideo; twovideo = NULL;
            return;
        }
        if (!twovideo->isOpened()){
            LOGI("index two open error");
            delete onevideo; onevideo = NULL;
            delete twovideo; twovideo = NULL;
            return;
        }
        isInit=1;
    }
    // Init either succeeded above or on a previous call; the failure paths
    // returned, so no `if (isInit)` guard is needed here.
    Mat frameReference, twoframe;
    *onevideo >> frameReference;
    *twovideo >> twoframe;
    // End of either stream: nothing left to compare.
    if (frameReference.empty() || twoframe.empty())
        return;

    double ret = getPSNR(frameReference, twoframe);
    Scalar rets = getMSSIM(frameReference, twoframe);

    // Guard the ROI: a frame larger than the preview surface would make
    // Rect/Mat assert and crash the app.
    if (frameReference.cols <= mRgb.cols && frameReference.rows <= mRgb.rows) {
        Mat dstW = mRgb(Rect(0, 0, frameReference.cols, frameReference.rows));
        // NOTE(review): COLOR_RGB2BGRA matches the original sample, but
        // VideoCapture frames are conventionally BGR — confirm channel order
        // visually (R/B may be swapped in the preview).
        cvtColor(frameReference, dstW, COLOR_RGB2BGRA);
    }
    LOGI("index is init ok %f %d %d %f %f %f", ret, frameReference.cols, frameReference.rows, rets[0], rets[1], rets[2]);
    // code end-----------------------------------------------------------------------------
}