计算两段yuv视频流中每一帧的ssim值

方法同上一篇,只不过这里在计算的时候用了opencv1的接口,出现了一些问题,最后总算解决了。

程序:

#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cv.h>
#include <highgui.h>
#define NUM_FRAME 100 //只处理前100帧,根据视频帧数可修改
void CalcPsnr(const char *in1,const char *in2)
{
	cv::VideoCapture vc1;
	cv::VideoCapture vc2;
	bool flag1 = vc1.open(in1);
	bool flag2 = vc2.open(in2);
	if (!flag1||!flag2)
	{
		printf("avi file open error \n");
		system("pause");
		exit(-1);
	}

	int frmCount1 = vc1.get(CV_CAP_PROP_FRAME_COUNT);
	int frmCount2 = vc2.get(CV_CAP_PROP_FRAME_COUNT);
	printf("frmCount: %d \n", frmCount1);
	printf("frmCount: %d \n", frmCount2);
	for (int i = 0; i < frmCount1; i++)
	{
		printf("%d/%d \n", i + 1, frmCount1);
		cv::Mat image_ref;
		vc1 >> image_ref;
		cv::Mat image_obj;
		vc2 >> image_obj;
		double mse = 0;
		double div_r = 0;
		double div_g = 0;
		double div_b = 0;
		int width = image_ref.cols;
		int height = image_ref.rows;
		double psnr = 0;
		for (int v = 0; v < height; v++)
		{
			for (int u = 0; u < width; u++)
			{
				div_r = image_ref.at<cv::Vec3b>(v, u)[0] - image_obj.at<cv::Vec3b>(v, u)[0];
				div_g = image_ref.at<cv::Vec3b>(v, u)[1] - image_obj.at<cv::Vec3b>(v, u)[1];
				div_b = image_ref.at<cv::Vec3b>(v, u)[2] - image_obj.at<cv::Vec3b>(v, u)[2];
				mse += ((div_r*div_r + div_b*div_b + div_g*div_g) / 3);

			}
		}
		mse = mse / (width*height);
		psnr = 10 * log10(255 * 255 / mse);
		printf("%lf\n", mse);
		printf("%lf\n", psnr);
	}
	return;
}
void  CalcSsim(const char *in1,const char *in2)
{
	CvCapture* capture1 = cvCreateFileCapture(in1);
	CvCapture* capture2 = cvCreateFileCapture(in2);
	int i = 0;
	// default settings  
	double C1 = 6.5025, C2 = 58.5225;

	IplImage
		*img1 = NULL, *img2 = NULL, *img1_img2 = NULL,
		*img1_temp = NULL, *img2_temp = NULL,
		*img1_sq = NULL, *img2_sq = NULL,
		*mu1 = NULL, *mu2 = NULL,
		*mu1_sq = NULL, *mu2_sq = NULL, *mu1_mu2 = NULL,
		*sigma1_sq = NULL, *sigma2_sq = NULL, *sigma12 = NULL,
		*ssim_map = NULL, *temp1 = NULL, *temp2 = NULL, *temp3 = NULL;
	while (1)
	{
		printf("%d/%d \n", ++i, NUM_FRAME);
		


		/***************************** INITS **********************************/
		img1_temp = cvQueryFrame(capture1);
		img2_temp = cvQueryFrame(capture2);

		if (img1_temp == NULL || img2_temp == NULL)
			return;

		int x = img1_temp->width, y = img1_temp->height;
		int nChan = img1_temp->nChannels, d = IPL_DEPTH_32F;
		CvSize size = cvSize(x, y);

		img1 = cvCreateImage(size, d, nChan);
		img2 = cvCreateImage(size, d, nChan);

		cvConvert(img1_temp, img1);
		cvConvert(img2_temp, img2);
		/*cvReleaseImage(&img1_temp);
		cvReleaseImage(&img2_temp);*/


		img1_sq = cvCreateImage(size, d, nChan);
		img2_sq = cvCreateImage(size, d, nChan);
		img1_img2 = cvCreateImage(size, d, nChan);

		cvPow(img1, img1_sq, 2);
		cvPow(img2, img2_sq, 2);
		cvMul(img1, img2, img1_img2, 1);

		mu1 = cvCreateImage(size, d, nChan);
		mu2 = cvCreateImage(size, d, nChan);

		mu1_sq = cvCreateImage(size, d, nChan);
		mu2_sq = cvCreateImage(size, d, nChan);
		mu1_mu2 = cvCreateImage(size, d, nChan);


		sigma1_sq = cvCreateImage(size, d, nChan);
		sigma2_sq = cvCreateImage(size, d, nChan);
		sigma12 = cvCreateImage(size, d, nChan);

		temp1 = cvCreateImage(size, d, nChan);
		temp2 = cvCreateImage(size, d, nChan);
		temp3 = cvCreateImage(size, d, nChan);

		ssim_map = cvCreateImage(size, d, nChan);
		/*************************** END INITS **********************************/


		//  
		// PRELIMINARY COMPUTING  
		cvSmooth(img1, mu1, CV_GAUSSIAN, 11, 11, 1.5);
		cvSmooth(img2, mu2, CV_GAUSSIAN, 11, 11, 1.5);

		cvPow(mu1, mu1_sq, 2);
		cvPow(mu2, mu2_sq, 2);
		cvMul(mu1, mu2, mu1_mu2, 1);


		cvSmooth(img1_sq, sigma1_sq, CV_GAUSSIAN, 11, 11, 1.5);
		cvAddWeighted(sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq);

		cvSmooth(img2_sq, sigma2_sq, CV_GAUSSIAN, 11, 11, 1.5);
		cvAddWeighted(sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq);

		cvSmooth(img1_img2, sigma12, CV_GAUSSIAN, 11, 11, 1.5);
		cvAddWeighted(sigma12, 1, mu1_mu2, -1, 0, sigma12);


		//  
		// FORMULA  

		// (2*mu1_mu2 + C1)  
		cvScale(mu1_mu2, temp1, 2);
		cvAddS(temp1, cvScalarAll(C1), temp1);

		// (2*sigma12 + C2)  
		cvScale(sigma12, temp2, 2);
		cvAddS(temp2, cvScalarAll(C2), temp2);

		// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))  
		cvMul(temp1, temp2, temp3, 1);

		// (mu1_sq + mu2_sq + C1)  
		cvAdd(mu1_sq, mu2_sq, temp1);
		cvAddS(temp1, cvScalarAll(C1), temp1);

		// (sigma1_sq + sigma2_sq + C2)  
		cvAdd(sigma1_sq, sigma2_sq, temp2);
		cvAddS(temp2, cvScalarAll(C2), temp2);

		// ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))  
		cvMul(temp1, temp2, temp1, 1);

		// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))  
		cvDiv(temp3, temp1, ssim_map, 1);


		CvScalar index_scalar = cvAvg(ssim_map);

		// through observation, there is approximately   
		// 1% error max with the original matlab program  

		/*cout << "(R, G & B SSIM index)" << std::endl;
		cout << index_scalar.val[2] << endl;
		cout << index_scalar.val[1] << endl;
		cout << index_scalar.val[0] << endl;*/

		cvReleaseImage(&img1_sq);
		cvReleaseImage(&img2_sq);
		cvReleaseImage(&img1_img2);
		cvReleaseImage(&mu1);
		cvReleaseImage(&mu2);
		cvReleaseImage(&mu1_sq);
		cvReleaseImage(&mu2_sq);
		cvReleaseImage(&mu1_mu2);
		cvReleaseImage(&sigma1_sq);
		cvReleaseImage(&sigma2_sq);
		cvReleaseImage(&sigma12);
		cvReleaseImage(&temp1);
		cvReleaseImage(&temp2);
		cvReleaseImage(&temp3);
		cvReleaseImage(&ssim_map);
		//double ssim=max(max(index_scalar.val[0], index_scalar.val[1]), index_scalar.val[2]);  
		double ssim = (index_scalar.val[0] + index_scalar.val[1] + index_scalar.val[2]) / 3;
		std::cout << ssim << std::endl;

	}
	cvReleaseCapture(&capture1);
	cvReleaseCapture(&capture2);
	return;
}
// Reads up to NUM_FRAME frames of a raw I420 (YUV 4:2:0 planar) file,
// converts each to BGR, displays it, and writes it to "<dir>pic<i>.jpg".
// dir: output directory (must end with '/'); in: path of the .yuv file;
// _w/_h: frame width and height in pixels.
void DisplayYUV2RGB(const char *dir, const char *in, int _w, int _h)
{
	int w = _w;
	int h = _h;
	printf("yuv file w: %d, h: %d \n", w, h);
	FILE *pFileIn = fopen(in, "rb+");
	// The original dereferenced a NULL FILE* on a bad path.
	if (pFileIn == NULL)
	{
		printf("yuv file open error \n");
		return;
	}
	// One I420 frame: full-res Y plane + quarter-res U and V planes.
	int bufLen = w * h * 3 / 2;
	unsigned char *pYuvBuf = new unsigned char[bufLen];
	int iCount = 0;

	for (int i = 0; i < NUM_FRAME; i++)
	{
		// Stop at EOF / short read instead of re-processing a stale buffer
		// (the original ignored fread's return value).
		if (fread(pYuvBuf, sizeof(unsigned char), bufLen, pFileIn) != (size_t)bufLen)
			break;

		cv::Mat yuvImg;
		yuvImg.create(h * 3 / 2, w, CV_8UC1);
		memcpy(yuvImg.data, pYuvBuf, bufLen * sizeof(unsigned char));
		cv::Mat rgbImg;
		cv::cvtColor(yuvImg, rgbImg, CV_YUV2BGR_I420);

		cv::imshow("img", rgbImg);
		char s[100];
		sprintf(s, "%spic%d%s", dir, i, ".jpg");
		cv::imwrite(s, rgbImg);
		cv::waitKey(1);

		printf("%d \n", iCount++);
	}
	delete[] pYuvBuf;
	fclose(pFileIn);
}
void Image_to_video(const char* in, const char* out)
{
	int num = 1;
	CvSize size = cvSize(1024, 768);  //视频帧格式的大小
	double fps = 30; //每秒钟的帧率
	CvVideoWriter *writer = cvCreateVideoWriter(out, CV_FOURCC('D', 'I', 'V', 'X'), fps, size); //创建视频文件
	char cname[100];
	sprintf(cname, in, num); //加载图片的文件夹,图片的名称编号是1开始1,2,3,4,5.。。。
	IplImage *src = cvLoadImage(cname);
	if (!src)
	{
		return;
	}
	IplImage *src_resize = cvCreateImage(size, 8, 3); //创建视频文件格式大小的图片
	cvNamedWindow("avi");
	while (src)
	{
		cvShowImage("avi", src_resize);
		cvWaitKey(1);
		cvResize(src, src_resize); //将读取的图片设置为视频格式大小相同
		cvWriteFrame(writer, src_resize); //保存图片为视频流格式
		cvReleaseImage(&src); //释放空间
		num++;
		sprintf(cname, in, num);
		src = cvLoadImage(cname);       //循环读取数据
	}
	cvReleaseVideoWriter(&writer);
	cvReleaseImage(&src_resize);
}
// Pipeline: dump each YUV stream to JPEG frames, re-encode those frames to
// an AVI, then compare the two AVIs frame by frame with PSNR and SSIM.
int main(int argc, char *argv[])
{
	const char *frameDir = "C:/Users/jiang/Desktop/output/tupian1/";
	const char *framePattern = "C:/Users/jiang/Desktop/output/tupian1/pic%d.jpg";

	// First stream -> 3outfile.avi.
	DisplayYUV2RGB(frameDir, "C:/Users/jiang/Desktop/output/book_virtual08.yuv", 1024, 768);
	Image_to_video(framePattern, "C:/Users/jiang/Desktop/output/3outfile.avi");

	// Second stream -> 4outfile.avi (written into the same frame directory).
	DisplayYUV2RGB(frameDir, "C:/Users/jiang/Desktop/bookarrival/bookarrival_c_8.yuv", 1024, 768);
	Image_to_video(framePattern, "C:/Users/jiang/Desktop/output/4outfile.avi");

	// Frame-by-frame quality metrics between the two encoded videos.
	const char *avi1 = "C:/Users/jiang/Desktop/output/3outfile.avi";
	const char *avi2 = "C:/Users/jiang/Desktop/output/4outfile.avi";
	CalcPsnr(avi1, avi2);
	CalcSsim(avi1, avi2);

	getchar();  // keep the console window open
}


### 回答1: 在MATLAB中,可以使用`VideoReader`函数来读取YUV文件,并使用`imwrite`函数将每一帧图片保存为文件。 首先,需要创建一个`VideoReader`对象,指定YUV文件的路径和格式(如YUV420Planar、YUV422等): ```matlab videoObj = VideoReader('path/to/your/video.yuv', 'VideoFormat', 'YUV420_8'); ``` 接下来,可以使用`hasFrame`函数来检查是否还有可读的帧,然后使用`readFrame`函数读取每一帧,并使用`imwrite`函数将其保存为图片文件: ```matlab frameNum = 1; while hasFrame(videoObj) frame = readFrame(videoObj); imwrite(frame, sprintf('frame_%d.jpg', frameNum)); frameNum = frameNum + 1; end ``` 在上面的代码中,`videoObj`是`VideoReader`对象,`frame`是读取到的帧数据。`imwrite`函数将传入的帧数据保存为以帧序号命名的图片文件,如`frame_1.jpg`、`frame_2.jpg`等。 要注意的是,YUV文件中每一帧的尺寸和颜色空间可能不同,因此在读取每一帧之前,需要根据文件的格式进行适当的设置。另外,需要根据实际情况调整保存图片的格式和路径。 希望这个回答对您有帮助! ### 回答2: MATLAB中可以使用VideoReader函数来读取YUV文件,并使用imwrite函数将每一帧保存为图片文件。 首先,需要使用VideoReader函数创建一个视频读取对象,指定输入的YUV文件名: ```matlab v = VideoReader('input.yuv'); ``` 接下来,可以使用readFrame函数逐帧读取YUV文件中的图像,并使用imwrite函数将每一帧保存为图片文件。可以通过循环来读取文件中的所有帧并保存: ```matlab frameIndex = 1; % 用于追踪帧的索引 while hasFrame(v) frame = readFrame(v); % 读取下一帧图像 filename = sprintf('frame%d.jpg', frameIndex); % 根据帧索引生成保存的文件名 imwrite(frame, filename); % 保存图像为JPEG文件 frameIndex = frameIndex + 1; % 帧索引自增 end ``` 上述代码中,使用sprintf函数生成每一帧保存的文件名,命名为frame1.jpg、frame2.jpg等。imwrite函数将每一帧图像保存为JPEG文件。 最终,代码将会读取YUV文件中的每一帧图像,并将每一帧保存为以帧索引命名的图片文件。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值