利用视差图合成新视点,视差图一般通过图像匹配获取,以middlebury上的一张图为例,左边为原图(左图像),右边为对应视差图(灰度图)。
- 正向映射:
简单地利用左视点原图和视差图进行视点合成:取每一个像素点处的视差值,计算该像素在新图像中的位置,然后赋值。前向映射、单点赋值的代码如下。配置完 OpenCV 后可以直接运行,效果如下图:
#include <iostream>
#include <string>
#include <opencv.hpp>
using namespace std;
using namespace cv;
void main()
{
string imgPath="data/source_images/teddy/";
Mat srcImgL=imread(imgPath+"imgL.png");
Mat dispL=imread(imgPath+"dispL.png",0);
dispL=dispL/4;
int imgHeight=srcImgL.rows;
int imgWidth=srcImgL.cols;
int channels=srcImgL.channels();
Mat dstImgL=Mat::zeros(imgHeight,imgWidth, CV_8UC3);
uchar* pImgDataL=(uchar*)srcImgL.data;
uchar* pDispDataL=(uchar*)dispL.data;
uchar* pDstDataL=(uchar*)dstImgL.data;
VideoWriter writer("video.avi", CV_FOURCC('D','I','V','X'), 30, Size(imgWidth, imgHeight), 1);
int cnt=0;
int viewCnt=50;
while (cnt !=4)
{
for (int k=0; k<viewCnt; k++)
{
dstImgL.setTo(0);
float interp;
if (cnt%2==0) interp=(float)k/viewCnt;
else interp=float(viewCnt-k)/viewCnt;
for (int j=0; j<imgHeight; j++)
{
for (int i=0; i<imgWidth; i++)
{
uchar dispL=pDispDataL[j*imgWidth+i];
float offsetL=dispL* interp;
int idL=(int)(offsetL+0.5); //计算视差值
if (idL+i>=imgWidth) continue;
//插值结果
int idxResult=(j*imgWidth+i)*channels;
int idx=(j*imgWidth+i+idL)*channels;
for (int chan=0; chan<channels; chan++)
{
pDstDataL[idxResult+chan]=pImgDataL[idx+chan];
}
}
}
namedWindow("show");
imshow("show", dstImgL);
waitKey(10);
writer<<dstImgL;
}
cnt++;
}
writer.release();
}
边缘有锯齿,随后找时间加上反向映射以及双线性插值的版本。
- 反向映射
先根据左视点视差图生成虚拟视点的视差图,然后反向映射得到每一个像素点在原图像中的浮点位置,利用线性插值获取最终颜色值。(虚拟视点位置视差图没有填充空洞版本),可见有很多裂纹。#include <iostream> #include <string> #include <opencv.hpp> using namespace std; using namespace cv; int index(int m, int n) { if (m>=0 && m<n) return m; else if (m<0) return 0; else if (m>=n) return n-1; } void obtainNewDispMap(const Mat &refDisp, Mat &dstDisp, float value) { int height=refDisp.rows; int width=refDisp.cols; uchar* pSrcDispData=(uchar*) refDisp.data; float* pDstDispData=(float*) dstDisp.data; for (int j=0; j<height; j++) { for (int i=0; i<width; i++) { int disp=(int)pSrcDispData[j*width+i]; float newDisp=disp*(value); int inew=(int)(i-newDisp); inew=index(inew, width); pDstDispData[j*width+inew]=newDisp; } } } void main(void) { string imgPath="data/source_images/teddy/"; Mat srcImgL=imread(imgPath+"imgL.png"); Mat dispL=imread(imgPath+"dispL.png",0); dispL=dispL/4; int imgHeight=srcImgL.rows; int imgWidth=srcImgL.cols; Mat dstImgL=Mat::zeros(imgHeight,imgWidth, CV_8UC3); Mat dstImg=Mat::zeros(imgHeight,imgWidth, CV_8UC3); Mat dstNewDispImg=Mat::zeros(imgHeight,imgWidth, CV_32FC1); uchar* pImgDataL=(uchar*)srcImgL.data; uchar* pDispDataL=(uchar*)dispL.data; uchar* pDstDataL=(uchar*)dstImgL.data; VideoWriter writer("video.avi", CV_FOURCC('D','I','V','X'), 30, Size(imgWidth, imgHeight), 1); int cnt=0; int viewCnt=50; while (cnt!=4) { float interp; for (int k=0; k<viewCnt; k++) { dstNewDispImg.setTo(255); dstImgL.setTo(0); if (cnt%2==0) interp=(float)k/viewCnt; else interp=(float)(viewCnt-k)/viewCnt; obtainNewDispMap(dispL, dstNewDispImg, interp); float* pNewDispData=(float*)dstNewDispImg.data; for (int j=0; j<imgHeight; j++) { for (int i=0; i<imgWidth; i++) { float disp=pNewDispData[j*imgWidth+i]; float id=i+disp; int id0=floor(id); int id1=floor(id+1); float weight1=1-(id-id0); float weight2=id-id0; id0=index(id0, imgWidth); id1=index(id1, imgWidth); //插值结果 
pDstDataL[j*imgWidth*3+i*3+0]=weight1*pImgDataL[j*imgWidth*3+id0*3+0]+weight2*pImgDataL[j*imgWidth*3+id1*3+0]; pDstDataL[j*imgWidth*3+i*3+1]=weight1*pImgDataL[j*imgWidth*3+id0*3+1]+weight2*pImgDataL[j*imgWidth*3+id1*3+1]; pDstDataL[j*imgWidth*3+i*3+2]=weight1*pImgDataL[j*imgWidth*3+id0*3+2]+weight2*pImgDataL[j*imgWidth*3+id1*3+2]; } } namedWindow("virImg"); imshow("virImg", dstImgL); waitKey(10); writer<<dstImgL; } cnt++; } writer.release(); }
3.反向映射+空洞填充+双线性插值
上面生成虚拟视点位置的视差图时没有填充空洞,生成的虚拟视点会有很多裂纹存在。加上空洞填充能够有效消除裂纹。如下:
填充空洞后生成的虚拟视点图如下,可见空洞裂纹得到有效消除: