实现的思路是先获取到摄像头的数据
然后把摄像头的数据NV12转换成RGB数据
最后把RGB数据转换成QImage，在QLabel上显示
- 先获取摄像头数据
MEDIA_BUFFER buffer = RK_MPI_SYS_GetMediaBuffer(RK_ID_RGA, 0, -1);//从指定通道RGA中获取数据（注意：后文"显示"示例是从VI通道获取的，实际使用时应与初始化配置的通道保持一致）
- 把摄像头数据进行转换
通过代码把NV12转换成RGB数据：yuvBuffer 是摄像头的数据，rgbBuffer 是返回的RGB数据
static int nv12_to_rgb24_640x640(void* yuvBuffer, void* rgbBuffer) {
rga_buffer_t src, dst;
memset(&src, 0, sizeof(rga_buffer_t));
memset(&dst, 0, sizeof(rga_buffer_t));
src = wrapbuffer_virtualaddr(yuvBuffer, 640, 640, RK_FORMAT_YCbCr_420_SP);
dst = wrapbuffer_virtualaddr(rgbBuffer, 640, 640, RK_FORMAT_RGB_888);
src.format = RK_FORMAT_YCbCr_420_SP;
dst.format = RK_FORMAT_RGB_888;
IM_STATUS status = imcvtcolor(src, dst, src.format, dst.format);
if (status != IM_STATUS_SUCCESS) {
printf(“ERROR: imcvtcolor failed!\n”);
return -1;
}
else {
printf(“imcvtcolor nv12_to_rgb24_640x640 success!\n”);
}
return 0;
} - 把rgb数据转换成Qimage srcBuf 是上面返回的rgb数据 ,w是宽,h是高,pDistImage是Qimage对象
void RGB2Image(void *srcBuf, int w, int h, QImage *pDistImage){
int i;
int r, g, b;
QRgb *point;
uchar *bit;
i = 0;
bit = (uchar )(srcBuf);
for(int y = 0; y < h; y ++){
for ( int x = 0; x < w; x ++ ){
/ Please attion the Littile-Edian and Big-Edian,
* The Order maybe R-G-B.
*/
b = (int)bit[i];
g = (int)bit[i+1];
r = (int)bit[i+2];
point = (QRgb *)pDistImage->scanLine(y) + x;
*point = qRgb(r, g, b);
i += 3;
}
}
} - 显示
MEDIA_BUFFER buffer;
void * pRknnInputData = malloc(YOLO_INPUT_SIZE);
buffer = RK_MPI_SYS_GetMediaBuffer(RK_ID_VI, 0, -1);//从指定通道VI中获取数据
ret = nv12_to_rgb24_640x640(RK_MPI_MB_GetPtr(buffer), pRknnInputData);//从指定的MEDIA_BUFFER中获取缓冲区数据指针=>nv12转rgb
mage image =QImage (640,640,QImage::Format_RGB32);
RGB2Image(pRknnInputData,640,640,&image);
labImg->setPixmap(QPixmap::fromImage(image)); - 注意事项
从摄像头初始化开始，各环节获取/设置的宽和高都要保持一致（640×640）。