ffmpeg视频解码后得到的图像经常需要一些图像处理,而opencv是图像处理的神器。所以就需要两者之间互相转化:ffmpeg解码后的数据类型是AVFrame,opencv的图像数据结构是Mat。本文实现了avframe->mat、mat->avframe的互转,并封装成函数:
cv::Mat avframe2cvmat(AVFrame *avframe, int w = 0, int h = 0);
AVFrame *cvmat2avframe(cv::Mat mat);
avframe->mat
// Convert a decoded FFmpeg AVFrame (any pixel format) to a BGR24 cv::Mat.
// w/h <= 0 means "keep the source dimensions" (no resize).
// Returns an empty Mat when the frame is invalid or the conversion context
// cannot be created. The returned Mat owns its pixels; the AVFrame is not
// modified or freed.
cv::Mat avframe2cvmat(AVFrame *avframe, int w, int h) {
    if (!avframe || avframe->width <= 0 || avframe->height <= 0)
        return cv::Mat();
    if (w <= 0) w = avframe->width;
    if (h <= 0) h = avframe->height;
    struct SwsContext *sws_ctx = sws_getContext(
        avframe->width, avframe->height, (enum AVPixelFormat)avframe->format,
        w, h, AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
    if (!sws_ctx)  // e.g. AV_PIX_FMT_NONE or an unsupported format pair
        return cv::Mat();
    cv::Mat mat(h, w, CV_8UC3);
    // Let sws_scale write straight into the Mat's buffer. Building the
    // destination pointer/linesize arrays by hand (using mat.step, which
    // accounts for any row padding) removes the need for a throwaway
    // AVFrame and the long-deprecated avpicture_fill()/AVPicture API —
    // which the original code then released with av_free() instead of
    // av_frame_free(), leaking the frame's internals.
    uint8_t *dst_data[4] = { mat.data, NULL, NULL, NULL };
    int dst_linesize[4] = { static_cast<int>(mat.step), 0, 0, 0 };
    sws_scale(sws_ctx,
              (const uint8_t *const *)avframe->data, avframe->linesize,
              0, avframe->height,  // slice = rows 0..height: whole frame
              dst_data, dst_linesize);
    sws_freeContext(sws_ctx);
    return mat;
}
参数w、h可以对输入图像进行resize;默认(传0或不传)保持和原图大小一致,即不做resize。
mat->avframe
AVFrame *cvmat2avframe(cv::Mat mat) {
// alloc avframe
AVFrame *avframe = av_frame_alloc();
if (avframe && !mat.empty()) {
avframe->format = AV_PIX_FMT_YUV420P;
avframe->width = mat.cols;
avframe->height = mat.rows;
av_frame_get_buffer(avframe, 0);
av_frame_make_writable(avframe);
cv::Mat yuv; // convert to yuv420p first
cv::cvtColor(mat, yuv, cv::COLOR_BGR2YUV_I420);
// calc frame size
int frame_size = mat.cols * mat.rows;
unsigned char *pdata = yuv.data;
// fill yuv420
// yyy yyy yyy yyy
// uuu
// vvv
avframe->data[0] = pdata; // fill y
avframe->data[1] = pdata + frame_size; // fill u
avframe->data[2] = pdata + frame_size * 5 / 4; // fill v
}
return avframe;
}
emmm···,注释得很清楚了!用完了别忘了用 av_frame_free() 释放 AVFrame~~