最近做的一个项目,需要播放服务端传过来的h264裸流数据,于是做了一个简易的播放器,采用ffmpeg解码h264并用opencv显示图像。实现原理很简单,首先接收到一个完整的h264帧之后传给ffmpeg的AVPacket,然后调用avcodec_send_packet()和avcodec_receive_frame()解码,得到一个AVFrame,然后调用sws_scale()把解码后的yuv图像转换成opencv能使用的RGB图像,然后每解码转换一帧数据就调用一次cv::imshow()显示图像,具体实现代码如下:
//H264ToCVShow.h
#ifndef _H264TOCVSHOW_H
#define _H264TOCVSHOW_H
//#define __STDC_CONSTANT_MACROS
#include <stdio.h>
// FFmpeg C headers require C linkage when compiled as C++.
extern "C" {
#include "libavutil/avutil.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
};
// Opencv
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
// C++ standard library
#include <chrono>
#include <string>
#include <thread>
// Minimal H.264 player: feed complete raw Annex-B frames to the FFmpeg
// decoder via decode(), convert each decoded picture to a BGR cv::Mat with
// swscale, and display it in an OpenCV window on a background thread
// started by start().
// NOTE(review): matReady / pCvMat are shared between the decode caller and
// the display thread without any synchronization — TODO: add a mutex or
// make matReady a std::atomic<bool>.
class H264ToCVShow {
public :
// winName: title of the OpenCV window used by play().
H264ToCVShow(std::string winName);
~H264ToCVShow();
// Allocates and opens the H.264 decoder; called by the constructor.
void init();
// Decodes one complete H.264 frame from inputbuf (size bytes) and updates
// pCvMat with the converted BGR image.
void decode(unsigned char *inputbuf, size_t size);
// Spawns the display thread running play().
void start();
// Display loop: shows the latest decoded frame until the process exits.
void play();
// Returns the latest decoded frame, or an empty Mat if none is ready.
cv::Mat getMat();
private:
// FFmpeg decoder handle (H.264).
const AVCodec *codec;
// Decoder context allocated in init().
AVCodecContext *c = nullptr;
// Count of decoded frames (bookkeeping only).
int frame_count;
// Destination of avcodec_receive_frame(); owned by this class.
AVFrame *frame;
// Reusable packet wrapping the caller's input buffer (not owned).
AVPacket avpkt;
// Cached swscale context — NOTE(review): currently unused by decode().
struct SwsContext *img_convert_ctx;
// Latest decoded frame as BGR24; valid only when matReady is true.
cv::Mat pCvMat;
// Set when pCvMat holds a displayable frame (not thread-safe, see above).
bool matReady;
// Display thread created by start(); joined in the destructor.
std::thread *play_th = nullptr;
// OpenCV window title passed to cv::imshow().
std::string windowName;
};
#endif
//H264ToCVShow.cpp
#include "H264ToCVShow.h"
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"swscale.lib")
#pragma comment(lib,"opencv_world320d.lib")
void H264ToCVShow::init() {
matReady = false;
av_init_packet(&avpkt);
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame_count = 0;
}
void H264ToCVShow::decode(unsigned char *inputbuf, size_t size){
avpkt.size = size;
if(avpkt.size == 0)
return;
avpkt.data = inputbuf;
int ret = avcodec_send_packet(c, &avpkt);
if (ret != 0)
{
printf("avcodec_send_packet error\n");
}
ret = avcodec_receive_frame(c, frame);
while (true)
{
if (ret == 0)
{
int width = frame->width;
int height = frame->height;
// Allocate the opencv mat and store its stride in a 1-element array
if (pCvMat.rows != height || pCvMat.cols != width || pCvMat.type() != CV_8UC3) pCvMat = cv::Mat(height, width, CV_8UC3);
int cvLinesizes[1];
cvLinesizes[0] = pCvMat.step1();
// Convert the colour format and write directly to the opencv matrix
SwsContext* conversion = sws_getContext(width, height, (AVPixelFormat)frame->format, width, height, AVPixelFormat::AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(conversion, frame->data, frame->linesize, 0, height, &pCvMat.data, cvLinesizes);
sws_freeContext(conversion);
matReady = true;
ret = avcodec_receive_frame(c, frame);
if (ret != 0) {
matReady = false;
break;
}
}
else {
matReady = false;
break;
}
}
}
// Display loop, run on play_th: shows the most recent decoded frame.
// NOTE(review): matReady/pCvMat are read here while decode() writes them on
// another thread with no synchronization — TODO: add a mutex/atomic. Also,
// cv::imshow from a non-main thread is platform-dependent; verify on target.
void H264ToCVShow::play() {
    while (true)
    {
        if (matReady) {
            cv::imshow(windowName, pCvMat);
            cv::waitKey(1);
        } else {
            // BUG FIX: the original spun at 100% CPU while waiting for the
            // first frame; yield the core instead.
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }
}
// Launch the display thread. Safe to call once; subsequent calls are no-ops.
void H264ToCVShow::start()
{
    // BUG FIX: the original unconditionally allocated a new thread, so a
    // second call leaked the previous std::thread object (and terminated the
    // process when its destructor ran on a joinable thread).
    if (play_th == nullptr)
        play_th = new std::thread(&H264ToCVShow::play, this);
}
// Construct a player bound to the given OpenCV window title and set up the
// decoder immediately.
H264ToCVShow::H264ToCVShow(std::string winName)
    : windowName(winName)
{
    init();
}
// Tear down the display thread and release the FFmpeg resources that init()
// allocated.
H264ToCVShow::~H264ToCVShow() {
    // BUG FIX: the original dereferenced play_th unconditionally, crashing
    // whenever start() had never been called.
    if (play_th != nullptr) {
        // NOTE(review): play() loops forever, so this join blocks
        // indefinitely — a stop flag is needed for a clean shutdown. TODO.
        play_th->join();
        delete play_th;
        play_th = nullptr;
    }
    // BUG FIX: the frame and codec context allocated in init() were leaked.
    if (frame != nullptr)
        av_frame_free(&frame);
    if (c != nullptr)
        avcodec_free_context(&c);
}
// Return a copy of the latest decoded frame, or an empty Mat when no frame
// has been decoded yet.
cv::Mat H264ToCVShow::getMat() {
    if (matReady) {
        // BUG FIX: the original returned a shallow cv::Mat that shared pixel
        // memory with pCvMat, which the decode path keeps overwriting via
        // sws_scale — callers saw torn/changing images. Hand back a deep copy.
        return pCvMat.clone();
    }
    return cv::Mat();
}