工具:VS2019、OpenCV4.5、FFmpeg
代码主要分为两部分:
数据回调并缓存部分、解码显示部分
1. 数据回调并缓存
这部分就是获取网络视频流数据,我主要是调用了网络相机SDK中提供的数据回调函数,这部分代码主要作为参考,你可以根据自己的数据获取方式来实现。
/*
数据回调函数的实现
@parameter
u32DataType 数据包类型
pu8Buffer 数据包指针
u32Length 数据包长度
*/
#include<queue>
#include"H264Decode.h"
using namespace std;
// Global FIFO of (packet pointer, packet length) pairs: filled by the SDK
// stream callback, drained by the decoder's read_packet().
// NOTE(review): std::queue is not thread-safe; if the SDK callback runs on a
// different thread than the decoder, access needs a mutex — confirm the SDK
// threading model.
queue<pair<unsigned char*, int>> AVDataQueue;
/*
 * Stream data callback: caches incoming H.264 video packets for the decoder.
 * @param u32DataType  packet type (video / audio frame flag)
 * @param pu8Buffer    pointer to the packet payload (owned by the SDK)
 * @param u32Length    payload length in bytes
 * @return always 0
 */
int _stdcall StreamCallBackFunc(uint8_t u32DataType, unsigned char* pu8Buffer, int u32Length)
{
    // Fix: the packet type arrives in the u32DataType parameter (the original
    // tested an undeclared `pstruAV`, which does not compile).
    if (u32DataType == HI_NET_DEV_VIDEO_FRAME_FLAG) // video packet
    {
        // Cache the packet as one (pointer, length) element.
        // Fix: queue::push takes a single element, so wrap the pair (the
        // original passed two arguments and misspelled `u32Length`).
        // NOTE(review): this stores the SDK's buffer pointer directly; if the
        // SDK reuses the buffer after this callback returns, the consumer
        // will read garbage — copying the payload here would be safer.
        // Verify the SDK's buffer-lifetime guarantee.
        AVDataQueue.push(std::make_pair(pu8Buffer, u32Length));
        /* Throttle the producer so the decoder can keep up; without this the
           author observed corrupted (mosaic) output. The size threshold is a
           crude I-frame vs P-frame guess — tune the delays as needed, but
           keep them short or playback latency grows. */
        if (u32Length > 10000) {
            Sleep(25); // large packet: likely an I-frame
        }
        else {
            Sleep(40); // small packet: likely a P-frame
        }
    }
    else if (u32DataType == HI_NET_DEV_AUDIO_FRAME_FLAG)
    {
        /* Audio packets are not decoded; drop them. */
    }
    return 0;
}
int main()
{
    /* ... */
    // Register the stream callback so incoming packets get cached.
    HI_NET_DEV_SetStreamCallBack(netHandle, StreamCallBackFunc, &userData);
    /* ... */
    // H264Decode is implemented below.
    H264Decode decoder("H264 Decode");
    // Fix: call the method as declared in the header — the original invoked
    // the misspelled RealTimeDeocde()/playbackDecode(), which do not exist.
    decoder.RealTimeDecode(); // packets are already being cached; decode & display
    //decoder.PlaybackDecode("./video.h264"); // decode & display a local .h264 file
    return 0;
}
2. 解码显示
解码显示,主要参考官方源码中给的实例(目录:ffmpeg\doc\examples\decode_video.c),主要代码如下
// 头文件 H264Decode.h
#pragma once
#include<opencv2/opencv.hpp>
#include<opencv2/highgui/highgui_c.h>
#include<deque>
#include<queue>
#include<string>
#include<thread>
#include<utility>
extern "C" {
#include "ffmpeg/include/libavutil/avutil.h"
#include "ffmpeg/include/libavcodec/avcodec.h"
#include "ffmpeg/include/libavformat/avformat.h"
#include "ffmpeg/include/libavutil/frame.h"
#include "ffmpeg/include/libswscale/swscale.h"
#include "ffmpeg/include/libavutil/imgutils.h"
#include "ffmpeg/include/libavcodec/packet.h"
#include "ffmpeg/include/libavutil/error.h"
};
// Nominal frame geometry used to size the display window and the initial mat.
#define FRAME_WIDTH 1920
#define FRAME_HEIGHT 1080
// Shared packet queue filled by the SDK stream callback.
// Fix: the name must match the definition `AVDataQueue` — the original extern
// declared `AVDataqueue` (different case), a distinct identifier that would
// fail to link wherever it was used.
extern std::queue<std::pair<unsigned char*, int>> AVDataQueue;
// Cursor over a caller-owned byte buffer; passed as the opaque context to the
// custom AVIO read callback (see read_packet in the .cpp file).
struct buffer_data {
uint8_t* ptr;   // current read position
size_t size; ///< size left in the buffer
};
// Decodes H.264 video with FFmpeg and displays frames in an OpenCV window.
// Two modes: PlaybackDecode() reads a local .h264 file; RealTimeDecode()
// demuxes the live packets cached in AVDataQueue via a custom AVIO callback.
class H264Decode {
public:
// @param windowName  title of the OpenCV display window (created in ctor)
H264Decode(std::string windowName);
~H264Decode();
// Decode and display a local .h264 elementary-stream file.
void PlaybackDecode(const char* h264File);
// Decode and display the live stream cached in AVDataQueue (blocks).
void RealTimeDecode();
private:
const AVCodec* codec;                // decoder selected for the stream
AVCodecContext* context;             // decoding context
AVFrame* avframe;                    // reusable decoded-frame holder
AVPacket* avpkt;                     // reusable demuxed-packet holder
struct SwsContext* img_convert_ctx;  // pixel-format converter (decoder fmt -> BGR)
cv::Mat pCvMat;                      // BGR frame shown via cv::imshow
std::string windowName;              // OpenCV window title
AVFormatContext* pFormatCtx;         // demuxer context
AVCodecParserContext* CodeparserCtx; // parser context (currently unused)
AVIOContext* avio_ctx;               // custom IO context (real-time mode)
uint8_t* avio_ctx_buffer;            // buffer handed to avio_alloc_context
size_t avio_ctx_buffer_size;         // its size; set to VIDEO_INBUF_SIZE in ctor (old "4096" note was stale)
struct buffer_data bd;               // opaque context for read_packet
};
// 源文件H264Decode.cpp
#define _CRT_SECURE_NO_WARNINGS
#include "H264Decode.h"
#include <Windows.h>
#define VIDEO_INBUF_SIZE 80000
#define VIDEO_REFILL_THRESH 1024
/***************************************************************************************************************************
avcodec_find_decoder(): 通过解码器ID查找解码器:函数的参数是一个编码器的ID,返回查找到的编码器(没有找到就返回NULL)
avcodec_alloc_context3(): 初始化AVCodecContext
avcodec_open2(): 打开解码器
av_packet_alloc(): 初始化AVPacket
av_frame_alloc(): 初始化AVFrame
avformat_open_input(): 打开输入的视频流
avformat_find_stream_info(): 查找流的信息,填充AVFormatContext
av_read_frame(): 从流中读取一个AVPacket包数据
avcodec_send_packet(): 视频解码:发送一个AVPacket数据包给解码器
avcodec_receive_frame(): 视频解码:接收解码完成的一帧视频数据
***************************************************************************************************************************/
// Index of the selected video stream within pFormatCtx (-1 until found).
static int video_stream_index = -1;
/*
** PlaybackDecode函数可以用来播放你保存下来的h264视频文件
** @paramert:
** const char* h264File: 指定要播放文件的路径
*/
void H264Decode::PlaybackDecode(const char* h264File)
{
printf("H264Decoder initialize start. \n");
int ret;
if ((ret = avformat_open_input(&pFormatCtx, h264File, NULL, NULL)) < 0) {
fprintf(stderr, "Cannot open input file\n");
exit(ret);
}
if ((ret = avformat_find_stream_info(pFormatCtx, NULL)) < 0) {
fprintf(stderr, "Cannot find stream information\n");
exit(ret);
}
/* select the video stream */
ret = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0);
if (ret < 0) {
fprintf(stderr, "Cannot find a video stream in the input file\n");
exit(ret);
}
video_stream_index = ret;
/* create decoding context */
context = avcodec_alloc_context3(codec);
if (!context) {
fprintf(stderr, "avcodec_alloc_context3 error\n");
exit(AVERROR(ENOMEM));
}
avcodec_parameters_to_context(context, pFormatCtx->streams[video_stream_index]->codecpar);
/* init the video decoder */
if ((ret = avcodec_open2(context, codec, NULL)) < 0) {
fprintf(stderr, "Cannot open video decoder\n");
exit(ret);
}
printf("ffmpeg decode have initialized, begin decode ... \n");
while (1) {
//printf("av read frame\n");
if ((ret = av_read_frame(pFormatCtx, avpkt)) < 0)
break;
if (avpkt->stream_index == video_stream_index) {
ret = avcodec_send_packet(context, avpkt);
if (ret < 0) {
printf("Line %d : %s Error while sending a packet to the decoder\n", __LINE__, __func__);
break;
}
while (ret >= 0) {
//printf("avcodec_receive_frame\n");
ret = avcodec_receive_frame(context, avframe);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
}
else if (ret < 0) {
fprintf(stderr, "Error while receiving a frame from the decoder\n");
exit(1);
}
int width = avframe->width;
int height = avframe->height;
// Allocate the opencv mat and store its stride in a 1-element array
if (pCvMat.rows != height || pCvMat.cols != width || pCvMat.type() != CV_8UC3) pCvMat = cv::Mat(height, width, CV_8UC3);
int cvLinesizes[1];
cvLinesizes[0] = pCvMat.step1();
// Convert the color format and write directly to the opencv matrix
SwsContext* conversion = sws_getContext(width, height, (AVPixelFormat)avframe->format, width, height, AVPixelFormat::AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(conversion, avframe->data, avframe->linesize, 0, height, &pCvMat.data, cvLinesizes);
sws_freeContext(conversion);
cv::imshow(this->windowName, pCvMat);
cv::waitKey(30);
av_frame_unref(avframe);
}
}
av_packet_unref(avpkt);
}
}
/*=====================================================================================*/
//read_packet 从刚才缓存的h264视频数据队列中读取数据,这个函数是在实时解码视频流时使用的
/*
 * Custom AVIO read callback: feeds bytes from the global AVDataQueue to the
 * demuxer. Blocks (polling) until data is available.
 * @param opaque    user context (a buffer_data*; currently unused here)
 * @param buf       destination buffer
 * @param buf_size  capacity of buf
 * @return number of bytes copied (> 0)
 */
static int read_packet(void* opaque, uint8_t* buf, int buf_size)
{
    (void)opaque; // data is pulled straight from the global queue instead
    // Busy-wait until the SDK callback has queued a packet.
    // NOTE(review): std::queue is not thread-safe; producer (SDK callback)
    // and this consumer need a mutex — confirm the SDK threading model.
    while (AVDataQueue.empty())
    {
        Sleep(1);
    }
    buf_size = FFMIN(buf_size, AVDataQueue.front().second);
    /* copy internal buffer data to buf */
    memcpy(buf, AVDataQueue.front().first, buf_size);
    // Advance the cursor inside the packet currently at the head.
    AVDataQueue.front().first += buf_size;
    AVDataQueue.front().second -= buf_size;
    if (AVDataQueue.front().second == 0)
        AVDataQueue.pop(); // Fix: std::queue has pop(), not pop_front()
    // NOTE(review): the consumed packet's buffer is never released here; if
    // the producer heap-allocates copies, this leaks — verify ownership.
    return buf_size;
}
//read time decode initial
void H264Decode::RealTimeDecode()
{
int ret;
int count = 0;
while (AVDataQueue.empty()) Sleep(10);
bd.ptr = AVDataQueue.front().first;
bd.size = AVDataQueue.front().second;
avio_ctx_buffer = (unsigned char*)av_malloc(avio_ctx_buffer_size);
if (!avio_ctx_buffer) {
ret = AVERROR(ENOMEM);
return;
}
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size, 0, &bd, &read_packet, NULL, NULL);
if (!avio_ctx) {
printf("avio_alloc_context error\n");
ret = AVERROR(ENOMEM);
return;
}
if (!(pFormatCtx = avformat_alloc_context())) {
ret = AVERROR(ENOMEM);
return;
}
pFormatCtx->pb = avio_ctx;
ret = avformat_open_input(&pFormatCtx, "", NULL, NULL);
if (ret < 0) {
fprintf(stderr, "avformat_open_input Could not open input, return : %d\n", ret);
return;
}
ret = avformat_find_stream_info(pFormatCtx, NULL);
if (ret < 0) {
fprintf(stderr, "Could not find stream information\n");
return;
}
av_dump_format(pFormatCtx, 0, NULL, 0);
codec = avcodec_find_decoder(AV_CODEC_ID_H264); //函数的参数是一个编码器的ID,返回查找到的编码器(没有找到就返回NULL)
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
/* select the video stream */
ret = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0);
if (ret < 0) {
fprintf(stderr, "Cannot find a video stream in the input file\n");
exit(ret);
}
video_stream_index = ret;
/* create decoding context */
context = avcodec_alloc_context3(codec);
if (!context) {
fprintf(stderr, "avcodec_alloc_context3 error\n");
exit(AVERROR(ENOMEM));
}
avcodec_parameters_to_context(context, pFormatCtx->streams[video_stream_index]->codecpar);
context->thread_count = 3; //set thread number
/* init the video decoder */
if ((ret = avcodec_open2(context, codec, NULL)) < 0) {
fprintf(stderr, "Cannot open video decoder\n");
exit(ret);
}
//CodeparserCtx = av_parser_init(AV_CODEC_ID_H264);
//if (!CodeparserCtx) {
// fprintf(stderr, "Could not allocate video parser context\n");
// exit(2);
//}
while (1) {
//printf("av read frame\n");
if ((ret = av_read_frame(pFormatCtx, avpkt)) < 0)
break;
//av_log(NULL, AV_LOG_INFO, "packet: stream index %d, length %d\n", avpkt->stream_index, avpkt->size);
if (avpkt->stream_index == video_stream_index) {
ret = avcodec_send_packet(context, avpkt);
if (ret < 0) {
printf("Line %d : avcodec_send_packet error while sending a packet to the decoder, return : %d\n", __LINE__, ret);
continue;
}
while (ret >= 0) {
//printf("avcodec_receive_frame\n");
ret = avcodec_receive_frame(context, avframe);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
}
else if (ret < 0) {
fprintf(stderr, "Error while receiving a frame from the decoder\n");
exit(1);
}
int width = avframe->width;
int height = avframe->height;
// Allocate the opencv mat and store its stride in a 1-element array
if (pCvMat.rows != height || pCvMat.cols != width || pCvMat.type() != CV_8UC3) pCvMat = cv::Mat(height, width, CV_8UC3);
int cvLinesizes[1];
cvLinesizes[0] = pCvMat.step1();
//将图像数据转换为opencv的Mat格式
SwsContext* conversion = sws_getContext(width, height, (AVPixelFormat)avframe->format, width, height, AVPixelFormat::AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(conversion, avframe->data, avframe->linesize, 0, height, &pCvMat.data, cvLinesizes);
sws_freeContext(conversion);
//H264CvMatQue.push(pCvMat);
cv::imshow(this->windowName, pCvMat); //opencv显示
cv::waitKey(1);
av_frame_unref(avframe);
}
}
av_packet_unref(avpkt);
}
}
/*winName is the opencv imshow window name*/
/*
 * Construct the decoder: create the display window and allocate the reusable
 * AVPacket/AVFrame. All FFmpeg pointers start at nullptr so the destructor
 * can free them unconditionally.
 * @param winName  the OpenCV imshow window name
 */
H264Decode::H264Decode(std::string winName):
codec(nullptr),
context(nullptr),
avframe(nullptr),
avpkt(nullptr),
img_convert_ctx(nullptr),
pCvMat(cv::Mat(FRAME_HEIGHT, FRAME_WIDTH, CV_8UC3)),
windowName(winName),
pFormatCtx(nullptr),
CodeparserCtx(nullptr),
avio_ctx(nullptr),
avio_ctx_buffer(nullptr),
avio_ctx_buffer_size(VIDEO_INBUF_SIZE),
bd({0})
{
    // Resizable window at half the nominal 1920x1080 frame size.
    cv::namedWindow(this->windowName, CV_WINDOW_NORMAL);
    cv::resizeWindow(this->windowName, FRAME_WIDTH / 2, FRAME_HEIGHT / 2);
    /* alloc the packet */
    avpkt = av_packet_alloc();
    if (!avpkt) {
        fprintf(stderr, "Could not allocate video packet\n");
        exit(1); // fix: the decode loops dereference avpkt unconditionally,
                 // so continuing here (as the original did) would crash later
    }
    /* alloc the frame */
    avframe = av_frame_alloc();
    if (!avframe) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
}
// Release all FFmpeg resources. Every free below is NULL-safe, so this works
// whichever decode path (or none) was run.
H264Decode::~H264Decode() {
    // Fix: close the demuxer BEFORE destroying the custom IO context it may
    // still reference (the original freed avio_ctx first — use-after-free risk).
    avformat_close_input(&pFormatCtx);
    // Per the FFmpeg avio_reading example, the internal buffer may have been
    // replaced by libavformat, so free avio_ctx->buffer (not the original
    // avio_ctx_buffer) before freeing the context itself — the original
    // leaked it.
    if (avio_ctx)
        av_freep(&avio_ctx->buffer);
    avio_context_free(&avio_ctx);
    avcodec_free_context(&context);
    sws_freeContext(img_convert_ctx); // safe on NULL
    av_frame_free(&avframe);
    av_packet_free(&avpkt);
}