代码
参考FFMpeg中example代码写了个简单的HEVC解码器(实际上修改了CODECID应该都能解码)
example的代码位于doc/examples路径下
运行方法
获取到FFMpeg编译包(包括include和lib,还有dll这些,后面会写一下如何在windows下编译生成ffmpeg)
实际FFMpeg编译后生成包含的头文件很少,可能因为主要是为了包装成上层应用。因此我在读取解码后的量化参数的时候,把hevcdec.h之类的文件也全部复制过来了,主要是为了调用HEVCContext这个结构体,里面的qp_y_tab包含每个min_cb的量化参数。
应该是可以根据划分深度,获取到每个编码块的量化参数,这样更符合码流分析的习惯。但是目前没有找到用ffmpeg对HEVC码流实现码流分析的开源项目,因此都是自己瞎琢磨,每个参数每个变量去找他们的用途和含义,因此有很多不懂的地方,有写的不合理和不对的地方,烦请指出。
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavcodec/codec_desc.h>
#include <libavcodec/codec.h>
#include <libavcodec/hevcdec.h>
}
/* Number of raw bitstream bytes read from the input file per fread() call. */
#define INBUF_SIZE 4096
/* Output YUV file handle; opened in main(), written by pgm_save().
   NOTE(review): a global FILE* works here but passing it as a parameter
   would make pgm_save() reusable — the `filename` parameter is currently unused. */
FILE* fr;
/**
 * Append one decoded frame to the global output file `fr` as raw planar
 * YUV (luma plane, then Cb, then Cr; chroma planes subsampled 2x2,
 * i.e. assumes a 4:2:0 pixel format — TODO confirm frame->format).
 *
 * @param frame    decoded AVFrame whose data[]/linesize[] planes are read
 * @param xsize    visible luma width in pixels
 * @param ysize    visible luma height in pixels
 * @param filename unused; output goes to the already-open global `fr`
 *
 * Bug fixed: the original stepped through every plane with frame->width
 * (and width/2 for chroma). FFmpeg pads plane rows for alignment, so
 * consecutive rows are linesize[n] bytes apart, which may be larger than
 * the visible width — the old code produced sheared output for strided
 * frames. Rows are now addressed with linesize[n] and only the visible
 * xsize (xsize/2 for chroma) bytes of each row are written.
 */
static void pgm_save(AVFrame* frame, int xsize, int ysize,
                     char* filename)
{
    int row;

    /* Y plane: ysize rows of xsize bytes each. */
    for (row = 0; row < ysize; row++)
        fwrite(frame->data[0] + (size_t)frame->linesize[0] * row, 1, xsize, fr);
    /* Cb plane. */
    for (row = 0; row < ysize / 2; row++)
        fwrite(frame->data[1] + (size_t)frame->linesize[1] * row, 1, xsize / 2, fr);
    /* Cr plane. */
    for (row = 0; row < ysize / 2; row++)
        fwrite(frame->data[2] + (size_t)frame->linesize[2] * row, 1, xsize / 2, fr);
}
/**
 * Feed one packet to the decoder, then drain and save every frame it
 * produces, printing the per-min-CB luma QP table of each frame.
 *
 * @param dec_ctx  opened HEVC decoder context
 * @param frame    reusable AVFrame receiving decoded pictures
 * @param pkt      packet to decode, or NULL to flush the decoder
 * @param filename unused (output path is hard-coded in `buf`)
 *
 * Bug fixed: the original dereferenced dec_ctx->priv_data->ps.sps and
 * dumped qp_y_tab BEFORE checking avcodec_receive_frame()'s return
 * value. On AVERROR(EAGAIN) — no frame available yet, e.g. before the
 * first SPS has been parsed — `sps` may be NULL and qp_y_tab stale,
 * which crashes or prints garbage. The status check now comes first, so
 * decoder internals are only inspected once a frame actually exists.
 */
static void decode(AVCodecContext* dec_ctx, AVFrame* frame, AVPacket* pkt,
                   const char* filename)
{
    char buf[1024] = "D:\\VideoCoding\\MyFFMpegPro\\testFFMpegEncoder\\workspace\\test.yuv";
    int ret;

    ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;              /* need more input / fully drained */
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }

        /* A frame was delivered, so the SPS is guaranteed to be set.
           qp_y_tab holds one luma QP per minimum coding block,
           laid out row-major, min_cb_width entries per row. */
        HEVCContext* cur = (HEVCContext*)dec_ctx->priv_data;
        int min_cb_width = cur->ps.sps->min_cb_width;
        int min_cb_height = cur->ps.sps->min_cb_height;
        for (int i = 0; i < min_cb_height; i++) {
            for (int j = 0; j < min_cb_width; j++)
                std::cout << int(cur->qp_y_tab[i * min_cb_width + j]);
            printf("\n");
        }

        printf("saving frame %d\n", dec_ctx->frame_number);
        fflush(stdout);
        pgm_save(frame, frame->width, frame->height, buf);
    }
}
/**
 * Read a raw HEVC elementary stream from a hard-coded path, decode it
 * with libavcodec, and write the pictures as raw YUV to another
 * hard-coded path. Adapted from FFmpeg's doc/examples/decode_video.c.
 *
 * Bugs fixed vs. the original:
 *  - fclose(fr) was executed BEFORE the flush call
 *    decode(c, frame, NULL, ...), so frames emitted while draining the
 *    decoder were written to a closed stream (undefined behavior).
 *    The output file is now closed after the flush.
 *  - fopen_s(&fr, ...) was never checked, so an unwritable output path
 *    crashed later inside pgm_save().
 */
int main(int argc, char** argv)
{
    const char* filename, * outfilename;
    const AVCodec* codec;
    AVCodecParserContext* parser;
    AVCodecContext* c = NULL;
    FILE* f;
    AVFrame* frame;
    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t* data;
    size_t data_size;
    int ret;
    int eof;
    AVPacket* pkt;

    /* Hard-coded I/O paths (argv handling from the example was removed). */
    filename = "D:\\VideoCoding\\MyFFMpegPro\\testFFMpegEncoder\\workspace\\str.bin";
    outfilename = "D:\\VideoCoding\\MyFFMpegPro\\testFFMpegEncoder\\workspace\\output.yuv";
    std::cout << filename << std::endl;

    fopen_s(&fr, outfilename, "wb");
    if (!fr) {
        fprintf(stderr, "Could not open %s\n", outfilename);
        exit(1);
    }

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* Zero the padding tail so the parser never over-reads a damaged stream. */
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    /* Find the HEVC decoder (swap the codec ID to decode other formats). */
    codec = avcodec_find_decoder(AV_CODEC_ID_HEVC);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    parser = av_parser_init(codec->id);
    if (!parser) {
        fprintf(stderr, "parser not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* For HEVC the dimensions come from the bitstream (SPS), so nothing
       needs to be set on the context before opening it. */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    fopen_s(&f, filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    do {
        /* Read a chunk of the raw bitstream. */
        data_size = fread(inbuf, 1, INBUF_SIZE, f);
        if (ferror(f))
            break;
        eof = !data_size;

        /* Let the parser split the chunk into whole packets (NALs/AUs). */
        data = inbuf;
        while (data_size > 0 || eof) {
            ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
                data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (ret < 0) {
                fprintf(stderr, "Error while parsing\n");
                exit(1);
            }
            data += ret;
            data_size -= ret;
            if (pkt->size)
                decode(c, frame, pkt, outfilename);
            else if (eof)
                break;
        }
    } while (!eof);

    /* Flush the decoder — this can still emit frames, so the output
       file must stay open until the flush completes. */
    decode(c, frame, NULL, outfilename);
    fclose(fr);
    fclose(f);

    av_parser_close(parser);
    avcodec_free_context(&c);
    av_frame_free(&frame);
    av_packet_free(&pkt);
    return 0;
}
代码都是官方example文档里面的,所以没有写注释
在这里也想请教一下各位大佬,我现在知道解码流程了,但是每次解码都是hls_decode_quadtree()里面解码一个CU,而且里面只有一个HEVCLocalContext变量存储当前CU的解码信息,想要解码后获取每个编码块的编码信息(例如量化参数,划分深度,预测方向,运动矢量,参考帧等等),应该怎么做呢?现在的想法是改一下原始的hevcdec.c的代码,每次解码后把信息存下来,但是感觉改源码的做法有点奇怪,应该有更好的方法,各位大佬多多指教!