// ===== 编码 (Encoding): YUV420P -> H.264 =====
#include <iostream>
#define __STDC_CONSTANT_MACROS
extern "C" {
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include "libavformat/avformat.h"
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/time.h>
};
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avdevice.lib")
#pragma comment(lib,"avfilter.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"postproc.lib")
#pragma comment(lib,"swresample.lib")
#pragma comment(lib,"swscale.lib")
using namespace std;
#define AV_CODEC_CAP_DELAY (1<<5)
#define AVIO_FLAG_READ_WRITE (1|2)
//YUV编码为H.264
// Drain any frames still buffered inside the encoder after all input
// has been fed. Returns 0 on success (or when the codec buffers
// nothing), a negative error code otherwise.
int flush_encoder(AVFormatContext* fmt_ctx, unsigned int stream_index) {
    int ret = 0;
    int got_frame;
    AVPacket enc_pkt;
    // Codecs without the DELAY capability never buffer frames: nothing to flush.
    if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities & AV_CODEC_CAP_DELAY))
        return 0;
    while (1) {
        // Initialize the packet first, then mark it empty so the encoder
        // allocates the output buffer itself.
        av_init_packet(&enc_pkt);
        enc_pkt.data = NULL;
        enc_pkt.size = 0;
        // A NULL frame asks the encoder for a buffered (delayed) frame.
        ret = avcodec_encode_video2(fmt_ctx->streams[stream_index]->codec, &enc_pkt, NULL, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame) {
            // Encoder fully drained.
            ret = 0;
            break;
        }
        printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);
        ret = av_write_frame(fmt_ctx, &enc_pkt);
        if (ret < 0)
            break;
    }
    // fixed: the function previously fell off the end without returning a
    // value (undefined behavior when the caller reads the result); the
    // pointless av_frame_free(NULL) no-op was also removed.
    return ret;
}
// Comment selection: CTRL+K, CTRL+C (Visual Studio)
// Uncomment selection: CTRL+K, CTRL+U
int main(int argc, char* argv[]) {
AVFormatContext* pFormatCtx;
AVOutputFormat* fmt;
AVStream* video_st;
AVCodecContext* pCodecCtx;
AVCodec* pCodec;
AVPacket pkt;
uint8_t* picture_buf;
AVFrame* pFrame;
int picture_size;
int y_size;
int framecnt = 0;
FILE* in_file = fopen("C:/Users/37075/source/repos/ffmpeg_learn/yuv/ElephantsDream_CIF_24fps.yuv", "rb");
int in_w = 352, in_h = 288;
int framenum = 14315;
const char* out_file = "C:/Users/37075/source/repos/ffmpeg_learn/h264/output2.h264";
//注册FFmpeg所有编码器
av_register_all();
//Method 1.
//pFormatCtx = avformat_alloc_context();
//fmt = av_guess_format(NULL, out_file, NULL);
//pFormatCtx->oformat = fmt;
//Method 2.
//初始化输出的码流AVFormatContext
avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
fmt = pFormatCtx->oformat;
//Open output URL
if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
printf("Failed to open output file! \n");
return -1;
}
//创建输出码流的AVStream
video_st = avformat_new_stream(pFormatCtx, 0);
if (video_st == NULL){
return -1;
}
pCodecCtx = video_st->codec;
pCodecCtx->codec_id = fmt->video_codec;
pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
pCodecCtx->width = in_w;
pCodecCtx->height = in_h;
pCodecCtx->bit_rate = 400000;
pCodecCtx->gop_size = 250;
pCodecCtx->time_base.num = 1;
pCodecCtx->time_base.den = 25;
pCodecCtx->qmin = 10;
pCodecCtx->qmax = 51;
pCodecCtx->max_b_frames = 3;
AVDictionary* param = 0;
//H.264
if (pCodecCtx->codec_id == AV_CODEC_ID_H264) {
av_dict_set(¶m, "preset", "slow", 0);
av_dict_set(¶m, "tune", "zerolatency", 0);
}
if (pCodecCtx->codec_id == AV_CODEC_ID_HEVC) {
av_dict_set(¶m, "preset", "ultrafast", 0);
av_dict_set(¶m, "tune", "zerolatency", 0);
}
av_dump_format(pFormatCtx, 0, out_file, 1);
//查找编码器
pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
if (!pCodec) {
printf("Can not find encoder! \n");
return -1;
}
//打开编码器
if (avcodec_open2(pCodecCtx, pCodec, ¶m) < 0) {
printf("Failed to open encoder! \n");
return -1;
}
pFrame = av_frame_alloc();
picture_size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
picture_buf = (uint8_t*)av_malloc(picture_size);
avpicture_fill((AVPicture*)pFrame, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
pFrame->format = pCodecCtx->pix_fmt;
pFrame->width = pCodecCtx->width;
pFrame->height = pCodecCtx->height;
//Write File Header
avformat_write_header(pFormatCtx, NULL);
av_new_packet(&pkt, picture_size);
y_size = pCodecCtx->width * pCodecCtx->height;
int64_t iStart = av_gettime();
for (int i = 0; i < framenum; i++) {
//Read raw YUV data
if (fread(picture_buf, 1, y_size * 3 / 2, in_file) <= 0) {
printf("Failed to read raw data! \n");
return -1;
}
else if (feof(in_file)) {
break;
}
pFrame->data[0] = picture_buf; // Y
pFrame->data[1] = picture_buf + y_size; // U
pFrame->data[2] = picture_buf + y_size * 5 / 4; // V
//PTS
//pFrame->pts=i;
pFrame->pts = i * (video_st->time_base.den) / ((video_st->time_base.num) * 25);
int got_picture = 0;
//Encode
//编辑一帧视频,将AVFrame(yuv)编码为AVPacket(h264)
int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_picture);
if (ret < 0) {
printf("Failed to encode! \n");
return -1;
}
if (got_picture == 1) {
// printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, pkt.size);
framecnt++;
pkt.stream_index = video_st->index;
//将编码后的视频码流写入文件
ret = av_write_frame(pFormatCtx, &pkt);
av_free_packet(&pkt);
}
}
printf("time: %d\n", av_gettime() - iStart);
//Flush Encoder
//输入的像素数据读取完成后调用该函数
//输出编码器中剩余的AVPacket
int ret = flush_encoder(pFormatCtx, 0);
if (ret < 0) {
printf("Flushing encoder failed\n");
return -1;
}
//Write file trailer
av_write_trailer(pFormatCtx);
//Clean
if (video_st) {
avcodec_close(video_st->codec);
av_free(pFrame);
av_free(picture_buf);
}
avio_close(pFormatCtx->pb);
avformat_free_context(pFormatCtx);
fclose(in_file);
return 0;
}
#include <iostream>
#define __STDC_CONSTANT_MACROS
extern "C" {
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include "libavformat/avformat.h"
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/time.h>
};
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avdevice.lib")
#pragma comment(lib,"avfilter.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"postproc.lib")
#pragma comment(lib,"swresample.lib")
#pragma comment(lib,"swscale.lib")
using namespace std;
#define AV_CODEC_CAP_DELAY (1<<5)
#define AVIO_FLAG_READ_WRITE (1|2)
//YUV编码为H.264
// Drain any frames still buffered inside the encoder after all input
// has been fed. Returns 0 on success (or when the codec buffers
// nothing), a negative error code otherwise.
int flush_encoder(AVFormatContext* fmt_ctx, unsigned int stream_index) {
    int ret = 0;
    int got_frame;
    AVPacket enc_pkt;
    // Codecs without the DELAY capability never buffer frames: nothing to flush.
    if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities & AV_CODEC_CAP_DELAY))
        return 0;
    while (1) {
        // Initialize the packet first, then mark it empty so the encoder
        // allocates the output buffer itself.
        av_init_packet(&enc_pkt);
        enc_pkt.data = NULL;
        enc_pkt.size = 0;
        // A NULL frame asks the encoder for a buffered (delayed) frame.
        ret = avcodec_encode_video2(fmt_ctx->streams[stream_index]->codec, &enc_pkt, NULL, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame) {
            // Encoder fully drained.
            ret = 0;
            break;
        }
        printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);
        ret = av_write_frame(fmt_ctx, &enc_pkt);
        if (ret < 0)
            break;
    }
    // fixed: the function previously fell off the end without returning a
    // value (undefined behavior when the caller reads the result); the
    // pointless av_frame_free(NULL) no-op was also removed.
    return ret;
}
int main(int argc, char* argv[]) {
AVFormatContext* pFormatCtx;
AVOutputFormat* fmt;
AVStream* video_st;
AVCodecContext* pCodecCtx;
AVCodec* pCodec;
AVPacket pkt;
uint8_t* picture_buf;
AVFrame* pFrame;
int picture_size;
int y_size;
int framecnt = 0;
FILE* in_file = fopen("C:/Users/37075/source/repos/ffmpeg_learn/yuv/ElephantsDream_CIF_24fps.yuv", "rb");
int in_w = 352, in_h = 288;
int framenum = 14315;
const char* out_file = "C:/Users/37075/source/repos/ffmpeg_learn/h264/output2.h264";
//注册FFmpeg所有编码器
av_register_all();
//Method 1.
//pFormatCtx = avformat_alloc_context();
//fmt = av_guess_format(NULL, out_file, NULL);
//pFormatCtx->oformat = fmt;
//Method 2.
//初始化输出的码流AVFormatContext
avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
fmt = pFormatCtx->oformat;
//Open output URL
if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
printf("Failed to open output file! \n");
return -1;
}
//创建输出码流的AVStream
video_st = avformat_new_stream(pFormatCtx, 0);
if (video_st == NULL) {
return -1;
}
AVDictionary* param = 0;
av_dump_format(pFormatCtx, 0, out_file, 1);
//查找编码器
pCodec = avcodec_find_encoder_by_name("h264_nvenc");
if (!pCodec) {
printf("Can not find encoder! \n");
return -1;
}
pCodecCtx = avcodec_alloc_context3(pCodec);
if (!pCodecCtx) {
printf("Can not allocate video codec context! \n");
return -1;
}
pCodecCtx = video_st->codec;
pCodecCtx->codec_id = fmt->video_codec;
pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
pCodecCtx->width = in_w;
pCodecCtx->height = in_h;
pCodecCtx->bit_rate = 400000;
pCodecCtx->gop_size = 250;
pCodecCtx->time_base.num = 1;
pCodecCtx->time_base.den = 25;
pCodecCtx->qmin = 10;
pCodecCtx->qmax = 51;
pCodecCtx->max_b_frames = 3;
//H.264
if (pCodecCtx->codec_id == AV_CODEC_ID_H264) {
av_dict_set(¶m, "preset", "slow", 0);
av_dict_set(¶m, "tune", "zerolatency", 0);
}
if (pCodecCtx->codec_id == AV_CODEC_ID_HEVC) {
av_dict_set(¶m, "preset", "ultrafast", 0);
av_dict_set(¶m, "tune", "zerolatency", 0);
}
//打开编码器
if (avcodec_open2(pCodecCtx, pCodec, ¶m) < 0) {
printf("Failed to open encoder! \n");
return -1;
}
pFrame = av_frame_alloc();
picture_size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
picture_buf = (uint8_t*)av_malloc(picture_size);
avpicture_fill((AVPicture*)pFrame, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
pFrame->format = pCodecCtx->pix_fmt;
pFrame->width = pCodecCtx->width;
pFrame->height = pCodecCtx->height;
printf("width:%d\n", pFrame->width);
printf("height:%d\n", pFrame->height);
//Write File Header
avformat_write_header(pFormatCtx, NULL);
av_new_packet(&pkt, picture_size);
y_size = pCodecCtx->width * pCodecCtx->height;
int64_t iStart = av_gettime();
for (int i = 0; i < framenum; i++) {
//Read raw YUV data
if (fread(picture_buf, 1, y_size * 3 / 2, in_file) <= 0) {
printf("Failed to read raw data! \n");
return -1;
}
else if (feof(in_file)) {
break;
}
pFrame->data[0] = picture_buf; // Y
pFrame->data[1] = picture_buf + y_size; // U
pFrame->data[2] = picture_buf + y_size * 5 / 4; // V
//PTS
//pFrame->pts=i;
pFrame->pts = i * (video_st->time_base.den) / ((video_st->time_base.num) * 25);
int got_picture = 0;
//Encode
//编辑一帧视频,将AVFrame(yuv)编码为AVPacket(h264)
int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_picture);
if (ret < 0) {
printf("Failed to encode! \n");
return -1;
}
if (got_picture == 1) {
// printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, pkt.size);
framecnt++;
pkt.stream_index = video_st->index;
//将编码后的视频码流写入文件
ret = av_write_frame(pFormatCtx, &pkt);
av_free_packet(&pkt);
}
}
printf("time: %d\n", av_gettime() - iStart);
//Flush Encoder
//输入的像素数据读取完成后调用该函数
//输出编码器中剩余的AVPacket
int ret = flush_encoder(pFormatCtx, 0);
if (ret < 0) {
printf("Flushing encoder failed\n");
return -1;
}
//Write file trailer
av_write_trailer(pFormatCtx);
//Clean
if (video_st) {
avcodec_close(video_st->codec);
av_free(pFrame);
av_free(picture_buf);
}
avio_close(pFormatCtx->pb);
avformat_free_context(pFormatCtx);
fclose(in_file);
return 0;
}
// ===== 解码 (Decoding): H.264 -> YUV420P =====
#include <iostream>
#include <highgui.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#define __STDC_CONSTANT_MACROS
extern "C" {
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include "libavformat/avformat.h"
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/time.h>
};
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avdevice.lib")
#pragma comment(lib,"avfilter.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"postproc.lib")
#pragma comment(lib,"swresample.lib")
#pragma comment(lib,"swscale.lib")
using namespace std;
using namespace cv;
#define OUTPUT_YUV420P 0
#define SWS_BICUBIC 4
#define SDL_INIT_VIDEO 0x00000020u
#define SDL_INIT_AUDIO 0x00000010u
#define SDL_INIT_TIMER 0x00000001u
#define SDL_WINDOWPOS_UNDEFINED (0x1FFF0000|0)
// Play back a raw YUV420P file with OpenCV: read it frame by frame,
// convert each frame to BGR and display it at roughly `fps` frames/s.
// NOTE: the caller keeps ownership of pFileIn and must fclose() it.
void DisplayYUV(int w, int h, int fps, FILE* pFileIn)
{
    printf("yuv file w: %d, h: %d \n", w, h);
    // Rewind so playback always starts at the first frame, even if the
    // caller just finished writing the file.
    fseek(pFileIn, 0, SEEK_SET);
    // YUV420P frame size: full Y plane + quarter-size U and V planes.
    int bufLen = w * h * 3 / 2;
    unsigned char* pYuvBuf = new unsigned char[bufLen];
    for (int i = 0; ; i++)
    {
        // fixed: the fread result was ignored and the loop ran a
        // hard-coded 14315 iterations, re-displaying the stale buffer
        // after EOF; stop at the end of the file instead.
        if (fread(pYuvBuf, bufLen * sizeof(unsigned char), 1, pFileIn) != 1)
            break;
        Mat yuvImg;
        // An MxN BGR image corresponds to a (3M/2)xN one-channel YUV420 image.
        yuvImg.create(h * 3 / 2, w, CV_8UC1);
        memcpy(yuvImg.data, pYuvBuf, bufLen * sizeof(unsigned char));
        Mat rgbImg;
        // Color-space conversion: YUV420 (I420) -> BGR.
        cvtColor(yuvImg, rgbImg, CV_YUV2BGR_I420);
        imshow("rgb", rgbImg);
        waitKey(1000 / fps);
        printf("cnt: %d \n", i);
    }
    delete[] pYuvBuf;
    // fixed: the fclose(pFileIn) that used to live here caused a double
    // fclose (undefined behavior) because main() closes the file too.
}
// Decode an H.264 elementary stream with the software decoder, convert
// every frame to YUV420P, dump the result to a .yuv file and play it back.
int main(int argc, char* argv[]) {
    AVFormatContext* pFormatCtx;
    int i, videoindex;
    AVCodecContext* pCodecCtx;
    AVCodec* pCodec;
    AVFrame* pFrame, * pFrameYUV;   // decoded frame / converted YUV420P frame
    unsigned char* out_buffer;
    AVPacket* packet;               // compressed (H.264) data
    struct SwsContext* img_convert_ctx;
    int y_size;
    FILE* fp_yuv;
    int ret, got_picture;
    char filepath[] = "c:/users/37075/source/repos/ffmpeg_learn/h264/output2.h264";
    // Initialization (required by pre-4.0 FFmpeg).
    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();
    // Open the input file.
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    // Probe the streams.
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        printf("Couldn't find stram information.\n");
        return -1;
    }
    // Locate the first video stream.
    videoindex = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    if (videoindex == -1) {
        printf("Didn't find a video stream.\n");
        return -1;
    }
    // Find the decoder for the stream.
    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    if ((pCodec = avcodec_find_decoder(pCodecCtx->codec_id)) == NULL) {
        printf("Codec not found.\n");
        return -1;
    }
    // Open the decoder.
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        return -1;
    }
    // Dump basic container information to a text file.
    FILE* fp = fopen("c:/users/37075/source/repos/ffmpeg_learn/info/output.txt", "wb+");
    if (fp) {   // fixed: fp was used without a NULL check
        // fixed: duration is int64_t; printing it with "%d" was undefined behavior.
        fprintf(fp, "Duration: %lld\n", (long long)pFormatCtx->duration);
        fprintf(fp, "Long Name: %s\n", pFormatCtx->iformat->long_name);
        fprintf(fp, "Width*Height: %d*%d\n", pFormatCtx->streams[videoindex]->codec->width, pFormatCtx->streams[videoindex]->codec->height);
        fclose(fp);
    }
    // Allocate frames and the YUV420P output buffer.
    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    out_buffer = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    // Point pFrameYUV->data / linesize into out_buffer.
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
    packet = (AVPacket*)av_malloc(sizeof(AVPacket));
    printf("--------------- File Information ----------------\n");
    // Dump information about the file onto standard error.
    av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");
    // Converter from the decoder's pixel format to YUV420P.
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    fp_yuv = fopen("c:/users/37075/source/repos/ffmpeg_learn/yuv/output.yuv", "wb+");
    if (fp_yuv == NULL) {   // fixed: fp_yuv was used without a NULL check
        printf("Failed to open output yuv file.\n");
        return -1;
    }
    // Demux the whole stream, decode packets into frames, convert each
    // frame to YUV420P and append it to fp_yuv.
    int64_t iStart = av_gettime();
    while (av_read_frame(pFormatCtx, packet) >= 0) {
        // Only handle packets from the video stream.
        if (packet->stream_index == videoindex) {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0) {
                printf("Decode Error.\n");
                return -1;
            }
            if (got_picture) {
                sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                    pFrameYUV->data, pFrameYUV->linesize);
                y_size = pCodecCtx->width * pCodecCtx->height;
                fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);     //Y
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
            }
        }
        av_free_packet(packet);
    }
    // fixed: av_gettime() returns int64_t; "%d" was undefined behavior.
    printf("time: %lld\n", (long long)(av_gettime() - iStart));
    // Flush the decoder: feed it empty packets until no more frames come out.
    // fixed: the original flush loop broke when a frame WAS produced
    // (dropping it) and re-decoded the last demuxed packet instead of an
    // empty one.
    packet->data = NULL;
    packet->size = 0;
    while (1) {
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if (ret < 0)
            break;
        if (!got_picture)   // decoder fully drained
            break;
        sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
            pFrameYUV->data, pFrameYUV->linesize);
        y_size = pCodecCtx->width * pCodecCtx->height;
        fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);     //Y
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
    }
    sws_freeContext(img_convert_ctx);
    // Play the decoded YUV back with OpenCV (DisplayYUV rewinds the file).
    DisplayYUV(pCodecCtx->width, pCodecCtx->height, 25, fp_yuv);
    fclose(fp_yuv);
    av_free(packet);   // fixed: the packet itself was allocated but never freed
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    return 0;
}
#include <iostream>
#include <highgui.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#define __STDC_CONSTANT_MACROS
extern "C" {
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include "libavformat/avformat.h"
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/time.h>
};
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avdevice.lib")
#pragma comment(lib,"avfilter.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"postproc.lib")
#pragma comment(lib,"swresample.lib")
#pragma comment(lib,"swscale.lib")
using namespace std;
using namespace cv;
#define OUTPUT_YUV420P 0
#define SWS_BICUBIC 4
#define SDL_INIT_VIDEO 0x00000020u
#define SDL_INIT_AUDIO 0x00000010u
#define SDL_INIT_TIMER 0x00000001u
#define SDL_WINDOWPOS_UNDEFINED (0x1FFF0000|0)
// Play back a raw YUV420P file with OpenCV: read it frame by frame,
// convert each frame to BGR and display it at roughly `fps` frames/s.
// NOTE: the caller keeps ownership of pFileIn and must fclose() it.
void DisplayYUV(int w, int h, int fps, FILE* pFileIn)
{
    printf("yuv file w: %d, h: %d \n", w, h);
    // Rewind so playback always starts at the first frame, even if the
    // caller just finished writing the file.
    fseek(pFileIn, 0, SEEK_SET);
    // YUV420P frame size: full Y plane + quarter-size U and V planes.
    int bufLen = w * h * 3 / 2;
    unsigned char* pYuvBuf = new unsigned char[bufLen];
    for (int i = 0; ; i++)
    {
        // fixed: the fread result was ignored and the loop ran a
        // hard-coded 14315 iterations, re-displaying the stale buffer
        // after EOF; stop at the end of the file instead.
        if (fread(pYuvBuf, bufLen * sizeof(unsigned char), 1, pFileIn) != 1)
            break;
        Mat yuvImg;
        // An MxN BGR image corresponds to a (3M/2)xN one-channel YUV420 image.
        yuvImg.create(h * 3 / 2, w, CV_8UC1);
        memcpy(yuvImg.data, pYuvBuf, bufLen * sizeof(unsigned char));
        Mat rgbImg;
        // Color-space conversion: YUV420 (I420) -> BGR.
        cvtColor(yuvImg, rgbImg, CV_YUV2BGR_I420);
        imshow("rgb", rgbImg);
        waitKey(1000 / fps);
        printf("cnt: %d \n", i);
    }
    delete[] pYuvBuf;
    // fixed: the fclose(pFileIn) that used to live here caused a double
    // fclose (undefined behavior) because main() closes the file too.
}
// Decode an H.264 elementary stream with the NVIDIA hardware decoder
// (h264_cuvid), convert every frame to YUV420P, dump the result to a
// .yuv file and play it back.
int main(int argc, char* argv[]) {
    AVFormatContext* pFormatCtx;
    int i, videoindex;
    AVCodecContext* pCodecCtx;
    AVCodec* pCodec;
    AVFrame* pFrame, * pFrameYUV;   // decoded frame / converted YUV420P frame
    unsigned char* out_buffer;
    AVPacket* packet;               // compressed (H.264) data
    struct SwsContext* img_convert_ctx;
    int y_size;
    FILE* fp_yuv;
    int ret, got_picture;
    char filepath[] = "c:/users/37075/source/repos/ffmpeg_learn/h264/output2.h264";
    // Initialization (required by pre-4.0 FFmpeg).
    av_register_all();
    avcodec_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();
    // Open the input file.
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    // Probe the streams.
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        printf("Couldn't find stram information.\n");
        return -1;
    }
    // Locate the first video stream.
    videoindex = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    if (videoindex == -1) {
        printf("Didn't find a video stream.\n");
        return -1;
    }
    // Use the NVIDIA hardware H.264 decoder.
    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder_by_name("h264_cuvid");
    if (!pCodec) {
        printf("Codec not found.\n");
        return -1;
    }
    // Open the decoder.
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        return -1;
    }
    // Dump basic container information (file handle kept for parity with
    // the software-decoder variant; the fields themselves go to stderr).
    FILE* fp = fopen("c:/users/37075/source/repos/ffmpeg_learn/info/output.txt", "wb+");
    // fixed: duration is int64_t; printing it with "%d" was undefined behavior.
    fprintf(stderr, "Duration: %lld\n", (long long)pFormatCtx->duration);
    fprintf(stderr, "Long Name: %s\n", pFormatCtx->iformat->long_name);
    fprintf(stderr, "Width*Height: %d*%d\n", pFormatCtx->streams[videoindex]->codec->width, pFormatCtx->streams[videoindex]->codec->height);
    // fixed: width and height were printed under swapped labels.
    printf("width: %d\n", pCodecCtx->width);
    printf("height: %d\n", pCodecCtx->height);
    if (fp)   // fixed: fclose(fp) was called without a NULL check
        fclose(fp);
    // Allocate frames and the YUV420P output buffer.
    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    out_buffer = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    // Point pFrameYUV->data / linesize into out_buffer.
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
    pFrameYUV->width = pCodecCtx->width;
    pFrameYUV->height = pCodecCtx->height;
    pFrame->width = pCodecCtx->width;
    pFrame->height = pCodecCtx->height;
    packet = (AVPacket*)av_malloc(sizeof(AVPacket));
    printf("--------------- File Information ----------------\n");
    // Dump information about the file onto standard error.
    av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");
    // Converter from the decoder's pixel format to YUV420P.
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    fp_yuv = fopen("c:/users/37075/source/repos/ffmpeg_learn/yuv/output.yuv", "wb+");
    if (fp_yuv == NULL) {   // fixed: fp_yuv was used without a NULL check
        printf("Failed to open output yuv file.\n");
        return -1;
    }
    // Demux the whole stream, decode packets into frames, convert each
    // frame to YUV420P and append it to fp_yuv.
    int64_t iStart = av_gettime();
    while (av_read_frame(pFormatCtx, packet) >= 0) {
        // Only handle packets from the video stream.
        if (packet->stream_index == videoindex) {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0) {
                printf("Decode Error.\n");
                return -1;
            }
            if (got_picture) {
                sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                    pFrameYUV->data, pFrameYUV->linesize);
                y_size = pCodecCtx->width * pCodecCtx->height;
                fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);     //Y
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
            }
        }
        av_free_packet(packet);
    }
    // fixed: av_gettime() returns int64_t; "%d" was undefined behavior.
    printf("time: %lld\n", (long long)(av_gettime() - iStart));
    // Flush the decoder: feed it empty packets until no more frames come out.
    // fixed: the original flush loop broke when a frame WAS produced
    // (dropping it) and re-decoded the last demuxed packet instead of an
    // empty one.
    packet->data = NULL;
    packet->size = 0;
    while (1) {
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if (ret < 0)
            break;
        if (!got_picture)   // decoder fully drained
            break;
        sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
            pFrameYUV->data, pFrameYUV->linesize);
        y_size = pCodecCtx->width * pCodecCtx->height;
        fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);     //Y
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
    }
    sws_freeContext(img_convert_ctx);
    // Play the decoded YUV back with OpenCV (DisplayYUV rewinds the file).
    DisplayYUV(pCodecCtx->width, pCodecCtx->height, 25, fp_yuv);
    fclose(fp_yuv);
    av_free(packet);   // fixed: the packet itself was allocated but never freed
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    return 0;
}