=====================================================
前言:
参考雷神的100行代码实现最简单的基于FFMPEG+SDL的视频播放器
https://blog.csdn.net/leixiaohua1020/article/details/8652605
再次致敬雷神,愿雷神在天堂没有疾病没有烦恼的敲自己喜欢的代码
1. 因为雷神上传的代码是2013年的,ffmpeg更新迅速,许多接口都被弃用。比如雷神使用的下方接口
/*
* @deprecated Use avcodec_send_packet() and avcodec_receive_frame().
*/
attribute_deprecated
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
const AVPacket *avpkt);
已经被标注了"attribute_deprecated",编译的时候会报错,因为该接口被弃用无法使用了,所以自己根据雷神的代码稍作了修改。
2. 雷神使用了SDL来作为视频的播放,因为我是初学者,想把问题简单化,所以代码是直接保存为YUV文件(平面YUV420P格式),可以使用ffplay直接播放。使用ffplay播放时需要指定分辨率,命令如下:
ffplay -f rawvideo -video_size 1920x1080 你保存的视频.yuv
3.使用的ffmpeg版本是4.2的版本,因为ffmpeg更新迅猛,所以下方代码或许过段时间又被deprecated了也说不准
4.文件名是写死在代码里的,可以改成从命令行参数获取
5.因为是初学者,有错误,请各位大佬指出
=====================================================
#include <stdio.h>
#define __STDC_CONSTANT_MACROS
#ifdef _WIN32
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <SDL/SDL.h>
#ifdef __cplusplus
};
#endif
#endif
int main(int argc, char* argv[])
{
//FFmpeg
AVFormatContext *pFormatCtx;
int i, videoindex;
AVCodecParameters *pCodecParam;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame,*pFrameYUV;
AVPacket *packet;
FILE *fp_yuv;
int ret;
char filepath[]="D:\\test.mp4";
pFormatCtx = avformat_alloc_context();
if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
printf("Couldn't open input stream.\n");
return -1;
}
if(avformat_find_stream_info(pFormatCtx,NULL)<0){
printf("Couldn't find stream information.\n");
return -1;
}
videoindex=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_VIDEO){
videoindex=i;
break;
}
if(videoindex==-1){
printf("Didn't find a video stream.\n");
return -1;
}
pCodecParam=pFormatCtx->streams[videoindex]->codecpar;
auto codec = avcodec_find_decoder(pCodecParam->codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
pCodecCtx = avcodec_alloc_context3(codec);
if (!pCodecCtx) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
ret = avcodec_parameters_to_context(pCodecCtx, pCodecParam);
if(ret < 0){
printf("avcodec_parameters_to_context error.\n");
return -1;
}
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL){
printf("Codec not found.\n");
return -1;
}
if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
fprintf(stderr, "Could not open codec.\n");
return -1;
}
pFrame=av_frame_alloc();
if (!pFrame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
packet=(AVPacket *)av_malloc(sizeof(AVPacket));
fprintf(stderr, "------------- File Information ------------------\n");
av_dump_format(pFormatCtx,0,filepath,0);
fprintf(stderr, "-------------------------------------------------\n");
fp_yuv=fopen("D:\\output.yuv","wb+");
while(av_read_frame(pFormatCtx, packet)>=0){
if(packet->stream_index==videoindex){
ret = avcodec_send_packet(pCodecCtx, packet);
if(ret != 0){
printf("avcodec_send_packet Error.\n");
continue;
}
ret = avcodec_receive_frame(pCodecCtx, pFrame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
continue;
}
else if (ret < 0) {
fprintf(stderr, "Error during decoding\n");
break;
}
int y_width = pFrame->width, y_height=pFrame->height;
int y_size = y_width * y_height;
fwrite(pFrame->data[0],1,y_size,fp_yuv); //Y
fwrite(pFrame->data[1],1,y_size/4,fp_yuv); //U
fwrite(pFrame->data[2],1,y_size/4,fp_yuv); //V
}
av_packet_unref(packet);
}
//FIX: Flush Frames remained in Codec
while (true) {
ret = avcodec_send_packet(pCodecCtx, packet);
if(ret != 0){
fprintf(stderr, "avcodec_send_packet Error.\n");
break;
}
ret = avcodec_receive_frame(pCodecCtx, pFrame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
continue;
}
else if (ret < 0) {
fprintf(stderr, "Error during decoding\n");
break;
}
int y_size=pCodecCtx->width*pCodecCtx->height;
fwrite(pFrame->data[0],1,y_size,fp_yuv); //Y
fwrite(pFrame->data[1],1,y_size/4,fp_yuv); //U
fwrite(pFrame->data[2],1,y_size/4,fp_yuv); //V
}
fclose(fp_yuv);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}
代码中写入YUV文件的时候,使用的雷神之前的写入方式
fwrite(pFrame->data[0],1,y_size,fp_yuv); //Y
fwrite(pFrame->data[1],1,y_size/4,fp_yuv); //U
fwrite(pFrame->data[2],1,y_size/4,fp_yuv); //V
但是在视频的分辨率不是很标准的情况下,这种写入方式会出现问题,因为pFrame->width 与 pFrame->linesize[0]在分辨率不标准的情况下会不一样(linesize是按对齐要求补齐后的行字节数),导致使用雷神的直接写入方式会写入一小部分填充的空数据,并且丢失一小部分有效数据,最终造成播放花屏。
改成下面写入方式即可,只是写入时,速度较慢
for (int ii = 0; ii < pFrame->height; ii++)
fwrite(pFrame->data[0] + ii * pFrame->linesize[0], 1, pFrame->width, fp_yuv);
for (int ii = 0; ii < pFrame->height/2; ii++)
fwrite(pFrame->data[1] + ii * pFrame->linesize[1], 1, pFrame->width/2, fp_yuv);
for (int ii = 0; ii < pFrame->height/2; ii++)
fwrite(pFrame->data[2] + ii * pFrame->linesize[2], 1, pFrame->width/2, fp_yuv);
因为vs鼓捣了好久实在搞不定(看了雷神的攻略依然没搞定),所以在windows下,使用的IDE是CLION ,MinGW来编译,相比VS来说真是轻松又愉快。附上CMAKE文件
cmake_minimum_required(VERSION 3.15)
project(testFFmpeg CXX)

# The only source is test.cpp (C++), so the C++ standard must be set;
# the original set CMAKE_C_STANDARD, which has no effect on C++ sources.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# FFmpeg headers are included as <libavcodec/avcodec.h> etc., so only the
# top-level include directory is strictly required; the per-library
# directories are kept for compatibility with quoted includes.
include_directories(include)
include_directories(include/libavcodec)
include_directories(include/libavdevice)
include_directories(include/libavfilter)
include_directories(include/libavformat)
include_directories(include/libavutil)
include_directories(include/libpostproc)
include_directories(include/libswresample)
include_directories(include/libswscale)

# Collect the prebuilt FFmpeg import libraries shipped alongside the project.
file(GLOB FFMPEG_LIBS lib/*.lib)

add_executable(testFFmpeg
        test.cpp
        )

# Prefer target-scoped linking over the global link_libraries().
target_link_libraries(testFFmpeg ${FFMPEG_LIBS})