FFmpeg实现实时反向播放(ffmpeg+sdl+vs2017)
在某些场合下需要实现视频的倒放效果,实现倒放有很多种思路。比如直接用libavfilter下提供的reverse命令行输出一个反转过来的视频;或者比较直接的想法是把所有frame缓存起来,然后再反向逐个输出到屏幕。
如果我们需要实时的在屏幕预览,那命令行的方法大概是不行了,而第二种方法需要在播放前预先加载所有需要播放的帧,这一方面增加了耗时,另一方面占用了大量的内存空间。所以基于第二种方法的倒放必须要进行优化。
本文使用的思路是基于av_seek_frame实现的方法,大致思路如图所示(原谅我对XML图形的滥用)。
1.在解码线程我们需要得到即将被输出的帧,使用cur变量初始化为视频的末尾的位置,用来记录当前seek执行的时间戳,使用av_seek_frame函数设置参数为backward,也就是寻找cur这一位置之前的最近的关键帧。
2.随后我们就可以仅解码从找到的关键帧到cur之间的frame,把它们输出到一个vector中,然后更新cur = vector[0] - 1也就是关键帧的上一个位置,接下来再次对cur执行seek得到的就会是上一个关键帧了。
3.得到的vector输出到一个队列中,由sdl的线程获取,最后从vector的end到begin反向输出到屏幕,这样就可以实现倒放预览了。
分析这一过程不难发现,只有储存在队列中、即将播放的frame占用了内存空间;而且使用双线程配合队列,在解码下一个I帧及其后继B帧、P帧时不会出现需要等待而卡顿的情况。
最后贴一下代码,这里的代码是直接从雷霄骅先生的代码更改完成的,如果再增加一个packet队列来提高并发性,应该能进一步优化性能。
#include <stdio.h>
#include <algorithm>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <vector>
#define __STDC_CONSTANT_MACROS
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
#include "SDL2/SDL.h"
};
//Output YUV420P data as a file
#define OUTPUT_YUV420P 0
using namespace std;
// Shared producer/consumer queue.  Each element is one decoded GOP: the frames
// between a keyframe and the current seek position, in decode (forward) order.
// An empty vector is the producer's end-of-stream signal to the SDL thread.
queue<vector<AVFrame*>> Q;
mutex mutex_; // guards Q
condition_variable cvfull; // signaled by the decoder after pushing a batch into Q
condition_variable cvempt; // signaled by the SDL thread after popping, so the decoder can refill
//SDL---------------------------
int screen_w = 0, screen_h = 0;
SDL_Window *screen;
SDL_Renderer* sdlRenderer;
SDL_Texture* sdlTexture;
SDL_Rect sdlRect;
// Consumer thread: repeatedly pops one batch (GOP) of decoded frames from Q and
// renders it back-to-front, producing the reverse-playback effect on screen.
// Terminates when it receives the empty-vector end-of-stream marker.
void Show() {
while (1) {
vector<AVFrame*> V;
{
// Hold the lock only while touching the shared queue, not while rendering.
unique_lock<mutex> lk(mutex_);
cvfull.wait(lk, [&]() { return Q.size() > 0; });
cout <<"队列大小:"<< Q.size()<<endl;
V = Q.front();
Q.pop();
cvempt.notify_one(); // a queue slot is free: wake the decoder thread
}
if (V.size() == 0) {
return; // empty batch == decoding finished; exit the thread
}
// Frames were decoded in forward order, so iterate rbegin->rend to reverse.
for (auto i = V.rbegin(); i != V.rend(); i++) {
AVFrame* pFrameYUV = *i;
// Assumes the decoder output is planar YUV420P (IYUV texture format).
SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
pFrameYUV->data[0], pFrameYUV->linesize[0],
pFrameYUV->data[1], pFrameYUV->linesize[1],
pFrameYUV->data[2], pFrameYUV->linesize[2]);
SDL_RenderClear(sdlRenderer);
SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);
SDL_RenderPresent(sdlRenderer);
SDL_Delay(40); // fixed ~25 fps pacing; assumes a ~25 fps source — TODO derive from the stream
av_frame_free(&pFrameYUV); // frame was av_frame_clone'd by the producer; release it after display
}
}
}
// Producer: walks the file backwards one GOP at a time.  Each iteration seeks
// to the keyframe at/before `cur`, decodes every frame up to `cur` into a
// vector, hands that vector to the SDL thread through Q, then moves `cur` to
// just before the GOP's first frame so the next seek lands on the previous
// keyframe.  Pushes an empty vector at the end as the end-of-stream signal.
int main(int argc, char* argv[])
{
	AVFormatContext *pFormatCtx;
	int videoindex;
	AVCodecContext *pCodecCtx;
	AVCodec *pCodec;
	AVFrame *pFrameYUV;
	unsigned char *out_buffer;
	AVPacket *packet;
	int ret, got_picture;
	char filepath[] = "big_buck_bunny.mp4";
	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();
	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		printf("Couldn't find stream information.\n");
		return -1;
	}
	// Locate the first video stream.
	videoindex = -1;
	for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}
	if (videoindex == -1) {
		printf("Didn't find a video stream.\n");
		return -1;
	}
	pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		printf("Codec not found.\n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		printf("Could not open codec.\n");
		return -1;
	}
	pFrameYUV = av_frame_alloc();
	// NOTE(review): avcodec_decode_video2 replaces pFrameYUV->data with its own
	// refcounted buffers, and no sws_scale conversion is performed — rendering
	// assumes the decoder already outputs YUV420P (true for typical H.264 mp4).
	out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
		AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
	packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx, 0, filepath, 0);
	printf("-------------------------------------------------\n");
	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		printf("Could not initialize SDL - %s\n", SDL_GetError());
		return -1;
	}
	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);
	if (!screen) {
		printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
		return -1;
	}
	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
	sdlRect.x = 0;
	sdlRect.y = 0;
	sdlRect.w = screen_w;
	sdlRect.h = screen_h;
	thread threadShow(Show);
	//SDL End----------------------
	// pFormatCtx->duration is in AV_TIME_BASE units, while av_seek_frame() with
	// a stream index and the decoded frame pts use the stream's time_base.  The
	// original mixed the two units (so the "past cur" break almost never fired);
	// rescale once up front.  int64_t, not int: an int in AV_TIME_BASE units
	// overflows for media longer than ~35 minutes.  The small offset pushes the
	// first seek target past the final frame.
	int64_t cur = av_rescale_q(pFormatCtx->duration, AV_TIME_BASE_Q,
		pFormatCtx->streams[videoindex]->time_base) + 5000;
	while (cur > 0) {
		// Jump to the nearest keyframe at or before cur.  (Original had
		// `if(ret = ... < 0)` — precedence bug storing the comparison in ret.)
		if ((ret = av_seek_frame(pFormatCtx, videoindex, cur, AVSEEK_FLAG_BACKWARD)) < 0) { break; }
		std::vector< AVFrame* > V;
		while (av_read_frame(pFormatCtx, packet) >= 0) {
			if (packet->stream_index == videoindex) {
				ret = avcodec_decode_video2(pCodecCtx, pFrameYUV, &got_picture, packet);
				if (ret < 0) {
					printf("Decode Error.\n");
					av_free_packet(packet); // was leaked on this path
					return -1;
				}
				if (got_picture) {
					// Past the seek target: this GOP is complete.  Free the
					// packet here too (the original leaked it on this break).
					// NOTE(review): assumes pts is valid, not AV_NOPTS_VALUE —
					// consider best_effort_timestamp for robustness.
					if (pFrameYUV->pts > cur) { av_free_packet(packet); break; }
					V.push_back(av_frame_clone(pFrameYUV));
				}
			}
			av_free_packet(packet);
		}
		if (V.size() != 0) {
			unique_lock<mutex> lk(mutex_);
			cvempt.wait(lk, [&]() { return Q.size() < 3; });// bound memory: at most 3 pending GOPs
			cur = V[0]->pts - 1;// just before this GOP, so the next seek finds the previous keyframe
			Q.push(V);
			cvfull.notify_one();
		}
		else {
			cur -= 10;// nothing decoded (pts gap / odd seek landing): nudge back and retry
		}
	}
	{
		unique_lock<mutex> lk(mutex_);
		cvempt.wait(lk, [&]() { return Q.size() < 2; });
		Q.push(vector<AVFrame*>());// empty vector tells the SDL thread decoding has finished
		cvfull.notify_one();
	}
	threadShow.join();
	// Cleanup — the original leaked out_buffer and packet and never shut SDL down.
	av_frame_free(&pFrameYUV);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);
	av_free(out_buffer);
	av_free(packet);
	SDL_DestroyTexture(sdlTexture);
	SDL_DestroyRenderer(sdlRenderer);
	SDL_DestroyWindow(screen);
	SDL_Quit();
	return 0;
}