以前写了《利用SDL2库实现音频的播放(一)》,但是感觉太复杂,今天就再写了这篇。
开发环境:
qt 5.5.0、qtcreator、SDL2-2.0.10、ffmpeg version N-95183-g97450d2b6a Copyright (c) 2000-2019 the FFmpeg developers
using namespace std;
#include "SDL.h"
#include "SDL_thread.h"
#include"SDL_render.h"
#include "SDL_stdinc.h"
#include "SDL_rect.h"
#include "SDL_video.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/pixfmt.h"
#include "libavutil/avstring.h"
/* NOTE(review): four #include directives lost their angle-bracket targets in
 * transcription. They cannot be recovered from this copy — restore them from
 * the original source (they are presumably additional FFmpeg/libavutil system
 * headers, given the surrounding lines). */
#include "libswresample/swresample.h"
#include "libavutil/audio_fifo.h"
}
///由于我们建立的是C++的工程
///编译的时候使用的C++的编译器编译
///而FFMPEG是C的库
///因此这里需要加上extern "C"
///否则会提示各种未定义
///
// Size of the decoded-audio staging buffer, in bytes (1 MiB).
#define AVCODEC_MAX_AUDIO_FRAME_SIZE (1024 * 1024)
// Capacity bound for the picture queue (1 MiB).
#define VIDEO_PICTURE_QUEUE_SIZE (1024 * 1024)
// Custom SDL event pushed by the refresh timer to trigger a redraw.
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
// Byte limits for the audio and video packet queues (1 GiB each).
#define MAX_AUDIOQ_SIZE (1024 * 1024 * 1024)
#define MAX_VIDEOQ_SIZE (1024 * 1024 * 1024)
// FIFO queue of demuxed AVPackets, presumably shared between the demux
// thread (producer) and a decode thread (consumer) — the mutex/cond pair
// suggests cross-thread use; confirm against queue_put/queue_get callers.
typedef struct PacketQueue
{
AVPacketList *first_pkt, *last_pkt; // singly linked list: head and tail
int nb_packets;                     // number of packets currently queued
int size;                           // total bytes of queued packet data
SDL_mutex *mutex;                   // guards all fields of this struct
SDL_cond *cond;                     // signaled when the queue changes
} PacketQueue;
// Aggregate player state: demuxer context, per-media-type stream/codec
// handles, packet queues, decode buffers, and the worker-thread handles.
typedef struct VideoState
{
AVFormatContext *pFormatCtx;        // demuxer context for the open file
int videoStream, audioStream;       // stream indices within pFormatCtx
AVStream *audio_st;                 // audio stream being decoded
// Decoder context for the audio stream.
AVCodecContext
*audio_ctx;
PacketQueue audioq;                 // undecoded audio packets
// Staging buffer for decoded PCM; 1.5x the max frame size for headroom.
uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
unsigned int audio_buf_size;        // valid bytes currently in audio_buf
unsigned int audio_buf_index;       // next unread offset within audio_buf
AVPacket audio_pkt;                 // packet currently being decoded
uint8_t *audio_pkt_data;            // read cursor into audio_pkt's data
int audio_pkt_size;                 // bytes remaining in audio_pkt
AVStream *video_st;                 // video stream being decoded
// Decoder context for the video stream.
AVCodecContext
*video_ctx;
PacketQueue videoq;                 // undecoded video packets
int pictq_size, pictq_rindex, pictq_windex; // picture ring: count, read, write
SDL_mutex *pictq_mutex;             // guards the picture-queue indices
SDL_cond *pictq_cond;               // signaled when a picture slot frees up
SDL_Thread *parse_tid;              // demux (parse) thread handle
SDL_Thread *video_tid;              // video decode thread handle
char filename[1024];                // path of the media file being played
int quit;                           // nonzero requests shutdown of workers
struct SwsContext *sws_ctx;         // pixel-format/scaling converter
} VideoState;
// Global SDL display objects, created lazily by createShowWnd().
static SDL_Texture *gSdlTexture = nullptr;   // streaming IYUV texture
static SDL_mutex *gscreenmutex = nullptr;    // presumably guards screen updates — confirm against users
static SDL_Window * gSdlScreen = nullptr;    // output window
static SDL_Renderer *gSdlRenderer = nullptr; // renderer bound to gSdlScreen
// NOTE(review): this global shadows VideoState::quit — verify which one the
// event loop actually checks.
int quit = 0;
VideoState *gvideostate;                     // the single player instance
static SwrContext *au_convert_ctx = nullptr; // audio resampling context
void createShowWnd()
{
if (gSdlScreen != nullptr)
{
return;
}
// Allocate a place to put our YUV image on that screen
gSdlScreen = SDL_CreateWindow("Simplest ffmpeg player's
Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, \
gvideostate->video_st->codec->width,
gvideostate->video_st->codec->height,SDL_WINDOW_OPENGL);
gSdlRenderer = SDL_CreateRenderer(gSdlScreen, -1, 0);
gSdlTexture = SDL_CreateTexture(gSdlRenderer,
SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
gvideostate->video_st->codec->width,
gvideostate->video_st->codec->height);
}
void alloc_picture(void *userdata)
{
createShowWnd();
AVFrame *pFrameRGB = av_frame_alloc();
int numBytes = avpicture_get_size(AV_PIX_FMT_YUV420P,
gvideostate->video_ctx->width,
gvideostate->video_ctx->height);
numBytes *= sizeof(uint8_t);
uint8_t *out_buffer = (uint8_t *) av_malloc(numBytes *
sizeof(uint8_t));
avpicture_fill((AVPicture *) pFrameRGB, out_buffer,
AV_PIX_FMT_YUV420P,
gvideost