/* Global FFmpeg state shared by all the snippets in these notes. */
AVFormatContext *pFormatCtx;  /* demuxer context for the opened input file */
AVCodecContext *pCodecCtx;    /* video decoder context */
AVCodec *pCodec;              /* video decoder */
AVFrame *pFrame, *pFrameRGB;  /* decoded frame, and its RGB32-converted copy */
AVPacket *packet;             /* compressed packet read from the demuxer */
AVCodecContext *aCodecCtx;    /* audio decoder context */
AVCodec *aCodec;              /* audio decoder */
常见的函数与用法:
1、 void av_register_all(void);
/**
* Initialize libavformat and register all the muxers, demuxers and
* protocols. If you do not call this function, then you can select
* exactly which formats you want to support.
*
* @see av_register_input_format()
* @see av_register_output_format()
*/
void av_register_input_format(AVInputFormat *format);
void av_register_output_format(AVOutputFormat *format);
av_register_all(); //初始化FFMPEG 调用了这个才能正常使用编码器和解码器
2、 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);
/**
* Open an input stream and read the header. The codecs are not opened.
* The stream must be closed with avformat_close_input().
*
* @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).
* May be a pointer to NULL, in which case an AVFormatContext is allocated by this
* function and written into ps.
* Note that a user-supplied AVFormatContext will be freed on failure.
* @param filename Name of the stream to open.
* @param fmt If non-NULL, this parameter forces a specific input format.
* Otherwise the format is autodetected.
* @param options A dictionary filled with AVFormatContext and demuxer-private options.
* On return this parameter will be destroyed and replaced with a dict containing
* options that were not found. May be NULL.
*
* @return 0 on success, a negative AVERROR on failure.
*
* @note If you want to use custom IO, preallocate the format context and set its pb field.
*/
avformat_open_input(&pFormatCtx, file_path, NULL, NULL) != 0
AVFormatContext pFormatCtx = avformat_alloc_context();//上下文结构体
3、 AVFormatContext *avformat_alloc_context(void);
/**
* Allocate an AVFormatContext.
* avformat_free_context() can be used to free the context and everything
* allocated by the framework within it.
*/
AVFormatContext pFormatCtx = avformat_alloc_context();//上下文结构体
4、 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options);
/**
* Read packets of a media file to get stream information. This
* is useful for file formats with no headers such as MPEG. This
* function also computes the real framerate in case of MPEG-2 repeat
* frame mode.
* The logical file position is not changed by this function;
* examined packets may be buffered for later processing.
*
* @param ic media file handle
* @param options If non-NULL, an ic.nb_streams long array of pointers to
* dictionaries, where i-th member contains options for
* codec corresponding to i-th stream.
* On return each dictionary will be filled with options that were not found.
* @return >=0 if OK, AVERROR_xxx on error
*
* @note this function isn't guaranteed to open all the codecs, so
* options being non-empty at return is a perfectly normal behavior.
*
* @todo Let the user decide somehow what information is needed so that
* we do not waste time getting stuff the user does not need.
*/
avformat_find_stream_info(pFormatCtx, NULL) < 0 //小于0为失败
5、 AVCodec *avcodec_find_decoder(enum AVCodecID id);
/**
* Find a registered decoder with a matching codec ID.
*
* @param id AVCodecID of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
AVCodecContext *aCodecCtx;
AVCodec *aCodec;
aCodecCtx = pFormatCtx->streams[audioStream]->codec;//解码器的结构体
aCodec = avcodec_find_decoder(aCodecCtx->codec_id);//得到使用的那个编码格式
6、 AVFrame *av_frame_alloc(void);
/**
* Allocate an AVFrame and set its fields to default values. The resulting
* struct must be freed using av_frame_free().
*
* @return An AVFrame filled with default values or NULL on failure.
*
* @note this only allocates the AVFrame itself, not the data buffers. Those
* must be allocated through other means, e.g. with av_frame_get_buffer() or
* manually.
*/
AVFrame pFrame = av_frame_alloc();
7、 int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *const dst[], const int dstStride[]);
/**
* Scale the image slice in srcSlice and put the resulting scaled
* slice in the image in dst. A slice is a sequence of consecutive
* rows in an image.
*
* Slices have to be provided in sequential order, either in
* top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined.
*
* @param c the scaling context previously created with
* sws_getContext()
* @param srcSlice the array containing the pointers to the planes of
* the source slice
* @param srcStride the array containing the strides for each plane of
* the source image
* @param srcSliceY the position in the source image of the slice to
* process, that is the number (counted starting from
* zero) in the image of the first row of the slice
* @param srcSliceH the height of the source slice, that is the number
* of rows in the slice
* @param dst the array containing the pointers to the planes of
* the destination image
* @param dstStride the array containing the strides for each plane of
* the destination image
* @return the height of the output slice
*/
sws_scale(img_convert_ctx,
(uint8_t const * const *) pFrame->data,
pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
pFrameRGB->linesize);
7、 struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param);
/**
* Allocate and return an SwsContext. You need it to perform
* scaling/conversion operations using sws_scale().
*
* @param srcW the width of the source image
* @param srcH the height of the source image
* @param srcFormat the source image format
* @param dstW the width of the destination image
* @param dstH the height of the destination image
* @param dstFormat the destination image format
* @param flags specify which algorithm and options to use for rescaling
* @return a pointer to an allocated context, or NULL in case of error
* @note this function is to be removed after a saner alternative is
* written
*/
struct SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
//#define SWS_BICUBIC 4
//#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32 // AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
8、 int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);
/**
* Calculate the size in bytes that a picture of the given width and height
* would occupy if stored in the given picture format.
*
* @param pix_fmt picture pixel format
* @param width picture width
* @param height picture height
* @return the computed picture buffer size or a negative error code
* in case of error
*
* @see av_image_get_buffer_size().
*/
int numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);//
//enum AVPixelFormat
9、 void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
/**
* Allocate a block of size bytes with alignment suitable for all
* memory accesses (including vectors if available on the CPU).
* @param size Size in bytes for the memory block to be allocated.
* @return Pointer to the allocated block, NULL if the block cannot
* be allocated.
* @see av_mallocz()
*/
uint8_t * out_buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
10、 int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
enum AVPixelFormat pix_fmt, int width, int height);
/**
* Setup the picture fields based on the specified image parameters
* and the provided image data buffer.
*
* The picture fields are filled in by using the image data buffer
* pointed to by ptr.
*
* If ptr is NULL, the function will fill only the picture linesize
* array and return the required size for the image buffer.
*
* To allocate an image buffer and fill the picture data in one call,
* use avpicture_alloc().
*
* @param picture the picture to be filled in
* @param ptr buffer where the image data is stored, or NULL
* @param pix_fmt the pixel format of the image
* @param width the width of the image in pixels
* @param height the height of the image in pixels
* @return the size in bytes required for src, a negative error code
* in case of failure
*
* @see av_image_fill_arrays()
*/
avpicture_fill((AVPicture *) pFrameRGB, out_buffer, PIX_FMT_RGB32,
pCodecCtx->width, pCodecCtx->height);
11、 int av_new_packet(AVPacket *pkt, int size);
/**
* Allocate the payload of a packet and initialize its fields with
* default values.
*
* @param pkt packet
* @param size wanted payload size
* @return 0 if OK, AVERROR_xxx otherwise
*/
packet = (AVPacket *) malloc(sizeof(AVPacket)); //分配一个packet
av_new_packet(packet, y_size); //分配packet的数据
12、 void av_dump_format(AVFormatContext *ic,
int index,
const char *url,
int is_output);
/**
* Print detailed information about the input or output format, such as
* duration, bitrate, streams, container, programs, metadata, side data,
* codec and time base.
*
* @param ic the context to analyze
* @param index index of the stream to dump information about
* @param url the URL to print, such as source or destination file
* @param is_output Select whether the specified context is an input(0) or output(1)
*/
av_dump_format(pFormatCtx, 0, file_path, 0); //输出视频信息
13、 int av_read_frame(AVFormatContext *s, AVPacket *pkt);
/**
* Return the next frame of a stream.
* This function returns what is stored in the file, and does not validate
* that what is there are valid frames for the decoder. It will split what is
* stored in the file into frames and return one for each call. It will not
* omit invalid data between valid frames so as to give the decoder the maximum
* information possible for decoding.
*
* If pkt->buf is NULL, then the packet is valid until the next
* av_read_frame() or until avformat_close_input(). Otherwise the packet
* is valid indefinitely. In both cases the packet must be freed with
* av_free_packet when it is no longer needed. For video, the packet contains
* exactly one frame. For audio, it contains an integer number of frames if each
* frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames
* have a variable size (e.g. MPEG audio), then it contains one frame.
*
* pkt->pts, pkt->dts and pkt->duration are always set to correct
* values in AVStream.time_base units (and guessed if the format cannot
* provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
* has B-frames, so it is better to rely on pkt->dts if you do not
* decompress the payload.
*
* @return 0 if OK, < 0 on error or end of file
*/
if (av_read_frame(pFormatCtx, packet) < 0)
{
break; //这里认为视频读取完了
}
14、 int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
const AVPacket *avpkt);
/**
* Decode the video frame of size avpkt->size from avpkt->data into picture.
* Some decoders may support multiple frames in a single AVPacket, such
* decoders would then just decode the first frame.
*
* @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
* the actual read bytes because some optimized bitstream readers read 32 or 64
* bits at once and could read over the end.
*
* @warning The end of the input buffer buf should be set to 0 to ensure that
* no overreading happens for damaged MPEG streams.
*
* @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
* between input and output, these need to be fed with avpkt->data=NULL,
* avpkt->size=0 at the end to return the remaining frames.
*
* @param avctx the codec context
* @param[out] picture The AVFrame in which the decoded video frame will be stored.
* Use av_frame_alloc() to get an AVFrame. The codec will
* allocate memory for the actual bitmap by calling the
* AVCodecContext.get_buffer2() callback.
* When AVCodecContext.refcounted_frames is set to 1, the frame is
* reference counted and the returned reference belongs to the
* caller. The caller must release the frame using av_frame_unref()
* when the frame is no longer needed. The caller may safely write
* to the frame if av_frame_is_writable() returns 1.
* When AVCodecContext.refcounted_frames is set to 0, the returned
* reference belongs to the decoder and is valid only until the
* next call to this function or until closing or flushing the
* decoder. The caller may not write to it.
*
* @param[in] avpkt The input AVPacket containing the input buffer.
* You can create such packet with av_init_packet() and by then setting
* data and size, some decoders might in addition need other fields like
* flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least
* fields possible.
* @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
* @return On error a negative value is returned, otherwise the number of bytes
* used or zero if no frame could be decompressed.
*/
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,packet);
//一般使用简单的ffmpeg处理视音频的时候用的步骤和一些结构体介绍
//结构体的介绍
AVFormatContext
iformat:输入视频的AVInputFormat
nb_streams :输入视频的AVStream 个数
streams :输入视频的AVStream []数组(AVStream)
duration :输入视频的时长(以微秒为单位)
bit_rate :输入视频的码率
/* Step 1: find the array indices of the video and audio streams in
 * pFormatCtx->streams[]. -1 means "not found". */
int videoStream = -1;
int audioStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++) { /* often video is stream 0 and audio stream 1, but never rely on that — always scan */
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) /* NOTE(review): AVStream.codec is deprecated in newer FFmpeg; codecpar is preferred — confirm against the FFmpeg version in use */
{
videoStream = i; /* no "videoStream < 0" guard here, so the LAST video stream wins, unlike the audio branch below */
}
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0)
{
audioStream = i; /* "audioStream < 0" keeps the FIRST audio stream */
}
}
/* videoStream == -1 means no video stream was found */
if (videoStream == -1) {
printf("Didn't find a video stream.\n");
return;
}
if (audioStream == -1) {
printf("Didn't find a audio stream.\n");
return;
}
end 1
//结构体简单的介绍
AVStream
id:序号
codec:该流对应的AVCodecContext
time_base:该流的时基
r_frame_rate:该流的帧率
AVCodecContext
codec:编解码器的AVCodec
width, height:图像的宽高(只针对视频)
pix_fmt:像素格式(只针对视频)
sample_rate:采样率(只针对音频)
channels:声道数(只针对音频)
sample_fmt:采样格式(只针对音频)
AVCodec
name:编解码器名称
long_name:编解码器长名称
type:编解码器类型
id:编解码器ID
一些编解码的接口函数
/* Step 2: look up the decoder that matches the audio stream's codec ID. */
AVCodecContext *aCodecCtx;
AVCodec *aCodec;
aCodecCtx = pFormatCtx->streams[audioStream]->codec;/* codec context of the audio stream (deprecated field in newer FFmpeg — NOTE(review): confirm) */
aCodec = avcodec_find_decoder(aCodecCtx->codec_id);/* find the registered decoder for that codec ID */
if (aCodec == NULL) {/* NULL means no matching decoder is registered */
printf("ACodec not found.\n");
return;
}
end 2///
/* Step 3: open the decoder; a negative return value means failure. */
if (avcodec_open2(aCodecCtx, aCodec, NULL) < 0) {
printf("Could not open audio codec.\n");
return;
}
/end 3/
AVPacket
pts:显示时间戳
dts :解码时间戳
data :压缩编码数据
size :压缩编码数据大小
stream_index :所属的AVStream
AVFrame
data:解码后的图像像素数据(音频采样数据)。
linesize:对视频来说是图像中一行像素的大小;对音频来说是整个音频帧的大小。
width, height:图像的宽高(只针对视频)。
key_frame:是否为关键帧(只针对视频) 。
pict_type:帧类型(只针对视频) 。例如I,P,B。
//
/* Allocate the decode frame and the RGB destination frame.
 * BUG FIX: av_frame_alloc() returns AVFrame* — the original notes dropped the
 * '*' on both declarations, which would not compile. */
AVFrame *pFrame = av_frame_alloc();
AVFrame *pFrameRGB = av_frame_alloc();
/* Scaler context used to convert decoded YUV frames to RGB32. */
static struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
/* Size in bytes of one RGB32 picture at the decoder's dimensions. */
int numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);
out_buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
/* Point pFrameRGB's data/linesize at out_buffer — the sws_scale() output
 * below therefore lands in out_buffer. */
avpicture_fill((AVPicture *) pFrameRGB, out_buffer, PIX_FMT_RGB32,
pCodecCtx->width, pCodecCtx->height);
int y_size = pCodecCtx->width * pCodecCtx->height;
packet = (AVPacket *) malloc(sizeof(AVPacket)); /* allocate one packet */
av_new_packet(packet, y_size); /* allocate the packet payload with default fields */
av_dump_format(pFormatCtx, 0, file_path, 0); /* print input stream info */
//输出
while (1)
{
if (av_read_frame(pFormatCtx, packet) < 0)//每次读一个packet
{
break; //这里认为视频读取完了
}
if (packet->stream_index == videoStream) {//所属的AVStream,就是前面说的下标
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,packet);
if (ret < 0) {
printf("decode error.\n");
return;
}
if (got_picture) {
sws_scale(img_convert_ctx,
(uint8_t const * const *) pFrame->data,
pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
pFrameRGB->linesize);
//把这个RGB数据 用QImage加载
QImage tmpImg((uchar *)out_buffer,pCodecCtx->width,pCodecCtx->height,QImage::Format_RGB32);
QImage image = tmpImg.copy(); //把图像复制一份 传递给界面显示
emit sig_GetOneFrame(image); //发送信号
}
}
av_free_packet(packet);//将这一帧释放掉
msleep(15); //停一停 不然放的太快了
}
//
//关于SDL2部分的使用
///