FFmpeg framework reading notes (1): the frame-reading function int av_read_frame(AVFormatContext *s, AVPacket *pkt)

     For audio playback, the data source is changing from a complete audio file to an audio stream pulled from the network into a buffer, so that playback works on stream fragments. These notes walk through av_read_frame to figure out how a custom data source can be plugged in; the main question is where FFmpeg actually reads the input data.

Start with the function av_read_frame.

int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    // linked-list node that wraps an AVPacket
    AVPacketList *pktl;
    int eof=0;
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;

    for(;;){
        pktl = s->packet_buffer;
        if (pktl) {
            AVPacket *next_pkt= &pktl->pkt;

            if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
                int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
                while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
                    if(   pktl->pkt.stream_index == next_pkt->stream_index
                       && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
                       && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
                        next_pkt->pts= pktl->pkt.dts;
                    }
                    pktl= pktl->next;
                }
                pktl = s->packet_buffer;
            }

            if(   next_pkt->pts != AV_NOPTS_VALUE
               || next_pkt->dts == AV_NOPTS_VALUE
               || !genpts || eof){
                /* read packet from packet buffer, if there is data */
                *pkt = *next_pkt;
                s->packet_buffer = pktl->next;
                av_free(pktl);
                return 0;
            }
        }
    
        if(genpts){
            // this is where the data is actually read
            int ret= av_read_frame_internal(s, pkt);
            if(ret<0){
                if(pktl && ret != AVERROR(EAGAIN)){
                    eof=1;
                    continue;
                }else
                    return ret;
            }

            if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                           &s->packet_buffer_end)) < 0)
                return AVERROR(ENOMEM);
        }else{
            assert(!s->packet_buffer);
            return av_read_frame_internal(s, pkt);
        }
    }
}

In the code above the actual reading is delegated to av_read_frame_internal; the rest of the function (the genpts timestamp patching) is not analyzed here, I do not fully understand it yet.
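Before diving into the internals, a quick reminder of how av_read_frame is driven from the application side. This is only a minimal sketch of the usual demux loop for this generation of the API (the AVFormatContext s is assumed to be opened already, and error handling is left out):

// Minimal caller-side demux loop: each successful call returns one packet,
// which must be released with av_free_packet() before the next iteration.
AVPacket pkt;
while (av_read_frame(s, &pkt) >= 0) {
    // pkt.stream_index identifies the stream; pkt.data/pkt.size go to the decoder
    av_free_packet(&pkt);
}

Each packet handed out by av_read_frame comes either from the packet_buffer list or straight from av_read_frame_internal, which is analyzed next.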

static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;
    // initialize all fields of pkt
    av_init_packet(pkt);

    for(;;) {
        /* select current input stream component */
        // grab the current input stream
        st = s->cur_st;
        if (st) {
            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = st->cur_pkt;
                st->cur_pkt.data= NULL;
                st->cur_pkt.side_data_elems = 0;
                st->cur_pkt.side_data = NULL;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                    (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                    ff_reduce_index(s, st->index);
                    av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
                }
                break;
            } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
                                       st->cur_ptr, st->cur_len,
                                       st->cur_pkt.pts, st->cur_pkt.dts,
                                       st->cur_pkt.pos);
                st->cur_pkt.pts = AV_NOPTS_VALUE;
                st->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                st->cur_ptr += len;
                st->cur_len -= len;

                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    pkt->pos = st->parser->pos;
                    if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
                        s->cur_st = NULL;
                        pkt->destruct= st->cur_pkt.destruct;
                        st->cur_pkt.destruct= NULL;
                        st->cur_pkt.data    = NULL;
                        assert(st->cur_len == 0);
                    }else{
                    pkt->destruct = NULL;
                    }
                    compute_pkt_fields(s, st, st->parser, pkt);

                    if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
                        int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->pos : st->parser->frame_offset;
                        ff_reduce_index(s, st->index);
                        av_add_index_entry(st, pos, pkt->dts,
                                           0, 0, AVINDEX_KEYFRAME);
                    }

                    break;
                }
            } else {
                /* free packet */
                av_free_packet(&st->cur_pkt);
                s->cur_st = NULL;
            }
        } else { // no data left for the current stream: go back to the data source
            AVPacket cur_pkt;
            /* read the next packet from the data source */
            ret = av_read_packet(s, &cur_pkt);
            if (ret < 0) {
                if (ret == AVERROR(EAGAIN))
                    return ret;
                /* return the last frames, if any */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser && st->need_parsing) {
                        av_parser_parse2(st->parser, st->codec,
                                        &pkt->data, &pkt->size,
                                        NULL, 0,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE,
                                        AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminate parsing */
                return ret;
            }
            st = s->streams[cur_pkt.stream_index];
            st->cur_pkt= cur_pkt;

            if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
               st->cur_pkt.dts != AV_NOPTS_VALUE &&
               st->cur_pkt.pts < st->cur_pkt.dts){
                av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                    st->cur_pkt.stream_index,
                    st->cur_pkt.pts,
                    st->cur_pkt.dts,
                    st->cur_pkt.size);
//                av_free_packet(&st->cur_pkt);
//                return -1;
            }

            if(s->debug & FF_FDEBUG_TS)
                av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
                    st->cur_pkt.stream_index,
                    st->cur_pkt.pts,
                    st->cur_pkt.dts,
                    st->cur_pkt.size,
                    st->cur_pkt.duration,
                    st->cur_pkt.flags);

            s->cur_st = st;
            st->cur_ptr = st->cur_pkt.data;
            st->cur_len = st->cur_pkt.size;
            if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
                st->parser = av_parser_init(st->codec->codec_id);
                if (!st->parser) {
                    /* no parser available: just output the raw packets */
                    st->need_parsing = AVSTREAM_PARSE_NONE;
                }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
                    st->parser->flags |= PARSER_FLAG_ONCE;
                }
            }
        }
    }
    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
            pkt->stream_index,
            pkt->pts,
            pkt->dts,
            pkt->size,
            pkt->duration,
            pkt->flags);

    return 0;
}
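As the code shows, av_read_frame_internal leans on the AVCodecParser API (av_parser_init / av_parser_parse2) to cut raw packet data into complete frames whenever st->need_parsing is set. The parser can also be used on its own; the following is a rough sketch of that usage, not taken from the FFmpeg source (codec_ctx stands for an already-opened AVCodecContext and fill_input() for some source of raw bytes, both placeholders):

// Feed raw bytes into a parser and get back pointers to complete frames,
// roughly what av_read_frame_internal does per stream.
AVCodecParserContext *parser = av_parser_init(CODEC_ID_MP3);  // example codec id
uint8_t inbuf[4096];
int     inlen = fill_input(inbuf, sizeof(inbuf));  // hypothetical byte source
uint8_t *cur  = inbuf;
while (inlen > 0) {
    uint8_t *frame_data;
    int      frame_size;
    // codec_ctx: an already-opened AVCodecContext (placeholder)
    int used = av_parser_parse2(parser, codec_ctx,
                                &frame_data, &frame_size,
                                cur, inlen,
                                AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
    cur   += used;
    inlen -= used;
    if (frame_size) {
        // frame_data/frame_size now describe one complete frame
    }
}
av_parser_close(parser);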

Inside av_read_frame_internal, the actual source read is int av_read_packet(AVFormatContext *s, AVPacket *pkt):

int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i;
    AVStream *st;

    for(;;){
        AVPacketList *pktl = s->raw_packet_buffer;
        // the format context already holds buffered raw packets
        if (pktl) {
            *pkt = pktl->pkt;
            if(s->streams[pkt->stream_index]->request_probe <= 0){
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;
                av_free(pktl);
                return 0;
            }
        }
       
        av_init_packet(pkt);
        // no buffered raw packet: fetch data from the source through the read_packet
        // function pointer of AVInputFormat. AVInputFormat bundles the operations for one
        // container format; one such struct is set up for every format during registration.
        ret= s->iformat->read_packet(s, pkt);
        if (ret < 0) {
            if (!pktl || ret == AVERROR(EAGAIN))
                return ret;
            for (i = 0; i < s->nb_streams; i++)
                if(s->streams[i]->request_probe > 0)
                    s->streams[i]->request_probe = -1;
            continue;
        }

        if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
            av_packet_merge_side_data(pkt);
        st= s->streams[pkt->stream_index];

        switch(st->codec->codec_type){
        case AVMEDIA_TYPE_VIDEO:
            if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
            break;
        case AVMEDIA_TYPE_AUDIO:
            if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
            break;
        }

        if(!pktl && st->request_probe <= 0)
            return ret;

        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;

        if(st->request_probe>0){
            AVProbeData *pd = &st->probe_data;
            int end;
            av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
            --st->probe_packets;

            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);

            end=    s->raw_packet_buffer_remaining_size <= 0
                 || st->probe_packets<=0;

            if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
                int score= set_codec_from_probe_data(s, st, pd);
                if(    (st->codec->codec_id != CODEC_ID_NONE && score > AVPROBE_SCORE_MAX/4)
                    || end){
                    pd->buf_size=0;
                    av_freep(&pd->buf);
                    st->request_probe= -1;
                    if(st->codec->codec_id != CODEC_ID_NONE){
                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
                    }else
                        av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
                }
            }
        }
    }
}
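The probing branch at the bottom (request_probe, AVProbeData, set_codec_from_probe_data) keeps copying packet payloads into a probe buffer until the codec can be identified. The same AVProbeData structure is what the public probing functions work on; a hedged sketch of using it directly for container probing (data and data_size stand for bytes already pulled from the input, they are not from the source above):

// Probe the container format of an in-memory buffer via AVProbeData.
// 'data'/'data_size': bytes already read from the input (placeholders).
AVProbeData pd;
pd.filename = "";                                    // no file name for a memory stream
pd.buf      = av_malloc(data_size + AVPROBE_PADDING_SIZE);
memcpy(pd.buf, data, data_size);
memset(pd.buf + data_size, 0, AVPROBE_PADDING_SIZE); // probing expects zero padding
pd.buf_size = data_size;

AVInputFormat *fmt = av_probe_input_format(&pd, 1);  // 1: the "file" is already open
if (fmt)
    printf("probed format: %s\n", fmt->name);
av_free(pd.buf);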

Now look at how an AVInputFormat is initialized, taking the FLV demuxer as an example:

AVInputFormat ff_flv_demuxer = {
    "flv",
    NULL_IF_CONFIG_SMALL("FLV format"),
    sizeof(FLVContext),
    flv_probe,
    flv_read_header,
    // the function pointer that reads one packet
    flv_read_packet,
    .read_seek = flv_read_seek,
#if 0
    .read_seek2 = flv_read_seek2,
#endif
    .extensions = "flv",
    .value = CODEC_ID_FLV1,
};
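Every built-in demuxer gets such a struct, and av_register_all() registers them all so that probing and av_read_packet can reach their callbacks. In principle a project-specific AVInputFormat could be registered the same way; a hedged sketch (my_probe / my_read_header / my_read_packet are placeholders for callbacks the project would have to implement, they do not exist in FFmpeg):

// Hypothetical custom demuxer registration.
static AVInputFormat my_demuxer = {
    .name        = "mystream",
    .long_name   = "custom network stream",
    .read_probe  = my_probe,
    .read_header = my_read_header,
    .read_packet = my_read_packet,  // this is what av_read_packet() ends up calling
};

av_register_input_format(&my_demuxer);  // after this, probing can select it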

Next, flv_read_packet:

static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FLVContext *flv = s->priv_data;
    int ret, i, type, size, flags, is_audio;
    int64_t next, pos;
    int64_t dts, pts = AV_NOPTS_VALUE;
    AVStream *st = NULL;

 for(;;avio_skip(s->pb, 4)){ /* pkt size is repeated at end. skip it */
    pos = avio_tell(s->pb);
    type = avio_r8(s->pb);
    size = avio_rb24(s->pb);
    dts = avio_rb24(s->pb);
    dts |= avio_r8(s->pb) << 24;
    av_dlog(s, "type:%d, size:%d, dts:%"PRId64"\n", type, size, dts);
    if (url_feof(s->pb))
        return AVERROR_EOF;
    avio_skip(s->pb, 3); /* stream id, always 0 */
    flags = 0;

    if(size == 0)
        continue;

    next= size + avio_tell(s->pb);

    if (type == FLV_TAG_TYPE_AUDIO) {
        is_audio=1;
        flags = avio_r8(s->pb);
        size--;
    } else if (type == FLV_TAG_TYPE_VIDEO) {
        is_audio=0;
        flags = avio_r8(s->pb);
        size--;
        if ((flags & 0xf0) == 0x50) /* video info / command frame */
            goto skip;
    } else {
        if (type == FLV_TAG_TYPE_META && size > 13+1+4)
            flv_read_metabody(s, next);
        else /* skip packet */
            av_log(s, AV_LOG_DEBUG, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags);
    skip:
        avio_seek(s->pb, next, SEEK_SET);
        continue;
    }

    /* skip empty data packets */
    if (!size)
        continue;

    /* now find stream */
    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];
        if (st->id == is_audio)
            break;
    }
    if(i == s->nb_streams){
        av_log(s, AV_LOG_ERROR, "invalid stream\n");
        st= create_stream(s, is_audio);
        s->ctx_flags &= ~AVFMTCTX_NOHEADER;
    }
    av_dlog(s, "%d %X %d \n", is_audio, flags, st->discard);
    if(  (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY ||         is_audio))
       ||(st->discard >= AVDISCARD_BIDIR  &&  ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && !is_audio))
       || st->discard >= AVDISCARD_ALL
       ){
        avio_seek(s->pb, next, SEEK_SET);
        continue;
    }
    if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY)
        av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);
    break;
 }

    // if not streamed and no duration from metadata then seek to end to find the duration from the timestamps
    if(s->pb->seekable && (!s->duration || s->duration==AV_NOPTS_VALUE)){
        int size;
        const int64_t pos= avio_tell(s->pb);
        const int64_t fsize= avio_size(s->pb);
        avio_seek(s->pb, fsize-4, SEEK_SET);
        size= avio_rb32(s->pb);
        avio_seek(s->pb, fsize-3-size, SEEK_SET);
        if(size == avio_rb24(s->pb) + 11){
            uint32_t ts = avio_rb24(s->pb);
            ts |= avio_r8(s->pb) << 24;
            s->duration = ts * (int64_t)AV_TIME_BASE / 1000;
        }
        avio_seek(s->pb, pos, SEEK_SET);
    }

    if(is_audio){
        if(!st->codec->channels || !st->codec->sample_rate || !st->codec->bits_per_coded_sample) {
            st->codec->channels = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1;
            st->codec->sample_rate = (44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >> FLV_AUDIO_SAMPLERATE_OFFSET) >> 3);
            st->codec->bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
        }
        if(!st->codec->codec_id){
            flv_set_audio_codec(s, st, flags & FLV_AUDIO_CODECID_MASK);
        }
    }else{
        size -= flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK);
    }

    if (st->codec->codec_id == CODEC_ID_AAC ||
        st->codec->codec_id == CODEC_ID_H264) {
        int type = avio_r8(s->pb);
        size--;
        if (st->codec->codec_id == CODEC_ID_H264) {
            int32_t cts = (avio_rb24(s->pb)+0xff800000)^0xff800000; // sign extension
            pts = dts + cts;
            if (cts < 0) { // dts are wrong
                flv->wrong_dts = 1;
                av_log(s, AV_LOG_WARNING, "negative cts, previous timestamps might be wrong\n");
            }
            if (flv->wrong_dts)
                dts = AV_NOPTS_VALUE;
        }
        if (type == 0) {
            if ((ret = flv_get_extradata(s, st, size)) < 0)
                return ret;
            if (st->codec->codec_id == CODEC_ID_AAC) {
                MPEG4AudioConfig cfg;
                ff_mpeg4audio_get_config(&cfg, st->codec->extradata,
                                         st->codec->extradata_size);
                st->codec->channels = cfg.channels;
                if (cfg.ext_sample_rate)
                    st->codec->sample_rate = cfg.ext_sample_rate;
                else
                    st->codec->sample_rate = cfg.sample_rate;
                av_dlog(s, "mp4a config channels %d sample rate %d\n",
                        st->codec->channels, st->codec->sample_rate);
            }

            ret = AVERROR(EAGAIN);
            goto leave;
        }
    }

    /* skip empty data packets */
    if (!size) {
        ret = AVERROR(EAGAIN);
        goto leave;
    }
    // read the payload data from the AVIOContext
    ret= av_get_packet(s->pb, pkt, size);
    if (ret < 0) {
        return AVERROR(EIO);
    }
    /* note: we need to modify the packet size here to handle the last
       packet */
    pkt->size = ret;
    pkt->dts = dts;
    pkt->pts = pts == AV_NOPTS_VALUE ? dts : pts;
    pkt->stream_index = st->index;

    if (is_audio || ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY))
        pkt->flags |= AV_PKT_FLAG_KEY;

leave:
    avio_skip(s->pb, 4);
    return ret;
}
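The avio_r8/avio_rb24 calls at the top of the loop follow the FLV tag header layout directly: 1 byte tag type, 3 bytes payload size, a 3-byte timestamp plus 1 extension byte, 3 bytes stream id (always 0), then the payload, and finally the 4-byte PreviousTagSize that the for(;;avio_skip(s->pb, 4)) skips. The same parsing against a plain byte buffer, just to make the layout explicit (buf is assumed to point at the start of one complete tag):

// Parse one FLV tag header from a byte buffer; all fields are big-endian.
const uint8_t *p = buf;                                  // assumption: start of a tag
int     type = p[0];                                     // 8 = audio, 9 = video, 18 = script data
int     size = (p[1] << 16) | (p[2] << 8) | p[3];        // payload size
int64_t dts  = (p[4] << 16) | (p[5] << 8) | p[6];        // timestamp, low 24 bits
dts         |= (int64_t)p[7] << 24;                      // timestamp extension byte
int stream_id = (p[8] << 16) | (p[9] << 8) | p[10];      // always 0
const uint8_t *payload = p + 11;                         // 'size' bytes of payload
// after the payload comes a 4-byte PreviousTagSize (equal to size + 11)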

Next, av_get_packet():

int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
{
    // allocate and initialize pkt
    int ret= av_new_packet(pkt, size);

    if(ret<0)
        return ret;

    pkt->pos= avio_tell(s);
    
    // read the data from the I/O context
    ret= avio_read(s, pkt->data, size);
    if(ret<=0)
        av_free_packet(pkt);
    else
        av_shrink_packet(pkt, ret);

    return ret;
}

Finally, avio_read:

int avio_read(AVIOContext *s, unsigned char *buf, int size)
{
    int len, size1;

    size1 = size;
    while (size > 0) {
        len = s->buf_end - s->buf_ptr;
        if (len > size)
            len = size;
        if (len == 0) {
            if(size > s->buffer_size && !s->update_checksum){
                if(s->read_packet)
                    // call the read_packet function pointer stored in the AVIOContext
                    len = s->read_packet(s->opaque, buf, size);
                if (len <= 0) {
                    /* do not modify buffer if EOF reached so that a seek back can
                    be done without rereading data */
                    s->eof_reached = 1;
                    if(len<0)
                        s->error= len;
                    break;
                } else {
                    s->pos += len;
                    size -= len;
                    buf += len;
                    s->buf_ptr = s->buffer;
                    s->buf_end = s->buffer/* + len*/;
                }
            }else{
                fill_buffer(s);
                len = s->buf_end - s->buf_ptr;
                if (len == 0)
                    break;
            }
        } else {
            memcpy(buf, s->buf_ptr, len);
            buf += len;
            s->buf_ptr += len;
            size -= len;
        }
    }
    if (size1 == size) {
        if (s->error)      return s->error;
        if (url_feof(s))   return AVERROR_EOF;
    }
    return size1 - size;
}

The AVIOContext is normally created and initialized by avformat_open_input (see a bookmarked article for an analysis of how that function opens a media file). For this project the AVIOContext has to be initialized with a custom setup instead. FFmpeg's own initialization code is listed below as a reference for that custom initialization.

int ffio_fdopen(AVIOContext **s, URLContext *h)
{
    uint8_t *buffer;
    int buffer_size, max_packet_size;

    max_packet_size = h->max_packet_size;
    if (max_packet_size) {
        buffer_size = max_packet_size; /* no need to bufferize more than one packet */
    } else {
        buffer_size = IO_BUFFER_SIZE;
    }
    buffer = av_malloc(buffer_size);
    if (!buffer)
        return AVERROR(ENOMEM);

    *s = av_mallocz(sizeof(AVIOContext));
    if(!*s) {
        av_free(buffer);
        return AVERROR(ENOMEM);
    }
    // initialize the AVIOContext
    if (ffio_init_context(*s, buffer, buffer_size,
                      h->flags & AVIO_FLAG_WRITE, h,
                      (void*)ffurl_read, (void*)ffurl_write, (void*)ffurl_seek) < 0) {
        av_free(buffer);
        av_freep(s);
        return AVERROR(EIO);
    }
#if FF_API_OLD_AVIO
    (*s)->is_streamed = h->is_streamed;
#endif
    (*s)->seekable = h->is_streamed ? 0 : AVIO_SEEKABLE_NORMAL;
    (*s)->max_packet_size = max_packet_size;
    if(h->prot) {
        (*s)->read_pause = (int (*)(void *, int))h->prot->url_read_pause;
        (*s)->read_seek  = (int64_t (*)(void *, int, int64_t, int))h->prot->url_read_seek;
    }
    return 0;
}

int ffio_init_context(AVIOContext *s,
                  unsigned char *buffer,
                  int buffer_size,
                  int write_flag,
                  void *opaque,
                  int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
                  int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
                  int64_t (*seek)(void *opaque, int64_t offset, int whence))
{
    s->buffer = buffer;
    s->buffer_size = buffer_size;
    s->buf_ptr = buffer;
    s->opaque = opaque;
    url_resetbuf(s, write_flag ? AVIO_FLAG_WRITE : AVIO_FLAG_READ);
    s->write_packet = write_packet;
    s->read_packet = read_packet;
    s->seek = seek;
    s->pos = 0;
    s->must_flush = 0;
    s->eof_reached = 0;
    s->error = 0;
#if FF_API_OLD_AVIO
    s->is_streamed = 0;
#endif
    s->seekable = AVIO_SEEKABLE_NORMAL;
    s->max_packet_size = 0;
    s->update_checksum= NULL;
    if(!read_packet && !write_flag){
        s->pos = buffer_size;
        s->buf_end = s->buffer + buffer_size;
    }
    s->read_pause = NULL;
    s->read_seek  = NULL;
    return 0;
}
From the analysis above: define a custom AVIOContext, point it at the project's own data source, and install the appropriate callbacks.
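There is no need to call ffio_fdopen or ffio_init_context directly: the public helper avio_alloc_context() fills in the same fields. A minimal sketch of how the custom I/O context could be wired up for this project (my_read_cb, my_source and pull_from_my_source are placeholders for the project's own buffer, not FFmpeg APIs):

// Custom AVIOContext: FFmpeg calls my_read_cb whenever avio_read() needs
// more bytes, so the data can come from any network buffer or queue.
static int my_read_cb(void *opaque, uint8_t *buf, int buf_size)
{
    // 'opaque' is whatever is passed to avio_alloc_context() below;
    // copy up to buf_size bytes into buf and return the number copied
    return pull_from_my_source(opaque, buf, buf_size);  // hypothetical helper
}

int io_buf_size = 32 * 1024;
uint8_t *io_buf = av_malloc(io_buf_size);

AVIOContext *avio = avio_alloc_context(io_buf, io_buf_size,
                                       0,           // 0 = read only
                                       my_source,   // handed back as 'opaque' (placeholder)
                                       my_read_cb,  // read_packet callback
                                       NULL,        // no write callback
                                       NULL);       // no seek: a pure stream

AVFormatContext *fmt_ctx = avformat_alloc_context();
fmt_ctx->pb = avio;  // hand the custom I/O context to the demuxer

avformat_open_input() can then be called with an empty filename, which is exactly what the snippet below does.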

Below is a snippet found online that probes the format of a network stream, which does something very close to what is needed here. It has not been tested yet; I am recording it here and will test it later.

#include "utils.h"
#include <pthread.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

UdpQueue recvqueue;
UdpParam udpParam;

//注册av_read_frame的回调函数,这里只是最简处理,实际应用中应加上出错处理,超时等待...
int read_data(void *opaque, uint8_t *buf, int buf_size) {
int size = buf_size;
int ret;
// printf("read data %d\n", buf_size);
do {
ret = get_queue(&recvqueue, buf, buf_size);
} while (ret);

// printf("read data Ok %d\n", buf_size);
return size;
}

#define BUF_SIZE 4096*500

int main(int argc, char** argv) {

    init_queue(&recvqueue, 1024*500);

    udpParam.argv = argv;
    udpParam.queue = &recvqueue;
    uint8_t *buf = av_mallocz(sizeof(uint8_t)*BUF_SIZE);

    // UDP receive thread
    pthread_t udp_recv_thread;
    pthread_create(&udp_recv_thread, NULL, udp_ts_recv, &udpParam);
    pthread_detach(udp_recv_thread);

    av_register_all();

    AVCodec *pVideoCodec, *pAudioCodec;
    AVCodecContext *pVideoCodecCtx = NULL;
    AVCodecContext *pAudioCodecCtx = NULL;
    AVIOContext *pb = NULL;
    AVInputFormat *piFmt = NULL;
    AVFormatContext *pFmt = NULL;

    // step 1: allocate an AVIOContext with the custom read callback
    pb = avio_alloc_context(buf, BUF_SIZE, 0, NULL, read_data, NULL, NULL);
    if (!pb) {
        fprintf(stderr, "avio alloc failed!\n");
        return -1;
    }
    // step 2: probe the stream format
    if (av_probe_input_buffer(pb, &piFmt, "", NULL, 0, 0) < 0) {
        fprintf(stderr, "probe failed!\n");
        return -1;
    } else {
        fprintf(stdout, "probe success!\n");
        fprintf(stdout, "format: %s[%s]\n", piFmt->name, piFmt->long_name);
    }

    pFmt = avformat_alloc_context();
    pFmt->pb = pb; // step 3: the key step, hand the custom I/O context to the format context
    // step 4: open the stream
    if (avformat_open_input(&pFmt, "", piFmt, NULL) < 0) {
        fprintf(stderr, "avformat open failed.\n");
        return -1;
    } else {
        fprintf(stdout, "open stream success!\n");
    }
    // from here on it is the same as handling a file
    if (av_find_stream_info(pFmt) < 0) {
        fprintf(stderr, "could not find stream.\n");
        return -1;
    }

    av_dump_format(pFmt, 0, "", 0);

    int videoindex = -1;
    int audioindex = -1;
    for (int i = 0; i < pFmt->nb_streams; i++) {
        if ( (pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
             (videoindex < 0) ) {
            videoindex = i;
        }
        if ( (pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) &&
             (audioindex < 0) ) {
            audioindex = i;
        }
    }

    if (videoindex < 0 || audioindex < 0) {
        fprintf(stderr, "videoindex=%d, audioindex=%d\n", videoindex, audioindex);
        return -1;
    }

    AVStream *pVst, *pAst;
    pVst = pFmt->streams[videoindex];
    pAst = pFmt->streams[audioindex];

    pVideoCodecCtx = pVst->codec;
    pAudioCodecCtx = pAst->codec;

    pVideoCodec = avcodec_find_decoder(pVideoCodecCtx->codec_id);
    if (!pVideoCodec) {
        fprintf(stderr, "could not find video decoder!\n");
        return -1;
    }
    if (avcodec_open(pVideoCodecCtx, pVideoCodec) < 0) {
        fprintf(stderr, "could not open video codec!\n");
        return -1;
    }

    pAudioCodec = avcodec_find_decoder(pAudioCodecCtx->codec_id);
    if (!pAudioCodec) {
        fprintf(stderr, "could not find audio decoder!\n");
        return -1;
    }
    if (avcodec_open(pAudioCodecCtx, pAudioCodec) < 0) {
        fprintf(stderr, "could not open audio codec!\n");
        return -1;
    }

    int got_picture;
    uint8_t samples[AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2];
    AVFrame *pframe = avcodec_alloc_frame();
    AVPacket pkt;
    av_init_packet(&pkt);

    while (1) {
        if (av_read_frame(pFmt, &pkt) >= 0) {

            if (pkt.stream_index == videoindex) {
                fprintf(stdout, "pkt.size=%d,pkt.pts=%lld, pkt.data=0x%x.", pkt.size, pkt.pts, (unsigned int)pkt.data);
                avcodec_decode_video2(pVideoCodecCtx, pframe, &got_picture, &pkt);
                if (got_picture) {
                    fprintf(stdout, "decode one video frame!\n");
                }
            } else if (pkt.stream_index == audioindex) {
                int frame_size = AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2;
                if (avcodec_decode_audio3(pAudioCodecCtx, (int16_t *)samples, &frame_size, &pkt) >= 0) {
                    fprintf(stdout, "decode one audio frame!\n");
                }
            }
            av_free_packet(&pkt);
        }
    }

    av_free(buf);
    av_free(pframe);
    free_queue(&recvqueue);
    return 0;
}
This code was written by windragon0419; thanks to the author.
Once it has been tested, I will write up how the problem was solved.
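The snippet depends on a UdpQueue and the init_queue/get_queue/free_queue helpers from utils.h, which are not shown anywhere. Purely as a guess at what that interface might look like (this is my own sketch, not the original author's code), a simple mutex-protected byte FIFO would be enough:

// Hypothetical byte FIFO, a guess at the UdpQueue interface used above;
// the original utils.h is not available.
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>

typedef struct {
    uint8_t        *data;
    int             size, head, count;
    pthread_mutex_t lock;
} UdpQueue;

int init_queue(UdpQueue *q, int size) {
    q->data  = malloc(size);
    q->size  = size;
    q->head  = 0;
    q->count = 0;
    pthread_mutex_init(&q->lock, NULL);
    return q->data ? 0 : -1;
}

// Producer side: the UDP receive thread appends what it got from the socket.
void put_queue(UdpQueue *q, const uint8_t *buf, int len) {
    pthread_mutex_lock(&q->lock);
    for (int i = 0; i < len && q->count < q->size; i++) {  // drop on overflow
        q->data[(q->head + q->count) % q->size] = buf[i];
        q->count++;
    }
    pthread_mutex_unlock(&q->lock);
}

// Consumer side: copies 'len' bytes if available and returns 0; returns 1 when
// not enough data has arrived yet, so read_data() above keeps retrying.
int get_queue(UdpQueue *q, uint8_t *buf, int len) {
    pthread_mutex_lock(&q->lock);
    if (q->count < len) {
        pthread_mutex_unlock(&q->lock);
        return 1;
    }
    for (int i = 0; i < len; i++) {
        buf[i]  = q->data[q->head];
        q->head = (q->head + 1) % q->size;
        q->count--;
    }
    pthread_mutex_unlock(&q->lock);
    return 0;
}

void free_queue(UdpQueue *q) {
    free(q->data);
    pthread_mutex_destroy(&q->lock);
}

The UDP receive thread would call put_queue() with whatever it reads from the socket, and read_data() keeps calling get_queue() until a full buffer's worth of data has arrived.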
