FFmpeg H.264 decoding, adapted from material found online

Posted 2013-12-04 00:34:13

/*
 * Copyright (c) 2001 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavcodec API use example.
 *
 * Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
 * not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...).
 * See library 'libavformat' for the format handling (a libavformat-based
 * sketch is appended after the listing).
 * @example doc/examples/decoding_encoding.c
 */

#ifdef _MSC_VER
#define _USE_MATH_DEFINES   /* expose M_PI from math.h under MSVC */
#endif
#include <math.h>

#include <opencv2/opencv.hpp>

#ifdef __cplusplus
extern "C" {
#endif


#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libswscale/swscale.h>


#ifdef __cplusplus
}
#endif

#if _MSC_VER
#define snprintf _snprintf
#endif

static void build_avpkt(AVPacket *avpkt, FILE *fp);

#define INBUF_SIZE 4096
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096


/* global RGB frame and OpenCV image used by decode_write_frame() to display decoded video */
AVFrame frameRGB;
IplImage *showImage;

/* check that a given sample format is supported by the encoder */
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;

    while (*p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}

/* just pick the highest supported samplerate */
static int select_sample_rate(AVCodec *codec)
{
    const int *p;
    int best_samplerate = 0;

    if (!codec->supported_samplerates)
        return 44100;

    p = codec->supported_samplerates;
    while (*p) {
        best_samplerate = FFMAX(*p, best_samplerate);
        p++;
    }
    return best_samplerate;
}

/* select layout with the highest channel count */
static int select_channel_layout(AVCodec *codec)
{
    const uint64_t *p;
    uint64_t best_ch_layout = 0;
    int best_nb_channels   = 0;

    if (!codec->channel_layouts)
        return AV_CH_LAYOUT_STEREO;

    p = codec->channel_layouts;
    while (*p) {
        int nb_channels = av_get_channel_layout_nb_channels(*p);

        if (nb_channels > best_nb_channels) {
            best_ch_layout   = *p;
            best_nb_channels = nb_channels;
        }
        p++;
    }
    return best_ch_layout;
}

/*
 * Audio encoding example
 */
static void audio_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    AVFrame *frame;
    AVPacket pkt;
    int i, j, k, ret, got_output;
    int buffer_size;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    printf("Encode audio file %s\n", filename);

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* frame containing input raw audio */
    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples     = c->frame_size;
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* the codec gives us the frame size, in samples,
     * we calculate the size of the samples buffer in bytes */
    buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
                                             c->sample_fmt, 0);
    samples = (uint16_t *)av_malloc(buffer_size);
    if (!samples) {
        fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
                buffer_size);
        exit(1);
    }
    /* setup the data pointers in the AVFrame */
    ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                                   (const uint8_t*)samples, buffer_size, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not setup audio frame\n");
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for(i=0;i<200;i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        for (j = 0; j < c->frame_size; j++) {
            samples[2*j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2*j + k] = samples[2*j];
            t += tincr;
        }
        /* encode the samples */
        ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }
        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }
    fclose(f);

    av_freep(&samples);
    avcodec_free_frame(&frame);
    avcodec_close(c);
    av_free(c);
}

/*
 * Audio decoding.
 */
static void audio_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int len;
    FILE *f, *outfile;
    uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;
    AVFrame *decoded_frame = NULL;

    av_init_packet(&avpkt);

    printf("Decode audio file %s to %s\n", filename, outfilename);

    /* find the mpeg audio decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    outfile = fopen(outfilename, "wb");
    if (!outfile) {
        av_free(c);
        exit(1);
    }

    /* decode until eof */
    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

    while (avpkt.size > 0) {
        int got_frame = 0;

        if (!decoded_frame) {
            if (!(decoded_frame = avcodec_alloc_frame())) {
                fprintf(stderr, "Could not allocate audio frame\n");
                exit(1);
            }
        } else
            avcodec_get_frame_defaults(decoded_frame);

        len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding\n");
            exit(1);
        }
        if (got_frame) {
            /* if a frame has been decoded, output it */
            int data_size = av_samples_get_buffer_size(NULL, c->channels,
                                                       decoded_frame->nb_samples,
                                                       c->sample_fmt, 1);
            fwrite(decoded_frame->data[0], 1, data_size, outfile);
        }
        avpkt.size -= len;
        avpkt.data += len;
        avpkt.dts =
        avpkt.pts = AV_NOPTS_VALUE;
        if (avpkt.size < AUDIO_REFILL_THRESH) {
            /* Refill the input buffer, to avoid trying to decode
             * incomplete frames. Instead of this, one could also use
             * a parser, or use a proper container format through
             * libavformat. */
            memmove(inbuf, avpkt.data, avpkt.size);
            avpkt.data = inbuf;
            len = fread(avpkt.data + avpkt.size, 1,
                        AUDIO_INBUF_SIZE - avpkt.size, f);
            if (len > 0)
                avpkt.size += len;
        }
    }

    fclose(outfile);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    avcodec_free_frame(&decoded_frame);
}

/*
 * Video encoding example
 */
static void video_encode_example(const char *filename, AVCodecID codec_id)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *frame;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Encode video file %s\n", filename);

    /* find the video encoder for the requested codec_id */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second: the C99 compound literal used by the original example,
     * c->time_base = (AVRational){1,25}, does not compile as C++ under MSVC,
     * so the struct is filled in explicitly */
    AVRational avrational;
    avrational.num = 1;
    avrational.den = 25;
    c->time_base = avrational;

    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if(codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }

    /* encode 1 second of video */
    for(i=0;i<25;i++) {
        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for(y=0;y<c->height;y++) {
            for(x=0;x<c->width;x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for(y=0;y<c->height/2;y++) {
            for(x=0;x<c->width/2;x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        frame->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    avcodec_free_frame(&frame);
    printf("\n");
}

/*
 * Video decoding example
 */

static void pgm_save_testh264(unsigned char *buf, int wrap, int xsize, int ysize, FILE *f)
{
    int i;
 //buf += 64;
    //fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
    for(i=0;i<ysize;i++)
        fwrite(buf + i * wrap,1,xsize,f);
}

 


static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     char *filename)
{
    FILE *f;
    int i;

    f=fopen(filename,"w");
    fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
    for(i=0;i<ysize;i++)
        fwrite(buf + i * wrap,1,xsize,f);
    fclose(f);
}
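
/* Optional refinement (a sketch, not part of the original program): decode_write_frame()
 * below creates a new SwsContext for every decoded frame. sws_getCachedContext() reuses
 * the previous context as long as the parameters are unchanged, which avoids that cost.
 * The helper name is hypothetical and the function is left unused here; like the inline
 * code below, it converts into the global frameRGB buffer. */
static struct SwsContext *cached_sws_ctx = NULL;

static int yuv420p_frame_to_rgb(AVCodecContext *avctx, AVFrame *frame)
{
    cached_sws_ctx = sws_getCachedContext(cached_sws_ctx,
                                          frame->width, frame->height, PIX_FMT_YUV420P,
                                          frame->width, frame->height, PIX_FMT_RGB24,
                                          SWS_BILINEAR, NULL, NULL, NULL);
    if (!cached_sws_ctx)
        return -1;
    /* convert the decoded YUV420P picture into packed RGB24 */
    sws_scale(cached_sws_ctx, frame->data, frame->linesize, 0, avctx->height,
              frameRGB.data, frameRGB.linesize);
    return 0;
}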

static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
                              AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
    int len, got_frame;
    char buf[1024];

    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (len < 0) {
        fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
        return len;
    }
    if (got_frame) {
        printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
        fflush(stdout);

        /* display the frame: create a conversion context to turn the decoded
         * YUV420P picture into RGB24 for OpenCV */
        SwsContext* scxt = sws_getContext(frame->width, frame->height, PIX_FMT_YUV420P,
                                          frame->width, frame->height, PIX_FMT_RGB24,
                                          SWS_BILINEAR, NULL, NULL, NULL);
        if (scxt != NULL)
        {
            /* convert the pixel format */
            sws_scale(scxt, frame->data, frame->linesize, 0, avctx->height,
                      frameRGB.data, frameRGB.linesize);
            /* point the IplImage at the converted RGB buffer
             * (widthStep, not imageSize, is the per-row stride HighGUI uses) */
            showImage->widthStep = frameRGB.linesize[0];
            showImage->imageData = (char *)frameRGB.data[0];
            cvShowImage("decode", showImage);
            /* give HighGUI a chance to redraw; without a cvWaitKey() call inside
             * this loop the window is never updated */
            cvWaitKey(1);
            sws_freeContext(scxt);  /* otherwise one context leaks per frame */
        }

        /* the picture is allocated by the decoder, no need to free it */
        snprintf(buf, sizeof(buf), outfilename, *frame_count);
 //       pgm_save(frame->data[0], frame->linesize[0],
 //                avctx->width, avctx->height, buf);
        (*frame_count)++;
    }
    if (pkt->data) {
        pkt->size -= len;
        pkt->data += len;
    }
    return 0;
}

static void video_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int frame_count;
    FILE *f;
    AVFrame *frame;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;

    av_init_packet(&avpkt);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Decode video file %s to %s\n", filename, outfilename);

    /* find the mpeg1 video decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }


    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if(codec->capabilities&CODEC_CAP_TRUNCATED)
        c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    frame_count = 0;
    for(;;) {

        //avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);  /* plain fread is not enough; use build_avpkt() */
        build_avpkt(&avpkt, f);
        if (avpkt.size == 0)
            break;


        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.

           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (mpeg1video), so we
           feed decoder and see if it could decode a frame */
        //avpkt.data = inbuf;
        while (avpkt.size > 0)
            if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
                exit(1);
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);

    fclose(f);

    avcodec_close(c);
    av_free(c);
    avcodec_free_frame(&frame);
    printf("\n");


}

/* Scan for the next 4-byte Annex-B start code (00 00 00 01), skipping the first
 * 512 bytes so the current NAL unit is not matched again. Note that 3-byte start
 * codes (00 00 01) are not detected by this scan. Returns 0 if none was found. */
static int _find_head(unsigned char *buffer, int len)
{
    int i;

    for (i = 512; i + 3 < len; i++)   /* stop early so buffer[i+3] stays in bounds */
    {
        if (buffer[i] == 0
            && buffer[i+1] == 0
            && buffer[i+2] == 0
            && buffer[i+3] == 1)
            break;
    }
    if (i + 3 >= len)
        return 0;
    if (i == 512)
        return 0;
    return i;
}

#define FILE_READING_BUFFER (1*1024*1024)  
static void build_avpkt(AVPacket *avpkt, FILE *fp)
{
#if 0
    int len;
    static unsigned char buffer[INBUF_SIZE];
    len = fread(buffer, 1, INBUF_SIZE, fp);
    avpkt->data = buffer;
    avpkt->size = len;
#else
    static unsigned char buffer[FILE_READING_BUFFER];
    static int readptr = 0;
    static int writeptr = 0;
    int len, toread;

    int nexthead;

    if (writeptr - readptr < 200 * 1024)
    {
        /* compact the buffer and top it up from the file */
        memmove(buffer, &buffer[readptr], writeptr - readptr);
        writeptr -= readptr;
        readptr = 0;
        toread = FILE_READING_BUFFER - writeptr;
        len = fread(&buffer[writeptr], 1, toread, fp);
        writeptr += len;
    }

    /* hand out everything up to the next start code as one packet */
    nexthead = _find_head(&buffer[readptr], writeptr - readptr);
    if (nexthead == 0)
    {
        printf("failed to find next head...\n");
        nexthead = writeptr - readptr;
    }

    avpkt->size = nexthead;
    avpkt->data = &buffer[readptr];
    readptr += nexthead;
#endif
}
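
/* Alternative approach (a sketch, not used by the original program): libavcodec ships a
 * parser that splits an Annex-B H.264 elementary stream into decodable packets, which
 * replaces the hand-rolled start-code scan in build_avpkt(). The helper below is
 * hypothetical; it assumes the caller created the parser with av_parser_init(AV_CODEC_ID_H264)
 * and passes the decoder's AVCodecContext. Returns the packet size, or 0 at end of file. */
static int build_avpkt_with_parser(AVCodecParserContext *parser, AVCodecContext *avctx,
                                   AVPacket *avpkt, FILE *fp)
{
    static uint8_t buffer[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    static int buf_len = 0;     /* valid bytes currently in buffer */
    static int buf_pos = 0;     /* bytes already handed to the parser */
    uint8_t *out_data = NULL;
    int out_size = 0;

    while (out_size == 0) {
        int used;
        if (buf_pos >= buf_len) {
            buf_len = fread(buffer, 1, INBUF_SIZE, fp);
            buf_pos = 0;
            if (buf_len <= 0) {
                /* at EOF, feed a zero-length buffer once to flush what the parser still holds */
                av_parser_parse2(parser, avctx, &out_data, &out_size,
                                 buffer, 0, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
                break;
            }
            memset(buffer + buf_len, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        }
        /* the parser buffers input internally and reports how many bytes it consumed */
        used = av_parser_parse2(parser, avctx, &out_data, &out_size,
                                buffer + buf_pos, buf_len - buf_pos,
                                AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (used < 0)
            return -1;
        buf_pos += used;
    }

    avpkt->data = out_data;
    avpkt->size = out_size;
    return out_size;
}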

static void video_decode_h264(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int frame_count;
    FILE *f;
    AVFrame *frame;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;

 //AVDictionary *opts;

    av_init_packet(&avpkt);

  //opts = NULL;
    //av_dict_set(&opts, "b", "2.5M", 0);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Decode video file %s to %s\n", filename, outfilename);

    /* find the H.264 video decoder */
    codec = avcodec_find_decoder(CODEC_ID_H264);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

 

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /*
    // initialize stream parameters here if needed; the actual values depend on the source
    c->time_base.num = 1;
    c->frame_number = 1;             // one video frame per packet
    c->codec_type = AVMEDIA_TYPE_VIDEO;
    c->bit_rate = 0;
    c->time_base.den = 25;           // frame rate
    c->width = 352;                  // video width
    c->height = 288;                 // video height
    */

    if(codec->capabilities&CODEC_CAP_TRUNCATED)
        c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    frame_count = 0;
    for(;;) {
        //avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        build_avpkt(&avpkt, f);
        if (avpkt.size == 0)
            break;

        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.

           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (H.264), so we
           feed the decoder and see if it could decode a frame */
        //avpkt.data = inbuf;
        while (avpkt.size > 0)
            if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
                exit(1);
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);

    fclose(f);

    avcodec_close(c);
    av_free(c);
    avcodec_free_frame(&frame);
    printf("\n");


}


static void video_decode_testh264(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int frame, got_picture, len;
    FILE *f, *fout;
    AVFrame *picture;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    char buf[1024];
    AVPacket avpkt;
    AVDictionary *opts;
 
    av_init_packet(&avpkt);
 
    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
 
    printf("Video decodingn");
    opts = NULL;
    //av_dict_set(&opts, "b", "2.5M", 0);
    /* find the mpeg1 video decoder */
    codec = avcodec_find_decoder(CODEC_ID_H264);
    if(!codec) {
        fprintf(stderr,"codec not foundn");
        exit(1);
    }
 
    c = avcodec_alloc_context3(codec);
    picture= avcodec_alloc_frame();
 
    if(codec->capabilities&CODEC_CAP_TRUNCATED)
        c->flags|= CODEC_FLAG_TRUNCATED;/* we do not send complete frames */
 
    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */
 
    /* open it */
    if(avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr,"could not open codecn");
        exit(1);
    }
 
  
    fout = fopen(outfilename, "wb");
    if (!fout) {
        fprintf(stderr, "could not open %s\n", outfilename);
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }
 
    frame = 0;
    for(;;) {
        //avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        build_avpkt(&avpkt, f);
        if(avpkt.size == 0)
            break;
 
        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.
 
           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */
 
        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */
 
        /* here, we use a stream based decoder (H.264), so we
           feed the decoder and see if it could decode a frame */
        //avpkt.data = inbuf;
        while(avpkt.size > 0) {
            len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
            if(len < 0) {
                fprintf(stderr, "Error while decoding frame %d\n", frame);
                break;
                //   exit(1);
            }
            if(got_picture) {
                printf("saving frame %3d\n", frame);
                fflush(stdout);
 
                /* the picture is allocated by the decoder. no need to
                   free it */
                sprintf(buf, outfilename, frame);
                pgm_save_testh264(picture->data[0], picture->linesize[0],
                         c->width, c->height, fout);
                pgm_save_testh264(picture->data[1], picture->linesize[1],
                         c->width/2, c->height/2, fout);
                pgm_save_testh264(picture->data[2], picture->linesize[2],
                         c->width/2, c->height/2, fout);

                frame++;
            }
            avpkt.size -= len;
            avpkt.data += len;
        }
    }
 
    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
    if(got_picture) {
        printf("saving last frame %3dn", frame);
        fflush(stdout);
 
        /* the picture is allocated by the decoder. no need to
           free it */
        sprintf(buf, outfilename, frame);
        //pgm_save(picture->data[0], picture->linesize[0],
          //       c->width, c->height, fout);
        pgm_save_testh264(picture->data[0], picture->linesize[0], c->width,   c->height,   fout);
        pgm_save_testh264(picture->data[1], picture->linesize[1], c->width/2, c->height/2, fout);
        pgm_save_testh264(picture->data[2], picture->linesize[2], c->width/2, c->height/2, fout);

        frame++;
    }
 
    fclose(f);
    fclose(fout);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");
}

 

int main(int argc, char **argv)
{
    const char *output_type;

    /* register all the codecs */
    avcodec_register_all();

    if (argc < 2) {
        printf("usage: %s output_type\n"
               "API example program to decode/encode a media stream with libavcodec.\n"
               "This program generates a synthetic stream and encodes it to a file\n"
               "named test.h264, test.mp2 or test.mpg depending on output_type.\n"
               "The encoded stream is then decoded and written to a raw data output.\n"
               "output_type must be choosen between 'h264', 'mp2', 'mpg'.\n",
               argv[0]);
        return 1;
    }
    output_type = argv[1];


    /* 352x288 RGB image and window used by decode_write_frame() for display;
     * the size must match the decoded video */
    showImage = cvCreateImage(cvSize(352, 288), IPL_DEPTH_8U, 3);
    avpicture_alloc((AVPicture*)&frameRGB, PIX_FMT_RGB24, 352, 288);
    cvNamedWindow("decode");

    if (!strcmp(output_type, "h264")) {
        //video_encode_example("test.h264", AV_CODEC_ID_H264);
        video_decode_h264("test%02d.pgm", "test.h264");
        //video_decode_testh264("d:\\output.yuv", "test.h264");
    } else if (!strcmp(output_type, "mp2")) {
        audio_encode_example("test.mp2");
        audio_decode_example("test.sw", "test.mp2");
    } else if (!strcmp(output_type, "mpg")) {
        video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
        video_decode_example("test%02d.pgm", "test.mpg");
    } else {
        fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
                output_type);
        return 1;
    }
 //System("Pause");
    return 0;
}
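
The listing above feeds the decoder by scanning raw Annex-B start codes itself (build_avpkt / _find_head). When the H.264 stream sits inside a container (MP4, MKV, FLV, MPEG-TS, ...), the cleaner route is to let libavformat do the packetizing, as the header comment notes. The function below is only a minimal sketch of that path, not part of the program above: it assumes the same FFmpeg 1.x/2.x-era API as the listing, an added #include <libavformat/avformat.h> inside the extern "C" block, and linking against avformat; the function name is hypothetical.

static void video_decode_from_container(const char *filename)
{
    AVFormatContext *fmt = NULL;
    AVCodecContext *dec_ctx;
    AVCodec *dec;
    AVFrame *frame;
    AVPacket pkt;
    int stream_index, got_frame, frame_count = 0;

    av_register_all();   /* registers the demuxers */

    if (avformat_open_input(&fmt, filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    if (avformat_find_stream_info(fmt, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }
    stream_index = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (stream_index < 0) {
        fprintf(stderr, "No video stream found\n");
        exit(1);
    }

    /* the demuxer has already filled width/height/extradata (SPS/PPS) for us */
    dec_ctx = fmt->streams[stream_index]->codec;
    if (avcodec_open2(dec_ctx, dec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    frame = avcodec_alloc_frame();

    /* av_read_frame() returns one complete packet per call, so no start-code scanning */
    while (av_read_frame(fmt, &pkt) >= 0) {
        if (pkt.stream_index == stream_index) {
            if (avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt) < 0) {
                fprintf(stderr, "Error while decoding frame %d\n", frame_count);
                break;
            }
            if (got_frame)
                frame_count++;     /* frame->data[] now holds the decoded picture */
        }
        av_free_packet(&pkt);
    }

    /* flush the frames the decoder still holds back */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    do {
        if (avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt) < 0)
            break;
        if (got_frame)
            frame_count++;
    } while (got_frame);

    avcodec_close(dec_ctx);
    avcodec_free_frame(&frame);
    avformat_close_input(&fmt);
    printf("Decoded %d frames\n", frame_count);
}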
