Encoding YUV420P pixel data into a JPEG image

Reposted from: http://www.cnblogs.com/ht-beyond/p/5265710.html

For code using the newer FFmpeg API, see: http://blog.csdn.net/x_iya/article/details/52299058

/*
FFmpeg environment setup (Visual Studio):
configure the include directories, library directories and additional dependencies,
then copy the FFmpeg DLLs into the project's debug output folder.
*/
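
/*
A minimal sketch of the same setup done in source, assuming the MSVC toolchain and the
Windows "dev" build of FFmpeg (library names are the ones shipped with that build;
adjust the include/library paths in the project settings to your own installation):

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
*/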

/*
libavcodec      encoding/decoding library
libavfilter     graph-based frame editing library
libavformat     I/O and muxing/demuxing library
libavdevice     special devices muxing/demuxing library
libavutil       common utility library
libswresample   audio resampling, format conversion and mixing
libpostproc     post processing library
libswscale      color conversion and scaling library
*/


#include <stdio.h>

extern "C" // 因为FFmpeg是纯C程序
{
    // FFmpeg libraries
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
};



//#define STEP1        // Step 1: decode a video file to raw YUV420P data
#define STEP2        // Step 2: encode YUV420P pixel data into a JPEG image


#ifdef STEP1

///////////////////////////////////////////////////////////////////////////////
// Decode a video file to raw YUV data

int main(int argc, char* argv[])
{
    AVFormatContext    *pFormatCtx;
    AVCodecContext    *pCodecCtx;
    AVCodec            *pCodec;
    AVFrame    *pFrame, *pFrameYUV;
    AVPacket *packet;
    struct SwsContext *img_convert_ctx;
    uint8_t *out_buffer;

    int    videoindex = -1;
    int y_size;
    int ret, got_picture;

    char filepath[] = "video1.mkv";
    FILE *fp_yuv = fopen("video1.yuv", "wb+");

    av_register_all();
    //avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0)
    {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx, NULL) <0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }

    for (int i = 0; i < pFormatCtx->nb_streams; i++)
    {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoindex = i;
            break;
        }
    }
    if (videoindex == -1)
    {
        printf("Didn't find a video stream.\n");
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoindex]->codec;

    // Find the FFmpeg decoder matching the stream's codec ID
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        printf("Codec not found.\n");
        return -1;
    }
    // Open the codec: initialize the AVCodecContext with the selected decoder
    if (avcodec_open2(pCodecCtx, pCodec, NULL)<0)
    {
        printf("Could not open codec.\n");
        return -1;
    }

    // Allocate the packet, the frames and the output buffer
    packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    // Attach a data buffer to the already-allocated AVPicture/AVFrame.
    // AVFrame/AVPicture has a data[4] field; out_buffer just holds the raw YUV bytes in sequence,
    // and after the fill, data[0], data[1] and data[2] point at the Y, U and V planes inside that buffer.
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
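    // With YUV420P and a contiguous buffer, the fill above amounts to (sketch):
    //   pFrameYUV->data[0] = out_buffer;                        // Y plane, width*height bytes
    //   pFrameYUV->data[1] = out_buffer + width*height;         // U plane, width*height/4 bytes
    //   pFrameYUV->data[2] = out_buffer + width*height*5/4;     // V plane, width*height/4 bytes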

    //Output Info-----------------------------
    printf("--------------- File Information ----------------\n");
    av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");

    // Initialize an SwsContext.
    // Parameters: source width, source height, source pixel format, destination width, destination height, destination pixel format, scaling algorithm
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
        pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

    while (av_read_frame(pFormatCtx, packet) >= 0)
    {
        if (packet->stream_index == videoindex)
        {
            // Decode one video frame: takes a compressed AVPacket and outputs a decoded AVFrame
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0)
            {
                printf("Decode Error.\n");
                return -1;
            }
            if (got_picture)
            {
                // Convert/copy the decoded frame into the YUV420P output frame
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                    pFrameYUV->data, pFrameYUV->linesize);

                y_size = pCodecCtx->width * pCodecCtx->height;
                // Write the three planes to the output file
                fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);        //Y 
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
                //printf("Succeed to decode 1 frame!\n");

            }
        }
        av_free_packet(packet);
    }

    //flush decoder
    //FIX: Flush Frames remained in Codec
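    // Note: av_free_packet() has already set packet->data to NULL and packet->size to 0,
    // so passing the same packet again acts as the "flush" packet that drains the frames
    // still buffered inside the decoder.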
    while (1)
    {
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if (ret < 0)
            break;
        if (!got_picture)
            break;
        sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
            pFrameYUV->data, pFrameYUV->linesize);

        int y_size = pCodecCtx->width*pCodecCtx->height;

        fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);        //Y 
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
        //printf("Flush Decoder: Succeed to decode 1 frame!\n");
    }

    sws_freeContext(img_convert_ctx);
    fclose(fp_yuv);
    av_free(out_buffer);
    av_free(packet);
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}

#endif



#ifdef STEP2

///////////////////////////////////////////////////////////////////////////////
// Encode YUV420P pixel data into a JPEG image

int main(int argc, char* argv[])
{
    AVFormatContext* pFormatCtx;
    AVOutputFormat* fmt;
    AVStream* video_st;
    AVCodecContext* pCodecCtx;
    AVCodec* pCodec;

    uint8_t* picture_buf;
    AVFrame* picture;
    AVPacket pkt;
    int y_size;
    int got_picture = 0;
    int size;
    int ret = 0;
    //int in_w = 480, in_h = 272;
    int in_w = 720, in_h = 480;
    int frame_num = 1;

    FILE *in_file = NULL;
    const char* out_file = "new1.jpg";

    in_file = fopen("video1.yuv", "rb");

    // Work out how many YUV420 frames the input file holds
    const int WIDTH = 720;
    const int HEIGHT = 480;
    const int EachFrameSize = (int)(WIDTH*HEIGHT*1.5);
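    // e.g. for 720x480: 720 * 480 = 345600 luma bytes, plus 345600 / 2 chroma bytes,
    // giving 518400 bytes per YUV420P frame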
    if (in_file == NULL)
    {
        printf("couldn't open file.\n");
        return  -1;
    }
    // Move the file pointer to the end of the file
    fseek(in_file, 0, SEEK_END);
    // Get the offset of the end relative to the start, i.e. the total size of the file in bytes
    // (ftell can misbehave for files larger than 2^31 - 1 bytes, i.e. above roughly 2.1 GB)
    long total_size = ftell(in_file);
    // Reset the file pointer to the beginning of the file
    rewind(in_file);       

    // total_size / (WIDTH * HEIGHT * 1.5) gives the total number of frames in the YUV420 file
    long nFrame = total_size / EachFrameSize;
    printf("Total number of YUV420 frames: %ld\n\n", nFrame);

    printf("转换第几帧? 请输入: ");
    scanf("%d", &frame_num);
    printf("\n");

    fseek(in_file, (frame_num-1) * EachFrameSize, SEEK_SET);


    av_register_all();
    pFormatCtx = avformat_alloc_context();

    // Return the most suitable registered output format
    fmt = av_guess_format("mjpeg", NULL, NULL);
    pFormatCtx->oformat = fmt;

    if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0)
    {
        printf("Couldn't open output file.");
        return -1;
    }

    // A simpler way to do the two steps above:
    //avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
    //fmt = pFormatCtx->oformat;

    video_st = avformat_new_stream(pFormatCtx, 0);
    if (video_st == NULL)
    {
        return -1;
    }
    pCodecCtx = video_st->codec;
    pCodecCtx->codec_id = fmt->video_codec;
    pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    pCodecCtx->pix_fmt = AV_PIX_FMT_YUVJ420P;
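    // AV_PIX_FMT_YUVJ420P is the full-range (JPEG) variant of YUV420P, which is what the
    // MJPEG encoder expects here.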

    pCodecCtx->width = in_w;
    pCodecCtx->height = in_h;

    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;

    // Output some information
    av_dump_format(pFormatCtx, 0, out_file, 1);

    pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
    if (!pCodec)
    {
        printf("Codec not found.");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        printf("Could not open codec.");
        return -1;
    }
    picture = av_frame_alloc();
    size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
    picture_buf = (uint8_t *)av_malloc(size);
    if (!picture_buf)
    {
        return -1;
    }

    avpicture_fill((AVPicture *)picture, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

    // Write the file header
    avformat_write_header(pFormatCtx, NULL);

    y_size = pCodecCtx->width * pCodecCtx->height;
    av_new_packet(&pkt, y_size * 3);




    // Read YUV
    if (fread(picture_buf, 1, y_size * 3 / 2, in_file) <= 0)
    {
        printf("Could not read input file.");
        return -1;
    }
    picture->data[0] = picture_buf;                        // Y
    picture->data[1] = picture_buf + y_size;                // U 
    picture->data[2] = picture_buf + y_size * 5 / 4;        // V
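    // In a planar YUV420P file each frame is laid out as Y (y_size bytes),
    // then U (y_size/4 bytes), then V (y_size/4 bytes), hence the offsets above.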

    // Encode the frame
    ret = avcodec_encode_video2(pCodecCtx, &pkt, picture, &got_picture);
    if (ret < 0)
    {
        printf("Encode Error.\n");
        return -1;
    }
    if (got_picture == 1)
    {
        pkt.stream_index = video_st->index;
        ret = av_write_frame(pFormatCtx, &pkt);
    }

    av_free_packet(&pkt);

    av_write_trailer(pFormatCtx);

    printf("Encode Successful.\n");

    if (video_st)
    {
        avcodec_close(video_st->codec);
        av_free(picture);
        av_free(picture_buf);
    }
    avio_close(pFormatCtx->pb);
    avformat_free_context(pFormatCtx);
    fclose(in_file);

    return 0;
}

#endif

