Encoding with x264 and writing the output to a .264 file


#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdint.h>
#include <time.h>
#include <sys/time.h>   /* struct timeval used in ENCODER() */
#include <unistd.h>     /* sleep() used in main() */
#include "x264.h"
#include "h264_encoder.h"

#pragma comment(lib, "lib264.lib")

#pragma pack(push)
#pragma pack(2)
typedef struct BITMAPFILEHEADER
{
    uint16_t bfType;
    uint32_t bfSize;
    uint16_t bfReserved1;
    uint16_t bfReserved2;
    uint32_t bfOffBits;
} BMPFILEHEADER;

typedef struct BITMAPINFOHEADER
{
    uint32_t biSize;
    uint32_t biWidth;
    uint32_t biHeight;
    uint16_t biPlanes;
    uint16_t biBitCount;
    uint32_t biCompression;
    uint32_t biSizeImage;
    uint32_t biXPelsPerMeter;
    uint32_t biYPelsPerMeter;
    uint32_t biClrUsed;
    uint32_t biClrImportant;
} BMPINFOHEADER;
#pragma pack(pop)

void RGB2RGBA(uint8_t *src, uint8_t *dest, int32_t width, int32_t height)
{
    int32_t i = 0;
    int32_t j = 0;
    for (i = 0; i < height; i++)
    {
        for (j = 0; j < width; j++)
        {
            *dest++ = *src++; /* red */
            *dest++ = *src++; /* green */
            *dest++ = *src++; /* blue */
            *dest++ = 0xff;   /* alpha; the original only advanced the pointer here and never stored a value */
        }
    }
}

void ReadPic(uint8_t *rgb, uint8_t num, int32_t *pwidth, int32_t *pheight)
{
    BMPFILEHEADER bmpheader;
    BMPINFOHEADER bmpinfo;
    FILE *fp;

    char szFilename[1024];

    sprintf(szFilename, "../bmps/03 (%d).bmp", num);

    fp = fopen(szFilename, "rb");
    if (fp == NULL)
    {
        return;
    }
    fread(&bmpheader, sizeof(bmpheader), 1, fp);
    fread(&bmpinfo, sizeof(bmpinfo), 1, fp);

    *pwidth = bmpinfo.biWidth;
    *pheight = bmpinfo.biHeight;
    int32_t width = *pwidth;
    int32_t height = *pheight;
    /* 24-bpp BMP rows are padded to a 4-byte boundary */
    int32_t linesize = (((width * 24) + 31) & (~31)) / 8;
    /* seek to the pixel data; it does not necessarily start right after the two headers */
    fseek(fp, bmpheader.bfOffBits, SEEK_SET);
    fread(rgb, linesize * height, 1, fp);
    fclose(fp);
}

#define TABLE_SIZE 256
short Y_R[TABLE_SIZE];
short Y_G[TABLE_SIZE];
short Y_B[TABLE_SIZE];

short U_G[TABLE_SIZE];
short U_B[TABLE_SIZE];

short V_R[TABLE_SIZE];
short V_G[TABLE_SIZE];

/* pre-compute the per-channel products of the 7-bit fixed-point RGB->YUV weights */
void table_init()
{
    short i;
    for (i = 0; i < TABLE_SIZE; i++)
    {
        Y_R[i] = (i * 38) >> 7;
        Y_G[i] = (i * 75) >> 7;
        Y_B[i] = (i * 15) >> 7;
        V_R[i] = (i * 22) >> 7;
        V_G[i] = (i * 42) >> 7;
        U_G[i] = (i * 54) >> 7;
        U_B[i] = (i * 10) >> 7;
    }
}
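As a quick sanity check on those fixed-point weights: 38 + 75 + 15 = 128, so the luma row approximates Y ≈ 0.297R + 0.586G + 0.117B, close to the usual BT.601 weights. The tiny standalone program below is my own illustration, not part of the original source:

```
#include <stdio.h>

int main(void)
{
    /* a pure-white pixel, R = G = B = 255, through the 7-bit weights above */
    int y = ((255 * 38) >> 7) + ((255 * 75) >> 7) + ((255 * 15) >> 7);
    printf("white -> Y = %d\n", y); /* prints 253; the gap to 255 is truncation loss */
    return 0;
}
```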

X264Encoder *x264_encoder_new(int width, int height)
{
    X264Encoder *enc;

    if (width % 2 != 0 || height % 2 != 0) {
        fprintf(stderr, "width and height must both be divisible by two!\n");
        return NULL;
    }

    static int thread = 1;

    enc = (X264Encoder *)malloc(sizeof(X264Encoder));
    if (enc == NULL)
        return NULL;

    enc->first_frame = 1;
    enc->width = width;
    enc->height = height;
    enc->quality = 70;

    if (width * height < 1024 * 600)
    {
        x264_param_default_preset(&enc->param, "superfast", "zerolatency");
    }
    else
    {
        x264_param_default_preset(&enc->param, "ultrafast", "zerolatency");
    }

    enc->param.i_threads = thread;
    enc->param.i_width = width;
    enc->param.i_height = height;
    enc->param.i_fps_num = 25;
    enc->param.i_fps_den = 1;
    enc->param.i_frame_reference = 1;
    enc->param.i_keyint_max = 5;
    enc->param.b_intra_refresh = 1;

    enc->param.i_bframe = 0;
    enc->param.b_repeat_headers = 1;

    /* Rate control. Note that the CRF settings here are dead code: the
       CQP block below overrides them, so constant-QP is what actually runs. */
    enc->param.rc.b_mb_tree = 0;
    enc->param.rc.i_rc_method = X264_RC_CRF;
    enc->param.rc.f_rf_constant = 25;
    enc->param.rc.f_rf_constant_max = 35;
    enc->param.rc.i_bitrate = 1024 * 8;

    enc->param.rc.i_rc_method = X264_RC_CQP;
    enc->param.rc.i_qp_constant = 15;
    enc->param.b_repeat_headers = 1;
    enc->param.b_annexb = 1;
    x264_param_apply_profile(&enc->param, "baseline");

    enc->param.rc.i_lookahead = 0;
    enc->param.i_sync_lookahead = 0;
    enc->param.i_bframe = 0;
    enc->param.b_sliced_threads = 1;
    enc->param.b_vfr_input = 0;
    enc->param.rc.b_mb_tree = 0;

    /* initialize the encoder */
    enc->coder = x264_encoder_open(&enc->param);

    x264_picture_init(&enc->pic_in);
    x264_picture_alloc(&enc->pic_in, X264_CSP_I420, width, height);

    enc->pic_in.img.i_csp = X264_CSP_I420;
    enc->pic_in.img.i_plane = 3;

    enc->pic_in.img.i_stride[0] = width;
    enc->pic_in.img.i_stride[1] = width >> 1;
    enc->pic_in.img.i_stride[2] = width >> 1;

    enc->pic_in.i_pts = 0;

    enc->row = (uint8_t *)malloc(width * 3);
    fprintf(stderr, "width : %d, height : %d\n", width, height);

    table_init();

    return enc;
}

void x264_encoder_destroy(X264Encoder *encoder)
{
    if (encoder->coder) {
        x264_encoder_close(encoder->coder);
        encoder->coder = NULL;
    }

    x264_picture_clean(&encoder->pic_in);

    if (encoder->row != NULL)
    {
        free(encoder->row);
        encoder->row = NULL;
    }
    free(encoder);
}

/* Clamp a value that has left [0,255] to the nearest bound by testing bit 8.
   This is only right for positive overflow (256..511 -> 255); note that a
   negative value in -256..-1 also has bit 8 set and would clamp to 255. */
#define COLOUR_VALID(x) (x = ((x) & 0x100 ? 255 : 0));

typedef uint64_t UInt64;

UInt64 SSE_YUV_RGB[13];

void coef_init()
{
    /* fills the SIMD coefficient table; the body was elided in the original */
    SSE_YUV_RGB[0] ……

    …………
}

inline void RGB2YUV_MMX_evenline(uint8_t *y, uint8_t *u, uint8_t *v, const uint8_t *rgba, uint32_t width)
{
    int j;
    uint32_t expand16_width = (width >> 4) << 4;

    UInt64 *p_coef;
    p_coef = SSE_YUV_RGB;

    if (expand16_width > 0)
    {
        asm("movl %4,%%ecx;             \
             movq %5,%%rax;             \
             movq %0,%%rbx;             \
             movq %1,%%rdx;             \
             ……

             ……

             /* The author withheld the full assembly source; leave an e-mail
                address in the comments if you need it. */

             ………………
             movdqu %%xmm1,(%%rdi);     \
             packuswb %%xmm6,%%xmm6;    \
             movdqu %%xmm6,(%%rdx);     \
             ……
             subl $16,%%ecx;            \
             jnz loop_begin1;           \
             "
             :
             : "m"(y), "m"(u), "m"(v), "m"(rgba), "m"(expand16_width), "m"(p_coef)
             : "%rax", "%ecx", "%rbx", "%rdx", "rsi", "rdi");
    }

    /* pixels left over beyond the 16-aligned part are converted with the
       lookup tables (note: `<=` processes one pixel too many; `<` looks intended) */
    short i = 0;
    uint32_t wide = width - expand16_width;
    for (i = 0; i <= wide; i++)
    {
        short Y, U, V;
        ……………………

        ……………………
            if (U & 0xff00)
                COLOUR_VALID(U)
            *(u + ((expand16_width + i) >> 1)) = (uint8_t)U;
        }
    }
}
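Since the SIMD source is withheld, here is a plain-C sketch of what an even line must accomplish for I420 output: one luma sample per pixel plus one U/V pair per two pixels. The helper name `rgb2yuv_evenline_c` is mine, the luma path uses the tables defined above, and the chroma weights are an assumption (standard BT.601-style integer approximations), since the post never shows its exact U/V math:

```
/* Plain-C stand-in for the withheld even-line SIMD path (a sketch,
 * assuming BT.601-style chroma weights; not the author's exact code). */
static void rgb2yuv_evenline_c(uint8_t *y, uint8_t *u, uint8_t *v,
                               const uint8_t *rgba, uint32_t width)
{
    uint32_t i;
    for (i = 0; i < width; i++)
    {
        uint8_t r = rgba[4 * i + 0];
        uint8_t g = rgba[4 * i + 1];
        uint8_t b = rgba[4 * i + 2];

        y[i] = (uint8_t)(Y_R[r] + Y_G[g] + Y_B[b]);

        if ((i & 1) == 0)   /* 4:2:0 -> one chroma sample per 2x2 pixel block */
        {
            short U = (short)((-38 * r - 74 * g + 112 * b) / 256 + 128);
            short V = (short)((112 * r - 94 * g - 18 * b) / 256 + 128);
            if (U & 0xff00) COLOUR_VALID(U)
            if (V & 0xff00) COLOUR_VALID(V)
            u[i >> 1] = (uint8_t)U;
            v[i >> 1] = (uint8_t)V;
        }
    }
}
```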

 

static inline void RGB2YUV_MMX_oddline(uint8_t *y, const uint8_t *rgba, uint32_t width)
{
    /* odd rows of an I420 frame contribute luma only; the body was elided in the original */
    uint32_t expand16_width = (width >> 4) << 4;

    UInt64 *p_coef;
    …………………………………………
}
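Odd lines are cheaper: in I420 each pair of rows shares one chroma row, so an odd line only has to produce luma. A matching plain-C stand-in (again my own sketch, not the withheld SIMD code):

```
/* Plain-C stand-in for the withheld odd-line SIMD path: odd rows of an
 * I420 frame contribute only luma, so just the Y tables are needed. */
static void rgb2yuv_oddline_c(uint8_t *y, const uint8_t *rgba, uint32_t width)
{
    uint32_t i;
    for (i = 0; i < width; i++)
    {
        y[i] = (uint8_t)(Y_R[rgba[4 * i + 0]] +
                         Y_G[rgba[4 * i + 1]] +
                         Y_B[rgba[4 * i + 2]]);
    }
}
```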

 

void ENCODER(X264Encoder *encoder, uint8_t *rgbabuf, int32_t width, int32_t height)
{
    /* {0,0,1,0xb7} is the MPEG sequence-end code familiar from FFmpeg samples;
       a raw H.264 Annex-B stream does not actually require it */
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };
    FILE *pFile = fopen("test.264", "wb");
    assert(pFile);
    clock_t start1, finish1, start2, finish2;
    static int num1 = 0, num2 = 0;
    static double time1 = 0.0, time2 = 0.0;

    struct timeval tv_begin1, tv_end1;
    struct timeval tv_begin2, tv_end2;
    x264_picture_t pic_out;
    uint8_t *y, *u, *v;

    int num = 0;
    int i_nal;
    x264_nal_t *nal;

    int i = 0;

    coef_init();

    for (num = 0; num < 300; num++)
    {
        ………………………………
    }
    fwrite(endcode, 1, sizeof(endcode), pFile);
    fclose(pFile);
}
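The loop body above is elided in the original post. Assuming it follows the usual x264 pattern (the same one the second example below uses), one iteration would convert the current RGBA frame into the encoder's I420 picture and write out whatever NALs come back. A minimal sketch using the hypothetical C fallbacks `rgb2yuv_evenline_c`/`rgb2yuv_oddline_c` from above and the locals declared in ENCODER:

```
/* Sketch of one iteration of the frame loop; an assumption, since the
 * original body is withheld. frame_rgba is the current RGBA frame. */
uint8_t *frame_rgba = rgbabuf + (size_t)width * height * 4 * num;
y = encoder->pic_in.img.plane[0];
u = encoder->pic_in.img.plane[1];
v = encoder->pic_in.img.plane[2];
for (i = 0; i < height; i++)
{
    const uint8_t *line = frame_rgba + (size_t)width * 4 * i;
    if ((i & 1) == 0)
        rgb2yuv_evenline_c(y + width * i,
                           u + (width >> 1) * (i >> 1),
                           v + (width >> 1) * (i >> 1), line, width);
    else
        rgb2yuv_oddline_c(y + width * i, line, width);
}
encoder->pic_in.i_pts = num;
int frame_size = x264_encoder_encode(encoder->coder, &nal, &i_nal,
                                     &encoder->pic_in, &pic_out);
if (frame_size > 0)
    fwrite(nal[0].p_payload, 1, frame_size, pFile); /* x264 lays the NALs out contiguously */
```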

 

int main(int argc, char **argv)
{
    uint8_t *rgbabuf = NULL;
    int32_t width = 0, height = 0;  /* filled in by the first ReadPic call; zeroed so the num == 1 offsets are well-defined */
    int num = 0;
    rgbabuf = malloc(1280 * 720 * 300 * 4);
    uint8_t *rgb = malloc(1280 * 720 * 300 * 3);
    for (num = 1; num <= 300; num++)
    {
        /* every frame is assumed to share the dimensions of frame 1 */
        ReadPic(rgb + width * 3 * height * (num - 1), num, &width, &height);
        RGB2RGBA(rgb + width * 3 * height * (num - 1), rgbabuf + width * 4 * height * (num - 1), width, height);
    }
    sleep(10);
    X264Encoder *encoder = x264_encoder_new(width, height);
    ENCODER(encoder, rgbabuf, width, height);
    x264_encoder_destroy(encoder);
    free(rgb);
    free(rgbabuf);
    return 0;
}

/**
 * @note An x264 encoding example.
 * Built against libx264-115.
 * 1. The example is an endless loop: it keeps encoding and writing the data to a file.
 * 2. Its behavior: encode 1000 frames, drain the encoder's buffer, then repeat those two steps.
 * @author 戈
 */
#include <cassert>
#include <iostream>
#include <string>
#include <stdint.h>
extern "C"
{
#include "x264.h"
};
unsigned int g_uiPTSFactor = 0;
int iNal   = 0;
x264_nal_t* pNals = NULL;
int encode(x264_t* p264, x264_picture_t* pIn, x264_picture_t* pOut);
int main(int argc, char** argv)
{
 int iResult = 0;
 x264_t* pX264Handle   = NULL;
 x264_param_t* pX264Param = new x264_param_t;
 assert(pX264Param);
 //* Configure parameters
 //* Start from the defaults
 x264_param_default(pX264Param);
 //* cpuFlags
 pX264Param->i_threads  = X264_SYNC_LOOKAHEAD_AUTO;//* guarantees that draining the buffer won't deadlock further encoding.
 //* Video properties
 pX264Param->i_width   = 320; //* width.
 pX264Param->i_height  = 240; //* height
 pX264Param->i_frame_total = 0; //* total number of frames to encode; use 0 if unknown.
 pX264Param->i_keyint_max = 10;
 //* bitstream parameters
 pX264Param->i_bframe  = 5;
 pX264Param->b_open_gop  = 0;
 pX264Param->i_bframe_pyramid = 0;
 pX264Param->i_bframe_adaptive = X264_B_ADAPT_TRELLIS;
 
 //* Aspect ratio: it has an effect, but not the one I was after.
 //pX264Param->vui.i_sar_width = 1080;
 //pX264Param->vui.i_sar_height = 720;
 //* Log
 pX264Param->i_log_level  = X264_LOG_DEBUG;
 //* Rate-control parameters
 pX264Param->rc.i_bitrate = 1024 * 10;//* bitrate (in kbps)
 //* Muxing parameters
 pX264Param->i_fps_den  = 1; //* frame-rate denominator
 pX264Param->i_fps_num  = 25;//* frame-rate numerator
 pX264Param->i_timebase_den = pX264Param->i_fps_num;
 pX264Param->i_timebase_num = pX264Param->i_fps_den;
 //* Set the profile; x264_profile_names[1] is "main".
 x264_param_apply_profile(pX264Param, x264_profile_names[1]);
 //* Open the encoder handle. x264_encoder_parameters reads back the
 //* parameters X264 is using; x264_encoder_reconfig updates them.
 pX264Handle = x264_encoder_open(pX264Param);
 assert(pX264Handle);
 //* Fetch the stream-level SPS and PPS; this call can be skipped if you don't need them.
 iResult = x264_encoder_headers(pX264Handle, &pNals, &iNal);
 assert(iResult >= 0);
 //* Together the SPS and PPS are only 36 bytes here. How do we get them out? See the sketch after this loop.
 for (int i = 0; i < iNal; ++i)
 {
  switch (pNals[i].i_type)
  {
  case NAL_SPS:
   break;
  case  NAL_PPS:
   break;
  default:
   break;
  }
 }
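 //* A sketch (my addition, not in the original): with the default
 //* b_annexb = 1, each NAL's p_payload already begins with its Annex-B
 //* start code, so the SPS/PPS could simply be written to the output
 //* file first, e.g. in the cases above:
 //*   case NAL_SPS:
 //*   case NAL_PPS:
 //*       fwrite(pNals[i].p_payload, 1, pNals[i].i_payload, pFile);
 //*       break;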
 //* Query the maximum number of frames the encoder may buffer.
 int iMaxFrames = x264_encoder_maximum_delayed_frames(pX264Handle);
 //* State used during encoding.
 iNal = 0;
 pNals = NULL;
 x264_picture_t* pPicIn = new x264_picture_t;
 x264_picture_t* pPicOut = new x264_picture_t;
 x264_picture_init(pPicOut);
 x264_picture_alloc(pPicIn, X264_CSP_I420, pX264Param->i_width, pX264Param->i_height);
 pPicIn->img.i_csp = X264_CSP_I420;
 pPicIn->img.i_plane = 3;
 //* Create the file that stores the encoded data
 FILE* pFile = fopen("agnt.264", "wb");
 assert(pFile);
 //* Dummy source data for the example.
 int iDataLen = pX264Param->i_width * pX264Param->i_height;
 uint8_t* data = new uint8_t[iDataLen];
 unsigned int  uiComponent = 0;
 while (++uiComponent)
 {
  //* Build the source frame to encode (YUV420 format)
  ::memset(data, uiComponent, iDataLen);
  ::memcpy(pPicIn->img.plane[0], data, iDataLen);
  ::memcpy(pPicIn->img.plane[1], data, iDataLen/4 );
  ::memcpy(pPicIn->img.plane[2], data, iDataLen/4);
  if (uiComponent <= 1000)
  {
   pPicIn->i_pts = uiComponent + g_uiPTSFactor * 1000;
   encode(pX264Handle, pPicIn, pPicOut);
  }
  else
  {
   //* Drain the buffered frames
   int iResult = encode(pX264Handle, NULL, pPicOut);
   if (0 == iResult)
   {
    //break; //* drained; one could break out here
    uiComponent = 0;
    ++g_uiPTSFactor;
    /* {{ This did not fix the problem of getting no B-frames when
     encoding resumes after the buffer has been drained:
    x264_encoder_reconfig(pX264Handle, pX264Param);
    x264_encoder_intra_refresh(pX264Handle);
    //* }} */
   }
  }
  //* Write the encoded data to the file.
  for (int i = 0; i < iNal; ++i)
  {
   fwrite(pNals[i].p_payload, 1, pNals[i].i_payload, pFile);
  }
 }
 //* Free the picture buffers
 x264_picture_clean(pPicIn);
 x264_picture_clean(pPicOut);
 //* Close the encoder handle
 x264_encoder_close(pX264Handle);
 pX264Handle = NULL;
 delete pPicIn ;
 pPicIn = NULL;
 delete pPicOut;
 pPicOut = NULL;
 delete pX264Param;
 pX264Param = NULL;
 delete [] data;
 data = NULL;
 return 0;
}
int encode(x264_t* pX264Handle, x264_picture_t* pPicIn, x264_picture_t* pPicOut)
{
 int iResult = 0;
 iResult = x264_encoder_encode(pX264Handle, &pNals, &iNal, pPicIn, pPicOut);
 if (0 == iResult)
 {
  std::cout << "Encode succeeded, but the frame was buffered." << std::endl;
 }
 else if (iResult < 0)
 {
  std::cout << "Encode failed." << std::endl;
 }
 else if (iResult > 0)
 {
  std::cout << "Got encoded data." << std::endl;
 }
 /* {{ Purpose unclear:
 unsigned char* pNal = NULL;
 for (int i = 0; i < iNal; ++i)
 {
  int iData = 1024 * 32;
  x264_nal_encode(pX264Handle, pNal, &pNals[i]);
 }
 //* }} */
 //* Query how many frames X264 still has buffered.
 int iFrames = x264_encoder_delayed_frames(pX264Handle);
 std::cout << "Frames currently buffered in the encoder: " << iFrames << "\n";
 return iFrames;
}

Below is sample Java code for saving an H.264 video stream to local storage in an Android application. First, create a class that handles encoding and saving the video:

```
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.media.MediaMuxer;
import android.os.Environment;

import java.io.IOException;
import java.nio.ByteBuffer;

public class VideoEncoder {
    private static final String MIME_TYPE = "video/avc"; // H.264 Advanced Video Coding
    private static final int FRAME_RATE = 30;            // frame rate
    private static final int I_FRAME_INTERVAL = 1;       // I-frame interval (seconds)
    private static final int BIT_RATE = 125000;          // bitrate (bps)

    private MediaCodec mediaCodec;
    private MediaMuxer mediaMuxer;
    private int trackIndex;
    private boolean isMuxerStarted;

    public VideoEncoder() throws IOException {
        MediaFormat format = MediaFormat.createVideoFormat(MIME_TYPE, 640, 480);
        format.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);
        format.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE);
        format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, I_FRAME_INTERVAL);
        format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
                MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Flexible);

        mediaCodec = MediaCodec.createEncoderByType(MIME_TYPE);
        mediaCodec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mediaCodec.start();

        mediaMuxer = new MediaMuxer(Environment.getExternalStorageDirectory() + "/output.mp4",
                MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
    }

    public void encode(byte[] data) {
        long presentationTimeUs = System.nanoTime() / 1000;
        ByteBuffer[] inputBuffers = mediaCodec.getInputBuffers();
        int inputBufferIndex = mediaCodec.dequeueInputBuffer(-1);
        if (inputBufferIndex >= 0) {
            ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
            inputBuffer.clear();
            inputBuffer.put(data);
            mediaCodec.queueInputBuffer(inputBufferIndex, 0, data.length, presentationTimeUs, 0);
        }

        MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
        int outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0);
        while (outputBufferIndex >= 0) {
            ByteBuffer outputBuffer = mediaCodec.getOutputBuffers()[outputBufferIndex];
            if (!isMuxerStarted) {
                MediaFormat outputFormat = mediaCodec.getOutputFormat();
                trackIndex = mediaMuxer.addTrack(outputFormat);
                mediaMuxer.start();
                isMuxerStarted = true;
            }
            mediaMuxer.writeSampleData(trackIndex, outputBuffer, bufferInfo);
            mediaCodec.releaseOutputBuffer(outputBufferIndex, false);
            outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0);
        }
    }

    public void release() {
        mediaCodec.stop();
        mediaCodec.release();
        mediaMuxer.stop();
        mediaMuxer.release();
    }
}
```

This class encodes video with the MediaCodec API and writes the encoded output to a file with the MediaMuxer API (here muxed into an MP4 container).

In your Activity, you can capture video frames with the Camera API (or some other source) and hand them to the VideoEncoder object for encoding and saving. Here is sample code that captures preview frames with the Camera API and feeds them to the VideoEncoder:

```
import android.Manifest;
import android.app.Activity;
import android.content.pm.PackageManager;
import android.hardware.Camera;
import android.os.Bundle;
import android.util.Log;
import android.view.SurfaceHolder;
import android.view.SurfaceView;

import androidx.annotation.NonNull;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;

import java.io.IOException;

public class MainActivity extends Activity implements SurfaceHolder.Callback, Camera.PreviewCallback {
    private static final int CAMERA_PERMISSION_REQUEST_CODE = 100;
    private static final String TAG = "MainActivity";

    private Camera camera;
    private SurfaceView surfaceView;
    private SurfaceHolder surfaceHolder;
    private VideoEncoder videoEncoder;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        surfaceView = findViewById(R.id.surfaceView);
        surfaceHolder = surfaceView.getHolder();
        surfaceHolder.addCallback(this);
    }

    @Override
    public void surfaceCreated(SurfaceHolder holder) {
        openCamera();
        try {
            videoEncoder = new VideoEncoder();
        } catch (IOException e) {
            Log.e(TAG, "Failed to create VideoEncoder", e);
        }
    }

    @Override
    public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder holder) {
        releaseCamera();
        videoEncoder.release();
    }

    private void openCamera() {
        if (ContextCompat.checkSelfPermission(this, Manifest.permission.CAMERA)
                != PackageManager.PERMISSION_GRANTED) {
            ActivityCompat.requestPermissions(this,
                    new String[]{Manifest.permission.CAMERA}, CAMERA_PERMISSION_REQUEST_CODE);
        } else {
            camera = Camera.open();
            try {
                camera.setPreviewDisplay(surfaceHolder);
                camera.setPreviewCallback(this);
                Camera.Parameters parameters = camera.getParameters();
                Camera.Size size = parameters.getPreviewSize();
                int bufferSize = size.width * size.height * 3 / 2;
                camera.addCallbackBuffer(new byte[bufferSize]);
                camera.startPreview();
            } catch (IOException e) {
                Log.e(TAG, "Failed to start preview", e);
            }
        }
    }

    private void releaseCamera() {
        if (camera != null) {
            camera.stopPreview();
            camera.setPreviewCallback(null);
            camera.release();
            camera = null;
        }
    }

    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        if (videoEncoder != null) {
            videoEncoder.encode(data);
        }
        camera.addCallbackBuffer(data);
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions,
                                           @NonNull int[] grantResults) {
        if (requestCode == CAMERA_PERMISSION_REQUEST_CODE) {
            if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                openCamera();
            }
        }
    }
}
```

In this example, the MainActivity class shows the camera preview in a SurfaceView and captures frames with the Camera API. Each preview frame is passed to the VideoEncoder object for encoding and saving. Note that this is only a basic example: among other things, production code must convert the camera's default NV21 preview format into the layout the encoder expects, and you will need to adapt and extend it to your own requirements.
