[x264 Encoder] Chapter 1 — The x264 encoding pipeline and an encoder demo based on x264

Series index

   An overview of the HEVC video coding standard

[x264 Encoder] Chapter 1 — The x264 encoding pipeline and an encoder demo based on x264

[x264 Encoder] Chapter 2 — Analysis of the x264 lookahead pipeline

[x264 Encoder] Chapter 3 — Rate control in x264

[x264 Encoder] Chapter 4 — The x264 intra prediction pipeline

[x264 Encoder] Chapter 5 — The x264 inter prediction pipeline

[x264 Encoder] Chapter 6 — Transform and quantization in x264

[x265 Encoder] Chapter 1 — Analysis of the lookahead module

[x265 Encoder] Chapter 2 — The encoding pipeline and an encoder demo based on x265

[x265 Encoder] Chapter 3 — The intra prediction pipeline

[x265 Encoder] Chapter 4 — The inter prediction pipeline

[x265 Encoder] Chapter 5 — The x265 inter motion estimation pipeline

[x265 Encoder] Chapter 6 — Rate control in x265

[x265 Encoder] Chapter 7 — The filtering module

[x265 Encoder] Chapter 8 — The transform and quantization module


Contents

Series index

Preface

I. The public API

II. Code module walkthrough

1. Parameter initialization: x264_param_default

2. Encoder preset configuration: x264_param_default_preset

3. Applying a profile: x264_param_apply_profile

4. Opening the encoder: x264_encoder_open

5. Encoding YUV pictures and retrieving the output: x264_encoder_encode

6. Closing the encoder: x264_encoder_close

III. A complete x264 encoding demo

Likes and bookmarks are what keep me writing! Give a rose, and its fragrance stays on your hand.


I. The public API

The overall public API is as follows:

The complete x264 encoding flow is as follows:
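
Before diving into the individual modules, here is a minimal sketch of how the public entry points are typically driven in sequence; the preset and profile strings are only examples, and error handling is omitted:

/* Typical lifecycle of the public API; each step is examined in the sections below. */
x264_param_t param;                                     /* 1. parameter block                */
x264_param_default_preset( &param, "medium", NULL );    /* 2. defaults + preset/tune         */
/* ... fill in i_width, i_height, i_fps_num/i_fps_den, rate control ...                      */
x264_param_apply_profile( &param, "high" );             /* 3. constrain to an H.264 profile  */
x264_t *enc = x264_encoder_open( &param );              /* 4. create the encoder             */
/* 5. per frame: x264_encoder_encode( enc, &nal, &i_nal, &pic_in, &pic_out );                */
/* 6. flush:     x264_encoder_encode( enc, &nal, &i_nal, NULL, &pic_out ) while frames remain */
x264_encoder_close( enc );                              /* 7. free the encoder               */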

II. Code module walkthrough

1. Parameter initialization: x264_param_default

It sets the default values of the x264 parameter structure (x264_param_t). A brief description of the main fields initialized in this function:

  • param->cpu: CPU capabilities are auto-detected by calling x264_cpu_detect().
  • param->i_threads: automatic number of encoding threads.
  • param->i_lookahead_threads: automatic number of lookahead threads.
  • param->b_deterministic: deterministic mode is enabled, so identical input produces identical output.
  • param->i_sync_lookahead: automatic synchronous lookahead depth.

Next come the video-property parameters, such as the chroma format, width, height, and sample aspect ratio.

Then the encoder parameters: the number of reference frames, the maximum and minimum keyframe interval, the number of B-frames, the scene-cut threshold, and so on.

After that, encoder settings such as the deblocking filter, CABAC entropy coding, the rate-control method and its constants (CRF value, QP limits, etc.).

Then parameters related to logging, the analysis (partition) types, and the quantization matrices.

Finally, parameters for specific features such as video-format (VUI) signalling, OpenCL acceleration, and AVC-Intra.

The function simply assigns these defaults to the members of x264_param_t; in practice the caller then overrides them as needed.

REALIGN_STACK void x264_param_default( x264_param_t *param )
{
    /* */
    memset( param, 0, sizeof( x264_param_t ) );

    /* CPU autodetect */
    param->cpu = x264_cpu_detect();//auto-detect CPU capabilities via x264_cpu_detect()
    param->i_threads = X264_THREADS_AUTO;//automatic number of encoding threads
    param->i_lookahead_threads = X264_THREADS_AUTO;//automatic number of lookahead threads
    param->b_deterministic = 1;//deterministic mode: identical input yields identical output
    param->i_sync_lookahead = X264_SYNC_LOOKAHEAD_AUTO;//automatic synchronous lookahead depth

    /* Video properties *///video-property parameters: chroma format, width, height, aspect ratio, etc.
    param->i_csp           = X264_CHROMA_FORMAT ? X264_CHROMA_FORMAT : X264_CSP_I420;
    param->i_width         = 0;
    param->i_height        = 0;
    param->vui.i_sar_width = 0;
    param->vui.i_sar_height= 0;
    param->vui.i_overscan  = 0;  /* undef */
    param->vui.i_vidformat = 5;  /* undef */
    param->vui.b_fullrange = -1; /* default depends on input */
    param->vui.i_colorprim = 2;  /* undef */
    param->vui.i_transfer  = 2;  /* undef */
    param->vui.i_colmatrix = -1; /* default depends on input */
    param->vui.i_chroma_loc= 0;  /* left center */
    param->i_fps_num       = 25;
    param->i_fps_den       = 1;
    param->i_level_idc     = -1;
    param->i_slice_max_size = 0;
    param->i_slice_max_mbs = 0;
    param->i_slice_count = 0;
#if HAVE_BITDEPTH8
    param->i_bitdepth = 8;
#elif HAVE_BITDEPTH10
    param->i_bitdepth = 10;
#else
    param->i_bitdepth = 8;
#endif

    /* Encoder parameters *///encoder parameters: reference frames, max/min keyframe interval, B-frames, scene-cut threshold, etc.
    param->i_frame_reference = 3;
    param->i_keyint_max = 250;
    param->i_keyint_min = X264_KEYINT_MIN_AUTO;
    param->i_bframe = 3;
    param->i_scenecut_threshold = 40;
    param->i_bframe_adaptive = X264_B_ADAPT_FAST;
    param->i_bframe_bias = 0;
    param->i_bframe_pyramid = X264_B_PYRAMID_NORMAL;
    param->b_interlaced = 0;
    param->b_constrained_intra = 0;
    //encoder settings: deblocking filter, CABAC, rate-control method and its constants, etc.
    param->b_deblocking_filter = 1;
    param->i_deblocking_filter_alphac0 = 0;
    param->i_deblocking_filter_beta = 0;

    param->b_cabac = 1;
    param->i_cabac_init_idc = 0;

    param->rc.i_rc_method = X264_RC_CRF;
    param->rc.i_bitrate = 0;
    param->rc.f_rate_tolerance = 1.0;
    param->rc.i_vbv_max_bitrate = 0;
    param->rc.i_vbv_buffer_size = 0;
    param->rc.f_vbv_buffer_init = 0.9;
    param->rc.i_qp_constant = -1;
    param->rc.f_rf_constant = 23;
    param->rc.i_qp_min = 0;
    param->rc.i_qp_max = INT_MAX;
    param->rc.i_qp_step = 4;
    param->rc.f_ip_factor = 1.4;
    param->rc.f_pb_factor = 1.3;
    param->rc.i_aq_mode = X264_AQ_VARIANCE;
    param->rc.f_aq_strength = 1.0;
    param->rc.i_lookahead = 40;

    param->rc.b_stat_write = 0;
    param->rc.psz_stat_out = "x264_2pass.log";
    param->rc.b_stat_read = 0;
    param->rc.psz_stat_in = "x264_2pass.log";
    param->rc.f_qcompress = 0.6;
    param->rc.f_qblur = 0.5;
    param->rc.f_complexity_blur = 20;
    param->rc.i_zones = 0;
    param->rc.b_mb_tree = 1;

    /* Log *///logging parameters; the analysis and quantization-matrix parameters follow
    param->pf_log = x264_log_default;
    param->p_log_private = NULL;
    param->i_log_level = X264_LOG_INFO;

    /* *///analysis parameters; further down come the video-format, OpenCL, and AVC-Intra parameters
    param->analyse.intra = X264_ANALYSE_I4x4 | X264_ANALYSE_I8x8;
    param->analyse.inter = X264_ANALYSE_I4x4 | X264_ANALYSE_I8x8
                         | X264_ANALYSE_PSUB16x16 | X264_ANALYSE_BSUB16x16;
    param->analyse.i_direct_mv_pred = X264_DIRECT_PRED_SPATIAL;
    param->analyse.i_me_method = X264_ME_HEX;
    param->analyse.f_psy_rd = 1.0;
    param->analyse.b_psy = 1;
    param->analyse.f_psy_trellis = 0;
    param->analyse.i_me_range = 16;
    param->analyse.i_subpel_refine = 7;
    param->analyse.b_mixed_references = 1;
    param->analyse.b_chroma_me = 1;
    param->analyse.i_mv_range_thread = -1;
    param->analyse.i_mv_range = -1; // set from level_idc
    param->analyse.i_chroma_qp_offset = 0;
    param->analyse.b_fast_pskip = 1;
    param->analyse.b_weighted_bipred = 1;
    param->analyse.i_weighted_pred = X264_WEIGHTP_SMART;
    param->analyse.b_dct_decimate = 1;
    param->analyse.b_transform_8x8 = 1;
    param->analyse.i_trellis = 1;
    param->analyse.i_luma_deadzone[0] = 21;
    param->analyse.i_luma_deadzone[1] = 11;
    param->analyse.b_psnr = 0;
    param->analyse.b_ssim = 0;

    param->i_cqm_preset = X264_CQM_FLAT;
    memset( param->cqm_4iy, 16, sizeof( param->cqm_4iy ) );
    memset( param->cqm_4py, 16, sizeof( param->cqm_4py ) );
    memset( param->cqm_4ic, 16, sizeof( param->cqm_4ic ) );
    memset( param->cqm_4pc, 16, sizeof( param->cqm_4pc ) );
    memset( param->cqm_8iy, 16, sizeof( param->cqm_8iy ) );
    memset( param->cqm_8py, 16, sizeof( param->cqm_8py ) );
    memset( param->cqm_8ic, 16, sizeof( param->cqm_8ic ) );
    memset( param->cqm_8pc, 16, sizeof( param->cqm_8pc ) );

    param->b_repeat_headers = 1;
    param->b_annexb = 1;
    param->b_aud = 0;
    param->b_vfr_input = 1;
    param->i_nal_hrd = X264_NAL_HRD_NONE;
    param->b_tff = 1;
    param->b_pic_struct = 0;
    param->b_fake_interlaced = 0;
    param->i_frame_packing = -1;
    param->i_alternative_transfer = 2; /* undef */
    param->b_opencl = 0;
    param->i_opencl_device = 0;
    param->opencl_device_id = NULL;
    param->psz_clbin_file = NULL;
    param->i_avcintra_class = 0;
    param->i_avcintra_flavor = X264_AVCINTRA_FLAVOR_PANASONIC;
}
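
As a usage sketch: after taking the defaults, a caller normally overrides the mandatory and rate-control fields directly. The resolution, frame rate, and bitrate below are illustrative values, not defaults:

x264_param_t param;
x264_param_default( &param );           // start from the defaults listed above
param.i_width       = 1280;             // mandatory: the defaults leave width/height at 0
param.i_height      = 720;
param.i_fps_num     = 30;               // override the default 25/1 frame rate
param.i_fps_den     = 1;
param.rc.i_rc_method = X264_RC_ABR;     // switch from the default CRF mode to average bitrate
param.rc.i_bitrate   = 2000;            // target bitrate in kbit/s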

2. Encoder preset configuration: x264_param_default_preset

It initializes the x264 parameter structure (x264_param_t) to its default values and then adjusts it according to the requested preset and tune. Code analysis:

REALIGN_STACK int x264_param_default_preset( x264_param_t *param, const char *preset, const char *tune )
{   //initialize the parameter structure to its defaults
    x264_param_default( param );
    //if a preset string was supplied, apply it via param_apply_preset; return -1 on failure
    if( preset && param_apply_preset( param, preset ) < 0 )
        return -1;
    if( tune && param_apply_tune( param, tune ) < 0 )//if a tune string was supplied, apply it via param_apply_tune; return -1 on failure
        return -1;
    return 0;
}
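
A short usage sketch; "veryfast" and "zerolatency" are examples of preset/tune names that x264 accepts, and checking the return value catches misspelled names:

x264_param_t param;
// a low-latency style configuration: a fast preset combined with the zerolatency tune
if( x264_param_default_preset( &param, "veryfast", "zerolatency" ) < 0 )
    return -1;   // unknown preset or tune name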

3. Applying a profile: x264_param_apply_profile

It provides a convenient way to constrain the x264 parameter structure to a given profile. Each profile forces a different set of parameter changes so that the resulting stream meets that profile's restrictions.

The code is as follows:

REALIGN_STACK int x264_param_apply_profile( x264_param_t *param, const char *profile )
{   //if no profile string is supplied, return 0 immediately (no profile is applied)
    if( !profile )
        return 0;
    //qp_bd_offset is the QP offset implied by the bit depth; the profile string is converted to an integer p for the checks below
    const int qp_bd_offset = 6 * (param->i_bitdepth-8);
    int p = profile_string_to_int( profile );
    if( p < 0 )//unrecognized profile name: log an error and return -1
    {
        x264_log_internal( X264_LOG_ERROR, "invalid profile: %s\n", profile );
        return -1;
    }//profiles below High 4:4:4 Predictive do not support lossless coding: if rate control is constant QP (X264_RC_CQP) with qp <= 0, or CRF with an effective rate factor (rf + bit-depth offset) <= 0, log an error and return -1
    if( p < PROFILE_HIGH444_PREDICTIVE && ((param->rc.i_rc_method == X264_RC_CQP && param->rc.i_qp_constant <= 0) ||
        (param->rc.i_rc_method == X264_RC_CRF && (int)(param->rc.f_rf_constant + qp_bd_offset) <= 0)) )
    {
        x264_log_internal( X264_LOG_ERROR, "%s profile doesn't support lossless\n", profile );
        return -1;
    }//profiles below High 4:4:4 Predictive do not support 4:4:4 chroma sampling: log an error and return -1
    if( p < PROFILE_HIGH444_PREDICTIVE && (param->i_csp & X264_CSP_MASK) >= X264_CSP_I444 )
    {
        x264_log_internal( X264_LOG_ERROR, "%s profile doesn't support 4:4:4\n", profile );
        return -1;
    }//profiles below High 4:2:2 do not support 4:2:2 chroma sampling: log an error and return -1
    if( p < PROFILE_HIGH422 && (param->i_csp & X264_CSP_MASK) >= X264_CSP_I422 )
    {
        x264_log_internal( X264_LOG_ERROR, "%s profile doesn't support 4:2:2\n", profile );
        return -1;
    }//profiles below High 10 do not support bit depths greater than 8: log an error and return -1
    if( p < PROFILE_HIGH10 && param->i_bitdepth > 8 )
    {
        x264_log_internal( X264_LOG_ERROR, "%s profile doesn't support a bit depth of %d\n", profile, param->i_bitdepth );
        return -1;
    }//profiles below High do not support 4:0:0 (monochrome): log an error and return -1
    if( p < PROFILE_HIGH && (param->i_csp & X264_CSP_MASK) == X264_CSP_I400 )
    {
        x264_log_internal( X264_LOG_ERROR, "%s profile doesn't support 4:0:0\n", profile );
        return -1;
    }
    //for PROFILE_BASELINE: disable the 8x8 transform, disable CABAC, force flat quantization matrices, and disable B-frames and weighted prediction
    if( p == PROFILE_BASELINE )
    {
        param->analyse.b_transform_8x8 = 0;
        param->b_cabac = 0;
        param->i_cqm_preset = X264_CQM_FLAT;
        param->psz_cqm_file = NULL;
        param->i_bframe = 0;
        param->analyse.i_weighted_pred = X264_WEIGHTP_NONE;
        if( param->b_interlaced )
        {
            x264_log_internal( X264_LOG_ERROR, "baseline profile doesn't support interlacing\n" );
            return -1;
        }
        if( param->b_fake_interlaced )
        {
            x264_log_internal( X264_LOG_ERROR, "baseline profile doesn't support fake interlacing\n" );
            return -1;
        }
    }
    else if( p == PROFILE_MAIN )
    {
        param->analyse.b_transform_8x8 = 0;
        param->i_cqm_preset = X264_CQM_FLAT;
        param->psz_cqm_file = NULL;
    }
    return 0;
}
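
A sketch of where the profile check belongs in the configuration sequence (the values are illustrative): it should run after the colorspace, bit depth, and rate-control fields are set, since those are exactly what the checks above inspect:

x264_param_t param;
x264_param_default_preset( &param, "medium", NULL );
param.i_csp      = X264_CSP_I420;       // 4:2:0 input
param.i_bitdepth = 8;
// "high" keeps the 8x8 transform and CQMs; "baseline"/"main" would clear them as shown above
if( x264_param_apply_profile( &param, "high" ) < 0 )
    return -1;                          // e.g. requesting "baseline" with 4:2:2 input fails here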

4. Opening the encoder: x264_encoder_open

It opens an x264 encoder and returns an opaque pointer to it (x264_t *). The code is as follows:

REALIGN_STACK x264_t *x264_encoder_open( x264_param_t *param )
{   //allocate a zero-initialized x264_api_t; return NULL if the allocation fails
    x264_api_t *api = calloc( 1, sizeof( x264_api_t ) );
    if( !api )
        return NULL;

#if HAVE_BITDEPTH8//select the encoder entry points that match the requested bit depth (param->i_bitdepth)
    if( param->i_bitdepth == 8 )
    {   //8-bit depth: point the api function table at the 8-bit encoder implementations
        api->nal_encode = x264_8_nal_encode;
        api->encoder_reconfig = x264_8_encoder_reconfig;
        api->encoder_parameters = x264_8_encoder_parameters;
        api->encoder_headers = x264_8_encoder_headers;
        api->encoder_encode = x264_8_encoder_encode;
        api->encoder_close = x264_8_encoder_close;
        api->encoder_delayed_frames = x264_8_encoder_delayed_frames;
        api->encoder_maximum_delayed_frames = x264_8_encoder_maximum_delayed_frames;
        api->encoder_intra_refresh = x264_8_encoder_intra_refresh;
        api->encoder_invalidate_reference = x264_8_encoder_invalidate_reference;

        api->x264 = x264_8_encoder_open( param, api );
    }
    else
#endif
#if HAVE_BITDEPTH10
    if( param->i_bitdepth == 10 )
    {   //10-bit depth: point the api function table at the 10-bit encoder implementations
        api->nal_encode = x264_10_nal_encode;
        api->encoder_reconfig = x264_10_encoder_reconfig;
        api->encoder_parameters = x264_10_encoder_parameters;
        api->encoder_headers = x264_10_encoder_headers;
        api->encoder_encode = x264_10_encoder_encode;
        api->encoder_close = x264_10_encoder_close;
        api->encoder_delayed_frames = x264_10_encoder_delayed_frames;
        api->encoder_maximum_delayed_frames = x264_10_encoder_maximum_delayed_frames;
        api->encoder_intra_refresh = x264_10_encoder_intra_refresh;
        api->encoder_invalidate_reference = x264_10_encoder_invalidate_reference;

        api->x264 = x264_10_encoder_open( param, api );
    }
    else
#endif  //if the requested bit depth was not compiled in, log an error; api->x264 stays NULL and is handled below
        x264_log_internal( X264_LOG_ERROR, "not compiled with %d bit depth support\n", param->i_bitdepth );

    if( !api->x264 )
    {
        free( api );
        return NULL;
    }

    /* x264_t is opaque */
    return (x264_t *)api;
}
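
A minimal sketch of opening the encoder; the x264_encoder_headers call is optional and shown for the case where the SPS/PPS are stored out-of-band (for example as container extradata) instead of being repeated in the stream:

x264_t *enc = x264_encoder_open( &param );
if( !enc )
    return -1;                          // unsupported bit depth or invalid parameters

x264_nal_t *nal;
int i_nal;
// optional: fetch the SPS/PPS/SEI headers once, up front
if( x264_encoder_headers( enc, &nal, &i_nal ) < 0 )
    return -1;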

5. Encoding YUV pictures and retrieving the output: x264_encoder_encode

It feeds one YUV picture to the x264 encoder (or, with pic_in == NULL, drains delayed frames) and returns the encoded NAL units. The code is as follows:

/****************************************************************************
 * x264_encoder_encode:
 *  XXX: i_poc   : is the poc of the current given picture
 *       i_frame : is the number of the frame being coded
 *  ex:  type frame poc
 *       I      0   2*0
 *       P      1   2*3
 *       B      2   2*1
 *       B      3   2*2
 *       P      4   2*6
 *       B      5   2*4
 *       B      6   2*5
 ****************************************************************************/
int     x264_encoder_encode( x264_t *h,
                             x264_nal_t **pp_nal, int *pi_nal,
                             x264_picture_t *pic_in,
                             x264_picture_t *pic_out )
{
    x264_t *thread_current, *thread_prev, *thread_oldest;
    int i_nal_type, i_nal_ref_idc, i_global_qp;
    int overhead = NALU_OVERHEAD;

#if HAVE_OPENCL
    if( h->opencl.b_fatal_error )
        return -1;
#endif

    if( h->i_thread_frames > 1 )//表示启用了多线程编码
    {   //代码根据当前的编码阶段选择当前线程、前一个线程和最旧的线程,并进行线程同步和速率控制的操作。然后,将当前线程设置为活动线程 h
        thread_prev    = h->thread[ h->i_thread_phase ];
        h->i_thread_phase = (h->i_thread_phase + 1) % h->i_thread_frames;
        thread_current = h->thread[ h->i_thread_phase ];
        thread_oldest  = h->thread[ (h->i_thread_phase + 1) % h->i_thread_frames ];
        thread_sync_context( thread_current, thread_prev );
        x264_thread_sync_ratecontrol( thread_current, thread_prev, thread_oldest );
        h = thread_current;
    }
    else
    {   //则将当前线程和最旧的线程都设置为 h
        thread_current =
        thread_oldest  = h;
    }
    h->i_cpb_delay_pir_offset = h->i_cpb_delay_pir_offset_next;

    /* no data out *///将 pi_nal 和 pp_nal 的值重置为默认值,表示没有输出的数据
    *pi_nal = 0;
    *pp_nal = NULL;

    /* ------------------- Setup new frame from picture -------------------- */
    if( pic_in != NULL )
    {
        if( h->lookahead->b_exit_thread )
        {
            x264_log( h, X264_LOG_ERROR, "lookahead thread is already stopped\n" );
            return -1;
        }
        //从编码器的未使用帧队列中取出一个帧 fenc
        /* 1: Copy the picture to a frame and move it to a buffer */
        x264_frame_t *fenc = x264_frame_pop_unused( h, 0 );
        if( !fenc )
            return -1;
        //输入图像 pic_in 复制到帧 fenc 中
        if( x264_frame_copy_picture( h, fenc, pic_in ) < 0 )
            return -1;
        //如果编码器的宽度和高度与宏块(MB)的宽度和高度不匹配,则调用 x264_frame_expand_border_mod16 对帧进行扩展
        if( h->param.i_width != 16 * h->mb.i_mb_width ||
            h->param.i_height != 16 * h->mb.i_mb_height )
            x264_frame_expand_border_mod16( h, fenc );
        //设置帧的帧号 i_frame 为 h->frames.i_input++
        fenc->i_frame = h->frames.i_input++;
        //如果帧号为 0,则将帧的时间戳 i_pts 赋值给 h->frames.i_first_pts
        if( fenc->i_frame == 0 )
            h->frames.i_first_pts = fenc->i_pts;
        if( h->frames.i_bframe_delay && fenc->i_frame == h->frames.i_bframe_delay )//如果启用了 B 帧,并且当前帧为 B 帧延迟帧(b-frame delay),则计算 B 帧延迟时间
            h->frames.i_bframe_delay_time = fenc->i_pts - h->frames.i_first_pts;
        //如果输入帧的时间戳小于等于已知的最大时间戳 h->frames.i_largest_pts,则输出警告信息
        if( h->param.b_vfr_input && fenc->i_pts <= h->frames.i_largest_pts )
            x264_log( h, X264_LOG_WARNING, "non-strictly-monotonic PTS\n" );
        //更新最大时间戳 h->frames.i_largest_pts 和次大时间戳 h->frames.i_second_largest_pts
        h->frames.i_second_largest_pts = h->frames.i_largest_pts;
        h->frames.i_largest_pts = fenc->i_pts;
        //设置帧的图像结构类型 i_pic_struct,如果是自动(PIC_STRUCT_AUTO),根据编码器参数和图像属性进行自动选择
        if( (fenc->i_pic_struct < PIC_STRUCT_AUTO) || (fenc->i_pic_struct > PIC_STRUCT_TRIPLE) )
            fenc->i_pic_struct = PIC_STRUCT_AUTO;

        if( fenc->i_pic_struct == PIC_STRUCT_AUTO )
        {
#if HAVE_INTERLACED
            int b_interlaced = fenc->param ? fenc->param->b_interlaced : h->param.b_interlaced;
#else
            int b_interlaced = 0;
#endif
            if( b_interlaced )
            {
                int b_tff = fenc->param ? fenc->param->b_tff : h->param.b_tff;
                fenc->i_pic_struct = b_tff ? PIC_STRUCT_TOP_BOTTOM : PIC_STRUCT_BOTTOM_TOP;
            }
            else
                fenc->i_pic_struct = PIC_STRUCT_PROGRESSIVE;
        }
        //如果启用了宏块树(mb-tree)和统计读取(stat read),则调用 x264_macroblock_tree_read 读取宏块数
        if( h->param.rc.b_mb_tree && h->param.rc.b_stat_read )
        {
            if( x264_macroblock_tree_read( h, fenc, pic_in->prop.quant_offsets ) )
                return -1;
        }
        else//否则,调用 x264_adaptive_quant_frame 进行自适应量化
            x264_adaptive_quant_frame( h, fenc, pic_in->prop.quant_offsets );
        //如果 pic_in 的 quant_offsets_free 不为空,则调用其释放函数释放 quant_offsets
        if( pic_in->prop.quant_offsets_free )
            pic_in->prop.quant_offsets_free( pic_in->prop.quant_offsets );
        //如果编码器支持低分辨率编码,则调用 x264_frame_init_lowres 进行低分辨率帧初始化
        if( h->frames.b_have_lowres )
            x264_frame_init_lowres( h, fenc );
        //将帧放入前瞻队列中,供切片类型决策使用
        /* 2: Place the frame into the queue for its slice type decision */
        x264_lookahead_put_frame( h, fenc );
        //如果输入帧数量小于等于延迟帧数加1减去线程数,则表示还没有足够的帧用于编码,返回0,并设置 pic_out 的帧类型为自动(X264_TYPE_AUTO)
        if( h->frames.i_input <= h->frames.i_delay + 1 - h->i_thread_frames )
        {
            /* Nothing yet to encode, waiting for filling of buffers */
            pic_out->i_type = X264_TYPE_AUTO;
            return 0;
        }
    }
    else
    {   //表示信号量用于结束前瞻线程(lookahead thread)。代码会设置 h->lookahead->b_exit_thread 为1,然后发出广播信号以通知其他等待的线程,并释放互斥锁
        /* signal kills for lookahead thread */
        x264_pthread_mutex_lock( &h->lookahead->ifbuf.mutex );
        h->lookahead->b_exit_thread = 1;
        x264_pthread_cond_broadcast( &h->lookahead->ifbuf.cv_fill );
        x264_pthread_mutex_unlock( &h->lookahead->ifbuf.mutex );
    }
    //表示当前帧的计数
    h->i_frame++;
    /* 3: The picture is analyzed in the lookahead */
    if( !h->frames.current[0] )//是否需要进行前瞻分析。如果当前没有可用的帧,则调用 x264_lookahead_get_frames进行前瞻分析
        x264_lookahead_get_frames( h );
    //检查当前帧是否为空,并且前瞻队列是否为空。如果是,则调用 encoder_frame_end 结束帧编码过程,并返回相应的结果
    if( !h->frames.current[0] && x264_lookahead_is_empty( h ) )
        return encoder_frame_end( thread_oldest, thread_current, pp_nal, pi_nal, pic_out );

    /* ------------------- Get frame to be encoded ------------------------- */
    /* 4: get picture to encode *///代码调用 x264_frame_shift 获取要进行编码的帧 h->fenc
    h->fenc = x264_frame_shift( h->frames.current );
    //如果编码器参数中启用了分片线程(sliced threads),则代码会等待前一帧的重构过程完成
    /* If applicable, wait for previous frame reconstruction to finish */
    if( h->param.b_sliced_threads )
        if( threadpool_wait_all( h ) < 0 )
            return -1;
    //如果当前帧是第一帧(h->i_frame == 0),则将当前帧的 i_reordered_pts 赋值给 h->i_reordered_pts_delay
    if( h->i_frame == 0 )
        h->i_reordered_pts_delay = h->fenc->i_reordered_pts;
    if( h->reconfig )//如果编码器需要重新配置(h->reconfig == 1),则调用 x264_encoder_reconfig_apply 应用新的参数配置,并将 h->reconfig 设置为0
    {
        x264_encoder_reconfig_apply( h, &h->reconfig_h->param );
        h->reconfig = 0;
    }
    if( h->fenc->param )//如果当前帧的参数存在(h->fenc->param != NULL),则调用 x264_encoder_reconfig_apply 应用参数配置,并释放参数结构体
    {
        x264_encoder_reconfig_apply( h, h->fenc->param );
        if( h->fenc->param->param_free )
        {
            x264_param_cleanup( h->fenc->param );
            h->fenc->param->param_free( h->fenc->param );
            h->fenc->param = NULL;
        }
    }//代码调用 x264_ratecontrol_zone_init 初始化码率控制相关的数据结构
    x264_ratecontrol_zone_init( h );
    //调用 reference_update 更新参考帧列表
    // ok to call this before encoding any frames, since the initial values of fdec have b_kept_as_ref=0
    if( reference_update( h ) )
        return -1;
    h->fdec->i_lines_completed = -1;//将当前解码帧的 i_lines_completed 设置为-1,表示尚未完成解码
    //如果当前编码帧不是I帧,则代码检查是否还有有效的参考帧。如果没有有效的参考帧,则将当前帧设置为关键帧(I帧)
    if( !IS_X264_TYPE_I( h->fenc->i_type ) )
    {
        int valid_refs_left = 0;
        for( int i = 0; h->frames.reference[i]; i++ )
            if( !h->frames.reference[i]->b_corrupt )
                valid_refs_left++;
        /* No valid reference frames left: force an IDR. */
        if( !valid_refs_left )
        {
            h->fenc->b_keyframe = 1;
            h->fenc->i_type = X264_TYPE_IDR;
        }
    }
    //如果当前编码帧是关键帧,则更新最后一个关键帧的帧号和最后一个IDR帧的帧号
    if( h->fenc->b_keyframe )
    {
        h->frames.i_last_keyframe = h->fenc->i_frame;
        if( h->fenc->i_type == X264_TYPE_IDR )
        {
            h->i_frame_num = 0;
            h->frames.i_last_idr = h->fenc->i_frame;
        }
    }//代码将一些计数器和标志重置为初始值,为当前解码帧和编码帧设置 POC(Picture Order Count),根据帧号计算得出
    h->sh.i_mmco_command_count =
    h->sh.i_mmco_remove_from_end = 0;
    h->b_ref_reorder[0] =
    h->b_ref_reorder[1] = 0;
    h->fdec->i_poc =
    h->fenc->i_poc = 2 * ( h->fenc->i_frame - X264_MAX( h->frames.i_last_idr, 0 ) );
    //这段代码用于设置帧的上下文信息,并准备进行比特流的写入
    /* ------------------- Setup frame context ----------------------------- */
    /* 5: Init data dependent of frame type *///根据当前编码帧的类型(h->fenc->i_type),进行不同的设置和处理
    if( h->fenc->i_type == X264_TYPE_IDR )
    {   //如果当前编码帧是 IDR 帧(关键帧),则重置参考帧列表,设置相应的 NAL 单元类型和参考级别(i_nal_type 和 i_nal_ref_idc),将当前切片类型(h->sh.i_type)设置为 I 帧类型,设置最后一个开放 GOP 的 POC 值为-1
        /* reset ref pictures */
        i_nal_type    = NAL_SLICE_IDR;
        i_nal_ref_idc = NAL_PRIORITY_HIGHEST;
        h->sh.i_type = SLICE_TYPE_I;
        reference_reset( h );
        h->frames.i_poc_last_open_gop = -1;
    }
    else if( h->fenc->i_type == X264_TYPE_I )
    {   //如果当前编码帧是 I 帧,则设置相应的 NAL 单元类型和参考级别,将当前切片类型设置为 I 帧类型,重置参考层次结构,并根据是否开启开放 GOP 设置最后一个开放 GOP 的 POC 值
        i_nal_type    = NAL_SLICE;
        i_nal_ref_idc = NAL_PRIORITY_HIGH; /* Not completely true but for now it is (as all I/P are kept as ref)*/
        h->sh.i_type = SLICE_TYPE_I;
        reference_hierarchy_reset( h );
        if( h->param.b_open_gop )
            h->frames.i_poc_last_open_gop = h->fenc->b_keyframe ? h->fenc->i_poc : -1;
    }
    else if( h->fenc->i_type == X264_TYPE_P )
    {   //如果当前编码帧是 P 帧,则设置相应的 NAL 单元类型和参考级别,将当前切片类型设置为 P 帧类型,重置参考层次结构,并将最后一个开放 GOP 的 POC 值设置为-1
        i_nal_type    = NAL_SLICE;
        i_nal_ref_idc = NAL_PRIORITY_HIGH; /* Not completely true but for now it is (as all I/P are kept as ref)*/
        h->sh.i_type = SLICE_TYPE_P;
        reference_hierarchy_reset( h );
        h->frames.i_poc_last_open_gop = -1;
    }
    else if( h->fenc->i_type == X264_TYPE_BREF )
    {   //如果当前编码帧是 B 参考帧,则设置相应的 NAL 单元类型和参考级别,将当前切片类型设置为 B 帧类型,重置参考层次结构
        i_nal_type    = NAL_SLICE;
        i_nal_ref_idc = h->param.i_bframe_pyramid == X264_B_PYRAMID_STRICT ? NAL_PRIORITY_LOW : NAL_PRIORITY_HIGH;
        h->sh.i_type = SLICE_TYPE_B;
        reference_hierarchy_reset( h );
    }
    else    /* B frame */
    {   //如果当前编码帧是 B 帧,则设置相应的 NAL 单元类型和参考级别,将当前切片类型设置为 B 帧类型
        i_nal_type    = NAL_SLICE;
        i_nal_ref_idc = NAL_PRIORITY_DISPOSABLE;
        h->sh.i_type = SLICE_TYPE_B;
    }
    //将解码帧的类型和帧号设置为与编码帧相同
    h->fdec->i_type = h->fenc->i_type;
    h->fdec->i_frame = h->fenc->i_frame;
    h->fenc->b_kept_as_ref =
    h->fdec->b_kept_as_ref = i_nal_ref_idc != NAL_PRIORITY_DISPOSABLE && h->param.i_keyint_max > 1;//根据参考级别和最大关键帧间隔的设置,确定编码帧和解码帧是否保留为参考帧
    //代码将解码帧的宏块信息和释放函数设置为与编码帧相同,并清空编码帧的宏块信息和释放函数
    h->fdec->mb_info = h->fenc->mb_info;
    h->fdec->mb_info_free = h->fenc->mb_info_free;
    h->fenc->mb_info = NULL;
    h->fenc->mb_info_free = NULL;
    //代码将解码帧的呈现时间戳(PTS)设置为编码帧的PTS,并根据B帧延迟的设置,计算解码帧的解码时间戳(DTS)
    h->fdec->i_pts = h->fenc->i_pts;
    if( h->frames.i_bframe_delay )
    {
        int64_t *prev_reordered_pts = thread_current->frames.i_prev_reordered_pts;
        h->fdec->i_dts = h->i_frame > h->frames.i_bframe_delay
                       ? prev_reordered_pts[ (h->i_frame - h->frames.i_bframe_delay) % h->frames.i_bframe_delay ]
                       : h->fenc->i_reordered_pts - h->frames.i_bframe_delay_time;
        prev_reordered_pts[ h->i_frame % h->frames.i_bframe_delay ] = h->fenc->i_reordered_pts;
    }
    else
        h->fdec->i_dts = h->fenc->i_reordered_pts;
    if( h->fenc->i_type == X264_TYPE_IDR )//当前编码帧是 IDR 帧,则将最后一个 IDR 帧的PTS设置为解码帧的PTS
        h->i_last_idr_pts = h->fdec->i_pts;

    /* ------------------- Init                ----------------------------- */
    /* build ref list 0/1 *///代码调用 reference_build_list 构建参考帧列表
    reference_build_list( h, h->fdec->i_poc );

    /* ---------------------- Write the bitstream -------------------------- */
    /* Init bitstream context */
    if( h->param.b_sliced_threads )
    {   //如果启用了分片线程(sliced threads),则为每个线程初始化比特流上下文,并将相关的计数器重置为初始值
        for( int i = 0; i < h->param.i_threads; i++ )
        {
            bs_init( &h->thread[i]->out.bs, h->thread[i]->out.p_bitstream, h->thread[i]->out.i_bitstream );
            h->thread[i]->out.i_nal = 0;
        }
    }
    else
    {   //如果没有启用分片线程,则直接初始化主线程的比特流上下文,并将相关的计数器重置为初始值
        bs_init( &h->out.bs, h->out.p_bitstream, h->out.i_bitstream );
        h->out.i_nal = 0;
    }

    if( h->param.b_aud )
    {
        int pic_type;
        //根据当前切片类型(h->sh.i_type)确定 pic_type 的值,用于指示帧的类型。如果是 I 帧,则 pic_type 为 0,如果是 P 帧,则为 1,如果是 B 帧,则为 2,否则为 7
        if( h->sh.i_type == SLICE_TYPE_I )
            pic_type = 0;
        else if( h->sh.i_type == SLICE_TYPE_P )
            pic_type = 1;
        else if( h->sh.i_type == SLICE_TYPE_B )
            pic_type = 2;
        else
            pic_type = 7;
        //调用 nal_start 函数开始一个 AUD NAL 单元,设置 NAL 单元类型为 NAL_AUD,参考级别为 NAL_PRIORITY_DISPOSABLE
        nal_start( h, NAL_AUD, NAL_PRIORITY_DISPOSABLE );
        bs_write( &h->out.bs, 3, pic_type );//使用 bs_write 函数将 pic_type(占3位)写入比特流中
        bs_rbsp_trailing( &h->out.bs );//调用 bs_rbsp_trailing 函数处理比特流的尾部对齐
        bs_flush( &h->out.bs );//调用 bs_flush 函数将剩余的比特写入比特流
        if( nal_end( h ) )
            return -1;
        overhead += h->out.nal[h->out.i_nal-1].i_payload + NALU_OVERHEAD;//计算 AUD NAL 单元的开销(overhead),并将其添加到 overhead 变量中
    }
    //代码设置编码帧的 NAL 单元类型和参考级别为给定的
    h->i_nal_type = i_nal_type;
    h->i_nal_ref_idc = i_nal_ref_idc;
    //如果启用了帧内刷新(intra refresh),则根据编码帧类型进行不同的处理
    if( h->param.b_intra_refresh )
    {
        if( IS_X264_TYPE_I( h->fenc->i_type ) )
        {   //如果当前编码帧是 I 帧,将解码帧的 i_frames_since_pir 设置为 0,将解码帧的 f_pir_position 设置为宏块的宽度,将 b_queued_intra_refresh 设置为 0
            h->fdec->i_frames_since_pir = 0;
            h->b_queued_intra_refresh = 0;
            /* PIR is currently only supported with ref == 1, so any intra frame effectively refreshes
             * the whole frame and counts as an intra refresh. */
            h->fdec->f_pir_position = h->mb.i_mb_width;
        }
        else if( h->fenc->i_type == X264_TYPE_P )
        {
            int pocdiff = (h->fdec->i_poc - h->fref[0][0]->i_poc)/2;
            float increment = X264_MAX( ((float)h->mb.i_mb_width-1) / h->param.i_keyint_max, 1 );
            h->fdec->f_pir_position = h->fref[0][0]->f_pir_position;
            h->fdec->i_frames_since_pir = h->fref[0][0]->i_frames_since_pir + pocdiff;
            if( h->fdec->i_frames_since_pir >= h->param.i_keyint_max ||
                (h->b_queued_intra_refresh && h->fdec->f_pir_position + 0.5 >= h->mb.i_mb_width) )
            {
                h->fdec->f_pir_position = 0;
                h->fdec->i_frames_since_pir = 0;
                h->b_queued_intra_refresh = 0;
                h->fenc->b_keyframe = 1;
            }
            h->fdec->i_pir_start_col = h->fdec->f_pir_position+0.5;
            h->fdec->f_pir_position += increment * pocdiff;
            h->fdec->i_pir_end_col = h->fdec->f_pir_position+0.5;
            /* If our intra refresh has reached the right side of the frame, we're done. */
            if( h->fdec->i_pir_end_col >= h->mb.i_mb_width - 1 )
            {
                h->fdec->f_pir_position = h->mb.i_mb_width;
                h->fdec->i_pir_end_col = h->mb.i_mb_width - 1;
            }
        }
    }
    //如果编码帧的 b_keyframe 为真,表示当前帧是关键帧
    if( h->fenc->b_keyframe )
    {
        /* Write SPS and PPS *///如果参数 b_repeat_headers 为真,生成序列参数集(SPS)和图像参数集(PPS)
        if( h->param.b_repeat_headers )
        {   //对于 SPS,调用 nal_start 函数开始一个 SPS NAL 单元,设置 NAL 单元类型为 NAL_SPS,参考级别为 NAL_PRIORITY_HIGHEST。然后调用 x264_sps_write 函数将 SPS 写入到比特流中
            /* generate sequence parameters */
            nal_start( h, NAL_SPS, NAL_PRIORITY_HIGHEST );
            x264_sps_write( &h->out.bs, h->sps );
            if( nal_end( h ) )
                return -1;
            /* Pad AUD/SPS to 256 bytes like Panasonic */
            if( h->param.i_avcintra_class )//根据参数配置将 AUD/SPS 填充到256字节,计算填充的长度并赋值给 i_padding 字段
                h->out.nal[h->out.i_nal-1].i_padding = 256 - bs_pos( &h->out.bs ) / 8 - 2*NALU_OVERHEAD;
            overhead += h->out.nal[h->out.i_nal-1].i_payload + h->out.nal[h->out.i_nal-1].i_padding + NALU_OVERHEAD;//根据 SPS NAL 单元的负载和填充的长度计算开销,并将其添加到 overhead 变量中。
            //对于 PPS,调用 nal_start 函数开始一个 PPS NAL 单元,设置 NAL 单元类型为 NAL_PPS,参考级别为 NAL_PRIORITY_HIGHEST
            /* generate picture parameters */
            nal_start( h, NAL_PPS, NAL_PRIORITY_HIGHEST );
            x264_pps_write( &h->out.bs, h->sps, h->pps );//调用 x264_pps_write 函数将 PPS 写入到比特流中,需要提供 SPS 和 PPS 参数
            if( nal_end( h ) )
                return -1;
            if( h->param.i_avcintra_class )
            {
                int total_len = 256;//根据参数配置计算 PPS NAL 单元的填充长度,并赋值给 i_padding 字段
                /* Sony XAVC uses an oversized PPS instead of SEI padding */
                if( h->param.i_avcintra_flavor == X264_AVCINTRA_FLAVOR_SONY )
                    total_len += h->param.i_height >= 1080 ? 18*512 : 10*512;
                h->out.nal[h->out.i_nal-1].i_padding = total_len - h->out.nal[h->out.i_nal-1].i_payload - NALU_OVERHEAD;
            }//根据 PPS NAL 单元的负载和填充的长度计算开销,并将其添加到 overhead 变量中
            overhead += h->out.nal[h->out.i_nal-1].i_payload + h->out.nal[h->out.i_nal-1].i_padding + NALU_OVERHEAD;
        }
        //with a single frame thread (i_thread_frames == 1) and HRD parameters signalled in the SPS, the buffering-period SEI is written here; with frame threading it is written later, in encoder_frame_end
        /* when frame threading is used, buffering period sei is written in encoder_frame_end */
        if( h->i_thread_frames == 1 && h->sps->vui.b_nal_hrd_parameters_present )
        {   //调用 x264_hrd_fullness 函数计算 HRD 的满度。然后,调用 nal_start 函数开始一个 SEI NAL 单元,设置 NAL 单元类型为 NAL_SEI,参考级别为 NAL_PRIORITY_DISPOSABLE
            x264_hrd_fullness( h );
            nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
            x264_sei_buffering_period_write( h, &h->out.bs );//将缓冲期间的 SEI 写入比特流中
            if( nal_end( h ) )
               return -1;
            overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
        }
    }

    /* write extra sei *///处理额外的 SEI(Supplemental Enhancement Information)信息
    for( int i = 0; i < h->fenc->extra_sei.num_payloads; i++ )
    {
        nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
        x264_sei_write( &h->out.bs, h->fenc->extra_sei.payloads[i].payload, h->fenc->extra_sei.payloads[i].payload_size,
                        h->fenc->extra_sei.payloads[i].payload_type );
        if( nal_end( h ) )
            return -1;
        overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
        if( h->fenc->extra_sei.sei_free )
        {
            h->fenc->extra_sei.sei_free( h->fenc->extra_sei.payloads[i].payload );
            h->fenc->extra_sei.payloads[i].payload = NULL;
        }
    }

    if( h->fenc->extra_sei.sei_free )
    {   //调用该函数释放 SEI 负载的内存,并将负载指针设为 NULL
        h->fenc->extra_sei.sei_free( h->fenc->extra_sei.payloads );
        h->fenc->extra_sei.payloads = NULL;
        h->fenc->extra_sei.sei_free = NULL;
    }

    if( h->fenc->b_keyframe )//如果当前帧是关键帧
    {   //如果参数 b_repeat_headers 为真、当前帧是第一帧且不是 AVC-Intra 类型,则插入一个自定义的 SEI 用于标识编码器版本
        /* Avid's decoder strictly wants two SEIs for AVC-Intra so we can't insert the x264 SEI */
        if( h->param.b_repeat_headers && h->fenc->i_frame == 0 && !h->param.i_avcintra_class )
        {
            /* identify ourself */
            nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
            if( x264_sei_version_write( h, &h->out.bs ) )
                return -1;
            if( nal_end( h ) )
                return -1;
            overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
        }
        //如果当前帧不是 IDR 帧,则插入一个恢复点 SEI
        if( h->fenc->i_type != X264_TYPE_IDR )
        {   //根据参数配置计算时间到恢复点的距离,并存储在 time_to_recovery 变量中
            int time_to_recovery = h->param.b_open_gop ? 0 : X264_MIN( h->mb.i_mb_width - 1, h->param.i_keyint_max ) + h->param.i_bframe - 1;
            nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
            x264_sei_recovery_point_write( h, &h->out.bs, time_to_recovery );//接着调用 x264_sei_recovery_point_write 函数将恢复点信息写入比特流中,需要提供时间到恢复点的距离
            if( nal_end( h ) )
                return -1;
            overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
        }
        //如果参数配置中指定了主显示参数(mastering display),则插入一个主显示参数 SEI
        if( h->param.mastering_display.b_mastering_display )
        {
            nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
            x264_sei_mastering_display_write( h, &h->out.bs );
            if( nal_end( h ) )
                return -1;
            overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
        }
        //如果参数配置中指定了(content light level),则插入一个 SEI
        if( h->param.content_light_level.b_cll )
        {
            nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
            x264_sei_content_light_level_write( h, &h->out.bs );
            if( nal_end( h ) )
                return -1;
            overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
        }
        //如果参数配置中指定了alternative_transfer
        if( h->param.i_alternative_transfer != 2 )
        {
            nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
            x264_sei_alternative_transfer_write( h, &h->out.bs );
            if( nal_end( h ) )
                return -1;
            overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
        }
    }

    if( h->param.i_frame_packing >= 0 && (h->fenc->b_keyframe || h->param.i_frame_packing == 5) )
    {
        nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
        x264_sei_frame_packing_write( h, &h->out.bs );
        if( nal_end( h ) )
            return -1;
        overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
    }
    //如果 SPS 中指定了图片时间信息(b_pic_struct_present 为真)或者指定了 NAL HRD 参数(b_nal_hrd_parameters_present 为真),则插入一个图片时间 SEI
    /* generate sei pic timing */
    if( h->sps->vui.b_pic_struct_present || h->sps->vui.b_nal_hrd_parameters_present )
    {
        nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
        x264_sei_pic_timing_write( h, &h->out.bs );
        if( nal_end( h ) )
            return -1;
        overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
    }
    //如果当前帧不是 B 帧且 h->b_sh_backup 为真,则插入一个解码参考图片标记 SEI
    /* As required by Blu-ray. */
    if( !IS_X264_TYPE_B( h->fenc->i_type ) && h->b_sh_backup )
    {
        h->b_sh_backup = 0;
        nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
        x264_sei_dec_ref_pic_marking_write( h, &h->out.bs );
        if( nal_end( h ) )
            return -1;
        overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;
    }

    if( h->fenc->b_keyframe && h->param.b_intra_refresh )
        h->i_cpb_delay_pir_offset_next = h->fenc->i_cpb_delay;

    /* Filler space: 10 or 18 SEIs' worth of space, depending on resolution */
    if( h->param.i_avcintra_class && h->param.i_avcintra_flavor != X264_AVCINTRA_FLAVOR_SONY )
    {
        /* Write an empty filler NAL to mimic the AUD in the P2 format*/
        nal_start( h, NAL_FILLER, NAL_PRIORITY_DISPOSABLE );
        x264_filler_write( h, &h->out.bs, 0 );
        if( nal_end( h ) )
            return -1;
        overhead += h->out.nal[h->out.i_nal-1].i_payload + NALU_OVERHEAD;

        /* All lengths are magic lengths that decoders expect to see */
        /* "UMID" SEI */
        nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
        if( x264_sei_avcintra_umid_write( h, &h->out.bs ) < 0 )
            return -1;
        if( nal_end( h ) )
            return -1;
        overhead += h->out.nal[h->out.i_nal-1].i_payload + SEI_OVERHEAD;

        int unpadded_len;
        int total_len;
        if( h->param.i_height == 1080 )
        {
            unpadded_len = 5780;
            total_len = 17*512;
        }
        else
        {
            unpadded_len = 2900;
            total_len = 9*512;
        }
        /* "VANC" SEI */
        nal_start( h, NAL_SEI, NAL_PRIORITY_DISPOSABLE );
        if( x264_sei_avcintra_vanc_write( h, &h->out.bs, unpadded_len ) < 0 )
            return -1;
        if( nal_end( h ) )
            return -1;
        //调整最后一个 SEI NAL 单元的填充长度,使其满足总长度为 total_len 的要求
        h->out.nal[h->out.i_nal-1].i_padding = total_len - h->out.nal[h->out.i_nal-1].i_payload - SEI_OVERHEAD;
        overhead += h->out.nal[h->out.i_nal-1].i_payload + h->out.nal[h->out.i_nal-1].i_padding + SEI_OVERHEAD;
    }

    /* Init the rate control *///初始化码率控制器,并传入初始的 QP(Quantization Parameter)值和开销值。
    /* FIXME: Include slice header bit cost. */
    x264_ratecontrol_start( h, h->fenc->i_qpplus1, overhead*8 );
    i_global_qp = x264_ratecontrol_qp( h );//然后调用 x264_ratecontrol_qp 函数获取全局的 QP 值

    pic_out->i_qpplus1 =
    h->fdec->i_qpplus1 = i_global_qp + 1;
    //如果码率控制器的统计信息已经读取并且当前帧不是关键帧
    if( h->param.rc.b_stat_read && h->sh.i_type != SLICE_TYPE_I )
    {   //构建最优的参考帧列表
        x264_reference_build_list_optimal( h );
        reference_check_reorder( h );//进行参考帧的重新排序
    }
    //如果参考帧列表中有参考帧存在
    if( h->i_ref[0] )
        h->fdec->i_poc_l0ref0 = h->fref[0][0]->i_poc;

    /* ------------------------ Create slice header  ----------------------- */
    slice_init( h, i_nal_type, i_global_qp );//创建一个切片头,并传入切片类型和全局 QP 值

    /*------------------------- Weights -------------------------------------*/
    if( h->sh.i_type == SLICE_TYPE_B )//如果当前帧的切片类型是 B 帧,则进行双向预测的初始化
        x264_macroblock_bipred_init( h );
    //进行加权预测的初始化
    weighted_pred_init( h );
    //根据当前帧的 NAL 参考级别确定是否增加帧号
    if( i_nal_ref_idc != NAL_PRIORITY_DISPOSABLE )
        h->i_frame_num++;
    //设置线程切片的起始和结束位置,并根据线程数的不同进行不同的编码方式
    /* Write frame */
    h->i_threadslice_start = 0;
    h->i_threadslice_end = h->mb.i_mb_height;
    if( h->i_thread_frames > 1 )
    {   //如果线程数大于 1,则调用线程池的 x264_threadpool_run 函数并传入 slices_write 函数进行多线程编码
        x264_threadpool_run( h->threadpool, (void*)slices_write, h );
        h->b_thread_active = 1;
    }
    else if( h->param.b_sliced_threads )
    {   //如果参数配置中启用了切片线程,则调用 threaded_slices_write 函数进行多线程切片编码
        if( threaded_slices_write( h ) )
            return -1;
    }
    else//如果以上两种情况都不满足,则调用 slices_write 函数进行单线程编码
        if( (intptr_t)slices_write( h ) )
            return -1;
    //返回 encoder_frame_end 函数的结果,完成整个编码过程
    return encoder_frame_end( thread_oldest, thread_current, pp_nal, pi_nal, pic_out );
}
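
Putting the pieces together, a minimal encode loop might look like the sketch below. read_frame() and out_file are placeholders for the caller's YUV input and bitstream output; the second loop drains the frames still buffered by the lookahead and the B-frame delay:

x264_picture_t pic_in, pic_out;
x264_picture_alloc( &pic_in, X264_CSP_I420, param.i_width, param.i_height );

int64_t i_pts = 0;
x264_nal_t *nal;
int i_nal;
while( read_frame( &pic_in ) )          /* fills pic_in.img.plane[0..2] with one YUV frame */
{
    pic_in.i_pts = i_pts++;
    int i_size = x264_encoder_encode( enc, &nal, &i_nal, &pic_in, &pic_out );
    if( i_size < 0 )
        break;
    for( int i = 0; i < i_nal; i++ )    /* Annex B NAL units, ready to be written out */
        fwrite( nal[i].p_payload, 1, nal[i].i_payload, out_file );
}
/* flush: keep calling with pic_in == NULL until no delayed frames remain */
while( x264_encoder_delayed_frames( enc ) )
{
    int i_size = x264_encoder_encode( enc, &nal, &i_nal, NULL, &pic_out );
    if( i_size < 0 )
        break;
    for( int i = 0; i < i_nal; i++ )
        fwrite( nal[i].p_payload, 1, nal[i].i_payload, out_file );
}
x264_picture_clean( &pic_in );

Once the flush loop reports no more delayed frames, x264_encoder_close, covered in the next section, releases the encoder.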

6. Closing the encoder: x264_encoder_close

It releases all resources used by the encoder and logs the final encoding statistics. Code analysis:

void    x264_encoder_close  ( x264_t *h )
{   //compute the size of one frame's YUV data and store it in i_yuv_size
    int64_t i_yuv_size = FRAME_SIZE( h->param.i_width * h->param.i_height );
    int64_t i_mb_count_size[2][7] = {{0}};//per-partition-size macroblock counters, accumulated further below
    char buf[200];
    int b_print_pcm = h->stat.i_mb_count[SLICE_TYPE_I][I_PCM]//whether any PCM macroblocks were coded and should be reported
                   || h->stat.i_mb_count[SLICE_TYPE_P][I_PCM]
                   || h->stat.i_mb_count[SLICE_TYPE_B][I_PCM];
    //release the lookahead module's resources via x264_lookahead_delete
    x264_lookahead_delete( h );

#if HAVE_OPENCL
    x264_opencl_lookahead_delete( h );
    x264_opencl_function_t *ocl = h->opencl.ocl;
#endif
    //根据参数配置中的线程设置,依次释放线程池和前向预测线程池的资源
    if( h->param.b_sliced_threads )
        threadpool_wait_all( h );
    if( h->param.i_threads > 1 )//如果线程数大于 1,则调用 x264_threadpool_delete 函数释放线程池资源
        x264_threadpool_delete( h->threadpool );
    if( h->param.i_lookahead_threads > 1 )
        x264_threadpool_delete( h->lookaheadpool );
    if( h->i_thread_frames > 1 )
    {   //如果编码器的线程帧数大于 1,则遍历线程数组,如果线程的 b_thread_active 标志为真,则删除线程的编码帧
        for( int i = 0; i < h->i_thread_frames; i++ )
            if( h->thread[i]->b_thread_active )
            {
                assert( h->thread[i]->fenc->i_reference_count == 1 );
                x264_frame_delete( h->thread[i]->fenc );
            }

        x264_t *thread_prev = h->thread[h->i_thread_phase];
        x264_thread_sync_ratecontrol( h, thread_prev, h );//函数进行速率控制的同步
        x264_thread_sync_ratecontrol( thread_prev, thread_prev, h );
        h->i_frame = thread_prev->i_frame + 1 - h->i_thread_frames;//更新编码器的帧号为前一个线程的帧号加一减去线程帧数
    }
    h->i_frame++;//将编码器的帧号加一

    /* Slices used and PSNR *///通过一个循环遍历三种切片类型:I 帧、P 帧和 B 帧
    for( int i = 0; i < 3; i++ )
    {
        static const uint8_t slice_order[] = { SLICE_TYPE_I, SLICE_TYPE_P, SLICE_TYPE_B };
        int i_slice = slice_order[i];
        //在每个切片类型的统计信息中,如果帧的数量大于零,则获取帧的数量和帧的时长
        if( h->stat.i_frame_count[i_slice] > 0 )
        {
            int i_count = h->stat.i_frame_count[i_slice];
            double dur =  h->stat.f_frame_duration[i_slice];
            if( h->param.analyse.b_psnr )
            {   //如果参数配置中启用了 PSNR(峰值信噪比)分析,则输出帧的平均 QP(量化参数)、大小和 PSNR 值
                x264_log( h, X264_LOG_INFO,
                          "frame %c:%-5d Avg QP:%5.2f  size:%6.0f  PSNR Mean Y:%5.2f U:%5.2f V:%5.2f Avg:%5.2f Global:%5.2f\n",
                          slice_type_to_char[i_slice],
                          i_count,
                          h->stat.f_frame_qp[i_slice] / i_count,
                          (double)h->stat.i_frame_size[i_slice] / i_count,
                          h->stat.f_psnr_mean_y[i_slice] / dur, h->stat.f_psnr_mean_u[i_slice] / dur, h->stat.f_psnr_mean_v[i_slice] / dur,
                          h->stat.f_psnr_average[i_slice] / dur,
                          calc_psnr( h->stat.f_ssd_global[i_slice], dur * i_yuv_size ) );//计算方法是使用全局 SSD(Sum of Squared Differences)值除以 YUV 数据的总大小并乘以时长
            }
            else
            {   //如果没有启用 PSNR 分析,则只输出帧的平均 QP 和大小
                x264_log( h, X264_LOG_INFO,
                          "frame %c:%-5d Avg QP:%5.2f  size:%6.0f\n",
                          slice_type_to_char[i_slice],
                          i_count,
                          h->stat.f_frame_qp[i_slice] / i_count,
                          (double)h->stat.i_frame_size[i_slice] / i_count );
            }
        }
    }
    if( h->param.i_bframe && h->stat.i_frame_count[SLICE_TYPE_B] )
    {   //如果编码器参数中启用了 B 帧(双向预测帧)且存在 B 帧的统计信息
        char *p = buf;
        int den = 0;
        // weight by number of frames (including the I/P-frames) that are in a sequence of N B-frames
        for( int i = 0; i <= h->param.i_bframe; i++ )
            den += (i+1) * h->stat.i_consecutive_bframes[i];
        for( int i = 0; i <= h->param.i_bframe; i++ )
            p += sprintf( p, " %4.1f%%", 100. * (i+1) * h->stat.i_consecutive_bframes[i] / den );
        x264_log( h, X264_LOG_INFO, "consecutive B-frames:%s\n", buf );//将连续 B 帧的百分比信息打印到日志中
    }

    for( int i_type = 0; i_type < 2; i_type++ )
        for( int i = 0; i < X264_PARTTYPE_MAX; i++ )
        {   //如果当前宏块类型为 D_DIRECT_8x8,则跳过该循环(因为直接预测帧类型是单独计算的)
            if( i == D_DIRECT_8x8 ) continue; /* direct is counted as its own type */
            i_mb_count_size[i_type][x264_mb_partition_pixel_table[i]] += h->stat.i_mb_partition[i_type][i];//将宏块分区的数量累加到对应的类型和大小的计数器中
        }

    /* MB types used *///分别处理三种切片类型的宏块信息
    if( h->stat.i_frame_count[SLICE_TYPE_I] > 0 )
    {   //如果 I 帧的数量大于零,则根据统计信息生成 I 帧宏块的字符串表示,并打印到日志中
        int64_t *i_mb_count = h->stat.i_mb_count[SLICE_TYPE_I];
        double i_count = (double)h->stat.i_frame_count[SLICE_TYPE_I] * h->mb.i_mb_count / 100.0;
        print_intra( i_mb_count, i_count, b_print_pcm, buf );
        x264_log( h, X264_LOG_INFO, "mb I  %s\n", buf );
    }
    if( h->stat.i_frame_count[SLICE_TYPE_P] > 0 )
    {   //如果 P 帧的数量大于零,则根据统计信息生成 P 帧宏块的字符串表示,并打印到日志中。同时,还打印了 P 帧宏块的分区类型的百分比信息
        int64_t *i_mb_count = h->stat.i_mb_count[SLICE_TYPE_P];
        double i_count = (double)h->stat.i_frame_count[SLICE_TYPE_P] * h->mb.i_mb_count / 100.0;
        int64_t *i_mb_size = i_mb_count_size[SLICE_TYPE_P];
        print_intra( i_mb_count, i_count, b_print_pcm, buf );
        x264_log( h, X264_LOG_INFO,
                  "mb P  %s  P16..4: %4.1f%% %4.1f%% %4.1f%% %4.1f%% %4.1f%%    skip:%4.1f%%\n",
                  buf,
                  i_mb_size[PIXEL_16x16] / (i_count*4),
                  (i_mb_size[PIXEL_16x8] + i_mb_size[PIXEL_8x16]) / (i_count*4),
                  i_mb_size[PIXEL_8x8] / (i_count*4),
                  (i_mb_size[PIXEL_8x4] + i_mb_size[PIXEL_4x8]) / (i_count*4),
                  i_mb_size[PIXEL_4x4] / (i_count*4),
                  i_mb_count[P_SKIP] / i_count );
    }
    if( h->stat.i_frame_count[SLICE_TYPE_B] > 0 )
    {   //如果 B 帧的数量大于零,则根据统计信息生成 B 帧宏块的字符串表示,并打印到日志中。同时,还打印了 B 帧宏块的分区类型、直接模式和跳过模式的百分比信息
        int64_t *i_mb_count = h->stat.i_mb_count[SLICE_TYPE_B];
        double i_count = (double)h->stat.i_frame_count[SLICE_TYPE_B] * h->mb.i_mb_count / 100.0;
        double i_mb_list_count;
        int64_t *i_mb_size = i_mb_count_size[SLICE_TYPE_B];
        int64_t list_count[3] = {0}; /* 0 == L0, 1 == L1, 2 == BI */
        print_intra( i_mb_count, i_count, b_print_pcm, buf );
        for( int i = 0; i < X264_PARTTYPE_MAX; i++ )
            for( int j = 0; j < 2; j++ )
            {
                int l0 = x264_mb_type_list_table[i][0][j];
                int l1 = x264_mb_type_list_table[i][1][j];
                if( l0 || l1 )
                    list_count[l1+l0*l1] += h->stat.i_mb_count[SLICE_TYPE_B][i] * 2;
            }
        list_count[0] += h->stat.i_mb_partition[SLICE_TYPE_B][D_L0_8x8];
        list_count[1] += h->stat.i_mb_partition[SLICE_TYPE_B][D_L1_8x8];
        list_count[2] += h->stat.i_mb_partition[SLICE_TYPE_B][D_BI_8x8];
        i_mb_count[B_DIRECT] += (h->stat.i_mb_partition[SLICE_TYPE_B][D_DIRECT_8x8]+2)/4;
        i_mb_list_count = (list_count[0] + list_count[1] + list_count[2]) / 100.0;
        sprintf( buf + strlen(buf), "  B16..8: %4.1f%% %4.1f%% %4.1f%%  direct:%4.1f%%  skip:%4.1f%%",
                 i_mb_size[PIXEL_16x16] / (i_count*4),
                 (i_mb_size[PIXEL_16x8] + i_mb_size[PIXEL_8x16]) / (i_count*4),
                 i_mb_size[PIXEL_8x8] / (i_count*4),
                 i_mb_count[B_DIRECT] / i_count,
                 i_mb_count[B_SKIP]   / i_count );
        if( i_mb_list_count != 0 )
            sprintf( buf + strlen(buf), "  L0:%4.1f%% L1:%4.1f%% BI:%4.1f%%",
                     list_count[0] / i_mb_list_count,
                     list_count[1] / i_mb_list_count,
                     list_count[2] / i_mb_list_count );
        x264_log( h, X264_LOG_INFO, "mb B  %s\n", buf );
    }
    //调用 x264_ratecontrol_summary 函数打印速率控制的摘要信息
    x264_ratecontrol_summary( h );
    //首先检查不同切片类型(I、P和B)的帧数之和是否大于零。如果是,则继续进行计算和日志记录
    if( h->stat.i_frame_count[SLICE_TYPE_I] + h->stat.i_frame_count[SLICE_TYPE_P] + h->stat.i_frame_count[SLICE_TYPE_B] > 0 )
    {
#define SUM3(p) (p[SLICE_TYPE_I] + p[SLICE_TYPE_P] + p[SLICE_TYPE_B])
#define SUM3b(p,o) (p[SLICE_TYPE_I][o] + p[SLICE_TYPE_P][o] + p[SLICE_TYPE_B][o])
        int64_t i_i8x8 = SUM3b( h->stat.i_mb_count, I_8x8 );//通过SUM3b宏计算得到的h->stat.i_mb_count数组中I_8x8切片类型的值之和
        int64_t i_intra = i_i8x8 + SUM3b( h->stat.i_mb_count, I_4x4 )//通过对h->stat.i_mb_count数组中的不同切片类型进行求和得到的值。包括I_8x8、I_4x4和I_16x16切片类型
                                 + SUM3b( h->stat.i_mb_count, I_16x16 );
        int64_t i_all_intra = i_intra + SUM3b( h->stat.i_mb_count, I_PCM );//将i_intra值与h->stat.i_mb_count数组中I_PCM切片类型的值之和相加得到的值
        int64_t i_skip = SUM3b( h->stat.i_mb_count, P_SKIP )//将h->stat.i_mb_count数组中P_SKIP和B_SKIP切片类型的值之和得到的值
                       + SUM3b( h->stat.i_mb_count, B_SKIP );
        const int i_count = h->stat.i_frame_count[SLICE_TYPE_I] +//不同切片类型的帧数之和
                            h->stat.i_frame_count[SLICE_TYPE_P] +
                            h->stat.i_frame_count[SLICE_TYPE_B];
        int64_t i_mb_count = (int64_t)i_count * h->mb.i_mb_count;//将帧数乘以每帧的宏块数得到的值
        int64_t i_inter = i_mb_count - i_skip - i_all_intra;//通过减去i_skip和i_all_intra的值得到的i_mb_count值
        const double duration = h->stat.f_frame_duration[SLICE_TYPE_I] +//不同切片类型的帧持续时间之和
                                h->stat.f_frame_duration[SLICE_TYPE_P] +
                                h->stat.f_frame_duration[SLICE_TYPE_B];
        float f_bitrate = SUM3(h->stat.i_frame_size) / duration / 125;//通过对h->stat.i_frame_size数组进行求和,再除以持续时间和常数得到的比特率值

        if( PARAM_INTERLACED )
        {   //如果参数 PARAM_INTERLACED 为真,则生成关于场(field)的统计信息
            char *fieldstats = buf;
            fieldstats[0] = 0;
            if( i_inter )
                fieldstats += sprintf( fieldstats, " inter:%.1f%%", h->stat.i_mb_field[1] * 100.0 / i_inter );
            if( i_skip )
                fieldstats += sprintf( fieldstats, " skip:%.1f%%", h->stat.i_mb_field[2] * 100.0 / i_skip );
            x264_log( h, X264_LOG_INFO, "field mbs: intra: %.1f%%%s\n",
                      h->stat.i_mb_field[0] * 100.0 / i_all_intra, buf );
        }

        if( h->pps->b_transform_8x8_mode )
        {   //如果参数 h->pps->b_transform_8x8_mode 为真(非零),则生成关于8x8变换的统计信息
            buf[0] = 0;
            if( h->stat.i_mb_count_8x8dct[0] )
                sprintf( buf, " inter:%.1f%%", 100. * h->stat.i_mb_count_8x8dct[1] / h->stat.i_mb_count_8x8dct[0] );
            x264_log( h, X264_LOG_INFO, "8x8 transform intra:%.1f%%%s\n", 100. * i_i8x8 / X264_MAX( i_intra, 1 ), buf );
        }

        if( (h->param.analyse.i_direct_mv_pred == X264_DIRECT_PRED_AUTO ||
            (h->stat.i_direct_frames[0] && h->stat.i_direct_frames[1]))
            && h->stat.i_frame_count[SLICE_TYPE_B] )
        {
            x264_log( h, X264_LOG_INFO, "direct mvs  spatial:%.1f%% temporal:%.1f%%\n",
                      h->stat.i_direct_frames[1] * 100. / h->stat.i_frame_count[SLICE_TYPE_B],
                      h->stat.i_direct_frames[0] * 100. / h->stat.i_frame_count[SLICE_TYPE_B] );
        }

        buf[0] = 0;//创建字符数组 buf,并将其初始化为空字符串
        if( CHROMA_FORMAT )
        {   //CHROMA_FORMAT 为真(非零),则生成关于色度(chroma)的统计信息
            int csize = CHROMA444 ? 4 : 1;
            if( i_mb_count != i_all_intra )
                sprintf( buf, " inter: %.1f%% %.1f%% %.1f%%",
                         h->stat.i_mb_cbp[1] * 100.0 / ((i_mb_count - i_all_intra)*4),
                         h->stat.i_mb_cbp[3] * 100.0 / ((i_mb_count - i_all_intra)*csize),
                         h->stat.i_mb_cbp[5] * 100.0 / ((i_mb_count - i_all_intra)*csize) );
            x264_log( h, X264_LOG_INFO, "coded y,%s,%s intra: %.1f%% %.1f%% %.1f%%%s\n",
                      CHROMA444?"u":"uvDC", CHROMA444?"v":"uvAC",
                      h->stat.i_mb_cbp[0] * 100.0 / (i_all_intra*4),
                      h->stat.i_mb_cbp[2] * 100.0 / (i_all_intra*csize),
                      h->stat.i_mb_cbp[4] * 100.0 / (i_all_intra*csize), buf );
        }
        else
        {   //如果 CHROMA_FORMAT 不为真(零),则生成关于亮度(luma)的统计信息
            if( i_mb_count != i_all_intra )
                sprintf( buf, " inter: %.1f%%", h->stat.i_mb_cbp[1] * 100.0 / ((i_mb_count - i_all_intra)*4) );
            x264_log( h, X264_LOG_INFO, "coded y intra: %.1f%%%s\n",
                      h->stat.i_mb_cbp[0] * 100.0 / (i_all_intra*4), buf );
        }
        //定义一个二维整数数组 fixed_pred_modes,用于存储预测模式的统计数据。数组的大小为 4x9,初始值都为 0。定义一个一维整数数组 sum_pred_modes,用于存储每个预测模式类别的总数,初始值都为 0
        int64_t fixed_pred_modes[4][9] = {{0}};
        int64_t sum_pred_modes[4] = {0};
        for( int i = 0; i <= I_PRED_16x16_DC_128; i++ )
        {   //第一个 for 循环用于统计 16x16 宏块的预测模式。循环遍历 I_PRED_16x16_DC_128(表示预测模式的枚举值),将统计数据加到对应的 fixed_pred_modes 和 sum_pred_modes 中
            fixed_pred_modes[0][x264_mb_pred_mode16x16_fix[i]] += h->stat.i_mb_pred_mode[0][i];
            sum_pred_modes[0] += h->stat.i_mb_pred_mode[0][i];
        }
        if( sum_pred_modes[0] )//如果 sum_pred_modes[0] 不为 0,则计算并输出关于 16x16 宏块预测模式的统计信息。使用 x264_log 函数将各个预测模式的百分比输出到日志中
            x264_log( h, X264_LOG_INFO, "i16 v,h,dc,p: %2.0f%% %2.0f%% %2.0f%% %2.0f%%\n",
                      fixed_pred_modes[0][0] * 100.0 / sum_pred_modes[0],
                      fixed_pred_modes[0][1] * 100.0 / sum_pred_modes[0],
                      fixed_pred_modes[0][2] * 100.0 / sum_pred_modes[0],
                      fixed_pred_modes[0][3] * 100.0 / sum_pred_modes[0] );
        for( int i = 1; i <= 2; i++ )
        {
            for( int j = 0; j <= I_PRED_8x8_DC_128; j++ )
            {   //用于统计 4x4 宏块的预测模式。循环遍历 I_PRED_8x8_DC_128(表示预测模式的枚举值),将统计数据加到对应的 fixed_pred_modes 和 sum_pred_modes 中
                fixed_pred_modes[i][x264_mb_pred_mode4x4_fix(j)] += h->stat.i_mb_pred_mode[i][j];
                sum_pred_modes[i] += h->stat.i_mb_pred_mode[i][j];
            }
            if( sum_pred_modes[i] )//计算并输出关于 4x4 宏块预测模式的统计信息。使用 x264_log 函数将各个预测模式的百分比输出到日志中
                x264_log( h, X264_LOG_INFO, "i%d v,h,dc,ddl,ddr,vr,hd,vl,hu: %2.0f%% %2.0f%% %2.0f%% %2.0f%% %2.0f%% %2.0f%% %2.0f%% %2.0f%% %2.0f%%\n", (3-i)*4,
                          fixed_pred_modes[i][0] * 100.0 / sum_pred_modes[i],
                          fixed_pred_modes[i][1] * 100.0 / sum_pred_modes[i],
                          fixed_pred_modes[i][2] * 100.0 / sum_pred_modes[i],
                          fixed_pred_modes[i][3] * 100.0 / sum_pred_modes[i],
                          fixed_pred_modes[i][4] * 100.0 / sum_pred_modes[i],
                          fixed_pred_modes[i][5] * 100.0 / sum_pred_modes[i],
                          fixed_pred_modes[i][6] * 100.0 / sum_pred_modes[i],
                          fixed_pred_modes[i][7] * 100.0 / sum_pred_modes[i],
                          fixed_pred_modes[i][8] * 100.0 / sum_pred_modes[i] );
        }
        for( int i = 0; i <= I_PRED_CHROMA_DC_128; i++ )
        {   //用于统计色度(chroma)宏块的预测模式。循环遍历 I_PRED_CHROMA_DC_128(表示预测模式的枚举值),将统计数据加到对应的 fixed_pred_modes 和 sum_pred_modes 中
            fixed_pred_modes[3][x264_mb_chroma_pred_mode_fix[i]] += h->stat.i_mb_pred_mode[3][i];
            sum_pred_modes[3] += h->stat.i_mb_pred_mode[3][i];
        }
        if( sum_pred_modes[3] && !CHROMA444 )//则计算并输出关于色度宏块预测模式的统计信息。使用 x264_log 函数将各个预测模式的百分比输出到日志中
            x264_log( h, X264_LOG_INFO, "i8c dc,h,v,p: %2.0f%% %2.0f%% %2.0f%% %2.0f%%\n",
                      fixed_pred_modes[3][0] * 100.0 / sum_pred_modes[3],
                      fixed_pred_modes[3][1] * 100.0 / sum_pred_modes[3],
                      fixed_pred_modes[3][2] * 100.0 / sum_pred_modes[3],
                      fixed_pred_modes[3][3] * 100.0 / sum_pred_modes[3] );

        if( h->param.analyse.i_weighted_pred >= X264_WEIGHTP_SIMPLE && h->stat.i_frame_count[SLICE_TYPE_P] > 0 )
        {   //则生成关于加权预测帧的统计信息。将字符串 buf 的第一个字符设为 0,然后根据 CHROMA_FORMAT 的值,将字符串 " UV:%.1f%%" 追加到 buf 中。最后,使用 x264_log 函数将加权预测帧的百分比输出到日志中
            buf[0] = 0;
            if( CHROMA_FORMAT )
                sprintf( buf, " UV:%.1f%%", h->stat.i_wpred[1] * 100.0 / h->stat.i_frame_count[SLICE_TYPE_P] );
            x264_log( h, X264_LOG_INFO, "Weighted P-Frames: Y:%.1f%%%s\n",
                      h->stat.i_wpred[0] * 100.0 / h->stat.i_frame_count[SLICE_TYPE_P], buf );
        }
        //第一个嵌套的 for 循环用于统计参考帧的使用情况。循环遍历 i_list 和 i_slice,其中 i_list 表示参考帧列表,i_slice 表示切片
        for( int i_list = 0; i_list < 2; i_list++ )
            for( int i_slice = 0; i_slice < 2; i_slice++ )
            {
                char *p = buf;
                int64_t i_den = 0;
                int i_max = 0;
                for( int i = 0; i < X264_REF_MAX*2; i++ )
                    if( h->stat.i_mb_count_ref[i_slice][i_list][i] )
                    {
                        i_den += h->stat.i_mb_count_ref[i_slice][i_list][i];
                        i_max = i;
                    }
                if( i_max == 0 )
                    continue;
                for( int i = 0; i <= i_max; i++ )//append each reference index's usage percentage to buf with sprintf
                    p += sprintf( p, " %4.1f%%", 100. * h->stat.i_mb_count_ref[i_slice][i_list][i] / i_den );
                x264_log( h, X264_LOG_INFO, "ref %c L%d:%s\n", "PB"[i_slice], i_list, buf );
            }

        if( h->param.analyse.b_ssim )
        {   //compute and log the mean SSIM: sum the h->stat.f_ssim_mean_y entries, divide by duration, and also report the result in dB
            float ssim = SUM3( h->stat.f_ssim_mean_y ) / duration;
            x264_log( h, X264_LOG_INFO, "SSIM Mean Y:%.7f (%6.3fdb)\n", ssim, calc_ssim_db( ssim ) );
        }
        if( h->param.analyse.b_psnr )
        {   //compute and log the PSNR figures: mean Y, U and V, the per-frame average, the global PSNR derived from the accumulated SSD, and the bitrate
            x264_log( h, X264_LOG_INFO,
                      "PSNR Mean Y:%6.3f U:%6.3f V:%6.3f Avg:%6.3f Global:%6.3f kb/s:%.2f\n",
                      SUM3( h->stat.f_psnr_mean_y ) / duration,
                      SUM3( h->stat.f_psnr_mean_u ) / duration,
                      SUM3( h->stat.f_psnr_mean_v ) / duration,
                      SUM3( h->stat.f_psnr_average ) / duration,
                      calc_psnr( SUM3( h->stat.f_ssd_global ), duration * i_yuv_size ),
                      f_bitrate );
        }
        else
            x264_log( h, X264_LOG_INFO, "kb/s:%.2f\n", f_bitrate );
    }

    /* rc *///release the rate-control resources
    x264_ratecontrol_delete( h );

    /* param *///clean up and release the parameter-related resources
    x264_param_cleanup( &h->param );

    x264_cqm_delete( h );//release the custom quantization matrix (CQM) resources
    x264_free( h->nal_buffer );//free h->nal_buffer, h->reconfig_h, the analysis cost tables and h->cost_table
    x264_free( h->reconfig_h );
    x264_analyse_free_costs( h );
    x264_free( h->cost_table );

    if( h->i_thread_frames > 1 )
        h = h->thread[h->i_thread_phase];

    /* frames *///delete the unused frame lists, the current frame list and the blank-unused frame list
    x264_frame_delete_list( h->frames.unused[0] );
    x264_frame_delete_list( h->frames.unused[1] );
    x264_frame_delete_list( h->frames.current );
    x264_frame_delete_list( h->frames.blank_unused );

    h = h->thread[0];//point h at the first thread context
    //walk every active frame thread and its L0 reference list, deleting duplicated reference frames with x264_frame_delete
    for( int i = 0; i < h->i_thread_frames; i++ )
        if( h->thread[i]->b_thread_active )
            for( int j = 0; j < h->thread[i]->i_ref[0]; j++ )
                if( h->thread[i]->fref[0][j] && h->thread[i]->fref[0][j]->b_duplicate )
                    x264_frame_delete( h->thread[i]->fref[0][j] );
    //if more than one lookahead thread was used, free each entry of h->lookahead_thread
    if( h->param.i_lookahead_threads > 1 )
        for( int i = 0; i < h->param.i_lookahead_threads; i++ )
            x264_free( h->lookahead_thread[i] );

    for( int i = h->param.i_threads - 1; i >= 0; i-- )
    {   //iterate over the thread contexts in reverse order, from the last down to the first
        x264_frame_t **frame;
        //release per-frame state only once per context: for every thread when frame threading is used, but only for thread 0 when sliced threads are enabled
        if( !h->param.b_sliced_threads || i == 0 )
        {
            for( frame = h->thread[i]->frames.reference; *frame; frame++ )
            {   //walk this thread's reference list h->thread[i]->frames.reference, decrement each frame's reference count, and delete the frame with x264_frame_delete once the count reaches zero
                assert( (*frame)->i_reference_count > 0 );
                (*frame)->i_reference_count--;
                if( (*frame)->i_reference_count == 0 )
                    x264_frame_delete( *frame );
            }
            frame = &h->thread[i]->fdec;
            if( *frame )
            {   //if this thread still holds a reconstructed frame (fdec), decrement its reference count and delete it once the count reaches zero
                assert( (*frame)->i_reference_count > 0 );
                (*frame)->i_reference_count--;
                if( (*frame)->i_reference_count == 0 )
                    x264_frame_delete( *frame );
            }//release this thread's macroblock cache with x264_macroblock_cache_free
            x264_macroblock_cache_free( h->thread[i] );
        }//release this thread's macroblock buffers with x264_macroblock_thread_free
        x264_macroblock_thread_free( h->thread[i], 0 );
        x264_free( h->thread[i]->out.p_bitstream );//free this thread's output bitstream and NAL buffers, destroy its mutex and condition variable, then free the thread context
        x264_free( h->thread[i]->out.nal );
        x264_pthread_mutex_destroy( &h->thread[i]->mutex );
        x264_pthread_cond_destroy( &h->thread[i]->cv );
        x264_free( h->thread[i] );
    }
#if HAVE_OPENCL
    x264_opencl_close_library( ocl );
#endif
}
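
The SSIM and PSNR lines above rely on two small conversions: the mean SSIM is reported in dB, and the global PSNR is derived from the accumulated SSD over all encoded samples. Below is a minimal, self-contained sketch of the conventional formulas, assuming 8-bit samples; the helper names demo_ssim_db and demo_psnr only echo calc_ssim_db and calc_psnr from the listing and are illustrative, not x264's actual implementations.

#include <math.h>
#include <stdio.h>

/* Conventional SSIM -> dB conversion: -10*log10(1 - ssim). */
static double demo_ssim_db( double ssim )
{
    double inv = 1.0 - ssim;
    return inv > 0.0 ? -10.0 * log10( inv ) : 100.0; /* clamp a perfect SSIM to an arbitrary ceiling */
}

/* Global PSNR from the accumulated SSD over 'count' 8-bit samples: 10*log10(255^2 / MSE). */
static double demo_psnr( double ssd, double count )
{
    return 10.0 * log10( 255.0 * 255.0 * count / ssd );
}

int main( void )
{
    printf( "SSIM 0.990 -> %.1f dB\n", demo_ssim_db( 0.990 ) );             /* 20.0 dB */
    printf( "global PSNR -> %.2f dB\n", demo_psnr( 1.0e6, 1280 * 720 ) );   /* MSE ~1.09 -> ~47.8 dB */
    return 0;
}

For intuition, a mean SSIM of 0.99 maps to 20 dB and 0.999 to 30 dB, which matches the scale of the "SSIM Mean Y" log line.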

三、Complete x264 encoding demo

The complete project is available here: a video encoder demo based on x264.

Below is the complete code for encoding a YUV sequence with x264:

#include <stdio.h>
#include <stdlib.h>
#include <x264.h>

int main(int argc, char** argv){
    int ret;
    FILE* fp_input = NULL;
    FILE* fp_output = NULL;  
    x264_param_t x264_param;
    //1. Initialize the encoder parameters
    x264_param_default( &x264_param );
    x264_param_default_preset(&x264_param, "ultrafast", "zerolatency");
    x264_param.i_width = 480;
    x264_param.i_height = 360;
    x264_param.i_bitdepth = 8;
    x264_param.i_csp = X264_CSP_I420;
    x264_param.b_repeat_headers = 1;
    x264_param.b_annexb = 1;
    //encoding frame rate
    x264_param.i_fps_num = 25;
    x264_param.i_fps_den = 1;
 
    x264_param_apply_profile(&x264_param, x264_profile_names[0]);
    //input YUV file
    fp_input = fopen("./input_480x360.yuv", "rb");
    //output bitstream file
    fp_output = fopen("./output.h264", "wb");
    if(fp_input==NULL||fp_output==NULL)
    {
        return -1;
    }
    //2. Open the encoder
    x264_t* p_encoder=NULL;
    p_encoder = x264_encoder_open(&x264_param);
    if(p_encoder==NULL){
        printf("x264_encoder_open fail\n");
        return 0;
    }
    //3. Allocate the input picture and set up its planes
    x264_picture_t pic;
    x264_picture_t pic_out;
    if (x264_picture_alloc( &pic, x264_param.i_csp, x264_param.i_width, x264_param.i_height ) < 0)
    {
        printf("x264_picture_alloc fail.\n");
    }
    
    int frame_pix = x264_param.i_width * x264_param.i_height;
    //compute the total number of frames in the input YUV file
    int frame_num = 0;
    fseek(fp_input, 0, SEEK_END);
    frame_num = ftell(fp_input)/(frame_pix * 3 / 2);
    fseek(fp_input,0,SEEK_SET);
    //4. Encode the frames one by one
    x264_nal_t* p_nals=NULL;
    int nal_num = 0;
    int i_frame_size;
    for(int i = 0;i < frame_num; i++)
    {
        fread(pic.img.plane[0], 1, frame_pix,fp_input);    //Y
        fread(pic.img.plane[1], 1, frame_pix/4,fp_input);  //U
        fread(pic.img.plane[2], 1, frame_pix/4,fp_input);  //V
        pic.i_pts = i;
        i_frame_size = x264_encoder_encode(p_encoder, &p_nals, &nal_num, &pic, &pic_out); 
        if(i_frame_size < 0) 
        {
            printf("error, i_frame_size < 0\n");
        }
        else if(i_frame_size)
        {
            printf("encode frame\n");
            fwrite(p_nals->p_payload, 1, i_frame_size, fp_output);
        }
    }
    while(x264_encoder_delayed_frames( p_encoder ))
    {
        i_frame_size = x264_encoder_encode(p_encoder, &p_nals, &nal_num, NULL, &pic_out);
        if(i_frame_size < 0) 
        {
            printf("Flush error, i_frame_size < 0\n");
        }
        else if(i_frame_size)
        {
            printf("Flush frame\n");
            fwrite(p_nals->p_payload, 1, i_frame_size, fp_output);
        }
    }
    //5. Release resources and close the encoder
    fclose(fp_input);
    fclose(fp_output);
    x264_encoder_close(p_encoder);
    x264_picture_clean(&pic);
    return 0;
}
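
In the demo above, each access unit is written with a single fwrite starting at p_nals->p_payload, which relies on x264 laying out the returned NAL payloads contiguously in memory. An equivalent, more explicit variant is to walk the NAL array and write each payload separately; write_nals below is a hypothetical helper for illustration, not part of the x264 API.

#include <stdio.h>
#include <x264.h>

/* Hypothetical helper: write every NAL unit returned by x264_encoder_encode() one by one. */
static int write_nals( FILE *fp, x264_nal_t *nals, int nal_num )
{
    int written = 0;
    for( int k = 0; k < nal_num; k++ )
    {
        if( fwrite( nals[k].p_payload, 1, nals[k].i_payload, fp ) != (size_t)nals[k].i_payload )
            return -1;                  /* short write */
        written += nals[k].i_payload;
    }
    return written;                     /* should equal the i_frame_size returned by the encoder */
}

With this helper, both fwrite(p_nals->p_payload, 1, i_frame_size, fp_output) calls in the demo could be replaced by write_nals(fp_output, p_nals, nal_num). The demo itself can typically be built with something like gcc demo.c -lx264 -o demo, though the exact flags depend on how libx264 was installed.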

Likes and bookmarks are what keep me writing! A gift of roses leaves fragrance in your hand.
