A complete player inevitably needs a large number of default settings and user-configurable options, and IjkPlayer is no exception.
For the flow of setting IjkPlayer options from the Java layer, see this article: https://www.jianshu.com/p/ab405e5bc1de .
This post walks through a few options from the ffp_context_options table in ff_ffplay_options that the article above does not explain:
{ "max-buffer-size", "max buffer size should be pre-read",
OPTION_OFFSET(dcc.max_buffer_size), OPTION_INT(MAX_QUEUE_SIZE, 0, MAX_QUEUE_SIZE) },
{ "min-frames", "minimal frames to stop pre-reading",
OPTION_OFFSET(dcc.min_frames), OPTION_INT(DEFAULT_MIN_FRAMES, MIN_MIN_FRAMES, MAX_MIN_FRAMES) },
{ "first-high-water-mark-ms", "first chance to wakeup read_thread",
OPTION_OFFSET(dcc.first_high_water_mark_in_ms),
OPTION_INT(DEFAULT_FIRST_HIGH_WATER_MARK_IN_MS,
DEFAULT_FIRST_HIGH_WATER_MARK_IN_MS,
DEFAULT_LAST_HIGH_WATER_MARK_IN_MS) },
{ "next-high-water-mark-ms", "second chance to wakeup read_thread",
OPTION_OFFSET(dcc.next_high_water_mark_in_ms),
OPTION_INT(DEFAULT_NEXT_HIGH_WATER_MARK_IN_MS,
DEFAULT_FIRST_HIGH_WATER_MARK_IN_MS,
DEFAULT_LAST_HIGH_WATER_MARK_IN_MS) },
{ "last-high-water-mark-ms", "last chance to wakeup read_thread",
OPTION_OFFSET(dcc.last_high_water_mark_in_ms),
OPTION_INT(DEFAULT_LAST_HIGH_WATER_MARK_IN_MS,
DEFAULT_FIRST_HIGH_WATER_MARK_IN_MS,
DEFAULT_LAST_HIGH_WATER_MARK_IN_MS) },
First we need to understand what a single option entry expresses. Take the first option as an example:
{ "max-buffer-size", "max buffer size should be pre-read",
OPTION_OFFSET(dcc.max_buffer_size), OPTION_INT(MAX_QUEUE_SIZE, 0, MAX_QUEUE_SIZE) }
Configuring an option requires four pieces of information:
max-buffer-size: the option's name;
max buffer size should be pre-read: the option's description;
OPTION_OFFSET(dcc.max_buffer_size): the FFPlayer member variable the option is bound to;
OPTION_INT(MAX_QUEUE_SIZE, 0, MAX_QUEUE_SIZE): the option's type, default value and value range;
The OPTION_OFFSET and OPTION_INT macros are explained below:
OPTION_OFFSET:
#define OPTION_OFFSET(x) offsetof(FFPlayer, x)
Ultimately this calls offsetof, a standard C macro from <stddef.h> (not a gcc-specific function): it returns the offset, in bytes, of member x from the start of the FFPlayer struct. In Java terms, think of it as the option system's way of identifying the member variable x inside the FFPlayer class.
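To make that concrete, here is a minimal standalone sketch (not ijkplayer code; DemoPlayer and set_int_at_offset are made-up names) showing why an option table stores the offset: generic code can then write into the right struct field without knowing the struct layout up front.

#include <stddef.h>
#include <stdio.h>

typedef struct DemoPlayer {
    int something_else;
    int max_buffer_size;
} DemoPlayer;

/* Generic setter: write an int at a given byte offset inside any struct. */
static void set_int_at_offset(void *obj, size_t offset, int value)
{
    *(int *)((char *)obj + offset) = value;
}

int main(void)
{
    DemoPlayer p = {0};
    /* 15 * 1024 * 1024 is just an illustrative value here */
    set_int_at_offset(&p, offsetof(DemoPlayer, max_buffer_size), 15 * 1024 * 1024);
    printf("max_buffer_size = %d\n", p.max_buffer_size);   /* prints 15728640 */
    return 0;
}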
OPTION_INT:
#define OPTION_INT(default__, min__, max__) \
.type = AV_OPT_TYPE_INT, \
{ .i64 = default__ }, \
.min = min__, \
.max = max__, \
.flags = AV_OPT_FLAG_DECODING_PARAM
Its first argument is the option's default value, the second its minimum, and the third its maximum.
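Putting the two macros together, the first entry expands to roughly the initializer below; the fields line up with FFmpeg's AVOption struct (name, description, offset, type, default value, min, max, flags):

{ "max-buffer-size", "max buffer size should be pre-read",
  offsetof(FFPlayer, dcc.max_buffer_size),
  .type = AV_OPT_TYPE_INT,
  { .i64 = MAX_QUEUE_SIZE },
  .min = 0,
  .max = MAX_QUEUE_SIZE,
  .flags = AV_OPT_FLAG_DECODING_PARAM },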
OK, now that we know how an option is configured, let's look at what these options actually do.
You may not have noticed, but IjkPlayer comes with built-in pre-read buffering of the stream.
The implementation lives in the read_thread function of ff_ffplay.c:
// the packet queue used as the buffering indicator
if (is->audio_stream >= 0) {
is->audioq.is_buffer_indicator = 1;
is->buffer_indicator_queue = &is->audioq;
} else if (is->video_stream >= 0) {
is->videoq.is_buffer_indicator = 1;
is->buffer_indicator_queue = &is->videoq;
} else {
assert("invalid streams");
}
// note: infinite loop
for(;;){
// ... omitted ...
// if the queues are full:
// stream_has_enough_packets checks whether the number of AVPackets already queued exceeds MIN_FRAMES
// (a sketch of that helper follows after this excerpt); if everything is full, sleep for 10 ms
if (ffp->infinite_buffer<1 && !is->seek_req &&
#ifdef FFP_MERGE
(is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
#else
(is->audioq.size + is->videoq.size + is->subtitleq.size > ffp->dcc.max_buffer_size
#endif
|| ( stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq, MIN_FRAMES)
&& stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq, MIN_FRAMES)
&& stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq, MIN_FRAMES)))) {
if (!is->eof) {
ffp_toggle_buffering(ffp, 0);
}
/* wait 10 ms */
SDL_LockMutex(wait_mutex);
SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
SDL_UnlockMutex(wait_mutex);
continue;
}
// ... omitted ...
// What the block below does:
// 1. update the value of the buffering progress bar in the UI (note: not the playback progress bar)
// 2. switch the player from pausing (buffering) back to playing
// is packet buffering enabled?
if (ffp->packet_buffering) {
// io tick counter = current time
io_tick_counter = SDL_GetTickHR();
// if nothing has been rendered yet, i.e. we are still before the first frame
if ((!ffp->first_video_frame_rendered && is->video_st) || (!ffp->first_audio_frame_rendered && is->audio_st)) {
// before the first frame, check buffering at most once every FAST_BUFFERING_CHECK_PER_MILLISECONDS (50 ms)
if (abs((int)(io_tick_counter - prev_io_tick_counter)) > FAST_BUFFERING_CHECK_PER_MILLISECONDS) {
prev_io_tick_counter = io_tick_counter;
// use the first (start-up) high-water mark as the buffering target
ffp->dcc.current_high_water_mark_in_ms = ffp->dcc.first_high_water_mark_in_ms;
// 1. update the value of the buffering progress bar in the UI (not the playback progress bar)
// 2. resume playback once enough data is buffered
ffp_check_buffering_l(ffp);
}
} else {
// after playback has started, check buffering (and update the buffering progress in the UI) at most once every BUFFERING_CHECK_PER_MILLISECONDS (500 ms)
if (abs((int)(io_tick_counter - prev_io_tick_counter)) > BUFFERING_CHECK_PER_MILLISECONDS) {
prev_io_tick_counter = io_tick_counter;
ffp_check_buffering_l(ffp);
}
}
}
}
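For reference, the "queue full" test above leans on stream_has_enough_packets. In upstream ffplay the helper looks roughly like this (ijkplayer's copy is derived from it and may differ in detail); a stream counts as "having enough" when it does not exist, has been aborted, is an attached picture, or already holds more than min_frames packets:

static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue, int min_frames)
{
    return stream_id < 0 ||
           queue->abort_request ||
           (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
           (queue->nb_packets > min_frames &&
            (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0));
}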
Back to the main flow: read_thread is one big infinite loop, annotated above. The core work happens in the function below, so let's look at it next:
ffp_check_buffering_l
void ffp_check_buffering_l(FFPlayer *ffp)
{
// ... omitted ...
// check whether the audio/video time bases are valid
if(is->audio_st)
audio_time_base_valid = is->audio_st->time_base.den > 0 && is->audio_st->time_base.num > 0;
if(is->video_st)
video_time_base_valid = is->video_st->time_base.den > 0 && is->video_st->time_base.num > 0;
if (hwm_in_ms > 0) {
int cached_duration_in_ms = -1;
int64_t audio_cached_duration = -1;
int64_t video_cached_duration = -1;
// get the buffered-but-not-yet-played duration
if (is->audio_st && audio_time_base_valid) {
audio_cached_duration = ffp->stat.audio_cache.duration;
#ifdef FFP_SHOW_DEMUX_CACHE
int audio_cached_percent = (int)av_rescale(audio_cached_duration, 1005, hwm_in_ms * 10);
av_log(ffp, AV_LOG_DEBUG, "audio cache=%%%d milli:(%d/%d) bytes:(%d/%d) packet:(%d/%d)\n", audio_cached_percent,
(int)audio_cached_duration, hwm_in_ms,
is->audioq.size, hwm_in_bytes,
is->audioq.nb_packets, MIN_FRAMES);
#endif
}
if (is->video_st && video_time_base_valid) {
video_cached_duration = ffp->stat.video_cache.duration;
#ifdef FFP_SHOW_DEMUX_CACHE
int video_cached_percent = (int)av_rescale(video_cached_duration, 1005, hwm_in_ms * 10);
av_log(ffp, AV_LOG_DEBUG, "video cache=%%%d milli:(%d/%d) bytes:(%d/%d) packet:(%d/%d)\n", video_cached_percent,
(int)video_cached_duration, hwm_in_ms,
is->videoq.size, hwm_in_bytes,
is->videoq.nb_packets, MIN_FRAMES);
#endif
}
// take the smaller of the audio and video buffered durations
if (video_cached_duration > 0 && audio_cached_duration > 0) {
cached_duration_in_ms = (int)IJKMIN(video_cached_duration, audio_cached_duration);
} else if (video_cached_duration > 0) {
cached_duration_in_ms = (int)video_cached_duration;
} else if (audio_cached_duration > 0) {
cached_duration_in_ms = (int)audio_cached_duration;
}
// if there is buffered, unplayed data
if (cached_duration_in_ms >= 0) {
// the position playback could reach using only the buffered data
buf_time_position = ffp_get_current_position_l(ffp) + cached_duration_in_ms;
ffp->playable_duration_ms = buf_time_position;
// convert the buffered duration into a percentage of the current high-water mark (see the worked example after this function)
buf_time_percent = (int)av_rescale(cached_duration_in_ms, 1005, hwm_in_ms * 10);
}
}
// total number of bytes currently buffered
int cached_size = is->audioq.size + is->videoq.size;
if (hwm_in_bytes > 0) {
buf_size_percent = (int)av_rescale(cached_size, 1005, hwm_in_bytes * 10);
}
int buf_percent = -1;
if (buf_time_percent >= 0) {
// always depend on cache duration if valid
if (buf_time_percent >= 100)
need_start_buffering = 1;
buf_percent = buf_time_percent;
} else {
if (buf_size_percent >= 100)
need_start_buffering = 1;
buf_percent = buf_size_percent;
}
if (buf_time_percent >= 0 && buf_size_percent >= 0) {
buf_percent = FFMIN(buf_time_percent, buf_size_percent);
}
if (buf_percent) {
// notify the Java layer to move the buffering progress (not the playback progress) to buf_time_position
ffp_notify_msg3(ffp, FFP_MSG_BUFFERING_UPDATE, (int)buf_time_position, buf_percent);
}
// has the buffering target been reached, i.e. can the buffered data start playing?
if (need_start_buffering) {
if (hwm_in_ms < ffp->dcc.next_high_water_mark_in_ms) {
hwm_in_ms = ffp->dcc.next_high_water_mark_in_ms;
} else {
hwm_in_ms *= 2;
}
if (hwm_in_ms > ffp->dcc.last_high_water_mark_in_ms)
hwm_in_ms = ffp->dcc.last_high_water_mark_in_ms;
// set the high-water mark (buffering target) for the next buffering round
ffp->dcc.current_high_water_mark_in_ms = hwm_in_ms;
// if the indicator queue holds packets
if (is->buffer_indicator_queue && is->buffer_indicator_queue->nb_packets > 0) {
// resume playback when both the audio and the video queue hold at least MIN_MIN_FRAMES (2) packets,
// or the corresponding stream does not exist or has been aborted
if ( (is->audioq.nb_packets >= MIN_MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
&& (is->videoq.nb_packets >= MIN_MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)) {
// the argument 0 means "stop buffering": if the player is currently in the buffering state, this resumes playback
ffp_toggle_buffering(ffp, 0);
}
}
}
}
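Before summarizing, a quick note on the av_rescale(x, 1005, hwm * 10) expressions used for buf_time_percent and buf_size_percent above: av_rescale(a, b, c) computes a * b / c with rounding, so the result is the cached amount as a percentage of the high-water mark, padded by a factor of 100.5/100 (presumably so that a nearly full buffer already reports 100 and triggers the resume). A standalone illustration (not ijkplayer code):

#include <libavutil/mathematics.h>
#include <stdio.h>

int main(void)
{
    int hwm_in_ms = 1000;                   /* current buffering target */
    int cached[] = { 300, 600, 995 };       /* buffered durations in ms */
    for (int i = 0; i < 3; i++) {
        int percent = (int)av_rescale(cached[i], 1005, hwm_in_ms * 10);
        printf("cached %4d ms -> buf_time_percent = %d\n", cached[i], percent);
    }
    return 0;   /* prints 30, 60, 100 */
}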
That covers ffp_check_buffering_l. Finally, a summary of what each option means:
Option 1:
max-buffer-size: the maximum buffer size in bytes; once the total size of the packet queues exceeds it, read_thread stops pre-reading.
Option 2:
min-frames: the number of pre-read frames at which pre-reading stops, i.e. roughly the maximum number of AVPackets kept in each queue.
Option 3:
first-high-water-mark-ms: the buffering duration target before the first frame is rendered, i.e. how much data is buffered before playback starts. As an aside, fast-start (first-frame) optimization is closely tied to this option.
Option 4:
next-high-water-mark-ms: the buffering duration target for the next buffering round after playback has started.
Option 5:
last-high-water-mark-ms: the maximum buffering duration target after playback has started; the target doubles every round but is capped at this value (see the escalation sketch below).
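To make options 3-5 concrete, here is a standalone sketch of how the buffering target escalates across rounds, assuming the commonly used defaults of 100 ms / 1000 ms / 5000 ms (check the DEFAULT_*_HIGH_WATER_MARK_IN_MS macros in your ijkplayer version); the escalation mirrors the need_start_buffering branch shown earlier:

#include <stdio.h>

int main(void)
{
    int first = 100, next = 1000, last = 5000;
    int hwm_in_ms = first;                 /* used before the first frame */
    for (int round = 1; round <= 6; round++) {
        printf("buffering round %d: target %d ms\n", round, hwm_in_ms);
        if (hwm_in_ms < next)
            hwm_in_ms = next;
        else
            hwm_in_ms *= 2;
        if (hwm_in_ms > last)
            hwm_in_ms = last;
    }
    return 0;   /* targets: 100, 1000, 2000, 4000, 5000, 5000 */
}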