本篇主要是捋顺 scrcpy 客户端 解码流程,按照如下流程梳理:
1>. decoder_init(&s->decoder); ///> 5. decoder_init();
2>. stream_init(&s->stream, s->server.video_socket, &stream_cbs, NULL);
3>. stream_add_sink(&s->stream, &dec->packet_sink); ///> 8. stream_add_sink();
4>. screen_init(&s->screen, &screen_params); ///> 12. screen_init();
5>. decoder_add_sink(&s->decoder, &s->screen.frame_sink); ///> 13. decoder_add_sink();
6>. stream_start(&s->stream); ///> 14+.流启动配置
第 1 步 解码器初始化内容
void decoder_init(struct decoder *decoder) {
decoder->sink_count = 0;
static const struct sc_packet_sink_ops ops = {
.open = decoder_packet_sink_open,
.close = decoder_packet_sink_close,
.push = decoder_packet_sink_push,
};
decoder->packet_sink.ops = &ops;
}
///> 此处源码初始化 decoder->packet_sink.ops 结构体内容,程序在 run_stream() 线程中,就直接调用 sink->ops->push() 函数
///> 把数据流喂到编码器中,先看看 push 函数。
// sc_packet_sink_ops.push implementation: recover the owning decoder
// from the embedded sink member and forward the packet to it.
static bool
decoder_packet_sink_push(struct sc_packet_sink *sink, const AVPacket *packet) {
    return decoder_push(DOWNCAST(sink), packet);
}
// Feed one encoded packet to the FFmpeg decoder and, if a frame comes
// out, forward it to every registered frame sink.
// Returns false on a fatal decoding error; a single lost frame is not
// treated as fatal.
static bool
decoder_push(struct decoder *decoder, const AVPacket *packet) {
// Config packets (codec headers) carry no PTS; there is nothing to
// decode for them here.
bool is_config = packet->pts == AV_NOPTS_VALUE;
if (is_config) {
// nothing to do
return true;
}
// Send the packet to the decoder. EAGAIN means the decoder wants us to
// drain frames first, which we do just below, so it is not an error.
int ret = avcodec_send_packet(decoder->codec_ctx, packet);
if (ret < 0 && ret != AVERROR(EAGAIN)) {
LOGE("Could not send video packet: %d", ret);
return false;
}
// Only one receive per push: presumably the stream guarantees at most
// one frame per packet — TODO confirm against the server encoder config.
ret = avcodec_receive_frame(decoder->codec_ctx, decoder->frame);
if (!ret) {
// a frame was received
bool ok = push_frame_to_sinks(decoder, decoder->frame);
// A frame lost should not make the whole pipeline fail. The error, if
// any, is already logged.
(void) ok;
// Release the frame's buffers; sinks must have taken their own refs.
av_frame_unref(decoder->frame);
} else if (ret != AVERROR(EAGAIN)) {
// EAGAIN just means "no frame available yet"; anything else is fatal.
LOGE("Could not receive video frame: %d", ret);
return false;
}
return true;
}
///> 此处最终调用 decoder_push 函数,从函数实现中我们可以看到:先调用 ffmpeg 接口 avcodec_send_packet 函数送入数据;
///> 再调用 avcodec_receive_frame() 函数获取解码后的 frame 数据,并把 frame 送到 sink 入口。
第 2 步 初始化流部分内容
// Stream callbacks: only end-of-stream is reported; stream_on_eos()
// pushes an SDL event so the main event loop can shut down.
static const struct stream_callbacks stream_cbs = {
.on_eos = stream_on_eos,
};
// Call site: bind the stream to the server's video socket; no userdata.
stream_init(&s->stream, s->server.video_socket, &stream_cbs, NULL);
// Bind a stream to its socket and record the caller's callbacks.
// The on_eos callback is mandatory; cbs_userdata is passed back to it.
void
stream_init(struct stream *stream, socket_t socket,
            const struct stream_callbacks *cbs, void *cbs_userdata) {
    assert(cbs && cbs->on_eos);

    stream->socket = socket;
    stream->cbs = cbs;
    stream->cbs_userdata = cbs_userdata;
    stream->pending = NULL;
    stream->sink_count = 0;
}
///> 流回调函数 stream_on_eos 函数实现内容
static void
stream_on_eos(struct stream *stream, void *userdata) {
(void) stream;
(void) userdata;
SDL_Event stop_event;
stop_event.type = EVENT_STREAM_STOPPED;
SDL_PushEvent(&stop_event);
}
///> 此处发送的 sdl event 给 event_loop() 函数,可以通过此方式停止解码显示流的工作。
// Run the SDL event loop until the user quits (returns true) or the
// device stream reaches end-of-stream (returns false).
static bool
event_loop(struct scrcpy *s, const struct scrcpy_options *options) {
    SDL_Event event;
    while (SDL_WaitEvent(&event)) {
        enum event_result result = handle_event(s, options, &event);
        if (result == EVENT_RESULT_STOPPED_BY_USER) {
            return true;
        }
        if (result == EVENT_RESULT_STOPPED_BY_EOS) {
            LOGW("Device disconnected");
            return false;
        }
        // EVENT_RESULT_CONTINUE: keep processing events
    }
    // SDL_WaitEvent() failed
    return false;
}
第 3 步 把 解码器槽加入到流中
// Register a packet sink; every packet received from the socket will be
// pushed to all registered sinks. Must be called before stream_start().
void
stream_add_sink(struct stream *stream, struct sc_packet_sink *sink) {
    assert(sink);
    assert(sink->ops);
    assert(stream->sink_count < STREAM_MAX_SINKS);

    stream->sinks[stream->sink_count] = sink;
    stream->sink_count++;
}
///> 看看 sc_packet_sink 结构体定义
// A packet sink: anything able to consume encoded AVPackets. Concrete
// implementations embed this struct and recover their own object from
// the sink pointer (DOWNCAST) inside the ops callbacks.
struct sc_packet_sink {
const struct sc_packet_sink_ops *ops;
};
// Virtual table for a packet sink.
struct sc_packet_sink_ops {
// Called once before streaming starts, with the codec in use.
bool (*open)(struct sc_packet_sink *sink, const AVCodec *codec);
// Called once when the stream ends.
void (*close)(struct sc_packet_sink *sink);
// Called for each received packet; returning false aborts the stream.
bool (*push)(struct sc_packet_sink *sink, const AVPacket *packet);
};
///> 在 第1步 中已经描述过,暂时给槽定义为:具有打开、关闭功能,并且可以把 AVPacket 包数据放进去。
///> 我们就可以称之为 槽。
第 4 步 初始化本地屏幕
简化源码内容,保留相关内容。
// Initialize the screen: window state flags, the buffering thread and the
// frame-sink interface. Simplified excerpt — window/renderer creation is
// omitted. Returns true on success, false if buffering could not start.
// Fixes vs. excerpt: `ok` was used without a declaration, the goto label
// did not exist, and the bool function had no return statements.
bool screen_init(struct screen *screen, const struct screen_params *params)
{
    (void) params; // consumed by the omitted window-creation code

    screen->resize_pending = false;
    screen->has_frame = false;
    screen->fullscreen = false;
    screen->maximized = false;

    // NOTE(review): in the full source, cbs is handed to
    // sc_video_buffer_init() (omitted from this excerpt) — confirm there.
    static const struct sc_video_buffer_callbacks cbs = {
        .on_new_frame = sc_video_buffer_on_new_frame,
    };
    (void) cbs;

    bool ok = sc_video_buffer_start(&screen->vb); // spawns run_buffering()
    if (!ok) {
        LOGE("Could not start video_buffer");
        goto error_destroy_video_buffer;
    }

    static const struct sc_frame_sink_ops ops = {
        .open = screen_frame_sink_open,
        .close = screen_frame_sink_close,
        .push = screen_frame_sink_push,
    };
    screen->frame_sink.ops = &ops;

    return true;

error_destroy_video_buffer:
    // NOTE(review): matching destroy for the buffer initialized earlier in
    // the full source — verify the exact cleanup function name there.
    sc_video_buffer_destroy(&screen->vb);
    return false;
}
///> 此函数 创建 run_buffering 线程, 并设置 screen->frame_sink.ops 结构体内容
第 5 步 把屏幕槽添加到解码器中
通过 decoder_add_sink(&s->decoder, &s->screen.frame_sink) 函数把 screen.frame_sink 添加到解码器中,
// Register a frame sink; every decoded frame will be pushed to all
// registered sinks. Must be called before decoding starts.
void
decoder_add_sink(struct decoder *decoder, struct sc_frame_sink *sink) {
    assert(sink);
    assert(sink->ops);
    assert(decoder->sink_count < DECODER_MAX_SINKS);

    decoder->sinks[decoder->sink_count] = sink;
    decoder->sink_count++;
}
在 第4步 中有描述,接下来我们看看 screen_frame_sink_push() 函数的实现,它最终调用的是 sc_video_buffer_push() 函数。
// Accept a decoded frame. Without buffering it is forwarded immediately;
// with buffering it is queued for run_buffering() to release on schedule.
// Returns false only on allocation failure.
bool
sc_video_buffer_push(struct sc_video_buffer *vb, const AVFrame *frame) {
if (!vb->buffering_time) {
// No buffering
return sc_video_buffer_offer(vb, frame);
}
sc_mutex_lock(&vb->b.mutex);
// PTS written by the server are in microseconds.
sc_tick pts = SC_TICK_FROM_US(frame->pts);
// Feed the clock estimator and wake run_buffering(), which recomputes
// its release deadline from the updated clock.
sc_clock_update(&vb->b.clock, sc_tick_now(), pts);
sc_cond_signal(&vb->b.wait_cond);
if (vb->b.clock.count == 1) {
sc_mutex_unlock(&vb->b.mutex);
// First frame, offer it immediately, for two reasons:
// - not to delay the opening of the scrcpy window
// - the buffering estimation needs at least two clock points, so it
// could not handle the first frame
return sc_video_buffer_offer(vb, frame);
}
// Deep-copy the frame so it outlives the caller's av_frame_unref().
struct sc_video_buffer_frame *vb_frame = sc_video_buffer_frame_new(frame);
if (!vb_frame) {
sc_mutex_unlock(&vb->b.mutex);
LOGE("Could not allocate frame");
return false;
}
#ifndef SC_BUFFERING_NDEBUG
// Record the enqueue time for buffering-latency debug logs.
vb_frame->push_date = sc_tick_now();
#endif
sc_queue_push(&vb->b.queue, next, vb_frame);
// Wake run_buffering() waiting for the queue to become non-empty.
sc_cond_signal(&vb->b.queue_cond);
sc_mutex_unlock(&vb->b.mutex);
return true;
}
///> 也就是说 decoder->sinks->push() 函数,就是执行此函数.
第 6 步 流启动配置
// Spawn the stream thread (run_stream), which reads packets from the
// video socket and pushes them to the registered packet sinks.
// Returns false if the thread could not be created.
// Fix: removed a leftover printf("... DEBUG") line that wrote straight to
// stdout, bypassing the project's logging macros.
bool stream_start(struct stream *stream)
{
    LOGD("Starting stream thread");

    bool ok = sc_thread_create(&stream->thread, run_stream, "stream", stream);
    if (!ok) {
        LOGC("Could not start stream thread");
        return false;
    }
    return true;
}
///> 此处创建 run_stream 线程,内容如下:
// Stream thread entry point (simplified excerpt): set up an H.264 decoder
// context and parser, then loop reading packets from the socket and
// pushing them to the registered packet sinks until EOS or error.
// Fixes vs. excerpt: NULL checks on every FFmpeg allocation, and the
// packet/parser/codec context are now released on exit (they leaked).
static int run_stream(void *data)
{
    struct stream *stream = data;

    // const: avcodec_find_decoder() returns const AVCodec* since FFmpeg 5.
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
        LOGE("H.264 decoder not found");
        return -1;
    }

    stream->codec_ctx = avcodec_alloc_context3(codec);
    if (!stream->codec_ctx) {
        LOGE("Could not allocate codec context");
        return -1;
    }

    // NOTE(review): return value (if any) ignored, as in the original
    // excerpt — confirm error handling against the full source.
    stream_open_sinks(stream, codec);

    stream->parser = av_parser_init(AV_CODEC_ID_H264);
    if (!stream->parser) {
        LOGE("Could not initialize parser");
        goto finally_free_codec_ctx;
    }

    AVPacket *packet = av_packet_alloc();
    if (!packet) {
        LOGE("Could not allocate packet");
        goto finally_close_parser;
    }

    for (;;) {
        bool ok = stream_recv_packet(stream, packet);
        if (!ok) {
            // end of stream
            break;
        }
        ok = stream_push_packet(stream, packet);
        av_packet_unref(packet);
        if (!ok) {
            // cannot process packet (error already logged)
            break;
        }
    }

    av_packet_free(&packet);
finally_close_parser:
    av_parser_close(stream->parser);
finally_free_codec_ctx:
    avcodec_free_context(&stream->codec_ctx);
    return 0;
}
///> 此函数是相关功能的简化版,可以看到初始化 解码器参数并初始化解码器
///> 打开流 sink 的开关,可以向 sink 中 push 流数据。
///> 在循环中 stream_recv_packet() 读取网络 socket 的数据,stream_push_packet() 函数调用第1步中
///> 描述的 decoder_push() 函数,数据就被解码出来,push_frame_to_sinks(decoder, decoder->frame) 函数
///> 执行的是 sc_video_buffer_push() 函数,也就是说数据被送到 screen 对象中,在这个对象中是如何处理这些数据的呢?
第 7 步 run_buffering 线程
在 第4步的时候创建 run_buffering 线程,我们看看该线程在做什么。
// Buffering thread: dequeue frames pushed by sc_video_buffer_push() and
// release each one to the consumer once its clock-derived deadline (plus
// the configured buffering delay) has passed, or immediately on stop.
static int
run_buffering(void *data) {
struct sc_video_buffer *vb = data;
// This thread only exists when buffering is enabled.
assert(vb->buffering_time > 0);
for (;;) {
sc_mutex_lock(&vb->b.mutex);
// Wait for a frame to arrive, or for a stop request.
while (!vb->b.stopped && sc_queue_is_empty(&vb->b.queue)) {
sc_cond_wait(&vb->b.queue_cond, &vb->b.mutex);
}
if (vb->b.stopped) {
sc_mutex_unlock(&vb->b.mutex);
goto stopped;
}
struct sc_video_buffer_frame *vb_frame;
sc_queue_take(&vb->b.queue, next, &vb_frame); ///> take the next frame from the queue
// Hard cap so a clock jump can never delay the frame by more than the
// configured buffering time from now.
sc_tick max_deadline = sc_tick_now() + vb->buffering_time;
// PTS (written by the server) are expressed in microseconds
sc_tick pts = SC_TICK_TO_US(vb_frame->frame->pts);
bool timed_out = false;
// Sleep until the release deadline. The deadline is recomputed on every
// wake-up because sc_video_buffer_push() updates the clock and signals
// wait_cond whenever a new frame arrives.
while (!vb->b.stopped && !timed_out) {
sc_tick deadline = sc_clock_to_system_time(&vb->b.clock, pts)
+ vb->buffering_time;
if (deadline > max_deadline) {
deadline = max_deadline;
}
timed_out =
!sc_cond_timedwait(&vb->b.wait_cond, &vb->b.mutex, deadline);
}
if (vb->b.stopped) {
// Stopped while waiting: drop the frame we had already taken.
sc_video_buffer_frame_delete(vb_frame);
sc_mutex_unlock(&vb->b.mutex);
goto stopped;
}
sc_mutex_unlock(&vb->b.mutex);
#ifndef SC_BUFFERING_NDEBUG
LOGD("Buffering: %" PRItick ";%" PRItick ";%" PRItick,
pts, vb_frame->push_date, sc_tick_now());
#endif
sc_video_buffer_offer(vb, vb_frame->frame); ///> hand the frame on to the consumer (screen/renderer)
sc_video_buffer_frame_delete(vb_frame);
}
stopped:
// Flush queue
while (!sc_queue_is_empty(&vb->b.queue)) {
struct sc_video_buffer_frame *vb_frame;
sc_queue_take(&vb->b.queue, next, &vb_frame);
sc_video_buffer_frame_delete(vb_frame);
}
LOGD("Buffering thread ended");
return 0;
}
此部分描述是 "stream -> decoder -> render " 过程,H264裸流解码详解过程。