gRPC Initialization

0. Structure

1. GrpcLibraryInitializer initialization

The global static variable static grpc::internal::GrpcLibraryInitializer g_gli_initializer initializes the two globals g_glip and g_core_codegen_interface.

The GrpcLibraryInitializer class is small: both globals are set up in its constructor, as sketched below.
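A sketch of the class, based on include/grpcpp/impl/grpc_library.h (details vary slightly across gRPC versions):

class GrpcLibraryInitializer final {
 public:
  GrpcLibraryInitializer() {
    // Install the default GrpcLibrary and CoreCodegen implementations
    // exactly once; later initializers see non-null pointers and do nothing.
    if (grpc::g_glip == nullptr) {
      static auto* const g_gli = new GrpcLibrary();
      grpc::g_glip = g_gli;
    }
    if (grpc::g_core_codegen_interface == nullptr) {
      static auto* const g_core_codegen = new CoreCodegen();
      grpc::g_core_codegen_interface = g_core_codegen;
    }
  }

  // A no-op method that forces the linker to keep this object alive;
  // see the g_gli_initializer.summon() call in the Server constructor below.
  int summon() { return 0; }
};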

2. grpc_completion_queue initialization

The completion queue is implemented in completion_queue_factory.cc and completion_queue.cc under src/core/lib/surface.

There are three cq_poller_vtable variants, selected by polling type: GRPC_CQ_DEFAULT_POLLING, GRPC_CQ_NON_LISTENING, and GRPC_CQ_NON_POLLING.

Likewise, there are three cq_vtable variants, selected by completion type: GRPC_CQ_NEXT, GRPC_CQ_PLUCK, and GRPC_CQ_CALLBACK.

Completion queues are created through factory methods.
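For example, the public entry point for a NEXT-style queue fills in the attributes and delegates to the default factory; a sketch based on src/core/lib/surface/completion_queue_factory.cc (the attribute struct layout varies slightly across versions):

grpc_completion_queue* grpc_completion_queue_create_for_next(void* reserved) {
  GPR_ASSERT(!reserved);
  grpc_completion_queue_attributes attr = {
      GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING, nullptr};
  return g_default_cq_factory.vtable->create(&g_default_cq_factory, &attr);
}

The factory's create callback simply forwards these attributes to grpc_completion_queue_create_internal().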

The actual construction happens in grpc_completion_queue_create_internal():

grpc_completion_queue* grpc_completion_queue_create_internal(
    grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type,
    grpc_completion_queue_functor* shutdown_callback) {
  GPR_TIMER_SCOPE("grpc_completion_queue_create_internal", 0);

  grpc_completion_queue* cq;

  GRPC_API_TRACE(
      "grpc_completion_queue_create_internal(completion_type=%d, "
      "polling_type=%d)",
      2, (completion_type, polling_type));

  const cq_vtable* vtable = &g_cq_vtable[completion_type];
  const cq_poller_vtable* poller_vtable =
      &g_poller_vtable_by_poller_type[polling_type];

  grpc_core::ExecCtx exec_ctx;
  GRPC_STATS_INC_CQS_CREATED();

  cq = static_cast<grpc_completion_queue*>(
      gpr_zalloc(sizeof(grpc_completion_queue) + vtable->data_size +
                 poller_vtable->size()));

  cq->vtable = vtable;
  cq->poller_vtable = poller_vtable;

  /* One for destroy(), one for pollset_shutdown */
  new (&cq->owning_refs) grpc_core::RefCount(2);

  poller_vtable->init(POLLSET_FROM_CQ(cq), &cq->mu);
  vtable->init(DATA_FROM_CQ(cq), shutdown_callback);

  GRPC_CLOSURE_INIT(&cq->pollset_shutdown_done, on_pollset_shutdown_done, cq,
                    grpc_schedule_on_exec_ctx);
  return cq;
}

The structure of grpc_completion_queue is sketched below.
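A simplified sketch of the struct from completion_queue.cc (debug-only fields omitted). Note that the completion-type-specific data and the pollset are not pointers: they are allocated inline right after the struct, which is why the gpr_zalloc call above adds vtable->data_size and poller_vtable->size(), and why DATA_FROM_CQ/POLLSET_FROM_CQ are pointer-arithmetic macros:

struct grpc_completion_queue {
  // One ref for destroy(), one for pollset_shutdown; see RefCount(2) above.
  grpc_core::RefCount owning_refs;

  gpr_mu* mu;

  const cq_vtable* vtable;                // per completion type (NEXT/PLUCK/CALLBACK)
  const cq_poller_vtable* poller_vtable;  // per polling type

  grpc_closure pollset_shutdown_done;
  int num_polls;
  // Followed in memory by the per-type data (vtable->data_size bytes),
  // then the pollset (poller_vtable->size() bytes).
};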

3. GrpcLibrary initialization

Initialization is performed by the GrpcLibrary class, which calls grpc_init() (src/core/lib/surface/init.cc):

class GrpcLibrary final : public GrpcLibraryInterface {
 public:
  void init() override { grpc_init(); }
  void shutdown() override { grpc_shutdown(); }
};

void grpc_init(void) {
  gpr_once_init(&g_basic_init, do_basic_init);

  grpc_core::MutexLock lock(g_init_mu);
  if (++g_initializations == 1) {
    if (g_shutting_down) {
      g_shutting_down = false;
      g_shutting_down_cv->SignalAll();
    }
    grpc_core::Fork::GlobalInit();
    grpc_fork_handlers_auto_register();
    grpc_stats_init();
    grpc_slice_intern_init();
    grpc_mdctx_global_init();
    grpc_core::channelz::ChannelzRegistry::Init();
    grpc_security_pre_init();
    grpc_core::ApplicationCallbackExecCtx::GlobalInit();
    grpc_core::ExecCtx::GlobalInit();
    grpc_iomgr_init();
    gpr_timers_global_init();
    for (int i = 0; i < g_number_of_plugins; i++) {
      if (g_all_of_the_plugins[i].init != nullptr) {
        g_all_of_the_plugins[i].init();
      }
    }
    grpc_tracer_init();
    grpc_iomgr_start();
  }

  GRPC_API_TRACE("grpc_init(void)", 0, ());
}

grpc_iomgr_init() (src/core/lib/iomgr/iomgr.cc) initializes the iomgr; if no iomgr platform has been determined yet, it installs the default one:

void grpc_iomgr_init() {
  grpc_core::ExecCtx exec_ctx;
  if (!grpc_have_determined_iomgr_platform()) {
    grpc_set_default_iomgr_platform();
  }
  g_shutdown = 0;
  gpr_mu_init(&g_mu);
  gpr_cv_init(&g_rcv);
  grpc_core::Executor::InitAll();
  g_root_object.next = g_root_object.prev = &g_root_object;
  g_root_object.name = const_cast<char*>("root");
  grpc_iomgr_platform_init();
  grpc_timer_list_init();
  grpc_core::grpc_errqueue_init();
  g_grpc_abort_on_leaks = GPR_GLOBAL_CONFIG_GET(grpc_abort_on_leaks);
}

grpc_set_default_iomgr_platform() selects the iomgr platform; which definition gets linked in depends on the build (EventEngine-based, POSIX, Windows, ...).
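The indirection itself is a single static vtable pointer; a sketch of the dispatch layer, based on src/core/lib/iomgr/iomgr_internal.cc (abridged):

static grpc_iomgr_platform_vtable* iomgr_platform_vtable = nullptr;

bool grpc_have_determined_iomgr_platform() {
  return iomgr_platform_vtable != nullptr;
}

void grpc_set_iomgr_platform_vtable(grpc_iomgr_platform_vtable* vtable) {
  iomgr_platform_vtable = vtable;
}

void grpc_iomgr_platform_init() { iomgr_platform_vtable->init(); }
void grpc_iomgr_platform_flush() { iomgr_platform_vtable->flush(); }
void grpc_iomgr_platform_shutdown() { iomgr_platform_vtable->shutdown(); }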

With the EventEngine-based iomgr (src/core/lib/iomgr/event_engine/iomgr.cc), the configuration is:

namespace {

grpc_iomgr_platform_vtable vtable = {
    iomgr_platform_init,
    iomgr_platform_flush,
    iomgr_platform_shutdown,
    iomgr_platform_shutdown_background_closure,
    iomgr_platform_is_any_background_poller_thread,
    iomgr_platform_add_closure_to_background_poller};

}  // namespace

void grpc_set_default_iomgr_platform() {
  grpc_set_tcp_client_impl(&grpc_event_engine_tcp_client_vtable);
  grpc_set_tcp_server_impl(&grpc_event_engine_tcp_server_vtable);
  grpc_set_timer_impl(&grpc_event_engine_timer_vtable);
  grpc_set_pollset_vtable(&grpc_event_engine_pollset_vtable);
  grpc_set_pollset_set_vtable(&grpc_event_engine_pollset_set_vtable);
  grpc_set_resolver_impl(&grpc_event_engine_resolver_vtable);
  grpc_set_iomgr_platform_vtable(&vtable);
}

With the POSIX iomgr (src/core/lib/iomgr/iomgr_posix.cc), the configuration is:

static grpc_iomgr_platform_vtable vtable = {
    iomgr_platform_init,
    iomgr_platform_flush,
    iomgr_platform_shutdown,
    iomgr_platform_shutdown_background_closure,
    iomgr_platform_is_any_background_poller_thread,
    iomgr_platform_add_closure_to_background_poller};

void grpc_set_default_iomgr_platform() {
  grpc_set_tcp_client_impl(&grpc_posix_tcp_client_vtable);
  grpc_set_tcp_server_impl(&grpc_posix_tcp_server_vtable);
  grpc_set_timer_impl(&grpc_generic_timer_vtable);
  grpc_set_pollset_vtable(&grpc_posix_pollset_vtable);
  grpc_set_pollset_set_vtable(&grpc_posix_pollset_set_vtable);
  grpc_set_resolver_impl(&grpc_posix_resolver_vtable);
  grpc_set_iomgr_platform_vtable(&vtable);
}

3.1 The tcp_client interface

Handles the client side of TCP connections; the interface and its per-platform implementations are sketched below.
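The vtable is essentially one asynchronous connect operation; a sketch based on src/core/lib/iomgr/tcp_client.h (newer releases also add a cancel operation):

typedef struct grpc_tcp_client_vtable {
  // Asynchronously connect to addr; on completion *endpoint is filled in
  // and on_connect is scheduled.
  void (*connect)(grpc_closure* on_connect, grpc_endpoint** endpoint,
                  grpc_pollset_set* interested_parties,
                  const grpc_channel_args* channel_args,
                  const grpc_resolved_address* addr, grpc_millis deadline);
} grpc_tcp_client_vtable;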

3.2 The tcp_server interface

Handles the server side: creating the server, starting it, adding listening ports, and creating the fd handler. A sketch of the vtable follows.
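Based on src/core/lib/iomgr/tcp_server.h, abridged (signatures vary by gRPC version):

typedef struct grpc_tcp_server_vtable {
  grpc_error_handle (*create)(grpc_closure* shutdown_complete,
                              const grpc_channel_args* args,
                              grpc_tcp_server** server);
  void (*start)(grpc_tcp_server* server,
                const std::vector<grpc_pollset*>* pollsets,
                grpc_tcp_server_cb on_accept_cb, void* cb_arg);
  grpc_error_handle (*add_port)(grpc_tcp_server* s,
                                const grpc_resolved_address* addr,
                                int* out_port);
  grpc_core::TcpServerFdHandler* (*create_fd_handler)(grpc_tcp_server* s);
  // ... plus fd accessors (port_fd_count/port_fd) and ref-counting/shutdown
  // operations (ref/unref/shutdown_starting_add/shutdown_listeners).
} grpc_tcp_server_vtable;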

3.3 The timer interface

Handles timers: init, cancel, check, timer-list init, timer-list shutdown, and consuming kicks. A sketch of the vtable follows.
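Based on src/core/lib/iomgr/timer.h:

typedef struct grpc_timer_vtable {
  void (*init)(grpc_timer* timer, grpc_millis deadline, grpc_closure* closure);
  void (*cancel)(grpc_timer* timer);
  // Check for expired timers; reports whether any fired and the next deadline.
  grpc_timer_check_result (*check)(grpc_millis* next);
  void (*list_init)();
  void (*list_shutdown)(void);
  void (*consume_kick)(void);
} grpc_timer_vtable;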

3.4 The pollset interface

Manages pollsets: global init/shutdown, per-pollset init, shutdown, destroy, running the poller (work), kicking it, and querying its size. A sketch of the vtable follows.
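Based on src/core/lib/iomgr/pollset.h; note pollset_size() is what the completion queue uses (via the cq_poller_vtable) to size its inline pollset allocation:

typedef struct grpc_pollset_vtable {
  void (*global_init)(void);
  void (*global_shutdown)(void);
  void (*init)(grpc_pollset* pollset, gpr_mu** mu);
  void (*shutdown)(grpc_pollset* pollset, grpc_closure* closure);
  void (*destroy)(grpc_pollset* pollset);
  // Block the calling thread polling for work, up to deadline.
  grpc_error_handle (*work)(grpc_pollset* pollset,
                            grpc_pollset_worker** worker, grpc_millis deadline);
  grpc_error_handle (*kick)(grpc_pollset* pollset,
                            grpc_pollset_worker* specific_worker);
  size_t (*pollset_size)(void);
} grpc_pollset_vtable;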

3.5 The pollset_set interface

Manages pollset_sets: create, destroy, adding/removing pollsets, and adding/removing nested pollset_sets. A sketch of the vtable follows.
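Based on src/core/lib/iomgr/pollset_set.h:

typedef struct grpc_pollset_set_vtable {
  grpc_pollset_set* (*create)(void);
  void (*destroy)(grpc_pollset_set* pollset_set);
  void (*add_pollset)(grpc_pollset_set* pollset_set, grpc_pollset* pollset);
  void (*del_pollset)(grpc_pollset_set* pollset_set, grpc_pollset* pollset);
  void (*add_pollset_set)(grpc_pollset_set* bag, grpc_pollset_set* item);
  void (*del_pollset_set)(grpc_pollset_set* bag, grpc_pollset_set* item);
} grpc_pollset_set_vtable;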

3.6 The address resolver

Handles address resolution, in both asynchronous and blocking forms. A sketch of the vtable follows.
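Based on src/core/lib/iomgr/resolve_address.h as of the vtable-based versions (newer releases replace this with a DNSResolver class):

typedef struct grpc_address_resolver_vtable {
  // Asynchronous resolution: on_done is scheduled with the result.
  void (*resolve_address)(const char* addr, const char* default_port,
                          grpc_pollset_set* interested_parties,
                          grpc_closure* on_done,
                          grpc_resolved_addresses** addresses);
  grpc_error_handle (*blocking_resolve_address)(
      const char* name, const char* default_port,
      grpc_resolved_addresses** addresses);
} grpc_address_resolver_vtable;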

3.7 The iomgr platform interface

Manages the iomgr platform itself: init, flush, shutdown, shutting down the background poller's closures, and handing closures to the background poller. This is the grpc_iomgr_platform_vtable whose initializers appear above; its declaration is sketched below.
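Based on src/core/lib/iomgr/iomgr_internal.h, matching the six-entry initializer lists shown earlier:

typedef struct grpc_iomgr_platform_vtable {
  void (*init)(void);
  void (*flush)(void);
  void (*shutdown)(void);
  void (*shutdown_background_closure)(void);
  bool (*is_any_background_poller_thread)(void);
  bool (*add_closure_to_background_poller)(grpc_closure* closure,
                                           grpc_error_handle error);
} grpc_iomgr_platform_vtable;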

3.8 The event engine

The event engine is initialized from the init entry of grpc_iomgr_platform_vtable:

static void iomgr_platform_init(void) {
  grpc_wakeup_fd_global_init();
  grpc_event_engine_init();
  grpc_tcp_posix_init();
}

The event engine itself is created through a small factory table; its structure is sketched below.
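The entry type, a sketch from the anonymous namespace of src/core/lib/iomgr/ev_posix.cc (the bool tells the factory whether this engine was explicitly requested by name, matching the strcmp in try_engine below):

typedef const grpc_event_engine_vtable* (*event_engine_factory_fn)(
    bool explicitly_requested);

typedef struct {
  const char* name;
  event_engine_factory_fn factory;
} event_engine_factory;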

The built-in table reserves head and tail slots for custom engines around the standard ones:

static event_engine_factory g_factories[] = {
    {ENGINE_HEAD_CUSTOM, nullptr},        {ENGINE_HEAD_CUSTOM, nullptr},
    {ENGINE_HEAD_CUSTOM, nullptr},        {ENGINE_HEAD_CUSTOM, nullptr},
    {"epollex", grpc_init_epollex_linux}, {"epoll1", grpc_init_epoll1_linux},
    {"poll", grpc_init_poll_posix},       {"none", init_non_polling},
    {ENGINE_TAIL_CUSTOM, nullptr},        {ENGINE_TAIL_CUSTOM, nullptr},
    {ENGINE_TAIL_CUSTOM, nullptr},        {ENGINE_TAIL_CUSTOM, nullptr},
};

The event engine is initialized by grpc_event_engine_init() (src/core/lib/iomgr/ev_posix.cc), which walks g_factories and creates the first engine that matches the configured polling strategy.

void grpc_event_engine_init(void) {
  grpc_core::UniquePtr<char> value = GPR_GLOBAL_CONFIG_GET(grpc_poll_strategy);

  char** strings = nullptr;
  size_t nstrings = 0;
  split(value.get(), &strings, &nstrings);

  for (size_t i = 0; g_event_engine == nullptr && i < nstrings; i++) {
    try_engine(strings[i]);
  }

  for (size_t i = 0; i < nstrings; i++) {
    gpr_free(strings[i]);
  }
  gpr_free(strings);

  if (g_event_engine == nullptr) {
    gpr_log(GPR_ERROR, "No event engine could be initialized from %s",
            value.get());
    abort();
  }
}

static void try_engine(const char* engine) {
  for (size_t i = 0; i < GPR_ARRAY_SIZE(g_factories); i++) {
    if (g_factories[i].factory != nullptr && is(engine, g_factories[i].name)) {
      if ((g_event_engine = g_factories[i].factory(
               0 == strcmp(engine, g_factories[i].name)))) {
        g_poll_strategy_name = g_factories[i].name;
        gpr_log(GPR_DEBUG, "Using polling engine: %s", g_factories[i].name);
        return;
      }
    }
  }
}

3.9 Executor initialization

Two Executors, default-executor and resolver-executor, are created and initialized:

void Executor::InitAll() {
  EXECUTOR_TRACE0("Executor::InitAll() enter");

  // Return if Executor::InitAll() is already called earlier
  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] != nullptr) {
    GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] !=
               nullptr);
    return;
  }

  executors[static_cast<size_t>(ExecutorType::DEFAULT)] =
      new Executor("default-executor");
  executors[static_cast<size_t>(ExecutorType::RESOLVER)] =
      new Executor("resolver-executor");

  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Init();
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Init();

  EXECUTOR_TRACE0("Executor::InitAll() done");
}

void Executor::Init() { SetThreading(true); }

void Executor::SetThreading(bool threading) {
  gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_);
  EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading);

  if (threading) {
    if (curr_num_threads > 0) {
      EXECUTOR_TRACE("(%s) SetThreading(true). curr_num_threads > 0", name_);
      return;
    }

    GPR_ASSERT(num_threads_ == 0);
    gpr_atm_rel_store(&num_threads_, 1);
    thd_state_ = static_cast<ThreadState*>(
        gpr_zalloc(sizeof(ThreadState) * max_threads_));

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_init(&thd_state_[i].mu);
      gpr_cv_init(&thd_state_[i].cv);
      thd_state_[i].id = i;
      thd_state_[i].name = name_;
      thd_state_[i].thd = Thread();
      thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT;
    }

    thd_state_[0].thd = Thread(name_, &Executor::ThreadMain, &thd_state_[0]);
    thd_state_[0].thd.Start();
  } else {  // !threading
    if (curr_num_threads == 0) {
      EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_);
      return;
    }

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_lock(&thd_state_[i].mu);
      thd_state_[i].shutdown = true;
      gpr_cv_signal(&thd_state_[i].cv);
      gpr_mu_unlock(&thd_state_[i].mu);
    }

    /* Ensure no thread is adding a new thread. Once this is past, then no
     * thread will try to add a new one either (since shutdown is true) */
    gpr_spinlock_lock(&adding_thread_lock_);
    gpr_spinlock_unlock(&adding_thread_lock_);

    curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
    for (gpr_atm i = 0; i < curr_num_threads; i++) {
      thd_state_[i].thd.Join();
      EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_,
                     i + 1, curr_num_threads);
    }

    gpr_atm_rel_store(&num_threads_, 0);
    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_destroy(&thd_state_[i].mu);
      gpr_cv_destroy(&thd_state_[i].cv);
      RunClosures(thd_state_[i].name, thd_state_[i].elems);
    }

    gpr_free(thd_state_);

    // grpc_iomgr_shutdown_background_closure() will close all the registered
    // fds in the background poller, and wait for all pending closures to
    // finish. Thus, never call Executor::SetThreading(false) in the middle of
    // an application.
    // TODO(guantaol): create another method to finish all the pending closures
    // registered in the background poller by Executor.
    grpc_iomgr_platform_shutdown_background_closure();
  }

  EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading);
}

The class structure of Executor is sketched below.
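A trimmed sketch based on src/core/lib/iomgr/executor.h (member list abridged):

class Executor {
 public:
  explicit Executor(const char* name);
  void Init();                        // calls SetThreading(true)
  void SetThreading(bool threading);  // spins up / tears down worker threads
  void Enqueue(grpc_closure* closure, grpc_error_handle error, bool is_short);
  void Shutdown();

  static void InitAll();  // creates default-executor and resolver-executor
  static void ShutdownAll();
  static void Run(grpc_closure* closure, grpc_error_handle error,
                  ExecutorType executor_type = ExecutorType::DEFAULT,
                  ExecutorJobType job_type = ExecutorJobType::SHORT);

 private:
  static void ThreadMain(void* arg);

  const char* name_;
  ThreadState* thd_state_;  // one slot per potential worker thread
  size_t max_threads_;
  gpr_atm num_threads_;
  gpr_spinlock adding_thread_lock_;
};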

4. Server initialization

Creating a grpc::Server mainly means creating the SyncRequestThreadManagers (one per synchronous completion queue) and the underlying grpc_core::Server:

std::unique_ptr<grpc::Server> server(new grpc::Server(
      &args, sync_server_cqs, sync_server_settings_.min_pollers,
      sync_server_settings_.max_pollers, sync_server_settings_.cq_timeout_msec,
      std::move(acceptors_), server_config_fetcher_, resource_quota_,
      std::move(interceptor_creators_)));


Server::Server(
    grpc::ChannelArguments* args,
    std::shared_ptr<std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>
        sync_server_cqs,
    int min_pollers, int max_pollers, int sync_cq_timeout_msec,
    std::vector<std::shared_ptr<grpc::internal::ExternalConnectionAcceptorImpl>>
        acceptors,
    grpc_server_config_fetcher* server_config_fetcher,
    grpc_resource_quota* server_rq,
    std::vector<
        std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
        interceptor_creators)
    : acceptors_(std::move(acceptors)),
      interceptor_creators_(std::move(interceptor_creators)),
      max_receive_message_size_(INT_MIN),
      sync_server_cqs_(std::move(sync_server_cqs)),
      started_(false),
      shutdown_(false),
      shutdown_notified_(false),
      server_(nullptr),
      server_initializer_(new ServerInitializer(this)),
      health_check_service_disabled_(false) {
  g_gli_initializer.summon();
  gpr_once_init(&grpc::g_once_init_callbacks, grpc::InitGlobalCallbacks);
  global_callbacks_ = grpc::g_callbacks;
  global_callbacks_->UpdateArguments(args);

  if (sync_server_cqs_ != nullptr) {
    bool default_rq_created = false;
    if (server_rq == nullptr) {
      server_rq = grpc_resource_quota_create("SyncServer-default-rq");
      grpc_resource_quota_set_max_threads(server_rq,
                                          DEFAULT_MAX_SYNC_SERVER_THREADS);
      default_rq_created = true;
    }

    for (const auto& it : *sync_server_cqs_) {
      sync_req_mgrs_.emplace_back(new SyncRequestThreadManager(
          this, it.get(), global_callbacks_, server_rq, min_pollers,
          max_pollers, sync_cq_timeout_msec));
    }

    if (default_rq_created) {
      grpc_resource_quota_unref(server_rq);
    }
  }

  for (auto& acceptor : acceptors_) {
    acceptor->SetToChannelArgs(args);
  }

  grpc_channel_args channel_args;
  args->SetChannelArgs(&channel_args);

  for (size_t i = 0; i < channel_args.num_args; i++) {
    if (0 == strcmp(channel_args.args[i].key,
                    grpc::kHealthCheckServiceInterfaceArg)) {
      if (channel_args.args[i].value.pointer.p == nullptr) {
        health_check_service_disabled_ = true;
      } else {
        health_check_service_.reset(
            static_cast<grpc::HealthCheckServiceInterface*>(
                channel_args.args[i].value.pointer.p));
      }
    }
    if (0 ==
        strcmp(channel_args.args[i].key, GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)) {
      max_receive_message_size_ = channel_args.args[i].value.integer;
    }
  }
  server_ = grpc_server_create(&channel_args, nullptr);
  grpc_server_set_config_fetcher(server_, server_config_fetcher);
}

grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
  const grpc_channel_args* new_args = grpc_core::CoreConfiguration::Get()
                                          .channel_args_preconditioning()
                                          .PreconditionChannelArgs(args);
  grpc_core::Server* server = new grpc_core::Server(new_args);
  grpc_channel_args_destroy(new_args);
  return server->c_ptr();
}

4.1 SyncRequestThreadManager

The thread manager that serves synchronous requests; its structure is sketched below.
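A trimmed sketch based on src/cpp/server/server_cc.cc: it derives from grpc::ThreadManager, which implements the min_pollers/max_pollers polling-thread pool, and its constructor arguments match the emplace_back call in the Server constructor above:

class Server::SyncRequestThreadManager : public grpc::ThreadManager {
 public:
  SyncRequestThreadManager(Server* server, grpc::CompletionQueue* server_cq,
                           std::shared_ptr<GlobalCallbacks> global_callbacks,
                           grpc_resource_quota* rq, int min_pollers,
                           int max_pollers, int cq_timeout_msec);

  // ThreadManager overrides: pull a tag off the server CQ, then run the RPC.
  WorkStatus PollForWork(void** tag, bool* ok) override;
  void DoWork(void* tag, bool ok, bool resources) override;

  void AddSyncMethod(grpc::internal::RpcServiceMethod* method, void* tag);
  void AddUnknownSyncMethod();
  void Start();

 private:
  Server* server_;
  grpc::CompletionQueue* server_cq_;
  int cq_timeout_msec_;
  std::vector<std::unique_ptr<SyncRequest>> sync_requests_;
  std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
};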

References:

https://www.grpc.io/community/

https://github.com/grpc

https://github.com/grpc-ecosystem
