grpc-builder：创建 Server 实例对象。
1. 准备 IP、端口；
2. 创建 Server 实例对象。
此时 build() 只是构造了一个普通对象，尚未建立任何连接或网络资源。
/**
 * Builds the {@link Server}. At this point the result is just an object —
 * nothing is bound or connected until {@code start()} is called.
 */
@Override
public Server build() {
  // Construct the server around an immutable snapshot of the tracer factories.
  ServerImpl builtServer =
      new ServerImpl(
          this,
          buildTransportServer(Collections.unmodifiableList(getTracerFactories())),
          Context.ROOT);
  // Let registered observers see the freshly built (not yet started) server.
  for (InternalNotifyOnServerBuild notifyTarget : notifyOnBuildList) {
    notifyTarget.notifyOnBuild(builtServer);
  }
  return builtServer;
}
复制代码
/**
 * Construct a server.
 *
 * <p>Only copies configuration out of the builder and registers with channelz;
 * no network resources are created until {@code start()}.
 *
 * @param builder builder with configuration for server
 * @param transportServer transport server that will create new incoming transports
 * @param rootContext context that callbacks for new RPCs should be derived from
 */
ServerImpl(
AbstractServerImplBuilder<?> builder,
InternalServer transportServer,
Context rootContext) {
this.executorPool = Preconditions.checkNotNull(builder.executorPool, "executorPool");
// The service registry is materialized here; later builder mutations have no effect.
this.registry = Preconditions.checkNotNull(builder.registryBuilder.build(), "registryBuilder");
this.fallbackRegistry =
Preconditions.checkNotNull(builder.fallbackRegistry, "fallbackRegistry");
this.transportServer = Preconditions.checkNotNull(transportServer, "transportServer");
// Fork from the passed in context so that it does not propagate cancellation, it only
// inherits values.
this.rootContext = Preconditions.checkNotNull(rootContext, "rootContext").fork();
this.decompressorRegistry = builder.decompressorRegistry;
this.compressorRegistry = builder.compressorRegistry;
// Defensive copy: the builder's filter list may be mutated after build().
this.transportFilters = Collections.unmodifiableList(
new ArrayList<>(builder.transportFilters));
this.interceptors =
builder.interceptors.toArray(new ServerInterceptor[builder.interceptors.size()]);
this.handshakeTimeoutMillis = builder.handshakeTimeoutMillis;
this.binlog = builder.binlog;
this.channelz = builder.channelz;
this.serverCallTracer = builder.callTracerFactory.create();
// NOTE(review): publishes `this` before the constructor completes; safe only if
// channelz does not synchronously call back into the server — confirm.
channelz.addServer(this);
}
复制代码
grpc-server start
/**
* Bind and start the server.
*
* @return {@code this} object
* @throws IllegalStateException if already started
* @throws IOException if unable to bind
*/
@Override
public ServerImpl start() throws IOException {
synchronized (lock) {
checkState(!started, "Already started");
checkState(!shutdown, "Shutting down");
// Start and wait for any port to actually be bound.
transportServer.start(new ServerListenerImpl()); //启动server
NettyServer{logId=1, address=0.0.0.0/0.0.0.0:8980} 底层封装了netty
executor = Preconditions.checkNotNull(executorPool.getObject(), "executor");
started = true;
return this;
}
}
复制代码
grpc-netty
gRPC 的网络层集成了 Netty，具体实现位于 grpc-netty.jar 中。
/**
 * Binds the underlying Netty {@code ServerBootstrap} and blocks until the
 * listen socket is bound (or the bind fails).
 *
 * @param serverListener notified as new transports (connections) are created
 * @throws IOException if the bind fails
 */
@Override
public void start(ServerListener serverListener) throws IOException {
  listener = checkNotNull(serverListener, "serverListener");
  // If using the shared groups, get references to them.
  allocateSharedGroups();

  ServerBootstrap b = new ServerBootstrap(); // Netty's server bootstrap/launcher.
  b.group(bossGroup, workerGroup);
  b.channel(channelType);
  if (NioServerSocketChannel.class.isAssignableFrom(channelType)) {
    b.option(SO_BACKLOG, 128);
    b.childOption(SO_KEEPALIVE, true);
  }
  if (channelOptions != null) {
    for (Map.Entry<ChannelOption<?>, ?> entry : channelOptions.entrySet()) {
      @SuppressWarnings("unchecked")
      ChannelOption<Object> key = (ChannelOption<Object>) entry.getKey();
      b.childOption(key, entry.getValue());
    }
  }

  // Each accepted connection gets its own NettyServerTransport.
  b.childHandler(new ChannelInitializer<Channel>() {
    @Override
    public void initChannel(Channel ch) throws Exception {
      ChannelPromise channelDone = ch.newPromise();

      long maxConnectionAgeInNanos = NettyServer.this.maxConnectionAgeInNanos;
      if (maxConnectionAgeInNanos != MAX_CONNECTION_AGE_NANOS_DISABLED) {
        // apply a random jitter of +/-10% to max connection age
        maxConnectionAgeInNanos =
            (long) ((.9D + Math.random() * .2D) * maxConnectionAgeInNanos);
      }

      NettyServerTransport transport =
          new NettyServerTransport(
              ch,
              channelDone,
              protocolNegotiator,
              streamTracerFactories,
              transportTracerFactory.create(),
              maxStreamsPerConnection,
              flowControlWindow,
              maxMessageSize,
              maxHeaderListSize,
              keepAliveTimeInNanos,
              keepAliveTimeoutInNanos,
              maxConnectionIdleInNanos,
              maxConnectionAgeInNanos,
              maxConnectionAgeGraceInNanos,
              permitKeepAliveWithoutCalls,
              permitKeepAliveTimeInNanos);
      ServerTransportListener transportListener;
      // This is to order callbacks on the listener, not to guard access to channel.
      synchronized (NettyServer.this) {
        if (channel != null && !channel.isOpen()) {
          // Server already shutdown.
          ch.close();
          return;
        }
        // `channel` shutdown can race with `ch` initialization, so this is only safe to
        // increment inside the lock.
        eventLoopReferenceCounter.retain();
        transportListener = listener.transportCreated(transport);
      }

      /**
       * Releases the event loop if the channel is "done", possibly due to the channel closing.
       */
      final class LoopReleaser implements ChannelFutureListener {
        boolean done;

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
          if (!done) {
            done = true;
            eventLoopReferenceCounter.release();
          }
        }
      }

      transport.start(transportListener);
      ChannelFutureListener loopReleaser = new LoopReleaser();
      channelDone.addListener(loopReleaser);
      ch.closeFuture().addListener(loopReleaser);
    }
  });
  // Bind and start to accept incoming connections.
  ChannelFuture future = b.bind(address);
  try {
    future.await();
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
    // Fix: preserve the cause instead of dropping it.
    throw new RuntimeException("Interrupted waiting for bind", ex);
  }
  if (!future.isSuccess()) {
    throw new IOException("Failed to bind", future.cause());
  }
  channel = future.channel();
  // Register the listen socket with channelz on the channel's event loop.
  Future<?> channelzFuture = channel.eventLoop().submit(new Runnable() {
    @Override
    public void run() {
      InternalInstrumented<SocketStats> listenSocket = new ListenSocket(channel);
      listenSockets = ImmutableList.of(listenSocket);
      channelz.addListenSocket(listenSocket);
    }
  });
  try {
    channelzFuture.await();
  } catch (InterruptedException ex) {
    // Fix: restore the interrupt status before rethrowing (it was swallowed before).
    Thread.currentThread().interrupt();
    throw new RuntimeException("Interrupted while registering listen socket to channelz", ex);
  }
}
复制代码
// Registers the channel with its event loop, then binds to localAddress once
// registration has completed. The returned future completes when the bind
// finishes, or fails fast if registration already failed.
private ChannelFuture doBind(final SocketAddress localAddress) {
final ChannelFuture regFuture = initAndRegister();
final Channel channel = regFuture.channel();
// Registration already failed: surface that failure directly.
if (regFuture.cause() != null) {
return regFuture;
}
if (regFuture.isDone()) {
// At this point we know that the registration was complete and successful.
ChannelPromise promise = channel.newPromise();
doBind0(regFuture, channel, localAddress, promise);
return promise;
} else {
// Registration future is almost always fulfilled already, but just in case it's not.
final PendingRegistrationPromise promise = new PendingRegistrationPromise(channel);
regFuture.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
Throwable cause = future.cause();
if (cause != null) {
// Registration on the EventLoop failed so fail the ChannelPromise directly to not cause an
// IllegalStateException once we try to access the EventLoop of the Channel.
promise.setFailure(cause);
} else {
// Registration was successful, so set the correct executor to use.
// See https://github.com/netty/netty/issues/2586
promise.registered();
doBind0(regFuture, channel, localAddress, promise);
}
}
});
return promise;
}
}
/**
 * Schedules the actual bind on the channel's event loop. Running it as a task
 * (rather than inline) gives user handlers a chance to set up the pipeline in
 * their channelRegistered() callbacks before the bind is issued.
 */
private static void doBind0(
    final ChannelFuture regFuture, final Channel channel,
    final SocketAddress localAddress, final ChannelPromise promise) {
  channel.eventLoop().execute(new Runnable() { // SingleThreadEventExecutor
    @Override
    public void run() {
      // Guard clause: if registration failed, the bind cannot proceed.
      if (!regFuture.isSuccess()) {
        promise.setFailure(regFuture.cause());
        return;
      }
      channel.bind(localAddress, promise).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
    }
  });
}
复制代码
// SingleThreadEventExecutor: queues the task and lazily starts the worker thread.
@Override
public void execute(Runnable task) {
if (task == null) {
throw new NullPointerException("task");
}
// Capture before enqueuing: decides whether we may need to start/wake the loop thread.
boolean inEventLoop = inEventLoop();
addTask(task);
if (!inEventLoop) {
// First external submission lazily starts the event-loop thread.
startThread();
// If the executor shut down concurrently, try to roll the enqueue back; only
// reject when we actually removed it (otherwise it may already be executing).
if (isShutdown() && removeTask(task)) {
reject();
}
}
// Wake a possibly-blocked event loop so it notices the newly queued task.
if (!addTaskWakesUp && wakesUpForTask(task)) {
wakeup(inEventLoop);
}
}
复制代码
总结
gRPC 服务端的底层与普通套接字编程一样：最终由一个 serverSocket.accept()（这里由 Netty 封装）来接受新连接。