read 事件:
NioEventLoop中线程循环监控网络上数据进入
|
|
\|/
有数据进入,读取数据存入ByteBuf
|
|
\|/
channel.pipeline().fireChannelRead(byteBuf)被调用:
channel.pipeline()中ChannelInboundHandler链,从head-->tail顺序依次调用 ((ChannelInboundHandler) handler()).channelRead(this, msg);
|
|
\|/
channel.pipeline().fireChannelReadComplete()被调用:
channel.pipeline()中ChannelInboundHandler链,从head-->tail顺序依次调用 ((ChannelInboundHandler) handler()).channelReadComplete(this);
public final class NioEventLoop extends SingleThreadEventLoop {
    /**
     * Dispatches the ready I/O operations of a single {@link SelectionKey} to the
     * channel's unsafe handlers. The event-loop thread calls this from its selector
     * loop (processSelectedKeys) whenever a key becomes ready for connect, write,
     * read or accept.
     *
     * @param k  the selection key whose ready operations should be processed
     * @param ch the channel the key is registered for
     */
    private void processSelectedKey(SelectionKey k, AbstractNioChannel ch) {
        final AbstractNioChannel.NioUnsafe unsafe = ch.unsafe();
        try {
            final int readyOps = k.readyOps();
            // finishConnect() must run before any read(...)/write(...) attempt,
            // otherwise the JDK NIO channel may throw NotYetConnectedException.
            if ((readyOps & SelectionKey.OP_CONNECT) != 0) {
                // Clear OP_CONNECT from the interest set; leaving it in place would
                // make Selector.select(..) return immediately in a busy loop.
                // See https://github.com/netty/netty/issues/924
                k.interestOps(k.interestOps() & ~SelectionKey.OP_CONNECT);
                unsafe.finishConnect();
            }
            // Handle OP_WRITE before reading: flushing queued buffers may free memory.
            if ((readyOps & SelectionKey.OP_WRITE) != 0) {
                // forceFlush also clears OP_WRITE once nothing is left to write.
                ch.unsafe().forceFlush();
            }
            // readyOps == 0 is handled too, as a workaround for a possible JDK bug
            // that would otherwise cause a spin loop.
            if (readyOps == 0 || (readyOps & (SelectionKey.OP_READ | SelectionKey.OP_ACCEPT)) != 0) {
                // Data is available (or a connection is pending) — trigger the read path.
                unsafe.read();
            }
        } catch (CancelledKeyException ignored) {
            // Key was cancelled concurrently; close the channel.
            unsafe.close(unsafe.voidPromise());
        }
    }
}
public abstract class AbstractNioByteChannel extends AbstractNioChannel {
public final void read() {
final ChannelConfig config = config();
final ChannelPipeline pipeline = pipeline();
final ByteBufAllocator allocator = config.getAllocator();
final RecvByteBufAllocator.Handle allocHandle = recvBufAllocHandle();
allocHandle.reset(config);
ByteBuf byteBuf = null;
boolean close = false;
try {
do {
byteBuf = allocHandle.allocate(allocator);
allocHandle.lastBytesRead(doReadBytes(byteBuf));//从channel中读取数据存入byteBuf
if (allocHandle.lastBytesRead() <= 0) {
// nothing was read. release the buffer.
byteBuf.release();
byteBuf = null;
close = allocHandle.lastBytesRead() < 0;
break;
}
allocHandle.incMessagesRead(1);
readPending = false;
pipeline.fireChannelRead(byteBuf);
//channel.pipeline()中ChannelInboundHandler链,从head-->tail顺序依次调用 ((ChannelInboundHandler) handler()).channelRead(this, msg);
byteBuf = null;
} while (allocHandle.continueReading());
allocHandle.readComplete();
pipeline.fireChannelReadComplete();
//channel.pipeline()中ChannelInboundHandler链,从head-->tail顺序依次调用 ((ChannelInboundHandler) handler()).channelReadComplete(this);
if (close) {
closeOnRead(pipeline);
}
} catch (Throwable t) {
handleReadException(pipeline, byteBuf, t, close, allocHandle);
} finally {
//ignore some code
}
}
}
}