The NioEndpoint Component
There are five I/O models on UNIX systems:
- Synchronous blocking I/O
- Synchronous non-blocking I/O
- I/O multiplexing
- Signal-driven I/O (not covered here)
- Asynchronous I/O
I/O, at its core, is the process of copying data between a computer's memory and an external device.
Java I/O Models
When a user thread initiates an I/O operation, it goes through two steps:
- The user thread waits for the kernel to copy the data from the network card (the external device) into kernel space.
- The kernel copies the data from kernel space into user space.
Synchronous blocking I/O
After the user thread issues a read call, it blocks and yields the CPU. The kernel waits for data to arrive on the network card, copies it from the card into kernel space, then copies it on into user space, and finally wakes the user thread up.
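To make the blocking step concrete, here is a minimal sketch, assuming a reachable TCP server at a hypothetical host and port: the read call parks the calling thread until the kernel has copied the data all the way into the user-space buffer.

import java.io.InputStream;
import java.net.Socket;

public class BlockingReadDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical peer; any TCP server that writes bytes will do
        try (Socket socket = new Socket("localhost", 8080)) {
            InputStream in = socket.getInputStream();
            byte[] buf = new byte[1024];
            int n = in.read(buf); // blocks: the thread sleeps until data reaches user space
            System.out.println("read " + n + " bytes");
        }
    }
}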
Synchronous non-blocking I/O
The user thread issues read calls over and over; while the data has not yet reached kernel space, every call returns a failure. Once the data is in kernel space, that read call proceeds, but the thread still blocks while the data is copied from kernel space to user space, and it is woken only after the data has arrived in user space.
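The same read, but polling, as a minimal sketch (host and port again hypothetical): with the channel in non-blocking mode, read returns 0 immediately while the data has not reached kernel space, so the caller must keep retrying.

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;

public class NonBlockingReadDemo {
    public static void main(String[] args) throws Exception {
        SocketChannel ch = SocketChannel.open(new InetSocketAddress("localhost", 8080)); // hypothetical peer
        ch.configureBlocking(false); // read() now returns instead of blocking
        ByteBuffer buf = ByteBuffer.allocate(1024);
        int n;
        while ((n = ch.read(buf)) == 0) {
            // data not in kernel space yet; spin and retry (the cost of "non-blocking")
        }
        System.out.println("read " + n + " bytes"); // -1 would mean the peer closed
        ch.close();
    }
}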
I/O multiplexing
The user thread's read is split into two steps. The thread first issues a select call to ask the kernel whether the data is ready; once the kernel has prepared the data, the user thread issues the actual read call. During the copy from kernel space to user space the thread still blocks. Why is this called multiplexing? Because a single select call can query the kernel about the state of multiple data channels (Channels) at once.
Asynchronous I/O
The user thread registers a callback function at the same time it issues the read call, and read returns immediately. Once the kernel has the data ready, it invokes the registered callback to finish the processing; the user thread never blocks at any point in the sequence.
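A minimal sketch of that callback style using the JDK's asynchronous channels (the file name is illustrative): read returns at once, and the completion handler runs when the kernel has the data ready.

import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousFileChannel;
import java.nio.channels.CompletionHandler;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.CountDownLatch;

public class AsyncReadDemo {
    public static void main(String[] args) throws Exception {
        CountDownLatch done = new CountDownLatch(1);
        AsynchronousFileChannel ch = AsynchronousFileChannel.open(
                Paths.get("data.txt"), StandardOpenOption.READ); // illustrative file
        ByteBuffer buf = ByteBuffer.allocate(1024);
        ch.read(buf, 0, buf, new CompletionHandler<Integer, ByteBuffer>() {
            @Override
            public void completed(Integer bytesRead, ByteBuffer attachment) {
                System.out.println("callback: read " + bytesRead + " bytes"); // runs later
                done.countDown();
            }
            @Override
            public void failed(Throwable exc, ByteBuffer attachment) {
                exc.printStackTrace();
                done.countDown();
            }
        });
        // read() has already returned; the main thread was never blocked on I/O
        done.await();
        ch.close();
    }
}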
The NioEndpoint Component
Tomcat's NioEndpoint component implements the I/O multiplexing model.
Workflow
Using Java's multiplexing API boils down to two steps (a runnable sketch follows the list):
- Create a Selector, register the events you are interested in on it, then call the select method and wait for one of those events to occur.
- When an event of interest occurs, for example a channel becomes readable, a thread reads the data from the Channel.
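A minimal runnable sketch of those two steps (the port is illustrative):

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Iterator;

public class SelectorDemo {
    public static void main(String[] args) throws Exception {
        // Step 1: create a Selector and register the events we care about
        Selector selector = Selector.open();
        ServerSocketChannel server = ServerSocketChannel.open();
        server.bind(new InetSocketAddress(8080)); // illustrative port
        server.configureBlocking(false);
        server.register(selector, SelectionKey.OP_ACCEPT);

        while (true) {
            selector.select(); // block until some registered channel is ready
            Iterator<SelectionKey> it = selector.selectedKeys().iterator();
            while (it.hasNext()) {
                SelectionKey key = it.next();
                it.remove();
                // Step 2: an event of interest fired, act on the ready channel
                if (key.isAcceptable()) {
                    SocketChannel client = server.accept();
                    client.configureBlocking(false);
                    client.register(selector, SelectionKey.OP_READ);
                } else if (key.isReadable()) {
                    SocketChannel client = (SocketChannel) key.channel();
                    ByteBuffer buf = ByteBuffer.allocate(1024);
                    if (client.read(buf) < 0) {
                        client.close(); // peer closed the connection
                    }
                }
            }
        }
    }
}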
NioEndpoint component workflow:
It consists of the following components:
- LimitLatch
- Acceptor
- Poller
- SocketProcessor
- Executor
- LimitLatch is the connection controller: it enforces the maximum connection count, which defaults to 10000 in NIO mode; once that threshold is reached, further connection requests are refused. A miniature sketch of the idea follows; the real code paths (start → startInternal → initializeConnectionLatch) come after it.
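Tomcat's LimitLatch is a shared latch built on AbstractQueuedSynchronizer; the sketch below uses a plain Semaphore instead, so the names and implementation are illustrative only, not Tomcat's:

import java.util.concurrent.Semaphore;

public class ConnectionLimiter {
    private final Semaphore permits;

    public ConnectionLimiter(int maxConnections) {
        this.permits = new Semaphore(maxConnections);
    }

    // What the Acceptor does before accept(): block once the limit is hit
    public void countUpOrAwait() throws InterruptedException {
        permits.acquire();
    }

    // What the endpoint does when a connection closes (or accept fails)
    public void countDown() {
        permits.release();
    }
}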
org.apache.tomcat.util.net.AbstractEndpoint
public final void start() throws Exception {
if (bindState == BindState.UNBOUND) {
bindWithCleanup();
bindState = BindState.BOUND_ON_START;
}
startInternal();
}
org.apache.tomcat.util.net.NioEndpoint
@Override
public void startInternal() throws Exception {
if (!running) {
running = true;
paused = false;
if (socketProperties.getProcessorCache() != 0) {
processorCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
socketProperties.getProcessorCache());
}
if (socketProperties.getEventCache() != 0) {
eventCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
socketProperties.getEventCache());
}
if (socketProperties.getBufferPool() != 0) {
nioChannels = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
socketProperties.getBufferPool());
}
// Create worker collection
if (getExecutor() == null) {
createExecutor();
}
// Initialize the connection limit latch
initializeConnectionLatch();
// Start poller thread
poller = new Poller();
Thread pollerThread = new Thread(poller, getName() + "-ClientPoller");
pollerThread.setPriority(threadPriority);
pollerThread.setDaemon(true);
pollerThread.start();
startAcceptorThread();
}
}
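Note that startInternal also pre-populates three SynchronizedStack caches (processorCache, eventCache, nioChannels) so hot paths can recycle objects instead of allocating. A minimal illustration of that pooling pattern, with names of my own rather than Tomcat's:

import java.util.ArrayDeque;

public class SimplePool<T> {
    private final ArrayDeque<T> stack = new ArrayDeque<>();
    private final int capacity;

    public SimplePool(int capacity) {
        this.capacity = capacity;
    }

    // pop() a cached instance if one is available, else null (caller creates a new one)
    public synchronized T pop() {
        return stack.pollFirst();
    }

    // push() the instance back after use; over capacity it is simply dropped for GC
    public synchronized boolean push(T obj) {
        if (stack.size() < capacity) {
            stack.addFirst(obj);
            return true;
        }
        return false;
    }
}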
org.apache.tomcat.util.net.AbstractEndpoint
protected LimitLatch initializeConnectionLatch() {
if (maxConnections==-1) return null;
if (connectionLimitLatch==null) {
connectionLimitLatch = new LimitLatch(getMaxConnections());
}
return connectionLimitLatch;
}
private int maxConnections = 10000;
public int getMaxConnections() { return this.maxConnections; }
public void setMaxConnections(int maxCon) {
this.maxConnections = maxCon;
LimitLatch latch = this.connectionLimitLatch;
if (latch != null) {
// Update the latch that enforces this
if (maxCon == -1) {
releaseConnectionLatch();
} else {
latch.setLimit(maxCon);
}
} else if (maxCon > 0) {
initializeConnectionLatch();
}
}
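maxConnections is exposed as a Connector attribute (the same value the latch enforces). Here is a hedged sketch using embedded Tomcat; the port and the value are illustrative, and in a standalone install the equivalent is maxConnections="..." on the Connector element in server.xml:

import org.apache.catalina.connector.Connector;
import org.apache.catalina.startup.Tomcat;

public class MaxConnectionsDemo {
    public static void main(String[] args) throws Exception {
        Tomcat tomcat = new Tomcat();
        Connector connector = new Connector("org.apache.coyote.http11.Http11NioProtocol");
        connector.setPort(8080);                          // illustrative port
        connector.setProperty("maxConnections", "20000"); // overrides the 10000 default
        tomcat.getService().addConnector(connector);
        tomcat.start();
        tomcat.getServer().await();
    }
}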
- Acceptor runs in a single thread; in an endless loop it calls the accept method to receive new connections. As soon as a new connection request arrives, accept returns a Channel object, which the Acceptor hands over to the Poller for processing.
public class Acceptor<U> implements Runnable {
public enum AcceptorState {
NEW, RUNNING, PAUSED, ENDED
}
@Override
public void run() {
int errorDelay = 0;
// Loop until we receive a shutdown command
while (endpoint.isRunning()) {
// Loop if endpoint is paused
while (endpoint.isPaused() && endpoint.isRunning()) {
state = AcceptorState.PAUSED;
try {
Thread.sleep(50);
} catch (InterruptedException e) {
// Ignore
}
}
if (!endpoint.isRunning()) {
break;
}
state = AcceptorState.RUNNING;
try {
//if we have reached max connections, wait
// Wait here if the maximum connection count has been reached
endpoint.countUpOrAwaitConnection();
// Endpoint might have been paused while waiting for latch
// If that is the case, don't accept new connections
if (endpoint.isPaused()) {
continue;
}
U socket = null;
try {
// Accept the next incoming connection from the server
// socket
// Block until the next connection arrives
socket = endpoint.serverSocketAccept();
} catch (Exception ioe) {
// We didn't get a socket
// Failed to obtain a socket
endpoint.countDownConnection();
if (endpoint.isRunning()) {
// Introduce delay if necessary
errorDelay = handleExceptionWithDelay(errorDelay);
// re-throw
throw ioe;
} else {
break;
}
}
// Successful accept, reset the error delay
errorDelay = 0;
// Configure the socket
if (endpoint.isRunning() && !endpoint.isPaused()) {
// Configure the socket
// setSocketOptions() will hand the socket off to
// an appropriate processor if successful
// Hand the socket off to the NioEndpoint
if (!endpoint.setSocketOptions(socket)) {
// Close the connection if the hand-off failed
endpoint.closeSocket(socket);
}
} else {
// Destroy the socket and close the connection
endpoint.destroySocket(socket);
}
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
String msg = sm.getString("endpoint.accept.fail");
// APR specific.
// Could push this down but not sure it is worth the trouble.
if (t instanceof Error) {
Error e = (Error) t;
if (e.getError() == 233) {
// Not an error on HP-UX so log as a warning
// so it can be filtered out on that platform
// See bug 50273
log.warn(msg, t);
} else {
log.error(msg, t);
}
} else {
log.error(msg, t);
}
}
}
// Mark the acceptor state as ended
state = AcceptorState.ENDED;
}
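Note the errorDelay handling above: when accept keeps failing, the acceptor backs off instead of spinning. Below is a hedged reconstruction of what handleExceptionWithDelay does; the 50 ms / 1600 ms bounds match Tomcat's constants as far as I recall, but treat the whole block as illustrative:

public class AcceptorBackoff {
    static final int INITIAL_ERROR_DELAY = 50; // ms, assumed
    static final int MAX_ERROR_DELAY = 1600;   // ms, assumed

    // The first failure returns immediately; later failures sleep for a
    // doubling, capped delay so a broken accept() cannot busy-spin the CPU.
    static int handleExceptionWithDelay(int currentErrorDelay) throws InterruptedException {
        if (currentErrorDelay > 0) {
            Thread.sleep(currentErrorDelay);
        }
        if (currentErrorDelay == 0) {
            return INITIAL_ERROR_DELAY;
        }
        return Math.min(currentErrorDelay * 2, MAX_ERROR_DELAY);
    }
}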
org.apache.tomcat.util.net.NioEndpoint
@Override
protected boolean setSocketOptions(SocketChannel socket) {
// Process the connection
try {
// Disable blocking, polling will be used
// Switch the channel to non-blocking mode
socket.configureBlocking(false);
// Get the underlying socket
Socket sock = socket.socket();
// Apply the configured socket properties
socketProperties.setProperties(sock);
// Obtain a NioChannel
NioChannel channel = null;
// Reuse a cached NioChannel from the stack if one is available
if (nioChannels != null) {
channel = nioChannels.pop();
}
if (channel == null) {
// No cached channel available; create a new one
SocketBufferHandler bufhandler = new SocketBufferHandler(
socketProperties.getAppReadBufSize(),
socketProperties.getAppWriteBufSize(),
socketProperties.getDirectBuffer());
if (isSSLEnabled()) {
channel = new SecureNioChannel(socket, bufhandler, selectorPool, this);
} else {
// Create a plain (non-TLS) NioChannel
channel = new NioChannel(socket, bufhandler);
}
} else {
channel.setIOChannel(socket);
channel.reset();
}
// Wrap the NioChannel in a NioSocketWrapper
NioSocketWrapper socketWrapper = new NioSocketWrapper(channel, this);
channel.setSocketWrapper(socketWrapper);
socketWrapper.setReadTimeout(getConnectionTimeout());
socketWrapper.setWriteTimeout(getConnectionTimeout());
socketWrapper.setKeepAliveLeft(NioEndpoint.this.getMaxKeepAliveRequests());
socketWrapper.setSecure(isSSLEnabled());
// Register the channel and its wrapper with the Poller
poller.register(channel, socketWrapper);
// Return true: the socket does not need to be closed
return true;
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
try {
log.error(sm.getString("endpoint.socketOptionsError"), t);
} catch (Throwable tt) {
ExceptionUtils.handleThrowable(tt);
}
}
// Tell the caller to close the socket
return false;
}
// Close the connection
@Override
protected void closeSocket(SocketChannel socket) {
countDownConnection();
try {
socket.close();
} catch (IOException ioe) {
if (log.isDebugEnabled()) {
log.debug(sm.getString("endpoint.err.close"), ioe);
}
}
}
// Apply the configured properties to the given socket
public void setProperties(Socket socket) throws SocketException{
if (rxBufSize != null)
socket.setReceiveBufferSize(rxBufSize.intValue());
if (txBufSize != null)
socket.setSendBufferSize(txBufSize.intValue());
if (ooBInline !=null)
socket.setOOBInline(ooBInline.booleanValue());
if (soKeepAlive != null)
socket.setKeepAlive(soKeepAlive.booleanValue());
if (performanceConnectionTime != null && performanceLatency != null &&
performanceBandwidth != null)
socket.setPerformancePreferences(
performanceConnectionTime.intValue(),
performanceLatency.intValue(),
performanceBandwidth.intValue());
if (soReuseAddress != null)
socket.setReuseAddress(soReuseAddress.booleanValue());
if (soLingerOn != null && soLingerTime != null)
socket.setSoLinger(soLingerOn.booleanValue(),
soLingerTime.intValue());
if (soTimeout != null && soTimeout.intValue() >= 0)
socket.setSoTimeout(soTimeout.intValue());
if (tcpNoDelay != null)
socket.setTcpNoDelay(tcpNoDelay.booleanValue());
}
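Each field above corresponds to a "socket."-prefixed attribute on the Connector (socket.rxBufSize, socket.txBufSize, socket.tcpNoDelay, and so on). A hedged sketch of setting a few of them programmatically; the values are illustrative:

import org.apache.catalina.connector.Connector;

public class SocketPropertiesDemo {
    public static void main(String[] args) {
        Connector connector = new Connector("org.apache.coyote.http11.Http11NioProtocol");
        // Each "socket."-prefixed property is forwarded to SocketProperties
        // and applied in setProperties(Socket) as shown above.
        connector.setProperty("socket.rxBufSize", "25188");   // setReceiveBufferSize
        connector.setProperty("socket.txBufSize", "43800");   // setSendBufferSize
        connector.setProperty("socket.tcpNoDelay", "true");   // setTcpNoDelay
        connector.setProperty("socket.soKeepAlive", "false"); // setKeepAlive
    }
}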
- Poller
At heart the Poller is a Selector, and it too runs in its own thread. Internally it maintains the set of registered Channels and, in an endless loop, keeps checking whether any of them has data ready; as soon as a Channel becomes readable, the Poller creates a SocketProcessor task object and hands it to the Executor to run.
org.apache.tomcat.util.net.NioEndpoint
private final SynchronizedQueue<PollerEvent> events = new SynchronizedQueue<>();
public void register(final NioChannel socket, final NioSocketWrapper socketWrapper) {
socketWrapper.interestOps(SelectionKey.OP_READ);//this is what OP_REGISTER turns into.
PollerEvent r = null;
if (eventCache != null) {
r = eventCache.pop();
}
if (r == null) {
r = new PollerEvent(socket, OP_REGISTER);
} else {
r.reset(socket, OP_REGISTER);
}
addEvent(r);
}
private void addEvent(PollerEvent event) {
// Add the event to the queue
events.offer(event);
// The counter was -1, i.e. the poller thread is blocked in select()
if (wakeupCounter.incrementAndGet() == 0) {
// Wake the blocked selector so the new event is seen promptly
selector.wakeup();
}
}
- Executor is the thread pool that runs the SocketProcessor tasks. SocketProcessor's run method calls Http11Processor to read and parse the request data; Http11Processor is the encapsulation of the application-layer protocol, and it invokes the container to obtain the response, then writes the response back out through the Channel. A rough sketch of such a worker pool follows.
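Tomcat builds this pool itself (createExecutor) with its own ThreadPoolExecutor and TaskQueue; below is a rough approximation with java.util.concurrent, where 10/200 are Tomcat's documented minSpareThreads/maxThreads defaults and everything else is illustrative:

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class WorkerPoolSketch {
    public static void main(String[] args) {
        // A SynchronousQueue makes the pool grow toward maximumPoolSize under
        // load, loosely mimicking Tomcat's TaskQueue, which prefers starting
        // new threads (up to maxThreads) over queueing.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                10,                   // corePoolSize  (~ minSpareThreads)
                200,                  // maximumPoolSize (~ maxThreads)
                60, TimeUnit.SECONDS, // idle worker keep-alive
                new SynchronousQueue<>());
        executor.execute(() -> System.out.println("a SocketProcessor would run here"));
        executor.shutdown();
    }
}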
The path of a single request
- Acceptor->run method
U socket = null;
try {
// Accept the next incoming connection from the server
// socket
socket = endpoint.serverSocketAccept();
} catch (Exception ioe) {
// We didn't get a socket
endpoint.countDownConnection();
if (endpoint.isRunning()) {
// Introduce delay if necessary
errorDelay = handleExceptionWithDelay(errorDelay);
// re-throw
throw ioe;
} else {
break;
}
}
// Successful accept, reset the error delay
errorDelay = 0;
// Configure the socket
if (endpoint.isRunning() && !endpoint.isPaused()) {
// setSocketOptions() will hand the socket off to
// an appropriate processor if successful
// Configure the SocketChannel
if (!endpoint.setSocketOptions(socket)) {
endpoint.closeSocket(socket);
}
} else {
    endpoint.destroySocket(socket);
}
- NioEndpoint->setSocketOptions
@Override
protected boolean setSocketOptions(SocketChannel socket) {
// Process the connection
try {
// Disable blocking, polling will be used
socket.configureBlocking(false);
Socket sock = socket.socket();
socketProperties.setProperties(sock);
NioChannel channel = null;
if (nioChannels != null) {
channel = nioChannels.pop();
}
if (channel == null) {
SocketBufferHandler bufhandler = new SocketBufferHandler(
socketProperties.getAppReadBufSize(),
socketProperties.getAppWriteBufSize(),
socketProperties.getDirectBuffer());
if (isSSLEnabled()) {
channel = new SecureNioChannel(socket, bufhandler, selectorPool, this);
} else {
channel = new NioChannel(socket, bufhandler);
}
} else {
channel.setIOChannel(socket);
channel.reset();
}
// Wrap as a NioSocketWrapper
NioSocketWrapper socketWrapper = new NioSocketWrapper(channel, this);
channel.setSocketWrapper(socketWrapper);
socketWrapper.setReadTimeout(getConnectionTimeout());
socketWrapper.setWriteTimeout(getConnectionTimeout());
socketWrapper.setKeepAliveLeft(NioEndpoint.this.getMaxKeepAliveRequests());
socketWrapper.setSecure(isSSLEnabled());
// Register the channel and wrapper with the Poller
poller.register(channel, socketWrapper);
return true;
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
try {
log.error(sm.getString("endpoint.socketOptionsError"), t);
} catch (Throwable tt) {
ExceptionUtils.handleThrowable(tt);
}
}
// Tell to close the socket
return false;
}
public void register(final NioChannel socket, final NioSocketWrapper socketWrapper) {
socketWrapper.interestOps(SelectionKey.OP_READ);//this is what OP_REGISTER turns into.
PollerEvent r = null;
if (eventCache != null) {
r = eventCache.pop();
}
if (r == null) {
r = new PollerEvent(socket, OP_REGISTER);
} else {
r.reset(socket, OP_REGISTER);
}
// Add to the event queue
addEvent(r);
}
private void addEvent(PollerEvent event) {
// Add the event to the queue
events.offer(event);
if (wakeupCounter.incrementAndGet() == 0) {
selector.wakeup();
}
}
- Poller->run method
public class Poller implements Runnable {
@Override
public void run() {
// Loop until destroy() is called
while (true) {
boolean hasEvents = false;
try {
if (!close) {
// Process queued events; returns true if there were any
hasEvents = events();
if (wakeupCounter.getAndSet(-1) > 0) {
// If we are here, means we have other stuff to do
// Do a non blocking select
keyCount = selector.selectNow();
} else {
keyCount = selector.select(selectorTimeout);
}
wakeupCounter.set(0);
}
if (close) {
events();
timeout(0, false);
try {
selector.close();
} catch (IOException ioe) {
log.error(sm.getString("endpoint.nio.selectorCloseFail"), ioe);
}
break;
}
} catch (Throwable x) {
ExceptionUtils.handleThrowable(x);
log.error(sm.getString("endpoint.nio.selectorLoopError"), x);
continue;
}
// Either we timed out or we woke up, process events first
if (keyCount == 0) {
hasEvents = (hasEvents | events());
}
Iterator<SelectionKey> iterator =
keyCount > 0 ? selector.selectedKeys().iterator() : null;
// Walk through the collection of ready keys and dispatch
// any active event.
while (iterator != null && iterator.hasNext()) {
SelectionKey sk = iterator.next();
NioSocketWrapper socketWrapper = (NioSocketWrapper) sk.attachment();
// Attachment may be null if another thread has called
// cancelledKey()
if (socketWrapper == null) {
iterator.remove();
} else {
iterator.remove();
// Process the ready key
processKey(sk, socketWrapper);
}
}
// Process timeouts
timeout(keyCount,hasEvents);
}
getStopLatch().countDown();
}
// Process a single selection key
protected void processKey(SelectionKey sk, NioSocketWrapper socketWrapper) {
try {
if (close) {
cancelledKey(sk, socketWrapper);
} else if (sk.isValid() && socketWrapper != null) {
if (sk.isReadable() || sk.isWritable()) {
if (socketWrapper.getSendfileData() != null) {
processSendfile(sk, socketWrapper, false);
} else {
unreg(sk, socketWrapper, sk.readyOps());
boolean closeSocket = false;
// Read goes before write
if (sk.isReadable()) {
if (socketWrapper.readOperation != null) {
if (!socketWrapper.readOperation.process()) {
closeSocket = true;
}
// Dispatch the read to a SocketProcessor
} else if (!processSocket(socketWrapper, SocketEvent.OPEN_READ, true)) {
closeSocket = true;
}
}
if (!closeSocket && sk.isWritable()) {
if (socketWrapper.writeOperation != null) {
if (!socketWrapper.writeOperation.process()) {
closeSocket = true;
}
} else if (!processSocket(socketWrapper, SocketEvent.OPEN_WRITE, true)) {
closeSocket = true;
}
}
if (closeSocket) {
cancelledKey(sk, socketWrapper);
}
}
}
} else {
// Invalid key
cancelledKey(sk, socketWrapper);
}
} catch (CancelledKeyException ckx) {
cancelledKey(sk, socketWrapper);
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
log.error(sm.getString("endpoint.nio.keyProcessingError"), t);
}
}
public SendfileState processSendfile(SelectionKey sk, NioSocketWrapper socketWrapper,
boolean calledByProcessor) {
NioChannel sc = null;
try {
unreg(sk, socketWrapper, sk.readyOps());
SendfileData sd = socketWrapper.getSendfileData();
if (log.isTraceEnabled()) {
log.trace("Processing send file for: " + sd.fileName);
}
if (sd.fchannel == null) {
// Setup the file channel
File f = new File(sd.fileName);
@SuppressWarnings("resource") // Closed when channel is closed
FileInputStream fis = new FileInputStream(f);
sd.fchannel = fis.getChannel();
}
// Configure output channel
sc = socketWrapper.getSocket();
// TLS/SSL channel is slightly different
WritableByteChannel wc = ((sc instanceof SecureNioChannel) ? sc : sc.getIOChannel());
// We still have data in the buffer
if (sc.getOutboundRemaining() > 0) {
if (sc.flushOutbound()) {
socketWrapper.updateLastWrite();
}
} else {
long written = sd.fchannel.transferTo(sd.pos, sd.length, wc);
if (written > 0) {
sd.pos += written;
sd.length -= written;
socketWrapper.updateLastWrite();
} else {
// Unusual not to be able to transfer any bytes
// Check the length was set correctly
if (sd.fchannel.size() <= sd.pos) {
throw new IOException(sm.getString("endpoint.sendfile.tooMuchData"));
}
}
}
if (sd.length <= 0 && sc.getOutboundRemaining()<=0) {
if (log.isDebugEnabled()) {
log.debug("Send file complete for: " + sd.fileName);
}
socketWrapper.setSendfileData(null);
try {
sd.fchannel.close();
} catch (Exception ignore) {
}
// For calls from outside the Poller, the caller is
// responsible for registering the socket for the
// appropriate event(s) if sendfile completes.
if (!calledByProcessor) {
switch (sd.keepAliveState) {
case NONE: {
if (log.isDebugEnabled()) {
log.debug("Send file connection is being closed");
}
poller.cancelledKey(sk, socketWrapper);
break;
}
case PIPELINED: {
if (log.isDebugEnabled()) {
log.debug("Connection is keep alive, processing pipe-lined data");
}
if (!processSocket(socketWrapper, SocketEvent.OPEN_READ, true)) {
poller.cancelledKey(sk, socketWrapper);
}
break;
}
case OPEN: {
if (log.isDebugEnabled()) {
log.debug("Connection is keep alive, registering back for OP_READ");
}
reg(sk, socketWrapper, SelectionKey.OP_READ);
break;
}
}
}
return SendfileState.DONE;
} else {
if (log.isDebugEnabled()) {
log.debug("OP_WRITE for sendfile: " + sd.fileName);
}
if (calledByProcessor) {
add(socketWrapper, SelectionKey.OP_WRITE);
} else {
reg(sk, socketWrapper, SelectionKey.OP_WRITE);
}
return SendfileState.PENDING;
}
} catch (IOException e) {
if (log.isDebugEnabled()) {
log.debug("Unable to complete sendfile request:", e);
}
if (!calledByProcessor && sc != null) {
poller.cancelledKey(sk, socketWrapper);
}
return SendfileState.ERROR;
} catch (Throwable t) {
log.error(sm.getString("endpoint.sendfile.error"), t);
if (!calledByProcessor && sc != null) {
poller.cancelledKey(sk, socketWrapper);
}
return SendfileState.ERROR;
}
}
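The heart of sendfile is FileChannel.transferTo, which asks the OS to move file bytes to the socket without copying them through user space (zero-copy). A standalone sketch; the file name and peer are illustrative:

import java.io.FileInputStream;
import java.net.InetSocketAddress;
import java.nio.channels.FileChannel;
import java.nio.channels.SocketChannel;

public class SendfileDemo {
    public static void main(String[] args) throws Exception {
        try (FileChannel file = new FileInputStream("static/index.html").getChannel(); // illustrative file
             SocketChannel socket = SocketChannel.open(new InetSocketAddress("localhost", 8080))) { // illustrative peer
            long pos = 0;
            long remaining = file.size();
            // Same loop shape as the Poller's sendfile handling: keep calling
            // transferTo until every byte has been handed to the kernel.
            while (remaining > 0) {
                long written = file.transferTo(pos, remaining, socket);
                pos += written;
                remaining -= written;
            }
        }
    }
}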
- AbstractEndpoint->processSocket
public boolean processSocket(SocketWrapperBase<S> socketWrapper,
SocketEvent event, boolean dispatch) {
try {
if (socketWrapper == null) {
return false;
}
SocketProcessorBase<S> sc = null;
if (processorCache != null) {
sc = processorCache.pop();
}
if (sc == null) {
sc = createSocketProcessor(socketWrapper, event);
} else {
sc.reset(socketWrapper, event);
}
Executor executor = getExecutor();
if (dispatch && executor != null) {
executor.execute(sc);
} else {
sc.run();
}
} catch (RejectedExecutionException ree) {
getLog().warn(sm.getString("endpoint.executor.fail", socketWrapper) , ree);
return false;
} catch (Throwable t) {
ExceptionUtils.handleThrowable(t);
// This means we got an OOM or similar creating a thread, or that
// the pool and its queue are full
getLog().error(sm.getString("endpoint.process.fail"), t);
return false;
}
return true;
}
SocketProcessor
public abstract class SocketProcessorBase<S> implements Runnable {
protected SocketWrapperBase<S> socketWrapper;
protected SocketEvent event;
public SocketProcessorBase(SocketWrapperBase<S> socketWrapper, SocketEvent event) {
reset(socketWrapper, event);
}
public void reset(SocketWrapperBase<S> socketWrapper, SocketEvent event) {
Objects.requireNonNull(event);
this.socketWrapper = socketWrapper;
this.event = event;
}
@Override
public final void run() {
synchronized (socketWrapper) {
// It is possible that processing may be triggered for read and
// write at the same time. The sync above makes sure that processing
// does not occur in parallel. The test below ensures that if the
// first event to be processed results in the socket being closed,
// the subsequent events are not processed.
if (socketWrapper.isClosed()) {
return;
}
doRun();
}
}
protected abstract void doRun();
}
For the remainder of the flow, see:
https://blog.csdn.net/ko0491/article/details/98222332