I. The Reactor Pattern
1. The traditional BIO server loop looks like this:
while (true) {
// block until a connection arrives, then hand the socket off to a worker thread
Socket socket = serverSocket.accept();
executor.execute(() -> handle(socket));
}
2. The NIO-based Reactor pattern
The Reactor pattern separates the accept, read, and write steps:
accepting connections runs on a dedicated thread while reads and writes run on child threads, which improves responsiveness; when the actual read/write processing is further handed off to a thread pool, this becomes the main-sub Reactor pattern.
A traditional BIO server can also split accept, read, and write across multiple threads,
but with NIO a Selector waits for specific events, which avoids busy polling and reduces the number of threads needed, and Buffers speed up reads and writes.
3. Main-sub Reactor pattern demo
The core idea is to register a callback via attach():
when an event of interest is selected, SelectionKey.attachment() retrieves the registered Runnable and runs it.
Server:
package com.reactor.server_sub;
import java.io.IOException;
public class Starter {
public static void main(String[] args) throws IOException {
new Thread(new Reactor(2333)).start();
}
}
package com.reactor.server_sub;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.util.Iterator;
import java.util.Set;
public class Reactor implements Runnable {
private final Selector selector;
private final ServerSocketChannel serverSocketChannel;
public Reactor(int port) throws IOException {
selector = Selector.open();
serverSocketChannel = ServerSocketChannel.open();
serverSocketChannel.socket().bind(new InetSocketAddress(port));
serverSocketChannel.configureBlocking(false);
SelectionKey sk = serverSocketChannel.register(selector, SelectionKey.OP_ACCEPT);
sk.attach(new Acceptor(serverSocketChannel));
}
@Override
public void run() {
try {
while (!Thread.interrupted()) {
int count = selector.select();
if (count == 0) {
continue;
}
Set<SelectionKey> selected = selector.selectedKeys(); //the keys made ready by this select()
Iterator<SelectionKey> it = selected.iterator();
while (it.hasNext()) {
//dispatch each ready key to its attached handler
dispatch(it.next());
}
selected.clear();
}
} catch (IOException e) {
e.printStackTrace();
}
}
void dispatch(SelectionKey k) {
//the Reactor attaches an Acceptor to handle connections; AsyncHandler attaches itself to handle reads/writes
Runnable r = (Runnable) (k.attachment());
if (r != null) {
r.run();
}
}
}
package com.reactor.server_sub;
import java.io.IOException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
//The Acceptor only listens for new connections; read/write readiness is watched by the sub-reactor threads, and the actual I/O work is done by a thread pool
public class Acceptor implements Runnable {
private final ServerSocketChannel serverSocketChannel;
private final int coreNum = Runtime.getRuntime().availableProcessors();
private final Selector[] selectors = new Selector[coreNum];
private int next = 0;
private SubReactor[] reactors = new SubReactor[coreNum];
private Thread[] threads = new Thread[coreNum];
Acceptor(ServerSocketChannel serverSocketChannel) throws IOException {
this.serverSocketChannel = serverSocketChannel;
for (int i = 0; i < coreNum; i++) {
selectors[i] = Selector.open();
reactors[i] = new SubReactor(selectors[i], i);
threads[i] = new Thread(reactors[i]);
threads[i].start();
}
}
@Override
public void run() {
SocketChannel socketChannel;
try {
socketChannel = serverSocketChannel.accept(); // accept the new connection
if (socketChannel != null) {
System.out.println(String.format("收到来自 %s 的连接", socketChannel.getRemoteAddress()));
socketChannel.configureBlocking(false);
reactors[next].registering(true); // a selector blocked in select() cannot register new channels, so pause the sub-reactor's select loop first; this flag plus the wakeup() below do exactly that (see SubReactor.run)
selectors[next].wakeup();
SelectionKey selectionKey = socketChannel.register(selectors[next],
SelectionKey.OP_READ);
selectors[next].wakeup(); // make a blocked select() return immediately
reactors[next].registering(false); // registration done: clear the flag so the select loop resumes (see SubReactor.run)
selectionKey.attach(new AsyncHandler(socketChannel, selectors[next], next));
if (++next == selectors.length) {
next = 0;
}
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
package com.reactor.server_sub;
import java.io.IOException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.util.Iterator;
import java.util.Set;
public class SubReactor implements Runnable {
private final Selector selector;
private volatile boolean register = false; //set by the Acceptor thread, read by this sub-reactor's loop
private int num; //index assigned by the Acceptor when it created this SubReactor
SubReactor(Selector selector, int num) {
this.selector = selector;
this.num = num;
}
@Override
public void run() {
while (!Thread.interrupted()) {
System.out.println(String.format("%d号SubReactor等待注册中...", num));
while (!Thread.interrupted() && !register) {
try {
if (selector.select() == 0) {
continue;
}
} catch (IOException e) {
e.printStackTrace();
}
Set<SelectionKey> selectedKeys = selector.selectedKeys();
Iterator<SelectionKey> it = selectedKeys.iterator();
while (it.hasNext()) {
dispatch(it.next());
it.remove();
}
}
}
}
private void dispatch(SelectionKey key) {
Runnable r = (Runnable) (key.attachment());
if (r != null) {
r.run();
}
}
void registering(boolean register) {
this.register = register;
}
}
package com.reactor.server_sub;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
public class AsyncHandler implements Runnable {
private final Selector selector;
private final SelectionKey selectionKey;
private final SocketChannel socketChannel;
private ByteBuffer readBuffer = ByteBuffer.allocate(1024);
private ByteBuffer sendBuffer = ByteBuffer.allocate(2048);
private final static int READ = 0; //ready to read
private final static int SEND = 1; //ready to send a response
private final static int PROCESSING = 2; //being processed by a worker
private volatile int status = READ; //mutated by worker threads, read by the sub-reactor thread
private int num;
private static final ExecutorService workers = Executors.newFixedThreadPool(5);
AsyncHandler(SocketChannel socketChannel, Selector selector, int num) throws IOException {
this.num = num; //marks which sub-reactor drives this handler
this.socketChannel = socketChannel;
this.socketChannel.configureBlocking(false);
//register with interest set 0 (no events) first; OP_READ is added via interestOps() only after attach(), so the key can never fire before its attachment is in place
selectionKey = socketChannel.register(selector, 0);
selectionKey.attach(this);
selectionKey.interestOps(SelectionKey.OP_READ);
this.selector = selector;
this.selector.wakeup();
}
@Override
public void run() {
switch (status) {
case READ:
read();
break;
case SEND:
send();
break;
default:
}
}
private void read() {
if (selectionKey.isValid()) {
try {
readBuffer.clear();
int count = socketChannel.read(readBuffer);
if (count > 0) {
status = PROCESSING;
workers.execute(this::readWorker);
} else {
selectionKey.cancel();
socketChannel.close();
System.out.println(String.format("%d号SubReactor触发:read时-------连接关闭", num));
}
} catch (IOException e) {
System.err.println("处理read业务时发生异常!异常信息:" + e.getMessage());
selectionKey.cancel();
try {
socketChannel.close();
} catch (IOException e1) {
System.err.println("处理read业务关闭通道时发生异常!异常信息:" + e.getMessage());
}
}
}
}
void send() {
if (selectionKey.isValid()) {
status = PROCESSING;
workers.execute(this::sendWorker);
selectionKey.interestOps(SelectionKey.OP_READ);
}
}
private void readWorker() {
try {
Thread.sleep(5000L); //simulate an expensive operation
} catch (InterruptedException e) {
e.printStackTrace();
}
try {
System.out.println(String.format("%d号SubReactor触发:收到来自客户端%s的消息: %s",
num, socketChannel.getRemoteAddress(), new String(readBuffer.array())));
} catch (IOException e) {
System.err.println("异步处理read业务时发生异常!异常信息:" + e.getMessage());
}
status = SEND;
selectionKey.interestOps(SelectionKey.OP_WRITE);
this.selector.wakeup();
}
private void sendWorker() {
try {
sendBuffer.clear();
sendBuffer.put(String.format("%d号SubReactor触发:我收到来自%s的信息辣:%s, 200ok;",
num, socketChannel.getRemoteAddress(),
new String(readBuffer.array())).getBytes());
sendBuffer.flip();
int count = socketChannel.write(sendBuffer);
if (count < 0) {
//同上,write场景下,取到-1,也意味着客户端断开连接
selectionKey.cancel();
socketChannel.close();
System.out.println(String.format("%d号SubReactor触发:send时-------连接关闭", num));
}
status = READ;
} catch (IOException e) {
System.err.println("异步处理send业务时发生异常!异常信息:" + e.getMessage());
selectionKey.cancel();
try {
socketChannel.close();
} catch (IOException e1) {
System.err.println("异步处理send业务关闭通道时发生异常!异常信息:" + e.getMessage());
}
}
}
}
II. Netty
Netty is a framework built on the Java NIO Reactor pattern.
1. Demo
package com.netty.server;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.string.StringDecoder;
import io.netty.handler.codec.string.StringEncoder;
public class NettyServer {
private int port;
public NettyServer(int port) {
this.port = port;
}
public void run() {
EventLoopGroup bossGroup = new NioEventLoopGroup();
EventLoopGroup workerGroup = new NioEventLoopGroup();
ServerBootstrap bootstrap = new ServerBootstrap();
bootstrap.group(bossGroup, workerGroup);
bootstrap.channel(NioServerSocketChannel.class);
bootstrap.childHandler(new ServerIniterHandler());
//SO_BACKLOG sets the maximum queue length for connections that have completed
//the TCP three-way handshake but have not yet been accepted by the server
bootstrap.option(ChannelOption.SO_BACKLOG, 1024);
//enable TCP keep-alive on accepted connections
bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
try {
//bind the server port and start listening
Channel channel = bootstrap.bind(port).sync().channel();
System.out.println("server run in port " + port);
//block until the server channel is closed
/*How channel.closeFuture().sync() actually works:
closeFuture() performs no action; it simply returns the channel's CloseFuture. Every Channel has exactly one CloseFuture, representing its close event,
so channel.closeFuture().sync() calls sync() on that CloseFuture, which blocks the current thread until the channel is closed.*/
channel.closeFuture().sync();
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
//shut down the event loop groups
bossGroup.shutdownGracefully();
workerGroup.shutdownGracefully();
}
}
public static void main(String[] args) {
new NettyServer(8899).run();
}
}
class ServerIniterHandler extends ChannelInitializer<SocketChannel> {
@Override
protected void initChannel(SocketChannel socketChannel) throws Exception {
//register handlers on the pipeline
ChannelPipeline pipeline = socketChannel.pipeline();
//decoder: inbound bytes -> String
pipeline.addLast("decode", new StringDecoder());
//encoder: outbound String -> bytes
pipeline.addLast("encode", new StringEncoder());
//chat handler with the business logic
pipeline.addLast("chat", new ServerHandler());
}
}
class ServerHandler extends SimpleChannelInboundHandler<String> {
public static final ChannelGroup group = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
/**
* Called when a message is received from a client.
*/
@Override
protected void channelRead0(ChannelHandlerContext context, String s)
throws Exception {
Channel channel = context.channel();
//when one user sends a message, broadcast it to every other user
for (Channel ch : group) {
if (ch == channel) {
ch.writeAndFlush("recv: " + s + "\n");
} else {
ch.writeAndFlush("[" + channel.remoteAddress() + "]: " + s + "\n");
}
}
System.out.println("[" + channel.remoteAddress() + "]: " + s + "\n");
}
/**
* A new channel joined: announce it to the existing members.
*/
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
Channel channel = ctx.channel();
//channel has not been added to the group yet, so this notifies every existing member
for (Channel ch : group) {
ch.writeAndFlush("[" + channel.remoteAddress() + "] coming");
}
group.add(channel);
}
/**
* A channel left: notify the remaining members.
*/
@Override
public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
Channel channel = ctx.channel();
for (Channel ch : group) {
if (ch != channel) { //notify everyone except the channel that is leaving
ch.writeAndFlush("[" + channel.remoteAddress() + "] leaving");
}
}
group.remove(channel);
}
/**
* Greet the client once the connection is active.
*/
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
Channel channel = ctx.channel();
//channelActive only fires when the channel is already active, so no isActive() check is needed
System.out.println("[" + channel.remoteAddress() + "] is online");
ctx.writeAndFlush("[server]: welcome");
}
/**
* Log the disconnect when the connection goes inactive.
*/
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
Channel channel = ctx.channel();
//channelInactive only fires after the channel has gone inactive
System.out.println("[" + channel.remoteAddress() + "] is offline");
}
/**
* Exception handling: log and close the channel.
*/
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable e) throws Exception {
Channel channel = ctx.channel();
System.out.println("[" + channel.remoteAddress() + "] leave the room");
ctx.close(); //do not sync() here: blocking on a future from inside the event loop can deadlock and is rejected by Netty
}
}
package com.netty.client;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.string.StringDecoder;
import io.netty.handler.codec.string.StringEncoder;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
public class NettyClient {
private String ip;
private int port;
public NettyClient(String ip, int port) {
this.ip = ip;
this.port = port;
}
public void run() throws IOException {
EventLoopGroup workerGroup = new NioEventLoopGroup();
Bootstrap bootstrap = new Bootstrap();
bootstrap.group(workerGroup);
bootstrap.channel(NioSocketChannel.class);
bootstrap.handler(new ClientIniterHandler());
try {
Channel channel = bootstrap.connect(ip, port).sync().channel();
//read lines from stdin and send them to the server; "q" quits
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
while (true) {
String content = reader.readLine();
if (content != null && !content.isEmpty()) {
if (content.equals("q")) {
break; //exit the loop; the finally block shuts the group down gracefully
}
channel.writeAndFlush(content);
}
}
} catch (InterruptedException e) {
e.printStackTrace();
System.exit(1);
} finally {
workerGroup.shutdownGracefully();
}
}
public static void main(String[] args) throws Exception {
new NettyClient("127.0.0.1", 8899).run();
}
}
class ClientIniterHandler extends ChannelInitializer<SocketChannel> {
@Override
protected void initChannel(SocketChannel socketChannel) throws Exception {
ChannelPipeline pipeline = socketChannel.pipeline();
pipeline.addLast("stringD", new StringDecoder());
pipeline.addLast("stringC", new StringEncoder());
pipeline.addLast("http", new HttpClientCodec());
pipeline.addLast("chat", new ClientHandler());
}
}
class ClientHandler extends SimpleChannelInboundHandler<String> {
@Override
protected void channelRead0(ChannelHandlerContext ctx, String s) throws Exception {
System.out.println("client recv==>" + s);
}
}
2. Netty's overall architecture
Component | Description |
---|---|
Channel | the object on which I/O operations (read, write, connect, etc.) are performed |
ChannelPipeline | maintains the chain of ChannelInboundHandler/ChannelOutboundHandler instances; inbound and outbound events pass through the handlers in this chain |
ChannelHandlerContext | its instances form a doubly linked list maintained by the ChannelPipeline |
ChannelHandler | where the wire protocol and business logic are actually implemented; subtypes are ChannelInboundHandler/ChannelOutboundHandler |
EventLoopGroup | the group of threads that perform I/O on Channels |
EventExecutorGroup | the threads that execute the callback methods of ChannelHandlerContext and ChannelHandler |
The EventLoopGroup threads run the ChannelHandler callbacks, so long-running work must be avoided in handlers (or moved off the I/O threads, as sketched below).
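A minimal sketch of offloading a slow handler, using the addLast overload that takes an EventExecutorGroup; SlowBusinessHandler is a hypothetical handler class standing in for any expensive business logic:
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.string.StringDecoder;
import io.netty.util.concurrent.DefaultEventExecutorGroup;
import io.netty.util.concurrent.EventExecutorGroup;
public class OffloadInitializer extends ChannelInitializer<SocketChannel> {
//a separate executor for slow handlers, sized independently of the I/O loops
private static final EventExecutorGroup BUSINESS = new DefaultEventExecutorGroup(16);
@Override
protected void initChannel(SocketChannel ch) {
ch.pipeline().addLast("decode", new StringDecoder());
//this overload runs the handler's callbacks on BUSINESS instead of the event loop,
//so a slow handler cannot stall reads/writes on the channel's EventLoop
ch.pipeline().addLast(BUSINESS, "slow", new SlowBusinessHandler());
}
}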
2.0 Netty and the Reactor pattern
Reactor | Netty |
---|---|
Main Reactor | the parentGroup passed to ServerBootstrap.group |
Sub Reactor | the childGroup passed to ServerBootstrap.group |
Acceptor | ServerBootstrapAcceptor (created inside ServerBootstrap.bind) |
Handler | concrete ChannelHandler implementations |
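In code the mapping is just the two EventLoopGroups handed to ServerBootstrap.group, a minimal sketch mirroring the demo above:
EventLoopGroup bossGroup = new NioEventLoopGroup(1); //main reactor: one thread is enough to accept connections
EventLoopGroup workerGroup = new NioEventLoopGroup(); //sub reactors: handle read/write for accepted channels
ServerBootstrap bootstrap = new ServerBootstrap();
bootstrap.group(bossGroup, workerGroup); //parentGroup = main reactor, childGroup = sub reactors
bootstrap.channel(NioServerSocketChannel.class);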
2.1 Channel
Channel exposes three kinds of methods:
helper methods: pipeline()
outbound methods: bind(SocketAddress) / connect(SocketAddress) / write(Object) / writeAndFlush()
inbound methods: read()
2.2 Netty's thread model
Netty's executor hierarchy parallels the JDK's: where java.util.concurrent has ThreadPoolExecutor, Netty's base type is EventExecutorGroup, which extends the JDK's ScheduledExecutorService.
Netty provides two main executor implementations, compared below; a small ordering sketch follows the list.
- SingleThreadEventLoop
Similar to a SingleThreadExecutor: a single thread with an ordered task queue, so tasks execute in submission order.
SingleThreadEventExecutor implements Executor, so it must provide execute():
if the caller is already the event-loop thread, the task is added to the queue directly;
otherwise startThread() starts the worker thread (created in DefaultEventExecutor via the configured ThreadFactory, DefaultThreadFactory by default).
https://blog.csdn.net/prestigeding/article/details/64443479
- MultithreadEventLoopGroup
MultithreadEventLoopGroup simply wraps a group of SingleThreadEventLoops.
- ThreadPoolExecutor vs. MultithreadEventLoopGroup
ThreadPoolExecutor's worker threads share one blocking task queue; heavy lock contention on that queue degrades performance, and the order in which tasks run is not guaranteed.
MultithreadEventLoopGroup just maintains several threads, each with its own task queue, which avoids blocking on a shared queue, guarantees per-thread task order, and reduces lock contention and thread switching.
Guaranteed task order is what Netty's workload actually needs.
NioEventLoopGroup adds the NIO layer on top of MultithreadEventLoopGroup: its event loops process Selector events.
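A minimal sketch of the per-loop ordering guarantee (class name illustrative): every task submitted to the same EventLoop runs on that loop's single thread, in submission order:
import io.netty.channel.EventLoop;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
public class OrderingDemo {
public static void main(String[] args) {
EventLoopGroup group = new NioEventLoopGroup(2);
EventLoop loop = group.next(); //pick one SingleThreadEventLoop from the group
for (int i = 0; i < 5; i++) {
final int n = i;
//all five tasks run on the same thread, in submission order,
//with no shared-queue contention between the group's loops
loop.execute(() -> System.out.println(Thread.currentThread().getName() + " task " + n));
}
group.shutdownGracefully(); //graceful: queued tasks still run before shutdown
}
}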
2.3 ChannelHandler
The main interface Netty exposes for implementing custom business logic; it ships with a family of implementations:
- single-purpose handlers:
IP filtering (black/white lists).
Logging.
SSL.
Timeout handling.
- encoding and serialization support:
base64
gzip and snappy compression
protobuf
String
custom packet formats.
- common application-layer protocols:
http/https (later versions also support http2)
haproxy
ChannelInboundHandler handles inbound events; ChannelOutboundHandler handles outbound events.
ChannelInboundHandlerAdapter and ChannelOutboundHandlerAdapter are skeleton implementations independent of the underlying I/O; CombinedChannelDuplexHandler combines a read handler and a write handler into one.
- shared vs. exclusive ChannelHandlers
Annotating a ChannelHandler implementation with @ChannelHandler.Sharable makes it shareable; the default is exclusive.
A shared handler instance can be added to multiple pipelines at the same time.
A handler that carries per-connection state between calls (e.g. buffering a TCP read that arrived incomplete) must not be shared; a sketch of a safely shareable handler follows.
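A minimal sketch of a shareable handler (class name illustrative): its only state is a thread-safe counter not tied to any connection, so one instance can safely sit in many pipelines:
import java.util.concurrent.atomic.AtomicLong;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
//Sharable: this single instance may be added to multiple pipelines;
//safe only because its state (an AtomicLong) is thread-safe and connection-independent
@ChannelHandler.Sharable
public class MessageCounterHandler extends ChannelInboundHandlerAdapter {
private final AtomicLong total = new AtomicLong();
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
total.incrementAndGet();
ctx.fireChannelRead(msg); //pass the message to the next inbound handler unchanged
}
}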
- ChannelPipeline as an interceptor chain
InboundHandlers only see inbound (read) events and OutboundHandlers only see outbound (write) events:
ChannelPipeline p = ...;
p.addLast("1", new InboundHandlerA());
p.addLast("2", new InboundHandlerB());
p.addLast("3", new OutboundHandlerA());
p.addLast("4", new OutboundHandlerB());
p.addLast("5", new InboundOutboundHandlerX());
输入: 1 -> 2 -> 5
输出: 5 -> 4 -> 3
2.4 Special handlers
- ChannelInitializer is a one-shot inbound handler: once its Channel has been registered with an EventLoop it populates the pipeline (initChannel) and then removes itself.
3. Supplementary notes
3.1 ChannelFuture
A Channel's read, write, bind/connect, and similar operations return a ChannelFuture, which extends Future.
To learn when an operation completes, use addListener(s); calling await()/sync() from inside a handler blocks the event loop and can deadlock.
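A minimal sketch of the listener style (method name and message are illustrative):
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
public class WriteCallback {
static void writeWithCallback(Channel channel, String msg) {
ChannelFuture future = channel.writeAndFlush(msg);
//the callback runs when the write completes; nothing blocks the event loop
future.addListener((ChannelFutureListener) f -> {
if (f.isSuccess()) {
System.out.println("write completed");
} else {
f.cause().printStackTrace();
}
});
}
}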
3.2 The Promise interface
Promise extends Future and adds the methods that complete it (setSuccess/setFailure).
The implementation DefaultChannelPromise holds the Channel and an EventExecutor and implements addListener, sync, and the rest.
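A minimal sketch of completing a Promise from a producer thread, using GlobalEventExecutor for simplicity:
import io.netty.util.concurrent.DefaultPromise;
import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.concurrent.Promise;
public class PromiseDemo {
public static void main(String[] args) throws InterruptedException {
//the executor is what runs the listeners registered on the promise
Promise<String> promise = new DefaultPromise<>(GlobalEventExecutor.INSTANCE);
promise.addListener(f -> System.out.println("result: " + f.getNow()));
//unlike a plain Future, a Promise is completed explicitly by the producer
new Thread(() -> promise.setSuccess("done")).start();
promise.await(); //safe here: main is not an event-loop thread
}
}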
3.3 ReplayingDecoder
Lets decode() be written as though all the data had already been received, with no need to check how many bytes are actually readable; on underflow the decode is replayed once more bytes arrive.
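A minimal sketch for length-prefixed frames (a 4-byte length followed by the payload), assuming that framing:
import java.util.List;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ReplayingDecoder;
public class IntLengthFrameDecoder extends ReplayingDecoder<Void> {
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
//no readableBytes() checks: if the bytes are not there yet,
//ReplayingDecoder aborts and replays decode() when more data arrives
int length = in.readInt();
out.add(in.readBytes(length));
}
}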
3.4 ByteBuf
Buffers come in pooled and unpooled variants,
and the data can live on the Java heap, in native (direct) memory, or in a CompositeByteBuf that stitches several buffers together.
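A minimal sketch of the allocation variants:
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.Unpooled;
public class ByteBufKinds {
public static void main(String[] args) {
ByteBuf heap = Unpooled.buffer(256); //unpooled, backed by the JVM heap
ByteBuf direct = PooledByteBufAllocator.DEFAULT.directBuffer(256); //pooled, native memory
heap.writeBytes("header".getBytes());
direct.writeBytes("body".getBytes());
//CompositeByteBuf presents several buffers as one logical buffer without copying
CompositeByteBuf composite = Unpooled.compositeBuffer();
composite.addComponents(true, heap, direct); //true: advance the writer index past the new components
System.out.println(composite.readableBytes()); //10 = "header" + "body"
composite.release(); //releasing the composite releases its components
}
}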
References:
https://www.cnblogs.com/winner-0715/p/8733787.html
https://www.cnblogs.com/crazymakercircle/p/9833847.html
https://www.jianshu.com/c/dfa503d2feac
https://www.cnblogs.com/brandonli/tag/netty/