Netty框架的工作原理
1. 基本过程描述如下
1)初始化创建 2 个 NioEventLoopGroup:其中 bossGroup 用于 Accept 连接建立事件并分发请求,workerGroup 用于处理 I/O 读写事件和业务逻辑。
2)基于 ServerBootstrap(服务端启动引导类):配置 EventLoopGroup、Channel 类型,连接参数、配置入站、出站事件 handler。
3)绑定端口:开始工作。
Server 端包含 1 个 Boss NioEventLoopGroup 和 1 个 Worker NioEventLoopGroup。
NioEventLoopGroup 相当于 1 个事件循环组,这个组里包含多个事件循环 NioEventLoop,每个 NioEventLoop 包含 1 个 Selector 和 1 个事件循环线程。
每个 Boss NioEventLoop 循环执行的任务包含 3 步:
1)轮询 Accept 事件;
2)处理 Accept I/O 事件,与 Client 建立连接,生成 NioSocketChannel,并将 NioSocketChannel 注册到某个 Worker NioEventLoop 的 Selector 上;
3)处理任务队列中的任务,runAllTasks。任务队列中的任务包括用户调用 eventloop.execute 或 schedule 执行的任务,或者其他线程提交到该 eventloop 的任务。
每个 Worker NioEventLoop 循环执行的任务包含 3 步:
1)轮询 Read、Write 事件;
2)处理 I/O 事件,即 Read、Write 事件,在 NioSocketChannel 可读、可写事件发生时进行处理;
3)处理任务队列中的任务,runAllTasks。
2. 高性能netty应用
实际工作中为处理业务逻辑的Handler定义单独的Group线程组DefaultEventExecutorGroup(netty提供),所有业务处理都交给DefaultEventExecutorGroup异步处理,进而提升workerGroup的处理能力,实现高性能;(应用netty实现的开源框架都是应用多worker线程组模式实现的高性能)
package com.cc.netty.best.multi.woker;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.util.concurrent.DefaultEventExecutorGroup;
/**
 * Netty server demo: boss group accepts connections, worker group handles network I/O,
 * and a separate DefaultEventExecutorGroup runs business-logic handlers so slow work
 * never blocks the worker event loops.
 */
public class Server {

    public static void main(String[] args) throws InterruptedException {
        // 1. Three thread groups: connection accept, network read/write, business logic.
        EventLoopGroup bossGroup = new NioEventLoopGroup();
        EventLoopGroup workGroup = new NioEventLoopGroup();
        // Business-logic executor group with named threads for easier diagnostics.
        DefaultEventExecutorGroup defaultEventExecutorGroup = new DefaultEventExecutorGroup(4,
                new ThreadFactory() {
                    private final AtomicInteger threadIndex = new AtomicInteger(0);

                    @Override
                    public Thread newThread(Runnable r) {
                        return new Thread(r, "NettyServerCodecThread_" + this.threadIndex.incrementAndGet());
                    }
                });
        try {
            // 2. Configure the server bootstrap.
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workGroup)
                    .channel(NioServerSocketChannel.class)
                    .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 3000)
                    .option(ChannelOption.SO_BACKLOG, 1024)
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    .childOption(ChannelOption.SO_RCVBUF, 1024 * 32)
                    .childOption(ChannelOption.SO_SNDBUF, 1024 * 32)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) throws Exception {
                            // Run ServerHandler on the business executor group, not the I/O loop.
                            ch.pipeline().addLast(defaultEventExecutorGroup, new ServerHandler());
                        }
                    });
            // Bind the port and block until the server channel is closed.
            ChannelFuture cf = b.bind(8765).sync();
            cf.channel().closeFuture().sync();
        } finally {
            // BUG FIX: shutdown now runs even if bind()/sync() throws, and
            // defaultEventExecutorGroup is shut down too (it was previously leaked,
            // leaving its 4 threads alive and preventing a clean JVM exit).
            bossGroup.shutdownGracefully();
            workGroup.shutdownGracefully();
            defaultEventExecutorGroup.shutdownGracefully();
        }
    }
}
package com.cc.netty.best.multi.woker;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
/**
 * Inbound handler that reads a UTF-8 request, logs it, and echoes a response
 * prefixed with a fixed marker string.
 */
public class ServerHandler extends ChannelInboundHandlerAdapter {

    @Override
    public void channelActive(ChannelHandlerContext ctx) throws Exception {
        System.err.println("server channel active..");
    }

    /**
     * Handles each inbound message: decode the client's bytes as UTF-8,
     * log them, and write back a response.
     *
     * BUG FIX: the inbound ByteBuf is reference-counted and was never released,
     * leaking pooled buffer memory on every read; it is now released in a
     * finally block once its bytes have been copied out.
     */
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        ByteBuf buf = (ByteBuf) msg;
        try {
            // Copy the readable bytes out of the buffer before releasing it.
            byte[] request = new byte[buf.readableBytes()];
            buf.readBytes(request);
            String requestBody = new String(request, "utf-8");
            System.err.println("Server: " + requestBody);
            // Echo a response. BUG FIX: encode with an explicit UTF-8 charset
            // instead of the platform default, so the multi-byte prefix
            // round-trips correctly to the UTF-8-decoding client.
            String responseBody = "返回响应数据" + requestBody;
            ctx.writeAndFlush(Unpooled.copiedBuffer(responseBody.getBytes("utf-8")));
        } finally {
            buf.release();
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        // Propagate to the next handler in the pipeline.
        ctx.fireExceptionCaught(cause);
    }
}
package com.cc.netty.best.multi.woker;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.DefaultEventExecutorGroup;
/**
 * Netty client demo: a single worker group for network I/O plus a
 * DefaultEventExecutorGroup that runs ClientHandler off the I/O threads.
 * Connects to the local server, sends two messages, and waits for close.
 */
public class Client {

    public static void main(String[] args) throws InterruptedException {
        // 1. One event loop group is enough for a client's network read/write.
        EventLoopGroup workGroup = new NioEventLoopGroup();
        // Business-logic executor group with named threads for easier diagnostics.
        DefaultEventExecutorGroup defaultEventExecutorGroup = new DefaultEventExecutorGroup(4,
                new ThreadFactory() {
                    private final AtomicInteger threadIndex = new AtomicInteger(0);

                    @Override
                    public Thread newThread(Runnable r) {
                        return new Thread(r, "NettyClientCodecThread_" + this.threadIndex.incrementAndGet());
                    }
                });
        try {
            // 2. Configure the client bootstrap.
            Bootstrap b = new Bootstrap();
            b.group(workGroup)
                    .channel(NioSocketChannel.class)
                    .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 3000)
                    .option(ChannelOption.SO_RCVBUF, 1024 * 32)
                    .option(ChannelOption.SO_SNDBUF, 1024 * 32)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) throws Exception {
                            // Run ClientHandler on the business executor group, not the I/O loop.
                            ch.pipeline().addLast(defaultEventExecutorGroup, new ClientHandler());
                        }
                    });
            // Connect, send two messages one second apart, then block until closed.
            ChannelFuture cf = b.connect("127.0.0.1", 8765).syncUninterruptibly();
            cf.channel().writeAndFlush(Unpooled.copiedBuffer("hello netty!".getBytes()));
            Thread.sleep(1000);
            cf.channel().writeAndFlush(Unpooled.copiedBuffer("hello netty!".getBytes()));
            cf.channel().closeFuture().sync();
        } finally {
            // BUG FIX: shutdown now runs even if connect()/sync() throws, and
            // defaultEventExecutorGroup is shut down too (it was previously leaked,
            // leaving its 4 threads alive and preventing a clean JVM exit).
            workGroup.shutdownGracefully();
            defaultEventExecutorGroup.shutdownGracefully();
        }
    }
}
package com.cc.netty.best.multi.woker;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.ReferenceCountUtil;
/**
 * Inbound handler on the client side: decodes each server response as UTF-8
 * text and prints it, always releasing the reference-counted message.
 */
public class ClientHandler extends ChannelInboundHandlerAdapter {

    @Override
    public void channelActive(ChannelHandlerContext ctx) throws Exception {
        System.err.println("client channel active..");
    }

    /**
     * Receives the server's response bytes and prints them as a UTF-8 string.
     * The inbound buffer is released in a finally block regardless of outcome.
     */
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        try {
            ByteBuf in = (ByteBuf) msg;
            byte[] payload = new byte[in.readableBytes()];
            in.readBytes(payload);
            System.err.println("Client: " + new String(payload, "utf-8"));
        } finally {
            // Drop our reference so the pooled buffer can be reclaimed.
            ReferenceCountUtil.release(msg);
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        // Propagate to the next handler in the pipeline.
        ctx.fireExceptionCaught(cause);
    }
}