Java Fundamentals: Netty (20) - Netty Heartbeat Mechanism

I. Example requirements

1. Write a Netty heartbeat-detection example: when the server has not read anything for more than 3 seconds, report "read idle".
2. When the server has not written anything for more than 5 seconds, report "write idle".
3. When the server has had neither a read nor a write for more than 7 seconds, report "read/write idle".

II. Server

1. MyServer.java

package netty.heartbeat;

import java.util.concurrent.TimeUnit;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import io.netty.handler.timeout.IdleStateHandler;

public class MyServer {

	public static void main(String[] args) {
		// Create the two event loop groups
		EventLoopGroup bossGroup = new NioEventLoopGroup(1);
		EventLoopGroup workerGroup = new NioEventLoopGroup(); // defaults to available CPU cores * 2
		
		try {
			ServerBootstrap server = new ServerBootstrap();
			server.group(bossGroup, workerGroup)
				.channel(NioServerSocketChannel.class)
				// Add a logging handler to the server channel (handled by the bossGroup)
				.handler(new LoggingHandler(LogLevel.INFO))
				.childHandler(new ChannelInitializer<SocketChannel>() {

					@Override
					protected void initChannel(SocketChannel ch) throws Exception {
						ChannelPipeline pipeline = ch.pipeline();
						// Add Netty's built-in IdleStateHandler
						/**
						 * Notes
						 * 1. IdleStateHandler is the handler Netty provides for detecting idle connections.
						 * 2. public IdleStateHandler(long readerIdleTime, long writerIdleTime, long allIdleTime, TimeUnit unit)
						 * 3. Parameters
						 * long readerIdleTime: if nothing has been read for this long, a READER_IDLE event is fired,
						 *                      so you can, for example, send a heartbeat probe to check whether the peer is still alive.
						 * long writerIdleTime: if nothing has been written for this long, a WRITER_IDLE event is fired.
						 * long allIdleTime: if there has been neither a read nor a write for this long, an ALL_IDLE event is fired.
						 * 4. From the Javadoc:
						 * Triggers an {@link IdleStateEvent} when a {@link Channel} has not performed read, write, or both operation for a while.
						 * 5. When an IdleStateEvent fires, it is passed to the next handler in the pipeline.
						 * 6. The next handler's userEventTriggered method is invoked, and the IdleStateEvent is handled there.
						 * 
						 */
						pipeline.addLast(new IdleStateHandler(3, 5, 7, TimeUnit.SECONDS));
						
						// Add a custom handler that reacts to the idle events
						pipeline.addLast(new MyServerHandler());
					}
				});
			
			// Start the server: bind port 7000
			ChannelFuture cf = server.bind(7000).sync();
			cf.channel().closeFuture().sync();
			
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			bossGroup.shutdownGracefully();
			workerGroup.shutdownGracefully();
		}
	}
}

2. MyServerHandler.java

package netty.heartbeat;

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.timeout.IdleStateEvent;

public class MyServerHandler extends ChannelInboundHandlerAdapter {

	/**
	 * ctx: the channel handler context
	 * evt: the user event that was fired
	 */
	@Override
	public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
		
		if (evt instanceof IdleStateEvent) {
			
			// Downcast evt to IdleStateEvent
			IdleStateEvent event = (IdleStateEvent) evt;
			
			String eventType = null;
			
			switch (event.state()) {
				case READER_IDLE:
					eventType = "read idle";
					break;
				case WRITER_IDLE:
					eventType = "write idle";
					break;
				case ALL_IDLE:
					eventType = "read/write idle";
					break;
			}
			
			System.out.println(ctx.channel().remoteAddress() + " idle event: " + eventType);
			System.out.println("Server takes appropriate action...");
		} else {
			// Pass any other user events on to the next handler in the pipeline
			super.userEventTriggered(ctx, evt);
		}
	}
}
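
In this example the handler only prints a message. A common follow-up action is to close connections that stay idle. The sketch below is a minimal variant of MyServerHandler (the class name CloseOnIdleServerHandler is made up for illustration and is not part of the original example) that closes the channel once an ALL_IDLE event fires:

package netty.heartbeat;

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;

// Hypothetical variant of MyServerHandler: instead of only logging, it closes
// the connection once neither a read nor a write has happened for 7 seconds.
public class CloseOnIdleServerHandler extends ChannelInboundHandlerAdapter {

	@Override
	public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
		if (evt instanceof IdleStateEvent
				&& ((IdleStateEvent) evt).state() == IdleState.ALL_IDLE) {
			// The peer is assumed dead or stuck; free the connection's resources.
			System.out.println(ctx.channel().remoteAddress() + " is idle, closing the connection");
			ctx.channel().close();
		} else {
			// Everything else keeps flowing through the pipeline as usual.
			super.userEventTriggered(ctx, evt);
		}
	}
}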

III. Testing

Just connect with the client from the group-chat example and leave it idle.
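
If that client is not at hand, any client that connects and then stays silent will do. The stand-in below is a minimal sketch (the class name IdleTestClient is made up for illustration and is not part of the original example) that connects to port 7000 and sends nothing, so the server's idle timers expire on schedule:

package netty.heartbeat;

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

// Hypothetical minimal test client: it opens a connection to MyServer and then
// sends nothing, which lets the server's IdleStateHandler fire its idle events.
public class IdleTestClient {

	public static void main(String[] args) {
		EventLoopGroup group = new NioEventLoopGroup();
		try {
			Bootstrap bootstrap = new Bootstrap();
			bootstrap.group(group)
				.channel(NioSocketChannel.class)
				.handler(new ChannelInitializer<SocketChannel>() {
					@Override
					protected void initChannel(SocketChannel ch) {
						// No handlers are needed; the connection just stays open and idle.
					}
				});

			ChannelFuture cf = bootstrap.connect("127.0.0.1", 7000).sync();
			cf.channel().closeFuture().sync();
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			group.shutdownGracefully();
		}
	}
}

With an idle client connected, the server prints a read-idle message roughly every 3 seconds, a write-idle message every 5 seconds and a read/write-idle message every 7 seconds, because IdleStateHandler re-arms each timer after it fires; killing the client then produces the IOException shown at the end of the log below.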

15:17:11.383 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using SLF4J as the default logging framework
15:17:11.393 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 16
15:17:11.424 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024
15:17:11.425 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096
15:17:11.442 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false
15:17:11.442 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512
15:17:11.472 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: Windows
15:17:11.476 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false
15:17:11.476 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 8
15:17:11.479 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available
15:17:11.480 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available
15:17:11.480 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available
15:17:11.481 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: available
15:17:11.483 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true
15:17:11.483 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable prior to Java9
15:17:11.483 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.<init>(long, int): available
15:17:11.483 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available
15:17:11.485 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: C:\Users\sjcui\AppData\Local\Temp (java.io.tmpdir)
15:17:11.485 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model)
15:17:11.487 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: 3767533568 bytes
15:17:11.487 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1
15:17:11.489 [main] DEBUG io.netty.util.internal.CleanerJava6 - java.nio.ByteBuffer.cleaner(): available
15:17:11.490 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false
15:17:11.505 [main] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available
15:17:12.008 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 7028 (auto-detected)
15:17:12.010 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv4Stack: false
15:17:12.010 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv6Addresses: false
15:17:12.396 [main] DEBUG io.netty.util.NetUtil - Loopback interface: lo (Software Loopback Interface 1, 127.0.0.1)
15:17:12.397 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file \proc\sys\net\core\somaxconn. Default: 200
15:17:12.780 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 00:50:56:ff:fe:c0:00:01 (auto-detected)
15:17:12.794 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: simple
15:17:12.794 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4
15:17:12.826 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 16
15:17:12.826 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 16
15:17:12.826 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192
15:17:12.826 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11
15:17:12.826 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216
15:17:12.826 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.tinyCacheSize: 512
15:17:12.826 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256
15:17:12.826 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64
15:17:12.826 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768
15:17:12.827 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192
15:17:12.827 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimIntervalMillis: 0
15:17:12.827 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true
15:17:12.827 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023
15:17:12.839 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled
15:17:12.839 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0
15:17:12.839 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384
15:17:12.871 [nioEventLoopGroup-2-1] INFO io.netty.handler.logging.LoggingHandler - [id: 0x2865274b] REGISTERED
15:17:12.875 [nioEventLoopGroup-2-1] INFO io.netty.handler.logging.LoggingHandler - [id: 0x2865274b] BIND: 0.0.0.0/0.0.0.0:7000
15:17:12.879 [nioEventLoopGroup-2-1] INFO io.netty.handler.logging.LoggingHandler - [id: 0x2865274b, L:/0:0:0:0:0:0:0:0:7000] ACTIVE
15:23:48.925 [nioEventLoopGroup-2-1] INFO io.netty.handler.logging.LoggingHandler - [id: 0x2865274b, L:/0:0:0:0:0:0:0:0:7000] READ: [id: 0xbccd3289, L:/127.0.0.1:7000 - R:/127.0.0.1:51818]
15:23:48.927 [nioEventLoopGroup-2-1] INFO io.netty.handler.logging.LoggingHandler - [id: 0x2865274b, L:/0:0:0:0:0:0:0:0:7000] READ COMPLETE
/127.0.0.1:51818 idle event: read idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: write idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read/write idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: write idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read/write idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: write idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: write idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read/write idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: read idle
Server takes appropriate action...
/127.0.0.1:51818 idle event: write idle
Server takes appropriate action...
15:24:15.372 [nioEventLoopGroup-3-1] DEBUG io.netty.util.Recycler - -Dio.netty.recycler.maxCapacityPerThread: 4096
15:24:15.373 [nioEventLoopGroup-3-1] DEBUG io.netty.util.Recycler - -Dio.netty.recycler.maxSharedCapacityFactor: 2
15:24:15.373 [nioEventLoopGroup-3-1] DEBUG io.netty.util.Recycler - -Dio.netty.recycler.linkCapacity: 16
15:24:15.373 [nioEventLoopGroup-3-1] DEBUG io.netty.util.Recycler - -Dio.netty.recycler.ratio: 8
15:24:15.382 [nioEventLoopGroup-3-1] DEBUG io.netty.buffer.AbstractByteBuf - -Dio.netty.buffer.checkAccessible: true
15:24:15.382 [nioEventLoopGroup-3-1] DEBUG io.netty.buffer.AbstractByteBuf - -Dio.netty.buffer.checkBounds: true
15:24:15.384 [nioEventLoopGroup-3-1] DEBUG io.netty.util.ResourceLeakDetectorFactory - Loaded default ResourceLeakDetector: io.netty.util.ResourceLeakDetector@50d10e68
15:24:15.390 [nioEventLoopGroup-3-1] WARN io.netty.channel.DefaultChannelPipeline - An exceptionCaught() event was fired, and it reached at the tail of the pipeline. It usually means the last handler in the pipeline did not handle the exception.
java.io.IOException: An existing connection was forcibly closed by the remote host.
	at sun.nio.ch.SocketDispatcher.read0(Native Method)
	at sun.nio.ch.SocketDispatcher.read(SocketDispatcher.java:43)
	at sun.nio.ch.IOUtil.readIntoNativeBuffer(IOUtil.java:223)
	at sun.nio.ch.IOUtil.read(IOUtil.java:192)
	at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:378)
	at io.netty.buffer.PooledByteBuf.setBytes(PooledByteBuf.java:247)
	at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:1140)
	at io.netty.channel.socket.nio.NioSocketChannel.doReadBytes(NioSocketChannel.java:347)
	at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:148)
	at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:697)
	at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:632)
	at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:549)
	at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:511)
	at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:918)
	at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
	at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
	at java.lang.Thread.run(Thread.java:748)

IV. Key points

1. Netty's logging handler
LoggingHandler(LogLevel.INFO)

2. Netty's idle-state handler
IdleStateHandler

3. Notes
1) IdleStateHandler is the handler Netty provides for detecting idle connections.
2) public IdleStateHandler(long readerIdleTime, long writerIdleTime, long allIdleTime, TimeUnit unit)
3) Parameters
long readerIdleTime: if nothing has been read for this long, a READER_IDLE event is fired, so you can, for example, send a heartbeat probe to check whether the connection is still alive.
long writerIdleTime: if nothing has been written for this long, a WRITER_IDLE event is fired.
long allIdleTime: if there has been neither a read nor a write for this long, an ALL_IDLE event is fired.
4) From the Javadoc:
Triggers an {@link IdleStateEvent} when a {@link Channel} has not performed read, write, or both operation for a while.
5) When an IdleStateEvent fires, it is passed to the next handler in the pipeline.
6) The next handler's userEventTriggered method is invoked (triggered), and the IdleStateEvent is handled there.
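
Note that IdleStateHandler only raises the event; sending an actual heartbeat is up to your own handler. The sketch below is a minimal client-side example (the class name HeartbeatClientHandler and the "PING" string are made up for illustration and are not part of the original example) that reacts to the client's own WRITER_IDLE event by writing a small probe, which in turn keeps resetting the server's reader and all-idle timers:

package netty.heartbeat;

import java.nio.charset.StandardCharsets;

import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;

// Hypothetical client-side heartbeat handler. Pair it with an IdleStateHandler in the
// client pipeline, e.g. pipeline.addLast(new IdleStateHandler(0, 2, 0, TimeUnit.SECONDS)),
// so a "PING" goes out whenever the client has written nothing for 2 seconds. Each PING
// the server reads resets its 3-second reader timer and its 7-second all-idle timer; the
// server's 5-second writer timer only resets if the server itself writes, e.g. a "PONG" reply.
public class HeartbeatClientHandler extends ChannelInboundHandlerAdapter {

	@Override
	public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
		if (evt instanceof IdleStateEvent
				&& ((IdleStateEvent) evt).state() == IdleState.WRITER_IDLE) {
			// The client has been quiet for too long: send a small heartbeat probe.
			ctx.writeAndFlush(Unpooled.copiedBuffer("PING", StandardCharsets.UTF_8));
		} else {
			super.userEventTriggered(ctx, evt);
		}
	}
}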
 

