Netty object pool (Recycler) usage test

When I first wrote this article in 2017, there were still very few write-ups on the source of Netty's object pool, Recycler.java. Pressed for time back then, I only gave example code showing how Recycler is used and did not do a deeper source-level analysis.
Coming back to tidy this up today (2019-02-18), I found that a number of articles introducing Recycler.java have since appeared online, some of them quite detailed, so I no longer plan to analyze the source further myself. A few good posts are listed here for reference:
1. Netty之Recycler
2. netty源码分析4 - Recycler对象池的设计
Those articles do analyze Recycler.java in detail, but none of them, in my opinion, offers a really presentable diagram of Recycler. So here I simply provide a more detailed diagram of Recycler's internal structure, in the hope that it helps interested readers understand the class in depth.
Recycler structure diagram:

[Figure: Recycler internal structure]
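Since the image may not render here, the following is a rough textual sketch of the same structure, reconstructed from the source listed at the end of this article (the class and field names are the ones used in that source):

Recycler<T>
  |-- threadLocal: FastThreadLocal<Stack<T>>        -> one Stack per thread that calls get()
  |     Stack<T>
  |       |-- elements: DefaultHandle<?>[]          -> handles recycled by the owning thread itself (pushNow)
  |       |-- head -> WeakOrderQueue -> ...         -> one queue per foreign thread that has recycled into this Stack
  |       `-- threadRef: WeakReference<Thread>      -> the owning thread
  |-- DELAYED_RECYCLED: FastThreadLocal<Map<Stack<?>, WeakOrderQueue>>
  |     -> per recycling thread: for each foreign Stack, the queue this thread hands objects back through (pushLater)
  `-- WeakOrderQueue
        |-- head/tail -> Link -> Link -> ...        -> each Link holds up to LINK_CAPACITY handles
        `-- owner: WeakReference<Thread>            -> the foreign thread that recycled the objects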


Recycler usage example

import org.junit.Test;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
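// Note: there is no import for Recycler here; this test assumes the modified Recycler class listed at the
// end of this article (the one with the extra debug prints) lives in the same package as this test class.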

public class RecyclerTest01 {

    private static AtomicInteger objId = new AtomicInteger(1);
    private static CountDownLatch latch = new CountDownLatch(1);
    private static PooledObj[] objSlot = new PooledObj[4];

    private static  CountDownLatch async_thread_latch = new CountDownLatch(2);
    private static  AtomicInteger async_thread_id = new AtomicInteger(99);

    private static Recycler<PooledObj> pool = new Recycler<PooledObj>() {
        @Override
        protected PooledObj newObject(Handle<PooledObj> handle) {
            PooledObj obj = new PooledObj("obj-" + objId.getAndIncrement(), handle);

            return obj;
        }
    };


    static class PooledObj {

        private Recycler.Handle<PooledObj> handle = null;
        private String name = null;

        public PooledObj(String name, Recycler.Handle<PooledObj> handle) {
            this.name = name;
            this.handle = handle;
        }

        public Recycler.Handle<PooledObj> getHandle() {
            return handle;
        }

        public void setHandle(Recycler.Handle<PooledObj> handle) {
            this.handle = handle;
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        @Override
        public String toString() {
            return name;
        }

        public void recycle() {
            handle.recycle(this);
        }
    }

    static class GetRunnable implements Runnable {
        private CountDownLatch latch;
        private boolean nosync;
        private int index;


        public GetRunnable(CountDownLatch latch, boolean nosync, int index) {
            this.latch = latch;
            this.nosync = nosync;
            this.index = index;
        }

        @Override
        public void run() {
            PooledObj obj = pool.get();
            objSlot[index] = obj;

            System.out.println(Thread.currentThread().getName() + " got object: " + obj);

            if (!nosync) {
                try {
                    System.out.println(Thread.currentThread().getName() + " blocked, waiting on the latch");
                    latch.await();

                    PooledObj anotherObj = pool.get();
                    System.out.println(Thread.currentThread().getName() + " woken up and got object " + anotherObj);

                    String name = Thread.currentThread().getName();

                    Thread asyncRecycleThread = new Thread(() -> {
                        anotherObj.recycle();
                        async_thread_latch.countDown();
                    }, "async-thread-" + async_thread_id.getAndIncrement() + "-versus-" + name );
                    asyncRecycleThread.setDaemon(false);
                    asyncRecycleThread.start();


                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    }


    @Test
    public void testRecycler() {

        // Main thread gets obj-1 from the pool and stores it in objSlot[0]
        PooledObj obj0 = pool.get();
        objSlot[0] = obj0;

        System.out.println("Main thread got object from pool: " + obj0);

        // Thread thread-1 gets obj-2 from the pool and stores it in objSlot[1]
        Thread g1 = new Thread(new GetRunnable(latch, false, 1), "thread-1");
        g1.start();

        // Thread thread-2 gets obj-3 from the pool and stores it in objSlot[2]
        Thread g2 = new Thread(new GetRunnable(latch, true, 2), "thread-2");
        g2.start();

        // Thread thread-3 gets obj-4 from the pool and stores it in objSlot[3]
        Thread g3 = new Thread(new GetRunnable(latch, false, 3), "thread-3");
        g3.start();


        try {
            Thread.sleep(3000L);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        // Main thread recycles obj-1 into its own pool; the handle is pushed onto the main thread's Stack
        objSlot[0].recycle();

        // Main thread recycles obj-2 into thread-1's pool; the handle is parked in a WeakOrderQueue node
        objSlot[1].recycle();

        // Main thread recycles obj-3 into thread-2's pool; the handle is parked in a WeakOrderQueue node
        objSlot[2].recycle();

        // Main thread recycles obj-4 into thread-3's pool; the handle is parked in a WeakOrderQueue node
        objSlot[3].recycle();

        // Main thread pops obj-1 back out of its Stack
        PooledObj obj = pool.get();
        System.out.println("Got object after recycling: " + obj);

        // Main thread asks for another object; its Stack is now empty (only obj-1 was in it), so a new obj-5 is created
        PooledObj obj2 = pool.get();
        System.out.println("Got object after recycling: " + obj2);

        // Same as above: the Stack is empty, so a new obj-6 is created
        PooledObj obj3 = pool.get();
        System.out.println("Got object after recycling: " + obj3);

        // Same as above: the Stack is empty, so a new obj-7 is created
        PooledObj obj4 = pool.get();
        System.out.println("Got object after recycling: " + obj4);

        // Main thread recycles obj-1 back onto its Stack
        obj.recycle();
        // Main thread recycles obj-5 back onto its Stack
        obj2.recycle();
        // Main thread recycles obj-6 back onto its Stack
        obj3.recycle();
        // Main thread recycles obj-7 back onto its Stack
        obj4.recycle();

        latch.countDown();

        try {
            async_thread_latch.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }


}


Running the test above prints the log below. As the output shows, the example exercises both recycling cases: an object recycled by the thread that obtained it, and an object recycled by a different thread.
main get STACK: main-stack
Main thread got object from pool: obj-1
thread-1 get STACK: thread-1-stack
thread-1 got object: obj-2
thread-1 blocked, waiting on the latch
thread-2 get STACK: thread-2-stack
thread-2 got object: obj-3
thread-3 get STACK: thread-3-stack
thread-3 got object: obj-4
thread-3 blocked, waiting on the latch
Got object after recycling: obj-1
main get STACK: main-stack
Got object after recycling: obj-5
main get STACK: main-stack
Got object after recycling: obj-6
main get STACK: main-stack
Got object after recycling: obj-7
thread-1 woken up and got object obj-2
thread-3 woken up and got object obj-4
Async recycle thread async-thread-99-versus-thread-1 starts recycling its object
Async recycle thread async-thread-100-versus-thread-3 starts recycling its object
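The fork between those two cases is the push(...) method of the Stack class in the source below; shown here as a short excerpt with a couple of comments added for this article:

        void push(DefaultHandle<?> item) {
            Thread currentThread = Thread.currentThread();
            if (threadRef.get() == currentThread) {
                // Same-thread recycle (obj-1 and obj-5..obj-7 above): the handle goes straight back onto this Stack.
                pushNow(item);
            } else {
                // Cross-thread recycle (obj-2..obj-4 above): the handle is parked in a WeakOrderQueue owned by the
                // recycling thread; the Stack's owner later pulls it back via scavenge() on a get() call.
                pushLater(item, currentThread);
            }
        }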
The Recycler source is listed below. To line it up with the usage example above, I have added log statements where needed. Note: the Recycler object-pool source has also been extracted into a standalone framework.

/*
 * Copyright 2013 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */



import io.netty.util.NettyRuntime;
import io.netty.util.concurrent.FastThreadLocal;
import io.netty.util.internal.SystemPropertyUtil;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

import java.lang.ref.WeakReference;
import java.util.Arrays;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.atomic.AtomicInteger;

import static io.netty.util.internal.MathUtil.safeFindNextPositivePowerOfTwo;
import static java.lang.Math.max;
import static java.lang.Math.min;

/**
 * Light-weight object pool based on a thread-local stack.
 *
 * @param <T> the type of the pooled object
 */
public abstract class Recycler<T> {

    private static final InternalLogger logger = InternalLoggerFactory.getInstance(Recycler.class);

    @SuppressWarnings("rawtypes")
    private static final Handle NOOP_HANDLE = new Handle() {
        @Override
        public void recycle(Object object) {
            // NOOP
        }
    };
    private static final AtomicInteger ID_GENERATOR = new AtomicInteger(Integer.MIN_VALUE);
    private static final int OWN_THREAD_ID = ID_GENERATOR.getAndIncrement();
    private static final int DEFAULT_INITIAL_MAX_CAPACITY_PER_THREAD = 4 * 1024; // Use 4k instances as default.
    private static final int DEFAULT_MAX_CAPACITY_PER_THREAD;
    private static final int INITIAL_CAPACITY;
    private static final int MAX_SHARED_CAPACITY_FACTOR;
    private static final int MAX_DELAYED_QUEUES_PER_THREAD;
    private static final int LINK_CAPACITY;
    private static final int RATIO;

    static {
        // In the future, we might have different maxCapacity for different object types.
        // e.g. io.netty.recycler.maxCapacity.writeTask
        //      io.netty.recycler.maxCapacity.outboundBuffer
        int maxCapacityPerThread = SystemPropertyUtil.getInt("io.netty.recycler.maxCapacityPerThread",
                SystemPropertyUtil.getInt("io.netty.recycler.maxCapacity", DEFAULT_INITIAL_MAX_CAPACITY_PER_THREAD));
        if (maxCapacityPerThread < 0) {
            maxCapacityPerThread = DEFAULT_INITIAL_MAX_CAPACITY_PER_THREAD;
        }

        DEFAULT_MAX_CAPACITY_PER_THREAD = maxCapacityPerThread;

        MAX_SHARED_CAPACITY_FACTOR = max(2,
                SystemPropertyUtil.getInt("io.netty.   maxSharedCapacityFactor",
                        2));

        MAX_DELAYED_QUEUES_PER_THREAD = max(0,
                SystemPropertyUtil.getInt("io.netty.   maxDelayedQueuesPerThread",
                        // We use the same value as default EventLoop number
                        NettyRuntime.availableProcessors() * 2));

        LINK_CAPACITY = safeFindNextPositivePowerOfTwo(
                max(SystemPropertyUtil.getInt("io.netty.recycler.linkCapacity", 16), 16));

        // By default we allow one push to a Recycler for each 8th try on handles that were never recycled before.
        // This should help to slowly increase the capacity of the recycler while not be too sensitive to allocation
        // bursts.
        RATIO = safeFindNextPositivePowerOfTwo(SystemPropertyUtil.getInt("io.netty.recycler.ratio", 8));

        if (logger.isDebugEnabled()) {
            if (DEFAULT_MAX_CAPACITY_PER_THREAD == 0) {
                logger.debug("-Dio.netty.   maxCapacityPerThread: disabled");
                logger.debug("-Dio.netty.   maxSharedCapacityFactor: disabled");
                logger.debug("-Dio.netty.   linkCapacity: disabled");
                logger.debug("-Dio.netty.   ratio: disabled");
            } else {
                logger.debug("-Dio.netty.   maxCapacityPerThread: {}", DEFAULT_MAX_CAPACITY_PER_THREAD);
                logger.debug("-Dio.netty.   maxSharedCapacityFactor: {}", MAX_SHARED_CAPACITY_FACTOR);
                logger.debug("-Dio.netty.   linkCapacity: {}", LINK_CAPACITY);
                logger.debug("-Dio.netty.   ratio: {}", RATIO);
            }
        }

        INITIAL_CAPACITY = min(DEFAULT_MAX_CAPACITY_PER_THREAD, 256);
    }

    private final int maxCapacityPerThread;
    private final int maxSharedCapacityFactor;
    private final int ratioMask;
    private final int maxDelayedQueuesPerThread;

    private final FastThreadLocal<Stack<T>> threadLocal = new FastThreadLocal<Stack<T>>() {
        @Override
        protected Stack<T> initialValue() {
            return new Stack<T>(Recycler.this, Thread.currentThread(), maxCapacityPerThread, maxSharedCapacityFactor,
                    ratioMask, maxDelayedQueuesPerThread);
        }

        @Override
        protected void onRemoval(Stack<T> value) {
            // Let us remove the WeakOrderQueue from the WeakHashMap directly if its safe to remove some overhead
            if (value.threadRef.get() == Thread.currentThread()) {
                if (DELAYED_RECYCLED.isSet()) {
                    DELAYED_RECYCLED.get().remove(value);
                }
            }
        }
    };

    protected Recycler() {
        this(DEFAULT_MAX_CAPACITY_PER_THREAD);
    }

    protected Recycler(int maxCapacityPerThread) {
        this(maxCapacityPerThread, MAX_SHARED_CAPACITY_FACTOR);
    }

    protected Recycler(int maxCapacityPerThread, int maxSharedCapacityFactor) {
        this(maxCapacityPerThread, maxSharedCapacityFactor, RATIO, MAX_DELAYED_QUEUES_PER_THREAD);
    }

    protected Recycler(int maxCapacityPerThread, int maxSharedCapacityFactor,
                       int ratio, int maxDelayedQueuesPerThread) {
        ratioMask = safeFindNextPositivePowerOfTwo(ratio) - 1;
        if (maxCapacityPerThread <= 0) {
            this.maxCapacityPerThread = 0;
            this.maxSharedCapacityFactor = 1;
            this.maxDelayedQueuesPerThread = 0;
        } else {
            this.maxCapacityPerThread = maxCapacityPerThread;
            this.maxSharedCapacityFactor = max(1, maxSharedCapacityFactor);
            this.maxDelayedQueuesPerThread = max(0, maxDelayedQueuesPerThread);
        }
    }

    @SuppressWarnings("unchecked")
    public final T get() {
        if (maxCapacityPerThread == 0) {
            return newObject((Handle<T>) NOOP_HANDLE);
        }
        Stack<T> stack = threadLocal.get();
        DefaultHandle<T> handle = stack.pop();
        if (handle == null) {
            handle = stack.newHandle();
            handle.value = newObject(handle);
        }
        return (T) handle.value;
    }

    /**
     * @deprecated use {@link Handle#recycle(Object)}.
     */
    @Deprecated
    public final boolean recycle(T o, Handle<T> handle) {
        if (handle == NOOP_HANDLE) {
            return false;
        }

        DefaultHandle<T> h = (DefaultHandle<T>) handle;
        if (h.stack.parent != this) {
            return false;
        }

        h.recycle(o);
        return true;
    }

    final int threadLocalCapacity() {
        return threadLocal.get().elements.length;
    }

    final int threadLocalSize() {
        return threadLocal.get().size;
    }

    protected abstract T newObject(Handle<T> handle);

    public interface Handle<T> {
        void recycle(T object);
    }

    static final class DefaultHandle<T> implements Handle<T> {
        private int lastRecycledId;
        private int recycleId;

        boolean hasBeenRecycled;

        private Stack<?> stack;
        private Object value;

        DefaultHandle(Stack<?> stack) {
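            // Debug print added for this article: shows which thread's Stack a newly created handle is bound to.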
            System.out.println(Thread.currentThread().getName() + " get STACK: " + stack);
            this.stack = stack;
        }

        @Override
        public void recycle(Object object) {
            if (object != value) {
                throw new IllegalArgumentException("object does not belong to handle");
            }

            Stack<?> stack = this.stack;
            if (lastRecycledId != recycleId || stack == null) {
                throw new IllegalStateException("recycled already");
            }

            stack.push(this);
        }

        @Override
        public String toString() {
            return "handle-" + value;
        }
    }

    private static final FastThreadLocal<Map<Stack<?>, WeakOrderQueue>> DELAYED_RECYCLED =
            new FastThreadLocal<Map<Stack<?>, WeakOrderQueue>>() {
                @Override
                protected Map<Stack<?>, WeakOrderQueue> initialValue() {
                    return new WeakHashMap<Stack<?>, WeakOrderQueue>();
                }
            };

    // a queue that makes only moderate guarantees about visibility: items are seen in the correct order,
    // but we aren't absolutely guaranteed to ever see anything at all, thereby keeping the queue cheap to maintain
    private static final class WeakOrderQueue {

        static final WeakOrderQueue DUMMY = new WeakOrderQueue();

        // Let Link extend AtomicInteger for intrinsics. The Link itself will be used as writerIndex.
        @SuppressWarnings("serial")
        static final class Link extends AtomicInteger {
            private final DefaultHandle<?>[] elements = new DefaultHandle[LINK_CAPACITY];

            private int readIndex;
            Link next;
        }

        // This act as a place holder for the head Link but also will reclaim space once finalized.
        // Its important this does not hold any reference to either Stack or WeakOrderQueue.
        static final class Head {
            private final AtomicInteger availableSharedCapacity;

            Link link;

            Head(AtomicInteger availableSharedCapacity) {
                this.availableSharedCapacity = availableSharedCapacity;
            }

            /// TODO: In the future when we move to Java9+ we should use java.lang.ref.Cleaner.
            @Override
            protected void finalize() throws Throwable {
                try {
                    super.finalize();
                } finally {
                    Link head = link;
                    link = null;
                    while (head != null) {
                        reclaimSpace(LINK_CAPACITY);
                        Link next = head.next;
                        // Unlink to help GC and guard against GC nepotism.
                        head.next = null;
                        head = next;
                    }
                }
            }

            void reclaimSpace(int space) {
                assert space >= 0;
                availableSharedCapacity.addAndGet(space);
            }

            boolean reserveSpace(int space) {
                return reserveSpace(availableSharedCapacity, space);
            }

            static boolean reserveSpace(AtomicInteger availableSharedCapacity, int space) {
                assert space >= 0;
                for (;;) {
                    int available = availableSharedCapacity.get();
                    if (available < space) {
                        return false;
                    }
                    if (availableSharedCapacity.compareAndSet(available, available - space)) {
                        return true;
                    }
                }
            }
        }

        // chain of data items
        private final Head head;
        private Link tail;
        // pointer to another queue of delayed items for the same stack
        private WeakOrderQueue next;
        private final WeakReference<Thread> owner;
        private final int id = ID_GENERATOR.getAndIncrement();

        private WeakOrderQueue() {
            owner = null;
            head = new Head(null);
        }

        private WeakOrderQueue(Stack<?> stack, Thread thread) {
            tail = new Link();

            // Its important that we not store the Stack itself in the WeakOrderQueue as the Stack also is used in
            // the WeakHashMap as key. So just store the enclosed AtomicInteger which should allow to have the
            // Stack itself GCed.
            head = new Head(stack.availableSharedCapacity);
            head.link = tail;
            owner = new WeakReference<Thread>(thread);
        }

        static WeakOrderQueue newQueue(Stack<?> stack, Thread thread) {
            final WeakOrderQueue queue = new WeakOrderQueue(stack, thread);
            // Done outside of the constructor to ensure WeakOrderQueue.this does not escape the constructor and so
            // may be accessed while its still constructed.
            stack.setHead(queue);

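            // Debug print added for this article: logs when a foreign ("async") recycling thread creates a new WeakOrderQueue for this Stack.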
            if (thread.getName().startsWith("async-thread-")) {
                System.out.println("异步回收线程"+ thread.getName() + " 开始回收对象");
            }

            return queue;
        }

        private void setNext(WeakOrderQueue next) {
            assert next != this;
            this.next = next;
        }

        /**
         * Allocate a new {@link WeakOrderQueue} or return {@code null} if not possible.
         */
        static WeakOrderQueue allocate(Stack<?> stack, Thread thread) {
            // We allocated a Link so reserve the space
            return Head.reserveSpace(stack.availableSharedCapacity, LINK_CAPACITY)
                    ? newQueue(stack, thread) : null;
        }

        void add(DefaultHandle<?> handle) {
            handle.lastRecycledId = id;

            Link tail = this.tail;
            int writeIndex;
            if ((writeIndex = tail.get()) == LINK_CAPACITY) {
                if (!head.reserveSpace(LINK_CAPACITY)) {
                    // Drop it.
                    return;
                }
                // We allocate a Link so reserve the space
                this.tail = tail = tail.next = new Link();

                writeIndex = tail.get();
            }
            tail.elements[writeIndex] = handle;
            handle.stack = null;
            // we lazy set to ensure that setting stack to null appears before we unnull it in the owning thread;
            // this also means we guarantee visibility of an element in the queue if we see the index updated
            tail.lazySet(writeIndex + 1);
        }

        boolean hasFinalData() {
            return tail.readIndex != tail.get();
        }

        // transfer as many items as we can from this queue to the stack, returning true if any were transferred
        @SuppressWarnings("rawtypes")
        boolean transfer(Stack<?> dst) {
            Link head = this.head.link;
            if (head == null) {
                return false;
            }

            if (head.readIndex == LINK_CAPACITY) {
                if (head.next == null) {
                    return false;
                }
                this.head.link = head = head.next;
            }

            final int srcStart = head.readIndex;
            int srcEnd = head.get();
            final int srcSize = srcEnd - srcStart;
            if (srcSize == 0) {
                return false;
            }

            final int dstSize = dst.size;
            final int expectedCapacity = dstSize + srcSize;

            if (expectedCapacity > dst.elements.length) {
                final int actualCapacity = dst.increaseCapacity(expectedCapacity);
                srcEnd = min(srcStart + actualCapacity - dstSize, srcEnd);
            }

            if (srcStart != srcEnd) {
                final DefaultHandle[] srcElems = head.elements;
                final DefaultHandle[] dstElems = dst.elements;
                int newDstSize = dstSize;
                for (int i = srcStart; i < srcEnd; i++) {
                    DefaultHandle element = srcElems[i];
                    if (element.recycleId == 0) {
                        element.recycleId = element.lastRecycledId;
                    } else if (element.recycleId != element.lastRecycledId) {
                        throw new IllegalStateException("recycled already");
                    }
                    srcElems[i] = null;

                    if (dst.dropHandle(element)) {
                        // Drop the object.
                        continue;
                    }
                    element.stack = dst;
                    dstElems[newDstSize ++] = element;
                }

                if (srcEnd == LINK_CAPACITY && head.next != null) {
                    // Add capacity back as the Link is GCed.
                    this.head.reclaimSpace(LINK_CAPACITY);
                    this.head.link = head.next;
                }

                head.readIndex = srcEnd;
                if (dst.size == newDstSize) {
                    return false;
                }
                dst.size = newDstSize;
                return true;
            } else {
                // The destination stack is full already.
                return false;
            }
        }
    }

    static final class Stack<T> {

        // we keep a queue of per-thread queues, which is appended to once only, each time a new thread other
        // than the stack owner recycles: when we run out of items in our stack we iterate this collection
        // to scavenge those that can be reused. this permits us to incur minimal thread synchronisation whilst
        // still recycling all items.
        final Recycler<T> parent;

        // We store the Thread in a WeakReference as otherwise we may be the only ones that still hold a strong
        // Reference to the Thread itself after it died because DefaultHandle will hold a reference to the Stack.
        //
        // The biggest issue is if we do not use a WeakReference the Thread may not be able to be collected at all if
        // the user will store a reference to the DefaultHandle somewhere and never clear this reference (or not clear
        // it in a timely manner).
        final WeakReference<Thread> threadRef;
        final AtomicInteger availableSharedCapacity;
        final int maxDelayedQueues;

        private final int maxCapacity;
        private final int ratioMask;
        private DefaultHandle<?>[] elements;
        private int size;
        private int handleRecycleCount = -1; // Start with -1 so the first one will be recycled.
        private WeakOrderQueue cursor, prev;
        private volatile WeakOrderQueue head;

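        // Field added for this article: records the owning thread's name so toString() can label this Stack in the log output.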
        private String stackThreadName;

        Stack(Recycler<T> parent, Thread thread, int maxCapacity, int maxSharedCapacityFactor,
              int ratioMask, int maxDelayedQueues) {
            this.parent = parent;
            threadRef = new WeakReference<Thread>(thread);
            this.maxCapacity = maxCapacity;
            availableSharedCapacity = new AtomicInteger(max(maxCapacity / maxSharedCapacityFactor, LINK_CAPACITY));
            elements = new DefaultHandle[min(INITIAL_CAPACITY, maxCapacity)];
            this.ratioMask = ratioMask;
            this.maxDelayedQueues = maxDelayedQueues;
            this.stackThreadName = thread.getName();
        }

        // Marked as synchronized to ensure this is serialized.
        synchronized void setHead(WeakOrderQueue queue) {
            queue.setNext(head);
            head = queue;
        }

        int increaseCapacity(int expectedCapacity) {
            int newCapacity = elements.length;
            int maxCapacity = this.maxCapacity;
            do {
                newCapacity <<= 1;
            } while (newCapacity < expectedCapacity && newCapacity < maxCapacity);

            newCapacity = min(newCapacity, maxCapacity);
            if (newCapacity != elements.length) {
                elements = Arrays.copyOf(elements, newCapacity);
            }

            return newCapacity;
        }

        @SuppressWarnings({ "unchecked", "rawtypes" })
        DefaultHandle<T> pop() {
            int size = this.size;
            if (size == 0) {
                if (!scavenge()) {
                    return null;
                }
                size = this.size;
            }
            size --;
            DefaultHandle ret = elements[size];
            elements[size] = null;
            if (ret.lastRecycledId != ret.recycleId) {
                throw new IllegalStateException("recycled multiple times");
            }
            ret.recycleId = 0;
            ret.lastRecycledId = 0;
            this.size = size;
            return ret;
        }

        boolean scavenge() {
            // continue an existing scavenge, if any
            if (scavengeSome()) {
                return true;
            }

            // reset our scavenge cursor
            prev = null;
            cursor = head;
            return false;
        }

        boolean scavengeSome() {
            WeakOrderQueue prev;
            WeakOrderQueue cursor = this.cursor;
            if (cursor == null) {
                prev = null;
                cursor = head;
                if (cursor == null) {
                    return false;
                }
            } else {
                prev = this.prev;
            }

            boolean success = false;
            do {
                if (cursor.transfer(this)) {
                    success = true;
                    break;
                }
                WeakOrderQueue next = cursor.next;
                if (cursor.owner.get() == null) {
                    // If the thread associated with the queue is gone, unlink it, after
                    // performing a volatile read to confirm there is no data left to collect.
                    // We never unlink the first queue, as we don't want to synchronize on updating the head.
                    if (cursor.hasFinalData()) {
                        for (;;) {
                            if (cursor.transfer(this)) {
                                success = true;
                            } else {
                                break;
                            }
                        }
                    }

                    if (prev != null) {
                        prev.setNext(next);
                    }
                } else {
                    prev = cursor;
                }

                cursor = next;

            } while (cursor != null && !success);

            this.prev = prev;
            this.cursor = cursor;
            return success;
        }

        void push(DefaultHandle<?> item) {
            Thread currentThread = Thread.currentThread();
            if (threadRef.get() == currentThread) {
                // The current Thread is the thread that belongs to the Stack, we can try to push the object now.
                pushNow(item);
            } else {
                // The current Thread is not the one that belongs to the Stack
                // (or the Thread that belonged to the Stack was collected already), we need to signal that the push
                // happens later.
                pushLater(item, currentThread);
            }
        }

        private void pushNow(DefaultHandle<?> item) {
            if ((item.recycleId | item.lastRecycledId) != 0) {
                throw new IllegalStateException("recycled already");
            }
            item.recycleId = item.lastRecycledId = OWN_THREAD_ID;

            int size = this.size;
            if (size >= maxCapacity || dropHandle(item)) {
                // Hit the maximum capacity or should drop - drop the possibly youngest object.
                return;
            }
            if (size == elements.length) {
                elements = Arrays.copyOf(elements, min(size << 1, maxCapacity));
            }

            elements[size] = item;
            this.size = size + 1;
        }

        private void pushLater(DefaultHandle<?> item, Thread thread) {
            // we don't want to have a ref to the queue as the value in our weak map
            // so we null it out; to ensure there are no races with restoring it later
            // we impose a memory ordering here (no-op on x86)
            Map<Stack<?>, WeakOrderQueue> delayedRecycled = DELAYED_RECYCLED.get();
            WeakOrderQueue queue = delayedRecycled.get(this);
            if (queue == null) {
                if (delayedRecycled.size() >= maxDelayedQueues) {
                    // Add a dummy queue so we know we should drop the object
                    delayedRecycled.put(this, WeakOrderQueue.DUMMY);
                    return;
                }
                // Check if we already reached the maximum number of delayed queues and if we can allocate at all.
                if ((queue = WeakOrderQueue.allocate(this, thread)) == null) {
                    // drop object
                    return;
                }
                delayedRecycled.put(this, queue);
            } else if (queue == WeakOrderQueue.DUMMY) {
                // drop object
                return;
            }

            queue.add(item);
        }

        boolean dropHandle(DefaultHandle<?> handle) {
            if (!handle.hasBeenRecycled) {
                if ((++handleRecycleCount & ratioMask) != 0) {
                    // Drop the object.
                    return true;
                }
                handle.hasBeenRecycled = true;
            }
            return false;
        }

        DefaultHandle<T> newHandle() {
            return new DefaultHandle<T>(this);
        }

        @Override
        public String toString() {
            return stackThreadName + "-stack";
        }
    }


}
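
For completeness: the per-thread limits can be tuned either through the protected constructors shown above or through the io.netty.recycler.* system properties read in the static initializer. A minimal sketch (the value 256 is arbitrary, chosen only for illustration):

    // A pool capped at 256 pooled objects per thread instead of the default 4 * 1024.
    private static final Recycler<PooledObj> smallPool = new Recycler<PooledObj>(256) {
        @Override
        protected PooledObj newObject(Handle<PooledObj> handle) {
            return new PooledObj("small-obj", handle);
        }
    };

    // The same kind of cap can also be applied JVM-wide at startup:
    //   -Dio.netty.recycler.maxCapacityPerThread=256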

