JDK 1.8 ConcurrentHashMap Source Code Analysis

    

    // Maximum capacity of the table
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    // Default table size; the table size must always be a power of two
    private static final int DEFAULT_CAPACITY = 16;

    // Largest possible array size
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    // Default concurrency level of the table (kept for compatibility with older versions)
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    // Default load factor
    private static final float LOAD_FACTOR = 0.75f;

    // Bin-length threshold at which a linked list is converted to a red-black tree
    static final int TREEIFY_THRESHOLD = 8;

    // Bin-length threshold at which a red-black tree is converted back to a linked list
    static final int UNTREEIFY_THRESHOLD = 6;

    // Minimum table capacity required before bins may be converted to red-black trees
    static final int MIN_TREEIFY_CAPACITY = 64;

    // Minimum number of bins transferred by a thread in one resize step
    private static final int MIN_TRANSFER_STRIDE = 16;

    // Number of bits used to generate the resize stamp stored in sizeCtl
    private static int RESIZE_STAMP_BITS = 16;

    // Maximum number of threads allowed to participate in a resize
    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;

    // Shift applied to the resize stamp when it is recorded in sizeCtl
    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

    // Special node hash values
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    // Number of available CPUs
    static final int NCPU = Runtime.getRuntime().availableProcessors();

    // Fields declared only for serialization compatibility (with the old Segment-based format)
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE)
    };
    
    // The node array holding all the data (not serialized directly).
    // Everything is stored in table, which is resized as needed.
    // table[i] can hold one of three node types:
    //   - TreeBin: wrapper node for a red-black tree bucket
    //   - ForwardingNode: node stored during a resize; one of the keys to concurrent resizing
    //   - Node: ordinary node, the head of a linked-list bucket
    transient volatile Node<K,V>[] table;

    // The next node array (not serialized directly); nextTable holds the data during a resize
    // and is set back to null once the resize completes.
    private transient volatile Node<K,V>[] nextTable;

    // Base counter value, used when there is no contention
    private transient volatile long baseCount;

    // sizeCtl controls table initialization and resizing:
    //   if the table has not been initialized yet:
    //       = 0   no initial capacity was specified (the default will be used)
    //       > 0   the initial capacity to allocate (not the raw constructor argument, but its power-of-two adjustment)
    //       = -1  the table is currently being initialized
    //   else if nextTable is null (no resize in progress):
    //       if a resize failed (e.g. out of memory, or table.length * 2 > Integer.MAX_VALUE):
    //           = Integer.MAX_VALUE   (no further resizing!)
    //       else:
    //           = table.length * 0.75 (the next resize threshold is 0.75 times the table capacity)
    //   else (a resize is in progress):
    //       = -(1 + N), a negative value tracking the resizing threads
    private transient volatile int sizeCtl;

    // The next table index (plus one) to split off when claiming resize work
    private transient volatile int transferIndex;

    // Spinlock (locked via CAS) used when resizing and/or creating CounterCells
    private transient volatile int cellsBusy;

    // The CounterCell table; when non-null, its size is a power of two
    private transient volatile CounterCell[] counterCells;

    // Cached views
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;
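
    The sizeCtl states summarized above can be made concrete with a small helper. The sketch below is hypothetical (it is not part of the JDK source; the method name describeSizeCtl and its boolean parameters are made up for illustration) and only restates the cases listed in the comment:

    // Hypothetical helper (not JDK code) that restates the sizeCtl cases described above.
    static String describeSizeCtl(int sc, boolean tableInitialized, boolean resizeInProgress) {
        if (!tableInitialized) {
            if (sc == -1) return "table is currently being initialized";
            if (sc == 0)  return "uninitialized; the default capacity will be used";
            return "uninitialized; capacity to allocate = " + sc;
        }
        if (resizeInProgress)
            return "resizing; the low 16 bits of " + sc + " encode (1 + number of resizing threads)";
        return (sc == Integer.MAX_VALUE)
            ? "resizing disabled (a previous resize failed or capacity is maxed out)"
            : "next resize threshold = " + sc + " (0.75 * table.length)";
    }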
    
static class Node<K,V> implements Map.Entry<K,V> {
    final int hash;            // the node's hash, final
    final K key;               // the key
    volatile V val;            // the value
    volatile Node<K,V> next;   // the next node in the bucket's list

    // Constructor
    Node(int hash, K key, V val, Node<K,V> next) {
        this.hash = hash;
        this.key = key;
        this.val = val;
        this.next = next;
    }

    public final K getKey()       { return key; }
    public final V getValue()     { return val; }
    public final int hashCode()   { return key.hashCode() ^ val.hashCode(); }
    public final String toString(){ return key + "=" + val; }
    public final V setValue(V value) {
        throw new UnsupportedOperationException();
    }

    public final boolean equals(Object o) {
        Object k, v, u; Map.Entry<?,?> e;
        return ((o instanceof Map.Entry) &&
             (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
             (v = e.getValue()) != null &&
             (k == key || k.equals(key)) &&
             (v == (u = val) || v.equals(u)));
    }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         */
    Node<K,V> find(int h, Object k) {
        Node<K,V> e = this;
        if (k != null) {
            do {
                K ek;
                if (e.hash == h &&
                    ((ek = e.key) == k || (ek != null && k.equals(ek))))
                   return e;
            } while ((e = e.next) != null);
        }
        return null;
    }
}

    

    // Unsafe mechanics
    private static final sun.misc.Unsafe U;
    private static final long SIZECTL;
    private static final long TRANSFERINDEX;
    private static final long BASECOUNT;
    private static final long CELLSBUSY;
    private static final long CELLVALUE;
    private static final long ABASE;
    private static final int ASHIFT;

    static {
        try {
            U = sun.misc.Unsafe.getUnsafe();
            Class<?> k = ConcurrentHashMap.class;
            // Memory offset of the sizeCtl field within a ConcurrentHashMap instance
            SIZECTL = U.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (k.getDeclaredField("transferIndex"));
            BASECOUNT = U.objectFieldOffset
                (k.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (k.getDeclaredField("cellsBusy"));
            Class<?> ck = CounterCell.class;
            CELLVALUE = U.objectFieldOffset
                (ck.getDeclaredField("value"));
            Class<?> ak = Node[].class;
            // Base offset of the first element of a Node[] array
            ABASE = U.arrayBaseOffset(ak);
            // arrayIndexScale returns the array's scale factor, i.e. the size in bytes of each element;
            // combined with arrayBaseOffset it lets us compute the memory address of any array element.
            int scale = U.arrayIndexScale(ak);
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            throw new Error(e);
        }
    }
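
    ABASE and ASHIFT computed in the static block above are what the volatile table accessors used throughout the methods below (tabAt(), casTabAt(), setTabAt()) rely on. For reference, in the JDK 8 source these helpers look essentially like this:

    // Volatile read / CAS / volatile write of table elements, addressed via Unsafe
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }

    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }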
    // Default no-arg constructor
    public ConcurrentHashMap() {}
    

    // ConcurrentHashMap(int) constructor
    public ConcurrentHashMap(int initialCapacity) {
        // Reject negative initial capacities
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        // Round up to a power of two large enough for the requested capacity (1.5 * initialCapacity + 1)
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        // Record the capacity in sizeCtl
        this.sizeCtl = cap;
    }
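
    The tableSizeFor() helper used above rounds its argument up to the nearest power of two (capped at MAXIMUM_CAPACITY). For reference, the JDK 8 implementation is essentially the following bit-smearing trick (the same one HashMap uses):

    // Returns the smallest power of two >= c
    private static final int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    So new ConcurrentHashMap(16), for example, stores sizeCtl = tableSizeFor(16 + 8 + 1) = 32 rather than 16.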


    // ConcurrentHashMap(int, float) constructor
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        // Delegates to the (int, float, int) constructor with concurrencyLevel = 1
        this(initialCapacity, loadFactor, 1);
    }

    // ConcurrentHashMap(int, float, int) constructor
    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        // Validate loadFactor, initialCapacity and concurrencyLevel
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        // Estimate the table size needed to hold initialCapacity elements at the given load factor
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        // Round the estimate up to the smallest power of two >= size (capped at MAXIMUM_CAPACITY)
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        // Record the capacity in sizeCtl
        this.sizeCtl = cap;
    }

    
    // ConcurrentHashMap(Map<? extends K, ? extends V>) constructor
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        // Initialize sizeCtl with DEFAULT_CAPACITY (16)
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }
    The putAll() method
 

    public void putAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size());
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putVal(e.getKey(), e.getValue(), false);
    }

    private final void tryPresize(int size) {
        // If size is already at least half of MAXIMUM_CAPACITY, jump straight to MAXIMUM_CAPACITY;
        // otherwise round 1.5 * size + 1 up to the next power of two
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        // Loop while sizeCtl is non-negative, i.e. the table is not being initialized
        // and no other thread is currently resizing
        while ((sc = sizeCtl) >= 0) {
            Node<K,V>[] tab = table; int n;
            // If the table has not been initialized yet
            if (tab == null || (n = tab.length) == 0) {
                n = (sc > c) ? sc : c;
                // CAS sizeCtl to -1 to tell other threads that the table is being initialized
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            @SuppressWarnings("unchecked")
                            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                            table = nt;
                            // Compute the next resize threshold (0.75 * n)
                            sc = n - (n >>> 2);
                        }
                    } finally {
                        // Store the threshold in sizeCtl
                        sizeCtl = sc;
                    }
                }
            }
            // Stop if the target capacity does not exceed the current threshold,
            // or the table has already reached its maximum capacity
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break;
            // Otherwise resize; this mirrors the second half of addCount()
            else if (tab == table) {
                /* resizeStamp(n) generates a resize stamp for the table; every distinct n has its own stamp.
                 *
                 *   static final int resizeStamp(int n) {
                 *       return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
                 *   }
                 *
                 * Integer.numberOfLeadingZeros(n) returns the number of zero bits preceding the highest
                 * (leftmost) one bit in the two's-complement representation of n. For n = 16 (binary 0001 0000)
                 * it returns 27; since n is always a power of two here, different n give different results.
                 * The result is OR-ed with (1 << (RESIZE_STAMP_BITS - 1)), i.e. 2^15 | numberOfLeadingZeros(n),
                 * so once the stamp is shifted left by 16 the sign bit is set and the value is always negative.
                 */
                int rs = resizeStamp(n);
                if (sc < 0) {
                    Node<K,V>[] nt;
                    /* 1. First check: shifting sc right by RESIZE_STAMP_SHIFT exposes the high
                     *    RESIZE_STAMP_BITS bits (the stamp); comparing them with rs verifies that the
                     *    resize in progress was started for the same n, i.e. the same old capacity.
                     * 2. Second and third checks: whether the number of helping threads has already
                     *    reached MAX_RESIZERS, the maximum number of resize threads.
                     * 3. Fourth and fifth checks: make sure the initialization done in transfer() has completed.
                     */
                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                        transferIndex <= 0)
                        break;
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                        transfer(tab, nt);
                }
                /* If no thread is resizing yet, CAS sizeCtl to start the resize; the new value is
                 * rs shifted left by RESIZE_STAMP_SHIFT, plus 2. As noted above, the shifted stamp is
                 * always negative, and that negative value signals that one thread is now resizing.
                 * At this point the high RESIZE_STAMP_BITS bits of sizeCtl hold the stamp and the low
                 * RESIZE_STAMP_SHIFT bits track the resizing threads (count + 1).
                 */
                else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                             (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);
            }
        }
    }
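
    To make the stamp arithmetic above concrete, here is a small standalone sketch (not JDK code; it assumes RESIZE_STAMP_BITS = 16, as in the constants at the top) that reproduces resizeStamp() and shows the resulting sizeCtl value for n = 16:

    // Standalone demo of the resize stamp arithmetic used in tryPresize() / addCount()
    public class ResizeStampDemo {
        static final int RESIZE_STAMP_BITS = 16;
        static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

        static int resizeStamp(int n) {
            return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
        }

        public static void main(String[] args) {
            int n = 16;
            int rs = resizeStamp(n);                  // 27 | 0x8000 = 32795 (0x801B)
            int sc = (rs << RESIZE_STAMP_SHIFT) + 2;  // 0x801B0002, a negative int
            System.out.println(rs);                   // 32795
            System.out.println(sc);                   // -2145714174: stamp in the high 16 bits,
                                                      // (1 + number of resizing threads) = 2 in the low 16 bits
        }
    }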
    The initTable() method


    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        // Loop while the table is still null or empty, re-checking in case another thread initializes it
        while ((tab = table) == null || tab.length == 0) {
            // Another thread is initializing or transferring; yield the CPU time slice and keep spinning
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            // CAS sizeCtl to -1 to indicate that this thread is performing the initialization
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if ((tab = table) == null || tab.length == 0) {
                        // Table length: the recorded capacity if one was set, otherwise DEFAULT_CAPACITY
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        // Set the resize threshold to 0.75 * n
                        sc = n - (n >>> 2);
                    }
                } finally {
                    // Publish the threshold (0.75 * n) into sizeCtl
                    sizeCtl = sc;
                }
                break;
            }
        }
        return tab;
    }

 

    The transfer() method (not analyzed in detail here)

    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        // Decide how many buckets each thread handles based on the number of CPUs,
        // so that having too many resize threads does not hurt performance
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                @SuppressWarnings("unchecked")
                Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
                nextTab = nt;
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;
                return;
            }
            nextTable = nextTab;
            transferIndex = n;
        }
        int nextn = nextTab.length;
        ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
        boolean advance = true;
        boolean finishing = false; // to ensure sweep before committing nextTab
        for (int i = 0, bound = 0;;) {
            Node<K,V> f; int fh;
            while (advance) {
                int nextIndex, nextBound;
                if (--i >= bound || finishing)
                    advance = false;
                else if ((nextIndex = transferIndex) <= 0) {
                    i = -1;
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                int sc;
                if (finishing) {
                    nextTable = null;
                    table = nextTab;
                    sizeCtl = (n << 1) - (n >>> 1);
                    return;
                }
                if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                    if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                        return;
                    finishing = advance = true;
                    i = n; // recheck before commit
                }
            }
            else if ((f = tabAt(tab, i)) == null)
                advance = casTabAt(tab, i, null, fwd);
            else if ((fh = f.hash) == MOVED)
                advance = true; // already processed
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        Node<K,V> ln, hn;
                        if (fh >= 0) {
                            int runBit = fh & n;
                            Node<K,V> lastRun = f;
                            for (Node<K,V> p = f.next; p != null; p = p.next) {
                                int b = p.hash & n;
                                if (b != runBit) {
                                    runBit = b;
                                    lastRun = p;
                                }
                            }
                            if (runBit == 0) {
                                ln = lastRun;
                                hn = null;
                            }
                            else {
                                hn = lastRun;
                                ln = null;
                            }
                            for (Node<K,V> p = f; p != lastRun; p = p.next) {
                                int ph = p.hash; K pk = p.key; V pv = p.val;
                                if ((ph & n) == 0)
                                    ln = new Node<K,V>(ph, pk, pv, ln);
                                else
                                    hn = new Node<K,V>(ph, pk, pv, hn);
                            }
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                        else if (f instanceof TreeBin) {
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> lo = null, loTail = null;
                            TreeNode<K,V> hi = null, hiTail = null;
                            int lc = 0, hc = 0;
                            for (Node<K,V> e = t.first; e != null; e = e.next) {
                                int h = e.hash;
                                TreeNode<K,V> p = new TreeNode<K,V>
                                    (h, e.key, e.val, null, null);
                                if ((h & n) == 0) {
                                    if ((p.prev = loTail) == null)
                                        lo = p;
                                    else
                                        loTail.next = p;
                                    loTail = p;
                                    ++lc;
                                }
                                else {
                                    if ((p.prev = hiTail) == null)
                                        hi = p;
                                    else
                                        hiTail.next = p;
                                    hiTail = p;
                                    ++hc;
                                }
                            }
                            ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K,V>(lo) : t;
                            hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K,V>(hi) : t;
                            setTabAt(nextTab, i, ln);
                            setTabAt(nextTab, i + n, hn);
                            setTabAt(tab, i, fwd);
                            advance = true;
                        }
                    }
                }
            }
        }
    }


 

    // The putVal() method
    
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        
        // Reject null keys and null values
        if (key == null || value == null) throw new NullPointerException();
        // Compute the spread hash
        int hash = spread(key.hashCode());
        int binCount = 0;
        // Table initialization and casTabAt() rely on compareAndSwapInt / compareAndSwapObject;
        // if another thread is modifying tab those attempts fail, so we spin in this for loop
        // and retry until the insertion succeeds
        for (Node<K,V>[] tab = table;;) {
            // f: node at the target slot; n: tab.length; i: target slot (n - 1) & hash; fh: f.hash
            Node<K,V> f; int n, i, fh;
            // If the table is null or has length 0, initialize it first
            if (tab == null || (n = tab.length) == 0)
                // Lazy initialization
                tab = initTable();
            /*
             * i = (n - 1) & hash is equivalent to i = hash % n (because n is a power of two).
             * Take the node at table[i] and call it f. Two cases follow:
             * 1. table[i] == null (the slot is empty, no collision): store the new node there
             *    directly with a CAS; if the CAS succeeds, exit the loop.
             * 2. table[i] != null (the slot already holds another node, i.e. a collision).
             */
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            // If the hash of the node at table[i] is MOVED, a resize is in progress: help with it
            else if ((fh = f.hash) == MOVED)
                // Help transfer buckets to the new table
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                // A very neat design: the intrinsic lock synchronizes on f, and since f is the head
                // of the specific bucket tab[i], this locks that whole bucket's list, much in the
                // spirit of the old segment locking; reads, by contrast, are protected by
                // volatile/CAS accesses rather than the lock
                synchronized (f) {
                    // Double-check that the node at index i has not changed
                    if (tabAt(tab, i) == f) {
                        // fh >= 0 means f is an ordinary linked-list node: hashes produced by
                        // spread() are always non-negative
                        if (fh >= 0) {
                            binCount = 1; // counts the nodes in this bucket, used later for the treeify decision
                            /*
                             * Walk the list: if the key is already present, update its value
                             * (unless onlyIfAbsent) and break out; otherwise append a new node
                             * at the tail and break out.
                             */
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                // Reached the tail of the list: append the new key/value as a new node
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        // If the bucket head is a TreeBin, i.e. a red-black tree
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            // Set binCount (fixed at 2 for tree bins)
                            binCount = 2;
                            // Insert the hash, key and value into the red-black tree
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                // After the insertion, if we went through a linked-list bucket,
                // check whether that bucket now needs to be converted to a tree
                if (binCount != 0) {
                    // Compare binCount with the treeification threshold TREEIFY_THRESHOLD
                    if (binCount >= TREEIFY_THRESHOLD)
                        // Convert the bucket at index i to a red-black tree
                        // (treeifyBin() resizes instead if the table is still smaller than MIN_TREEIFY_CAPACITY)
                        treeifyBin(tab, i);
                    // If an existing value was replaced, return the old value
                    if (oldVal != null)
                        return oldVal;
                    break;
                }
            }
        }
        // Update the element count and check whether a resize is needed
        addCount(1L, binCount);
        return null;
    }
putVal(K key, V value, boolean onlyIfAbsent) does the following:
1. Check key and value for null; if either is null, throw an exception, otherwise go to step 2.
2. Enter the infinite for loop and go to step 3.
3. If the table has not been initialized, call initTable() to initialize it and go back to step 2; otherwise go to step 4.
4. Compute the slot i in the table from the key's hash and let f = table[i]. Depending on f there are the following cases:
   1) table[i] == null (the slot is empty, no collision): store the new node there with a CAS;
      if the CAS succeeds, exit the loop.
   2) table[i] != null (the slot already holds a node, i.e. a collision); collision handling again has two cases:
      2.1) If the hash of table[i] equals MOVED, a resize is in progress, so help with the resize.
      2.2) Otherwise (the hash is not MOVED): if table[i] is a linked-list node, insert the new node into that list;
           if table[i] is a tree bin, insert it into the tree. After a successful insertion, go to step 5.
5. If table[i] was a linked-list bucket, check whether that bucket now needs to be converted to a tree;
   if so, call treeifyBin() to convert it.
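
As a quick illustration of the onlyIfAbsent flag described above, the following hypothetical snippet (not part of the analyzed source) shows the difference between put() (onlyIfAbsent = false) and putIfAbsent() (onlyIfAbsent = true), both of which delegate to putVal():

    import java.util.concurrent.ConcurrentHashMap;

    public class PutValDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("a", 1);                     // putVal(..., onlyIfAbsent = false): insert
            map.put("a", 2);                     // overwrites the existing value
            System.out.println(map.get("a"));    // 2
            map.putIfAbsent("a", 3);             // putVal(..., onlyIfAbsent = true): keeps the existing value
            System.out.println(map.get("a"));    // 2
        }
    }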

 


    The addCount() method


    private final void addCount(long x, int check) {
        CounterCell[] as; long b, s;
        // If counterCells is already in use, or the CAS update of baseCount fails
        // (i.e. there is contention), fall back to updating a CounterCell instead
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { // counterCells == null: first try a plain CAS on baseCount
            CounterCell a; long v; int m;
            boolean uncontended = true;
            if (as == null || (m = as.length - 1) < 0 ||
                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                // If counterCells has not been initialized yet, or the CAS update of this
                // thread's CounterCell fails, call fullAddCount(), which takes care of
                // initializing counterCells and updating the count
                fullAddCount(x, uncontended);
                return;
            }
            if (check <= 1)
                return;
            // Compute the total element count, used for the resize check below
            s = sumCount();
        }
        // check is the binCount passed in; the resize check only runs after a successful insertion
        if (check >= 0) {
            Node<K,V>[] tab, nt; int n, sc;
            // Resize-check loop
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   (n = tab.length) < MAXIMUM_CAPACITY) {
                int rs = resizeStamp(n);
                // A negative sizeCtl means another thread is already resizing
                if (sc < 0) {
                    // Some thread is already doing the resize work.
                    // Check that the resize in progress is for the same old capacity n, i.e. the stamp
                    // in sizeCtl matches rs and sizeCtl/n were updated together. Conditions 2 and 3
                    // should not normally be true with the current RESIZE_STAMP_BITS value (corrections
                    // welcome). Conditions 4 and 5 make sure the nextTable initialization in transfer()
                    // has completed.
                    if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                        sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                        transferIndex <= 0)
                        break;
                    // A new thread joins the resize, so the thread count in sizeCtl is incremented by 1
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                        transfer(tab, nt);
                }
                // Taken by the thread that first detects the need to resize. The initial value is
                // (rs << RESIZE_STAMP_SHIFT) + 2; the +2 is nothing special, it simply fits the
                // convention of encoding -(1 + number of resizing threads).
                else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                             (rs << RESIZE_STAMP_SHIFT) + 2))
                    transfer(tab, null);
                // Recompute the count so the loop can check whether another resize is needed
                s = sumCount();
            }
        }
    }
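
    sumCount(), called twice above, simply adds baseCount and every non-null CounterCell; this striped counting is what lets addCount() avoid contending on a single counter. For reference, the JDK 8 implementation is essentially:

    final long sumCount() {
        CounterCell[] as = counterCells; CounterCell a;
        long sum = baseCount;
        if (as != null) {
            for (int i = 0; i < as.length; ++i) {
                if ((a = as[i]) != null)
                    sum += a.value;
            }
        }
        return sum;
    }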


    The get() method


    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        // Compute the spread hash
        int h = spread(key.hashCode());
        // Make sure the table is non-null and non-empty and the target bucket is non-null
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            // Always check the head node first:
            // if the head of the bucket is already the target key, return its value directly
            if ((eh = e.hash) == h) {
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            // eh < 0 means e is a special node; for example eh == MOVED (-1) means e is a
            // ForwardingNode: this bucket has already been moved by a resizing thread, so the
            // lookup is delegated to e.find(), which searches the new nextTable
            // (tree bins, with eh == TREEBIN, are handled the same way through their own find())
            else if (eh < 0)
                return (p = e.find(h, key)) != null ? p.val : null;
            // Otherwise walk the rest of the list, comparing hash and key
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }
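
    Both putVal() and get() compute the slot hash with spread(). For reference, the JDK 8 implementation essentially XORs the high 16 bits into the low 16 bits and masks off the sign bit with HASH_BITS, which is why ordinary node hashes are always non-negative:

    static final int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }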

 

    public V remove(Object key) {
        return replaceNode(key, null, null);
    }

    final V replaceNode(Object key, V value, Object cv) {
        // Compute the spread hash
        int hash = spread(key.hashCode());
        // If another thread is modifying tab our attempt may fail, so spin in this for loop until the operation completes
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            // If the table is empty or the target bucket is null, there is nothing to remove
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break;
            // If another thread is resizing this bucket, help with the transfer first;
            // the removal can only proceed once the transfer of this bucket is done
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                boolean validated = false;
                // Lock the bucket head, then search the bucket for a node matching (key, value)
                synchronized (f) {
                    // Re-check that table[i] has not been changed by another thread
                    if (tabAt(tab, i) == f) {
                        // Linked-list bucket
                        if (fh >= 0) {
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                // Found the node whose key matches
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null) // value non-null: this is a replace, update in place
                                            e.val = value;
                                        else if (pred != null)
                                            pred.next = e.next;
                                        else
                                            setTabAt(tab, i, e.next);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        // If the bucket head is a TreeBin, i.e. a red-black tree
                        else if (f instanceof TreeBin) {
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1); // a node was removed, so decrement the element count
                        return oldVal;
                    }
                    break;
                }
            }
        }
        return null;
    }
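
The public removal and replacement methods all funnel into replaceNode(), with cv acting as the expected current value and value as the replacement (null meaning "delete"). A small hypothetical usage sketch (not part of the analyzed source):

    import java.util.concurrent.ConcurrentHashMap;

    public class ReplaceNodeDemo {
        public static void main(String[] args) {
            ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
            map.put("a", 1);
            map.remove("a");                             // replaceNode("a", null, null): unconditional delete
            map.put("b", 1);
            System.out.println(map.remove("b", 2));      // conditional delete -> false, current value is 1, not 2
            System.out.println(map.replace("b", 1, 3));  // conditional replace -> true, value updated in place
            System.out.println(map.get("b"));            // 3
        }
    }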

 
