ConcurrentHashMap主要的核心设计有:
* 数据结构方面:相对于1.7,抛弃了分段锁Segment,采用了数组+链表+红黑树的存储结构
* 并发安全方面:写入时先采用CAS乐观锁尝试,竞争失败后对桶头结点采用Synchronized悲观锁;读取时不加锁,依靠volatile保证可见性。
从两个函数看源码:
添加函数:putVal
/**
* @param key
* @param value
* @param onlyIfAbsent: 如果数据key存在是否替换,false-替换,默认替换
* 字面意思:只有在缺少数据时,更改;absent:缺席
* @return
*/
private V putVal(K key, V value, boolean onlyIfAbsent) {
if (key == null || value == null) {
throw new NullPointerException();
}
int hash = spread(key.hashCode()); //获取hash值
/**要插入的元素所在桶的元素个数;
* 三种情况:
* 如果未插入成功:0
* 如果是树,设定为固定值2
* 如果是链表:>0,用于判断是否扩容
*/
int binCount = 0;
// 自旋锁加入元素
for (Node<K, V>[] tab = table; ; ) {
Node<K, V> f;
int n, i, fh;
/**
* 初始化数组
*/
if (tab == null || (n = tab.length) == 0) {
tab = initTable();
} else if ((f = tabAt(tab, i = (n - 1) & hash)) != null) { tabAt方法通过CAS获取i的值
// 定义一个原来的旧值,用于返回
V oldVal = null;
// 如果数组中所在桶还没有元素,直接插入
// 注意由于n是2的倍数,所以(n-1)&hash求得就是hash对n的余数
if (casTabAt(tab, i, null, new Node<K, V>(hash, key, value, null))) {
// 通过CAS插入,成功,则结果;失败,测继续循环
break;
}
if ((fh = f.hash) == MOVED) {
// 如果要插入的元素所在的桶的第一个元素的hash是MOVED,则当前线程帮忙一起迁移元素
tab = helpTransfer(tab, f);
} else {
synchronized (f) {
if ((tabAt(tab, i) == f)) {
// 如果第一个元素的hash值fh大于0(说明不是在迁移,也不是树)
// 那就是桶中的元素使用的是链表方式存储
if (fh >= 0) {
binCount = 1; // 元素个数初始化为1
for (Node<K, V> e = f; ; ++binCount) {
K ek;
if ((e.hash == hash) && ((ek = e.key) == key || (ek != null && key.equals(ek)))) {
// hash值相同 且 节点key相等
oldVal = e.val;
if (!onlyIfAbsent) {
e.val = value;
}
break;
}
// 否则查找写一个节点
Node<K, V> pred = e;
if ((e = e.next) == null) {
pred.next = new Node<>(hash, key, value, null);
break;
}
}
} else if (f instanceof TreeBin) {
// 元素是树
Node<K, V> p;
binCount = 2;
if ((p = ((TreeBin<K, V>) f).putTreeVal(hash, key, value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent) {
p.val = value;
}
}
}
}
}
}
if (binCount != 0) {
if (binCount > TREEIFY_THRESHOLD) {
treeifyBin(tab, i);
}
if (oldVal != null) {
return oldVal;
}
break;
}
}
}
// 成功插入元素,元素个数加1(是否要扩容在这个里面)
addCount(1L, binCount);
return null;
}
过程:首先判断当前数组是否已初始化,未初始化则先调用initTable初始化数组;之后通过tabAt(volatile读)获取当前key对应桶的头结点;若桶为空,通过CAS操作插入新节点,成功则退出;否则判断当前桶是否正在迁移,如果是,当前线程帮助迁移;否则表明有线程在竞争该桶,采取悲观锁,对桶头结点加synchronized锁后插入或修改元素。最后将元素个数+1(addCount,其中包含是否扩容的判断)。
获取元素:get
// Note: get never takes a lock anywhere.
// Volatile fields plus Unsafe volatile reads are enough for safe reads.
public V get(Object key) {
Node<K, V>[] tab;
Node<K, V> e, p;
int n, eh;
K ek;
// Spread the key's hashCode the same way put does
int h = spread(key.hashCode());
// Proceed only if the table exists and the target bin is non-empty
if ((tab = table) != null && (n = tab.length) > 0 &&
(e = tabAt(tab, (n - 1) & h)) != null) {
// 1. Check the bin's head node first
if ((eh = e.hash) == h) {
// Head node is the match: return immediately
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
}
// A negative hash marks a special node:
// eh == -1: a ForwardingNode — the bin is being migrated; its find()
//   looks the key up in nextTable instead.
// eh == -2: a TreeBin — its find() walks the red-black tree (which may
//   be rotating/recoloring, so find() uses an internal read/write lock).
// eh >= 0: an ordinary linked list — just traverse it below.
else if (eh < 0) {
// 2. resizing (ForwardingNode), or
// 3. tree bin (TreeBin)
// dispatch through the Node.find override of the special node
return (p = e.find(h, key)) != null ? p.val : null;
}
// 4. Linked list (eh >= 0): linear scan from the second node on
while ((e = e.next) != null) {
if (e.hash == h &&
((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
return null;
}
过程:首先通过tabAt(volatile读)获取hash值对应桶的头结点,若头结点即为目标则直接返回;否则若头结点hash为负,说明该桶正在迁移(ForwardingNode)或是红黑树(TreeBin),调用对应节点的find方法查找;否则数据存储在链表中,直接遍历链表查找。
详细实现细节:
package concurrent;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.Map;
import java.util.concurrent.locks.LockSupport;
/**
* @description: 并发包原理实现
* @author: zoutai
* @create: 2019/4/14
**/
public class ConcurrentHashMapDemo<K, V> {
private static final long serialVersionUID = 7249069246763182397L;
// Maximum and default table capacities
private static final int MAXIMUM_CAPACITY = 1 << 30;
private static final int DEFAULT_CAPACITY = 16;
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
private static final float LOAD_FACTOR = 0.75f;
// Thresholds for converting a bin between linked list and red-black tree
static final int TREEIFY_THRESHOLD = 8;
static final int UNTREEIFY_THRESHOLD = 6;
static final int MIN_TREEIFY_CAPACITY = 64;
// The following control how threads participate in a resize
// Minimum number of buckets each resizing thread claims per round
private static final int MIN_TRANSFER_STRIDE = 16;
private static int RESIZE_STAMP_BITS = 16;
// Maximum number of threads that can help with one resize (2^16 - 1)
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
// Node hash meaning "this bin has been migrated" (ForwardingNode)
static final int MOVED = -1; // hash for forwarding nodes
// Node hash marking a TreeBin (holder of a red-black tree root)
static final int TREEBIN = -2; // hash for roots of trees
static final int RESERVED = -3; // hash for transient reservations
// Mask keeping the sign bit 0 so normal node hashes stay non-negative
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
static final int NCPU = Runtime.getRuntime().availableProcessors(); // CPU count
// Basic hash-bin node, used for linked-list bins.
static class Node<K, V> implements Map.Entry<K, V> {
final int hash;
final K key;
// val and next are volatile: lock-free readers (get) rely on visibility
volatile V val;
volatile Node<K, V> next;
Node(int hash, K key, V val, Node<K, V> next) {
this.hash = hash;
this.key = key;
this.val = val;
this.next = next;
}
@Override
public K getKey() {
return key;
}
@Override
public V getValue() {
return val;
}
// setValue is forbidden: values are read via get()/e.val, and written
// only under the bin lock (or via CAS), never through the Map.Entry view.
@Override
public V setValue(V value) {
throw new UnsupportedOperationException();
}
@Override
public final int hashCode() {
return key.hashCode() ^ val.hashCode();
}
@Override
public final String toString() {
return key + "=" + val;
}
@Override
public final boolean equals(Object o) {
Object k, v, u;
Map.Entry<?, ?> e;
return ((o instanceof Map.Entry) &&
(k = (e = (Map.Entry<?, ?>) o).getKey()) != null &&
(v = e.getValue()) != null &&
(k == key || k.equals(key)) &&
(v == (u = val) || v.equals(u)));
}
// Template method for lookups: TreeBin/ForwardingNode override it;
// the default implementation walks the linked list from this node.
Node<K, V> find(int h, Object k) {
Node<K, V> e = this;
if (k != null) {
do {
K ek;
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
} while ((e = e.next) != null);
}
return null;
}
}
// transient: the table itself is not serialized; entries would be
// written out individually instead.
// The bucket array; its length is always a power of two.
transient volatile Node<K, V>[] table;
// The next (doubled) table used during a resize; non-null only while resizing.
private transient volatile Node<K, V>[] nextTable;
// Base element counter, updated via CAS; used when there is no contention.
private transient volatile long baseCount;
/**
 * Table initialization and resize control flag:
 * 0  - table not yet initialized (default)
 * >0 - before init: requested capacity; after init: resize threshold (0.75n)
 * -1 - the table is being initialized
 * <-1 - resizing: high 16 bits hold the resize stamp, low 16 bits hold
 *      (number of resizing threads + 1) — see addCount/transfer below
 */
private transient volatile int sizeCtl;
// Next bucket index (counting down) still to be claimed during a resize.
private transient volatile int transferIndex;
// Spinlock flag (0/1, via CAS) guarding creation/resizing of counterCells.
private transient volatile int cellsBusy;
// Striped cells for the element count under contention (LongAdder-style);
// the total size is baseCount plus the sum of all cells (see sumCount).
private transient volatile CounterCell[] counterCells;
/** Creates an empty map with the default initial capacity (16, created lazily). */
public ConcurrentHashMapDemo() {
}
/**
 * Creates an empty map sized for the given number of elements: the request
 * is scaled by 1.5 and rounded up to a power of two, then stashed in
 * sizeCtl until the table is lazily created by initTable.
 */
public ConcurrentHashMapDemo(int initialCapacity) {
if (initialCapacity < 0) {
throw new IllegalArgumentException();
}
int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
MAXIMUM_CAPACITY :
tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
this.sizeCtl = cap;
}
/** Creates an empty map sized for initialCapacity elements at the given load factor. */
public ConcurrentHashMapDemo(int initialCapacity, float loadFactor) {
this(initialCapacity, loadFactor, 1);
}
/**
 * @param concurrencyLevel kept for 1.7 API compatibility; only used as a
 *                         lower bound on the initial capacity here
 */
public ConcurrentHashMapDemo(int initialCapacity,
float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) {
throw new IllegalArgumentException();
}
if (initialCapacity < concurrencyLevel) {
initialCapacity = concurrencyLevel;
}
long size = (long) (1.0 + (long) initialCapacity / loadFactor);
int cap = (size >= (long) MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY : tableSizeFor((int) size);
this.sizeCtl = cap;
}
/**
 * Returns the smallest power of two >= c, clamped to MAXIMUM_CAPACITY
 * (returns 1 for any c <= 1, including negative inputs).
 */
private int tableSizeFor(int c) {
    // -1 >>> numberOfLeadingZeros(c - 1) smears the highest set bit of
    // (c - 1) into every lower position in one step — the same mask the
    // original built with five shift-or rounds.
    int mask = -1 >>> Integer.numberOfLeadingZeros(c - 1);
    if (mask < 0) {
        return 1; // c <= 1: (c - 1) <= 0 yields a negative mask
    }
    return (mask >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : mask + 1;
}
/**
 * Maps key to value, replacing any previous mapping.
 *
 * @return the previous value, or null if there was none
 * @throws NullPointerException if key or value is null (from putVal)
 */
public V put(K key, V value) {
return putVal(key, value, false);
}
/**
 * Inserts a key/value pair.
 *
 * @param key          the key, must not be null
 * @param value        the value, must not be null
 * @param onlyIfAbsent if true, an existing mapping is NOT replaced
 *                     (literally: only change when the key is absent)
 * @return the previous value mapped to key, or null if there was none
 */
private V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) {
        throw new NullPointerException();
    }
    int hash = spread(key.hashCode()); // spread the hash bits
    // Number of nodes in the target bin:
    //  0  - insertion did not go through a populated bin
    //  2  - fixed value used for tree bins
    //  >0 - list length, used to decide whether to treeify
    int binCount = 0;
    // Spin until the insertion succeeds
    for (Node<K, V>[] tab = table; ; ) {
        Node<K, V> f;
        int n, i, fh;
        if (tab == null || (n = tab.length) == 0) {
            // Lazily initialize the table on first use
            tab = initTable();
        } else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            // BUG FIX: this branch must fire when the bin is EMPTY (== null);
            // the original tested != null, so an empty bin spun forever and a
            // populated bin ran a doomed casTabAt(..., null, ...) first.
            // tabAt does a volatile read of tab[i]; since n is a power of two,
            // (n - 1) & hash is hash mod n.
            if (casTabAt(tab, i, null, new Node<K, V>(hash, key, value, null))) {
                break; // CAS insert succeeded; on failure, loop and retry
            }
        } else if ((fh = f.hash) == MOVED) {
            // The bin's head is a ForwardingNode: a resize is in progress,
            // so help migrate, then retry on the new table.
            tab = helpTransfer(tab, f);
        } else {
            V oldVal = null; // previous value, for the return
            synchronized (f) { // lock the bin head against concurrent writers
                if ((tabAt(tab, i) == f)) { // head unchanged under the lock
                    if (fh >= 0) {
                        // hash >= 0: an ordinary linked list (not moving, not a tree)
                        binCount = 1; // start counting at the head
                        for (Node<K, V> e = f; ; ++binCount) {
                            K ek;
                            if ((e.hash == hash) && ((ek = e.key) == key || (ek != null && key.equals(ek)))) {
                                // same hash and equal key: replace (unless onlyIfAbsent)
                                oldVal = e.val;
                                if (!onlyIfAbsent) {
                                    e.val = value;
                                }
                                break;
                            }
                            // otherwise advance to the next node
                            Node<K, V> pred = e;
                            if ((e = e.next) == null) {
                                pred.next = new Node<>(hash, key, value, null);
                                break;
                            }
                        }
                    } else if (f instanceof TreeBin) {
                        // Tree bin: delegate to the red-black tree
                        Node<K, V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K, V>) f).putTreeVal(hash, key, value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent) {
                                p.val = value;
                            }
                        }
                    }
                }
            }
            if (binCount != 0) {
                // >= matches the JDK: treeify once the list REACHES the threshold
                if (binCount >= TREEIFY_THRESHOLD) {
                    treeifyBin(tab, i);
                }
                if (oldVal != null) {
                    return oldVal; // replaced an existing mapping: count unchanged
                }
                break;
            }
        }
    }
    // A new mapping was inserted: count + 1 (resize check happens inside)
    addCount(1L, binCount);
    return null;
}
// Note: get never takes a lock anywhere.
// Volatile fields plus Unsafe volatile reads are enough for safe reads.
public V get(Object key) {
Node<K, V>[] tab;
Node<K, V> e, p;
int n, eh;
K ek;
// Spread the key's hashCode the same way put does
int h = spread(key.hashCode());
// Proceed only if the table exists and the target bin is non-empty
if ((tab = table) != null && (n = tab.length) > 0 &&
(e = tabAt(tab, (n - 1) & h)) != null) {
// 1. Check the bin's head node first
if ((eh = e.hash) == h) {
// Head node is the match: return immediately
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
}
// A negative hash marks a special node:
// eh == -1: a ForwardingNode — the bin is being migrated; its find()
//   looks the key up in nextTable instead.
// eh == -2: a TreeBin — its find() walks the red-black tree (which may
//   be rotating/recoloring, so find() uses an internal read/write lock).
// eh >= 0: an ordinary linked list — just traverse it below.
else if (eh < 0) {
// 2. resizing (ForwardingNode), or
// 3. tree bin (TreeBin)
// dispatch through the Node.find override of the special node
return (p = e.find(h, key)) != null ? p.val : null;
}
// 4. Linked list (eh >= 0): linear scan from the second node on
while ((e = e.next) != null) {
if (e.hash == h &&
((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
return null;
}
/**
 * Removes the mapping for key (delegates to replaceNode with value == null).
 *
 * @return the removed value, or null if the key was absent
 */
public V remove(Object key) {
return replaceNode(key, null, null);
}
/**
 * Implementation for remove/replace.
 *
 * @param key   the key to look up
 * @param value new value; if null the matching node is REMOVED, otherwise
 *              its value is updated
 * @param cv    expected current value; if non-null the node is only
 *              removed/updated when its current value equals cv
 * @return the previous value, or null if no matching node was found
 */
private V replaceNode(Object key, V value, Object cv) {
    int hash = spread(key.hashCode());
    for (Node<K, V>[] tab = table; ; ) {
        Node<K, V> f;
        int i, n, fh;
        if ((tab == null) || (n = tab.length) == 0
                || (f = (tabAt(tab, i = (n - 1) & hash))) == null) {
            // Empty table or empty bin: the key cannot be present
            break;
        } else if ((fh = f.hash) == MOVED) {
            // The bin is being migrated: help with the resize, then retry on
            // the new table. (BUG FIX: the original discarded the return
            // value, so it kept spinning against the stale table.)
            tab = helpTransfer(tab, f);
        } else {
            V oldVal = null;           // previous value, for the return
            boolean validated = false; // true once we actually searched the bin;
                                       // also gates the count adjustment below
            synchronized (f) {
                if (tabAt(tab, i) == f) { // bin head unchanged under the lock
                    if (fh >= 0) {
                        // hash >= 0: linked list bin
                        validated = true;
                        for (Node<K, V> e = f, pred = null; ; ) {
                            K ek;
                            // Equal hash plus key identity (==, e.g. interned
                            // values) or equals() match
                            if (e.hash == hash && ((ek = e.key) == key || (ek != null && ek.equals(key)))) {
                                V ev = e.val; // current value of the match
                                if (cv == null || ((ev == cv || (ev != null && ev.equals(cv))))) {
                                    // No expected value given, or it matches
                                    oldVal = ev;
                                    if (value != null) {
                                        e.val = value; // replace in place
                                    } else if (pred != null) {
                                        // remove an interior node: unlink it
                                        pred.next = e.next;
                                    } else {
                                        // pred == null: removing the head —
                                        // the next node becomes the bin head
                                        setTabAt(tab, i, e.next);
                                    }
                                }
                                // BUG FIX: stop once the matching node has
                                // been handled. The original fell through and
                                // kept traversing (with pred pointing at the
                                // just-removed node), matching the JDK's
                                // missing break.
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) {
                                break; // reached the tail without a match
                            }
                        }
                    }
                    // Tree bin
                    else if (f instanceof TreeBin) {
                        validated = true;
                        TreeBin<K, V> t = (TreeBin<K, V>) f;
                        TreeNode<K, V> r, p;
                        // Look the key up via the tree's findTreeNode
                        if ((r = t.root) != null && (p = r.findTreeNode(hash, key, null)) != null) {
                            V pv = p.val;
                            if (cv == null || ((cv == pv || (pv != null && pv.equals(cv))))) {
                                oldVal = pv;
                                if (value != null) {
                                    p.val = value; // update
                                } else if (t.removeTreeNode(p)) {
                                    // removeTreeNode returning true means the
                                    // tree is now too small: convert back to a list
                                    setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
            }
            // 1. The bin was actually searched
            if (validated) {
                // 2. A matching node was found and handled
                if (oldVal != null) {
                    // 3. A removal decrements the element count
                    if (value == null) {
                        addCount(-1L, -1);
                    }
                    return oldVal;
                }
                // Searched but not found: done
                break;
            }
        }
    }
    return null;
}
// Adjusts the element count by x. `check` is the bin count from the caller:
// when check >= 0, a resize check is performed afterwards.
private final void addCount(long x, int check) {
CounterCell[] as;
long b, s;
// Fast path: with no counter cells yet, try to CAS baseCount directly.
// Take the slow path if cells already exist or the CAS loses a race.
if ((as = counterCells) != null ||
!U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
CounterCell a;
long v;
int m;
// true while no contention has been observed on the chosen cell
boolean uncontended = true;
if (as == null || (m = as.length - 1) < 0 ||
// pick a cell using the thread's random probe value
(a = as[ThreadLocalRandom.getProbe() & m]) == null ||
!(uncontended =
U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
/* Note on U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x):
 * arg1 = object holding the field; arg2 = the field's memory offset
 * (initialized in a static block at class-load time); arg3 = the
 * expected value; arg4 = the new value.
 */
/*
 * Reached when the cell array is null/empty, the randomly chosen
 * cell is null, or the CAS on it failed: fall back to the full
 * contended path in fullAddCount.
 */
fullAddCount(x, uncontended); // contended add
return;
}
if (check <= 1) {
// At most one node was involved: skip the resize check
return;
}
// Sum base + all cells for the resize check below
s = sumCount();
}
if (check >= 0) {
Node<K, V>[] tab, nt;
int n, sc;
// Keep resizing while the count is at/above the threshold (sizeCtl = 0.75n)
while (s >= (long) (sc = sizeCtl) && (tab = table) != null &&
(n = tab.length) < MAXIMUM_CAPACITY) {
// Stamp identifying a resize from size n
int rs = resizeStamp(n);
if (sc < 0) {
// sc < 0: a resize is already in progress
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0) {
// Stamp mismatch, resize finished, helper limit reached,
// or no buckets left to claim: stop here
break;
}
// Otherwise join the migration: helper count + 1, then transfer
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
transfer(tab, nt);
}
} else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2)) {
// This thread initiates the resize:
// high 16 bits of sizeCtl = the resize stamp rs,
// low 16 bits = (number of resizing threads + 1)
// (so sizeCtl is NOT literally -(1 + nThreads) during a resize)
transfer(tab, null);
}
// Recompute the count to decide whether another resize is needed
s = sumCount();
}
}
}
/**
 * Helps transfer if a resize is in progress (called when a bin head turns
 * out to be a ForwardingNode).
 *
 * @param tab the current table
 * @param f   the ForwardingNode heading the bin
 * @return the table to retry the caller's operation on (nextTable when we
 *         joined a resize, otherwise tab)
 */
private Node<K, V>[] helpTransfer(Node<K, V>[] tab, Node<K, V> f) {
    Node<K, V>[] nextTab;
    int sc;
    // BUG FIX: the original tested tab == null, which both defeats the
    // method (helping is only possible with a live table) and would NPE on
    // tab.length below; the JDK requires tab != null.
    if (tab != null && (f instanceof ForwardingNode)
            && (nextTab = ((ForwardingNode<K, V>) f).nextTable) != null) {
        // Stamp identifying a resize from this table size
        int rs = resizeStamp(tab.length);
        // Keep trying while the same resize is still running:
        // nextTab/tab unchanged and sizeCtl < 0
        while (nextTab == nextTable && tab == table && (sc = sizeCtl) < 0) {
            // Stop when:
            // - the high 16 bits of sizeCtl no longer match rs (different resize)
            //   (BUG FIX: shift by RESIZE_STAMP_SHIFT, not RESIZE_STAMP_BITS —
            //   they happen to be equal only while both are 16)
            // - sc == rs + 1: the resize has finished (the initiator set
            //   (rs << 16) + 2 and the last worker decremented back to +1)
            // - sc == rs + MAX_RESIZERS: helper-thread limit (65535) reached
            //   (BUG FIX: the original compared against rs + MAXIMUM_CAPACITY)
            // - transferIndex <= 0: no buckets left to claim
            if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1
                    || sc == rs + MAX_RESIZERS || transferIndex <= 0) {
                break;
            }
            // Register as one more helper (sizeCtl + 1) and migrate
            if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                transfer(tab, nextTab);
                break;
            }
        }
        return nextTab;
    }
    return tab;
}
/**
 * Tries to presize the table to hold at least `size` elements
 * (used by treeifyBin when the table is too small to justify a tree).
 */
private final void tryPresize(int size) {
// Target capacity: 1.5 * size rounded up to a power of two
int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
tableSizeFor(size + (size >>> 1) + 1);
int sc;
while ((sc = sizeCtl) >= 0) { // give up while init/resize is underway
Node<K, V>[] tab = table;
int n;
if (tab == null || (n = tab.length) == 0) {
// Table not created yet: same lazy initialization as initTable
n = (sc > c) ? sc : c;
if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if (table == tab) {
@SuppressWarnings("unchecked")
Node<K, V>[] nt = (Node<K, V>[]) new Node<?, ?>[n];
table = nt;
sc = n - (n >>> 2); // threshold = 0.75n
}
} finally {
sizeCtl = sc;
}
}
} else if (c <= sc || n >= MAXIMUM_CAPACITY)
break; // already big enough, or cannot grow further
else if (tab == table) {
int rs = resizeStamp(n);
if (sc < 0) {
// Join an in-progress resize (same termination checks as addCount)
Node<K, V>[] nt;
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0)
break;
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
} else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2))
// Initiate the resize (stamp in the high bits, count in the low)
transfer(tab, null);
}
}
}
/**
 * Moves the nodes of every bin from tab into nextTab (the doubled table).
 * Multiple threads cooperate, each claiming `stride` buckets at a time by
 * CASing transferIndex downwards.
 *
 * Fixes vs. the original: the allocation-failure path now returns;
 * nextTable is published after a successful allocation (it was never set);
 * and transferIndex = n moved OUT of the catch block — the original only
 * initialized it when allocation FAILED, so a normal resize had no buckets
 * to claim. Also (nextIndex = transferIndex) is compared with <= 0, per the JDK.
 */
private void transfer(Node<K, V>[] tab, Node<K, V>[] nextTab) {
    int n = tab.length, stride;
    // Buckets each thread claims per round; MIN_TRANSFER_STRIDE bounds it
    // below so a resize is not split across too many threads.
    if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) {
        stride = MIN_TRANSFER_STRIDE;
    }
    if (nextTab == null) { // initiating thread: allocate the doubled table
        try {
            Node<K, V>[] nt = (Node<K, V>[]) new Node<?, ?>[n << 1];
            nextTab = nt;
        } catch (Throwable e) {
            // OOME on allocation: saturate sizeCtl so nobody retries, and bail
            sizeCtl = Integer.MAX_VALUE;
            return;
        }
        nextTable = nextTab; // publish the new table to helpers
        transferIndex = n;   // highest unclaimed bucket range (counts down)
    }
    int nextn = nextTab.length;
    // Placed at the head of each migrated bin; readers follow it to nextTab
    ForwardingNode<K, V> fwd = new ForwardingNode<K, V>(nextTab);
    boolean advance = true;    // true -> advance to / claim the next bucket
    boolean finishing = false; // to ensure sweep before committing nextTab
    for (int i = 0, bound = 0; ; ) {
        Node<K, V> f;
        int fh;
        // Pick the next bucket index i to migrate, claiming a fresh range of
        // `stride` buckets via CAS on transferIndex when the current one runs out.
        while (advance) {
            int nextIndex, nextBound;
            if (--i >= bound || finishing) {
                advance = false;
            } else if ((nextIndex = transferIndex) <= 0) {
                // Nothing left to claim: fall through to the retirement path
                i = -1;
                advance = false;
            } else if (U.compareAndSwapInt
                    (this, TRANSFERINDEX, nextIndex,
                            nextBound = (nextIndex > stride ?
                                    nextIndex - stride : 0))) {
                // Claimed buckets [nextBound, nextIndex)
                bound = nextBound;
                i = nextIndex - 1;
                advance = false;
            }
        }
        if (i < 0 || i >= n || i + n >= nextn) {
            // This thread has no more buckets: finish up or retire.
            int sc;
            if (finishing) {
                // Final commit: drop the temporary table, install the new one,
                // and set the new threshold 2n * 0.75
                nextTable = null;
                table = nextTab;
                sizeCtl = (n << 1) - (n >>> 1);
                return;
            }
            if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                /*
                 * The initiating thread set sizeCtl = (resizeStamp(n) << RESIZE_STAMP_SHIFT) + 2
                 * and each helper added 1 before entering transfer; every
                 * retiring thread subtracts 1 here. Therefore the LAST thread
                 * to leave observes (sc - 2) == resizeStamp(n) << RESIZE_STAMP_SHIFT.
                 */
                if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) {
                    return; // not the last thread: just retire
                }
                // Last thread: re-sweep the whole table before committing
                finishing = advance = true;
                i = n; // recheck before commit
            }
        } else if ((f = tabAt(tab, i)) == null) {
            // Empty bucket: just install the forwarding node
            advance = casTabAt(tab, i, null, fwd);
        } else if ((fh = f.hash) == MOVED) {
            advance = true; // already processed
        } else {
            // Migrate a populated bin under its head lock
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    Node<K, V> ln, hn;
                    if (fh >= 0) {
                        // Linked list: split by (hash & n) into a "low" list
                        // that stays at index i and a "high" list for i + n.
                        int runBit = fh & n;
                        Node<K, V> lastRun = f;
                        // lastRun..tail all land in the same half, so that
                        // suffix can be reused without copying.
                        for (Node<K, V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                lastRun = p;
                            }
                        }
                        if (runBit == 0) {
                            ln = lastRun;
                            hn = null;
                        } else {
                            hn = lastRun;
                            ln = null;
                        }
                        // Copy the prefix nodes onto the proper half
                        for (Node<K, V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash;
                            K pk = p.key;
                            V pv = p.val;
                            if ((ph & n) == 0) {
                                ln = new Node<K, V>(ph, pk, pv, ln);
                            } else {
                                hn = new Node<K, V>(ph, pk, pv, hn);
                            }
                        }
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    } else if (f instanceof TreeBin) {
                        // Tree bin: split the same way, re-linking TreeNodes
                        TreeBin<K, V> t = (TreeBin<K, V>) f;
                        TreeNode<K, V> lo = null, loTail = null;
                        TreeNode<K, V> hi = null, hiTail = null;
                        int lc = 0, hc = 0;
                        for (Node<K, V> e = t.first; e != null; e = e.next) {
                            int h = e.hash;
                            TreeNode<K, V> p = new TreeNode<K, V>
                                    (h, e.key, e.val, null, null);
                            if ((h & n) == 0) {
                                if ((p.prev = loTail) == null) {
                                    lo = p;
                                } else {
                                    loTail.next = p;
                                }
                                loTail = p;
                                ++lc;
                            } else {
                                if ((p.prev = hiTail) == null) {
                                    hi = p;
                                } else {
                                    hiTail.next = p;
                                }
                                hiTail = p;
                                ++hc;
                            }
                        }
                        // Halves that shrank below the threshold go back to lists
                        ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                                (hc != 0) ? new TreeBin<K, V>(lo) : t;
                        hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                                (lc != 0) ? new TreeBin<K, V>(hi) : t;
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                }
            }
        }
    }
}
// One stripe of the element counter. @Contended pads the cell to avoid
// false sharing between adjacent cells updated by different threads.
@sun.misc.Contended
static final class CounterCell {
volatile long value;
CounterCell(long x) {
value = x;
}
}
/**
 * Sums baseCount plus every CounterCell to produce the current element
 * count. The result is only an estimate while other threads are updating.
 */
final long sumCount() {
    long total = baseCount;
    CounterCell[] cells = counterCells;
    if (cells != null) {
        for (CounterCell cell : cells) {
            if (cell != null) {
                total += cell.value;
            }
        }
    }
    return total;
}
/**
 * Returns the stamp identifying a resize of a table of length n: the
 * number of leading zeros of n, with bit (RESIZE_STAMP_BITS - 1) set so
 * the stamp is never zero and — once shifted into the high 16 bits of
 * sizeCtl — makes sizeCtl negative for the duration of the resize.
 * (The low 16 bits of sizeCtl then hold the resizing-thread count + 1;
 * the original comment attributed both halves to this return value.)
 *
 * @param n current table length
 * @return the resize stamp for n
 */
static final int resizeStamp(int n) {
return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}
/**
 * Lazily initializes the table on first insert.
 * Fixes vs. the original: the CAS now compares against the sc value just
 * read (not a fresh volatile re-read of sizeCtl); the table is re-checked
 * after winning the CAS; and the threshold is computed from the actual
 * length n — the original's `sc - (sc >>> 2)` left sizeCtl at 0 whenever
 * the default capacity was used (sc started at 0), so every subsequent
 * insert believed the threshold was already exceeded.
 */
private Node<K, V>[] initTable() {
    Node<K, V>[] tab;
    int sc;
    while ((tab = table) == null || tab.length == 0) {
        if ((sc = sizeCtl) < 0) {
            // Another thread is initializing (or resizing): yield and retry
            Thread.yield();
        } else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
            // Won the init race: sizeCtl == -1 acts as the initialization lock
            try {
                if ((tab = table) == null || tab.length == 0) {
                    // sc > 0 means a constructor requested that capacity;
                    // otherwise use the default (16)
                    int n = sc > 0 ? sc : DEFAULT_CAPACITY;
                    @SuppressWarnings("unchecked")
                    Node<K, V>[] nt = (Node<K, V>[]) new Node<?, ?>[n];
                    table = tab = nt;
                    // Threshold = n - n/4 = 0.75n: the load factor and resize
                    // threshold are hard-wired here, which is why the class
                    // has no threshold/loadFactor fields.
                    sc = n - (n >>> 2);
                }
            } finally {
                // Publish the threshold (or restore sc if init was skipped)
                sizeCtl = sc;
            }
            break;
        }
    }
    return tab;
}
// The JDK authors reasoned that with tree bins, lookups stay fast even
// under heavy collisions, so the hash mixing is deliberately cheap:
// XOR the high 16 bits of hashCode into the low 16 bits, then mask with
// HASH_BITS to force the sign bit to 0 — negative hashes are reserved for
// the MOVED/TREEBIN/RESERVED control nodes.
private int spread(int h) {
return (h ^ (h >>> 16)) & HASH_BITS; // the spread, non-negative hash
}
// The three core atomic table accessors: plain array reads/writes do not
// have volatile semantics per element, so Unsafe is used instead.
// (U/ASHIFT/ABASE are initialized in a static block outside this chunk.)
@SuppressWarnings("unchecked")
static final <K, V> Node<K, V> tabAt(Node<K, V>[] tab, int i) {
// volatile read of tab[i]
return (Node<K, V>) U.getObjectVolatile(tab, ((long) i << ASHIFT) + ABASE);
}
// CAS tab[i] from expected value c to v; returns whether the swap happened
static final <K, V> boolean casTabAt(Node<K, V>[] tab, int i,
Node<K, V> c, Node<K, V> v) {
return U.compareAndSwapObject(tab, ((long) i << ASHIFT) + ABASE, c, v);
}
static final <K, V> void setTabAt(Node<K, V>[] tab, int i, Node<K, V> v) {
// volatile write of tab[i]
U.putObjectVolatile(tab, ((long) i << ASHIFT) + ABASE, v);
}
/**
 * Migration marker node placed at the head of a bin once it has been
 * moved during a resize; it links the old table to nextTable. Its key,
 * value, and next are all null and its hash is MOVED (-1). Its find()
 * method (inherited override elsewhere in the JDK) looks the key up in
 * nextTable rather than starting from this node.
 */
static final class ForwardingNode<K, V> extends Node<K, V> {
final Node<K, V>[] nextTable;
ForwardingNode(Node<K, V>[] tab) {
// hash == MOVED (-1) marks "this bin has been migrated"
super(MOVED, null, null, null);
this.nextTable = tab;
}
}
// Contended-path counter update (adapted from Striped64/LongAdder): when a
// CAS on baseCount loses, the increment is spread across counterCells —
// one CounterCell per stripe — creating or doubling the cell table as needed.
private final void fullAddCount(long x, boolean wasUncontended) {
// wasUncontended: false when the caller already lost a CAS on a cell
int h;
// The thread's probe value selects its cell; initialize it if unset
if ((h = ThreadLocalRandom.getProbe()) == 0) {
ThreadLocalRandom.localInit(); // force initialization
h = ThreadLocalRandom.getProbe();
wasUncontended = true;
}
boolean collide = false; // True if last slot nonempty
for (; ; ) {
CounterCell[] as;
CounterCell a;
int n;
long v;
// Case 1: the cell table already exists
if ((as = counterCells) != null && (n = as.length) > 0) {
// The chosen cell slot is empty
if ((a = as[(n - 1) & h]) == null) {
// cellsBusy is a spinlock (0 = free) guarding cell creation
if (cellsBusy == 0) { // Try to attach new Cell
CounterCell r = new CounterCell(x); // Optimistic create
// Take the spinlock (CAS 0 -> 1)
if (cellsBusy == 0 &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean created = false;
try { // Recheck under lock
CounterCell[] rs;
int m, j;
if ((rs = counterCells) != null &&
(m = rs.length) > 0 &&
rs[j = (m - 1) & h] == null) {
// publish the new cell into the table
rs[j] = r;
created = true;
}
} finally {
// release the spinlock
cellsBusy = 0;
}
if (created) {
// new cell holds our increment: done
break;
} // otherwise someone else filled the slot
continue; // Slot is now non-empty
}
}
collide = false;
} else if (!wasUncontended) {
// CAS already known to fail
// Caller lost the race on this cell: rehash the probe and retry
wasUncontended = true; // Continue after rehash
}
else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) {
// CAS on the existing cell succeeded: done
break;
}
else if (counterCells != as || n >= NCPU)
// The table was replaced meanwhile, or it is already as wide as
// the CPU count — widening further cannot reduce contention
collide = false;
else if (!collide)
collide = true;
else if (cellsBusy == 0 &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
// Repeated collisions: double the cell table under the spinlock
try {
if (counterCells == as) {// Expand table unless stale
CounterCell[] rs = new CounterCell[n << 1];
for (int i = 0; i < n; ++i)
rs[i] = as[i];
counterCells = rs;
}
} finally {
cellsBusy = 0;
}
collide = false;
continue; // Retry with expanded table
}
h = ThreadLocalRandom.advanceProbe(h);
} else if (cellsBusy == 0 && counterCells == as &&
U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
// Case 2: no cell table yet — create one with two cells
boolean init = false;
try { // Initialize table
if (counterCells == as) {
CounterCell[] rs = new CounterCell[2];
rs[h & 1] = new CounterCell(x);
counterCells = rs;
init = true;
}
} finally {
cellsBusy = 0;
}
if (init) {
break;
}
} else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
// Case 3: table busy/being created — fall back to baseCount
break; // Fall back on using base
}
}
/**
 * Replaces the linked list in bin `index` with a TreeBin — but only once
 * the table has reached MIN_TREEIFY_CAPACITY (64); below that, collisions
 * are better fixed by doubling the table instead.
 */
private final void treeifyBin(Node<K, V>[] tab, int index) {
Node<K, V> b;
int n, sc;
if (tab != null) {
if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
tryPresize(n << 1);
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
synchronized (b) { // lock the bin head while rebuilding it
if (tabAt(tab, index) == b) {
// Copy the list into a doubly-linked TreeNode chain...
TreeNode<K, V> hd = null, tl = null;
for (Node<K, V> e = b; e != null; e = e.next) {
TreeNode<K, V> p =
new TreeNode<K, V>(e.hash, e.key, e.val,
null, null);
if ((p.prev = tl) == null)
hd = p;
else
tl.next = p;
tl = p;
}
// ...and let the TreeBin constructor build the red-black tree
setTabAt(tab, index, new TreeBin<K, V>(hd));
}
}
}
}
}
/**
 * Returns a plain linked list of Nodes copied from the node chain b
 * (used when a tree bin shrinks below UNTREEIFY_THRESHOLD).
 */
static <K, V> Node<K, V> untreeify(Node<K, V> b) {
    Node<K, V> head = null;
    Node<K, V> tail = null;
    for (Node<K, V> src = b; src != null; src = src.next) {
        Node<K, V> copy = new Node<K, V>(src.hash, src.key, src.val, null);
        if (tail == null) {
            head = copy; // first node becomes the list head
        } else {
            tail.next = copy;
        }
        tail = copy;
    }
    return head;
}
// Tree node: used once a bin's list grows past TREEIFY_THRESHOLD (8).
// Unlike HashMap, these are not installed directly as the bin head —
// they are wrapped in a TreeBin, which owns the red-black tree.
static final class TreeNode<K, V> extends Node<K, V> {
TreeNode<K, V> parent; // red-black tree links
TreeNode<K, V> left;
TreeNode<K, V> right;
TreeNode<K, V> prev; // needed to unlink next upon deletion
boolean red;
TreeNode(int hash, K key, V val, Node<K, V> next,
TreeNode<K, V> parent) {
super(hash, key, val, next);
this.parent = parent;
}
Node<K, V> find(int h, Object k) {
return findTreeNode(h, k, null);
}
/**
 * Returns the TreeNode (or null if not found) for the given key
 * starting at given root. Orders by hash first, then Comparable if
 * the key supports it, and otherwise falls back to searching both
 * subtrees.
 */
final TreeNode<K, V> findTreeNode(int h, Object k, Class<?> kc) {
if (k != null) {
TreeNode<K, V> p = this;
do {
int ph, dir;
K pk;
TreeNode<K, V> q;
TreeNode<K, V> pl = p.left, pr = p.right;
if ((ph = p.hash) > h)
p = pl;
else if (ph < h)
p = pr;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p;
else if (pl == null)
p = pr;
else if (pr == null)
p = pl;
else if ((kc != null ||
(kc = comparableClassFor(k)) != null) &&
(dir = compareComparables(kc, k, pk)) != 0)
// equal hashes but comparable keys: compareTo picks the side
p = (dir < 0) ? pl : pr;
else if ((q = pr.findTreeNode(h, k, kc)) != null)
// not comparable: search the right subtree recursively...
return q;
else
// ...then continue down the left subtree
p = pl;
} while (p != null);
}
return null;
}
}
// TreeNode的包装
static final class TreeBin<K, V> extends Node<K, V> {
TreeNode<K, V> root; // root of the red-black tree
volatile TreeNode<K, V> first; // head of the linked TreeNode chain, used for lock-free reads
volatile Thread waiter; // thread parked while waiting for the write lock
volatile int lockState; // read/write lock state, composed of the flags below
// values for lockState
static final int WRITER = 1; // set while holding write lock
static final int WAITER = 2; // set when waiting for write lock
static final int READER = 4; // increment value for setting read lock
/**
 * Breaks a comparison tie for keys with equal hashes and no usable
 * Comparable ordering: order by class name first, then by
 * identityHashCode. Never returns 0, keeping insertion order total.
 */
static int tieBreakOrder(Object a, Object b) {
    if (a != null && b != null) {
        int byClassName = a.getClass().getName()
                .compareTo(b.getClass().getName());
        if (byClassName != 0) {
            return byClassName;
        }
    }
    // Same class name (or a null operand): identity hash decides
    return (System.identityHashCode(a) <= System.identityHashCode(b)) ? -1 : 1;
}
/**
 * Creates bin with initial set of nodes headed by b: inserts each node of
 * the linked chain into a red-black tree, ordering by hash, then by
 * Comparable when applicable, then by tieBreakOrder.
 */
TreeBin(TreeNode<K, V> b) {
super(TREEBIN, null, null, null); // hash = TREEBIN (-2); no key/value
this.first = b;
TreeNode<K, V> r = null;
for (TreeNode<K, V> x = b, next; x != null; x = next) {
next = (TreeNode<K, V>) x.next;
x.left = x.right = null;
if (r == null) {
// the first node becomes the (black) root
x.parent = null;
x.red = false;
r = x;
} else {
K k = x.key;
int h = x.hash;
Class<?> kc = null;
// Standard BST insertion of x under r
for (TreeNode<K, V> p = r; ; ) {
int dir, ph;
K pk = p.key;
if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0)
dir = tieBreakOrder(k, pk); // total-order fallback
TreeNode<K, V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
x.parent = xp;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
// restore red-black invariants after the insert
r = balanceInsertion(r, x);
break;
}
}
}
}
this.root = r;
assert checkInvariants(root);
}
/**
 * Acquires write lock for tree restructuring.
 */
private final void lockRoot() {
// Fast path: CAS lockState 0 -> WRITER; contended path otherwise
if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
contendedLock(); // offload to separate method
}
/**
 * Releases write lock for tree restructuring.
 * A plain volatile write suffices: only the lock holder reaches here.
 */
private final void unlockRoot() {
lockState = 0;
}
/**
 * Possibly blocks awaiting root lock: spins trying to take WRITER, and
 * after losing registers itself as the WAITER and parks until the last
 * reader unparks it (see find()).
 */
private final void contendedLock() {
boolean waiting = false;
for (int s; ; ) {
if (((s = lockState) & ~WAITER) == 0) {
// No readers and no writer: try to grab the write lock
if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
if (waiting)
waiter = null; // clear our waiter registration
return;
}
} else if ((s & WAITER) == 0) {
// Lock busy and no registered waiter yet: register ourselves
if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
waiting = true;
waiter = Thread.currentThread();
}
} else if (waiting)
// Already registered: park until a reader wakes us
LockSupport.park(this);
}
}
/**
 * Returns matching node or null if none. Tries to search
 * using tree comparisons from root, but continues linear
 * search when lock not available.
 */
final Node<K, V> find(int h, Object k) {
if (k != null) {
for (Node<K, V> e = first; e != null; ) {
int s;
K ek;
if (((s = lockState) & (WAITER | WRITER)) != 0) {
// A writer holds or wants the lock: fall back to walking the
// linked TreeNode chain, which is safe without the tree lock.
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
e = e.next;
} else if (U.compareAndSwapInt(this, LOCKSTATE, s,
s + READER)) {
// Took a read lock (READER increment): search via the tree
TreeNode<K, V> r, p;
try {
p = ((r = root) == null ? null :
r.findTreeNode(h, k, null));
} finally {
Thread w;
// Drop the read lock; the last reader leaving while a
// writer waits must unpark that writer.
if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
(READER | WAITER) && (w = waiter) != null)
LockSupport.unpark(w);
}
return p;
}
}
}
return null;
}
/**
 * Finds or adds a node.
 * Ordering: hash first, then Comparable when the key supports it, then
 * a one-time full subtree search, finally tieBreakOrder.
 *
 * @return null if added
 */
final TreeNode<K, V> putTreeVal(int h, K k, V v) {
Class<?> kc = null;
boolean searched = false;
for (TreeNode<K, V> p = root; ; ) {
int dir, ph;
K pk;
if (p == null) {
// empty tree: the new node becomes root and list head
first = root = new TreeNode<K, V>(h, k, v, null, null);
break;
} else if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p; // existing node found: caller decides about the value
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0) {
if (!searched) {
// Equal hashes, not comparable: scan both subtrees once
// for an existing equal key before inserting
TreeNode<K, V> q, ch;
searched = true;
if (((ch = p.left) != null &&
(q = ch.findTreeNode(h, k, kc)) != null) ||
((ch = p.right) != null &&
(q = ch.findTreeNode(h, k, kc)) != null))
return q;
}
dir = tieBreakOrder(k, pk);
}
TreeNode<K, V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
// Insert x as xp's child and at the head of the linked chain
TreeNode<K, V> x, f = first;
first = x = new TreeNode<K, V>(h, k, v, f, xp);
if (f != null)
f.prev = x;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
if (!xp.red)
x.red = true; // black parent: recoloring suffices, no rebalance
else {
// red parent: rebalancing restructures the tree, so it
// must run under the write lock
lockRoot();
try {
root = balanceInsertion(root, x);
} finally {
unlockRoot();
}
}
break;
}
}
assert checkInvariants(root);
return null;
}
/**
 * Removes the given node, that must be present before this
 * call. This is messier than typical red-black deletion code
 * because we cannot swap the contents of an interior node
 * with a leaf successor that is pinned by "next" pointers
 * that are accessible independently of lock. So instead we
 * swap the tree linkages.
 *
 * @return true if now too small, so should be untreeified
 */
final boolean removeTreeNode(TreeNode<K, V> p) {
TreeNode<K, V> next = (TreeNode<K, V>) p.next;
TreeNode<K, V> pred = p.prev; // unlink traversal pointers
TreeNode<K, V> r, rl;
// First unlink p from the doubly-linked chain used by lock-free readers
if (pred == null)
first = next;
else
pred.next = next;
if (next != null)
next.prev = pred;
if (first == null) {
root = null;
return true;
}
// Tree now too small to be worth keeping: ask the caller to untreeify
if ((r = root) == null || r.right == null || // too small
(rl = r.left) == null || rl.left == null)
return true;
lockRoot(); // write lock for the structural change
try {
TreeNode<K, V> replacement;
TreeNode<K, V> pl = p.left;
TreeNode<K, V> pr = p.right;
if (pl != null && pr != null) {
// Two children: swap p's LINKS (not contents) with its in-order
// successor s, then delete p from its new position
TreeNode<K, V> s = pr, sl;
while ((sl = s.left) != null) // find successor
s = sl;
boolean c = s.red;
s.red = p.red;
p.red = c; // swap colors
TreeNode<K, V> sr = s.right;
TreeNode<K, V> pp = p.parent;
if (s == pr) { // p was s's direct parent
p.parent = s;
s.right = p;
} else {
TreeNode<K, V> sp = s.parent;
if ((p.parent = sp) != null) {
if (s == sp.left)
sp.left = p;
else
sp.right = p;
}
if ((s.right = pr) != null)
pr.parent = s;
}
p.left = null;
if ((p.right = sr) != null)
sr.parent = p;
if ((s.left = pl) != null)
pl.parent = s;
if ((s.parent = pp) == null)
r = s;
else if (p == pp.left)
pp.left = s;
else
pp.right = s;
if (sr != null)
replacement = sr;
else
replacement = p;
} else if (pl != null)
replacement = pl;
else if (pr != null)
replacement = pr;
else
replacement = p;
if (replacement != p) {
// Splice the single child (or subtree) into p's place
TreeNode<K, V> pp = replacement.parent = p.parent;
if (pp == null)
r = replacement;
else if (p == pp.left)
pp.left = replacement;
else
pp.right = replacement;
p.left = p.right = p.parent = null;
}
// Removing a red node cannot violate invariants; otherwise rebalance
root = (p.red) ? r : balanceDeletion(r, replacement);
if (p == replacement) { // detach pointers
TreeNode<K, V> pp;
if ((pp = p.parent) != null) {
if (p == pp.left)
pp.left = null;
else if (p == pp.right)
pp.right = null;
p.parent = null;
}
}
} finally {
unlockRoot();
}
assert checkInvariants(root);
return false;
}
/* ------------------------------------------------------------ */
// Red-black tree methods, all adapted from CLR
/**
 * Left-rotates the subtree around {@code p}: p's right child becomes the
 * subtree root, p becomes its left child, and the pivot's former left
 * subtree is reattached as p's right subtree. Returns the (possibly new)
 * tree root. No-op if p or its right child is null.
 */
static <K, V> TreeNode<K, V> rotateLeft(TreeNode<K, V> root,
                                        TreeNode<K, V> p) {
    if (p == null) {
        return root;
    }
    TreeNode<K, V> pivot = p.right;
    if (pivot == null) {
        return root;
    }
    // Pivot's inner (left) subtree moves under p.
    TreeNode<K, V> inner = pivot.left;
    p.right = inner;
    if (inner != null) {
        inner.parent = p;
    }
    // Pivot takes p's place under p's former parent.
    TreeNode<K, V> grand = p.parent;
    pivot.parent = grand;
    if (grand == null) {
        root = pivot;
        pivot.red = false; // new root must be black
    } else if (grand.left == p) {
        grand.left = pivot;
    } else {
        grand.right = pivot;
    }
    pivot.left = p;
    p.parent = pivot;
    return root;
}
/**
 * Right-rotates the subtree around {@code p}: p's left child becomes the
 * subtree root, p becomes its right child, and the pivot's former right
 * subtree is reattached as p's left subtree. Returns the (possibly new)
 * tree root. No-op if p or its left child is null.
 */
static <K, V> TreeNode<K, V> rotateRight(TreeNode<K, V> root,
                                         TreeNode<K, V> p) {
    if (p == null) {
        return root;
    }
    TreeNode<K, V> pivot = p.left;
    if (pivot == null) {
        return root;
    }
    // Pivot's inner (right) subtree moves under p.
    TreeNode<K, V> inner = pivot.right;
    p.left = inner;
    if (inner != null) {
        inner.parent = p;
    }
    // Pivot takes p's place under p's former parent.
    TreeNode<K, V> grand = p.parent;
    pivot.parent = grand;
    if (grand == null) {
        root = pivot;
        pivot.red = false; // new root must be black
    } else if (grand.right == p) {
        grand.right = pivot;
    } else {
        grand.left = pivot;
    }
    pivot.right = p;
    p.parent = pivot;
    return root;
}
// Restores red-black invariants after inserting node x (adapted from CLR).
// Returns the (possibly new) root. x is colored red, then the fix-up loop
// walks upward recoloring/rotating until no red-red violation remains.
static <K, V> TreeNode<K, V> balanceInsertion(TreeNode<K, V> root,
TreeNode<K, V> x) {
x.red = true;
for (TreeNode<K, V> xp, xpp, xppl, xppr; ; ) {
// x reached the root: color it black and we're done.
if ((xp = x.parent) == null) {
x.red = false;
return x;
// Parent black (no violation) or grandparent missing: done.
} else if (!xp.red || (xpp = xp.parent) == null)
return root;
if (xp == (xppl = xpp.left)) {
// Parent is a left child. Case 1: uncle is red — recolor
// parent/uncle black, grandparent red, continue from grandparent.
if ((xppr = xpp.right) != null && xppr.red) {
xppr.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
} else {
// Case 2: x is an inner (right) child — rotate left at the
// parent to reduce to case 3.
if (x == xp.right) {
root = rotateLeft(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
// Case 3: recolor and rotate right at the grandparent.
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateRight(root, xpp);
}
}
}
} else {
// Mirror image: parent is a right child.
if (xppl != null && xppl.red) {
xppl.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
} else {
if (x == xp.left) {
root = rotateRight(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateLeft(root, xpp);
}
}
}
}
}
}
// Restores red-black invariants after deletion (adapted from CLR).
// x is the replacement node carrying the "extra black"; the loop pushes
// that extra black up or resolves it via sibling recolorings/rotations.
static <K, V> TreeNode<K, V> balanceDeletion(TreeNode<K, V> root,
TreeNode<K, V> x) {
for (TreeNode<K, V> xp, xpl, xpr; ; ) {
// Extra black absorbed at the root (or nothing to fix): done.
if (x == null || x == root)
return root;
else if ((xp = x.parent) == null) {
x.red = false;
return x;
// A red node absorbs the extra black by turning black.
} else if (x.red) {
x.red = false;
return root;
} else if ((xpl = xp.left) == x) {
// x is a left child; xpr is its sibling.
// Case: red sibling — rotate to make the sibling black.
if ((xpr = xp.right) != null && xpr.red) {
xpr.red = false;
xp.red = true;
root = rotateLeft(root, xp);
xpr = (xp = x.parent) == null ? null : xp.right;
}
if (xpr == null)
x = xp;
else {
TreeNode<K, V> sl = xpr.left, sr = xpr.right;
// Case: sibling's children both black — recolor sibling red
// and push the problem up to the parent.
if ((sr == null || !sr.red) &&
(sl == null || !sl.red)) {
xpr.red = true;
x = xp;
} else {
// Case: sibling's far (right) child black — rotate right
// at the sibling to make the far child red.
if (sr == null || !sr.red) {
if (sl != null)
sl.red = false;
xpr.red = true;
root = rotateRight(root, xpr);
xpr = (xp = x.parent) == null ?
null : xp.right;
}
// Case: far child red — recolor and rotate left at the
// parent; this fully resolves the extra black.
if (xpr != null) {
xpr.red = (xp == null) ? false : xp.red;
if ((sr = xpr.right) != null)
sr.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateLeft(root, xp);
}
x = root;
}
}
} else { // symmetric
// Mirror image: x is a right child; xpl is its sibling.
if (xpl != null && xpl.red) {
xpl.red = false;
xp.red = true;
root = rotateRight(root, xp);
xpl = (xp = x.parent) == null ? null : xp.left;
}
if (xpl == null)
x = xp;
else {
TreeNode<K, V> sl = xpl.left, sr = xpl.right;
if ((sl == null || !sl.red) &&
(sr == null || !sr.red)) {
xpl.red = true;
x = xp;
} else {
if (sl == null || !sl.red) {
if (sr != null)
sr.red = false;
xpl.red = true;
root = rotateLeft(root, xpl);
xpl = (xp = x.parent) == null ?
null : xp.left;
}
if (xpl != null) {
xpl.red = (xp == null) ? false : xp.red;
if ((sl = xpl.left) != null)
sl.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateRight(root, xp);
}
x = root;
}
}
}
}
}
/**
 * Recursively verifies structural invariants of the subtree rooted at
 * {@code t}: consistent prev/next links of the traversal list, consistent
 * parent/child links, BST hash ordering, and no red node with two red
 * children. Used only in assertions.
 */
static <K, V> boolean checkInvariants(TreeNode<K, V> t) {
    TreeNode<K, V> parent = t.parent, left = t.left, right = t.right,
            prev = t.prev, next = (TreeNode<K, V>) t.next;
    // Doubly-linked traversal-list pointers must agree in both directions.
    boolean linksOk = (prev == null || prev.next == t)
            && (next == null || next.prev == t)
            && (parent == null || t == parent.left || t == parent.right);
    // Children must point back to t and respect hash ordering.
    boolean orderOk = (left == null || (left.parent == t && left.hash <= t.hash))
            && (right == null || (right.parent == t && right.hash >= t.hash));
    // A red node may not have two red children.
    boolean colorOk = !(t.red && left != null && left.red
            && right != null && right.red);
    return linksOk && orderOk && colorOk
            && (left == null || checkInvariants(left))
            && (right == null || checkInvariants(right));
}
// Unsafe mechanics for TreeBin: U performs the CAS on the tree's
// read-write lock state; LOCKSTATE caches the field's memory offset.
private static final sun.misc.Unsafe U;
private static final long LOCKSTATE; // offset of TreeBin.lockState
static {
try {
U = sun.misc.Unsafe.getUnsafe();
Class<?> k = TreeBin.class;
LOCKSTATE = U.objectFieldOffset
(k.getDeclaredField("lockState"));
} catch (Exception e) {
// Reflection failure here is unrecoverable: abort class initialization.
throw new Error(e);
}
}
}
/**
 * Returns x's class if x implements {@code Comparable<C>} where C is
 * exactly x's own class (i.e. "class C implements Comparable<C>"),
 * else null. Used to decide whether keys are mutually comparable.
 */
static Class<?> comparableClassFor(Object x) {
    if (!(x instanceof Comparable)) {
        return null;
    }
    Class<?> c = x.getClass();
    if (c == String.class) { // bypass checks for the common String case
        return c;
    }
    Type[] interfaces = c.getGenericInterfaces();
    if (interfaces != null) {
        for (Type t : interfaces) {
            if (!(t instanceof ParameterizedType)) {
                continue;
            }
            ParameterizedType pt = (ParameterizedType) t;
            if (pt.getRawType() != Comparable.class) {
                continue;
            }
            Type[] args = pt.getActualTypeArguments();
            // Accept only Comparable<C> with the single type argument C == c.
            if (args != null && args.length == 1 && args[0] == c) {
                return c;
            }
        }
    }
    return null;
}
/**
 * Compares k with x using k's natural ordering, but only when x is
 * non-null and of exactly class kc; otherwise returns 0 (treats x as
 * incomparable rather than throwing).
 */
@SuppressWarnings({"rawtypes", "unchecked"}) // for cast to Comparable
static int compareComparables(Class<?> kc, Object k, Object x) {
    if (x == null || x.getClass() != kc) {
        return 0;
    }
    return ((Comparable) k).compareTo(x);
}
// Unsafe mechanics
// U.compareAndSwapXXX的方法:无锁化的修改值的操作,它可以大大降低锁带来的性能消耗
/**
* 这个算法的基本思想就是不断地去比较当前内存中的变量值与你指定的一个变量值是否相等,
* 如果相等,则接受你指定的修改的值,否则拒绝你的操作。
* 因为当前线程中的值已经不是最新的值,你的修改很可能会覆盖掉其他线程修改的结果。
* 这一点与乐观锁,SVN的思想是比较类似的。
*/
private static final sun.misc.Unsafe U; // raw CAS / volatile field access
private static final long SIZECTL;       // offset of "sizeCtl" (table init/resize control)
private static final long TRANSFERINDEX; // offset of "transferIndex" (resize progress)
private static final long BASECOUNT;     // offset of "baseCount" (size counter base)
private static final long CELLSBUSY;     // offset of "cellsBusy" (CounterCell spinlock)
private static final long CELLVALUE;     // offset of CounterCell.value
private static final long ABASE;         // base offset of Node[] element 0
private static final int ASHIFT;         // log2 of Node[] element size, for indexing
// Field/array offsets are computed once at class-load time so that
// casTabAt/tabAt can address table slots and counters directly.
static {
try {
U = sun.misc.Unsafe.getUnsafe();
Class<?> k = ConcurrentHashMapDemo.class;
SIZECTL = U.objectFieldOffset
(k.getDeclaredField("sizeCtl"));
TRANSFERINDEX = U.objectFieldOffset
(k.getDeclaredField("transferIndex"));
BASECOUNT = U.objectFieldOffset
(k.getDeclaredField("baseCount"));
CELLSBUSY = U.objectFieldOffset
(k.getDeclaredField("cellsBusy"));
Class<?> ck = CounterCell.class;
CELLVALUE = U.objectFieldOffset
(ck.getDeclaredField("value"));
Class<?> ak = Node[].class;
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
// Element size must be a power of two so a shift can replace multiply.
if ((scale & (scale - 1)) != 0) {
throw new Error("data type scale not a power of two");
}
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
// Reflection failure here is unrecoverable: abort class initialization.
throw new Error(e);
}
}
}