ConcurrentHashMap一共有5个构造方法。其中,ConcurrentHashMap()是无参构造,方法体为空,不需要理会;ConcurrentHashMap(int initialCapacity, float loadFactor)只是委托调用了this(initialCapacity, loadFactor, 1),因此只需要理解ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel)即可。下面详细解析剩下的3个构造方法。
1、ConcurrentHashMap(int initialCapacity)
/**
 * Creates a new, empty map whose initial table is sized so that the
 * specified number of elements can be inserted without triggering a
 * dynamic resize.
 *
 * @param initialCapacity the number of elements the internal sizing
 *        should accommodate
 * @throws IllegalArgumentException if {@code initialCapacity} is negative
 */
public ConcurrentHashMap(int initialCapacity) {
    if (initialCapacity < 0) throw new IllegalArgumentException();
    int cap;
    if (initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) {
        // Requested size is at least half the maximum table size:
        // clamp straight to the maximum capacity.
        cap = MAXIMUM_CAPACITY;
    } else {
        // Otherwise round 1.5 * initialCapacity + 1 up to the next power
        // of two, so initialCapacity elements fit below the 0.75 resize
        // threshold (trades a little memory for fewer resizes).
        cap = tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1);
    }
    // Before the table is allocated, sizeCtl caches the initial table size.
    this.sizeCtl = cap;
}
2、ConcurrentHashMap(Map<? extends K, ? extends V> m)
初始化过程中调用的putVal()方法,以及tryPresize()内部调用的transfer()方法,都会在解析put方法时进行详细解析。
/**
 * Creates a new map with the same mappings as the given map.
 *
 * @param m the map
 */
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
// Seed sizeCtl with the default capacity (16) before any insertion;
// the putAll below may enlarge the table via tryPresize.
this.sizeCtl = DEFAULT_CAPACITY;
// Copy every mapping from m into this map.
putAll(m);
}
/**
 * Copies all of the mappings from the specified map to this one.
 * These mappings replace any mappings that this map had for any of the
 * keys currently in the specified map.
 *
 * @param m mappings to be stored in this map
 */
public void putAll(Map<? extends K, ? extends V> m) {
    // Grow the table up front so the bulk copy below rarely triggers
    // incremental resizes.
    // NOTE(review): the presizing is generous -- for a source map of 8
    // elements tryPresize computes a capacity of 16 and can then expand
    // again to 32, trading memory for fewer resizes during the copy.
    tryPresize(m.size());
    // Insert every mapping; onlyIfAbsent=false so existing keys are replaced.
    m.forEach((key, value) -> putVal(key, value, false));
}
/**
 * Tries to presize table to accommodate the given number of elements.
 *
 * @param size number of elements (doesn't need to be perfectly accurate)
 */
private final void tryPresize(int size) {
// c: target capacity. If size is at least half of MAXIMUM_CAPACITY, clamp to
// MAXIMUM_CAPACITY; otherwise round 1.5*size + 1 up to the next power of two
// so that `size` elements fit without a further resize (memory for speed).
int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : tableSizeFor(size + (size >>> 1) + 1);
// sc: snapshot of sizeCtl, re-read on every spin.
int sc;
// Spin until sizeCtl goes negative (another thread initializing/resizing).
while ((sc = sizeCtl) >= 0) {
// tab: current table reference; n: its length.
Node<K,V>[] tab = table; int n;
// CASE 1: table not yet allocated -- this thread tries to create it.
if (tab == null || (n = tab.length) == 0) {
// Allocate the larger of the cached size (sc) and the target (c).
n = (sc > c) ? sc : c;
// CAS sizeCtl to -1: acts as the initialization "lock".
if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
// Re-check under the lock that no other thread installed a table.
if (table == tab) {
@SuppressWarnings("unchecked")
// Allocate the bucket array of length n.
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
// Publish the new table.
table = nt;
// Next resize threshold: n - n/4 == 0.75 * n.
sc = n - (n >>> 2);
}
} finally {
// Release the "lock": sizeCtl becomes the resize threshold
// (or is restored if the double-check failed).
sizeCtl = sc;
}
}
}
// Precondition for the branches below: the table is initialized.
// CASE 2: the target capacity is already covered by the current
// threshold, or the table cannot grow any further -- nothing to do.
else if (c <= sc || n >= MAXIMUM_CAPACITY)
break;
// CASE 3: the table needs to grow and has not been swapped out
// since it was read at the top of the loop.
else if (tab == table) {
// rs: resize stamp, unique to a resize starting from length n.
int rs = resizeStamp(n);
// sc < 0 would mean a resize is already in progress.
// NOTE(review): on this path sc was just read as >= 0 by the loop
// condition, so this branch looks unreachable here; the same checks
// are shared with the helpTransfer/addCount code paths.
if (sc < 0) {
Node<K,V>[] nt;
// Bail out when this thread cannot (or need not) help:
// 1) (sc >>> RESIZE_STAMP_SHIFT) != rs: stamp belongs to a
//    different resize generation.
// 2) sc == rs + 1: known JDK 8 bug -- intended to be
//    sc == (rs << RESIZE_STAMP_SHIFT) + 1, i.e. "resize finished,
//    all helper threads have exited".
// 3) sc == rs + MAX_RESIZERS: same bug -- intended to be
//    (rs << RESIZE_STAMP_SHIFT) + MAX_RESIZERS, i.e. the helper
//    thread limit has been reached.
// 4) (nt = nextTable) == null: the resize has already completed.
// 5) transferIndex <= 0: no transfer work left to claim.
if ((sc >>> RESIZE_STAMP_SHIFT) != rs ||
sc == rs + 1 ||
sc == rs + MAX_RESIZERS ||
(nt = nextTable) == null ||
transferIndex <= 0)
break;
// Register as one more helper thread (sizeCtl low bits count
// helpers), then move buckets to the new table.
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
}
// First thread to start this resize: set sizeCtl to
// (rs << RESIZE_STAMP_SHIFT) + 2 and begin the transfer with a
// null nextTable (transfer allocates it).
else if (U.compareAndSwapInt(this, SIZECTL, sc,(rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null);
}
}
}
3、ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel)
/**
 * Creates a new, empty map whose initial table size is derived from the
 * expected number of elements ({@code initialCapacity}), the table
 * density ({@code loadFactor}), and the estimated number of concurrently
 * updating threads ({@code concurrencyLevel}).
 *
 * @param initialCapacity the initial capacity; internal sizing
 *        accommodates this many elements given the load factor
 * @param loadFactor the table density used to establish the initial
 *        table size
 * @param concurrencyLevel the estimated number of concurrently updating
 *        threads, used only as a sizing hint
 * @throws IllegalArgumentException if the initial capacity is negative,
 *         or the load factor or concurrencyLevel are nonpositive
 */
public ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
    // loadFactor and concurrencyLevel only influence the initial sizing
    // computed here; neither value is stored in a field.
    if (!(loadFactor > 0.0f) ||
        initialCapacity < 0 ||
        concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    // Use at least as many bins as estimated updating threads.
    if (initialCapacity < concurrencyLevel)
        initialCapacity = concurrencyLevel;
    // Estimate the table size needed so that initialCapacity elements stay
    // under the resize threshold; the extra 1.0 rounds the estimate up,
    // trading a little memory for avoiding an early resize. For example,
    // with initialCapacity 16 and no slack the table would resize after
    // the 13th insertion.
    long estimated = (long)(1.0 + (long)initialCapacity / loadFactor);
    int cap;
    if (estimated >= (long)MAXIMUM_CAPACITY)
        cap = MAXIMUM_CAPACITY;
    else
        cap = tableSizeFor((int)estimated);
    // Before the table is allocated, sizeCtl caches the initial table size.
    this.sizeCtl = cap;
}