ConcurrentHashMap
HashMap源码解析:https://blog.csdn.net/bobo1356/article/details/103865462
1. 定义
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
implements ConcurrentMap<K,V>, Serializable
2.常量和字段
常量
/**
 * Maximum table capacity. Two bits of the 32-bit hash are used for
 * control purposes, so the largest power-of-two capacity is 2^30.
 */
private static final int MAXIMUM_CAPACITY = 1 << 30;
/**
 * Default initial table capacity.
 */
private static final int DEFAULT_CAPACITY = 16;
/**
 * Largest possible array size (leaves headroom below Integer.MAX_VALUE
 * for VM array headers).
 */
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
/**
 * Default concurrency level. Unused in this implementation; kept only
 * for compatibility with previous versions of this class.
 */
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
/**
 * Load factor. Rarely used directly; the resize threshold is instead
 * computed as n - (n >>> 2), i.e. 0.75 * n.
 */
private static final float LOAD_FACTOR = 0.75f;
/**
 * Bin count at which a linked-list bin is converted to a red-black tree.
 */
static final int TREEIFY_THRESHOLD = 8;
/**
 * Bin count at which a tree bin is converted back to a linked list.
 */
static final int UNTREEIFY_THRESHOLD = 6;
/**
 * Minimum table capacity for treeification. Below this the table is
 * resized instead of treeifying a bin, which reduces hash collisions.
 */
static final int MIN_TREEIFY_CAPACITY = 64;
/**
 * Minimum number of bins handled per transfer step. Ranges are
 * subdivided to allow multiple resizer threads. This value
 * serves as a lower bound to avoid resizers encountering
 * excessive memory contention. The value should be at least
 * DEFAULT_CAPACITY.
 */
private static final int MIN_TRANSFER_STRIDE = 16;
/**
 * The number of bits used for generation stamp in sizeCtl.
 * Must be at least 6 for 32bit arrays.
 */
private static int RESIZE_STAMP_BITS = 16;
/**
 * Maximum number of threads that can help with a resize transfer.
 */
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
/**
 * The bit shift for recording size stamp in sizeCtl.
 */
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
/*
 * Encodings for Node hash fields. See above for explanation.
 */
static final int MOVED = -1; // hash for forwarding nodes
static final int TREEBIN = -2; // hash for roots of trees
static final int RESERVED = -3; // hash for transient reservations
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
/** Number of available CPU cores. */
static final int NCPU = Runtime.getRuntime().availableProcessors();
字段
/**
 * The bin array; its length is always a power of two. Lazily allocated
 * on the first insertion. Declared volatile so the array reference is
 * always visible to concurrent readers.
 */
transient volatile Node<K,V>[] table;
/**
 * The next table to use; non-null only while a resize is in progress.
 */
private transient volatile Node<K,V>[] nextTable;
/**
 * Base counter value, used mainly when there is no contention, and also
 * during table initialization; updated via CAS.
 */
private transient volatile long baseCount;
/**
 * Coordinates table initialization and resizing:
 *   -1 : the table is being initialized;
 *   -n : n - 1 threads are currently resizing;
 *   while table is null it holds the initial capacity to allocate;
 *   after initialization it holds the next resize threshold.
 */
private transient volatile int sizeCtl;
/**
 * The next bin index (split point) to hand out to resizer threads.
 */
private transient volatile int transferIndex;
/**
 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
 */
private transient volatile int cellsBusy;
/**
 * Table of counter cells. When non-null, size is a power of 2.
 */
private transient volatile CounterCell[] counterCells;
// Lazily created view collections
private transient KeySetView<K,V> keySet;
private transient ValuesView<K,V> values;
private transient EntrySetView<K,V> entrySet;
3.内部类
// Linked-list node (excerpt). val and next are volatile so that
// lock-free readers such as get() always observe values written by
// concurrent updaters; hash and key are immutable once constructed.
static class Node<K,V> implements Map.Entry<K,V> {
    final int hash;           // spread hash of the key; negative values mark special nodes
    final K key;
    volatile V val;
    volatile Node<K,V> next;  // next node in the bin's chain
}
4.构造函数
/**
 * Creates a new, empty map with the default initial capacity (16).
 * Nothing is allocated here; the table is created lazily on first insert.
 */
public ConcurrentHashMap() {
}
/**
 * Creates a new, empty map sized to hold the given number of elements
 * without resizing. This is the recommended constructor when the element
 * count is known in advance.
 *
 * @param initialCapacity the expected number of elements
 * @throws IllegalArgumentException if initialCapacity is negative
 */
public ConcurrentHashMap(int initialCapacity) {
    if (initialCapacity < 0) {
        throw new IllegalArgumentException();
    }
    // Fold the 0.75 load factor in up front by sizing the table for
    // initialCapacity + initialCapacity/2 + 1 slots; callers can thus
    // pass the raw element count without thinking about the load factor.
    int capacity;
    if (initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) {
        capacity = MAXIMUM_CAPACITY;
    } else {
        capacity = tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1);
    }
    // No allocation happens here: sizeCtl merely records the target
    // capacity until the first put() triggers initTable().
    this.sizeCtl = capacity;
}
/**
 * Creates a new map containing the same mappings as the given map. The
 * table starts at the default capacity (16) and grows as needed while
 * putAll copies the entries.
 */
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
    this.sizeCtl = DEFAULT_CAPACITY;
    putAll(m);
}
/**
 * Creates a new, empty map sized for the given element count and load
 * factor, delegating with a concurrency level of 1.
 */
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
    this(initialCapacity, loadFactor, 1);
}
/**
 * Creates a new, empty map sized for the given element count, load
 * factor, and estimated number of concurrently updating threads.
 *
 * @param initialCapacity  expected number of elements
 * @param loadFactor       table density used only for this initial sizing
 * @param concurrencyLevel estimated number of updating threads
 * @throws IllegalArgumentException on a non-positive load factor or
 *         concurrency level, or a negative initial capacity
 */
public ConcurrentHashMap(int initialCapacity,
                         float loadFactor, int concurrencyLevel) {
    // !(loadFactor > 0.0f) — rather than loadFactor <= 0.0f — also rejects NaN.
    if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) {
        throw new IllegalArgumentException();
    }
    // Use at least as many bins as estimated updater threads.
    if (initialCapacity < concurrencyLevel) {
        initialCapacity = concurrencyLevel;
    }
    // The load factor is applied here once, so callers pass the raw
    // element count and never deal with it again.
    long estimated = (long)(1.0 + (long)initialCapacity / loadFactor);
    int capacity;
    if (estimated >= (long)MAXIMUM_CAPACITY) {
        capacity = MAXIMUM_CAPACITY;
    } else {
        capacity = tableSizeFor((int)estimated);
    }
    // Recorded for the lazy allocation performed by initTable().
    this.sizeCtl = capacity;
}
5.初始化方法
/**
 * Initializes the table, using sizeCtl for coordination so that exactly
 * one thread allocates while any others yield and re-check.
 */
private final Node<K,V>[] initTable() {
    Node<K,V>[] tab; int sc;
    // Keep looping while the table is still uninitialized.
    while ((tab = table) == null || tab.length == 0) {
        if ((sc = sizeCtl) < 0)
            // sizeCtl < 0: another thread is initializing or resizing;
            // give up the CPU and re-check on the next iteration.
            Thread.yield(); // lost initialization race; just spin
        // Otherwise try to claim initialization by CASing sizeCtl to -1.
        else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
            try {
                // Re-check: between reading table above and winning the
                // CAS, another thread may have already finished the
                // initialization.
                if ((tab = table) == null || tab.length == 0) {
                    // sc > 0 carries the capacity requested by a sizing
                    // constructor; otherwise use the default (16).
                    int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                    // Allocate and publish the table; the new resize
                    // threshold is n - n/4 = 0.75 * n.
                    @SuppressWarnings("unchecked")
                    Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                    table = tab = nt;
                    sc = n - (n >>> 2);
                }
            } finally {
                // Always restore sizeCtl: the threshold if we allocated,
                // or the previous value if another thread beat us to it.
                sizeCtl = sc;
            }
            break;
        }
    }
    return tab;
}
6.核心方法get
get方法不需要加锁
/**
 * Returns the value mapped to the key, or null if absent. Entirely
 * lock-free: visibility is guaranteed by volatile reads of the table
 * reference and of Node.val / Node.next.
 */
public V get(Object key) {
    Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
    // Spread the key's hash exactly as put() does.
    int h = spread(key.hashCode());
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (e = tabAt(tab, (n - 1) & h)) != null) {
        if ((eh = e.hash) == h) {
            if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                // The bin's head node is the match: return it directly.
                return e.val;
        }
        // A negative hash marks a special node (a TreeBin root, or a
        // forwarding node for a bin already moved to nextTable);
        // delegate the lookup to that node's own find().
        else if (eh < 0)
            return (p = e.find(h, key)) != null ? p.val : null;
        // Ordinary linked-list bin: walk the chain.
        while ((e = e.next) != null) {
            if (e.hash == h &&
                ((ek = e.key) == key || (ek != null && key.equals(ek))))
                return e.val;
        }
    }
    return null;
}
// Spreads (XORs) the high 16 hash bits into the low bits so short keys
// still vary in the index bits, then clears the sign bit with HASH_BITS
// (0x7fffffff, i.e. 2^31 - 1) so a normal hash never collides with the
// negative sentinel hashes (MOVED / TREEBIN / RESERVED).
static final int spread(int h) {
    int mixed = h ^ (h >>> 16);
    return mixed & HASH_BITS;
}
7. 核心方法put
/**
 * Maps the key to the value, replacing any existing mapping and
 * returning the previous value (or null). Delegates to putVal with
 * onlyIfAbsent = false.
 */
public V put(K key, V value) {
    return putVal(key, value, false);
}
/** Implementation for put and putIfAbsent */
final V putVal(K key, V value, boolean onlyIfAbsent) {
    // ConcurrentHashMap forbids null keys AND null values (unlike HashMap).
    if (key == null || value == null) throw new NullPointerException();
    // Spread the key's hashCode so high bits take part in indexing.
    int hash = spread(key.hashCode());
    // Size of the bin after insertion; drives the treeify decision below.
    int binCount = 0;
    // Retry loop: re-read the table and retry until a break succeeds.
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        // Table not created yet: lazily initialize it first.
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        // Empty bin: install the new node with a single CAS, no locking.
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break; // no lock when adding to empty bin
        }
        // Head is a forwarding node (hash == MOVED): a resize is in
        // progress, so help transfer bins before retrying.
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            // Normal bin: lock only its head node, so updates to
            // different bins can proceed in parallel.
            synchronized (f) {
                // Re-check that f is still the head; it may have changed
                // while this thread was acquiring the lock.
                if (tabAt(tab, i) == f) {
                    // hash >= 0 means an ordinary linked-list bin:
                    // search for the key, replacing or appending.
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                // Key already present: replace unless
                                // putIfAbsent semantics were requested.
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                // Reached the tail: append the new node.
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    // Tree bin: delegate to the red-black tree insert.
                    else if (f instanceof TreeBin) {
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                // Treeify once the chain holds >= 8 nodes (treeifyBin may
                // instead resize while the table is below 64 bins).
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    // Bump the element count; this may trigger a resize.
    addCount(1L, binCount);
    return null;
}