JDK11

We can see that ConcurrentHashMap implements the ConcurrentMap and Serializable interfaces and extends the abstract class AbstractMap.
The structure and the operations it provides are similar to HashMap's: a hash array whose bins hold linked lists or red-black trees. Unlike HashMap, keys and values must not be null, and the map is thread-safe. It does not, however, achieve thread safety the way Hashtable does, by locking the whole table with synchronized; that approach is far too slow.
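A quick illustration of those two points (a minimal sketch; the class and keys are arbitrary):
import java.util.concurrent.ConcurrentHashMap;

public class Demo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        // Safe to mutate from many threads without external locking.
        Runnable task = () -> {
            for (int i = 0; i < 1_000; i++) {
                map.merge("hits", 1, Integer::sum); // atomic per-bin update
            }
        };
        Thread t1 = new Thread(task), t2 = new Thread(task);
        t1.start(); t2.start();
        t1.join(); t2.join();
        System.out.println(map.get("hits")); // always 2000
        // map.put(null, 1);   // would throw NullPointerException
        // map.put("k", null); // would throw NullPointerException
    }
}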
Important constants and fields
Constants
/**
* The default concurrency level for this table.
* Unused but defined for compatibility with previous versions of this class.
*/
private static final int DEFAULT_CONCURRENCY_LEVEL = 16; // default concurrency level; unused, kept only for compatibility with earlier versions of this class
/**
* The largest possible (non-power of two) array size.
* Needed by toArray and related methods.
*/
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; // largest possible array size (used as a bound in toArray and related methods)
/**
* The largest possible table capacity. This value must be
* exactly 1<<30 to stay within Java array allocation and indexing
* bounds for power of two table sizes, and is further required
* because the top two bits of 32bit hash fields are used for
* control purposes.
*/
private static final int MAXIMUM_CAPACITY = 1 << 30; // maximum capacity of the hash array
/**
* The default initial table capacity. Must be a power of 2
* (i.e., at least 1) and at most MAXIMUM_CAPACITY.
*/
private static final int DEFAULT_CAPACITY = 16; // default capacity of the hash array
/**
* The load factor for this table. Overrides of this value in
* constructors affect only the initial table capacity. The
* actual floating point value isn't normally used -- it is
* simpler to use expressions such as {@code n - (n >>> 2)} for
* the associated resizing threshold.
*/
private static final float LOAD_FACTOR = 0.75f; // default load factor of the map
/**
* The bin count threshold for using a tree rather than list for a
* bin. Bins are converted to trees when adding an element to a
* bin with at least this many nodes. The value must be greater
* than 2, and should be at least 8 to mesh with assumptions in
* tree removal about conversion back to plain bins upon
* shrinkage.
*/
static final int TREEIFY_THRESHOLD = 8; // once a bin (chain) holds at least this many nodes, it becomes a candidate for conversion from a linked list to a red-black tree
/**
* The smallest table capacity for which bins may be treeified.
* (Otherwise the table is resized if too many nodes in a bin.)
* The value should be at least 4 * TREEIFY_THRESHOLD to avoid
* conflicts between resizing and treeification thresholds.
*/
// guards against building a red-black tree too early, while the table is still small and one bin happens to be long
static final int MIN_TREEIFY_CAPACITY = 64; // the hash array must reach this capacity (with TREEIFY_THRESHOLD also met) before a list is converted to a tree; below it, the table is resized instead
/**
* The bin count threshold for untreeifying a (split) bin during a
* resize operation. Should be less than TREEIFY_THRESHOLD, and at
* most 6 to mesh with shrinkage detection under removal.
*/
static final int UNTREEIFY_THRESHOLD = 6; // when the node count of a treeified bin drops to this value during a resize split, the tree is converted back to a linked list
/**
* Minimum number of rebinnings per transfer step. Ranges are
* subdivided to allow multiple resizer threads. This value
* serves as a lower bound to avoid resizers encountering
* excessive memory contention. The value should be at least
* DEFAULT_CAPACITY.
*/
// each resizer thread claims at least 16 buckets of migration work per step
private static final int MIN_TRANSFER_STRIDE = 16;
/**
* The number of bits used for generation stamp in sizeCtl.
* Must be at least 6 for 32bit arrays.
*/
// number of bits used for the generation stamp in sizeCtl during resizing
private static final int RESIZE_STAMP_BITS = 16;
/**
* The maximum number of threads that can help resize.
* Must fit in 32 - RESIZE_STAMP_BITS bits.
*/
// maximum number of threads that can participate in a resize at the same time
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
/**
* The bit shift for recording size stamp in sizeCtl.
*/
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
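For reference, these constants are combined in the resizeStamp method (shown here from the JDK source) to produce a stamp unique to each table size n, so threads joining a resize can verify they are helping with the same resize generation:
/**
 * Returns the stamp bits for resizing a table of size n.
 * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
 */
static final int resizeStamp(int n) {
    return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}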
/*
* Encodings for Node hash fields. See above for explanation.
*/
static final int MOVED = -1; // hash for forwarding nodes (ForwardingNode)
static final int TREEBIN = -2; // hash for roots of trees (TreeBin head)
static final int RESERVED = -3; // hash for transient reservations (ReservationNode)
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
/** Number of CPUS, to place bounds on some sizings */
// number of processors available to the JVM
static final int NCPU = Runtime.getRuntime().availableProcessors();
Fields
/**
* The array of bins. Lazily initialized upon first insertion.
* Size is always a power of two. Accessed directly by iterators.
*/
transient volatile Node<K,V>[] table; // the hash array (note: its capacity is not the same thing as the number of elements the map can hold)
/**
* The next table to use; non-null only while resizing.
*/
private transient volatile Node<K,V>[] nextTable; // the new array used while the hash array is being resized
/**
* Base counter value, used mainly when there is no contention,
* but also as a fallback during table initialization
* races. Updated via CAS.
*/
private transient volatile long baseCount;
/**
* Table initialization and resizing control.
* When negative, the table is being initialized or resized: -1 for initialization,
* else -(1 + the number of active resizing threads).
* Otherwise, when table is null, holds the initial table size to use upon creation, or 0 for default.
* After initialization, holds the next element count value upon which to resize the table.
*/
/**
 * Controls table initialization and resizing:
 *  0            : initial default value
 *  -1           : a thread is initializing the table
 *  >0           : the capacity to use when initializing, or, once initialized, the next resize threshold
 *  -(1+threads) : a resize is in progress (encodes the number of active resizer threads)
 */
private transient volatile int sizeCtl;
/**
* The next table index (plus one) to split while resizing.
*/
// index into the table used to hand out migration ranges during a resize
private transient volatile int transferIndex;
/**
* Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
*/
// spinlock flag (acquired via CAS) guarding creation and resizing of counterCells, analogous to LongAdder's cellsBusy
private transient volatile int cellsBusy;
/**
* Table of counter cells. When non-null, size is a power of 2.
*/
private transient volatile CounterCell[] counterCells;
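baseCount plus the counterCells array together form a LongAdder-style striped counter; size() ultimately sums them. The summation looks like this in the JDK source (minor stylistic differences are possible across versions):
final long sumCount() {
    CounterCell[] cs = counterCells;
    long sum = baseCount;          // uncontended updates land here
    if (cs != null) {
        for (CounterCell c : cs) { // contended updates are striped into cells
            if (c != null)
                sum += c.value;
        }
    }
    return sum;
}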
The five node types

A bin can be headed by four kinds of nodes: Node, TreeBin, ForwardingNode, and ReservationNode (TreeNode, hanging under a TreeBin, is the fifth type).
TreeBin is a proxy node: it links to the TreeNodes of a red-black tree, encapsulates the complex tree operations behind one node, and provides the lock and unlock methods for the bin.
- Node
The plain linked-list node.
static class Node<K, V> implements Map.Entry<K, V> {
final int hash;
final K key;
volatile V val;
volatile Node<K, V> next;
val and next are volatile so that changes are visible across threads.
Why not hash and key? They are final, so they never change after construction; there is no visibility problem to solve.
- TreeNode
The red-black tree node.
static final class TreeNode<K, V> extends Node<K, V> {
TreeNode<K, V> parent; // red-black tree links
TreeNode<K, V> left;
TreeNode<K, V> right;
boolean red;
TreeNode<K, V> prev; // needed to unlink next upon deletion
- TreeBin
The proxy node stored in the hash slot; it points at the root of the red-black tree.
/**
* TreeNodes used at the heads of bins. TreeBins do not hold user
* keys or values, but instead point to list of TreeNodes and
* their root. They also maintain a parasitic read-write lock
* forcing writers (who hold bin lock) to wait for readers (who do
* not) to complete before tree restructuring operations.
*/
// tree bin: the head node of a treeified bin
static final class TreeBin<K, V> extends Node<K, V> {
// values for lockState
static final int WRITER = 1; // set while holding write lock
static final int WAITER = 2; // set when waiting for write lock
static final int READER = 4; // increment value for setting read lock
volatile TreeNode<K, V> first; // head of the linked list of TreeNodes
TreeNode<K, V> root;
volatile Thread waiter; // the thread that most recently set the WAITER bit
volatile int lockState;
private static final Unsafe U = Unsafe.getUnsafe();
private static final long LOCKSTATE = U.objectFieldOffset(TreeBin.class, "lockState");
Notice that the red-black tree operations now all live inside this TreeBin class.
Let us focus on the operations related to its read-write lock; a tiny sketch of the bit arithmetic follows.
The find method
find never blocks. If some thread holds, or is waiting for, the write lock, find falls back to traversing the bin as a linked list. List traversal is always safe during updates, but tree traversal is not, because a rotation may change the root or its links. So: without the read lock, find does a slow list scan; with the read lock acquired, it searches the tree. Reads are therefore never blocked, which is great for concurrency.
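A small illustration of the lockState bit arithmetic used below (the constant values come from the TreeBin fields above; the variable names are mine):
static final int WRITER = 1; // bit 0
static final int WAITER = 2; // bit 1
static final int READER = 4; // added once per active reader

int s = 3 * READER;                                  // three concurrent readers: s == 12
boolean listFallback = (s & (WAITER | WRITER)) != 0; // false: safe to walk the tree
s |= WAITER;                                         // a writer registers itself: s == 14
listFallback = (s & (WAITER | WRITER)) != 0;         // true: new readers scan the list instead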
/**
 * Walk from the root looking for a node with an "equal" key; return it, or null.
 * While a writer holds or is waiting for the lock, search via the linked list instead.
 */
final Node<K, V> find(int h, Object k) {
if (k != null) {
for (Node<K, V> e = first; e != null; ) {
int s;
K ek;
/**
 * Two special cases fall back to a linked-list search:
 * 1. a thread holds the write lock: scanning the list keeps readers unblocked;
 * 2. a thread is waiting for the write lock: readers stop taking the read lock,
 *    which effectively gives writers priority.
 */
if (((s = lockState) & (WAITER | WRITER)) != 0) {
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
e = e.next;
// take the read lock: bump the reader count by adding READER to the state
} else if (U.compareAndSetInt(this, LOCKSTATE, s,
s + READER)) {
TreeNode<K, V> r, p;
try {
// if root is null, p becomes null;
// otherwise delegate to the red-black tree's findTreeNode
p = ((r = root) == null ? null :
r.findTreeNode(h, k, null));
} finally {
Thread w;
// If this was the last reader and a writer is parked waiting for the
// readers to drain, wake that writer so it can retry taking the write lock.
if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
(READER | WAITER) && (w = waiter) != null)
// unpark the thread that most recently set the WAITER bit
LockSupport.unpark(w);
}
return p;
}
}
}
return null;
}
Acquiring the write lock: locking the root of the red-black tree
This locking method is called whenever an insertion or deletion is about to restructure the tree.
The logic in short:
1. Try to take the write lock with a single CAS.
2. If that fails, call contendedLock: several threads are competing.
3. Inside contendedLock, if no reader or writer holds the lock (at most the WAITER bit is set), CAS the state to WRITER and, if this thread had been waiting, clear the waiter field.
4. Otherwise, if the WAITER bit is not yet set, CAS it on and record the current thread as the waiter; once registered, park until the last departing reader unparks it.
/**
* Acquires write lock for tree restructuring.
*/
private final void lockRoot() {
// fast path: one CAS attempt to take the write lock
if(!U.compareAndSetInt(this, LOCKSTATE, 0, WRITER)) {
// CAS failed: several threads are competing, take the contended path
contendedLock(); // offload to separate method
}
}
/**
* Possibly blocks awaiting root lock.
*/
private final void contendedLock() {
boolean waiting = false;
for(int s; ; ) {
// WRITER = 001, WAITER = 010, READER = 100; ~ is bitwise NOT
// ~WAITER == ...101
// the if below holds when no reader or writer owns the lock, i.e. the
// state is 0 or only the WAITER bit is set: WAIT state -> take the write lock
if(((s = lockState) & ~WAITER) == 0) {
// CAS the lock state to WRITER
if(U.compareAndSetInt(this, LOCKSTATE, s, WRITER)) {
if(waiting) {
waiter = null;
}
return;
}
// a reader or the writer owns the lock and the WAITER bit is not set yet:
// register this thread as the one waiting for the write lock
} else if((s & WAITER) == 0) {
if(U.compareAndSetInt(this, LOCKSTATE, s, s | WAITER)) {
waiting = true;
waiter = Thread.currentThread();
}
// the WAITER bit is already set; if this thread registered it, park here until a reader unparks it
} else if(waiting) {
LockSupport.park(this);
}
}
}
- ForwardingNode
A special Node whose hash is MOVED (-1) and whose key and val are both null.
Once every node of a bin in the old array has been migrated to the new table, a ForwardingNode is placed in that bin.
A read that hits a ForwardingNode is forwarded to the new, post-resize array; a write that hits one tries to help the resize along instead.
/**
* A node inserted at head of bins during transfer operations.
*/
// forwarding node, used during migration: while the map is resizing, it is placed in a hash slot to mark that the slot's chain was null or has already been moved; it is discarded once the resize finishes
static final class ForwardingNode<K, V> extends Node<K, V> {
final Node<K, V>[] nextTable;
ForwardingNode(Node<K, V>[] tab) {
super(MOVED, null, null);
this.nextTable = tab;
}
- ReservationNode
Used by compute and computeIfAbsent, as the example after the code illustrates.
/**
* A place-holder node used in computeIfAbsent and compute.
*/
// placeholder node
static final class ReservationNode<K, V> extends Node<K, V> {
ReservationNode() {
super(RESERVED, null, null);
}
Node<K, V> find(int h, Object k) {
return null;
}
}
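While a computeIfAbsent mapping function runs, the target bin holds a ReservationNode; any nested write that lands in the same bin finds it and throws. A minimal sketch (the key is arbitrary, but reusing the same key guarantees the same bin):
ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
// The outer call parks a ReservationNode in the bin of "x";
// the nested put("x", ...) hits that bin, sees the ReservationNode,
// and putVal throws IllegalStateException("Recursive update").
map.computeIfAbsent("x", k -> {
    map.put("x", 2); // mutating the same map inside the mapping function
    return 1;
});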
Unsafe mechanics
// Unsafe mechanics
private static final Unsafe U = Unsafe.getUnsafe();
private static final long SIZECTL;
private static final long TRANSFERINDEX;
private static final long BASECOUNT;
private static final long CELLSBUSY;
private static final long CELLVALUE;
private static final int ABASE;
private static final int ASHIFT;
static {
SIZECTL = U.objectFieldOffset(ConcurrentHashMap.class, "sizeCtl");
TRANSFERINDEX = U.objectFieldOffset(ConcurrentHashMap.class, "transferIndex");
BASECOUNT = U.objectFieldOffset(ConcurrentHashMap.class, "baseCount");
CELLSBUSY = U.objectFieldOffset(ConcurrentHashMap.class, "cellsBusy");
CELLVALUE = U.objectFieldOffset(CounterCell.class, "value");
// the base offset of element 0 of a Node[] (the agreed starting offset for addressing elements)
ABASE = U.arrayBaseOffset(Node[].class);
// the size in bytes of each Node[] element (must be a power of two)
int scale = U.arrayIndexScale(Node[].class);
if((scale & (scale - 1)) != 0) {
throw new ExceptionInInitializerError("array index scale not a power of two");
}
// compute log2(scale), rounded down
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
// Reduce the risk of rare disastrous classloading in first call to
// LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
Class<?> ensureLoaded = LockSupport.class;
// Eager class load observed to help JIT during startup
ensureLoaded = ReservationNode.class;
}
ConcurrentHashMap leans on CAS operations, which are implemented by native methods under the hood; Java's Unsafe is the usual way to reach such hardware-level atomic operations. The per-slot table accessors built on these offsets are sketched next.
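The tabAt / casTabAt / setTabAt helpers used throughout put, get, and remove are built on ABASE and ASHIFT. Roughly, following the JDK 11 source (these jdk.internal.misc.Unsafe method names changed across versions; earlier JDKs use the *Volatile variants):
@SuppressWarnings("unchecked")
static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
    // volatile-acquire read of tab[i]
    return (Node<K,V>) U.getObjectAcquire(tab, ((long) i << ASHIFT) + ABASE);
}

static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i, Node<K,V> c, Node<K,V> v) {
    // atomically replace tab[i] with v only if it is currently c
    return U.compareAndSetObject(tab, ((long) i << ASHIFT) + ABASE, c, v);
}

static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
    // volatile-release write of tab[i]
    U.putObjectRelease(tab, ((long) i << ASHIFT) + ABASE, v);
}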
Initializing the hash array
This function touches several points of interest:
1. the Thread.yield() operation;
2. how CAS is used for the atomic hand-off;
3. the rather fancy way of multiplying by 0.75, n - (n >>> 2) (see the small demo after initTable below);
4. the array is lazily initialized, created only on first use; note that the default constructor is empty.
/**
* Creates a new, empty map with the default initial table size (16).
*/
public ConcurrentHashMap() {
}
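The sizing constructors only record a pre-computed capacity in sizeCtl; no array is allocated yet. As a rough sketch of the conversion that the initTable comments refer to (JDK 8 shape; later JDKs compute the same idea slightly differently):
public ConcurrentHashMap(int initialCapacity) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException();
    // round 1.5 * initialCapacity + 1 up to the next power of two,
    // so the requested element count fits under the 0.75 load factor
    int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1))
               ? MAXIMUM_CAPACITY
               : tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
    this.sizeCtl = cap; // stashed here until initTable runs
}
// e.g. new ConcurrentHashMap<>(32) stores sizeCtl = 64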
Rough outline:
All work happens inside a while loop. A thread atomically CASes sizeCtl to -1 to announce that initialization is in progress; any thread that then reads a negative sizeCtl calls yield to give up its CPU time slice. The winning thread allocates the array and sets the resize threshold.
private final Node<K, V>[] initTable() {
Node<K, V>[] tab;
// loop until the hash array has been initialized
while((tab = table) == null || tab.length == 0) {
int sc = sizeCtl;
// the map is already being initialized by another thread: yield this thread's time slice
if(sc<0) {
Thread.yield(); // lost initialization race; just spin
// atomically set sizeCtl to -1, marking that the map has entered initialization
} else if(U.compareAndSetInt(this, SIZECTL, sc, -1)) {
try {
if((tab = table) == null || tab.length == 0) {
/*
* If an initial capacity was specified, use the pre-computed capacity,
* otherwise use the default initial capacity.
* See the constructor sketch above for how a requested initial capacity
* is converted into the pre-set table capacity.
*/
int len = (sc>0) ? sc : DEFAULT_CAPACITY;
// allocate the new hash array
@SuppressWarnings("unchecked")
Node<K, V>[] newTable = (Node<K, V>[]) new Node<?, ?>[len];
table = tab = newTable;
// set the next resize threshold: 0.75 * capacity
sc = len - (len >>> 2);
}
} finally {
// restore sizeCtl to a non-negative value (now the threshold)
sizeCtl = sc;
}
break;
}
}
return tab;
}
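The threshold line sc = len - (len >>> 2) is the integer form of 0.75 * len; for a power-of-two len the subtraction is exact:
int len = 16;
int threshold = len - (len >>> 2); // 16 - 4 = 12 == 0.75 * 16
len = 64;
threshold = len - (len >>> 2);     // 64 - 16 = 48 == 0.75 * 64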
The put operation
/**
* Maps the specified key to the specified value in this table.
* Neither the key nor the value can be null.
*
* <p>The value can be retrieved by calling the {@code get} method
* with a key that is equal to the original key.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
*
* @return the previous value associated with {@code key}, or
* {@code null} if there was no mapping for {@code key}
*
* @throws NullPointerException if the specified key or value is null
*/
// store the given (key, value) pair into the map, overwriting any existing value, and return the old value
public V put(K key, V value) {
return putVal(key, value, false);
}
The javadoc makes it clear that neither key nor value may be null, unlike HashMap.
The core is the private putVal method.
/*
* Store a new element into the map and return the old one.
*
* onlyIfAbsent: when true, keep the existing value (do not overwrite)
*/
final V putVal(K key, V value, boolean onlyIfAbsent) {
if(key == null || value == null) {
throw new NullPointerException();
}
/*
* Compute the key's hash; this invokes the key's hashCode() method.
*
* key is an object reference (think of it as an address).
* In principle, key equality and hash equality have no necessary connection;
* everything depends on how hashCode() is written.
*/
int hash = spread(key.hashCode());
/*
* When the bin is a linked list, binCount records the number of nodes in table[i];
* when it is a red-black tree, binCount == 2, enough to let the post-put count
* update run its resize check without triggering treeification.
*/
int binCount = 0;
Node<K, V>[] tab = table;
while(true) {
Node<K, V> f; // head node of the bin the new element belongs to
int fh; // hash of f
K fk; // key of f
V fv; // value of f
int len; // capacity of the hash array
int i; // index of this key within the hash array
// the hash array is uninitialized or has an invalid capacity: create it
if(tab == null || (len = tab.length) == 0) {
// lazily initialize the hash array
tab = initTable();
/*
 * The hash array is initialized, so locate the bin the new element hashes to.
 *
 * f points at the first element of that bin (chain);
 * f == null means the new node will become the first element of the slot.
 */
} else if((f = tabAt(tab, i = (len - 1) & hash)) == null) {
// node becomes the first element of this slot
Node<K, V> node = new Node<>(hash, key, value);
// atomically set tab[i] to node
if(casTabAt(tab, i, null, node)) {
// break out of the outer loop
break; // no lock when adding to empty bin
}
/*
 * The target bin already has a head node and its type is MOVED:
 * the hash array is being resized, so try to help speed the resize up.
 */
} else if((fh = f.hash) == MOVED) {
tab = helpTransfer(tab, f);
/*
 * The bin has a head node and no resize is in progress. First check whether the
 * head node maps the same key; if it does but overwriting is not allowed,
 * return the existing value immediately.
 */
} else if(onlyIfAbsent // overwriting is not allowed
&& fh == hash && ((fk = f.key) == key || (fk != null && key.equals(fk))) && (fv = f.val) != null) { // and the head node maps the same key
return fv;
/*
 * Otherwise search the bin for the right position:
 * append a new node if no equal key exists,
 * or overwrite the old element if one does and overwriting is allowed.
 */
} else {
V oldVal = null;
// search the existing elements, then insert the new node if needed (tail insertion)
synchronized(f) {
// re-check that table[i] is still the head, guarding against concurrent writes; much like the second check in double-checked locking
if(tabAt(tab, i) == f) {
// the head of this bin is a plain list node
if(fh >= 0) {
binCount = 1;
// walk the bin's chain
for(Node<K, V> e = f; ; ++binCount) {
K ek;
// found a node with the same key: replace its value, then break out
if(e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) {
// remember the old value
oldVal = e.val;
// store the new value if overwriting is allowed
if(!onlyIfAbsent) {
e.val = value;
}
// break out of the inner loop
break;
}
Node<K, V> pred = e;
e = e.next;
// no node with this key exists: append a new node at the tail of the chain
if(e==null) {
pred.next = new Node<K, V>(hash, key, value);
// break out of the inner loop
break;
}
} // for
// the head of this bin is a red-black tree (its TreeBin head)
} else if(f instanceof TreeBin) {
Node<K, V> p;
binCount = 2;
if((p = ((TreeBin<K, V>) f).putTreeVal(hash, key, value)) != null) {
oldVal = p.val;
if(!onlyIfAbsent) {
p.val = value;
}
}
} else if(f instanceof ReservationNode) {
throw new IllegalStateException("Recursive update");
}
}
} // synchronized
// if existing elements were searched, the count changed and needs a closer look
if(binCount != 0) {
// once the bin's node count reaches TREEIFY_THRESHOLD, the bin becomes a candidate for conversion from a linked list to a red-black tree
if(binCount >= TREEIFY_THRESHOLD) {
treeifyBin(tab, i);
}
if(oldVal != null) {
return oldVal;
}
break;
}
}
} // while
// bump the element count (this may also trigger or assist a resize)
addCount(1L, binCount);
return null;
}
To summarize:
1. If key or value is null, throw an exception: nulls are not accepted.
2. If the hash array is uninitialized, call initTable(); initialization is lazy.
3. If the bin is empty, CAS the new node into tab[i] with no lock at all.
4. If a ForwardingNode is found, the table is being resized, so try to help with the migration.
5. If the bin is occupied, lock its head node with synchronized and scan the bin for a node with the same key. If none is found:
- when the head of table[i] is a Node (a list node), append the new node at the tail of the list;
- when the head of table[i] is a TreeBin (the tree's proxy node), insert the new node via the red-black tree's insertion.
6. After insertion, if the bin's node count reached TREEIFY_THRESHOLD (8), treeifyBin is called; it converts the list into a red-black tree only when the table capacity is at least MIN_TREEIFY_CAPACITY (64), and resizes the table otherwise.
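Both putVal and get first normalize the key's hash with spread. From the JDK source: it folds the high 16 bits into the low ones (reducing collisions under the power-of-two index mask) and clears the sign bit, since negative hashes are reserved for the MOVED/TREEBIN/RESERVED control nodes:
static final int spread(int h) {
    return (h ^ (h >>> 16)) & HASH_BITS; // HASH_BITS = 0x7fffffff
}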
The get operation
// return the value mapped to the given key, or null if the key is absent
public V get(Object key) {
Node<K, V>[] tab;
Node<K, V> e, p;
int n, eh;
K ek;
int h = spread(key.hashCode());
if((tab = table) != null && (n = tab.length)>0 && (e = tabAt(tab, (n - 1) & h)) != null) {
if((eh = e.hash) == h) {
if((ek = e.key) == key || (ek != null && key.equals(ek))) {
return e.val;
}
// a special node (negative hash): delegate to its find method
} else if(eh<0) {
return (p = e.find(h, key)) != null ? p.val : null;
}
while((e = e.next) != null) {
if(e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek)))) {
return e.val;
}
}
}
return null;
}
- If table[i] itself holds the wanted key (compare hash, then key), return its value directly.
- If the head is a special (non-list) node, look up via its find method; for a TreeBin that is the read-write-locked tree search introduced above.
- Otherwise it is a plain list: walk the chain and compare node by node.
Find in a Node (linked-list lookup)
Node<K, V> find(int h, Object k) {
Node<K, V> e = this;
if(k != null) {
do {
K ek;
if(e.hash == h && ((ek = e.key) == k || (ek != null && k.equals(ek)))) {
return e;
}
} while((e = e.next) != null);
}
return null;
}
Find in a TreeBin (red-black tree lookup)
Covered above in the TreeBin section.
Find in a ForwardingNode
Node<K, V> find(int h, Object k) {
// loop to avoid arbitrarily deep recursion on forwarding nodes
outer:
for(Node<K, V>[] tab = nextTable; ; ) {
Node<K, V> e;
int n;
if(k == null || tab == null || (n = tab.length) == 0 || (e = tabAt(tab, (n - 1) & h)) == null) {
return null;
}
for(; ; ) {
int eh;
K ek;
if((eh = e.hash) == h && ((ek = e.key) == k || (ek != null && k.equals(ek)))) {
return e;
}
if(eh<0) {
// hop over to the resize target array and keep searching there
if(e instanceof ForwardingNode) {
tab = ((ForwardingNode<K, V>) e).nextTable;
continue outer;
} else {
return e.find(h, k);
}
}
if((e = e.next) == null) {
return null;
}
}
}
}
Find in a ReservationNode
It holds no data, so it simply returns null.
Node<K, V> find(int h, Object k) {
return null;
}
The remove operation
The core is the replaceNode method.
/**
* Removes the key (and its corresponding value) from this map.
* This method does nothing if the key is not in the map.
*
* @param key the key that needs to be removed
*
* @return the previous value associated with {@code key}, or
* {@code null} if there was no mapping for {@code key}
*
* @throws NullPointerException if the specified key is null
*/
// remove the element with the given key and return the value it had
public V remove(Object key) {
return replaceNode(key, null, null);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if the specified key is null
*/
// remove the element only if it maps the given key to the given value; the return value reports whether a removal happened
public boolean remove(Object key, Object value) {
if(key == null) {
throw new NullPointerException();
}
return value != null && replaceNode(key, null, value) != null;
}
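A quick illustration of the two overloads above (a sketch; keys and values are arbitrary):
ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
m.put("k", 1);
m.remove("k", 2);                // false: current value is 1, not 2; mapping kept
boolean gone = m.remove("k", 1); // true: value matched, mapping removed atomically
Integer old = m.remove("k");     // null: already gone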
- The replaceNode method
final V replaceNode(Object key, V newValue, Object cv) {
int hash = spread(key.hashCode());
Node<K,V>[] tab = table;
for(; ; ) {
Node<K,V> f; int n, i, fh;
// nothing to process: the table is empty or the target slot has no node
if (tab == null || (n = tab.length) == 0 || (f = tabAt(tab, i = (n - 1) & hash)) == null) {
break;
// a resize is in progress: help migrate, then retry
} else if ((fh = f.hash) == MOVED) {
tab = helpTransfer(tab, f);
} else {
V oldVal = null;
// whether a bin was actually examined under the lock
boolean validated = false;
synchronized (f) {
// double check
if (tabAt(tab, i) == f) {
// handle a plain list bin
if (fh >= 0) {
validated = true;
// start from the first node of the bin
Node<K,V> e = f;
Node<K,V> pred = null;
while(true) {
K ek;
// found the node with the matching key
if (e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) {
// its old value
V ev = e.val;
if (cv == null || cv == ev || (ev != null && cv.equals(ev))) {
oldVal = ev;
if (newValue != null) {
// overwrite the old value
e.val = newValue;
// newValue == null means this call is a removal; decide how to unlink
} else {
// pred == null means the head of the bin itself is being removed
if(pred == null) {
// point tab[i] at e.next
setTabAt(tab, i, e.next);
} else {
// unlink the old node
pred.next = e.next;
}
}
}
break;
}
pred = e;
if ((e = e.next) == null) {
break;
}
}// while(true)
// handle a red-black tree bin
} else if (f instanceof TreeBin) {
validated = true;
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> r, p;
if ((r = t.root) != null &&
(p = r.findTreeNode(hash, key, null)) != null) {
V pv = p.val;
if (cv == null || cv == pv || (pv != null && cv.equals(pv))) {
oldVal = pv;
if (newValue != null) {
p.val = newValue;
} else if (t.removeTreeNode(p)) {
// removeTreeNode returns true when the tree has shrunk too small to stay a tree; convert the remaining nodes (reached via t.first) back into a plain linked list whose head becomes tab[i]
Node<K, V> x = untreeify(t.first);
// point tab[i] at the list head x
setTabAt(tab, i, x);
}
}
}
} else if (f instanceof ReservationNode) {
throw new IllegalStateException("Recursive update");
}
}
}// synchronized
// if a bin was examined, decide whether anything was actually removed
if (validated) {
if (oldVal != null) {
if (newValue == null) {
addCount(-1L, -1);
}
return oldVal;
}
break;
}
}
}// for(; ; )
return null;
}
Clearing all elements
1. If a resize is detected, help it finish first.
2. Lock each bucket in turn, then walk its nodes (the list, or the TreeBin's node chain) to count them before nulling the slot.
3. Update the map's size.
Note that clear() works bucket by bucket, so it is not atomic: concurrent writers may repopulate buckets that were already emptied.
/**
* Removes all of the mappings from this map.
*/
// remove every element from this map
public void clear() {
long delta = 0L; // negative number of deletions
int i = 0;
Node<K, V>[] tab = table;
while(tab != null && i<tab.length) {
int fh;
Node<K, V> f = tabAt(tab, i);
if(f == null) {
++i;
} else if((fh = f.hash) == MOVED) {
tab = helpTransfer(tab, f);
i = 0; // restart
} else {
synchronized(f) {
// double check
if(tabAt(tab, i) == f) {
// for a TreeBin, start from f.first, the head of its node chain (each TreeNode is also linked via next)
Node<K, V> p = (fh >= 0 ? f : (f instanceof TreeBin) ? ((TreeBin<K, V>) f).first : null);
// delta accumulates the (negative) count of deleted nodes
while(p != null) {
--delta;
p = p.next;
}
// null out tab[i]
setTabAt(tab, i++, null);
}
}
}
}
if(delta != 0L) {
addCount(delta, -1);
}
}
This article walked through the JDK11 implementation of ConcurrentHashMap: its thread-safety guarantees, the lazy initialization of the hash array, the details of the put, get, and remove operations, and in particular the TreeBin read-write lock. Along the way, the analysis of the Node, TreeBin, ForwardingNode, and ReservationNode node types showed how the map stays efficient under concurrency.