【本文是为了梳理知识的总结性文章,总结了一些自认为相关的重要知识点,只为巩固记忆以及技术交流,望批评指正。其中参考了很多前辈的文章,包括图片也是引用,如有冒犯,侵删。】
0 存储结构
从底层实现来看,ConcurrentHashMap和HashMap一样都是数组+链表+红黑树(JDK1.8 为了优化链表查询性能新增红黑树)实现的。
1 类定义
// Class declaration: inherits the AbstractMap skeleton and implements
// ConcurrentMap's atomic compound operations (putIfAbsent, replace, ...).
// Serializable is retained for compatibility with earlier, segment-based
// releases of this class. (Class body is shown in the sections below.)
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
implements ConcurrentMap<K,V>, Serializable
2 静态常量
// Largest possible table capacity. Must be a power of two <= 1<<30 because
// the top bits of 32-bit hash fields are used for control purposes.
private static final int MAXIMUM_CAPACITY = 1 << 30;
// Default initial table capacity (must be a power of two).
private static final int DEFAULT_CAPACITY = 16;
// Largest array size usable by toArray and related methods.
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// Default concurrency level; unused in the JDK 8 implementation except for
// serialization compatibility with the old segment-based versions.
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
// Default load factor used when sizing the table.
private static final float LOAD_FACTOR = 0.75f;
// A bin is converted from a list to a tree when an insertion makes it hold
// at least this many nodes (and the table is >= MIN_TREEIFY_CAPACITY).
static final int TREEIFY_THRESHOLD = 8;
// A tree bin is converted back to a list when a removal (or a resize split)
// leaves it with no more than this many nodes.
static final int UNTREEIFY_THRESHOLD = 6;
// Minimum table capacity before bins may be treeified. Below this, an
// overfull bin triggers a resize instead of a list-to-tree conversion.
// (The original comment called this a "resize threshold", which it is not.)
static final int MIN_TREEIFY_CAPACITY = 64;
// Minimum number of bins each thread claims per step when helping a
// transfer (resize), to limit contention on transferIndex.
private static final int MIN_TRANSFER_STRIDE = 16;
// Number of bits used for the generation stamp recorded in sizeCtl during
// a resize. Declared final: it is never modified anywhere in the class
// (JDK 8 left it non-final; later JDKs made it final).
private static final int RESIZE_STAMP_BITS = 16;
// Maximum number of threads that can help with a single resize.
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
// Shift for recording the resize stamp in the upper bits of sizeCtl.
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
// Sentinel hash values identifying special (non-entry) node classes:
static final int MOVED = -1; // hash of a ForwardingNode (bin already moved)
static final int TREEBIN = -2; // hash of a TreeBin (head of a tree bin)
static final int RESERVED = -3; // hash of a ReservationNode (compute placeholder)
static final int HASH_BITS = 0x7fffffff; // mask selecting the usable (non-sign) bits of a normal node hash
// Number of available CPUs, used to bound resize parallelism and striping.
static final int NCPU = Runtime.getRuntime().availableProcessors();
// Fields reported to the serialization mechanism, kept for stream
// compatibility with pre-JDK8 (segment-based) serialized forms.
private static final ObjectStreamField[] serialPersistentFields = {
    new ObjectStreamField("segments", Segment[].class),
    new ObjectStreamField("segmentMask", Integer.TYPE),
    new ObjectStreamField("segmentShift", Integer.TYPE)
};
3 属性
// The backing bin array; its length is always a power of two.
// Allocated lazily on the first insert.
transient volatile Node<K,V>[] table;
// The next array to use during a resize (twice the old length);
// non-null only while a transfer is in progress.
private transient volatile Node<K,V>[] nextTable;
// Base element count, updated by CAS when there is no contention;
// under contention, updates spill into counterCells instead.
private transient volatile long baseCount;
/**
 * Table initialization and resizing control. When negative, the
 * table is being initialized or resized: -1 for initialization,
 * else -(1 + the number of active resizing threads). Otherwise,
 * when table is null, holds the initial table size to use upon
 * creation, or 0 for default. After initialization, holds the
 * next element count value upon which to resize the table.
 */
// Summary of sizeCtl states:
//   -1  : the table is being initialized
//   < -1: a resize is running; the value encodes the resize stamp plus the
//         thread count (the popular "-n means n-1 threads" reading is an
//         approximation of the Javadoc above)
//   > 0 : capacity to use at initialization, or the next resize threshold
//    0  : default — initialize with the default capacity
private transient volatile int sizeCtl;
// Next bin index (exclusive) still to be claimed by transferring threads
// during a resize.
private transient volatile int transferIndex;
// Spinlock (acquired via CAS) guarding creation and resizing of counterCells.
private transient volatile int cellsBusy;
// Striped counter cells; together with baseCount they hold the element count.
private transient volatile CounterCell[] counterCells;
// Lazily created view objects.
private transient KeySetView<K,V> keySet;
private transient ValuesView<K,V> values;
private transient EntrySetView<K,V> entrySet;
4 构造方法
主要设置初始容量、负载因子和 sizeCtl 的值。
/**
 * Creates a new, empty map with the default initial capacity (16),
 * load factor (0.75) and concurrencyLevel (16).
 */
public ConcurrentHashMap() {
}

/**
 * Creates a new, empty map sized to hold the given number of elements
 * without resizing. Only sizeCtl is recorded here; the table itself is
 * allocated lazily on the first insert.
 */
public ConcurrentHashMap(int initialCapacity) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException();
    // Size for initialCapacity * 1.5 + 1 rounded up to a power of two,
    // capped at MAXIMUM_CAPACITY.
    int cap;
    if (initialCapacity >= (MAXIMUM_CAPACITY >>> 1))
        cap = MAXIMUM_CAPACITY;
    else
        cap = tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1);
    this.sizeCtl = cap;
}

/** Creates a new map containing the same mappings as the given map. */
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
    this.sizeCtl = DEFAULT_CAPACITY;
    putAll(m);
}

/** Equivalent to the three-argument constructor with concurrencyLevel 1. */
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
    this(initialCapacity, loadFactor, 1);
}

/**
 * Creates a new, empty map whose initial table accommodates the given
 * number of elements at the given load factor, using at least
 * concurrencyLevel bins.
 */
public ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
    if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    if (initialCapacity < concurrencyLevel)   // use at least as many bins
        initialCapacity = concurrencyLevel;   // as estimated update threads
    long size = (long)(1.0 + (long)initialCapacity / loadFactor);
    int cap = (size >= (long)MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : tableSizeFor((int)size);
    this.sizeCtl = cap;
}
5 内部节点
链表节点
static class Node<K,V> implements Map.Entry<K,V> {
final int hash; // 节点hash码
final K key; // 节点键
V value; // 节点值
Node<K,V> next; // 指向下一个节点
Node(int hash, K key, V val, Node<K,V> next) {
this.hash = hash;
this.key = key;
this.val = val;
this.next = next;
}
public final K getKey() { return key; }
public final V getValue() { return val; }
public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
public final String toString(){ return key + "=" + val; }
public final V setValue(V value) {
throw new UnsupportedOperationException();
}
public final boolean equals(Object o) {
Object k, v, u; Map.Entry<?,?> e;
return ((o instanceof Map.Entry) &&
(k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
(v = e.getValue()) != null &&
(k == key || k.equals(key)) &&
(v == (u = val) || v.equals(u)));
}
// 用来支持map.get(),每个子类有不同的实现
Node<K,V> find(int h, Object k) {
Node<K,V> e = this;
if (k != null) {
do {
K ek;
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
} while ((e = e.next) != null);
}
return null;
}
}
TreeNode
红黑树节点
// Red-black tree node. Extends Node so tree bins can still be walked as a
// linked list (via next) during iteration and untreeify.
static final class TreeNode<K,V> extends Node<K,V> {
    TreeNode<K,V> parent; // red-black tree link (null for the root)
    TreeNode<K,V> left;   // left subtree
    TreeNode<K,V> right;  // right subtree
    TreeNode<K,V> prev;   // predecessor in list order; needed to unlink on delete
    boolean red;          // node color
    TreeNode(int hash, K key, V val, Node<K,V> next,
             TreeNode<K,V> parent) {
        super(hash, key, val, next);
        this.parent = parent;
    }
    Node<K,V> find(int h, Object k) {
        return findTreeNode(h, k, null);
    }
    // Search the subtree rooted at this node for the given hash/key.
    // kc caches the key's comparable class across recursive calls.
    final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
        if (k != null) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk; TreeNode<K,V> q;
                TreeNode<K,V> pl = p.left, pr = p.right;
                if ((ph = p.hash) > h)   // node hash larger: go left
                    p = pl;
                else if (ph < h)         // node hash smaller: go right
                    p = pr;
                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                    return p;            // equal hash and equal key: found
                else if (pl == null)     // equal hashes: descend the only child
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr; // tie-break via Comparable when possible
                else if ((q = pr.findTreeNode(h, k, kc)) != null)
                    return q;            // unordered tie: search right subtree...
                else
                    p = pl;              // ...then continue down the left side
            } while (p != null);
        }
        return null;
    }
}
ForwardingNode
扩容转发节点,放置此节点后外部对原有哈希槽的操作会转发到nextTable上
// Node placed at the head of a bin that has already been moved during a
// resize (hash == MOVED). Operations that land on it are forwarded to the
// new table (nextTable).
static final class ForwardingNode<K,V> extends Node<K,V> {
    final Node<K,V>[] nextTable;
    ForwardingNode(Node<K,V>[] tab) {
        super(MOVED, null, null, null);
        this.nextTable = tab;
    }
    // Overrides Node.find: look the key up in the next table instead.
    Node<K,V> find(int h, Object k) {
        // loop to avoid arbitrarily deep recursion on forwarding nodes
        outer: for (Node<K,V>[] tab = nextTable;;) {
            Node<K,V> e;
            int n;
            // Null key, missing table, or empty target bin: no match.
            if (k == null || tab == null || (n = tab.length) == 0 ||
                (e = tabAt(tab, (n - 1) & h)) == null)
                return null;
            for (;;) {
                int eh;
                K ek;
                if ((eh = e.hash) == h && ((ek = e.key) == k || (ek != null && k.equals(ek))))
                    return e; // hash and key both match: found
                if (eh < 0) { // negative hash marks a special node
                    if (e instanceof ForwardingNode) {
                        // Chained forwarding (a further resize): restart the
                        // search on its table iteratively instead of recursing.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        continue outer;
                    }
                    else // TreeBin / ReservationNode: delegate to its own find
                        return e.find(h, k);
                }
                if ((e = e.next) == null) // end of the list: not present
                    return null;
            }
        }
    }
}
ReservationNode
占位加锁节点,执行某些方法时,对其加锁,如computeIfAbsent等
// Placeholder node (hash == RESERVED) parked in a bin while a value is being
// computed in place, e.g. by computeIfAbsent/compute; it is locked while it
// occupies the bin so competing writers wait.
static final class ReservationNode<K,V> extends Node<K,V> {
    ReservationNode() {
        super(RESERVED, null, null, null);
    }

    // A reservation carries no entries, so lookups through it never match.
    Node<K,V> find(int h, Object k) {
        return null;
    }
}
6 常用方法
put方法
// Maps key to value, replacing any previous mapping for that key.
// Returns the previous value, or null if the key was absent.
public V put(K key, V value) {
    return putVal(key, value, false); // false: overwrite an existing value
}
/** Implementation for put and putIfAbsent */
final V putVal(K key, V value, boolean onlyIfAbsent) {
    // Neither key nor value may be null (unlike HashMap): under concurrency a
    // null would be ambiguous between "absent" and "mapped to null".
    if (key == null || value == null) throw new NullPointerException();
    // Spread (re-hash) the key's hashCode; the result is non-negative because
    // negative hashes are reserved for special nodes.
    int hash = spread(key.hashCode());
    int binCount = 0; // nodes traversed in the bin; drives the treeify decision
    for (Node<K,V>[] tab = table;;) { // retry loop: CAS failure or resize restarts it
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable(); // lazily create the table on first insert
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            // Empty bin: attempt a lock-free CAS insert of a fresh node.
            if (casTabAt(tab, i, null,new Node<K,V>(hash, key, value, null)))
                break; // no lock when adding to empty bin
        }
        else if ((fh = f.hash) == MOVED) // forwarding node: a resize is in progress
            tab = helpTransfer(tab, f); // help move bins, then retry on the new table
        else {
            V oldVal = null;
            synchronized (f) { // lock the bin's head node
                if (tabAt(tab, i) == f) { // recheck: head may have changed before locking
                    if (fh >= 0) { // non-negative hash: ordinary list bin
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            // Same key already present: replace the value
                            // (unless this is putIfAbsent).
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                // Reached the tail: append the new node.
                                pred.next = new Node<K,V>(hash, key,value, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) { // tree bin: delegate to the red-black tree
                        Node<K,V> p;
                        binCount = 2;
                        // putTreeVal returns the existing node for this key, or
                        // null if it inserted a new one.
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                // If the list grew to the treeify threshold, convert it to a
                // tree (or resize instead if the table is still small).
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal; // value replaced: size unchanged, skip addCount
                break;
            }
        }
    }
    // A new mapping was added: bump the element count (this may also
    // trigger or help a resize), not merely "increase binCount".
    addCount(1L, binCount);
    return null;
}
get方法
// Returns the value mapped to key, or null if absent. Lock-free: relies on
// volatile reads of the table and node fields.
public V get(Object key) {
    // Re-hash the key's hashCode the same way put() does.
    int h = spread(key.hashCode());
    Node<K,V>[] tab = table;
    int n;
    if (tab != null && (n = tab.length) > 0) {
        // Head node of the bin this hash maps to.
        Node<K,V> e = tabAt(tab, (n - 1) & h);
        if (e != null) {
            int eh = e.hash;
            if (eh == h) {
                // Head has a matching hash: check the key directly.
                K ek = e.key;
                if (ek == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0) {
                // Special node (forwarding / tree bin): use its own search.
                Node<K,V> p = e.find(h, key);
                return (p != null) ? p.val : null;
            }
            // Ordinary list bin: walk the remaining nodes.
            while ((e = e.next) != null) {
                K ek;
                if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
    }
    return null;
}
remove方法
删除方法是在Map中查找hash值和Key值都相等的节点,然后根据节点hash值的正负判断其节点类型,再进行链表的查找删除或者红黑树的查找删除。
// Removes the mapping for key, if any, returning the removed value or null.
public V remove(Object key) {
    // Delegates to replaceNode: a null value means "delete", and a null
    // expected-value (cv) means "unconditionally".
    return replaceNode(key, null, null);
}
// Implementation for remove and replace: finds the node matching key; if cv
// (the expected current value) is null or equal to the node's value, sets the
// node's value to {@code value}, or unlinks the node when {@code value} is
// null. Returns the old value, or null if nothing matched.
final V replaceNode(Object key, V value, Object cv) {
    int hash = spread(key.hashCode());
    for (Node<K,V>[] tab = table;;) { // retry until the bin is stable
        Node<K,V> f;
        int n, i, fh;
        // Empty table or empty bin: the key cannot be present.
        if (tab == null || (n = tab.length) == 0 ||
            (f = tabAt(tab, i = (n - 1) & hash)) == null)
            break;
        // The bin is being moved by a resize: help, then retry on the new table.
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            // Normal case: locked removal/replacement.
            V oldVal = null;
            boolean validated = false; // true once the locked bin was actually examined
            // Lock the bin's head node for the structural change.
            synchronized (f) {
                if (tabAt(tab, i) == f) { // recheck: head may have changed before locking
                    if (fh >= 0) {
                        // Head is an ordinary list node: walk the list.
                        validated = true;
                        for (Node<K,V> e = f, pred = null;;) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                // Found a node with an equal key.
                                V ev = e.val;
                                // Apply only when no expected value was given,
                                // or the current value matches it.
                                if (cv == null || cv == ev ||
                                    (ev != null && cv.equals(ev))) {
                                    oldVal = ev;
                                    if (value != null)
                                        e.val = value; // replace in place
                                    else if (pred != null)
                                        pred.next = e.next; // unlink an interior node
                                    else
                                        setTabAt(tab, i, e.next); // unlink the head node
                                }
                                break;
                            }
                            // Advance to the next node.
                            pred = e;
                            if ((e = e.next) == null) // end of list: key absent
                                break;
                        }
                    }
                    else if (f instanceof TreeBin) {
                        // Tree bin: search and remove via red-black tree operations.
                        validated = true;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        // Locate the node with an equal key in the tree.
                        if ((r = t.root) != null &&
                            (p = r.findTreeNode(hash, key, null)) != null) {
                            V pv = p.val;
                            // Same expected-value check as the list case.
                            if (cv == null || cv == pv ||
                                (pv != null && cv.equals(pv))) {
                                oldVal = pv;
                                if (value != null)
                                    p.val = value; // replace
                                else if (t.removeTreeNode(p)) // true: tree is now too small
                                    setTabAt(tab, i, untreeify(t.first)); // shrink back to a list
                            }
                        }
                    }
                }
            }
            if (validated) {
                // The bin was examined under the lock, so the result is final.
                if (oldVal != null) {
                    if (value == null)
                        addCount(-1L, -1); // removal: decrement the element count
                    return oldVal;
                }
                break;
            }
            // else: the head changed before we locked it; loop and retry.
        }
    }
    return null;
}
helpTransfer扩容方法
在插入和删除的时候都要进行判断,节点的 f.hash == MOVED 这个条件,相等说明map正处于扩容状态,