先看ConcurrentHashMap中的注释
* This map usually acts as a binned (bucketed) hash table. Each
* key-value mapping is held in a Node. Most nodes are instances
* of the basic Node class with hash, key, value, and next
* fields. However, various subclasses exist: TreeNodes are
* arranged in balanced trees, not lists. TreeBins hold the roots
* of sets of TreeNodes. ForwardingNodes are placed at the heads
* of bins during resizing. ReservationNodes are used as
* placeholders while establishing values in computeIfAbsent and
* related methods. The types TreeBin, ForwardingNode, and
* ReservationNode do not hold normal user keys, values, or
* hashes, and are readily distinguishable during search etc
* because they have negative hash fields and null key and value
* fields. (These special nodes are either uncommon or transient,
* so the impact of carrying around some unused fields is
* insignificant.)
从注释中我们可以看到ConcurrentHashMap有4种节点
(1)Node节点(最普通的节点)
(2)TreeBin/TreeNode节点 TreeBin节点存储的是一系列TreeNode节点组成的红黑树的根
(3)ForwardingNode节点,扩容(transfer)期间被放在已处理完的桶的头部,提示读写线程到新表中查找或协助扩容
(4)ReservationNode节点,在compute和computeIfAbsent中充当占位节点
/**
* Table initialization and resizing control. When negative, the
* table is being initialized or resized: -1 for initialization,
* else -(1 + the number of active resizing threads). Otherwise,
* when table is null, holds the initial table size to use upon
* creation, or 0 for default. After initialization, holds the
* next element count value upon which to resize the table.
*/
private transient volatile int sizeCtl;
sizeCtl是ConcurrentHashMap的核心,不同的sizeCtl表示了ConcurrentHashMap不同的状态
(1)sizeCtl=-1 ConcurrentHashMap正在初始化
(2)sizeCtl<=-2 扩容线程的个数等于sizeCtl的绝对值-1
(3)sizeCtl>=0 若table尚未初始化,sizeCtl为预设的初始容量(0表示使用默认容量);table初始化完成后,sizeCtl保存下一次触发扩容的元素个数阈值
1.put()
/**
 * Maps {@code key} to {@code value}. Delegates to putVal with
 * onlyIfAbsent == false, so an existing mapping for the key is
 * overwritten and its previous value returned.
 */
public V put(K key, V value) {
    return putVal(key, value, false);
}
/** Implementation for put and putIfAbsent */
final V putVal(K key, V value, boolean onlyIfAbsent) {
    // ConcurrentHashMap permits neither null keys nor null values.
    if (key == null || value == null) throw new NullPointerException();
    // Spread (XOR-fold) the hashCode so high bits also affect the bin index.
    int hash = spread(key.hashCode());
    // Number of nodes seen in the target bin; drives the treeify decision
    // below and is passed to addCount as a resize-check hint.
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        // Table is lazily created on first insertion.
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        // Empty bin: try to CAS a brand-new Node in without any locking.
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break;                   // no lock when adding to empty bin
        }
        // hash == MOVED (-1) marks a ForwardingNode: a resize is in
        // progress, so help move bins, then retry on the new table.
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            // Lock the bin head f (f and i were assigned in the tabAt call
            // above); after acquiring the lock, re-check that f is still
            // the head, otherwise retry the outer loop.
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    // Non-negative hash: ordinary linked-list bin.
                    if (fh >= 0) {
                        // The head node already counts as one.
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            // Same reference or equals() match: remember the
                            // old value and replace it unless onlyIfAbsent.
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            // Reached the tail without a match: append.
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    // TreeBin head: delegate to the red-black tree insert.
                    else if (f instanceof TreeBin) {
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            // Convert the bin to a red-black tree once it holds at least
            // TREEIFY_THRESHOLD (8) nodes.
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                // Key already existed: no size change, return old value.
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    // A new mapping was added: bump the count and possibly trigger resize.
    addCount(1L, binCount);
    return null;
}
2.get()
/**
 * Returns the value mapped to {@code key}, or null if absent.
 * Entirely lock-free: relies on volatile reads via tabAt/field access.
 */
public V get(Object key) {
    Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
    // Same spread function as putVal, so we land in the same bin.
    int h = spread(key.hashCode());
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (e = tabAt(tab, (n - 1) & h)) != null) {
        // Check the bin head first.
        if ((eh = e.hash) == h) {
            if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                return e.val;
        }
        // Negative hash: a special node (ForwardingNode during resize,
        // TreeBin, ReservationNode) -- delegate to its own find().
        else if (eh < 0)
            return (p = e.find(h, key)) != null ? p.val : null;
        // Otherwise walk the remainder of the linked list.
        while ((e = e.next) != null) {
            if (e.hash == h &&
                ((ek = e.key) == key || (ek != null && key.equals(ek))))
                return e.val;
        }
    }
    return null;
}
3.transfer()扩容
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
int n = tab.length, stride;
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
stride = MIN_TRANSFER_STRIDE; // subdivide range
if (nextTab == null) { // initiating
try {
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX _VALUE;
return;
}
nextTable = nextTab;
transferIndex = n;
}
int nextn = nextTab.length;
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
//如果处理过的话advance为true,下面有讲
boolean advance = true;
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
Node<K,V> f; int fh;
//看了其他的资料,ConcurrentHashMap通过这里反向遍历整个table数组
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing)
advance = false;
//transferIndex等于table数组的大小,之前有初始化
else if ((nextIndex = transferIndex) <= 0) {
i = -1;
advance = false;
}
else if (U.compareAndSwapInt
(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ?
nextIndex - stride : 0))) {
bound = nextBound;
i = nextIndex - 1;
advance = false;
}
}
if (i < 0 || i >= n || i + n >= nextn) {
int sc;
//如果已经遍历了所有的节点
if (finishing) {
nextTable = null;
table = nextTab;
//把扩容阈值设置成原来的1.5倍
sizeCtl = (n << 1) - (n >>> 1);
return;
}
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
return;
finishing = advance = true;
i = n; // recheck before commit
}
}
//如果遍历到的桶没有元素就插入一个forwarding节点
else if ((f = tabAt(tab, i)) == null)
advance = casTabAt(tab, i, null, fwd);
//如果这个桶的hash值为-1,说明是forwarding节点,这个桶已经被处理过了
else if ((fh = f.hash) == MOVED)
advance = true; // already processed
else {
//如果遍历到的是Node节点
synchronized (f) {
if (tabAt(tab, i) == f) {
Node<K,V> ln, hn;
if (fh >= 0) {
int runBit = fh & n;
Node<K,V> lastRun = f;
for (Node<K,V> p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
ln = lastRun;
hn = null;
}
else {
hn = lastRun;
ln = null;
}
for (Node<K,V> p = f; p != lastRun; p = p.next) {
int ph = p.hash; K pk = p.key; V pv = p.val;
//eg. N=16 10000
//hash=010010110
//N&hash=1
//也就是在第k位为0时,插入到low链,
//为1时,插入到high链
if ((ph & n) == 0)
ln = new Node<K,V>(ph, pk, pv, ln);
else
hn = new Node<K,V>(ph, pk, pv, hn);
}
//把low链和high链插入原位置和原位置+当前容量的位置
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
//为什么不在i+n放一个forwarding节点?因为那个节点已
//经处理过了,扩容的时候是从反向遍历table数组的
//(n-1->0)
setTabAt(tab, i, fwd);
//再进入while循环的时候会把i-1
advance = true;
}
//如果遍历的是TreeBin节点,操作和遍历Node节点差不多
else if (f instanceof TreeBin) {
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> lo = null, loTail = null;
TreeNode<K,V> hi = null, hiTail = null;
int lc = 0, hc = 0;
for (Node<K,V> e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode<K,V> p = new TreeNode<K,V>
(h, e.key, e.val, null, null);
if ((h & n) == 0) {
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
}
else {
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
//如果low链的长度小于UNTREEIFY_THRESHOLD的时候,
//默认为6
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin<K,V>(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin<K,V>(hi) : t;
//在nextTable中插入ln和hn
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
}
}
}
}
}
}
4.remove() (搜了一波好像写的时候网上还查不到 1.8的 ConcurrentHashmap的删除解析 )
/**
 * Removes the mapping for {@code key}. Delegates to replaceNode with
 * value == null (meaning delete) and cv == null (no expected-value check).
 */
public V remove(Object key) {
    return replaceNode(key, null, null);
}
/**
 * Implementation for remove/replace: if cv is non-null it must equal the
 * current value; value == null means remove, otherwise replace.
 */
final V replaceNode(Object key, V value, Object cv) {
    // Locate the bucket for key, same as put/get.
    int hash = spread(key.hashCode());
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        // Table not initialized, zero length, or target bucket empty:
        // nothing to remove/replace, fall through and return null.
        if (tab == null || (n = tab.length) == 0 ||
            (f = tabAt(tab, i = (n - 1) & hash)) == null)
            break;
        // ForwardingNode (hash == MOVED): help finish the resize, retry.
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            boolean validated = false;
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        validated = true;
                        for (Node<K,V> e = f, pred = null;;) {
                            K ek;
                            // Found the node for this key.
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                V ev = e.val;
                                // cv is the expected current value; for
                                // remove(key) cv == null, so this branch
                                // is always taken.
                                if (cv == null || cv == ev ||
                                    (ev != null && cv.equals(ev))) {
                                    oldVal = ev;
                                    // value != null would be a replace;
                                    // for remove(key) it is null, so the
                                    // node is unlinked below instead.
                                    if (value != null)
                                        e.val = value;
                                    // pred is the node before e: bypass e.
                                    else if (pred != null)
                                        pred.next = e.next;
                                    // pred == null: e is the bin head, so
                                    // table[i] becomes e.next.
                                    else
                                        setTabAt(tab, i, e.next);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null)
                                break;
                        }
                    }
                    // TreeBin bucket: same logic via the red-black tree.
                    else if (f instanceof TreeBin) {
                        validated = true;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        if ((r = t.root) != null &&
                            (p = r.findTreeNode(hash, key, null)) != null) {
                            V pv = p.val;
                            if (cv == null || cv == pv ||
                                (pv != null && cv.equals(pv))) {
                                oldVal = pv;
                                if (value != null)
                                    p.val = value;
                                // Tree shrank enough: untreeify the bin.
                                else if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
            }
            if (validated) {
                if (oldVal != null) {
                    // A mapping was removed (value == null): shrink count.
                    if (value == null)
                        addCount(-1L, -1);
                    return oldVal;
                }
                break;
            }
        }
    }
    return null;
}
ConcurrentHashmap主要的函数就是这些,如果以后想起来有关键的再继续补