public class HashMap<K,V> extends AbstractMap<K,V>
implements Map<K,V>, Cloneable, Serializable {
private static final long serialVersionUID = 362498820763181265L;
/**
* Default initial capacity of the HashMap; must be a power of 2.
* Note: choosing an initial capacity that matches the expected data volume
* avoids the overhead of repeated resizing (a sizing example follows the constant).
*/
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16
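/*
* Illustrative sizing example (not part of the JDK source): to hold n entries
* without ever resizing, choose a capacity such that n <= capacity * loadFactor.
* Assuming n = 1000 and the default load factor 0.75f:
*
*   Map<String, Integer> m = new HashMap<>((int) (1000 / 0.75f) + 1); // 1334
*
* tableSizeFor(1334) rounds the capacity up to 2048, whose threshold
* 2048 * 0.75 = 1536 comfortably exceeds 1000.
*/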
/**
* Maximum capacity of the HashMap; must be a power of 2 and no larger than 1 << 30.
* Note: if a larger initial capacity is requested, it is clamped to 1 << 30.
*/
static final int MAXIMUM_CAPACITY = 1 << 30;
/**
* Default load factor.
* Note: once the number of entries exceeds capacity * load factor, the
* HashMap doubles its capacity.
*/
static final float DEFAULT_LOAD_FACTOR = 0.75f;
/**
* Treeify threshold: a bin whose linked list grows past this value is converted
* into a red-black tree (red-black trees were added to HashMap in JDK 1.8).
* Notes:
* (1) Before JDK 1.8 the structure was array + linked list; since JDK 1.8 it is
*     array + linked list + red-black tree.
* (2) A bin is treeified only when the table capacity is at least
*     MIN_TREEIFY_CAPACITY; otherwise the table is resized instead.
* (3) In the worst case all entries pile up in a single bin: lookup is O(n)
*     in a linked list, but O(log n) once it is replaced by a red-black tree.
*/
static final int TREEIFY_THRESHOLD = 8;
/**
* Untreeify threshold: during a resize, a tree bin holding this many nodes or
* fewer is converted back into a linked list.
*/
static final int UNTREEIFY_THRESHOLD = 6;
/**
* A bin is treeified only when the table capacity is at least this value;
* otherwise the table is resized instead.
* Note: to balance resizing against treeification, this value is at least
* 4 * TREEIFY_THRESHOLD.
*/
static final int MIN_TREEIFY_CAPACITY = 64;
/**
* Node<K,V> implements the array + linked-list structure.
*/
static class Node<K,V> implements Map.Entry<K,V> {
final int hash; // the hash of the key
final K key;
V value;
Node<K,V> next;
Node(int hash, K key, V value, Node<K,V> next) {
this.hash = hash;
this.key = key;
this.value = value;
this.next = next;
}
public final K getKey() { return key; }
public final V getValue() { return value; }
public final String toString() { return key + "=" + value; }
public final int hashCode() {
return Objects.hashCode(key) ^ Objects.hashCode(value);
}
public final V setValue(V newValue) {
V oldValue = value;
value = newValue;
return oldValue;
}
public final boolean equals(Object o) {
if (o == this)
return true;
if (o instanceof Map.Entry) {
Map.Entry<?,?> e = (Map.Entry<?,?>)o;
if (Objects.equals(key, e.getKey()) &&
Objects.equals(value, e.getValue()))
return true;
}
return false;
}
}
/* ---------------- Static utilities -------------- */
/**
* Computes the hash for a key: the high 16 bits of key.hashCode() stay
* unchanged, while the low 16 bits are XORed with the high 16 bits.
* (h >>> 16 is an unsigned right shift that fills with zeros, and XOR with 0
* leaves a bit unchanged, which is why the high 16 bits stay the same.)
* A worked example follows the method.
*/
static final int hash(Object key) {
int h;
return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
}
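/*
* A worked example (illustrative, not part of the JDK source) of how hash()
* folds the high bits into the low bits. Assume key.hashCode() == 0x12345678:
*
*   h            = 0x12345678
*   h >>> 16     = 0x00001234
*   h ^ (h>>>16) = 0x1234444C   (high 16 bits unchanged, low 16 bits mixed)
*
* With a table of length 16 the bucket index is hash & 15, so only the low
* 4 bits select the bin; the XOR lets the high bits influence the index,
* which reduces collisions for keys differing only in their high bits.
*/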
/**
* Returns the smallest power of 2 greater than or equal to cap.
* A worked example follows the method.
*/
static final int tableSizeFor(int cap) {
int n = cap - 1;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
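/*
* A worked example (illustrative, not part of the JDK source) for cap = 17:
*
*   n = cap - 1 = 16 = 0b10000
*   n |= n >>> 1    -> 0b11000
*   n |= n >>> 2    -> 0b11110
*   n |= n >>> 4    -> 0b11111   (31; the remaining shifts change nothing)
*   return n + 1    -> 32
*
* The shifts smear the highest set bit into every lower position, yielding a
* value of all ones, so n + 1 is the next power of 2. Starting from cap - 1
* keeps an exact power of 2 (such as 16) from being doubled needlessly.
*/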
/**
* The backing data structure of the HashMap: an array whose slots hold
* linked lists (or red-black trees).
*/
transient Node<K,V>[] table;
/**
* Holds cached entrySet(). Note that AbstractMap fields are used
* for keySet() and values().
*/
transient Set<Map.Entry<K,V>> entrySet;
/**
* The number of key-value mappings in the HashMap.
*/
transient int size;
/**
* Every structural modification of the HashMap is reflected in modCount. Under
* unsynchronized concurrent access an iterator compares its saved count with
* modCount and throws a ConcurrentModificationException when another thread
* has modified the map. (This is only a best-effort fail-fast check; modCount
* is not volatile, so it provides no thread-safety guarantee.)
*/
transient int modCount;
/**
* When the HashMap's size exceeds threshold, a resize is performed;
* threshold = capacity * loadFactor.
*/
int threshold;
/**
* The load factor, a measure of how full the HashMap may get; the default is
* 0.75f. The actual load at any moment is size / capacity.
*/
final float loadFactor;
/**
* Constructs an empty HashMap with the given initial capacity and load factor.
*/
public HashMap(int initialCapacity, float loadFactor) {
// A negative initial capacity throws an IllegalArgumentException
if (initialCapacity < 0)
throw new IllegalArgumentException("Illegal initial capacity: " +
initialCapacity);
// An initial capacity above MAXIMUM_CAPACITY (1 << 30) is clamped to MAXIMUM_CAPACITY
if (initialCapacity > MAXIMUM_CAPACITY)
initialCapacity = MAXIMUM_CAPACITY;
// A load factor that is <= 0 or NaN throws an IllegalArgumentException
if (loadFactor <= 0 || Float.isNaN(loadFactor))
throw new IllegalArgumentException("Illegal load factor: " +
loadFactor);
this.loadFactor = loadFactor;
// Note that the computed capacity is assigned to threshold.
// At first glance this looks like a bug, as if it should read:
//     this.threshold = tableSizeFor(initialCapacity) * this.loadFactor;
// to match the meaning of threshold (resize once size reaches it).
// However, the constructor does not initialize the table field: table
// allocation is deferred to the first put(), where resize() recomputes
// threshold properly (a small sketch follows this constructor).
this.threshold = tableSizeFor(initialCapacity);
}
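/*
* A small sketch (illustrative, not part of the JDK source) of the deferred
* threshold computation described above:
*
*   HashMap<String, Integer> m = new HashMap<>(17);
*   // now: table == null, threshold == tableSizeFor(17) == 32 (the capacity)
*   m.put("a", 1);
*   // the first put() triggers resize(), which allocates a table of length 32
*   // and recomputes threshold = 32 * 0.75 = 24
*/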
/**
* Constructs an empty HashMap with the given initial capacity and the default
* load factor (0.75f).
*/
public HashMap(int initialCapacity) {
this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
/**
* Constructs an empty HashMap with the default capacity (16) and the default
* load factor (0.75f).
*/
public HashMap() {
this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
}
/**
* Constructs a new HashMap containing the mappings of the given map m, with
* the default load factor (0.75f).
*/
public HashMap(Map<? extends K, ? extends V> m) {
this.loadFactor = DEFAULT_LOAD_FACTOR;
putMapEntries(m, false);
}
/**
* Stores all of m's key-value mappings into this HashMap.
* evict: false while the table is in creation mode (construction,
* deserialization); LinkedHashMap consults it in afterNodeInsertion to decide
* whether to evict the eldest entry. Plain HashMap ignores it.
* A worked example of the pre-sizing arithmetic follows the method.
*/
final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
int s = m.size();
// If m is not empty, store its mappings into this HashMap
if (s > 0) {
// Check whether table, the array + linked-list structure holding this HashMap's entries, has been initialized
if (table == null) { // pre-size
// Not yet initialized; s is the number of entries in m
float ft = ((float)s / loadFactor) + 1.0F;
int t = ((ft < (float)MAXIMUM_CAPACITY) ?
(int)ft : MAXIMUM_CAPACITY);
// If t exceeds the threshold, recompute threshold as the smallest power of 2 >= t
if (t > threshold)
threshold = tableSizeFor(t);
}
else if (s > threshold){
// If m's size exceeds the threshold, resize
resize();
}
// Iterate m via its entrySet and add each mapping to this HashMap
for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
K key = e.getKey();
V value = e.getValue();
// Delegate the insertion to this HashMap's putVal
putVal(hash(key), key, value, false, evict);
}
}
}
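/*
* A worked example (illustrative, not part of the JDK source) of the
* pre-sizing arithmetic above, assuming m holds 12 entries and the default
* load factor 0.75f:
*
*   ft = 12 / 0.75 + 1 = 17.0
*   t  = 17; t > threshold (0), so threshold = tableSizeFor(17) = 32
*
* The "+ 1.0F" compensates for the truncation to int, so the chosen capacity
* can hold all s entries without an immediate resize.
*/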
/**
* Returns the number of entries in this HashMap.
*/
public int size() {
return size;
}
/**
* Returns true if this HashMap contains no entries.
*/
public boolean isEmpty() {
return size == 0;
}
/**
* Returns the value mapped to the given key in this HashMap; the lookup
* algorithm lives in getNode(int hash, Object key).
* (1) If the key has no mapping, null is returned.
* (2) If key == null: the HashMap holds at most one null-key mapping, and
*     its value is returned.
*/
public V get(Object key) {
Node<K,V> e;
return (e = getNode(hash(key), key)) == null ? null : e.value;
}
/**
* Finds the node (a Node<K,V> object) for the given key and hash by
* traversing this HashMap. A note on the bucket-index computation follows
* the method.
*/
final Node<K,V> getNode(int hash, Object key) {
// tab holds the current table
// first holds the first node of the addressed bin
// e holds the next node while traversing
// n holds the length of the table array (not the number of entries)
// k holds the key of the node currently being compared
Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
if ((tab = table) != null && (n = tab.length) > 0 &&
(first = tab[(n - 1) & hash]) != null) {
// tab[(n - 1) & hash] locates the array slot (bin) for the key
if (first.hash == hash && // always check first node
((k = first.key) == key || (key != null && key.equals(k))))
// A match requires: (1) the node's hash equals the lookup hash, and (2) the node's key is the same reference as, or equals(), the lookup key
return first;
if ((e = first.next) != null) {
// The first node is not the one we want; follow first.next (if it exists)
if (first instanceof TreeNode)
// If the first node is a TreeNode, the bin has been converted to a
// red-black tree (its list reached TREEIFY_THRESHOLD while the table was
// large enough), so search it with the red-black tree algorithm
return ((TreeNode<K,V>)first).getTreeNode(hash, key);
do {
// Walk the list until the node is found; if it never is, fall through to the final return null
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k))))
return e;
} while ((e = e.next) != null);
}
// first has no successor and is not the node we want: fall through to return null (key not found)
}
return null;
}
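/*
* Illustrative note (not part of the JDK source): because the table length n
* is always a power of 2, (n - 1) & hash equals hash % n for non-negative
* values, but costs a single AND instead of a division. For example, with
* n = 16 and hash = 0x1234444C, the bucket index is 0x1234444C & 15 = 12.
*/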
/**
* Returns true if this HashMap contains a node for the given key.
*/
public boolean containsKey(Object key) {
// getNode looks up the key's node in this HashMap:
// not found (null) yields false; found (a node) yields true
return getNode(hash(key), key) != null;
}
/**
* Stores a key-value pair into this HashMap. If a node for the key already
* exists, its value is replaced and the old value is returned.
*/
public V put(K key, V value) {
return putVal(hash(key), key, value, false, true);
}
/**
* Inserts or replaces a key-value mapping in this HashMap.
* (1) hash: the hash of the key
* (2) key: the key
* (3) value: the value to insert, or to replace the old value with
* (4) onlyIfAbsent: false by default in the source, meaning insert when
*     absent and replace when present; true means an existing value is not updated
* (5) evict: used by LinkedHashMap; HashMap itself ignores it
*/
final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
boolean evict) {
// tab holds this HashMap's table
// p holds the node located by the hash
// n holds the current table capacity
// i holds the table index derived from the key's hash
Node<K,V>[] tab; Node<K,V> p; int n, i;
if ((tab = table) == null || (n = tab.length) == 0)
// The table is null or empty: resize (which also performs the initial allocation)
n = (tab = resize()).length;
if ((p = tab[i = (n - 1) & hash]) == null)
// tab[i] == null means bin i holds no data, so store a new node for key-value as the head of bin i
tab[i] = newNode(hash, key, value, null);
else {
// Otherwise the bin at tab[i] already holds at least one node
Node<K,V> e; K k;
if (p.hash == hash &&
((k = p.key) == key || (key != null && key.equals(k))))
// The first node of bin i already has the key being put; remember it in e
e = p;
else if (p instanceof TreeNode)
// If the first node p is a TreeNode (the bin grew past TREEIFY_THRESHOLD
// and went through treeifyBin(tab, hash)), insert with the red-black tree algorithm
e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
else {
for (int binCount = 0; ; ++binCount) {
// binCount counts the traversal, used to decide whether to treeify the bin
if ((e = p.next) == null) {
// Reached the tail of the list: create a node and append it as p.next
p.next = newNode(hash, key, value, null);
if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
treeifyBin(tab, hash);
break;
}
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k))))
break;
p = e;
}
}
if (e != null) { // existing mapping for key
// e != null means e is the existing node to update for the key; save its old value in oldValue
V oldValue = e.value;
if (!onlyIfAbsent || oldValue == null)
// Store the new value
e.value = value;
afterNodeAccess(e);
// Return the key's old value
return oldValue;
}
}
// Bump this HashMap's modification count, used to fail fast under unsynchronized concurrent access
++modCount;
if (++size > threshold)
// A new entry was added, so increment size; if it now exceeds the threshold, resize
resize();
afterNodeInsertion(evict);
// A new entry was added, so the old value is null
return null;
}
/**
* Resizes this HashMap and returns the enlarged Node<K,V>[] table.
*/
final Node<K,V>[] resize() {
// Save the old table
Node<K,V>[] oldTab = table;
// Save the old table capacity
int oldCap = (oldTab == null) ? 0 : oldTab.length;
// Save the old threshold
int oldThr = threshold;
// Initialize the new capacity and new threshold to 0
int newCap, newThr = 0;
if (oldCap > 0) {
// The old table has been allocated, i.e. oldCap > 0
if (oldCap >= MAXIMUM_CAPACITY) {
// The old capacity is already at least 1 << 30: set the threshold to
// Integer.MAX_VALUE and return the old table (it cannot grow further,
// but with this threshold the map can keep accepting entries)
threshold = Integer.MAX_VALUE;
return oldTab;
}
else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
oldCap >= DEFAULT_INITIAL_CAPACITY)
// The old capacity is below the maximum and at least the default (16): double both the capacity and the threshold
newThr = oldThr << 1;
}
else if (oldThr > 0)
// The old table is empty but the old threshold is > 0 (a precomputed capacity): use it as the new capacity
newCap = oldThr;
else {
// The old table is empty and the old threshold is 0: use the default capacity 16 and the default threshold 16 * 0.75 = 12
newCap = DEFAULT_INITIAL_CAPACITY;
newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
}
if (newThr == 0) {
// Compute the threshold for the new capacity
float ft = (float)newCap * loadFactor;
// The post-resize threshold
newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
(int)ft : Integer.MAX_VALUE);
}
threshold = newThr;
// Allocate a new table at the enlarged capacity
Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
table = newTab;
if (oldTab != null) {
// If the old table held data, transfer it into the new table
for (int j = 0; j < oldCap; ++j) {
Node<K,V> e;
// Fetch the bin at array index j
if ((e = oldTab[j]) != null) {
oldTab[j] = null;
if (e.next == null)
// A single-node bin can be reassigned directly
newTab[e.hash & (newCap - 1)] = e;
else if (e instanceof TreeNode)
// Split a red-black tree bin
((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
else {
// Copy a linked-list bin.
// Notably, positions are not recomputed from scratch: each element either
// keeps its index or moves to (original index + old array length).
Node<K,V> loHead = null, loTail = null;
Node<K,V> hiHead = null, hiTail = null;
Node<K,V> next;
do {
next = e.next;
if ((e.hash & oldCap) == 0) {
if (loTail == null)
loHead = e;
else
loTail.next = e;
loTail = e;
}
else {
if (hiTail == null)
hiHead = e;
else
hiTail.next = e;
hiTail = e;
}
} while ((e = next) != null);
// New-table index: hash & (newCap - 1)  --> the low x bits form the index
// Old-table index: hash & (oldCap - 1)  --> the low x-1 bits form the index
// newCap = oldCap << 1
// So after resize() the index uses one more hash bit than before.
// Entries whose x-th hash bit is 0 need no rehashing and stay at index j;
// entries whose x-th hash bit is 1 move to index j + oldCap
// (a worked example follows this method).
if (loTail != null) {
loTail.next = null;
newTab[j] = loHead;
}
if (hiTail != null) {
hiTail.next = null;
newTab[j + oldCap] = hiHead;
}
}
}
}
}
return newTab;
}
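/*
* A worked example (illustrative, not part of the JDK source) of the
* (e.hash & oldCap) split above. Assume oldCap = 16 (bit 0b10000) and bin
* j = 5, so every entry there satisfies hash & 15 == 5:
*
*   hash = ...0_0101 -> hash & 16 == 0 -> new index = hash & 31 = 5           (lo list)
*   hash = ...1_0101 -> hash & 16 != 0 -> new index = hash & 31 = 21 = 5 + 16 (hi list)
*
* Testing the one extra index bit is therefore equivalent to recomputing
* (newCap - 1) & hash, which is why each bin splits into exactly two lists.
*/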
/**
* Converts a bin's linked list into a red-black tree.
*/
final void treeifyBin(Node<K,V>[] tab, int hash) {
int n, index; Node<K,V> e;
/*
* If the table is null, or its length is below MIN_TREEIFY_CAPACITY
* (default 64), the structural conversion is not worthwhile yet.
* Multiple mappings pile up in one array slot because their hashes are equal
* modulo the table length, not (usually) because the hashes themselves are
* equal. Since identical hashes are unlikely, resizing the table normally
* spreads those keys over several slots once they are reduced modulo the
* new length.
*/
if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
resize();
// The table length is already >= MIN_TREEIFY_CAPACITY, so conversion is worthwhile.
// Locate the head of the bin from the hash masked by the table length
else if ((e = tab[index = (n - 1) & hash]) != null) {
TreeNode<K,V> hd = null, tl = null;
do {
// Convert this node into a tree node
TreeNode<K,V> p = replacementTreeNode(e, null);
// A null tail means there is no head (root) yet
if (tl == null)
// Point the head (root) at the current node
hd = p;
else {
// The tail is non-null; the next two lines build a doubly linked list:
// the current tree node's prev points at the tail,
p.prev = tl;
// and the tail's next points at the current node
tl.next = p;
}
// Make the current node the new tail
tl = p;
} while ((e = e.next) != null);
// So far we have only turned Node objects into TreeNode objects and the
// singly linked list into a doubly linked list.
// Install the converted doubly linked list in place of the original one,
// then build the actual tree
if ((tab[index] = hd) != null)
hd.treeify(tab);
}
}
/**
* Stores all of m's entries into this HashMap, replacing values for keys
* present in both.
*/
public void putAll(Map<? extends K, ? extends V> m) {
putMapEntries(m, true);
}
/**
* Removes the node for the given key and returns its value, or null if no
* such node exists.
*/
public V remove(Object key) {
Node<K,V> e;
return (e = removeNode(hash(key), key, null, false, true)) == null ?
null : e.value;
}
/**
* Removes the node for the given key from this HashMap.
*
* @param hash the hash of the key
* @param key the key
* @param value the value to match when matchValue is true
* @param matchValue if true, remove only when key and value both match
* @param movable if false, do not move other nodes while removing (true by default)
*/
final Node<K,V> removeNode(int hash, Object key, Object value,
boolean matchValue, boolean movable) {
// tab: this HashMap's table
// p: the node in the table located by the hash
// n: the table length
// index: the table index of the key's bin
Node<K,V>[] tab; Node<K,V> p; int n, index;
if ((tab = table) != null && (n = tab.length) > 0 &&
(p = tab[index = (n - 1) & hash]) != null) {
// node: the node to remove
// e: the traversal variable
// k: the key of the node under examination
// v: the value of the node to remove
Node<K,V> node = null, e; K k; V v;
if (p.hash == hash &&
((k = p.key) == key || (key != null && key.equals(k))))
// The node to remove is the head of the bin
node = p;
else if ((e = p.next) != null) {
// The bin holds further nodes
if (p instanceof TreeNode)
// The bin head p is a TreeNode: locate the node to remove with the red-black tree algorithm
node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
else {
// Otherwise walk the linked list to find the node to remove
do {
if (e.hash == hash &&
((k = e.key) == key ||
(key != null && key.equals(k)))) {
// Found the node to remove; exit the loop
node = e;
break;
}
// Keep p as the predecessor of the node to remove, for relinking the list afterwards
p = e;
} while ((e = e.next) != null);
}
}
if (node != null && (!matchValue || (v = node.value) == value ||
(value != null && value.equals(v)))) {
// A tree node: remove it with the red-black tree algorithm, relocating other nodes as needed
if (node instanceof TreeNode)
((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
// The node to remove is the head of the bin
else if (node == p)
// Relink the bin head
tab[index] = node.next;
else
// Remove node by redirecting p.next to node's successor
p.next = node.next;
// Bump this HashMap's modification count
++modCount;
// Decrement this HashMap's entry count
--size;
afterNodeRemoval(node);
// Return the removed node
return node;
}
}
return null;
}
/**
* Removes all entries from this HashMap and resets size to 0.
*/
public void clear() {
Node<K,V>[] tab;
modCount++;
if ((tab = table) != null && size > 0) {
size = 0;
for (int i = 0; i < tab.length; ++i)
tab[i] = null;
}
}
/**
* Returns true if this HashMap maps one or more keys to the given value.
*/
public boolean containsValue(Object value) {
Node<K,V>[] tab; V v;
if ((tab = table) != null && size > 0) {
for (int i = 0; i < tab.length; ++i) {
// The outer loop walks the table array
for (Node<K,V> e = tab[i]; e != null; e = e.next) {
// The inner loop walks the bin at tab[i]
if ((v = e.value) == value ||
(value != null && value.equals(v)))
// Return true as soon as a match is found; otherwise keep scanning
return true;
}
}
}
// The table is null, the map is empty, or no matching value was found: return false
return false;
}
/**
* Returns a Set view of all keys in this HashMap.
*/
public Set<K> keySet() {
Set<K> ks = keySet;
if (ks == null) {
ks = new KeySet();
keySet = ks;
}
return ks;
}
final class KeySet extends AbstractSet<K> {
public final int size() { return size; }
public final void clear() { HashMap.this.clear(); }
public final Iterator<K> iterator() { return new KeyIterator(); }
public final boolean contains(Object o) { return containsKey(o); }
public final boolean remove(Object key) {
return removeNode(hash(key), key, null, false, true) != null;
}
public final Spliterator<K> spliterator() {
return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
}
public final void forEach(Consumer<? super K> action) {
Node<K,V>[] tab;
if (action == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next)
action.accept(e.key);
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
}
}
/**
* Similar to keySet(); returns a Collection view of all values.
*/
public Collection<V> values() {
Collection<V> vs = values;
if (vs == null) {
vs = new Values();
values = vs;
}
return vs;
}
final class Values extends AbstractCollection<V> {
public final int size() { return size; }
public final void clear() { HashMap.this.clear(); }
public final Iterator<V> iterator() { return new ValueIterator(); }
public final boolean contains(Object o) { return containsValue(o); }
public final Spliterator<V> spliterator() {
return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);
}
public final void forEach(Consumer<? super V> action) {
Node<K,V>[] tab;
if (action == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next)
action.accept(e.value);
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
}
}
/**
* Returns a Set view of this HashMap's Map.Entry<K,V> mappings; modifications
* to the map are reflected in the returned set.
*/
public Set<Map.Entry<K,V>> entrySet() {
Set<Map.Entry<K,V>> es;
return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
}
final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
public final int size() { return size; }
public final void clear() { HashMap.this.clear(); }
public final Iterator<Map.Entry<K,V>> iterator() {
return new EntryIterator();
}
public final boolean contains(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?,?> e = (Map.Entry<?,?>) o;
Object key = e.getKey();
Node<K,V> candidate = getNode(hash(key), key);
return candidate != null && candidate.equals(e);
}
public final boolean remove(Object o) {
if (o instanceof Map.Entry) {
Map.Entry<?,?> e = (Map.Entry<?,?>) o;
Object key = e.getKey();
Object value = e.getValue();
return removeNode(hash(key), key, value, true, true) != null;
}
return false;
}
public final Spliterator<Map.Entry<K,V>> spliterator() {
return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
}
public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
Node<K,V>[] tab;
if (action == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next)
action.accept(e);
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
}
}
/**
* Returns the value for the given key, or the given defaultValue if no
* mapping exists.
*/
@Override
public V getOrDefault(Object key, V defaultValue) {
Node<K,V> e;
return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;
}
/**
* Adds a mapping if no node for the key exists; if one exists, leaves it
* unchanged and returns the existing value. A usage sketch follows the method.
*/
@Override
public V putIfAbsent(K key, V value) {
return putVal(hash(key), key, value, true, true);
}
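/*
* Usage sketch (illustrative, not part of the JDK source):
*
*   Map<String, Integer> m = new HashMap<>();
*   m.putIfAbsent("a", 1);   // no mapping yet: inserts 1, returns null
*   m.putIfAbsent("a", 2);   // mapping exists: keeps 1, returns 1
*
* Note that because putVal also replaces when oldValue == null, a key that is
* explicitly mapped to null does get overwritten by putIfAbsent.
*/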
/**
* Removes the node only if the key is currently mapped to the given value;
* key and value must both match. Returns true on success.
*/
@Override
public boolean remove(Object key, Object value) {
return removeNode(hash(key), key, value, true, true) != null;
}
/**
* Replaces the value for the key, but only if it is currently mapped to
* oldValue; key and oldValue must both match.
*/
@Override
public boolean replace(K key, V oldValue, V newValue) {
Node<K,V> e; V v;
if ((e = getNode(hash(key), key)) != null &&
((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
e.value = newValue;
afterNodeAccess(e);
return true;
}
return false;
}
/**
* Replaces the value for the key whenever a node for the key is found.
*/
@Override
public V replace(K key, V value) {
Node<K,V> e;
if ((e = getNode(hash(key), key)) != null) {
V oldValue = e.value;
e.value = value;
afterNodeAccess(e);
return oldValue;
}
return null;
}
/**
* Map implementations such as HashMap, ConcurrentHashMap and Hashtable provide
* this method; it is convenient for building in-memory caches with concise,
* readable code. It first checks whether the map already holds a value for the
* key; if not, it computes one via mappingFunction(key) and stores the
* key = value mapping. Note that HashMap itself is not thread-safe; use
* ConcurrentHashMap when the cache is accessed concurrently.
* A usage sketch follows the method.
*/
@Override
public V computeIfAbsent(K key,
Function<? super K, ? extends V> mappingFunction) {
if (mappingFunction == null)
throw new NullPointerException();
int hash = hash(key);
Node<K,V>[] tab; Node<K,V> first; int n, i;
int binCount = 0;
TreeNode<K,V> t = null;
Node<K,V> old = null;
if (size > threshold || (tab = table) == null ||
(n = tab.length) == 0)
n = (tab = resize()).length;
if ((first = tab[i = (n - 1) & hash]) != null) {
if (first instanceof TreeNode)
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
else {
Node<K,V> e = first; K k;
do {
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k)))) {
old = e;
break;
}
++binCount;
} while ((e = e.next) != null);
}
V oldValue;
if (old != null && (oldValue = old.value) != null) {
afterNodeAccess(old);
return oldValue;
}
}
// Up to this point the code has looked the key up, returning its value when found; below, mappingFunction computes the value and stores it in this HashMap
V v = mappingFunction.apply(key);
if (v == null) {
return null;
} else if (old != null) {
old.value = v;
afterNodeAccess(old);
return v;
}
else if (t != null)
t.putTreeVal(this, tab, hash, key, v);
else {
tab[i] = newNode(hash, key, v, first);
if (binCount >= TREEIFY_THRESHOLD - 1)
treeifyBin(tab, hash);
}
++modCount;
++size;
afterNodeInsertion(true);
return v;
}
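/*
* Usage sketch (illustrative, not part of the JDK source): the classic
* multimap idiom, where the value is computed only when the key is absent:
*
*   Map<String, List<Integer>> groups = new HashMap<>();
*   groups.computeIfAbsent("even", k -> new ArrayList<>()).add(2);
*   groups.computeIfAbsent("even", k -> new ArrayList<>()).add(4);
*   // groups = {even=[2, 4]}; the list is created once and then reused
*/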
/**
* Looks up the key; if it is absent, returns null.
* If present: (1) a non-null recomputed value is stored and returned;
* (2) a null recomputed value removes the key's node from this HashMap.
*/
public V computeIfPresent(K key,
BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
if (remappingFunction == null)
throw new NullPointerException();
Node<K,V> e; V oldValue;
int hash = hash(key);
// The key is present and its value is non-null
if ((e = getNode(hash, key)) != null &&
(oldValue = e.value) != null) {
// Recompute the key's value with the function
V v = remappingFunction.apply(key, oldValue);
if (v != null) {
e.value = v;
afterNodeAccess(e);
return v;
}
else
// The recomputed value is null: remove the node
removeNode(hash, key, null, false, true);
}
// The key has no mapping
return null;
}
/**
* Looks up the key. If absent: (1) a null computed value makes the method
* return null; (2) a non-null computed value is stored and returned.
* If present: (1) a non-null computed value is stored and returned;
* (2) a null computed value removes the key's node from this HashMap.
* A usage sketch follows the method.
*/
@Override
public V compute(K key,
BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
if (remappingFunction == null)
throw new NullPointerException();
int hash = hash(key);
Node<K,V>[] tab; Node<K,V> first; int n, i;
int binCount = 0;
TreeNode<K,V> t = null;
Node<K,V> old = null;
// Check whether a resize is needed
if (size > threshold || (tab = table) == null ||
(n = tab.length) == 0)
n = (tab = resize()).length;
// The key's hash locates the bin in the table
if ((first = tab[i = (n - 1) & hash]) != null) {
if (first instanceof TreeNode)
// Look the node up with the red-black tree algorithm;
// t records the root of the red-black tree
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
else {
Node<K,V> e = first; K k;
// Walk the linked list to find the node
do {
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k)))) {
old = e;
break;
}
// Count the traversal, to decide later whether to convert the list into a red-black tree
++binCount;
} while ((e = e.next) != null);
}
}
// Fetch the key's old value
V oldValue = (old == null) ? null : old.value;
// Recompute the key's value v
V v = remappingFunction.apply(key, oldValue);
if (old != null) {
// The key is present in this HashMap
if (v != null) {
// The recomputed value v is non-null: replace the old value
old.value = v;
afterNodeAccess(old);
}
else
// The recomputed value v is null: remove the key's node
removeNode(hash, key, null, false, true);
}
// The key is absent from this HashMap
else if (v != null) {
if (t != null)
// Insert the new node into the red-black tree
t.putTreeVal(this, tab, hash, key, v);
else {
// Insert the new node at the head of the bin
tab[i] = newNode(hash, key, v, first);
// After insertion, decide whether the bin needs treeifying
if (binCount >= TREEIFY_THRESHOLD - 1)
treeifyBin(tab, hash);
}
// Bump this HashMap's modification count
++modCount;
// Increment this HashMap's size
++size;
afterNodeInsertion(true);
}
return v;
}
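/*
* Usage sketch (illustrative, not part of the JDK source): word counting,
* handling the absent and present cases in a single call:
*
*   Map<String, Integer> counts = new HashMap<>();
*   for (String w : new String[] {"a", "b", "a"})
*       counts.compute(w, (k, v) -> (v == null) ? 1 : v + 1);
*   // counts = {a=2, b=1}
*/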
/**
* Works largely like compute; the difference lies in the BiFunction's apply
* arguments: here they are oldValue and value, and the caller's merge logic
* combines the two values into the one that gets stored.
* A usage sketch follows the method.
*/
@Override
public V merge(K key, V value,
BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
if (value == null)
throw new NullPointerException();
if (remappingFunction == null)
throw new NullPointerException();
int hash = hash(key);
Node<K,V>[] tab; Node<K,V> first; int n, i;
int binCount = 0;
TreeNode<K,V> t = null;
Node<K,V> old = null;
// Check whether a resize is needed
if (size > threshold || (tab = table) == null ||
(n = tab.length) == 0)
// Resize
n = (tab = resize()).length;
// Look up the node for the key
if ((first = tab[i = (n - 1) & hash]) != null) {
if (first instanceof TreeNode)
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
else {
Node<K,V> e = first; K k;
do {
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k)))) {
old = e;
break;
}
++binCount;
} while ((e = e.next) != null);
}
}
// The node exists
if (old != null) {
V v;
if (old.value != null)
// The old value is non-null: combine it with the given value to produce the new value
v = remappingFunction.apply(old.value, value);
else
v = value;
if (v != null) {
old.value = v;
afterNodeAccess(old);
}
else
removeNode(hash, key, null, false, true);
return v;
}
// This if looks redundant: value was already null-checked on the method's first line
if (value != null) {
if (t != null)
// Insert the new node into the red-black tree
t.putTreeVal(this, tab, hash, key, value);
else {
// Insert the new node at the head of the bin
tab[i] = newNode(hash, key, value, first);
if (binCount >= TREEIFY_THRESHOLD - 1)
treeifyBin(tab, hash);
}
++modCount;
++size;
afterNodeInsertion(true);
}
return value;
}
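/*
* Usage sketch (illustrative, not part of the JDK source): merge() expresses
* the same word count as the compute() sketch above, but more compactly,
* because the absent case simply stores the given value:
*
*   Map<String, Integer> counts = new HashMap<>();
*   for (String w : new String[] {"a", "b", "a"})
*       counts.merge(w, 1, Integer::sum);
*   // counts = {a=2, b=1}
*/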
/**
* Callers implement BiConsumer's void accept(Object o, Object o2), where o is
* the key and o2 the value, and can process every mapping in the map as they
* see fit.
*/
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
Node<K,V>[] tab;
if (action == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next)
action.accept(e.key, e.value);
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
}
/**
* Callers implement BiFunction's Object apply(Object o, Object o2), where o is
* the key and o2 the value; every value is reassigned from the function's result.
*/
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
Node<K,V>[] tab;
if (function == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next) {
e.value = function.apply(e.key, e.value);
}
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
}
/**
* Returns a shallow copy of this HashMap.
*/
@SuppressWarnings("unchecked")
@Override
public Object clone() {
HashMap<K,V> result;
try {
result = (HashMap<K,V>)super.clone();
} catch (CloneNotSupportedException e) {
// this shouldn't happen, since we are Cloneable
throw new InternalError(e);
}
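// reinitialize() resets the copy's internal state, as listed below: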
// table = null;
// entrySet = null;
// keySet = null;
// values = null;
// modCount = 0;
// threshold = 0;
// size = 0;
result.reinitialize();
// Copy every node of this HashMap into the new one
result.putMapEntries(this, false);
return result;
}
// These methods are also used when serializing HashSets
final float loadFactor() { return loadFactor; }
final int capacity() {
return (table != null) ? table.length :
(threshold > 0) ? threshold :
DEFAULT_INITIAL_CAPACITY;
}
/**
* Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
* serialize it).
*
* @serialData The <i>capacity</i> of the HashMap (the length of the
* bucket array) is emitted (int), followed by the
* <i>size</i> (an int, the number of key-value
* mappings), followed by the key (Object) and value (Object)
* for each key-value mapping. The key-value mappings are
* emitted in no particular order.
*/
private void writeObject(java.io.ObjectOutputStream s)
throws IOException {
int buckets = capacity();
// Write out the threshold, loadfactor, and any hidden stuff
s.defaultWriteObject();
s.writeInt(buckets);
s.writeInt(size);
internalWriteEntries(s);
}
/**
* Reconstitute the {@code HashMap} instance from a stream (i.e.,
* deserialize it).
*/
private void readObject(java.io.ObjectInputStream s)
throws IOException, ClassNotFoundException {
// Read in the threshold (ignored), loadfactor, and any hidden stuff
s.defaultReadObject();
reinitialize();
if (loadFactor <= 0 || Float.isNaN(loadFactor))
throw new InvalidObjectException("Illegal load factor: " +
loadFactor);
s.readInt(); // Read and ignore number of buckets
int mappings = s.readInt(); // Read number of mappings (size)
if (mappings < 0)
throw new InvalidObjectException("Illegal mappings count: " +
mappings);
else if (mappings > 0) { // (if zero, use defaults)
// Size the table using given load factor only if within
// range of 0.25...4.0
float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
float fc = (float)mappings / lf + 1.0f;
int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
DEFAULT_INITIAL_CAPACITY :
(fc >= MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY :
tableSizeFor((int)fc));
float ft = (float)cap * lf;
threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
(int)ft : Integer.MAX_VALUE);
@SuppressWarnings({"rawtypes","unchecked"})
Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
table = tab;
// Read the keys and values, and put the mappings in the HashMap
for (int i = 0; i < mappings; i++) {
@SuppressWarnings("unchecked")
K key = (K) s.readObject();
@SuppressWarnings("unchecked")
V value = (V) s.readObject();
putVal(hash(key), key, value, false, false);
}
}
}
/* ------------------------------------------------------------ */
// iterators
abstract class HashIterator {
Node<K,V> next; // next entry to return
Node<K,V> current; // current entry
int expectedModCount; // for fast-fail
int index; // current slot
HashIterator() {
expectedModCount = modCount;
Node<K,V>[] t = table;
current = next = null;
index = 0;
if (t != null && size > 0) { // advance to first entry
do {} while (index < t.length && (next = t[index++]) == null);
}
}
public final boolean hasNext() {
return next != null;
}
final Node<K,V> nextNode() {
Node<K,V>[] t;
Node<K,V> e = next;
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
if (e == null)
throw new NoSuchElementException();
if ((next = (current = e).next) == null && (t = table) != null) {
do {} while (index < t.length && (next = t[index++]) == null);
}
return e;
}
public final void remove() {
Node<K,V> p = current;
if (p == null)
throw new IllegalStateException();
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
current = null;
K key = p.key;
removeNode(hash(key), key, null, false, false);
expectedModCount = modCount;
}
}
final class KeyIterator extends HashIterator
implements Iterator<K> {
public final K next() { return nextNode().key; }
}
final class ValueIterator extends HashIterator
implements Iterator<V> {
public final V next() { return nextNode().value; }
}
final class EntryIterator extends HashIterator
implements Iterator<Map.Entry<K,V>> {
public final Map.Entry<K,V> next() { return nextNode(); }
}
/* ------------------------------------------------------------ */
// spliterators
static class HashMapSpliterator<K,V> {
final HashMap<K,V> map;
Node<K,V> current; // current node
int index; // current index, modified on advance/split
int fence; // one past last index
int est; // size estimate
int expectedModCount; // for comodification checks
HashMapSpliterator(HashMap<K,V> m, int origin,
int fence, int est,
int expectedModCount) {
this.map = m;
this.index = origin;
this.fence = fence;
this.est = est;
this.expectedModCount = expectedModCount;
}
final int getFence() { // initialize fence and size on first use
int hi;
if ((hi = fence) < 0) {
HashMap<K,V> m = map;
est = m.size;
expectedModCount = m.modCount;
Node<K,V>[] tab = m.table;
hi = fence = (tab == null) ? 0 : tab.length;
}
return hi;
}
public final long estimateSize() {
getFence(); // force init
return (long) est;
}
}
static final class KeySpliterator<K,V>
extends HashMapSpliterator<K,V>
implements Spliterator<K> {
KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
int expectedModCount) {
super(m, origin, fence, est, expectedModCount);
}
public KeySpliterator<K,V> trySplit() {
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
return (lo >= mid || current != null) ? null :
new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
expectedModCount);
}
public void forEachRemaining(Consumer<? super K> action) {
int i, hi, mc;
if (action == null)
throw new NullPointerException();
HashMap<K,V> m = map;
Node<K,V>[] tab = m.table;
if ((hi = fence) < 0) {
mc = expectedModCount = m.modCount;
hi = fence = (tab == null) ? 0 : tab.length;
}
else
mc = expectedModCount;
if (tab != null && tab.length >= hi &&
(i = index) >= 0 && (i < (index = hi) || current != null)) {
Node<K,V> p = current;
current = null;
do {
if (p == null)
p = tab[i++];
else {
action.accept(p.key);
p = p.next;
}
} while (p != null || i < hi);
if (m.modCount != mc)
throw new ConcurrentModificationException();
}
}
public boolean tryAdvance(Consumer<? super K> action) {
int hi;
if (action == null)
throw new NullPointerException();
Node<K,V>[] tab = map.table;
if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
while (current != null || index < hi) {
if (current == null)
current = tab[index++];
else {
K k = current.key;
current = current.next;
action.accept(k);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
}
}
}
return false;
}
public int characteristics() {
return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
Spliterator.DISTINCT;
}
}
static final class ValueSpliterator<K,V>
extends HashMapSpliterator<K,V>
implements Spliterator<V> {
ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
int expectedModCount) {
super(m, origin, fence, est, expectedModCount);
}
public ValueSpliterator<K,V> trySplit() {
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
return (lo >= mid || current != null) ? null :
new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
expectedModCount);
}
public void forEachRemaining(Consumer<? super V> action) {
int i, hi, mc;
if (action == null)
throw new NullPointerException();
HashMap<K,V> m = map;
Node<K,V>[] tab = m.table;
if ((hi = fence) < 0) {
mc = expectedModCount = m.modCount;
hi = fence = (tab == null) ? 0 : tab.length;
}
else
mc = expectedModCount;
if (tab != null && tab.length >= hi &&
(i = index) >= 0 && (i < (index = hi) || current != null)) {
Node<K,V> p = current;
current = null;
do {
if (p == null)
p = tab[i++];
else {
action.accept(p.value);
p = p.next;
}
} while (p != null || i < hi);
if (m.modCount != mc)
throw new ConcurrentModificationException();
}
}
public boolean tryAdvance(Consumer<? super V> action) {
int hi;
if (action == null)
throw new NullPointerException();
Node<K,V>[] tab = map.table;
if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
while (current != null || index < hi) {
if (current == null)
current = tab[index++];
else {
V v = current.value;
current = current.next;
action.accept(v);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
}
}
}
return false;
}
public int characteristics() {
return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
}
}
static final class EntrySpliterator<K,V>
extends HashMapSpliterator<K,V>
implements Spliterator<Map.Entry<K,V>> {
EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
int expectedModCount) {
super(m, origin, fence, est, expectedModCount);
}
public EntrySpliterator<K,V> trySplit() {
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
return (lo >= mid || current != null) ? null :
new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
expectedModCount);
}
public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
int i, hi, mc;
if (action == null)
throw new NullPointerException();
HashMap<K,V> m = map;
Node<K,V>[] tab = m.table;
if ((hi = fence) < 0) {
mc = expectedModCount = m.modCount;
hi = fence = (tab == null) ? 0 : tab.length;
}
else
mc = expectedModCount;
if (tab != null && tab.length >= hi &&
(i = index) >= 0 && (i < (index = hi) || current != null)) {
Node<K,V> p = current;
current = null;
do {
if (p == null)
p = tab[i++];
else {
action.accept(p);
p = p.next;
}
} while (p != null || i < hi);
if (m.modCount != mc)
throw new ConcurrentModificationException();
}
}
public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
int hi;
if (action == null)
throw new NullPointerException();
Node<K,V>[] tab = map.table;
if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
while (current != null || index < hi) {
if (current == null)
current = tab[index++];
else {
Node<K,V> e = current;
current = current.next;
action.accept(e);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
}
}
}
return false;
}
public int characteristics() {
return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
Spliterator.DISTINCT;
}
}
/* ------------------------------------------------------------ */
// LinkedHashMap support
/*
* The following package-protected methods are designed to be
* overridden by LinkedHashMap, but not by any other subclass.
* Nearly all other internal methods are also package-protected
* but are declared final, so can be used by LinkedHashMap, view
* classes, and HashSet.
*/
// Create a regular (non-tree) node
Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
return new Node<>(hash, key, value, next);
}
// For conversion from TreeNodes to plain nodes
Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
return new Node<>(p.hash, p.key, p.value, next);
}
// Create a tree bin node
TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
return new TreeNode<>(hash, key, value, next);
}
// For treeifyBin
TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
return new TreeNode<>(p.hash, p.key, p.value, next);
}
/**
* Reset to initial default state. Called by clone and readObject.
*/
void reinitialize() {
table = null;
entrySet = null;
keySet = null;
values = null;
modCount = 0;
threshold = 0;
size = 0;
}
// Callbacks to allow LinkedHashMap post-actions
void afterNodeAccess(Node<K,V> p) { }
void afterNodeInsertion(boolean evict) { }
void afterNodeRemoval(Node<K,V> p) { }
// Called only from writeObject, to ensure compatible ordering.
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
Node<K,V>[] tab;
if (size > 0 && (tab = table) != null) {
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next) {
s.writeObject(e.key);
s.writeObject(e.value);
}
}
}
}
/* ------------------------------------------------------------ */
// Tree bins
/**
* Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
* extends Node) so can be used as extension of either regular or
* linked node.
*/
static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
TreeNode<K,V> parent; // red-black tree links
TreeNode<K,V> left;
TreeNode<K,V> right;
TreeNode<K,V> prev; // needed to unlink next upon deletion
boolean red;
TreeNode(int hash, K key, V val, Node<K,V> next) {
super(hash, key, val, next);
}
/**
* Returns root of tree containing this node.
*/
final TreeNode<K,V> root() {
for (TreeNode<K,V> r = this, p;;) {
if ((p = r.parent) == null)
return r;
r = p;
}
}
/**
* Ensures that the given root is the first node of its bin.
*/
static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
int n;
if (root != null && tab != null && (n = tab.length) > 0) {
int index = (n - 1) & root.hash;
TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
if (root != first) {
Node<K,V> rn;
tab[index] = root;
TreeNode<K,V> rp = root.prev;
if ((rn = root.next) != null)
((TreeNode<K,V>)rn).prev = rp;
if (rp != null)
rp.next = rn;
if (first != null)
first.prev = root;
root.next = first;
root.prev = null;
}
assert checkInvariants(root);
}
}
/**
* Finds the node starting at root p with the given hash and key.
* The kc argument caches comparableClassFor(key) upon first use
* comparing keys.
*/
final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
TreeNode<K,V> p = this;
do {
int ph, dir; K pk;
TreeNode<K,V> pl = p.left, pr = p.right, q;
if ((ph = p.hash) > h)
p = pl;
else if (ph < h)
p = pr;
else if ((pk = p.key) == k || (k != null && k.equals(pk)))
return p;
else if (pl == null)
p = pr;
else if (pr == null)
p = pl;
else if ((kc != null ||
(kc = comparableClassFor(k)) != null) &&
(dir = compareComparables(kc, k, pk)) != 0)
p = (dir < 0) ? pl : pr;
else if ((q = pr.find(h, k, kc)) != null)
return q;
else
p = pl;
} while (p != null);
return null;
}
/**
* Calls find for root node.
*/
final TreeNode<K,V> getTreeNode(int h, Object k) {
return ((parent != null) ? root() : this).find(h, k, null);
}
/**
* Tie-breaking utility for ordering insertions when equal
* hashCodes and non-comparable. We don't require a total
* order, just a consistent insertion rule to maintain
* equivalence across rebalancings. Tie-breaking further than
* necessary simplifies testing a bit.
*/
static int tieBreakOrder(Object a, Object b) {
int d;
if (a == null || b == null ||
(d = a.getClass().getName().
compareTo(b.getClass().getName())) == 0)
d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
-1 : 1);
return d;
}
/**
* Forms tree of the nodes linked from this node.
*/
final void treeify(Node<K,V>[] tab) {
TreeNode<K,V> root = null;
for (TreeNode<K,V> x = this, next; x != null; x = next) {
next = (TreeNode<K,V>)x.next;
x.left = x.right = null;
if (root == null) {
x.parent = null;
x.red = false;
root = x;
}
else {
K k = x.key;
int h = x.hash;
Class<?> kc = null;
for (TreeNode<K,V> p = root;;) {
int dir, ph;
K pk = p.key;
if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0)
dir = tieBreakOrder(k, pk);
TreeNode<K,V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
x.parent = xp;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
root = balanceInsertion(root, x);
break;
}
}
}
}
moveRootToFront(tab, root);
}
/**
* Returns a list of non-TreeNodes replacing those linked from
* this node.
*/
final Node<K,V> untreeify(HashMap<K,V> map) {
Node<K,V> hd = null, tl = null;
for (Node<K,V> q = this; q != null; q = q.next) {
Node<K,V> p = map.replacementNode(q, null);
if (tl == null)
hd = p;
else
tl.next = p;
tl = p;
}
return hd;
}
/**
* Tree version of putVal.
*/
final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
int h, K k, V v) {
Class<?> kc = null;
boolean searched = false;
TreeNode<K,V> root = (parent != null) ? root() : this;
for (TreeNode<K,V> p = root;;) {
int dir, ph; K pk;
if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((pk = p.key) == k || (k != null && k.equals(pk)))
return p;
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0) {
if (!searched) {
TreeNode<K,V> q, ch;
searched = true;
if (((ch = p.left) != null &&
(q = ch.find(h, k, kc)) != null) ||
((ch = p.right) != null &&
(q = ch.find(h, k, kc)) != null))
return q;
}
dir = tieBreakOrder(k, pk);
}
TreeNode<K,V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
Node<K,V> xpn = xp.next;
TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
if (dir <= 0)
xp.left = x;
else
xp.right = x;
xp.next = x;
x.parent = x.prev = xp;
if (xpn != null)
((TreeNode<K,V>)xpn).prev = x;
moveRootToFront(tab, balanceInsertion(root, x));
return null;
}
}
}
/**
* Removes the given node, that must be present before this call.
* This is messier than typical red-black deletion code because we
* cannot swap the contents of an interior node with a leaf
* successor that is pinned by "next" pointers that are accessible
* independently during traversal. So instead we swap the tree
* linkages. If the current tree appears to have too few nodes,
* the bin is converted back to a plain bin. (The test triggers
* somewhere between 2 and 6 nodes, depending on tree structure).
*/
final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
boolean movable) {
int n;
if (tab == null || (n = tab.length) == 0)
return;
int index = (n - 1) & hash;
TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
if (pred == null)
tab[index] = first = succ;
else
pred.next = succ;
if (succ != null)
succ.prev = pred;
if (first == null)
return;
if (root.parent != null)
root = root.root();
if (root == null || root.right == null ||
(rl = root.left) == null || rl.left == null) {
tab[index] = first.untreeify(map); // too small
return;
}
TreeNode<K,V> p = this, pl = left, pr = right, replacement;
if (pl != null && pr != null) {
TreeNode<K,V> s = pr, sl;
while ((sl = s.left) != null) // find successor
s = sl;
boolean c = s.red; s.red = p.red; p.red = c; // swap colors
TreeNode<K,V> sr = s.right;
TreeNode<K,V> pp = p.parent;
if (s == pr) { // p was s's direct parent
p.parent = s;
s.right = p;
}
else {
TreeNode<K,V> sp = s.parent;
if ((p.parent = sp) != null) {
if (s == sp.left)
sp.left = p;
else
sp.right = p;
}
if ((s.right = pr) != null)
pr.parent = s;
}
p.left = null;
if ((p.right = sr) != null)
sr.parent = p;
if ((s.left = pl) != null)
pl.parent = s;
if ((s.parent = pp) == null)
root = s;
else if (p == pp.left)
pp.left = s;
else
pp.right = s;
if (sr != null)
replacement = sr;
else
replacement = p;
}
else if (pl != null)
replacement = pl;
else if (pr != null)
replacement = pr;
else
replacement = p;
if (replacement != p) {
TreeNode<K,V> pp = replacement.parent = p.parent;
if (pp == null)
root = replacement;
else if (p == pp.left)
pp.left = replacement;
else
pp.right = replacement;
p.left = p.right = p.parent = null;
}
TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);
if (replacement == p) { // detach
TreeNode<K,V> pp = p.parent;
p.parent = null;
if (pp != null) {
if (p == pp.left)
pp.left = null;
else if (p == pp.right)
pp.right = null;
}
}
if (movable)
moveRootToFront(tab, r);
}
/**
* Splits nodes in a tree bin into lower and upper tree bins,
* or untreeifies if now too small. Called only from resize;
* see above discussion about split bits and indices.
*
* @param map the map
* @param tab the table for recording bin heads
* @param index the index of the table being split
* @param bit the bit of hash to split on
*/
final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
TreeNode<K,V> b = this;
// Relink into lo and hi lists, preserving order
TreeNode<K,V> loHead = null, loTail = null;
TreeNode<K,V> hiHead = null, hiTail = null;
int lc = 0, hc = 0;
for (TreeNode<K,V> e = b, next; e != null; e = next) {
next = (TreeNode<K,V>)e.next;
e.next = null;
if ((e.hash & bit) == 0) {
if ((e.prev = loTail) == null)
loHead = e;
else
loTail.next = e;
loTail = e;
++lc;
}
else {
if ((e.prev = hiTail) == null)
hiHead = e;
else
hiTail.next = e;
hiTail = e;
++hc;
}
}
if (loHead != null) {
if (lc <= UNTREEIFY_THRESHOLD)
tab[index] = loHead.untreeify(map);
else {
tab[index] = loHead;
if (hiHead != null) // (else is already treeified)
loHead.treeify(tab);
}
}
if (hiHead != null) {
if (hc <= UNTREEIFY_THRESHOLD)
tab[index + bit] = hiHead.untreeify(map);
else {
tab[index + bit] = hiHead;
if (loHead != null)
hiHead.treeify(tab);
}
}
}
/* ------------------------------------------------------------ */
// Red-black tree methods, all adapted from CLR
static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
TreeNode<K,V> p) {
TreeNode<K,V> r, pp, rl;
if (p != null && (r = p.right) != null) {
if ((rl = p.right = r.left) != null)
rl.parent = p;
if ((pp = r.parent = p.parent) == null)
(root = r).red = false;
else if (pp.left == p)
pp.left = r;
else
pp.right = r;
r.left = p;
p.parent = r;
}
return root;
}
static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
TreeNode<K,V> p) {
TreeNode<K,V> l, pp, lr;
if (p != null && (l = p.left) != null) {
if ((lr = p.left = l.right) != null)
lr.parent = p;
if ((pp = l.parent = p.parent) == null)
(root = l).red = false;
else if (pp.right == p)
pp.right = l;
else
pp.left = l;
l.right = p;
p.parent = l;
}
return root;
}
static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
TreeNode<K,V> x) {
x.red = true;
for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
if ((xp = x.parent) == null) {
x.red = false;
return x;
}
else if (!xp.red || (xpp = xp.parent) == null)
return root;
if (xp == (xppl = xpp.left)) {
if ((xppr = xpp.right) != null && xppr.red) {
xppr.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
}
else {
if (x == xp.right) {
root = rotateLeft(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateRight(root, xpp);
}
}
}
}
else {
if (xppl != null && xppl.red) {
xppl.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
}
else {
if (x == xp.left) {
root = rotateRight(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateLeft(root, xpp);
}
}
}
}
}
}
static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
TreeNode<K,V> x) {
for (TreeNode<K,V> xp, xpl, xpr;;) {
if (x == null || x == root)
return root;
else if ((xp = x.parent) == null) {
x.red = false;
return x;
}
else if (x.red) {
x.red = false;
return root;
}
else if ((xpl = xp.left) == x) {
if ((xpr = xp.right) != null && xpr.red) {
xpr.red = false;
xp.red = true;
root = rotateLeft(root, xp);
xpr = (xp = x.parent) == null ? null : xp.right;
}
if (xpr == null)
x = xp;
else {
TreeNode<K,V> sl = xpr.left, sr = xpr.right;
if ((sr == null || !sr.red) &&
(sl == null || !sl.red)) {
xpr.red = true;
x = xp;
}
else {
if (sr == null || !sr.red) {
if (sl != null)
sl.red = false;
xpr.red = true;
root = rotateRight(root, xpr);
xpr = (xp = x.parent) == null ?
null : xp.right;
}
if (xpr != null) {
xpr.red = (xp == null) ? false : xp.red;
if ((sr = xpr.right) != null)
sr.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateLeft(root, xp);
}
x = root;
}
}
}
else { // symmetric
if (xpl != null && xpl.red) {
xpl.red = false;
xp.red = true;
root = rotateRight(root, xp);
xpl = (xp = x.parent) == null ? null : xp.left;
}
if (xpl == null)
x = xp;
else {
TreeNode<K,V> sl = xpl.left, sr = xpl.right;
if ((sl == null || !sl.red) &&
(sr == null || !sr.red)) {
xpl.red = true;
x = xp;
}
else {
if (sl == null || !sl.red) {
if (sr != null)
sr.red = false;
xpl.red = true;
root = rotateLeft(root, xpl);
xpl = (xp = x.parent) == null ?
null : xp.left;
}
if (xpl != null) {
xpl.red = (xp == null) ? false : xp.red;
if ((sl = xpl.left) != null)
sl.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateRight(root, xp);
}
x = root;
}
}
}
}
}
/**
* Recursive invariant check
*/
static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
tb = t.prev, tn = (TreeNode<K,V>)t.next;
if (tb != null && tb.next != t)
return false;
if (tn != null && tn.prev != t)
return false;
if (tp != null && t != tp.left && t != tp.right)
return false;
if (tl != null && (tl.parent != t || tl.hash > t.hash))
return false;
if (tr != null && (tr.parent != t || tr.hash < t.hash))
return false;
if (t.red && tl != null && tl.red && tr != null && tr.red)
return false;
if (tl != null && !checkInvariants(tl))
return false;
if (tr != null && !checkInvariants(tr))
return false;
return true;
}
}
}