本文基于 JDK 1.8 的源码进行分析:
这是 JDK 1.8 HashMap 的数据结构:数组 + 链表(当多个 key 的 hash 落到同一个桶时形成链表)+ 红黑树(当 table 的 length >= 64,且单个桶中链表长度达到 8 时,该桶的链表会转为红黑树;若 table 长度不足 64 则优先扩容)
Map<String,String> map = new HashMap();
map.put("hello","world");
map.get("hello");
//这是默认的构造方法,就是设置了一个默认的加载因子DEFAULT_LOAD_FACTOR = 0.75f
/**
 * Constructs an empty <tt>HashMap</tt> with the default initial capacity
 * (16) and the default load factor (0.75).
 *
 * <p>Note: only the load factor is stored here; the backing Node[] table
 * is allocated lazily by resize() on the first insertion.
 */
public HashMap() {
    this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
}
/**
 * Constructs an empty <tt>HashMap</tt> with the specified initial
 * capacity and the default load factor (0.75).
 *
 * @param initialCapacity the initial capacity.
 * @throws IllegalArgumentException if the initial capacity is negative.
 */
public HashMap(int initialCapacity) {
    // Delegate to the two-argument constructor with the default 0.75 load factor.
    this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
/**
 * Constructs a new <tt>HashMap</tt> with the same mappings as the
 * specified <tt>Map</tt>.  The <tt>HashMap</tt> is created with
 * default load factor (0.75) and an initial capacity sufficient to
 * hold the mappings in the specified <tt>Map</tt>.
 *
 * @param m the map whose mappings are to be placed in this map
 * @throws NullPointerException if the specified map is null
 */
public HashMap(Map<? extends K, ? extends V> m) {
    this.loadFactor = DEFAULT_LOAD_FACTOR;
    // evict == false because the map is still being constructed
    // (the flag only matters to LinkedHashMap's afterNodeInsertion).
    putMapEntries(m, false);
}
/**
 * Implements Map.putAll and Map constructor.
 *
 * @param m the map
 * @param evict false when initially constructing this map, else
 * true (relayed to method afterNodeInsertion).
 */
final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
    int s = m.size();
    if (s > 0) {
        if (table == null) { // pre-size: the backing Node[] has not been allocated yet
            // Capacity needed to hold s entries without a resize:
            // s / loadFactor, with +1.0F rounding up after the float division.
            float ft = ((float)s / loadFactor) + 1.0F;
            int t = ((ft < (float)MAXIMUM_CAPACITY) ?
                     (int)ft : MAXIMUM_CAPACITY);
            if (t > threshold)
                // tableSizeFor rounds t up to the next power of two; the result
                // is parked in threshold until resize() creates the table.
                threshold = tableSizeFor(t);
        }
        else if (s > threshold) // table exists but is too small: grow it first
            resize();
        // Copy every entry from m into this map via the normal putVal path.
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
            K key = e.getKey();
            V value = e.getValue();
            putVal(hash(key), key, value, false, evict);
        }
    }
}
以上就是 HashMap 的构造方法,其最主要的作用就是初始化默认的加载因子 0.75(真正的数组分配被推迟到第一次 put 时进行)。
下面看下map.put()的源码:
/**
 * Associates the specified value with the specified key in this map.
 * If the map previously contained a mapping for the key, the old
 * value is replaced.
 *
 * @param key key with which the specified value is to be associated
 * @param value value to be associated with the specified key
 * @return the previous value associated with <tt>key</tt>, or
 *         <tt>null</tt> if there was no mapping for <tt>key</tt>.
 *         (A <tt>null</tt> return can also indicate that the map
 *         previously associated <tt>null</tt> with <tt>key</tt>.)
 */
public V put(K key, V value) {
    // onlyIfAbsent=false: an existing value is overwritten; evict=true.
    return putVal(hash(key), key, value, false, true);
}
/**
 * Computes key.hashCode() and spreads (XORs) the higher bits of the
 * hash down into the lower bits.  Because the table index is taken
 * with a power-of-two mask, hashes differing only in the upper bits
 * would otherwise always collide; this cheap fold reduces that risk.
 * A null key always hashes to 0.
 */
static final int hash(Object key) {
    if (key == null) {
        return 0;
    }
    final int h = key.hashCode();
    return h ^ (h >>> 16);
}
/**
* Implements Map.put and related methods
*
* @param hash hash for key //Key的Hash值
* @param key the key
* @param value the value to put
* @param onlyIfAbsent if true, don't change existing value //如果这个Key已经存在,不要更改存在的值
* @param evict if false, the table is in creation mode.//这个值是给LinkedHashMap用的,HashMap没有用到这个值
* @return previous value, or null if none
*/
final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
boolean evict) {
Node<K,V>[] tab; Node<K,V> p; int , i;
if ((tab = table) == null || (n = tab.length) == 0)//如果这个table的数组没有创建,就调用resize()方法创建数组
n = (tab = resize()).length;
//这是判断当前数组位置[i]没有保存了数据,直接创建Node保存在数组的i位置上
if ((p = tab[i = (n - 1) & hash]) == null)// i = (n - 1) & hash 这是计算当前hash值在数组中的索引,
tab[i] = newNode(hash, key, value, null);
else {//这个地方就是table[i]上已经!=null 这个p就已经有值了
Node<K,V> e; K k;
//p.hash 和传过来的Hash相等,并且已经存在的Key和传过来的key的内存地址相等或者值相等,就说明传过来的Key和存在的Key是同一个值
if (p.hash == hash &&
((k = p.key) == key || (key != null && key.equals(k))))
e = p;//直接替换
else if (p instanceof TreeNode)//如果是红黑树,就直接插入到红黑树
e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
else {//不是同一个值,就开始遍历这个链表
for (int binCount = 0; ; ++binCount) {
if ((e = p.next) == null) {
p.next = newNode(hash, key, value, null);//如果是遍历到列表的最后一个,就把当前Node添加到最后一个的后面
if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
treeifyBin(tab, hash);//这个方法是创建红黑树还是给table进行扩容
break;
}
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k))))//找到相同的key
break;
p = e;
}
}
if (e != null) { // existing mapping for key//如果这个Key存在于Map中
V oldValue = e.value;
if (!onlyIfAbsent || oldValue == null) //如果是可以改变的值,或者存在的value == null直接当前Value替换掉旧的value
e.value = value;
afterNodeAccess(e);
return oldValue;
}
}
++modCount;//修改
if (++size > threshold)//判断是否需要扩容
resize();
afterNodeInsertion(evict);
return null;
}
/**
 * Initializes or doubles table size.  If null, allocates in
 * accord with initial capacity target held in field threshold.
 * Otherwise, because we are using power-of-two expansion, the
 * elements from each bin must either stay at same index, or move
 * with a power of two offset in the new table.
 *
 * @return the table
 */
final Node<K,V>[] resize() {
    Node<K,V>[] oldTab = table; // old table
    int oldCap = (oldTab == null) ? 0 : oldTab.length; // old capacity
    int oldThr = threshold; // old threshold (resize trigger)
    int newCap, newThr = 0; // new capacity and new threshold
    if (oldCap > 0) { // a table already exists: this is a grow operation
        if (oldCap >= MAXIMUM_CAPACITY) {
            // Already at the maximum capacity: stop resizing forever by
            // raising the threshold to Integer.MAX_VALUE and return as-is.
            threshold = Integer.MAX_VALUE;
            return oldTab;
        }
        else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                 oldCap >= DEFAULT_INITIAL_CAPACITY)
            // Doubling the capacity stays within bounds, so the threshold
            // can simply be doubled as well.
            newThr = oldThr << 1; // double threshold
    }
    else if (oldThr > 0) // initial capacity was placed in threshold
        // Path taken by HashMap(int)/HashMap(int,float)/putMapEntries:
        // tableSizeFor parked the desired capacity in threshold.
        newCap = oldThr;
    else { // zero initial threshold signifies using defaults (no-arg constructor)
        newCap = DEFAULT_INITIAL_CAPACITY;
        newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
    }
    if (newThr == 0) { // compute the new threshold = newCap * loadFactor
        float ft = (float)newCap * loadFactor;
        newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                  (int)ft : Integer.MAX_VALUE);
    }
    threshold = newThr; // publish the new threshold
    Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap]; // allocate the new table
    table = newTab;
    if (oldTab != null) { // growing: migrate every old bucket into the new table
        for (int j = 0; j < oldCap; ++j) {
            Node<K,V> e;
            if ((e = oldTab[j]) != null) { // bucket j has at least one node
                oldTab[j] = null; // unlink from the old table (helps GC)
                if (e.next == null) // single node: recompute its index directly
                    newTab[e.hash & (newCap - 1)] = e;
                else if (e instanceof TreeNode) // tree bin: split the red-black tree
                    ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                else { // preserve order: split the chain into "lo" and "hi" lists
                    Node<K,V> loHead = null, loTail = null;
                    Node<K,V> hiHead = null, hiTail = null;
                    Node<K,V> next;
                    do {
                        next = e.next;
                        // (hash & oldCap) tests exactly the one new index bit
                        // exposed by doubling: 0 means the node stays at index
                        // j, 1 means it moves to index j + oldCap.
                        if ((e.hash & oldCap) == 0) {
                            if (loTail == null)
                                loHead = e;
                            else
                                loTail.next = e;
                            loTail = e;
                        }
                        else {
                            if (hiTail == null)
                                hiHead = e;
                            else
                                hiTail.next = e;
                            hiTail = e;
                        }
                    } while ((e = next) != null);
                    if (loTail != null) {
                        loTail.next = null;
                        newTab[j] = loHead; // "lo" list keeps index j
                    }
                    if (hiTail != null) {
                        hiTail.next = null;
                        newTab[j + oldCap] = hiHead; // "hi" list moves to j + oldCap
                    }
                }
            }
        }
    }
    return newTab;
}