The Complete JDK8 HashMap Source Code (1): Java Software Engineer Interview Questions

Advanced Java Core Architecture Topics

Passing the interviews was really the inevitable outcome of thorough preparation. It was not just grinding interview questions; I also worked through advanced Java core-architecture topics such as the JVM, high concurrency, multithreading, caching, Spring, distributed systems, microservices, RPC, networking, design patterns, MQ, Redis, MySQL, load balancing, algorithms, data structures, Kafka, ZooKeeper, clustering, and more. All of it has been condensed into a single PDF, "Java Core Architecture Advanced Topics", and in the spirit of sharing, good material deserves to be passed on.

[Screenshots of the PDF's table of contents]

The material is extensive and space here is limited, so I will not go into more detail; the screenshots above give a sense of what is covered.

This article has been included in the open-source CODING project "Front-line Big-Tech Java Interview Question Analysis + Core Summary Study Notes + Latest Explainer Videos + Real Project Source Code".

Readers who need this systematically organized material can click here to get it.

public final Spliterator<K> spliterator() {

return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);

}

public final void forEach(Consumer<? super K> action) {

Node<K,V>[] tab;

if (action == null)

throw new NullPointerException();

if (size > 0 && (tab = table) != null) {

int mc = modCount;

for (int i = 0; i < tab.length; ++i) {

for (Node<K,V> e = tab[i]; e != null; e = e.next)

action.accept(e.key);

}

if (modCount != mc)

throw new ConcurrentModificationException();

}

}

}

/**
 * Returns a {@link Collection} view of the values contained in this map.
 * The collection is backed by the map, so changes to the map are
 * reflected in the collection, and vice-versa. If the map is
 * modified while an iteration over the collection is in progress
 * (except through the iterator's own remove operation),
 * the results of the iteration are undefined. The collection
 * supports element removal, which removes the corresponding
 * mapping from the map, via the Iterator.remove,
 * Collection.remove, removeAll,
 * retainAll and clear operations. It does not
 * support the add or addAll operations.
 *
 * @return a view of the values contained in this map
 */

public Collection<V> values() {

Collection<V> vs = values;

if (vs == null) {

vs = new Values();

values = vs;

}

return vs;

}

final class Values extends AbstractCollection<V> {

public final int size() { return size; }

public final void clear() { HashMap.this.clear(); }

public final Iterator<V> iterator() { return new ValueIterator(); }

public final boolean contains(Object o) { return containsValue(o); }

public final Spliterator<V> spliterator() {

return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);

}

public final void forEach(Consumer<? super V> action) {

Node<K,V>[] tab;

if (action == null)

throw new NullPointerException();

if (size > 0 && (tab = table) != null) {

int mc = modCount;

for (int i = 0; i < tab.length; ++i) {

for (Node<K,V> e = tab[i]; e != null; e = e.next)

action.accept(e.value);

}

if (modCount != mc)

throw new ConcurrentModificationException();

}

}

}
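To make the view semantics described in the javadoc above concrete, here is a small usage sketch (my own illustration, not part of the JDK source; the class name ValuesViewDemo is just a placeholder):

// Usage sketch: values() is a live view backed by the map (illustrative only).
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class ValuesViewDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        Collection<Integer> vs = map.values();
        map.put("c", 3);
        System.out.println(vs.size());            // 3 -- the view reflects later puts

        vs.remove(2);                             // removes the mapping "b" -> 2 from the map
        System.out.println(map.containsKey("b")); // false

        // vs.add(4);                             // would throw UnsupportedOperationException
    }
}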

/**
 * Returns a {@link Set} view of the mappings contained in this map.
 * The set is backed by the map, so changes to the map are
 * reflected in the set, and vice-versa. If the map is modified
 * while an iteration over the set is in progress (except through
 * the iterator's own remove operation, or through the
 * setValue operation on a map entry returned by the
 * iterator) the results of the iteration are undefined. The set
 * supports element removal, which removes the corresponding
 * mapping from the map, via the Iterator.remove,
 * Set.remove, removeAll, retainAll and
 * clear operations. It does not support the
 * add or addAll operations.
 *
 * @return a set view of the mappings contained in this map
 */

public Set<Map.Entry<K,V>> entrySet() {

Set<Map.Entry<K,V>> es;

return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;

}

final class EntrySet extends AbstractSet<Map.Entry<K,V>> {

public final int size() { return size; }

public final void clear() { HashMap.this.clear(); }

public final Iterator<Map.Entry<K,V>> iterator() {

return new EntryIterator();

}

public final boolean contains(Object o) {

if (!(o instanceof Map.Entry))

return false;

Map.Entry<?,?> e = (Map.Entry<?,?>) o;

Object key = e.getKey();

Node<K,V> candidate = getNode(hash(key), key);

return candidate != null && candidate.equals(e);

}

public final boolean remove(Object o) {

if (o instanceof Map.Entry) {

Map.Entry<?,?> e = (Map.Entry<?,?>) o;

Object key = e.getKey();

Object value = e.getValue();

return removeNode(hash(key), key, value, true, true) != null;

}

return false;

}

public final Spliterator<Map.Entry<K,V>> spliterator() {

return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);

}

public final void forEach(Consumer<? super Map.Entry<K,V>> action) {

Node<K,V>[] tab;

if (action == null)

throw new NullPointerException();

if (size > 0 && (tab = table) != null) {

int mc = modCount;

for (int i = 0; i < tab.length; ++i) {

for (Node<K,V> e = tab[i]; e != null; e = e.next)

action.accept(e);

}

if (modCount != mc)

throw new ConcurrentModificationException();

}

}

}
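Similarly, a brief sketch (not part of the JDK source; EntrySetDemo is an arbitrary name) of working with the entry-set view documented above, using Entry.setValue and Iterator.remove as the supported modification paths:

// Usage sketch: entrySet() iteration with setValue and Iterator.remove (illustrative only).
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class EntrySetDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        for (Map.Entry<String, Integer> e : map.entrySet()) {
            e.setValue(e.getValue() * 10);   // writes through to the map
        }

        Iterator<Map.Entry<String, Integer>> it = map.entrySet().iterator();
        while (it.hasNext()) {
            if (it.next().getKey().equals("a")) {
                it.remove();                 // safe structural removal during iteration
            }
        }
        System.out.println(map);             // {b=20}
    }
}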

// Overrides of JDK8 Map extension methods

@Override

public V getOrDefault(Object key, V defaultValue) {

Node<K,V> e;

return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;

}
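One detail worth calling out from the implementation above: getOrDefault falls back to the default only when getNode finds no node at all, so a key that is explicitly mapped to null still returns null. A quick sketch (illustrative, not from the source):

// Usage sketch: the default is used only when the key is absent (illustrative only).
import java.util.HashMap;
import java.util.Map;

public class GetOrDefaultDemo {
    public static void main(String[] args) {
        Map<String, String> map = new HashMap<>();
        map.put("present", null);            // HashMap allows null values

        System.out.println(map.getOrDefault("missing", "fallback")); // fallback
        System.out.println(map.getOrDefault("present", "fallback")); // null -- a mapping exists
    }
}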

@Override

public V putIfAbsent(K key, V value) {

return putVal(hash(key), key, value, true, true);

}

@Override

public boolean remove(Object key, Object value) {

return removeNode(hash(key), key, value, true, true) != null;

}

@Override

public boolean replace(K key, V oldValue, V newValue) {

Node<K,V> e; V v;

if ((e = getNode(hash(key), key)) != null &&

((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {

e.value = newValue;

afterNodeAccess(e);

return true;

}

return false;

}

@Override

public V replace(K key, V value) {

Node<K,V> e;

if ((e = getNode(hash(key), key)) != null) {

V oldValue = e.value;

e.value = value;

afterNodeAccess(e);

return oldValue;

}

return null;

}

@Override

public V computeIfAbsent(K key,

Function<? super K, ? extends V> mappingFunction) {

if (mappingFunction == null)

throw new NullPointerException();

int hash = hash(key);

Node<K,V>[] tab; Node<K,V> first; int n, i;

int binCount = 0;

TreeNode<K,V> t = null;

Node<K,V> old = null;

if (size > threshold || (tab = table) == null ||

(n = tab.length) == 0)

n = (tab = resize()).length;

if ((first = tab[i = (n - 1) & hash]) != null) {

if (first instanceof TreeNode)

old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);

else {

Node<K,V> e = first; K k;

do {

if (e.hash == hash &&

((k = e.key) == key || (key != null && key.equals(k)))) {

old = e;

break;

}

++binCount;

} while ((e = e.next) != null);

}

V oldValue;

if (old != null && (oldValue = old.value) != null) {

afterNodeAccess(old);

return oldValue;

}

}

V v = mappingFunction.apply(key);

if (v == null) {

return null;

} else if (old != null) {

old.value = v;

afterNodeAccess(old);

return v;

}

else if (t != null)

t.putTreeVal(this, tab, hash, key, v);

else {

tab[i] = newNode(hash, key, v, first);

if (binCount >= TREEIFY_THRESHOLD - 1)

treeifyBin(tab, hash);

}

++modCount;

++size;

afterNodeInsertion(true);

return v;

}
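A short usage sketch of computeIfAbsent (my own example, not from the source). Note, as the code above shows, that a mapping function returning null stores nothing and the method simply returns null:

// Usage sketch: computeIfAbsent as a grouping/caching idiom (illustrative only).
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ComputeIfAbsentDemo {
    public static void main(String[] args) {
        Map<Character, List<String>> byInitial = new HashMap<>();
        for (String word : new String[] {"alpha", "apple", "beta"}) {
            // The list is created only the first time an initial is seen.
            byInitial.computeIfAbsent(word.charAt(0), c -> new ArrayList<>()).add(word);
        }
        System.out.println(byInitial); // e.g. {a=[alpha, apple], b=[beta]} (order not guaranteed)

        // A mapping function that returns null inserts nothing and returns null.
        System.out.println(byInitial.computeIfAbsent('z', c -> null)); // null
        System.out.println(byInitial.containsKey('z'));                // false
    }
}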

@Override

public V computeIfPresent(K key,

BiFunction<? super K, ? super V, ? extends V> remappingFunction) {

if (remappingFunction == null)

throw new NullPointerException();

Node<K,V> e; V oldValue;

int hash = hash(key);

if ((e = getNode(hash, key)) != null &&

(oldValue = e.value) != null) {

V v = remappingFunction.apply(key, oldValue);

if (v != null) {

e.value = v;

afterNodeAccess(e);

return v;

}

else

removeNode(hash, key, null, false, true);

}

return null;

}

@Override

public V compute(K key,

BiFunction<? super K, ? super V, ? extends V> remappingFunction) {

if (remappingFunction == null)

throw new NullPointerException();

int hash = hash(key);

Node<K,V>[] tab; Node<K,V> first; int n, i;

int binCount = 0;

TreeNode<K,V> t = null;

Node<K,V> old = null;

if (size > threshold || (tab = table) == null ||

(n = tab.length) == 0)

n = (tab = resize()).length;

if ((first = tab[i = (n - 1) & hash]) != null) {

if (first instanceof TreeNode)

old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);

else {

Node<K,V> e = first; K k;

do {

if (e.hash == hash &&

((k = e.key) == key || (key != null && key.equals(k)))) {

old = e;

break;

}

++binCount;

} while ((e = e.next) != null);

}

}

V oldValue = (old == null) ? null : old.value;

V v = remappingFunction.apply(key, oldValue);

if (old != null) {

if (v != null) {

old.value = v;

afterNodeAccess(old);

}

else

removeNode(hash, key, null, false, true);

}

else if (v != null) {

if (t != null)

t.putTreeVal(this, tab, hash, key, v);

else {

tab[i] = newNode(hash, key, v, first);

if (binCount >= TREEIFY_THRESHOLD - 1)

treeifyBin(tab, hash);

}

++modCount;

++size;

afterNodeInsertion(true);

}

return v;

}

@Override

public V merge(K key, V value,

BiFunction<? super V, ? super V, ? extends V> remappingFunction) {

if (value == null)

throw new NullPointerException();

if (remappingFunction == null)

throw new NullPointerException();

int hash = hash(key);

Node<K,V>[] tab; Node<K,V> first; int n, i;

int binCount = 0;

TreeNode<K,V> t = null;

Node<K,V> old = null;

if (size > threshold || (tab = table) == null ||

(n = tab.length) == 0)

n = (tab = resize()).length;

if ((first = tab[i = (n - 1) & hash]) != null) {

if (first instanceof TreeNode)

old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);

else {

Node<K,V> e = first; K k;

do {

if (e.hash == hash &&

((k = e.key) == key || (key != null && key.equals(k)))) {

old = e;

break;

}

++binCount;

} while ((e = e.next) != null);

}

}

if (old != null) {

V v;

if (old.value != null)

v = remappingFunction.apply(old.value, value);

else

v = value;

if (v != null) {

old.value = v;

afterNodeAccess(old);

}

else

removeNode(hash, key, null, false, true);

return v;

}

if (value != null) {

if (t != null)

t.putTreeVal(this, tab, hash, key, value);

else {

tab[i] = newNode(hash, key, value, first);

if (binCount >= TREEIFY_THRESHOLD - 1)

treeifyBin(tab, hash);

}

++modCount;

++size;

afterNodeInsertion(true);

}

return value;

}
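A usage sketch for merge (illustrative only): the value argument is used when the key is absent, the remapping function combines the old and new values otherwise, and a null result removes the entry, exactly as the branches above show:

// Usage sketch: merge for counting, and removal via a null-returning remapping function.
import java.util.HashMap;
import java.util.Map;

public class MergeDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        for (String w : new String[] {"x", "y", "x"}) {
            counts.merge(w, 1, Integer::sum);        // put 1 if absent, otherwise add
        }
        System.out.println(counts.get("x"));         // 2

        // Returning null from the remapping function removes the entry.
        counts.merge("y", 1, (oldV, newV) -> null);
        System.out.println(counts.containsKey("y")); // false
    }
}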

@Override

public void forEach(BiConsumer<? super K, ? super V> action) {

Node<K,V>[] tab;

if (action == null)

throw new NullPointerException();

if (size > 0 && (tab = table) != null) {

int mc = modCount;

for (int i = 0; i < tab.length; ++i) {

for (Node<K,V> e = tab[i]; e != null; e = e.next)

action.accept(e.key, e.value);

}

if (modCount != mc)

throw new ConcurrentModificationException();

}

}

@Override

public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {

Node<K,V>[] tab;

if (function == null)

throw new NullPointerException();

if (size > 0 && (tab = table) != null) {

int mc = modCount;

for (int i = 0; i < tab.length; ++i) {

for (Node<K,V> e = tab[i]; e != null; e = e.next) {

e.value = function.apply(e.key, e.value);

}

}

if (modCount != mc)

throw new ConcurrentModificationException();

}

}
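Both forEach and replaceAll above snapshot modCount and re-check it after the loop, so value-only updates are fine but structural changes made from inside the callback are detected. A small sketch (not part of the source):

// Usage sketch: replaceAll rewrites values in place; structural modification inside
// forEach trips the modCount check (illustrative only).
import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.Map;

public class ReplaceAllDemo {
    public static void main(String[] args) {
        Map<String, String> map = new HashMap<>();
        map.put("a", " x ");
        map.put("b", " y ");

        map.replaceAll((k, v) -> v.trim());   // value-only update, no structural change
        System.out.println(map);              // {a=x, b=y}

        try {
            map.forEach((k, v) -> map.remove(k)); // structural modification during traversal
        } catch (ConcurrentModificationException expected) {
            System.out.println("fail-fast: " + expected);
        }
    }
}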

/* ------------------------------------------------------------ */

// Cloning and serialization

/**
 * Returns a shallow copy of this HashMap instance: the keys and
 * values themselves are not cloned.
 *
 * @return a shallow copy of this map
 */

@SuppressWarnings("unchecked")

@Override

public Object clone() {

HashMap<K,V> result;

try {

result = (HashMap<K,V>)super.clone();

} catch (CloneNotSupportedException e) {

// this shouldn't happen, since we are Cloneable

throw new InternalError(e);

}

result.reinitialize();

result.putMapEntries(this, false);

return result;

}

// These methods are also used when serializing HashSets

final float loadFactor() { return loadFactor; }

final int capacity() {

return (table != null) ? table.length :

(threshold > 0) ? threshold :

DEFAULT_INITIAL_CAPACITY;

}

/**
 * Save the state of the HashMap instance to a stream (i.e.,
 * serialize it).
 *
 * @serialData The capacity of the HashMap (the length of the
 *             bucket array) is emitted (int), followed by the
 *             <i>size</i> (an int, the number of key-value
 *             mappings), followed by the key (Object) and value (Object)
 *             for each key-value mapping.  The key-value mappings are
 *             emitted in no particular order.
 */

private void writeObject(java.io.ObjectOutputStream s)

throws IOException {

int buckets = capacity();

// Write out the threshold, loadfactor, and any hidden stuff

s.defaultWriteObject();

s.writeInt(buckets);

s.writeInt(size);

internalWriteEntries(s);

}

/**
 * Reconstitute the {@code HashMap} instance from a stream (i.e.,
 * deserialize it).
 */

private void readObject(java.io.ObjectInputStream s)

throws IOException, ClassNotFoundException {

// Read in the threshold (ignored), loadfactor, and any hidden stuff

s.defaultReadObject();

reinitialize();

if (loadFactor <= 0 || Float.isNaN(loadFactor))

throw new InvalidObjectException("Illegal load factor: " +

loadFactor);

s.readInt(); // Read and ignore number of buckets

int mappings = s.readInt(); // Read number of mappings (size)

if (mappings < 0)

throw new InvalidObjectException("Illegal mappings count: " +

mappings);

else if (mappings > 0) { // (if zero, use defaults)

// Size the table using given load factor only if within

// range of 0.25…4.0

float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);

float fc = (float)mappings / lf + 1.0f;

int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?

DEFAULT_INITIAL_CAPACITY :

(fc >= MAXIMUM_CAPACITY) ?

MAXIMUM_CAPACITY :

tableSizeFor((int)fc));

float ft = (float)cap * lf;

threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?

(int)ft : Integer.MAX_VALUE);

// Check Map.Entry[].class since it's the nearest public type to

// what we're actually creating.

SharedSecrets.getJavaOISAccess().checkArray(s, Map.Entry[].class, cap);

@SuppressWarnings({"rawtypes","unchecked"})

Node<K,V>[] tab = (Node<K,V>[])new Node[cap];

table = tab;

// Read the keys and values, and put the mappings in the HashMap

for (int i = 0; i < mappings; i++) {

@SuppressWarnings("unchecked")

K key = (K) s.readObject();

@SuppressWarnings("unchecked")

V value = (V) s.readObject();

putVal(hash(key), key, value, false, false);

}

}

}
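For completeness, a round-trip sketch showing the serialization path above in action (illustrative only; ordinary user code never touches SharedSecrets, which is an internal JDK helper, and the hash codes are recomputed on the deserializing side via putVal):

// Usage sketch: HashMap serialization round trip (illustrative only).
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.Map;

public class SerializationDemo {
    public static void main(String[] args) throws Exception {
        Map<String, Integer> original = new HashMap<>();
        original.put("answer", 42);

        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
            out.writeObject(original);         // drives writeObject/internalWriteEntries
        }

        try (ObjectInputStream in = new ObjectInputStream(
                new ByteArrayInputStream(bos.toByteArray()))) {
            @SuppressWarnings("unchecked")
            Map<String, Integer> copy = (Map<String, Integer>) in.readObject();
            System.out.println(copy.equals(original)); // true
        }
    }
}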

/* ------------------------------------------------------------ */

// iterators

abstract class HashIterator {

Node<K,V> next; // next entry to return

Node<K,V> current; // current entry

int expectedModCount; // for fast-fail

int index; // current slot

HashIterator() {

expectedModCount = modCount;

Node<K,V>[] t = table;

current = next = null;

index = 0;

if (t != null && size > 0) { // advance to first entry

do {} while (index < t.length && (next = t[index++]) == null);

}

}

public final boolean hasNext() {

return next != null;

}

final Node<K,V> nextNode() {

Node<K,V>[] t;

Node<K,V> e = next;

if (modCount != expectedModCount)

throw new ConcurrentModificationException();

if (e == null)

throw new NoSuchElementException();

if ((next = (current = e).next) == null && (t = table) != null) {

do {} while (index < t.length && (next = t[index++]) == null);

}

return e;

}

public final void remove() {

Node<K,V> p = current;

if (p == null)

throw new IllegalStateException();

if (modCount != expectedModCount)

throw new ConcurrentModificationException();

current = null;

K key = p.key;

removeNode(hash(key), key, null, false, false);

expectedModCount = modCount;

}

}
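The expectedModCount bookkeeping above is what makes these iterators fail-fast. A quick sketch (not from the source) of the supported and unsupported ways to modify the map during iteration:

// Usage sketch: Iterator.remove is supported; external modification is detected (illustrative only).
import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class FailFastDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        // Supported: structural removal through the iterator itself.
        Iterator<String> it = map.keySet().iterator();
        it.next();
        it.remove();                   // also resynchronizes expectedModCount
        System.out.println(map.size());

        // Not supported: modifying the map directly while an iteration is in progress.
        Iterator<String> it2 = map.keySet().iterator();
        it2.next();
        map.put("c", 3);               // bumps modCount
        try {
            it2.next();                // nextNode() detects the comodification
        } catch (ConcurrentModificationException expected) {
            System.out.println("fail-fast: " + expected);
        }
    }
}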

final class KeyIterator extends HashIterator

implements Iterator<K> {

public final K next() { return nextNode().key; }

}

final class ValueIterator extends HashIterator

implements Iterator<V> {

public final V next() { return nextNode().value; }

}

final class EntryIterator extends HashIterator

implements Iterator<Map.Entry<K,V>> {

public final Map.Entry<K,V> next() { return nextNode(); }

}

/* ------------------------------------------------------------ */

// spliterators

static class HashMapSpliterator<K,V> {

final HashMap<K,V> map;

Node<K,V> current; // current node

int index; // current index, modified on advance/split

int fence; // one past last index

int est; // size estimate

int expectedModCount; // for comodification checks

HashMapSpliterator(HashMap<K,V> m, int origin,

int fence, int est,

int expectedModCount) {

this.map = m;

this.index = origin;

this.fence = fence;

this.est = est;

this.expectedModCount = expectedModCount;

}

final int getFence() { // initialize fence and size on first use

int hi;

if ((hi = fence) < 0) {

HashMap<K,V> m = map;

est = m.size;

expectedModCount = m.modCount;

Node<K,V>[] tab = m.table;

hi = fence = (tab == null) ? 0 : tab.length;

}

return hi;

}

public final long estimateSize() {

getFence(); // force init

return (long) est;

}

}

static final class KeySpliterator<K,V>

extends HashMapSpliterator<K,V>

implements Spliterator<K> {

KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,

int expectedModCount) {

super(m, origin, fence, est, expectedModCount);

}

public KeySpliterator<K,V> trySplit() {

int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;

return (lo >= mid || current != null) ? null :

new KeySpliterator<>(map, lo, index = mid, est >>>= 1,

expectedModCount);

}

public void forEachRemaining(Consumer<? super K> action) {

int i, hi, mc;

if (action == null)

throw new NullPointerException();

HashMap<K,V> m = map;

Node<K,V>[] tab = m.table;

if ((hi = fence) < 0) {

mc = expectedModCount = m.modCount;

hi = fence = (tab == null) ? 0 : tab.length;

}

else

mc = expectedModCount;

if (tab != null && tab.length >= hi &&

(i = index) >= 0 && (i < (index = hi) || current != null)) {

Node<K,V> p = current;

current = null;

do {

if (p == null)

p = tab[i++];

else {

action.accept(p.key);

p = p.next;

}

} while (p != null || i < hi);

if (m.modCount != mc)

throw new ConcurrentModificationException();

}

}

public boolean tryAdvance(Consumer<? super K> action) {

int hi;

if (action == null)

throw new NullPointerException();

Node<K,V>[] tab = map.table;

if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {

while (current != null || index < hi) {

if (current == null)

current = tab[index++];

else {

K k = current.key;

current = current.next;

action.accept(k);

if (map.modCount != expectedModCount)

throw new ConcurrentModificationException();

return true;

}

}

}

return false;

}

public int characteristics() {

return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |

Spliterator.DISTINCT;

}

}

static final class ValueSpliterator<K,V>

extends HashMapSpliterator<K,V>

implements Spliterator<V> {

ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,

int expectedModCount) {

super(m, origin, fence, est, expectedModCount);

}

public ValueSpliterator<K,V> trySplit() {

int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;

return (lo >= mid || current != null) ? null :

new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,

expectedModCount);

}

public void forEachRemaining(Consumer<? super V> action) {

int i, hi, mc;

if (action == null)

throw new NullPointerException();

HashMap<K,V> m = map;

Node<K,V>[] tab = m.table;

if ((hi = fence) < 0) {

mc = expectedModCount = m.modCount;

hi = fence = (tab == null) ? 0 : tab.length;

}

else

mc = expectedModCount;

if (tab != null && tab.length >= hi &&

(i = index) >= 0 && (i < (index = hi) || current != null)) {

Node<K,V> p = current;

current = null;

do {

if (p == null)

p = tab[i++];

else {

action.accept(p.value);

p = p.next;

}

} while (p != null || i < hi);

if (m.modCount != mc)

throw new ConcurrentModificationException();

}

}

public boolean tryAdvance(Consumer<? super V> action) {

int hi;

if (action == null)

throw new NullPointerException();

Node<K,V>[] tab = map.table;

if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {

while (current != null || index < hi) {

if (current == null)

current = tab[index++];

else {

V v = current.value;

current = current.next;

action.accept(v);

if (map.modCount != expectedModCount)

throw new ConcurrentModificationException();

return true;

}

}

}

return false;

}

public int characteristics() {

return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);

}

}

static final class EntrySpliterator<K,V>

extends HashMapSpliterator<K,V>

implements Spliterator<Map.Entry<K,V>> {

EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,

int expectedModCount) {

super(m, origin, fence, est, expectedModCount);

}

public EntrySpliterator<K,V> trySplit() {

int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;

return (lo >= mid || current != null) ? null :

new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,

expectedModCount);

}

public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {

int i, hi, mc;

if (action == null)

throw new NullPointerException();

HashMap<K,V> m = map;

Node<K,V>[] tab = m.table;

if ((hi = fence) < 0) {

mc = expectedModCount = m.modCount;

hi = fence = (tab == null) ? 0 : tab.length;

}

else

mc = expectedModCount;

if (tab != null && tab.length >= hi &&

(i = index) >= 0 && (i < (index = hi) || current != null)) {

Node<K,V> p = current;

current = null;

do {

if (p == null)

p = tab[i++];

else {

action.accept(p);

p = p.next;

}

} while (p != null || i < hi);

if (m.modCount != mc)

throw new ConcurrentModificationException();

}

}

public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {

int hi;

if (action == null)

throw new NullPointerException();

Node<K,V>[] tab = map.table;

if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {

while (current != null || index < hi) {

if (current == null)

current = tab[index++];

else {

Node<K,V> e = current;

current = current.next;

action.accept(e);

if (map.modCount != expectedModCount)

throw new ConcurrentModificationException();

return true;

}

}

}

return false;

}

public int characteristics() {

return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |

Spliterator.DISTINCT;

}

}
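These spliterators are what keySet(), values() and entrySet() hand to the Streams framework: trySplit halves the remaining bucket range for parallel traversal, and SIZED is reported only while the size estimate is exact. A small sketch (illustrative only, class name is arbitrary):

// Usage sketch: splitting a key spliterator and reading its characteristics (illustrative only).
import java.util.HashMap;
import java.util.Map;
import java.util.Spliterator;

public class SpliteratorDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        for (int i = 0; i < 8; i++) {
            map.put("k" + i, i);
        }

        Spliterator<String> s1 = map.keySet().spliterator();
        System.out.println(s1.estimateSize());                           // 8
        System.out.println(s1.hasCharacteristics(Spliterator.SIZED));    // true
        System.out.println(s1.hasCharacteristics(Spliterator.DISTINCT)); // true (keys are unique)

        Spliterator<String> s2 = s1.trySplit();     // halves the bucket range for parallel use
        if (s2 != null) {
            s2.forEachRemaining(k -> System.out.println("left half:  " + k));
        }
        s1.forEachRemaining(k -> System.out.println("right half: " + k));

        long sum = map.values().parallelStream().mapToInt(Integer::intValue).sum();
        System.out.println(sum);                                         // 28
    }
}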

/* ------------------------------------------------------------ */

// LinkedHashMap support

/*
 * The following package-protected methods are designed to be
 * overridden by LinkedHashMap, but not by any other subclass.
 * Nearly all other internal methods are also package-protected
 * but are declared final, so can be used by LinkedHashMap, view
 * classes, and HashSet.
 */

// Create a regular (non-tree) node

Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {

return new Node<>(hash, key, value, next);

}

// For conversion from TreeNodes to plain nodes

Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {

return new Node<>(p.hash, p.key, p.value, next);

}

// Create a tree bin node

TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {

return new TreeNode<>(hash, key, value, next);

}

// For treeifyBin

TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {

return new TreeNode<>(p.hash, p.key, p.value, next);

}

/**
 * Reset to initial default state. Called by clone and readObject.
 */

void reinitialize() {

table = null;

entrySet = null;

keySet = null;

values = null;

modCount = 0;

threshold = 0;

size = 0;

}

// Callbacks to allow LinkedHashMap post-actions

void afterNodeAccess(Node<K,V> p) { }

void afterNodeInsertion(boolean evict) { }

void afterNodeRemoval(Node<K,V> p) { }
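These empty callbacks are the hooks LinkedHashMap overrides to maintain its doubly linked entry list; afterNodeInsertion(evict) is also where LinkedHashMap consults removeEldestEntry. As an illustration of what that enables (my own sketch, not part of HashMap itself), a tiny access-ordered LRU cache:

// Usage sketch: the afterNode* hooks let LinkedHashMap implement an LRU cache (illustrative only).
import java.util.LinkedHashMap;
import java.util.Map;

public class LruCacheDemo {
    static <K, V> Map<K, V> lruCache(int maxEntries) {
        // accessOrder = true: afterNodeAccess moves touched entries to the tail.
        return new LinkedHashMap<K, V>(16, 0.75f, true) {
            @Override
            protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
                // Consulted from afterNodeInsertion(evict) after each put.
                return size() > maxEntries;
            }
        };
    }

    public static void main(String[] args) {
        Map<String, Integer> cache = lruCache(2);
        cache.put("a", 1);
        cache.put("b", 2);
        cache.get("a");                       // marks "a" as recently used
        cache.put("c", 3);                    // evicts the eldest entry, "b"
        System.out.println(cache.keySet());   // [a, c]
    }
}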

// Called only from writeObject, to ensure compatible ordering.

void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {

Node<K,V>[] tab;

if (size > 0 && (tab = table) != null) {

for (int i = 0; i < tab.length; ++i) {

for (Node<K,V> e = tab[i]; e != null; e = e.next) {

s.writeObject(e.key);

s.writeObject(e.value);

}

}

}

}

/* ------------------------------------------------------------ */

// Tree bins

/**
 * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
 * extends Node) so can be used as extension of either regular or
 * linked node.
 */

static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {

TreeNode<K,V> parent; // red-black tree links

TreeNode<K,V> left;

TreeNode<K,V> right;

TreeNode<K,V> prev; // needed to unlink next upon deletion

boolean red;

TreeNode(int hash, K key, V val, Node<K,V> next) {

super(hash, key, val, next);

}

/**
 * Returns root of tree containing this node.
 */

final TreeNode<K,V> root() {

for (TreeNode<K,V> r = this, p;;) {

if ((p = r.parent) == null)

return r;

r = p;

}

}

/**
 * Ensures that the given root is the first node of its bin.
 */

static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {

int n;

if (root != null && tab != null && (n = tab.length) > 0) {

int index = (n - 1) & root.hash;

TreeNode<K,V> first = (TreeNode<K,V>)tab[index];

if (root != first) {

Node<K,V> rn;

tab[index] = root;

TreeNode<K,V> rp = root.prev;

if ((rn = root.next) != null)

((TreeNode<K,V>)rn).prev = rp;

if (rp != null)

rp.next = rn;

if (first != null)

first.prev = root;

root.next = first;

root.prev = null;

}

assert checkInvariants(root);

}

}

/**
 * Finds the node starting at root p with the given hash and key.
 * The kc argument caches comparableClassFor(key) upon first use
 * comparing keys.
 */

final TreeNode<K,V> find(int h, Object k, Class<?> kc) {

TreeNode<K,V> p = this;

do {

int ph, dir; K pk;

TreeNode<K,V> pl = p.left, pr = p.right, q;

if ((ph = p.hash) > h)

p = pl;

else if (ph < h)

p = pr;

else if ((pk = p.key) == k || (k != null && k.equals(pk)))

return p;

else if (pl == null)

p = pr;

else if (pr == null)

p = pl;

else if ((kc != null ||

(kc = comparableClassFor(k)) != null) &&

(dir = compareComparables(kc, k, pk)) != 0)

p = (dir < 0) ? pl : pr;

else if ((q = pr.find(h, k, kc)) != null)

return q;

else

p = pl;

} while (p != null);

return null;

}

/**
 * Calls find for root node.
 */

final TreeNode<K,V> getTreeNode(int h, Object k) {

return ((parent != null) ? root() : this).find(h, k, null);

}

/**
 * Tie-breaking utility for ordering insertions when equal
 * hashCodes and non-comparable. We don't require a total
 * order, just a consistent insertion rule to maintain
 * equivalence across rebalancings. Tie-breaking further than
 * necessary simplifies testing a bit.
 */

static int tieBreakOrder(Object a, Object b) {

int d;

if (a == null || b == null ||

(d = a.getClass().getName().

compareTo(b.getClass().getName())) == 0)

d = (System.identityHashCode(a) <= System.identityHashCode(b) ?

-1 : 1);

return d;

}

/**
 * Forms tree of the nodes linked from this node.
 *
 * @return root of tree
 */

final void treeify(Node<K,V>[] tab) {

TreeNode<K,V> root = null;

for (TreeNode<K,V> x = this, next; x != null; x = next) {

next = (TreeNode<K,V>)x.next;

x.left = x.right = null;

if (root == null) {

x.parent = null;

x.red = false;

root = x;

}

else {

K k = x.key;

int h = x.hash;

Class<?> kc = null;

for (TreeNode<K,V> p = root;;) {

int dir, ph;

K pk = p.key;

if ((ph = p.hash) > h)

dir = -1;

else if (ph < h)

dir = 1;

else if ((kc == null &&

(kc = comparableClassFor(k)) == null) ||

(dir = compareComparables(kc, k, pk)) == 0)

dir = tieBreakOrder(k, pk);

TreeNode<K,V> xp = p;

if ((p = (dir <= 0) ? p.left : p.right) == null) {

x.parent = xp;

if (dir <= 0)

xp.left = x;

else

xp.right = x;

root = balanceInsertion(root, x);

break;

}

}

}

}

moveRootToFront(tab, root);

}

/**
 * Returns a list of non-TreeNodes replacing those linked from
 * this node.
 */

final Node<K,V> untreeify(HashMap<K,V> map) {

Node<K,V> hd = null, tl = null;

for (Node<K,V> q = this; q != null; q = q.next) {

Node<K,V> p = map.replacementNode(q, null);

if (tl == null)

hd = p;

else

tl.next = p;

tl = p;

}

return hd;

}

/**
 * Tree version of putVal.
 */

final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,

int h, K k, V v) {

Class<?> kc = null;

boolean searched = false;

TreeNode<K,V> root = (parent != null) ? root() : this;

for (TreeNode<K,V> p = root;;) {

int dir, ph; K pk;

if ((ph = p.hash) > h)

dir = -1;

else if (ph < h)

dir = 1;

else if ((pk = p.key) == k || (k != null && k.equals(pk)))

return p;

else if ((kc == null &&

(kc = comparableClassFor(k)) == null) ||

(dir = compareComparables(kc, k, pk)) == 0) {

if (!searched) {

TreeNode<K,V> q, ch;

searched = true;

if (((ch = p.left) != null &&

(q = ch.find(h, k, kc)) != null) ||

((ch = p.right) != null &&

(q = ch.find(h, k, kc)) != null))

return q;

}

dir = tieBreakOrder(k, pk);

}

TreeNode<K,V> xp = p;

if ((p = (dir <= 0) ? p.left : p.right) == null) {

Node<K,V> xpn = xp.next;

TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);

if (dir <= 0)

xp.left = x;

else

xp.right = x;

xp.next = x;

x.parent = x.prev = xp;

if (xpn != null)

((TreeNode<K,V>)xpn).prev = x;

moveRootToFront(tab, balanceInsertion(root, x));

return null;

}

}

}

/**
 * Removes the given node, that must be present before this call.
 * This is messier than typical red-black deletion code because we
 * cannot swap the contents of an interior node with a leaf
 * successor that is pinned by "next" pointers that are accessible
 * independently during traversal. So instead we swap the tree
 * linkages. If the current tree appears to have too few nodes,
 * the bin is converted back to a plain bin. (The test triggers
 * somewhere between 2 and 6 nodes, depending on tree structure).
 */

final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,

boolean movable) {

int n;

if (tab == null || (n = tab.length) == 0)

return;

int index = (n - 1) & hash;

TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;

TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;

if (pred == null)

tab[index] = first = succ;

else

pred.next = succ;

if (succ != null)

succ.prev = pred;

if (first == null)

return;

if (root.parent != null)

root = root.root();

if (root == null || root.right == null ||

(rl = root.left) == null || rl.left == null) {

tab[index] = first.untreeify(map); // too small

return;

}

TreeNode<K,V> p = this, pl = left, pr = right, replacement;

if (pl != null && pr != null) {

TreeNode<K,V> s = pr, sl;

while ((sl = s.left) != null) // find successor

s = sl;

boolean c = s.red; s.red = p.red; p.red = c; // swap colors

TreeNode<K,V> sr = s.right;

TreeNode<K,V> pp = p.parent;

if (s == pr) { // p was s's direct parent

p.parent = s;

s.right = p;

}

else {

TreeNode<K,V> sp = s.parent;

if ((p.parent = sp) != null) {

if (s == sp.left)

sp.left = p;

else

sp.right = p;

}

if ((s.right = pr) != null)

pr.parent = s;

}

p.left = null;

if ((p.right = sr) != null)

sr.parent = p;

if ((s.left = pl) != null)

pl.parent = s;

if ((s.parent = pp) == null)

root = s;

else if (p == pp.left)

pp.left = s;

else

pp.right = s;

if (sr != null)

replacement = sr;

else

replacement = p;

}

else if (pl != null)

replacement = pl;

else if (pr != null)

replacement = pr;

else

replacement = p;

if (replacement != p) {

TreeNode<K,V> pp = replacement.parent = p.parent;

if (pp == null)

root = replacement;

else if (p == pp.left)

pp.left = replacement;

else

pp.right = replacement;

p.left = p.right = p.parent = null;

}

TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);

if (replacement == p) { // detach

TreeNode<K,V> pp = p.parent;

p.parent = null;

if (pp != null) {

if (p == pp.left)

pp.left = null;

else if (p == pp.right)

pp.right = null;

}

}

if (movable)

moveRootToFront(tab, r);

}

/**
 * Splits nodes in a tree bin into lower and upper tree bins,
 * or untreeifies if now too small. Called only from resize;
 * see above discussion about split bits and indices.
 *
 * @param map the map
 * @param tab the table for recording bin heads
 * @param index the index of the table being split
 * @param bit the bit of hash to split on
 */

final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {

TreeNode<K,V> b = this;

// Relink into lo and hi lists, preserving order

TreeNode<K,V> loHead = null, loTail = null;

TreeNode<K,V> hiHead = null, hiTail = null;

int lc = 0, hc = 0;

for (TreeNode<K,V> e = b, next; e != null; e = next) {

next = (TreeNode<K,V>)e.next;

e.next = null;

if ((e.hash & bit) == 0) {

if ((e.prev = loTail) == null)

loHead = e;

else

loTail.next = e;

loTail = e;

++lc;

}

else {

if ((e.prev = hiTail) == null)

hiHead = e;

else

hiTail.next = e;

hiTail = e;

++hc;

}

}

if (loHead != null) {

if (lc <= UNTREEIFY_THRESHOLD)

tab[index] = loHead.untreeify(map);

else {

tab[index] = loHead;

if (hiHead != null) // (else is already treeified)

loHead.treeify(tab);

}

}

if (hiHead != null) {

if (hc <= UNTREEIFY_THRESHOLD)

tab[index + bit] = hiHead.untreeify(map);

else {

tab[index + bit] = hiHead;

if (loHead != null)

hiHead.treeify(tab);

}

}

}
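The lo/hi relinking above follows the same rule resize() applies to plain list bins: with the old capacity passed in as bit, an entry stays at index when (hash & bit) == 0 and moves to index + bit otherwise. A tiny stand-alone illustration with made-up hash values (not from the source):

// Illustration of the lo/hi split rule: with oldCap = 16 (the "bit"),
// an entry either stays at index or moves to index + 16 (illustrative only).
public class SplitRuleDemo {
    public static void main(String[] args) {
        int oldCap = 16;                       // the "bit" passed to split()
        int[] hashes = {5, 21, 37, 53};        // 5, 5+16, 5+32, 5+48

        for (int h : hashes) {
            int oldIndex = h & (oldCap - 1);   // index in the old table
            int newIndex = ((h & oldCap) == 0) ? oldIndex : oldIndex + oldCap;
            System.out.println("hash=" + h + " old=" + oldIndex + " new=" + newIndex);
        }
        // hash=5  -> old=5 new=5
        // hash=21 -> old=5 new=21
        // hash=37 -> old=5 new=5
        // hash=53 -> old=5 new=21
    }
}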

/* ------------------------------------------------------------ */

// Red-black tree methods, all adapted from CLR

static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,

TreeNode<K,V> p) {

TreeNode<K,V> r, pp, rl;

if (p != null && (r = p.right) != null) {

if ((rl = p.right = r.left) != null)

rl.parent = p;

if ((pp = r.parent = p.parent) == null)

(root = r).red = false;

else if (pp.left == p)

pp.left = r;

else

pp.right = r;

r.left = p;

p.parent = r;

}

return root;

}

static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,

TreeNode<K,V> p) {

TreeNode<K,V> l, pp, lr;

if (p != null && (l = p.left) != null) {

if ((lr = p.left = l.right) != null)

lr.parent = p;

if ((pp = l.parent = p.parent) == null)

(root = l).red = false;

else if (pp.right == p)

pp.right = l;

else

pp.left = l;

l.right = p;

p.parent = l;

}

return root;

}

static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,

TreeNode<K,V> x) {

x.red = true;

for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {

if ((xp = x.parent) == null) {

x.red = false;

return x;

}

else if (!xp.red || (xpp = xp.parent) == null)

return root;

if (xp == (xppl = xpp.left)) {

if ((xppr = xpp.right) != null && xppr.red) {

xppr.red = false;

xp.red = false;

xpp.red = true;

x = xpp;

}

else {

if (x == xp.right) {

root = rotateLeft(root, x = xp);

xpp = (xp = x.parent) == null ? null : xp.parent;

}

if (xp != null) {

xp.red = false;

if (xpp != null) {

xpp.red = true;

root = rotateRight(root, xpp);

}

}

}

}

else {

if (xppl != null && xppl.red) {

xppl.red = false;

xp.red = false;

xpp.red = true;

x = xpp;

}

else {

if (x == xp.left) {

root = rotateRight(root, x = xp);

xpp = (xp = x.parent) == null ? null : xp.parent;

}

if (xp != null) {

xp.red = false;

if (xpp != null) {

xpp.red = true;

root = rotateLeft(root, xpp);

}

}

}

}

}

}

static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,

TreeNode<K,V> x) {

for (TreeNode<K,V> xp, xpl, xpr;;) {

if (x == null || x == root)

return root;

else if ((xp = x.parent) == null) {

x.red = false;

return x;

}

else if (x.red) {

x.red = false;

return root;

}

else if ((xpl = xp.left) == x) {

if ((xpr = xp.right) != null && xpr.red) {

xpr.red = false;

xp.red = true;

root = rotateLeft(root, xp);

xpr = (xp = x.parent) == null ? null : xp.right;

}

if (xpr == null)

x = xp;

else {

TreeNode<K,V> sl = xpr.left, sr = xpr.right;

if ((sr == null || !sr.red) &&

(sl == null || !sl.red)) {

xpr.red = true;

x = xp;

}

else {

if (sr == null || !sr.red) {

if (sl != null)

sl.red = false;

xpr.red = true;

root = rotateRight(root, xpr);

xpr = (xp = x.parent) == null ?

null : xp.right;

}

if (xpr != null) {

xpr.red = (xp == null) ? false : xp.red;

if ((sr = xpr.right) != null)

sr.red = false;

}

if (xp != null) {

xp.red = false;

root = rotateLeft(root, xp);

}

x = root;

}

}

}

else { // symmetric

if (xpl != null && xpl.red) {

xpl.red = false;

xp.red = true;

root = rotateRight(root, xp);

xpl = (xp = x.parent) == null ? null : xp.left;

}

if (xpl == null)

x = xp;

else {

TreeNode<K,V> sl = xpl.left, sr = xpl.right;

if ((sl == null || !sl.red) &&

(sr == null || !sr.red)) {

xpl.red = true;

x = xp;

}

else {

if (sl == null || !sl.red) {

if (sr != null)

sr.red = false;

xpl.red = true;

root = rotateLeft(root, xpl);

xpl = (xp = x.parent) == null ?

null : xp.left;

}

if (xpl != null) {

xpl.red = (xp == null) ? false : xp.red;

if ((sl = xpl.left) != null)

sl.red = false;

}

if (xp != null) {

xp.red = false;

root = rotateRight(root, xp);

}

x = root;

}

}

}

}

}


Summary

Overall, interviews follow a pattern: round one covers fundamentals, round two covers architecture, and round three is about you as a person.

Finally, I have collected and organized some materials, including interview questions (with answers), books, and videos, which I hope will also help readers aiming for offers from the big tech companies.
