Important attributes
-
The default capacity is 16
/**
 * The default initial capacity - MUST be a power of two.
 */
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16
-
Default load factor
/**
 * The load factor used when none is specified in a constructor.
 */
static final float DEFAULT_LOAD_FACTOR = 0.75f;
-
Maximum capacity is 2^30
/**
 * The maximum capacity, used if a higher value is implicitly specified
 * by either of the constructors with arguments.
 * MUST be a power of two <= 1<<30.
 */
static final int MAXIMUM_CAPACITY = 1 << 30;
-
Threshold for converting a bucket from a linked list to a red-black tree
/**
 * The bin count threshold for converting a bucket from a linked list to a
 * tree (the list is treeified when the 9th element is inserted into it).
 * Must be greater than 2 and should be at least 8 to mesh with the
 * assumptions in tree removal about converting back to a plain bin
 * upon shrinkage.
 */
static final int TREEIFY_THRESHOLD = 8;

/**
 * The bin count threshold for un-treeifying a (split) bucket during a
 * resize operation (when a bucket holds fewer than this many elements it
 * is converted back to linked-list storage).
 * Should be less than TREEIFY_THRESHOLD, and at most 6 to mesh with
 * shrinkage detection under removal.
 */
static final int UNTREEIFY_THRESHOLD = 6;

/**
 * The smallest table capacity for which buckets may be treeified
 * (otherwise, if a bucket has too many nodes, the table is resized
 * instead). Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
 * between the resizing and treeification thresholds.
 * Rationale: a somewhat larger, sparser table performs better than a
 * small, full one, so growing is preferred over tree storage early on.
 */
static final int MIN_TREEIFY_CAPACITY = 64;
-
Data table
/**
 * The hash table, initialized on first use and resized as necessary
 * (length is always a power of two). A length of zero is also tolerated
 * in some operations, to allow bootstrapping mechanics that are
 * currently not needed.
 */
transient Node<K,V>[] table;

/** The number of key-value mappings contained in this map. */
transient int size;

/** The number of structural modifications (supports fail-fast iterators). */
transient int modCount;

/**
 * The next size value at which to resize (capacity * load factor).
 * If the table array has not been allocated yet, this field holds the
 * initial capacity, or zero, signifying DEFAULT_INITIAL_CAPACITY.
 */
int threshold;

/** The load factor for the hash table. */
final float loadFactor;
-
Data node type
static class Node implements Map.Entry { Final int hash; // used to locate the array index position final K key; V value; Node next; // the next node in the list Node(int hash, K key, V value, Node next) { ... } public final K getKey(){ ... } public final V getValue() { ... } public final String toString() { ... } public final int hashCode() { ... } public final V setValue(V newValue) { ... } public final boolean equals(Object o) { ... } } static final class TreeNode extends LinkedHashMap.Entry { Treenode parent; // parent Treenode left; // left Treenode right; // right TreeNode prev; // needed to unlink next upon deletion Boolean red; // judge the color TreeNode(int hash, K key, V val, Node next) { super(hash, key, val, next); } //Return to root node final TreeNode root() { for (TreeNode r = this, p;;) { if ((p = r.parent) == null) return r; r = p; }
From construction method to capacity expansion
-
Construction method
/**
 * Constructs an empty HashMap with the specified initial capacity and
 * load factor.
 *
 * @param initialCapacity the initial capacity
 * @param loadFactor the load factor
 * @throws IllegalArgumentException if the initial capacity is negative
 *         or the load factor is nonpositive
 */
public HashMap(int initialCapacity, float loadFactor) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException("Illegal initial capacity: " + initialCapacity);
    if (initialCapacity > MAXIMUM_CAPACITY)
        initialCapacity = MAXIMUM_CAPACITY;
    if (loadFactor <= 0 || Float.isNaN(loadFactor))
        throw new IllegalArgumentException("Illegal load factor: " + loadFactor);
    this.loadFactor = loadFactor;
    // tableSizeFor rounds the requested capacity up to the next power of
    // two; the result is parked in threshold until the table is actually
    // allocated (lazy initialization in resize()).
    this.threshold = tableSizeFor(initialCapacity);
}

/**
 * Constructs a new HashMap with the same mappings as the specified Map,
 * using the default load factor (0.75) and an initial capacity
 * sufficient to hold the mappings in the specified Map.
 *
 * @param m the map whose mappings are to be placed in this map
 */
public HashMap(Map<? extends K, ? extends V> m) {
    this.loadFactor = DEFAULT_LOAD_FACTOR;
    putMapEntries(m, false);
}
-
putAll() method
//Implement map.putall and map constructor final void putMapEntries(Map extends K, ? extends V> m, boolean evict) { int s = m.size(); if (s > 0) { //Determine whether the table has been initialized if (table == null) { // pre-size //Capacity * LoadFactor = threshold float ft = ((float)s / loadFactor) + 1.0F; //Get the minimum capacity required to save the input parameter map (size) int t = ((ft < (float)MAXIMUM_CAPACITY) ? (int)ft : MAXIMUM_CAPACITY); //Refresh threshold based on capacity if (t > threshold) threshold = tableSizeFor(t); } //It has been initialized and the number of M elements is greater than the threshold value for capacity expansion else if (s > threshold) resize(); for (Map.Entry e : m.entrySet()) { K key = e.getKey(); V value = e.getValue(); // constructor-evict:false // putAll-evict:true putVal(hash(key), key, value, false, evict); } } }
-
Core put method
/** * Implements Map.put and related methods * * @param hash hash for key * @param key the key * @param value the value to put * @param onlyIfAbsent if true, don't change existing value * @param evict if false, the table is in creation mode. * @return previous value, or null if none */ final V putVal(int hash, K key, V value, boolean onlyIfAbsent, boolean evict) { Node[] tab; Node p; int n, i; //Table is uninitialized or 0 in length for capacity expansion if ((tab = table) == null || (n = tab.length) == 0) n = (tab = resize()).length; //(n - 1) & hash determines the bucket in which the element is stored. If the bucket is empty, the newly generated node is placed in the bucket (at this time, the node is placed in the array) if ((p = tab[i = (n - 1) & hash]) == null) tab[i] = newNode(hash, key, value, null); //Element already exists in bucket else { Node e; K k; //Compare the hash value of the first element (node in the array) in the bucket to be equal, and the key to be equal if (p.hash == hash && ((k = p.key) == key || (key != null && key.equals(k)))) //Assign the first element to e and record with e e = p; else if (p instanceof TreeNode) e = ((TreeNode)p).putTreeVal(this, tab, hash, key, value); else { for (int binCount = 0; ; ++binCount) { if ((e = p.next) == null) { p.next = newNode(hash, key, value, null); if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st treeifyBin(tab, hash); break; } if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) break; p = e; } } if (e != null) { // existing mapping for key V oldValue = e.value; if (!onlyIfAbsent || oldValue == null) e.value = value; afterNodeAccess(e); return oldValue; } } ++modCount; if (++size > threshold) resize(); afterNodeInsertion(evict); return null; }
-
The expansion (resize) method is expensive
/** *Initializes or doubles the size of the array. If it is empty, the allocation is based on the initial capacity target maintained in the attribute threshold. *Otherwise, because we are using a power of 2, the elements in each bin must either remain the same index or be offset by a power of 2 in the new table. * * @return the table */ final Node[] resize() { Node[] oldTab = table; int oldCap = (oldTab == null) ? 0 : oldTab.length; int oldThr = threshold; int newCap, newThr = 0; //Not empty before expansion if (oldCap > 0) { //If it exceeds the maximum value, it will not expand any more, so we have to collide with you if (oldCap >= MAXIMUM_CAPACITY) { threshold = Integer.MAX_VALUE; return oldTab; } //If the maximum value is not exceeded, it will be expanded to 2 times of the original value (after doubling, it cannot be larger than the maximum capacity) else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY && oldCap >= DEFAULT_INITIAL_CAPACITY) newThr = oldThr << 1; // double threshold } //Initialization capacity = threshold value (assigned in 2 parameter construction) else if (oldThr > 0) // initial capacity was placed in threshold newCap = oldThr; //Initialization method -- threshold = 0 (default value is used) else { // zero initial threshold signifies using defaults newCap = DEFAULT_INITIAL_CAPACITY; newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY); } //Calculate new resize upper limit if (newThr == 0) { float ft = (float)newCap * loadFactor; newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ? 
(int)ft : Integer.MAX_VALUE); } threshold = newThr; @SuppressWarnings({"rawtypes","unchecked"}) Node[] newTab = (Node[])new Node[newCap]; table = newTab; if (oldTab != null) { //Move each bucket to a new bucket for (int j = 0; j < oldCap; ++j) { Node e; if ((e = oldTab[j]) != null) { oldTab[j] = null; if (e.next == null) newTab[e.hash & (newCap - 1)] = e; else if (e instanceof TreeNode) ((TreeNode)e).split(this, newTab, j, oldCap); else { // preserve order //Code block of chain list optimization with multiple hash Node loHead = null, loTail = null; Node hiHead = null, hiTail = null; Node next; do { next = e.next; // original index if ((e.hash & oldCap) == 0) { if (loTail == null) loHead = e; else loTail.next = e; loTail = e; } // original index + oldCap else { if (hiTail == null) hiHead = e; else hiTail.next = e; hiTail = e; } } while ((e = next) != null); // original index放到bucket里 if (loTail != null) { loTail.next = null; newTab[j] = loHead; } // original index + oldCap放到bucket里 if (hiTail != null) { hiTail.next = null; newTab[j + oldCap] = hiHead; } } } } } return newTab; }