生活
写代码的时候总是想象维护你代码的家伙是一个知道你住在哪里的暴力精神病患者。
HashMap
在以前,学习Java集合时,只知道HashMap实现了Map接口,底层是一个由链表组成的数组,线程不安全;具体是怎样实现的,其实并没有深入看过。
前面看了ConcurrentHashMap,相信现在看HashMap的源码会简单很多。
HashMap在 1.7和1.8下的实现有些不同,今天先来看1.7的实现。
成员
先来看下HashMap的成员组成
//default initial capacity: 16 (capacities are always powers of two)
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16
//maximum supported capacity: 2^30
static final int MAXIMUM_CAPACITY = 1 << 30;
//default load factor: 0.75
static final float DEFAULT_LOAD_FACTOR = 0.75f;
//shared empty table used until the map is first inflated on put
static final Entry<?,?>[] EMPTY_TABLE = {};
//the bucket array: each slot is the head of a singly-linked entry chain
transient Entry<K,V>[] table = (Entry<K,V>[]) EMPTY_TABLE;
//number of key-value mappings currently stored
transient int size;
//resize threshold (capacity * loadFactor); before inflation it temporarily
//holds the requested initial capacity
int threshold;
//load factor configured at construction time
final float loadFactor;
//structural modification count, used by fail-fast iterators
transient int modCount;
//switch for the alternative String-key hash algorithm that reduces collisions
static final int ALTERNATIVE_HASHING_THRESHOLD_DEFAULT = Integer.MAX_VALUE;
Entry
//cached hash of the key (recomputed only when rehashing with a new seed)
final int hash;
final K key;
V value;
//next entry in the same bucket's chain
//NOTE(review): 1.7's Entry declares this field as Entry<K,V>; the "Node"
//type here looks copied from the 1.8 source — verify against the JDK.
Node<K,V> next;
创建
/**
 * Constructs an empty HashMap with the given initial capacity and load factor.
 * The bucket array is NOT allocated here: threshold temporarily stores the
 * requested capacity until the table is inflated on the first put.
 *
 * @throws IllegalArgumentException if capacity is negative or loadFactor
 *         is non-positive/NaN
 */
public HashMap(int initialCapacity, float loadFactor) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException("Illegal initial capacity: " +
                initialCapacity);
    //silently clamp to the maximum supported capacity
    if (initialCapacity > MAXIMUM_CAPACITY)
        initialCapacity = MAXIMUM_CAPACITY;
    if (loadFactor <= 0 || Float.isNaN(loadFactor))
        throw new IllegalArgumentException("Illegal load factor: " +
                loadFactor);
    this.loadFactor = loadFactor;
    //threshold holds the raw capacity until inflateTable runs
    threshold = initialCapacity;
    //hook for subclasses (e.g. LinkedHashMap); no-op in HashMap itself
    init();
}
/** Constructs an empty map with the given capacity and the default load factor (0.75). */
public HashMap(int initialCapacity) {
    this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
/** Constructs an empty map with the default capacity (16) and load factor (0.75). */
public HashMap() {
    this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
}
//上面的构造器并没有执行桶的初始化。而是在第一次put时初始化
//只有这个需要初始化
/**
 * Constructs a map containing all mappings of m.
 * Unlike the other constructors, this one inflates the bucket array
 * eagerly, because it is about to insert entries immediately.
 */
public HashMap(Map<? extends K, ? extends V> m) {
    //size the table so m fits without an immediate resize
    this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
            DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR);
    //allocate the bucket array now
    inflateTable(threshold);
    //copy the entries (no resize/modCount bookkeeping needed)
    putAllForCreate(m);
}
来看下初始化桶的操作
//初始化桶
private void inflateTable(int toSize) {
// Find a power of 2 >= toSize
//把size设置为2的n次
int capacity = roundUpToPowerOf2(toSize);
//设置阈值
threshold = (int) Math.min(capacity * loadFactor, MAXIMUM_CAPACITY + 1);
//创建
table = new Entry[capacity];
//这个方法在每次扩容的时候都会执行,
//可以把它看成一个开关,如果开关打开,并且key的类型是String时可以采取sun.misc.Hashing.stringHash32方法获取其hash值。
/在JDK 8 中,hashSeed已经被移除掉了,移除掉的原因是调用sun.misc.Hashing.randomHashSeed计算hashSeed时会调用方法java.util.Random.nextInt(),该方法使用AtomicLong,在多线程情况下会有性能问题。
initHashSeedAsNeeded(capacity);
}
至于 putAllForCreate,看一下就明白,实现挺简单的:
/** Copies every mapping of m via putForCreate (no resize or modCount bookkeeping). */
private void putAllForCreate(Map<? extends K, ? extends V> m) {
    for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
        putForCreate(e.getKey(), e.getValue());
}
/**
 * Internal put used only during construction, clone, and deserialization:
 * it skips resize checks, modCount updates, and recordAccess hooks.
 * A null key hashes to 0 (bucket 0).
 */
private void putForCreate(K key, V value) {
    int hash = null == key ? 0 : hash(key);
    int i = indexFor(hash, table.length);
    /**
     * Look for preexisting entry for key. This will never happen for
     * clone or deserialize. It will only happen for construction if the
     * input Map is a sorted map whose ordering is inconsistent w/ equals.
     */
    for (Entry<K,V> e = table[i]; e != null; e = e.next) {
        Object k;
        //compare hash first (cheap), then reference identity, then equals
        if (e.hash == hash &&
            ((k = e.key) == key || (key != null && key.equals(k)))) {
            e.value = value;
            return;
        }
    }
    //key not present: append a new entry to bucket i
    createEntry(hash, key, value, i);
}
有对应的key就直接替换value,没有就创建一个新的entry
put
/**
 * Associates value with key, replacing any previous mapping.
 *
 * @return the previous value for key, or null if there was no mapping
 */
public V put(K key, V value) {
    //the bucket array is created lazily: inflate it on the first put
    if (table == EMPTY_TABLE) {
        inflateTable(threshold);
    }
    //null keys always live in bucket 0 and take a dedicated path
    if (key == null)
        return putForNullKey(value);
    //locate the bucket and scan its chain; if the key already exists,
    //replace its value, otherwise create a brand-new entry
    int hash = hash(key);
    int i = indexFor(hash, table.length);
    for (Entry<K,V> e = table[i]; e != null; e = e.next) {
        Object k;
        if (e.hash == hash && ((k = e.key) == key || key.equals(k))) {
            V oldValue = e.value;
            e.value = value;
            //hook for LinkedHashMap's access ordering; no-op in HashMap
            e.recordAccess(this);
            return oldValue;
        }
    }
    //structural change: bump modCount for fail-fast iterators
    modCount++;
    addEntry(hash, key, value, i);
    return null;
}
/**
 * Adds a new entry for key/value into the given bucket, first doubling
 * the table if the size threshold has been reached AND the target bucket
 * is already occupied.
 */
void addEntry(int hash, K key, V value, int bucketIndex) {
    //resize only when at capacity and a collision would actually occur
    if ((size >= threshold) && (null != table[bucketIndex])) {
        resize(2 * table.length);
        //recompute hash/index against the new, larger table
        hash = (null != key) ? hash(key) : 0;
        bucketIndex = indexFor(hash, table.length);
    }
    //create the new entry at the head of the bucket
    createEntry(hash, key, value, bucketIndex);
}
关键要看下扩容是如何实现的:
/**
 * Rehashes the contents into a new array of the given capacity.
 * Once the table reaches MAXIMUM_CAPACITY it can never grow again, so
 * the threshold is pinned to Integer.MAX_VALUE instead.
 */
void resize(int newCapacity) {
    Entry[] oldTable = table;
    int oldCapacity = oldTable.length;
    if (oldCapacity == MAXIMUM_CAPACITY) {
        threshold = Integer.MAX_VALUE;
        return;
    }
    Entry[] newTable = new Entry[newCapacity];
    //initHashSeedAsNeeded decides whether entries must be rehashed
    //(alternative String hashing) while being transferred
    transfer(newTable, initHashSeedAsNeeded(newCapacity));
    table = newTable;
    threshold = (int)Math.min(newCapacity * loadFactor, MAXIMUM_CAPACITY + 1);
}
核心方法在transfer
/**
 * Moves every entry from the current table into newTable.
 * Entries are head-inserted into their new bucket, which reverses chain
 * order — under concurrent resizing this head-insertion is what can
 * produce the infamous 1.7 circular-linked-list / infinite-get bug.
 */
void transfer(Entry[] newTable, boolean rehash) {
    int newCapacity = newTable.length;
    for (Entry<K,V> e : table) {
        while(null != e) {
            //remember the rest of the chain before relinking e
            Entry<K,V> next = e.next;
            if (rehash) {
                e.hash = null == e.key ? 0 : hash(e.key);
            }
            //recompute the bucket index from the hash and the new capacity
            int i = indexFor(e.hash, newCapacity);
            //head-insert e into its new bucket
            e.next = newTable[i];
            newTable[i] = e;
            e = next;
        }
    }
}
get
/**
 * Returns the value mapped to key, or null when the key is absent.
 * A null key is looked up through a dedicated bucket-0 path.
 */
public V get(Object key) {
    // null keys never reach getEntry; they have their own lookup
    if (key == null) {
        return getForNullKey();
    }
    Entry<K,V> found = getEntry(key);
    if (found == null) {
        return null;
    }
    return found.getValue();
}
/**
 * Returns the Entry for the given key, or null if absent.
 * A null key uses hash 0 and therefore bucket 0.
 */
final Entry<K,V> getEntry(Object key) {
    if (size == 0) {
        return null;
    }
    //map the hash to a bucket index, then walk that bucket's chain
    int hash = (key == null) ? 0 : hash(key);
    for (Entry<K,V> e = table[indexFor(hash, table.length)];
         e != null;
         e = e.next) {
        Object k;
        //compare hash first (cheap), then reference identity, then equals
        if (e.hash == hash &&
            ((k = e.key) == key || (key != null && key.equals(k))))
            return e;
    }
    return null;
}
区别
JDK1.8下的HashMap较1.7做了很多优化:
1、由 数组+链表 到 数组+链表+红黑树。
【当链表的长度大于等于8时,会转成红黑树。为什么?因为长度为8的链表查询一个元素平均需要4次比较,而一个8个节点的红黑树平均只需要约3次。
当红黑树的节点数小于等于6时,会转回链表。因为节点数小于等于6时链表查询已经很快了,如果依旧使用红黑树,它的左旋右旋操作反而会使性能降低。
其次,选择6和8,中间还有7,可以防止 频繁的在链表与红黑树之间变换。
】
2、实现了高效的Hash算法,用&取代%,在效果一样的前提下保证了高效,
整个过程本质上就是三步:
拿到key的hashCode值
将hashCode的高位参与运算,重新计算hash值
将计算出来的hash值与(table.length - 1)进行&运算
这种实现方法保证了扩容后,原桶中的数据要么留在原来的位置,要么移动到 原索引+旧容量(j + oldCap)的位置。
3、JDK1.7下的HashMap在多线程并发put ,resize时会出现循环链表的情况。导致get出现死循环,
在1.8下不会出现。原理是:
声明两对头尾指针,维护两条链表【分别对应 原桶位置 和 原桶位置+旧容量】
依次在末端添加新的元素。(在多线程操作的情况下,无非是第二个线程重复第一个线程一模一样的操作)
1.8源码深入
这里就简单看下 resize和hash
/**
 * 1.8 resize: initializes the table or doubles its capacity.
 * Each bucket's chain is split into a "lo" list that stays at index j and
 * a "hi" list that moves to index j + oldCap, decided by the single bit
 * (e.hash & oldCap). Both lists are built by tail insertion, preserving
 * relative order.
 */
final Node<K,V>[] resize() {
    Node<K,V>[] oldTab = table;
    int oldCap = (oldTab == null) ? 0 : oldTab.length;
    int oldThr = threshold;
    int newCap, newThr = 0;
    if (oldCap > 0) {
        //already at the cap: pin the threshold and never resize again
        if (oldCap >= MAXIMUM_CAPACITY) {
            threshold = Integer.MAX_VALUE;
            return oldTab;
        }
        else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                 oldCap >= DEFAULT_INITIAL_CAPACITY)
            newThr = oldThr << 1; // double threshold
    }
    else if (oldThr > 0) // initial capacity was placed in threshold
        newCap = oldThr;
    else {               // zero initial threshold signifies using defaults
        newCap = DEFAULT_INITIAL_CAPACITY;
        newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
    }
    //newThr still 0 means capacity came from oldThr: derive it from loadFactor
    if (newThr == 0) {
        float ft = (float)newCap * loadFactor;
        newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                  (int)ft : Integer.MAX_VALUE);
    }
    threshold = newThr;
    @SuppressWarnings({"rawtypes","unchecked"})
    Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
    table = newTab;
    if (oldTab != null) {
        for (int j = 0; j < oldCap; ++j) {
            Node<K,V> e;
            if ((e = oldTab[j]) != null) {
                oldTab[j] = null;
                //single node: drop it straight into its new slot
                if (e.next == null)
                    newTab[e.hash & (newCap - 1)] = e;
                //tree bin: split handles relocation (and may untreeify)
                else if (e instanceof TreeNode)
                    ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                else { // preserve order
                    Node<K,V> loHead = null, loTail = null;
                    Node<K,V> hiHead = null, hiTail = null;
                    Node<K,V> next;
                    do {
                        next = e.next;
                        //the single bit (e.hash & oldCap) picks lo or hi list
                        if ((e.hash & oldCap) == 0) {
                            if (loTail == null)
                                loHead = e;
                            else
                                loTail.next = e;
                            loTail = e;
                        }
                        else {
                            if (hiTail == null)
                                hiHead = e;
                            else
                                hiTail.next = e;
                            hiTail = e;
                        }
                    } while ((e = next) != null);
                    if (loTail != null) {
                        loTail.next = null;
                        newTab[j] = loHead;
                    }
                    if (hiTail != null) {
                        //the crucial next-clearing: tail insertion plus this
                        //cut is what prevents the 1.7-style circular chains
                        hiTail.next = null;
                        //the whole hi list moves to bucket j + oldCap
                        newTab[j + oldCap] = hiHead;
                    }
                }
            }
        }
    }
    return newTab;
}
/**
 * Spreads the higher bits of the key's hashCode into the lower bits by
 * XOR-ing the top 16 bits down. Bucket indexing only uses the low-order
 * bits ((n - 1) & hash), so this reduces collisions for small tables.
 * A null key hashes to 0.
 */
static final int hash(Object key) {
    if (key == null) {
        return 0;
    }
    int h = key.hashCode();
    return h ^ (h >>> 16);
}
/**
 * 1.8 lookup: locates the bucket via (n - 1) & hash, always checks the
 * first node, then either delegates to the red-black tree (TreeNode) or
 * walks the linked list.
 */
final Node<K,V> getNode(int hash, Object key) {
    Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
    if ((tab = table) != null && (n = tab.length) > 0 &&
        //note how the bucket index is computed: first = tab[(n - 1) & hash]
        (first = tab[(n - 1) & hash]) != null) {
        if (first.hash == hash && // always check first node
            ((k = first.key) == key || (key != null && key.equals(k))))
            return first;
        if ((e = first.next) != null) {
            //tree bin: red-black tree lookup instead of a list walk
            if (first instanceof TreeNode)
                return ((TreeNode<K,V>)first).getTreeNode(hash, key);
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k))))
                    return e;
            } while ((e = e.next) != null);
        }
    }
    return null;
}
假设桶的长度为16,取一个key的桶位置,如图:
这篇博客讲解1.8的HashMap非常详细
https://blog.csdn.net/v123411739/article/details/78996181