Data Structure
In JDK 1.8, ConcurrentHashMap uses an array + singly linked list + red-black tree structure. The array and the linked lists store Node objects, while the red-black trees store TreeNode objects (a subclass of Node).
static class Node<K,V> implements Map.Entry<K,V> {
    final int hash;
    final K key;
    volatile V val;
    volatile Node<K,V> next;
}

static final class TreeNode<K,V> extends Node<K,V> {
    TreeNode<K,V> parent;  // red-black tree links
    TreeNode<K,V> left;
    TreeNode<K,V> right;
    TreeNode<K,V> prev;    // needed to unlink next upon deletion
    boolean red;

    TreeNode(int hash, K key, V val, Node<K,V> next, TreeNode<K,V> parent) {
        super(hash, key, val, next);
        this.parent = parent;
    }
}
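Besides plain list nodes and TreeNodes, a bucket's head can also be a special node whose hash field holds a negative sentinel value; putVal() and get() below test for these. For reference, the sentinel constants as defined in the JDK 1.8 source:

static final int MOVED     = -1; // hash of a ForwardingNode: the bucket has been moved during a resize
static final int TREEBIN   = -2; // hash of a TreeBin: the bucket holds the root of a red-black tree
static final int RESERVED  = -3; // hash of a placeholder node used by compute()/computeIfAbsent()
static final int HASH_BITS = 0x7fffffff; // usable bits of a normal node hash (keeps it non-negative)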
Common Methods
Usage
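The original article leaves this section without an example, so here is a minimal usage sketch (the class name and sample data are made up for illustration) showing the most common operations: put(), get(), and the atomic computeIfAbsent()/merge() helpers that ConcurrentHashMap adds on top of the Map interface.

import java.util.concurrent.ConcurrentHashMap;

public class ConcurrentHashMapDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();

        map.put("a", 1);                   // insert or overwrite
        map.putIfAbsent("a", 100);         // no-op, "a" is already present
        Integer a = map.get("a");          // 1

        // Atomic per-key insertion; the mapping function runs at most once per key.
        map.computeIfAbsent("b", k -> 2);

        // Atomic "add or combine" -- handy for concurrent counters.
        map.merge("a", 10, Integer::sum);  // "a" -> 11

        System.out.println(map);           // e.g. {a=11, b=2}
    }
}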
Source Code Analysis
Key Fields
// Maximum table capacity
static final int MAXIMUM_CAPACITY = 1 << 30;

// Default initial capacity
static final int DEFAULT_CAPACITY = 16;

// Largest possible array size
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

// Load factor
static final float LOAD_FACTOR = 0.75f;

// Treeify threshold: when a bucket's linked list reaches this length,
// the list is converted into a red-black tree
static final int TREEIFY_THRESHOLD = 8;

// Untreeify threshold: when buckets are split during a resize, a tree bin that
// ends up with no more than this many nodes is converted back into a linked list
static final int UNTREEIFY_THRESHOLD = 6;

// Minimum table capacity for treeification: a list is only converted into a tree
// when the table length is at least this value; otherwise the table is resized instead
static final int MIN_TREEIFY_CAPACITY = 64;
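A short worked example of how these constants interact (a sketch based on the values above, not code from the JDK): with the default capacity of 16 and a 0.75 load factor, the resize threshold is 12, and a bucket is only treeified once its chain reaches TREEIFY_THRESHOLD *and* the table is at least 64 slots long; otherwise treeifyBin() just triggers a resize.

public class ThresholdDemo {
    public static void main(String[] args) {
        int capacity = 16;                                  // DEFAULT_CAPACITY
        int resizeThreshold = capacity - (capacity >>> 2);  // 0.75 * n, as computed in initTable()
        System.out.println(resizeThreshold);                // 12

        int tableLength = 32;
        int binLength = 9;
        // Mirrors the decision made in treeifyBin(): while the table is still small,
        // resize it instead of building a red-black tree.
        boolean treeify = binLength >= 8 /* TREEIFY_THRESHOLD */
                && tableLength >= 64     /* MIN_TREEIFY_CAPACITY */;
        System.out.println(treeify ? "treeify the bin" : "resize the table instead");
    }
}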
Constructors
public ConcurrentHashMap() {
}

public ConcurrentHashMap(int initialCapacity) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException();
    int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
               MAXIMUM_CAPACITY :
               tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
    this.sizeCtl = cap;
}

public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
    this.sizeCtl = DEFAULT_CAPACITY;
    putAll(m);
}

public ConcurrentHashMap(int initialCapacity, float loadFactor) {
    this(initialCapacity, loadFactor, 1);
}

public ConcurrentHashMap(int initialCapacity,
                         float loadFactor, int concurrencyLevel) {
    if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    if (initialCapacity < concurrencyLevel)   // Use at least as many bins
        initialCapacity = concurrencyLevel;   // as estimated threads
    long size = (long)(1.0 + (long)initialCapacity / loadFactor);
    int cap = (size >= (long)MAXIMUM_CAPACITY) ?
        MAXIMUM_CAPACITY : tableSizeFor((int)size);
    this.sizeCtl = cap;
}
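Note that the single-argument constructor does not use initialCapacity directly: it rounds initialCapacity * 1.5 + 1 up to the next power of two and stores the result in sizeCtl. A small self-contained sketch of the rounding (tableSizeFor() is reproduced from the JDK 8 source; the surrounding main() is only illustration):

public class TableSizeForDemo {
    static final int MAXIMUM_CAPACITY = 1 << 30;

    // Rounds c up to the next power of two (reproduced from the JDK 8 source).
    static int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    public static void main(String[] args) {
        // new ConcurrentHashMap<>(16): 16 + 8 + 1 = 25 -> rounded up to 32
        System.out.println(tableSizeFor(16 + (16 >>> 1) + 1));  // 32
        // new ConcurrentHashMap<>(10): 10 + 5 + 1 = 16 -> already a power of two
        System.out.println(tableSizeFor(10 + (10 >>> 1) + 1));  // 16
    }
}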
put() method
public V put(K key, V value) {
    return putVal(key, value, false);
}

final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException();
    int hash = spread(key.hashCode());
    int binCount = 0;
    // Loop until the insertion succeeds
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        // 1. Lazily initialize the Node array
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        // 2. Compute the bucket index for this key and check whether a Node already sits there
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            // 2.1 The bucket is empty, so there is no hash collision; try to write the new
            //     Node with CAS, and retry the loop if the CAS fails
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break;                   // no lock when adding to empty bin
        }
        // 3. The head node's hash is MOVED (-1): a resize is in progress, so help with the transfer
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            // 4. Hash collision: lock the head node of the list or tree with synchronized, then write
            V oldVal = null;
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    // 4.1 The bucket holds a linked list
                    if (fh >= 0) {
                        binCount = 1;
                        // Walk the list headed by this Node to see whether the key already exists
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            // If the key already exists, replace the old value with the new one
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            // Otherwise append the new key-value pair at the tail of the list
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key, value, null);
                                break;
                            }
                        }
                    }
                    // 4.2 The bucket holds a red-black tree
                    else if (f instanceof TreeBin) {
                        Node<K,V> p;
                        binCount = 2;
                        // Insert into or update the tree: if a node with the same key exists,
                        // overwrite its value; otherwise insert a new TreeNode
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            // 5. If the list now holds TREEIFY_THRESHOLD (8) or more nodes, convert it to a red-black tree
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    addCount(1L, binCount);
    return null;
}
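putVal() first mixes the key's hashCode with spread() and then maps it to a bucket with (n - 1) & hash, which works because the table length n is always a power of two. A self-contained sketch (spread() mirrors the JDK 8 definition; the sample key and table length are just for illustration):

public class SpreadDemo {
    static final int HASH_BITS = 0x7fffffff; // keeps the hash non-negative

    // Mirrors ConcurrentHashMap.spread(): XOR the high 16 bits into the low 16 bits
    // so that small tables still take the high-order bits of the hash into account.
    static int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }

    public static void main(String[] args) {
        int n = 16;                           // table length (always a power of two)
        int hash = spread("hello".hashCode());
        int index = (n - 1) & hash;           // equivalent to hash % n, but cheaper
        System.out.println("bucket index = " + index);
    }
}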
Meanings of the sizeCtl field:
-1: the table is being initialized
other negative values: a resize is in progress (the value encodes a resize stamp plus the number of threads helping with the transfer)
0: the table has not been initialized yet (the default)
greater than 0: before initialization, the initial capacity to use; afterwards, the element count that triggers the next resize (e.g. 12 for a table of length 16)
initTable() method
private final Node<K,V>[] initTable() {
    Node<K,V>[] tab; int sc;
    while ((tab = table) == null || tab.length == 0) {
        // Another thread is already initializing the table: yield the CPU and spin
        if ((sc = sizeCtl) < 0)
            Thread.yield(); // lost initialization race; just spin
        // Otherwise CAS sizeCtl to -1, which acts as acquiring the initialization "lock"
        else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
            try {
                if ((tab = table) == null || tab.length == 0) {
                    // Default initial table length is 16
                    int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                    @SuppressWarnings("unchecked")
                    Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                    table = tab = nt;
                    // Next resize threshold: n - n/4 = 0.75n, matching the traditional threshold
                    sc = n - (n >>> 2);
                }
            } finally {
                sizeCtl = sc;
            }
            break;
        }
    }
    return tab;
}
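The pattern here — losers spin while one thread wins a CAS on a control field and performs the one-time initialization — is easy to reuse outside the JDK. A minimal generic sketch of the same idea using AtomicInteger instead of Unsafe (the class, field, and state names are made up for illustration):

import java.util.concurrent.atomic.AtomicInteger;

public class LazyTable {
    private static final int UNINITIALIZED = 0, INITIALIZING = -1, READY = 1;

    private final AtomicInteger state = new AtomicInteger(UNINITIALIZED);
    private volatile Object[] table;

    Object[] getTable() {
        while (table == null) {
            if (state.get() == INITIALIZING) {
                Thread.yield();                          // someone else is initializing; spin
            } else if (state.compareAndSet(UNINITIALIZED, INITIALIZING)) {
                try {
                    table = new Object[16];              // one-time initialization
                } finally {
                    // Publish READY on success; allow a retry if initialization failed.
                    state.set(table != null ? READY : UNINITIALIZED);
                }
            }
        }
        return table;
    }
}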
get() method
public V get(Object key) {
    Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
    // Compute the bucket index for this key and check whether a Node sits there
    int h = spread(key.hashCode());
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (e = tabAt(tab, (n - 1) & h)) != null) {
        // 1. The head node of the bucket matches the key
        if ((eh = e.hash) == h) {
            if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                return e.val;
        }
        // 2. The head node has a negative hash (TreeBin or ForwardingNode):
        //    delegate to its find(), e.g. a red-black tree lookup
        else if (eh < 0)
            return (p = e.find(h, key)) != null ? p.val : null;
        // 3. Otherwise walk the linked list looking for an equal key
        while ((e = e.next) != null) {
            if (e.hash == h &&
                ((ek = e.key) == key || (ek != null && key.equals(ek))))
                return e.val;
        }
    }
    return null;
}
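Note that get() takes no lock at all: table and the val/next fields of Node are volatile, and tabAt() performs a volatile array read, so readers always see a consistent node without blocking writers. A small demo of that behavior (the thread count, iteration counts, and key space are arbitrary choices for illustration):

import java.util.concurrent.ConcurrentHashMap;

public class NonBlockingReadDemo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentHashMap<Integer, Integer> map = new ConcurrentHashMap<>();

        // Writer: keeps inserting and updating keys.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 100_000; i++) {
                map.put(i % 1_000, i);
            }
        });

        // Reader: runs concurrently and never blocks on the writer's synchronized sections.
        Thread reader = new Thread(() -> {
            long seen = 0;
            for (int i = 0; i < 100_000; i++) {
                Integer v = map.get(i % 1_000);
                if (v != null) seen++;
            }
            System.out.println("non-null reads: " + seen);
        });

        writer.start();
        reader.start();
        writer.join();
        reader.join();
        System.out.println("final size: " + map.size());
    }
}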
Conclusion
1. In JDK 1.8, ConcurrentHashMap uses an array + singly linked list + red-black tree structure; the array and the linked lists store Node objects, while the red-black trees store TreeNode objects.
2. Thread safety is guaranteed with a combination of CAS and synchronized.
3. When adding a key-value pair, the key's hash is computed and mapped to a bucket index in the Node array. If that bucket is empty there is no hash collision, so the new Node is written with CAS and the operation retries until it succeeds; if the bucket is occupied there is a collision, so the head node of that bucket's linked list or red-black tree is locked with synchronized before the write.
Differences between ConcurrentHashMap in JDK 1.8 and JDK 1.7:
1. JDK 1.7 uses an array + linked lists; JDK 1.8 uses an array + linked lists + red-black trees, which speeds up lookups in heavily collided buckets.
2. JDK 1.7 uses Segment-based lock striping, while JDK 1.8 uses CAS + synchronized, reducing the lock granularity: in JDK 1.7 the lock is a whole Segment containing multiple HashEntry buckets, whereas in JDK 1.8 the lock is just the head node of a single bucket.