共计 11355 个字符,预计需要花费 29 分钟才能阅读完成。
一, 什么是 ConcurrentHashMap
ConcurrentHashMap 和 HashMap 一样是一个用来存储键值对 <key,value> 的集合类, 但和 HashMap 不同的是 ConcurrentHashMap 是线程安全的, 也就是多个线程同时对 ConcurrentHashMap 进行修改、删除或增加操作不会出现数据错误的问题.
二, 实现原理
和 HashMap 一样采用数组 + 链表 + 红黑树实现
但和 HashMap 不同的是, 数组中存储的节点类型有所增加, 包含 Node<key,value>,TreeNode<key,value>,ForwardingNode<key,value>, 新增这个节点的目的就是为了线程并发协助扩容时使用
三, 基本属性介绍
// 01111111111111111111111111111111 该值可以保证计算出来的哈希值为正数
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

// 该属性用在扩容时生成一个负值, 表示正在扩容
// The number of bits used for generation stamp in sizeCtl. sizeCtl 中用于生成戳记的位数。
// Must be at least 6 for 32bit arrays. 对于 32 位数组, 必须至少为 6。
private static int RESIZE_STAMP_BITS = 16;

// 和上面一样, 也是为了在扩容时生成一个负值, 具体在代码中解释
// The bit shift for recording size stamp in sizeCtl. 在 sizeCtl 中记录大小戳的移位位数。
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

// Encodings for Node hash fields. See above for explanation.
// 表示当前桶位正在被迁移
static final int MOVED = -1;

// 表示当前桶是以树来存储节点的
static final int TREEBIN = -2;

// Number of CPUS, to place bounds on some sizings
// cpu 的数量, 用来在计算元素数量时限制 CounterCell 数组的大小
static final int NCPU = Runtime.getRuntime().availableProcessors();
/** * The next table to use; non-null only while resizing. * 用来扩容的哈希表 */ private transient volatile Node<K, V>[] nextTable;
/** * Base counter value, used mainly when there is no contention, but also as a fallback during table initialization * races. Updated via CAS. * 哈希表元素数量, 通过 longAdder 的方式来维护 */ private transient volatile long baseCount;
/** * Table initialization and resizing control. * 哈希表初始化和扩容大小控制. * When negative, the table is being initialized or resized: * 当这个值为负数时, 表示哈希表正在初始化或重新计算大小 * -1 for initialization, * -1 表示正在初始化 * else -(1 + the number of active resizing threads). * -(1+n) 表示此时有 n 个线程正在共同完成哈希表的扩容 * Otherwise, when table is null, holds the initial table size to use upon creation,or 0 for default. * 否则, 当哈希表为空时, 保存要创建的哈希表的大小, 0 表示默认 (16) * After initialization, holds the next element count value upon which to resize the table. * 初始化完成之后, 保存下一次需要扩容的阈值 */ private transient volatile int sizeCtl;
/** * The next table index (plus one) to split while resizing. * 扩容时的当前转移下标 */ private transient volatile int transferIndex;
/** * Spinlock (locked via CAS) used when resizing and/or creating CounterCells. * 获取计算集合元素数量的 CounterCell 对象时使用的自旋锁 */ private transient volatile int cellsBusy;
/** * Table of counter cells. When non-null, size is a power of 2. * 计算元素数量的数组 */ private transient volatile CounterCell[] counterCells;
四, 构造函数
/**
 * Creates a table sized to hold the given number of elements.
 * Unlike HashMap, the table capacity is computed as the power of two
 * >= (1.5 * initialCapacity + 1), so passing 32 yields a capacity of 64
 * (HashMap would compute 32 for the same argument).
 *
 * @param initialCapacity the expected number of elements
 * @throws IllegalArgumentException if initialCapacity is negative
 */
public ConcurrentHashMap(int initialCapacity) {
    if (initialCapacity < 0) {
        throw new IllegalArgumentException();
    }
    int cap;
    if (initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) {
        // so large that 1.5x + 1 would overflow past the maximum: clamp
        cap = MAXIMUM_CAPACITY;
    } else {
        cap = tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1);
    }
    // before the table exists, sizeCtl caches the capacity to allocate
    this.sizeCtl = cap;
}
五, 常用方法介绍
/**
 * Spreads the higher bits of a key's hashCode into the lower bits and
 * masks the result with HASH_BITS so it is always non-negative
 * (negative hash values are reserved for special nodes such as
 * MOVED / TREEBIN). Plays the same role as HashMap's hash().
 *
 * @param h the raw hashCode of a key
 * @return the spread, non-negative hash
 */
static final int spread(int h) {
    int mixed = h ^ (h >>> 16);
    return mixed & HASH_BITS;
}
获取节点
/**
 * Returns the value mapped to {@code key}, or null if there is no mapping.
 * Lock-free: relies only on volatile reads (table / tabAt), never locks.
 */
public V get(Object key) {Node<K, V>[] tab;
Node<K, V> e, p;
int n, eh;
K ek;
// spread hash (same bit-mixing as HashMap's hash())
int h = spread(key.hashCode());
// index the bin with (n - 1) & h, exactly as HashMap does
if ((tab = table) != null && (n = tab.length) > 0 && (e = tabAt(tab, (n - 1) & h)) != null) {if ((eh = e.hash) == h) {if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
} else if (eh < 0)
// negative hash marks a special node (TreeBin head or ForwardingNode);
// delegate the lookup to that node's own find()
return (p = e.find(h, key)) != null ? p.val : null;
// plain linked-list bin: walk the chain
while ((e = e.next) != null) {if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
return null;
}
插入节点
/**
 * Maps the specified key to the specified value, replacing any existing
 * mapping for the key. Neither the key nor the value may be null.
 *
 * @return the previous value associated with the key, or null if none
 * @throws NullPointerException if the key or value is null
 */
public V put(K key, V value) {
    return putVal(key, value, false);
}
/**
 * Implementation for put and putIfAbsent.
 * Returns the previous value mapped to the key, or null if there was none.
 */
final V putVal(K key, V value, boolean onlyIfAbsent) {if (key == null || value == null) throw new NullPointerException();
// spread the key's hashCode (non-negative; negative hashes mark special nodes)
int hash = spread(key.hashCode());
// number of nodes seen in the target bin (drives the treeify decision)
int binCount = 0;
// retry loop: we may need to initialize the table, help an in-progress
// resize, or wait for another thread holding the lock on our bin, and
// then try the insert again
for (Node<K, V>[] tab = table; ; ) {
Node<K, V> f;
int n, i, fh;
if (tab == null || (n = tab.length) == 0)
// lazily create the table on first insert
tab = initTable();
else if
// compute the bin index and check whether that bin is empty
((f = tabAt(tab, i = (n - 1) & hash)) == null) {
// empty bin: CAS the new node in — no lock needed
if (casTabAt(tab, i, null, new Node<K, V>(hash, key, value, null)))
break;
} else if
// head hash == MOVED: the bin was forwarded, a resize is running;
// help with the transfer, then retry
((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
// lock the bin's head node so concurrent writers to this bin are serialized
synchronized (f) {
// recheck under the lock: the head may have changed (e.g. the bin was
// treeified or moved) between the unlocked read and acquiring the lock
if (tabAt(tab, i) == f) {if (fh >= 0) {
// ordinary linked-list bin: walk it, counting nodes as we go
binCount = 1;
for (Node<K, V> e = f; ; ++binCount) {
K ek;
// found an equal key: update the value (unless onlyIfAbsent) and stop
if (e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) {
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K, V> pred = e;
// no matching key found: append the new node at the tail
if ((e = e.next) == null) {pred.next = new Node<K, V>(hash, key, value, null);
break;
}
}
} else if (f instanceof TreeBin) {
// red-black tree bin: delegate the insert to the tree
Node<K, V> p;
binCount = 2;
if ((p = ((TreeBin<K, V>) f).putTreeVal(hash, key, value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
if (binCount != 0) {
// bin grew past the threshold (8): convert the list to a tree
// (treeifyBin resizes instead if the table is still small)
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
// an existing value was replaced: size unchanged, return the old value
if (oldVal != null)
return oldVal;
break;
}
}
}
// a new node was added: bump the element count and maybe trigger a resize
addCount(1L, binCount);
return null;
}
初始化哈希表
/**
 * Initializes the table, using the size recorded in sizeCtl.
 * Only one thread performs the allocation; the rest yield until it is done.
 */
private final Node<K, V>[] initTable() {Node<K, V>[] tab;
int sc;
while ((tab = table) == null || tab.length == 0) {
// sizeCtl < 0: another thread is initializing (or resizing); yield and retry
if ((sc = sizeCtl) < 0)
Thread.yield(); // lost initialization race; just spin
else if
// CAS sizeCtl from sc to -1 to claim the right to initialize; only one
// thread can win, the others are held off by the check above
(U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
// recheck: another thread may have already finished initializing
if ((tab = table) == null || tab.length == 0) {
// sc > 0 holds the capacity computed by the constructor (already a
// power of two); otherwise use the default capacity (16)
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
Node<K, V>[] nt = (Node<K, V>[]) new Node<?, ?>[n];
table = tab = nt;
// next resize threshold: n - (n >>> 2) = n - n/4 = 0.75 * n
sc = n - (n >>> 2);
}
} finally {
// publish the threshold (NOT -1: this releases the "initializing"
// state that the CAS above set)
sizeCtl = sc;
}
break;
}
}
return tab;
}
计算节点数量
/**
 * Adds x to the element count (LongAdder-style: baseCount plus CounterCells)
 * and, if check >= 0, starts or helps an ongoing resize when the count
 * reaches the threshold stored in sizeCtl.
 */
private final void addCount(long x, int check) {CounterCell[] as;
long b, s;
/*
 * Maintain the element count.
 */
// fast path: when no CounterCells exist, try a single CAS on baseCount
if ((as = counterCells) != null || !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
CounterCell a;
long v;
int m;
boolean uncontended = true;
// cell table missing/empty, this thread's slot is null, or the CAS on the
// slot failed: fall back to the full (initializing / expanding) path
if (as == null || (m = as.length - 1) < 0 || (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
!(uncontended = U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {fullAddCount(x, uncontended);
return;
}
// the bin held at most one node: no need to consider resizing
if (check <= 1)
return;
// recompute the current element count for the resize check below
s = sumCount();}
/*
 * Check whether a resize is needed.
 */
if (check >= 0) {Node<K, V>[] tab, nt;
int n, sc;
// count reached the threshold (sizeCtl) and the table can still grow
while (s >= (long) (sc = sizeCtl) && (tab = table) != null && (n = tab.length) < MAXIMUM_CAPACITY) {
// resize stamp for table length n (a positive value identifying this
// resize generation; it turns sizeCtl negative once shifted into the
// high 16 bits below)
int rs = resizeStamp(n);
// sc < 0: a resize is already running; try to help
if (sc < 0) {
// bail out when the resize is for another generation or has no work
// left. NOTE(review): "sc == rs + 1" / "sc == rs + MAX_RESIZERS" is a
// known JDK-8 bug (JDK-8214427); they should compare against
// (rs << RESIZE_STAMP_SHIFT) + 1 etc.
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || sc == rs + MAX_RESIZERS || (nt = nextTable) == null || transferIndex <= 0)
break;
// sc + 1 registers this thread as one more active resizer
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
} else if
/*
 * e.g. rs = 00000000 00000000 10000000 00000000;
 * shifted left 16 bits it becomes 10000000 ... 00000000, i.e. negative.
 * This CAS flips sizeCtl negative to announce the resize; the +2
 * encodes "one active resizer".
 */
(U.compareAndSwapInt(this, SIZECTL, sc, (rs << RESIZE_STAMP_SHIFT) + 2))
// start the resize; transfer allocates the new table when passed null
transfer(tab, null);
// re-read the count and loop in case yet another resize is needed
s = sumCount();}
}
}
/**
 * Slow path for addCount: initializes or grows the CounterCell table, or
 * falls back to baseCount, retrying until x has been added somewhere.
 * Mirrors java.util.concurrent.atomic.Striped64.longAccumulate.
 */
private final void fullAddCount(long x, boolean wasUncontended) {
int h;
// probe = this thread's hash into the cell table; 0 means uninitialized
if ((h = ThreadLocalRandom.getProbe()) == 0) {ThreadLocalRandom.localInit(); // force initialization
h = ThreadLocalRandom.getProbe();
wasUncontended = true;
}
// true when a CAS on a non-empty slot failed on a previous pass, hinting
// the cell table may be too small (drives the expansion branch below)
boolean collide = false;
for (; ;) {CounterCell[] as;
CounterCell a;
int n;
long v;
// cell table exists: try to accumulate into this thread's slot
if ((as = counterCells) != null && (n = as.length) > 0) {
// our slot is empty: try to install a new cell already holding x
if ((a = as[(n - 1) & h]) == null) {
// nobody currently holds the cell-table spinlock
if (cellsBusy == 0) {CounterCell r = new CounterCell(x);
// try to grab the spinlock
if (cellsBusy == 0 && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean created = false;
try {CounterCell[] rs;
int m, j;
if ((rs = counterCells) != null && (m = rs.length) > 0 && rs[j = (m - 1) & h] == null) {rs[j] = r;
created = true;
}
} finally {
// release the spinlock
cellsBusy = 0;
}
if (created)
break;
continue; // Slot is now non-empty
}
}
// lock unavailable or slot got taken: not a sizing problem, retry
collide = false;
} else if
// the caller's CAS on this slot already failed: rehash before retrying
(!wasUncontended) // CAS already known to fail
wasUncontended = true; // Continue after rehash
/*
 * Slot is non-empty and no known failure yet:
 * try to CAS x onto the existing cell's value.
 */
else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
break;
// table was replaced meanwhile, or already at the CPU-count bound:
// expanding would not help
else if (counterCells != as || n >= NCPU)
collide = false;
// first contention on this slot: remember it, rehash and retry once more
else if (!collide)
collide = true;
else if
// repeated contention: grab the spinlock and double the cell table
(cellsBusy == 0 && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
try {if (counterCells == as) {// Expand table unless stale
// double the table (still bounded by NCPU via the check above)
CounterCell[] rs = new CounterCell[n << 1];
for (int i = 0; i < n; ++i)
// carry the existing cells over
rs[i] = as[i];
counterCells = rs;
}
} finally {cellsBusy = 0;}
collide = false;
continue; // Retry with expanded table
}
// rehash this thread's probe and retry
h = ThreadLocalRandom.advanceProbe(h);
} else if
// no cell table yet: grab the spinlock and create one (length 2)
(cellsBusy == 0 && counterCells == as && U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
boolean init = false;
try {if (counterCells == as) {CounterCell[] rs = new CounterCell[2];
rs[h & 1] = new CounterCell(x);
counterCells = rs;
init = true;
}
} finally {cellsBusy = 0;}
if (init)
break;
} else if
// another thread is creating the table: try the baseCount fallback
(U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
break; // Fall back on using base
}
}
扩容并迁移
/**
 * Moves and/or copies the nodes in each bin to the new table.
 * Multiple threads cooperate: each claims a stride of bins at a time,
 * handed out from the high end of the table downward via transferIndex.
 */
private final void transfer(Node<K, V>[] tab, Node<K, V>[] nextTab) {
// stride = how many bins one thread claims per round
int n = tab.length, stride;
/*
 * Subdivide the table among the CPUs, but never hand out ranges smaller
 * than MIN_TRANSFER_STRIDE (16) to limit contention on transferIndex.
 */
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
stride = MIN_TRANSFER_STRIDE;
// nextTab == null: we are the thread that initiates this resize
if (nextTab == null) {
try {
// allocate the new table at twice the current capacity
@SuppressWarnings("unchecked")
Node<K, V>[] nt = (Node<K, V>[]) new Node<?, ?>[n << 1];
nextTab = nt;
} catch (Throwable ex) {
sizeCtl = Integer.MAX_VALUE;
return;
}
nextTable = nextTab;
// bins are claimed downward starting from index n
transferIndex = n;
}
// length of the new table
int nextn = nextTab.length;
// forwarding node: its MOVED hash marks a bin whose nodes have been moved
ForwardingNode<K, V> fwd = new ForwardingNode<K, V>(nextTab);
boolean advance = true;
boolean finishing = false; // to ensure sweep before committing nextTab
// process bins from high index to low
for (int i = 0, bound = 0; ;) {
Node<K, V> f;
int fh;
while (advance) {
int nextIndex, nextBound;
// still inside our claimed [bound, i] range (or on the final sweep):
// stop advancing and process slot i
if (--i >= bound || finishing)
advance = false;
else if
// no bins left to claim: mark this thread's work as done
((nextIndex = transferIndex) <= 0) {
i = -1;
advance = false;
} else if
// CAS-claim the next stride of bins [nextBound, nextIndex)
(U.compareAndSwapInt(this, TRANSFERINDEX, nextIndex, nextBound = (nextIndex > stride ? nextIndex - stride : 0))) {
bound = nextBound;
i = nextIndex - 1;
advance = false;
}
}
if (i < 0 || i >= n || i + n >= nextn) {
int sc;
// last thread finished its final sweep: publish the new table
if (finishing) {
nextTable = null;
table = nextTab;
// new threshold: 2n - n/2 = 1.5n = 0.75 * (2n)
sizeCtl = (n << 1) - (n >>> 1);
return;
}
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
// other resizers still active: just exit; if we were the last one,
// set finishing and re-scan the whole table before committing
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
return;
finishing = advance = true;
i = n; // recheck before commit
}
} else if
// empty bin: install the forwarding node directly via CAS
((f = tabAt(tab, i)) == null)
advance = casTabAt(tab, i, null, fwd);
else if ((fh = f.hash) == MOVED)
// bin already forwarded by another thread: move on
advance = true; // already processed
else {
// transfer this bin under the head-node lock
synchronized (f) {if (tabAt(tab, i) == f) {
Node<K, V> ln, hn;
if (fh >= 0) {
// linked-list bin: split into a "low" list (stays at index i) and a
// "high" list (moves to i + n), keyed by the bit (hash & n).
// lastRun marks the tail run whose nodes all land in the same half,
// so that run is reused as-is instead of being copied.
int runBit = fh & n;
Node<K, V> lastRun = f;
for (Node<K, V> p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
ln = lastRun;
hn = null;
} else {
hn = lastRun;
ln = null;
}
// copy the nodes before lastRun, prepending each to its half
for (Node<K, V> p = f; p != lastRun; p = p.next) {
int ph = p.hash;
K pk = p.key;
V pv = p.val;
if ((ph & n) == 0)
ln = new Node<K, V>(ph, pk, pv, ln);
else
hn = new Node<K, V>(ph, pk, pv, hn);
}
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
// bin done: leave a forwarding node in the old table
setTabAt(tab, i, fwd);
advance = true;
} else if (f instanceof TreeBin) {TreeBin<K, V> t = (TreeBin<K, V>) f;
TreeNode<K, V> lo = null, loTail = null;
TreeNode<K, V> hi = null, hiTail = null;
int lc = 0, hc = 0;
// split the tree the same way, keeping counts so each half can be
// untreeified if it shrank to <= UNTREEIFY_THRESHOLD nodes
for (Node<K, V> e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode<K, V> p = new TreeNode<K, V>
(h, e.key, e.val, null, null);
if ((h & n) == 0) {if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
} else {if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin<K, V>(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin<K, V>(hi) : t;
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
// bin done: leave a forwarding node in the old table
setTabAt(tab, i, fwd);
// keep scanning our claimed range
advance = true;
}
}
}
}
}
}
获取节点数量
/**
 * Returns the number of mappings, clamped to the range
 * [0, Integer.MAX_VALUE]. Delegates to sumCount(), so under concurrent
 * updates the value is only an estimate.
 */
public int size() {
    long n = sumCount();
    if (n < 0L)
        return 0;
    if (n > (long) Integer.MAX_VALUE)
        return Integer.MAX_VALUE;
    return (int) n;
}
/**
 * Sums baseCount and every non-null CounterCell value to produce the
 * current element count. Not atomic: concurrent updates to the cells
 * while summing are not excluded, so the result is only an estimate.
 */
final long sumCount() {
    long total = baseCount;
    CounterCell[] cells = counterCells;
    if (cells != null) {
        for (CounterCell cell : cells) {
            if (cell != null)
                total += cell.value;
        }
    }
    return total;
}
正文完