diff -r 000000000000 -r 588d5bf7a560 rt/emul/compact/src/main/java/java/util/concurrent/ConcurrentHashMap.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rt/emul/compact/src/main/java/java/util/concurrent/ConcurrentHashMap.java Thu Oct 03 15:40:35 2013 +0200 @@ -0,0 +1,1522 @@ +/* + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * This file is available under and governed by the GNU General Public + * License version 2 only, as published by the Free Software Foundation. + * However, the following notice accompanied the original version of this + * file: + * + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package java.util.concurrent; +import java.util.concurrent.locks.*; +import java.util.*; +import java.io.Serializable; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +/** + * A hash table supporting full concurrency of retrievals and + * adjustable expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * Hashtable. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with Hashtable in programs that rely on its + * thread safety but not on its synchronization details. + * + *
<p>
Retrieval operations (including get) generally do not + * block, so may overlap with update operations (including + * put and remove). Retrievals reflect the results + * of the most recently completed update operations holding + * upon their onset. For aggregate operations such as putAll + * and clear, concurrent retrievals may reflect insertion or + * removal of only some entries. Similarly, Iterators and + * Enumerations return elements reflecting the state of the hash table + * at some point at or since the creation of the iterator/enumeration. + * They do not throw {@link ConcurrentModificationException}. + * However, iterators are designed to be used by only one thread at a time. + * + *
<p>
The allowed concurrency among update operations is guided by + * the optional concurrencyLevel constructor argument + * (default 16), which is used as a hint for internal sizing. The + * table is internally partitioned to try to permit the indicated + * number of concurrent updates without contention. Because placement + * in hash tables is essentially random, the actual concurrency will + * vary. Ideally, you should choose a value to accommodate as many + * threads as will ever concurrently modify the table. Using a + * significantly higher value than you need can waste space and time, + * and a significantly lower value can lead to thread contention. But + * overestimates and underestimates within an order of magnitude do + * not usually have much noticeable impact. A value of one is + * appropriate when it is known that only one thread will modify and + * all others will only read. Also, resizing this or any other kind of + * hash table is a relatively slow operation, so, when possible, it is + * a good idea to provide estimates of expected table sizes in + * constructors. + * + *
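+ * <p>As an illustrative sketch (the figures here are assumptions for
+ * the example, not recommendations), a map expected to hold roughly
+ * 10,000 entries and be updated by about 8 threads concurrently could
+ * be presized as:
+ * <pre> {@code
+ * Map<String, Object> m =
+ *     new ConcurrentHashMap<String, Object>(10000, 0.75f, 8);}</pre>
+ *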
<p>
This class and its views and iterators implement all of the + * optional methods of the {@link Map} and {@link Iterator} + * interfaces. + * + *
<p>
Like {@link Hashtable} but unlike {@link HashMap}, this class + * does not allow null to be used as a key or value. + * + *
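+ * <p>Because null values are disallowed, a null result from
+ * {@code get} unambiguously means "no mapping", enabling idioms such
+ * as the following sketch (the names {@code compute} and {@code key}
+ * are illustrative, not part of this class):
+ * <pre> {@code
+ * ConcurrentMap<String, Long> cache = new ConcurrentHashMap<String, Long>();
+ * Long result = cache.get(key);
+ * if (result == null) {
+ *     Long computed = compute(key);                 // hypothetical helper
+ *     Long prior = cache.putIfAbsent(key, computed);
+ *     result = (prior == null) ? computed : prior;  // loser adopts winner's value
+ * }}</pre>
+ *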
<p>
This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of keys maintained by this map + * @param the type of mapped values + */ +public class ConcurrentHashMap extends AbstractMap + implements ConcurrentMap, Serializable { + private static final long serialVersionUID = 7249069246763182397L; + + /* + * The basic strategy is to subdivide the table among Segments, + * each of which itself is a concurrently readable hash table. To + * reduce footprint, all but one segments are constructed only + * when first needed (see ensureSegment). To maintain visibility + * in the presence of lazy construction, accesses to segments as + * well as elements of segment's table must use volatile access, + * which is done via Unsafe within methods segmentAt etc + * below. These provide the functionality of AtomicReferenceArrays + * but reduce the levels of indirection. Additionally, + * volatile-writes of table elements and entry "next" fields + * within locked operations use the cheaper "lazySet" forms of + * writes (via putOrderedObject) because these writes are always + * followed by lock releases that maintain sequential consistency + * of table updates. + * + * Historical note: The previous version of this class relied + * heavily on "final" fields, which avoided some volatile reads at + * the expense of a large initial footprint. Some remnants of + * that design (including forced construction of segment 0) exist + * to ensure serialization compatibility. + */ + + /* ---------------- Constants -------------- */ + + /** + * The default initial capacity for this table, + * used when not otherwise specified in a constructor. + */ + static final int DEFAULT_INITIAL_CAPACITY = 16; + + /** + * The default load factor for this table, used when not + * otherwise specified in a constructor. + */ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + /** + * The default concurrency level for this table, used when not + * otherwise specified in a constructor. + */ + static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The maximum capacity, used if a higher value is implicitly + * specified by either of the constructors with arguments. MUST + * be a power of two <= 1<<30 to ensure that entries are indexable + * using ints. + */ + static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The minimum capacity for per-segment tables. Must be a power + * of two, at least two to avoid immediate resizing on next use + * after lazy construction. + */ + static final int MIN_SEGMENT_TABLE_CAPACITY = 2; + + /** + * The maximum number of segments to allow; used to bound + * constructor arguments. Must be power of two less than 1 << 24. + */ + static final int MAX_SEGMENTS = 1 << 16; // slightly conservative + + /** + * Number of unsynchronized retries in size and containsValue + * methods before resorting to locking. This is used to avoid + * unbounded retries if tables undergo continuous modification + * which would make it impossible to obtain an accurate result. + */ + static final int RETRIES_BEFORE_LOCK = 2; + + /* ---------------- Fields -------------- */ + + /** + * Mask value for indexing into segments. The upper bits of a + * key's hash code are used to choose the segment. + */ + final int segmentMask; + + /** + * Shift value for indexing within segments. + */ + final int segmentShift; + + /** + * The segments, each of which is a specialized hash table. 
+ */ + final Segment[] segments; + + transient Set keySet; + transient Set> entrySet; + transient Collection values; + + /** + * ConcurrentHashMap list entry. Note that this is never exported + * out as a user-visible Map.Entry. + */ + static final class HashEntry { + final int hash; + final K key; + volatile V value; + volatile HashEntry next; + + HashEntry(int hash, K key, V value, HashEntry next) { + this.hash = hash; + this.key = key; + this.value = value; + this.next = next; + } + + /** + * Sets next field with volatile write semantics. (See above + * about use of putOrderedObject.) + */ + final void setNext(HashEntry n) { + UNSAFE.putOrderedObject(this, nextOffset, n); + } + + // Unsafe mechanics + static final sun.misc.Unsafe UNSAFE; + static final long nextOffset; + static { + try { + UNSAFE = sun.misc.Unsafe.getUnsafe(); + Class k = HashEntry.class; + nextOffset = UNSAFE.objectFieldOffset + (k.getDeclaredField("next")); + } catch (Exception e) { + throw new Error(e); + } + } + } + + /** + * Gets the ith element of given table (if nonnull) with volatile + * read semantics. Note: This is manually integrated into a few + * performance-sensitive methods to reduce call overhead. + */ + @SuppressWarnings("unchecked") + static final HashEntry entryAt(HashEntry[] tab, int i) { + return (tab == null) ? null : + (HashEntry) UNSAFE.getObjectVolatile + (tab, ((long)i << TSHIFT) + TBASE); + } + + /** + * Sets the ith element of given table, with volatile write + * semantics. (See above about use of putOrderedObject.) + */ + static final void setEntryAt(HashEntry[] tab, int i, + HashEntry e) { + UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e); + } + + /** + * Applies a supplemental hash function to a given hashCode, which + * defends against poor quality hash functions. This is critical + * because ConcurrentHashMap uses power-of-two length hash tables, + * that otherwise encounter collisions for hashCodes that do not + * differ in lower or upper bits. + */ + private static int hash(int h) { + // Spread bits to regularize both segment and index locations, + // using variant of single-word Wang/Jenkins hash. + h += (h << 15) ^ 0xffffcd7d; + h ^= (h >>> 10); + h += (h << 3); + h ^= (h >>> 6); + h += (h << 2) + (h << 14); + return h ^ (h >>> 16); + } + + /** + * Segments are specialized versions of hash tables. This + * subclasses from ReentrantLock opportunistically, just to + * simplify some locking and avoid separate construction. + */ + static final class Segment extends ReentrantLock implements Serializable { + /* + * Segments maintain a table of entry lists that are always + * kept in a consistent state, so can be read (via volatile + * reads of segments and tables) without locking. This + * requires replicating nodes when necessary during table + * resizing, so the old lists can be traversed by readers + * still using old version of table. + * + * This class defines only mutative methods requiring locking. + * Except as noted, the methods of this class perform the + * per-segment versions of ConcurrentHashMap methods. (Other + * methods are integrated directly into ConcurrentHashMap + * methods.) These mutative methods use a form of controlled + * spinning on contention via methods scanAndLock and + * scanAndLockForPut. These intersperse tryLocks with + * traversals to locate nodes. The main benefit is to absorb + * cache misses (which are very common for hash tables) while + * obtaining locks so that traversal is faster once + * acquired. 
We do not actually use the found nodes since they + * must be re-acquired under lock anyway to ensure sequential + * consistency of updates (and in any case may be undetectably + * stale), but they will normally be much faster to re-locate. + * Also, scanAndLockForPut speculatively creates a fresh node + * to use in put if no node is found. + */ + + private static final long serialVersionUID = 2249069246763182397L; + + /** + * The maximum number of times to tryLock in a prescan before + * possibly blocking on acquire in preparation for a locked + * segment operation. On multiprocessors, using a bounded + * number of retries maintains cache acquired while locating + * nodes. + */ + static final int MAX_SCAN_RETRIES = + Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; + + /** + * The per-segment table. Elements are accessed via + * entryAt/setEntryAt providing volatile semantics. + */ + transient volatile HashEntry[] table; + + /** + * The number of elements. Accessed only either within locks + * or among other volatile reads that maintain visibility. + */ + transient int count; + + /** + * The total number of mutative operations in this segment. + * Even though this may overflows 32 bits, it provides + * sufficient accuracy for stability checks in CHM isEmpty() + * and size() methods. Accessed only either within locks or + * among other volatile reads that maintain visibility. + */ + transient int modCount; + + /** + * The table is rehashed when its size exceeds this threshold. + * (The value of this field is always (int)(capacity * + * loadFactor).) + */ + transient int threshold; + + /** + * The load factor for the hash table. Even though this value + * is same for all segments, it is replicated to avoid needing + * links to outer object. + * @serial + */ + final float loadFactor; + + Segment(float lf, int threshold, HashEntry[] tab) { + this.loadFactor = lf; + this.threshold = threshold; + this.table = tab; + } + + final V put(K key, int hash, V value, boolean onlyIfAbsent) { + HashEntry node = tryLock() ? null : + scanAndLockForPut(key, hash, value); + V oldValue; + try { + HashEntry[] tab = table; + int index = (tab.length - 1) & hash; + HashEntry first = entryAt(tab, index); + for (HashEntry e = first;;) { + if (e != null) { + K k; + if ((k = e.key) == key || + (e.hash == hash && key.equals(k))) { + oldValue = e.value; + if (!onlyIfAbsent) { + e.value = value; + ++modCount; + } + break; + } + e = e.next; + } + else { + if (node != null) + node.setNext(first); + else + node = new HashEntry(hash, key, value, first); + int c = count + 1; + if (c > threshold && tab.length < MAXIMUM_CAPACITY) + rehash(node); + else + setEntryAt(tab, index, node); + ++modCount; + count = c; + oldValue = null; + break; + } + } + } finally { + unlock(); + } + return oldValue; + } + + /** + * Doubles size of table and repacks entries, also adding the + * given node to new table + */ + @SuppressWarnings("unchecked") + private void rehash(HashEntry node) { + /* + * Reclassify nodes in each list to new table. Because we + * are using power-of-two expansion, the elements from + * each bin must either stay at same index, or move with a + * power of two offset. We eliminate unnecessary node + * creation by catching cases where old nodes can be + * reused because their next fields won't change. + * Statistically, at the default threshold, only about + * one-sixth of them need cloning when a table + * doubles. 
The nodes they replace will be garbage + * collectable as soon as they are no longer referenced by + * any reader thread that may be in the midst of + * concurrently traversing table. Entry accesses use plain + * array indexing because they are followed by volatile + * table write. + */ + HashEntry[] oldTable = table; + int oldCapacity = oldTable.length; + int newCapacity = oldCapacity << 1; + threshold = (int)(newCapacity * loadFactor); + HashEntry[] newTable = + (HashEntry[]) new HashEntry[newCapacity]; + int sizeMask = newCapacity - 1; + for (int i = 0; i < oldCapacity ; i++) { + HashEntry e = oldTable[i]; + if (e != null) { + HashEntry next = e.next; + int idx = e.hash & sizeMask; + if (next == null) // Single node on list + newTable[idx] = e; + else { // Reuse consecutive sequence at same slot + HashEntry lastRun = e; + int lastIdx = idx; + for (HashEntry last = next; + last != null; + last = last.next) { + int k = last.hash & sizeMask; + if (k != lastIdx) { + lastIdx = k; + lastRun = last; + } + } + newTable[lastIdx] = lastRun; + // Clone remaining nodes + for (HashEntry p = e; p != lastRun; p = p.next) { + V v = p.value; + int h = p.hash; + int k = h & sizeMask; + HashEntry n = newTable[k]; + newTable[k] = new HashEntry(h, p.key, v, n); + } + } + } + } + int nodeIndex = node.hash & sizeMask; // add the new node + node.setNext(newTable[nodeIndex]); + newTable[nodeIndex] = node; + table = newTable; + } + + /** + * Scans for a node containing given key while trying to + * acquire lock, creating and returning one if not found. Upon + * return, guarantees that lock is held. UNlike in most + * methods, calls to method equals are not screened: Since + * traversal speed doesn't matter, we might as well help warm + * up the associated code and accesses as well. + * + * @return a new node if key not found, else null + */ + private HashEntry scanAndLockForPut(K key, int hash, V value) { + HashEntry first = entryForHash(this, hash); + HashEntry e = first; + HashEntry node = null; + int retries = -1; // negative while locating node + while (!tryLock()) { + HashEntry f; // to recheck first below + if (retries < 0) { + if (e == null) { + if (node == null) // speculatively create node + node = new HashEntry(hash, key, value, null); + retries = 0; + } + else if (key.equals(e.key)) + retries = 0; + else + e = e.next; + } + else if (++retries > MAX_SCAN_RETRIES) { + lock(); + break; + } + else if ((retries & 1) == 0 && + (f = entryForHash(this, hash)) != first) { + e = first = f; // re-traverse if entry changed + retries = -1; + } + } + return node; + } + + /** + * Scans for a node containing the given key while trying to + * acquire lock for a remove or replace operation. Upon + * return, guarantees that lock is held. Note that we must + * lock even if the key is not found, to ensure sequential + * consistency of updates. + */ + private void scanAndLock(Object key, int hash) { + // similar to but simpler than scanAndLockForPut + HashEntry first = entryForHash(this, hash); + HashEntry e = first; + int retries = -1; + while (!tryLock()) { + HashEntry f; + if (retries < 0) { + if (e == null || key.equals(e.key)) + retries = 0; + else + e = e.next; + } + else if (++retries > MAX_SCAN_RETRIES) { + lock(); + break; + } + else if ((retries & 1) == 0 && + (f = entryForHash(this, hash)) != first) { + e = first = f; + retries = -1; + } + } + } + + /** + * Remove; match on key only if value null, else match both. 
+ */ + final V remove(Object key, int hash, Object value) { + if (!tryLock()) + scanAndLock(key, hash); + V oldValue = null; + try { + HashEntry[] tab = table; + int index = (tab.length - 1) & hash; + HashEntry e = entryAt(tab, index); + HashEntry pred = null; + while (e != null) { + K k; + HashEntry next = e.next; + if ((k = e.key) == key || + (e.hash == hash && key.equals(k))) { + V v = e.value; + if (value == null || value == v || value.equals(v)) { + if (pred == null) + setEntryAt(tab, index, next); + else + pred.setNext(next); + ++modCount; + --count; + oldValue = v; + } + break; + } + pred = e; + e = next; + } + } finally { + unlock(); + } + return oldValue; + } + + final boolean replace(K key, int hash, V oldValue, V newValue) { + if (!tryLock()) + scanAndLock(key, hash); + boolean replaced = false; + try { + HashEntry e; + for (e = entryForHash(this, hash); e != null; e = e.next) { + K k; + if ((k = e.key) == key || + (e.hash == hash && key.equals(k))) { + if (oldValue.equals(e.value)) { + e.value = newValue; + ++modCount; + replaced = true; + } + break; + } + } + } finally { + unlock(); + } + return replaced; + } + + final V replace(K key, int hash, V value) { + if (!tryLock()) + scanAndLock(key, hash); + V oldValue = null; + try { + HashEntry e; + for (e = entryForHash(this, hash); e != null; e = e.next) { + K k; + if ((k = e.key) == key || + (e.hash == hash && key.equals(k))) { + oldValue = e.value; + e.value = value; + ++modCount; + break; + } + } + } finally { + unlock(); + } + return oldValue; + } + + final void clear() { + lock(); + try { + HashEntry[] tab = table; + for (int i = 0; i < tab.length ; i++) + setEntryAt(tab, i, null); + ++modCount; + count = 0; + } finally { + unlock(); + } + } + } + + // Accessing segments + + /** + * Gets the jth element of given segment array (if nonnull) with + * volatile element access semantics via Unsafe. (The null check + * can trigger harmlessly only during deserialization.) Note: + * because each element of segments array is set only once (using + * fully ordered writes), some performance-sensitive methods rely + * on this method only as a recheck upon null reads. + */ + @SuppressWarnings("unchecked") + static final Segment segmentAt(Segment[] ss, int j) { + long u = (j << SSHIFT) + SBASE; + return ss == null ? null : + (Segment) UNSAFE.getObjectVolatile(ss, u); + } + + /** + * Returns the segment for the given index, creating it and + * recording in segment table (via CAS) if not already present. 
+ * + * @param k the index + * @return the segment + */ + @SuppressWarnings("unchecked") + private Segment ensureSegment(int k) { + final Segment[] ss = this.segments; + long u = (k << SSHIFT) + SBASE; // raw offset + Segment seg; + if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) == null) { + Segment proto = ss[0]; // use segment 0 as prototype + int cap = proto.table.length; + float lf = proto.loadFactor; + int threshold = (int)(cap * lf); + HashEntry[] tab = (HashEntry[])new HashEntry[cap]; + if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) + == null) { // recheck + Segment s = new Segment(lf, threshold, tab); + while ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) + == null) { + if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s)) + break; + } + } + } + return seg; + } + + // Hash-based segment and entry accesses + + /** + * Get the segment for the given hash + */ + @SuppressWarnings("unchecked") + private Segment segmentForHash(int h) { + long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; + return (Segment) UNSAFE.getObjectVolatile(segments, u); + } + + /** + * Gets the table entry for the given segment and hash + */ + @SuppressWarnings("unchecked") + static final HashEntry entryForHash(Segment seg, int h) { + HashEntry[] tab; + return (seg == null || (tab = seg.table) == null) ? null : + (HashEntry) UNSAFE.getObjectVolatile + (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); + } + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the specified initial + * capacity, load factor and concurrency level. + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements. + * @param loadFactor the load factor threshold, used to control resizing. + * Resizing may be performed when the average number of elements per + * bin exceeds this threshold. + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation performs internal sizing + * to try to accommodate this many threads. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive. + */ + @SuppressWarnings("unchecked") + public ConcurrentHashMap(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + if (concurrencyLevel > MAX_SEGMENTS) + concurrencyLevel = MAX_SEGMENTS; + // Find power-of-two sizes best matching arguments + int sshift = 0; + int ssize = 1; + while (ssize < concurrencyLevel) { + ++sshift; + ssize <<= 1; + } + this.segmentShift = 32 - sshift; + this.segmentMask = ssize - 1; + if (initialCapacity > MAXIMUM_CAPACITY) + initialCapacity = MAXIMUM_CAPACITY; + int c = initialCapacity / ssize; + if (c * ssize < initialCapacity) + ++c; + int cap = MIN_SEGMENT_TABLE_CAPACITY; + while (cap < c) + cap <<= 1; + // create segments and segments[0] + Segment s0 = + new Segment(loadFactor, (int)(cap * loadFactor), + (HashEntry[])new HashEntry[cap]); + Segment[] ss = (Segment[])new Segment[ssize]; + UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0] + this.segments = ss; + } + + /** + * Creates a new, empty map with the specified initial capacity + * and load factor and with the default concurrencyLevel (16). + * + * @param initialCapacity The implementation performs internal + * sizing to accommodate this many elements. 
+ * @param loadFactor the load factor threshold, used to control resizing. + * Resizing may be performed when the average number of elements per + * bin exceeds this threshold. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative or the load factor is nonpositive + * + * @since 1.6 + */ + public ConcurrentHashMap(int initialCapacity, float loadFactor) { + this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Creates a new, empty map with the specified initial capacity, + * and with default load factor (0.75) and concurrencyLevel (16). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative. + */ + public ConcurrentHashMap(int initialCapacity) { + this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Creates a new, empty map with a default initial capacity (16), + * load factor (0.75) and concurrencyLevel (16). + */ + public ConcurrentHashMap() { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Creates a new map with the same mappings as the given map. + * The map is created with a capacity of 1.5 times the number + * of mappings in the given map or 16 (whichever is greater), + * and a default load factor (0.75) and concurrencyLevel (16). + * + * @param m the map + */ + public ConcurrentHashMap(Map m) { + this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, + DEFAULT_INITIAL_CAPACITY), + DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + putAll(m); + } + + /** + * Returns true if this map contains no key-value mappings. + * + * @return true if this map contains no key-value mappings + */ + public boolean isEmpty() { + /* + * Sum per-segment modCounts to avoid mis-reporting when + * elements are concurrently added and removed in one segment + * while checking another, in which case the table was never + * actually empty at any point. (The sum ensures accuracy up + * through at least 1<<31 per-segment modifications before + * recheck.) Methods size() and containsValue() use similar + * constructions for stability checks. + */ + long sum = 0L; + final Segment[] segments = this.segments; + for (int j = 0; j < segments.length; ++j) { + Segment seg = segmentAt(segments, j); + if (seg != null) { + if (seg.count != 0) + return false; + sum += seg.modCount; + } + } + if (sum != 0L) { // recheck unless no modifications + for (int j = 0; j < segments.length; ++j) { + Segment seg = segmentAt(segments, j); + if (seg != null) { + if (seg.count != 0) + return false; + sum -= seg.modCount; + } + } + if (sum != 0L) + return false; + } + return true; + } + + /** + * Returns the number of key-value mappings in this map. If the + * map contains more than Integer.MAX_VALUE elements, returns + * Integer.MAX_VALUE. + * + * @return the number of key-value mappings in this map + */ + public int size() { + // Try a few times to get accurate count. On failure due to + // continuous async changes in table, resort to locking. 
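+ // The first pass and up to RETRIES_BEFORE_LOCK further passes run
+ // without locking; the loop exits as soon as a pass computes the same
+ // modCount sum as the previous one (last starts at 0, so a
+ // never-modified map exits on the first pass). Once retries reaches
+ // RETRIES_BEFORE_LOCK, all segments are locked, the sum can no longer
+ // change, and the loop terminates.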
+ final Segment[] segments = this.segments; + int size; + boolean overflow; // true if size overflows 32 bits + long sum; // sum of modCounts + long last = 0L; // previous sum + int retries = -1; // first iteration isn't retry + try { + for (;;) { + if (retries++ == RETRIES_BEFORE_LOCK) { + for (int j = 0; j < segments.length; ++j) + ensureSegment(j).lock(); // force creation + } + sum = 0L; + size = 0; + overflow = false; + for (int j = 0; j < segments.length; ++j) { + Segment seg = segmentAt(segments, j); + if (seg != null) { + sum += seg.modCount; + int c = seg.count; + if (c < 0 || (size += c) < 0) + overflow = true; + } + } + if (sum == last) + break; + last = sum; + } + } finally { + if (retries > RETRIES_BEFORE_LOCK) { + for (int j = 0; j < segments.length; ++j) + segmentAt(segments, j).unlock(); + } + } + return overflow ? Integer.MAX_VALUE : size; + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *
<p>
More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + public V get(Object key) { + Segment s; // manually integrate access methods to reduce overhead + HashEntry[] tab; + int h = hash(key.hashCode()); + long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; + if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null && + (tab = s.table) != null) { + for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile + (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); + e != null; e = e.next) { + K k; + if ((k = e.key) == key || (e.hash == h && key.equals(k))) + return e.value; + } + } + return null; + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return true if and only if the specified object + * is a key in this table, as determined by the + * equals method; false otherwise. + * @throws NullPointerException if the specified key is null + */ + @SuppressWarnings("unchecked") + public boolean containsKey(Object key) { + Segment s; // same as get() except no need for volatile value read + HashEntry[] tab; + int h = hash(key.hashCode()); + long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; + if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null && + (tab = s.table) != null) { + for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile + (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); + e != null; e = e.next) { + K k; + if ((k = e.key) == key || (e.hash == h && key.equals(k))) + return true; + } + } + return false; + } + + /** + * Returns true if this map maps one or more keys to the + * specified value. Note: This method requires a full internal + * traversal of the hash table, and so is much slower than + * method containsKey. + * + * @param value value whose presence in this map is to be tested + * @return true if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + // Same idea as size() + if (value == null) + throw new NullPointerException(); + final Segment[] segments = this.segments; + boolean found = false; + long last = 0; + int retries = -1; + try { + outer: for (;;) { + if (retries++ == RETRIES_BEFORE_LOCK) { + for (int j = 0; j < segments.length; ++j) + ensureSegment(j).lock(); // force creation + } + long hashSum = 0L; + int sum = 0; + for (int j = 0; j < segments.length; ++j) { + HashEntry[] tab; + Segment seg = segmentAt(segments, j); + if (seg != null && (tab = seg.table) != null) { + for (int i = 0 ; i < tab.length; i++) { + HashEntry e; + for (e = entryAt(tab, i); e != null; e = e.next) { + V v = e.value; + if (v != null && value.equals(v)) { + found = true; + break outer; + } + } + } + sum += seg.modCount; + } + } + if (retries > 0 && sum == last) + break; + last = sum; + } + } finally { + if (retries > RETRIES_BEFORE_LOCK) { + for (int j = 0; j < segments.length; ++j) + segmentAt(segments, j).unlock(); + } + } + return found; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. 
This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + + * @param value a value to search for + * @return true if and only if some key maps to the + * value argument in this table as + * determined by the equals method; + * false otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *
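+ * <p>A brief sketch of the contrast with {@link #putIfAbsent},
+ * assuming a {@code ConcurrentHashMap<String, Integer>} named
+ * {@code map} (names and values are illustrative):
+ * <pre> {@code
+ * map.put("k", 1);          // insert or overwrite; returns prior value or null
+ * map.putIfAbsent("k", 2);  // no effect; returns 1
+ * map.put("k", 2);          // overwrites; returns 1}</pre>
+ *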
<p>
The value can be retrieved by calling the get method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") + public V put(K key, V value) { + Segment s; + if (value == null) + throw new NullPointerException(); + int hash = hash(key.hashCode()); + int j = (hash >>> segmentShift) & segmentMask; + if ((s = (Segment)UNSAFE.getObject // nonvolatile; recheck + (segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment + s = ensureSegment(j); + return s.put(key, hash, value, false); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + @SuppressWarnings("unchecked") + public V putIfAbsent(K key, V value) { + Segment s; + if (value == null) + throw new NullPointerException(); + int hash = hash(key.hashCode()); + int j = (hash >>> segmentShift) & segmentMask; + if ((s = (Segment)UNSAFE.getObject + (segments, (j << SSHIFT) + SBASE)) == null) + s = ensureSegment(j); + return s.put(key, hash, value, true); + } + + /** + * Copies all of the mappings from the specified map to this one. + * These mappings replace any mappings that this map had for any of the + * keys currently in the specified map. + * + * @param m mappings to be stored in this map + */ + public void putAll(Map m) { + for (Map.Entry e : m.entrySet()) + put(e.getKey(), e.getValue()); + } + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key is null + */ + public V remove(Object key) { + int hash = hash(key.hashCode()); + Segment s = segmentForHash(hash); + return s == null ? null : s.remove(key, hash, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(Object key, Object value) { + int hash = hash(key.hashCode()); + Segment s; + return value != null && (s = segmentForHash(hash)) != null && + s.remove(key, hash, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(K key, V oldValue, V newValue) { + int hash = hash(key.hashCode()); + if (oldValue == null || newValue == null) + throw new NullPointerException(); + Segment s = segmentForHash(hash); + return s != null && s.replace(key, hash, oldValue, newValue); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + public V replace(K key, V value) { + int hash = hash(key.hashCode()); + if (value == null) + throw new NullPointerException(); + Segment s = segmentForHash(hash); + return s == null ? null : s.replace(key, hash, value); + } + + /** + * Removes all of the mappings from this map. 
+ */ + public void clear() { + final Segment[] segments = this.segments; + for (int j = 0; j < segments.length; ++j) { + Segment s = segmentAt(segments, j); + if (s != null) + s.clear(); + } + } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from this map, + * via the Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or + * addAll operations. + * + *
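+ * <p>For example (assuming {@code String} keys), entries can be
+ * pruned through this view while other threads continue to read the
+ * map; the {@code "tmp:"} prefix below is an illustrative assumption:
+ * <pre> {@code
+ * for (Iterator<String> it = map.keySet().iterator(); it.hasNext();) {
+ *     if (it.next().startsWith("tmp:"))
+ *         it.remove();      // removes the mapping from the map
+ * }}</pre>
+ *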
<p>
The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set keySet() { + Set ks = keySet; + return (ks != null) ? ks : (keySet = new KeySet()); + } + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. The collection + * supports element removal, which removes the corresponding + * mapping from this map, via the Iterator.remove, + * Collection.remove, removeAll, + * retainAll, and clear operations. It does not + * support the add or addAll operations. + * + *
<p>
The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Collection values() { + Collection vs = values; + return (vs != null) ? vs : (values = new Values()); + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. The set supports element + * removal, which removes the corresponding mapping from the map, + * via the Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or + * addAll operations. + * + *
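+ * <p>Entries returned by this view's iterator support
+ * {@code setValue}, which writes through to the map. A sketch
+ * (assuming {@code Integer} values; each update is an independent
+ * put, not atomic with respect to concurrent updates of the same key):
+ * <pre> {@code
+ * for (Map.Entry<String, Integer> e : map.entrySet())
+ *     e.setValue(e.getValue() + 1);}</pre>
+ *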
<p>
The view's iterator is a "weakly consistent" iterator + * that will never throw {@link ConcurrentModificationException}, + * and guarantees to traverse elements as they existed upon + * construction of the iterator, and may (but is not guaranteed to) + * reflect any modifications subsequent to construction. + */ + public Set> entrySet() { + Set> es = entrySet; + return (es != null) ? es : (entrySet = new EntrySet()); + } + + /** + * Returns an enumeration of the keys in this table. + * + * @return an enumeration of the keys in this table + * @see #keySet() + */ + public Enumeration keys() { + return new KeyIterator(); + } + + /** + * Returns an enumeration of the values in this table. + * + * @return an enumeration of the values in this table + * @see #values() + */ + public Enumeration elements() { + return new ValueIterator(); + } + + /* ---------------- Iterator Support -------------- */ + + abstract class HashIterator { + int nextSegmentIndex; + int nextTableIndex; + HashEntry[] currentTable; + HashEntry nextEntry; + HashEntry lastReturned; + + HashIterator() { + nextSegmentIndex = segments.length - 1; + nextTableIndex = -1; + advance(); + } + + /** + * Set nextEntry to first node of next non-empty table + * (in backwards order, to simplify checks). + */ + final void advance() { + for (;;) { + if (nextTableIndex >= 0) { + if ((nextEntry = entryAt(currentTable, + nextTableIndex--)) != null) + break; + } + else if (nextSegmentIndex >= 0) { + Segment seg = segmentAt(segments, nextSegmentIndex--); + if (seg != null && (currentTable = seg.table) != null) + nextTableIndex = currentTable.length - 1; + } + else + break; + } + } + + final HashEntry nextEntry() { + HashEntry e = nextEntry; + if (e == null) + throw new NoSuchElementException(); + lastReturned = e; // cannot assign until after null check + if ((nextEntry = e.next) == null) + advance(); + return e; + } + + public final boolean hasNext() { return nextEntry != null; } + public final boolean hasMoreElements() { return nextEntry != null; } + + public final void remove() { + if (lastReturned == null) + throw new IllegalStateException(); + ConcurrentHashMap.this.remove(lastReturned.key); + lastReturned = null; + } + } + + final class KeyIterator + extends HashIterator + implements Iterator, Enumeration + { + public final K next() { return super.nextEntry().key; } + public final K nextElement() { return super.nextEntry().key; } + } + + final class ValueIterator + extends HashIterator + implements Iterator, Enumeration + { + public final V next() { return super.nextEntry().value; } + public final V nextElement() { return super.nextEntry().value; } + } + + /** + * Custom Entry class used by EntryIterator.next(), that relays + * setValue changes to the underlying map. + */ + final class WriteThroughEntry + extends AbstractMap.SimpleEntry + { + WriteThroughEntry(K k, V v) { + super(k,v); + } + + /** + * Set our entry's value and write through to the map. The + * value to return is somewhat arbitrary here. Since a + * WriteThroughEntry does not necessarily track asynchronous + * changes, the most recent "previous" value could be + * different from what we return (or could even have been + * removed in which case the put will re-establish). We do not + * and cannot guarantee more. 
+ */ + public V setValue(V value) { + if (value == null) throw new NullPointerException(); + V v = super.setValue(value); + ConcurrentHashMap.this.put(getKey(), value); + return v; + } + } + + final class EntryIterator + extends HashIterator + implements Iterator> + { + public Map.Entry next() { + HashEntry e = super.nextEntry(); + return new WriteThroughEntry(e.key, e.value); + } + } + + final class KeySet extends AbstractSet { + public Iterator iterator() { + return new KeyIterator(); + } + public int size() { + return ConcurrentHashMap.this.size(); + } + public boolean isEmpty() { + return ConcurrentHashMap.this.isEmpty(); + } + public boolean contains(Object o) { + return ConcurrentHashMap.this.containsKey(o); + } + public boolean remove(Object o) { + return ConcurrentHashMap.this.remove(o) != null; + } + public void clear() { + ConcurrentHashMap.this.clear(); + } + } + + final class Values extends AbstractCollection { + public Iterator iterator() { + return new ValueIterator(); + } + public int size() { + return ConcurrentHashMap.this.size(); + } + public boolean isEmpty() { + return ConcurrentHashMap.this.isEmpty(); + } + public boolean contains(Object o) { + return ConcurrentHashMap.this.containsValue(o); + } + public void clear() { + ConcurrentHashMap.this.clear(); + } + } + + final class EntrySet extends AbstractSet> { + public Iterator> iterator() { + return new EntryIterator(); + } + public boolean contains(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry e = (Map.Entry)o; + V v = ConcurrentHashMap.this.get(e.getKey()); + return v != null && v.equals(e.getValue()); + } + public boolean remove(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry e = (Map.Entry)o; + return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()); + } + public int size() { + return ConcurrentHashMap.this.size(); + } + public boolean isEmpty() { + return ConcurrentHashMap.this.isEmpty(); + } + public void clear() { + ConcurrentHashMap.this.clear(); + } + } + + /* ---------------- Serialization Support -------------- */ + + /** + * Save the state of the ConcurrentHashMap instance to a + * stream (i.e., serialize it). + * @param s the stream + * @serialData + * the key (Object) and value (Object) + * for each key-value mapping, followed by a null pair. + * The key-value mappings are emitted in no particular order. + */ + private void writeObject(java.io.ObjectOutputStream s) throws IOException { + // force all segments for serialization compatibility + for (int k = 0; k < segments.length; ++k) + ensureSegment(k); + s.defaultWriteObject(); + + final Segment[] segments = this.segments; + for (int k = 0; k < segments.length; ++k) { + Segment seg = segmentAt(segments, k); + seg.lock(); + try { + HashEntry[] tab = seg.table; + for (int i = 0; i < tab.length; ++i) { + HashEntry e; + for (e = entryAt(tab, i); e != null; e = e.next) { + s.writeObject(e.key); + s.writeObject(e.value); + } + } + } finally { + seg.unlock(); + } + } + s.writeObject(null); + s.writeObject(null); + } + + /** + * Reconstitute the ConcurrentHashMap instance from a + * stream (i.e., deserialize it). + * @param s the stream + */ + @SuppressWarnings("unchecked") + private void readObject(java.io.ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + + // Re-initialize segments to be minimally sized, and let grow. 
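+ // All segments should be non-null here because writeObject forces
+ // their creation before writing; the null check below tolerates
+ // streams produced by versions that did not.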
+ int cap = MIN_SEGMENT_TABLE_CAPACITY; + final Segment[] segments = this.segments; + for (int k = 0; k < segments.length; ++k) { + Segment seg = segments[k]; + if (seg != null) { + seg.threshold = (int)(cap * seg.loadFactor); + seg.table = (HashEntry[]) new HashEntry[cap]; + } + } + + // Read the keys and values, and put the mappings in the table + for (;;) { + K key = (K) s.readObject(); + V value = (V) s.readObject(); + if (key == null) + break; + put(key, value); + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long SBASE; + private static final int SSHIFT; + private static final long TBASE; + private static final int TSHIFT; + + static { + int ss, ts; + try { + UNSAFE = sun.misc.Unsafe.getUnsafe(); + Class tc = HashEntry[].class; + Class sc = Segment[].class; + TBASE = UNSAFE.arrayBaseOffset(tc); + SBASE = UNSAFE.arrayBaseOffset(sc); + ts = UNSAFE.arrayIndexScale(tc); + ss = UNSAFE.arrayIndexScale(sc); + } catch (Exception e) { + throw new Error(e); + } + if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0) + throw new Error("data type scale not a power of two"); + SSHIFT = 31 - Integer.numberOfLeadingZeros(ss); + TSHIFT = 31 - Integer.numberOfLeadingZeros(ts); + } + +}