1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/rt/emul/compact/src/main/java/java/util/concurrent/ConcurrentHashMap.java Thu Oct 03 15:40:35 2013 +0200
1.3 @@ -0,0 +1,1522 @@
1.4 +/*
1.5 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
1.6 + *
1.7 + * This code is free software; you can redistribute it and/or modify it
1.8 + * under the terms of the GNU General Public License version 2 only, as
1.9 + * published by the Free Software Foundation. Oracle designates this
1.10 + * particular file as subject to the "Classpath" exception as provided
1.11 + * by Oracle in the LICENSE file that accompanied this code.
1.12 + *
1.13 + * This code is distributed in the hope that it will be useful, but WITHOUT
1.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
1.15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
1.16 + * version 2 for more details (a copy is included in the LICENSE file that
1.17 + * accompanied this code).
1.18 + *
1.19 + * You should have received a copy of the GNU General Public License version
1.20 + * 2 along with this work; if not, write to the Free Software Foundation,
1.21 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
1.22 + *
1.23 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
1.24 + * or visit www.oracle.com if you need additional information or have any
1.25 + * questions.
1.26 + */
1.27 +
1.28 +/*
1.29 + * This file is available under and governed by the GNU General Public
1.30 + * License version 2 only, as published by the Free Software Foundation.
1.31 + * However, the following notice accompanied the original version of this
1.32 + * file:
1.33 + *
1.34 + * Written by Doug Lea with assistance from members of JCP JSR-166
1.35 + * Expert Group and released to the public domain, as explained at
1.36 + * http://creativecommons.org/publicdomain/zero/1.0/
1.37 + */
1.38 +
1.39 +package java.util.concurrent;
1.40 +import java.util.concurrent.locks.*;
1.41 +import java.util.*;
1.42 +import java.io.Serializable;
1.43 +import java.io.IOException;
1.44 +import java.io.ObjectInputStream;
1.45 +import java.io.ObjectOutputStream;
1.46 +
1.47 +/**
1.48 + * A hash table supporting full concurrency of retrievals and
1.49 + * adjustable expected concurrency for updates. This class obeys the
1.50 + * same functional specification as {@link java.util.Hashtable}, and
1.51 + * includes versions of methods corresponding to each method of
1.52 + * <tt>Hashtable</tt>. However, even though all operations are
1.53 + * thread-safe, retrieval operations do <em>not</em> entail locking,
1.54 + * and there is <em>not</em> any support for locking the entire table
1.55 + * in a way that prevents all access. This class is fully
1.56 + * interoperable with <tt>Hashtable</tt> in programs that rely on its
1.57 + * thread safety but not on its synchronization details.
1.58 + *
1.59 + * <p> Retrieval operations (including <tt>get</tt>) generally do not
1.60 + * block, so may overlap with update operations (including
1.61 + * <tt>put</tt> and <tt>remove</tt>). Retrievals reflect the results
1.62 + * of the most recently <em>completed</em> update operations holding
1.63 + * upon their onset. For aggregate operations such as <tt>putAll</tt>
1.64 + * and <tt>clear</tt>, concurrent retrievals may reflect insertion or
1.65 + * removal of only some entries. Similarly, Iterators and
1.66 + * Enumerations return elements reflecting the state of the hash table
1.67 + * at some point at or since the creation of the iterator/enumeration.
1.68 + * They do <em>not</em> throw {@link ConcurrentModificationException}.
1.69 + * However, iterators are designed to be used by only one thread at a time.
1.70 + *
1.71 + * <p> The allowed concurrency among update operations is guided by
1.72 + * the optional <tt>concurrencyLevel</tt> constructor argument
1.73 + * (default <tt>16</tt>), which is used as a hint for internal sizing. The
1.74 + * table is internally partitioned to try to permit the indicated
1.75 + * number of concurrent updates without contention. Because placement
1.76 + * in hash tables is essentially random, the actual concurrency will
1.77 + * vary. Ideally, you should choose a value to accommodate as many
1.78 + * threads as will ever concurrently modify the table. Using a
1.79 + * significantly higher value than you need can waste space and time,
1.80 + * and a significantly lower value can lead to thread contention. But
1.81 + * overestimates and underestimates within an order of magnitude do
1.82 + * not usually have much noticeable impact. A value of one is
1.83 + * appropriate when it is known that only one thread will modify and
1.84 + * all others will only read. Also, resizing this or any other kind of
1.85 + * hash table is a relatively slow operation, so, when possible, it is
1.86 + * a good idea to provide estimates of expected table sizes in
1.87 + * constructors.
1.88 + *
1.89 + * <p>This class and its views and iterators implement all of the
1.90 + * <em>optional</em> methods of the {@link Map} and {@link Iterator}
1.91 + * interfaces.
1.92 + *
1.93 + * <p> Like {@link Hashtable} but unlike {@link HashMap}, this class
1.94 + * does <em>not</em> allow <tt>null</tt> to be used as a key or value.
1.95 + *
1.96 + * <p>This class is a member of the
1.97 + * <a href="{@docRoot}/../technotes/guides/collections/index.html">
1.98 + * Java Collections Framework</a>.
1.99 + *
1.100 + * @since 1.5
1.101 + * @author Doug Lea
1.102 + * @param <K> the type of keys maintained by this map
1.103 + * @param <V> the type of mapped values
1.104 + */
1.105 +public class ConcurrentHashMap<K, V> extends AbstractMap<K, V>
1.106 + implements ConcurrentMap<K, V>, Serializable {
1.107 + private static final long serialVersionUID = 7249069246763182397L;
1.108 +
1.109 + /*
1.110 + * The basic strategy is to subdivide the table among Segments,
1.111 + * each of which itself is a concurrently readable hash table. To
1.112 + * reduce footprint, all but one segments are constructed only
1.113 + * when first needed (see ensureSegment). To maintain visibility
1.114 + * in the presence of lazy construction, accesses to segments as
1.115 + * well as elements of segment's table must use volatile access,
1.116 + * which is done via Unsafe within methods segmentAt etc
1.117 + * below. These provide the functionality of AtomicReferenceArrays
1.118 + * but reduce the levels of indirection. Additionally,
1.119 + * volatile-writes of table elements and entry "next" fields
1.120 + * within locked operations use the cheaper "lazySet" forms of
1.121 + * writes (via putOrderedObject) because these writes are always
1.122 + * followed by lock releases that maintain sequential consistency
1.123 + * of table updates.
1.124 + *
1.125 + * Historical note: The previous version of this class relied
1.126 + * heavily on "final" fields, which avoided some volatile reads at
1.127 + * the expense of a large initial footprint. Some remnants of
1.128 + * that design (including forced construction of segment 0) exist
1.129 + * to ensure serialization compatibility.
1.130 + */
1.131 +
    /* ---------------- Constants -------------- */

    /**
     * The default initial capacity for this table,
     * used when not otherwise specified in a constructor.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 16;

    /**
     * The default load factor for this table, used when not
     * otherwise specified in a constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * The default concurrency level for this table, used when not
     * otherwise specified in a constructor.
     */
    static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The maximum capacity, used if a higher value is implicitly
     * specified by either of the constructors with arguments.  MUST
     * be a power of two <= 1<<30 to ensure that entries are indexable
     * using ints.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The minimum capacity for per-segment tables.  Must be a power
     * of two, at least two to avoid immediate resizing on next use
     * after lazy construction.
     */
    static final int MIN_SEGMENT_TABLE_CAPACITY = 2;

    /**
     * The maximum number of segments to allow; used to bound
     * constructor arguments. Must be power of two less than 1 << 24.
     */
    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

    /**
     * Number of unsynchronized retries in size and containsValue
     * methods before resorting to locking. This is used to avoid
     * unbounded retries if tables undergo continuous modification
     * which would make it impossible to obtain an accurate result.
     */
    static final int RETRIES_BEFORE_LOCK = 2;

    /* ---------------- Fields -------------- */

    /**
     * Mask value for indexing into segments. The upper bits of a
     * key's hash code are used to choose the segment.
     * (Always {@code ssize - 1}; see the main constructor.)
     */
    final int segmentMask;

    /**
     * Shift value for indexing within segments.
     * (Always {@code 32 - sshift}; see the main constructor.)
     */
    final int segmentShift;

    /**
     * The segments, each of which is a specialized hash table.
     * All but segments[0] are created lazily (see ensureSegment).
     */
    final Segment<K,V>[] segments;

    // Cached view collections. Transient and never assigned in this
    // excerpt — presumably created lazily by the keySet()/entrySet()/
    // values() accessors defined later in the file (TODO confirm).
    transient Set<K> keySet;
    transient Set<Map.Entry<K,V>> entrySet;
    transient Collection<V> values;
1.202 +
    /**
     * ConcurrentHashMap list entry. Note that this is never exported
     * out as a user-visible Map.Entry.
     */
    static final class HashEntry<K,V> {
        final int hash;               // spread hash of the key; immutable
        final K key;                  // immutable
        volatile V value;             // volatile: read without locks by get()
        volatile HashEntry<K,V> next; // volatile: list traversed by readers

        HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }

        /**
         * Sets next field with volatile write semantics.  Uses the
         * cheaper ordered ("lazySet") form of write; as noted in the
         * class comment, such writes are always followed by a lock
         * release that publishes them.
         */
        final void setNext(HashEntry<K,V> n) {
            UNSAFE.putOrderedObject(this, nextOffset, n);
        }

        // Unsafe mechanics: cache the field offset of "next" once so
        // setNext can issue the ordered write directly.
        static final sun.misc.Unsafe UNSAFE;
        static final long nextOffset;
        static {
            try {
                UNSAFE = sun.misc.Unsafe.getUnsafe();
                Class k = HashEntry.class;
                nextOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("next"));
            } catch (Exception e) {
                // Reflection failure here is unrecoverable for the class.
                throw new Error(e);
            }
        }
    }
1.242 +
    /**
     * Gets the ith element of given table (if nonnull) with volatile
     * read semantics. Note: This is manually integrated into a few
     * performance-sensitive methods to reduce call overhead.
     */
    @SuppressWarnings("unchecked")
    static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
        // ((long)i << TSHIFT) + TBASE turns the index into the raw byte
        // offset Unsafe expects (TSHIFT/TBASE are the array element
        // scale/base, initialized elsewhere in this file).
        return (tab == null) ? null :
            (HashEntry<K,V>) UNSAFE.getObjectVolatile
            (tab, ((long)i << TSHIFT) + TBASE);
    }
1.254 +
    /**
     * Sets the ith element of given table, with volatile write
     * semantics. Uses the cheaper ordered ("lazySet") write form; per
     * the class comment, callers always follow with a lock release
     * that makes the write visible.
     */
    static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
                                       HashEntry<K,V> e) {
        UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
    }
1.263 +
    /**
     * Applies a supplemental hash function to a given hashCode, which
     * defends against poor quality hash functions.  This is critical
     * because ConcurrentHashMap uses power-of-two length hash tables,
     * that otherwise encounter collisions for hashCodes that do not
     * differ in lower or upper bits.
     */
    private static int hash(int h) {
        // Spread bits to regularize both segment and index locations,
        // using variant of single-word Wang/Jenkins hash.
        // The int additions below are intended to wrap around; the
        // overflow is part of the mixing.
        h += (h << 15) ^ 0xffffcd7d;
        h ^= (h >>> 10);
        h += (h << 3);
        h ^= (h >>> 6);
        h += (h << 2) + (h << 14);
        return h ^ (h >>> 16);
    }
1.281 +
    /**
     * Segments are specialized versions of hash tables.  This
     * subclasses from ReentrantLock opportunistically, just to
     * simplify some locking and avoid separate construction.
     */
    static final class Segment<K,V> extends ReentrantLock implements Serializable {
        /*
         * Segments maintain a table of entry lists that are always
         * kept in a consistent state, so can be read (via volatile
         * reads of segments and tables) without locking.  This
         * requires replicating nodes when necessary during table
         * resizing, so the old lists can be traversed by readers
         * still using old version of table.
         *
         * This class defines only mutative methods requiring locking.
         * Except as noted, the methods of this class perform the
         * per-segment versions of ConcurrentHashMap methods.  (Other
         * methods are integrated directly into ConcurrentHashMap
         * methods.)  These mutative methods use a form of controlled
         * spinning on contention via methods scanAndLock and
         * scanAndLockForPut. These intersperse tryLocks with
         * traversals to locate nodes.  The main benefit is to absorb
         * cache misses (which are very common for hash tables) while
         * obtaining locks so that traversal is faster once
         * acquired. We do not actually use the found nodes since they
         * must be re-acquired under lock anyway to ensure sequential
         * consistency of updates (and in any case may be undetectably
         * stale), but they will normally be much faster to re-locate.
         * Also, scanAndLockForPut speculatively creates a fresh node
         * to use in put if no node is found.
         */

        private static final long serialVersionUID = 2249069246763182397L;

        /**
         * The maximum number of times to tryLock in a prescan before
         * possibly blocking on acquire in preparation for a locked
         * segment operation. On multiprocessors, using a bounded
         * number of retries maintains cache acquired while locating
         * nodes.
         */
        static final int MAX_SCAN_RETRIES =
            Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;

        /**
         * The per-segment table. Elements are accessed via
         * entryAt/setEntryAt providing volatile semantics.
         */
        transient volatile HashEntry<K,V>[] table;

        /**
         * The number of elements. Accessed only either within locks
         * or among other volatile reads that maintain visibility.
         */
        transient int count;

        /**
         * The total number of mutative operations in this segment.
         * Even though this may overflow 32 bits, it provides
         * sufficient accuracy for stability checks in CHM isEmpty()
         * and size() methods.  Accessed only either within locks or
         * among other volatile reads that maintain visibility.
         */
        transient int modCount;

        /**
         * The table is rehashed when its size exceeds this threshold.
         * (The value of this field is always <tt>(int)(capacity *
         * loadFactor)</tt>.)
         */
        transient int threshold;

        /**
         * The load factor for the hash table.  Even though this value
         * is same for all segments, it is replicated to avoid needing
         * links to outer object.
         * @serial
         */
        final float loadFactor;

        Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
            this.loadFactor = lf;
            this.threshold = threshold;
            this.table = tab;
        }

        /**
         * Inserts or (unless onlyIfAbsent) updates the mapping for key.
         * Returns the previous value, or null if there was none.
         */
        final V put(K key, int hash, V value, boolean onlyIfAbsent) {
            // If the lock is contended, spin-scan while acquiring it;
            // a non-null result is a speculatively created node for a
            // key the scan did not find.
            HashEntry<K,V> node = tryLock() ? null :
                scanAndLockForPut(key, hash, value);
            V oldValue;
            try {
                HashEntry<K,V>[] tab = table;
                int index = (tab.length - 1) & hash;
                HashEntry<K,V> first = entryAt(tab, index);
                for (HashEntry<K,V> e = first;;) {
                    if (e != null) {
                        K k;
                        if ((k = e.key) == key ||
                            (e.hash == hash && key.equals(k))) {
                            oldValue = e.value;
                            if (!onlyIfAbsent) {
                                e.value = value;
                                ++modCount;
                            }
                            break;
                        }
                        e = e.next;
                    }
                    else {
                        // Reached end of chain: key absent, so link in
                        // the (possibly pre-created) node at the head.
                        if (node != null)
                            node.setNext(first);
                        else
                            node = new HashEntry<K,V>(hash, key, value, first);
                        int c = count + 1;
                        if (c > threshold && tab.length < MAXIMUM_CAPACITY)
                            rehash(node);
                        else
                            setEntryAt(tab, index, node);
                        ++modCount;
                        count = c;
                        oldValue = null;
                        break;
                    }
                }
            } finally {
                unlock();
            }
            return oldValue;
        }

        /**
         * Doubles size of table and repacks entries, also adding the
         * given node to new table.  Called only while holding the lock.
         */
        @SuppressWarnings("unchecked")
        private void rehash(HashEntry<K,V> node) {
            /*
             * Reclassify nodes in each list to new table.  Because we
             * are using power-of-two expansion, the elements from
             * each bin must either stay at same index, or move with a
             * power of two offset. We eliminate unnecessary node
             * creation by catching cases where old nodes can be
             * reused because their next fields won't change.
             * Statistically, at the default threshold, only about
             * one-sixth of them need cloning when a table
             * doubles. The nodes they replace will be garbage
             * collectable as soon as they are no longer referenced by
             * any reader thread that may be in the midst of
             * concurrently traversing table. Entry accesses use plain
             * array indexing because they are followed by volatile
             * table write.
             */
            HashEntry<K,V>[] oldTable = table;
            int oldCapacity = oldTable.length;
            int newCapacity = oldCapacity << 1;
            threshold = (int)(newCapacity * loadFactor);
            HashEntry<K,V>[] newTable =
                (HashEntry<K,V>[]) new HashEntry[newCapacity];
            int sizeMask = newCapacity - 1;
            for (int i = 0; i < oldCapacity ; i++) {
                HashEntry<K,V> e = oldTable[i];
                if (e != null) {
                    HashEntry<K,V> next = e.next;
                    int idx = e.hash & sizeMask;
                    if (next == null)   //  Single node on list
                        newTable[idx] = e;
                    else { // Reuse consecutive sequence at same slot
                        // lastRun marks the tail run of nodes that all
                        // land in the same new bin and can be reused.
                        HashEntry<K,V> lastRun = e;
                        int lastIdx = idx;
                        for (HashEntry<K,V> last = next;
                             last != null;
                             last = last.next) {
                            int k = last.hash & sizeMask;
                            if (k != lastIdx) {
                                lastIdx = k;
                                lastRun = last;
                            }
                        }
                        newTable[lastIdx] = lastRun;
                        // Clone remaining nodes
                        for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
                            V v = p.value;
                            int h = p.hash;
                            int k = h & sizeMask;
                            HashEntry<K,V> n = newTable[k];
                            newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
                        }
                    }
                }
            }
            int nodeIndex = node.hash & sizeMask; // add the new node
            node.setNext(newTable[nodeIndex]);
            newTable[nodeIndex] = node;
            table = newTable; // volatile write publishes the new table
        }

        /**
         * Scans for a node containing given key while trying to
         * acquire lock, creating and returning one if not found. Upon
         * return, guarantees that lock is held. Unlike in most
         * methods, calls to method equals are not screened: Since
         * traversal speed doesn't matter, we might as well help warm
         * up the associated code and accesses as well.
         *
         * @return a new node if key not found, else null
         */
        private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
            HashEntry<K,V> first = entryForHash(this, hash);
            HashEntry<K,V> e = first;
            HashEntry<K,V> node = null;
            int retries = -1; // negative while locating node
            while (!tryLock()) {
                HashEntry<K,V> f; // to recheck first below
                if (retries < 0) {
                    if (e == null) {
                        if (node == null) // speculatively create node
                            node = new HashEntry<K,V>(hash, key, value, null);
                        retries = 0;
                    }
                    else if (key.equals(e.key))
                        retries = 0;
                    else
                        e = e.next;
                }
                else if (++retries > MAX_SCAN_RETRIES) {
                    lock(); // give up spinning; block for the lock
                    break;
                }
                else if ((retries & 1) == 0 &&
                         (f = entryForHash(this, hash)) != first) {
                    e = first = f; // re-traverse if entry changed
                    retries = -1;
                }
            }
            return node;
        }

        /**
         * Scans for a node containing the given key while trying to
         * acquire lock for a remove or replace operation. Upon
         * return, guarantees that lock is held.  Note that we must
         * lock even if the key is not found, to ensure sequential
         * consistency of updates.
         */
        private void scanAndLock(Object key, int hash) {
            // similar to but simpler than scanAndLockForPut
            HashEntry<K,V> first = entryForHash(this, hash);
            HashEntry<K,V> e = first;
            int retries = -1;
            while (!tryLock()) {
                HashEntry<K,V> f;
                if (retries < 0) {
                    if (e == null || key.equals(e.key))
                        retries = 0;
                    else
                        e = e.next;
                }
                else if (++retries > MAX_SCAN_RETRIES) {
                    lock(); // give up spinning; block for the lock
                    break;
                }
                else if ((retries & 1) == 0 &&
                         (f = entryForHash(this, hash)) != first) {
                    e = first = f; // head changed; restart traversal
                    retries = -1;
                }
            }
        }

        /**
         * Remove; match on key only if value null, else match both.
         * Returns the removed value, or null if nothing was removed.
         */
        final V remove(Object key, int hash, Object value) {
            if (!tryLock())
                scanAndLock(key, hash);
            V oldValue = null;
            try {
                HashEntry<K,V>[] tab = table;
                int index = (tab.length - 1) & hash;
                HashEntry<K,V> e = entryAt(tab, index);
                HashEntry<K,V> pred = null;
                while (e != null) {
                    K k;
                    HashEntry<K,V> next = e.next;
                    if ((k = e.key) == key ||
                        (e.hash == hash && key.equals(k))) {
                        V v = e.value;
                        // value == null means "remove unconditionally"
                        if (value == null || value == v || value.equals(v)) {
                            if (pred == null)
                                setEntryAt(tab, index, next); // unlink head
                            else
                                pred.setNext(next);           // unlink interior
                            ++modCount;
                            --count;
                            oldValue = v;
                        }
                        break;
                    }
                    pred = e;
                    e = next;
                }
            } finally {
                unlock();
            }
            return oldValue;
        }

        /**
         * Replaces the value only if currently mapped to oldValue.
         * Returns true if a replacement occurred.
         */
        final boolean replace(K key, int hash, V oldValue, V newValue) {
            if (!tryLock())
                scanAndLock(key, hash);
            boolean replaced = false;
            try {
                HashEntry<K,V> e;
                for (e = entryForHash(this, hash); e != null; e = e.next) {
                    K k;
                    if ((k = e.key) == key ||
                        (e.hash == hash && key.equals(k))) {
                        if (oldValue.equals(e.value)) {
                            e.value = newValue;
                            ++modCount;
                            replaced = true;
                        }
                        break;
                    }
                }
            } finally {
                unlock();
            }
            return replaced;
        }

        /**
         * Unconditionally replaces the value for an existing key.
         * Returns the previous value, or null if the key was absent.
         */
        final V replace(K key, int hash, V value) {
            if (!tryLock())
                scanAndLock(key, hash);
            V oldValue = null;
            try {
                HashEntry<K,V> e;
                for (e = entryForHash(this, hash); e != null; e = e.next) {
                    K k;
                    if ((k = e.key) == key ||
                        (e.hash == hash && key.equals(k))) {
                        oldValue = e.value;
                        e.value = value;
                        ++modCount;
                        break;
                    }
                }
            } finally {
                unlock();
            }
            return oldValue;
        }

        /**
         * Removes all entries from this segment's table.  The ordered
         * null writes are published to readers by the unlock.
         */
        final void clear() {
            lock();
            try {
                HashEntry<K,V>[] tab = table;
                for (int i = 0; i < tab.length ; i++)
                    setEntryAt(tab, i, null);
                ++modCount;
                count = 0;
            } finally {
                unlock();
            }
        }
    }
1.648 +
1.649 + // Accessing segments
1.650 +
    /**
     * Gets the jth element of given segment array (if nonnull) with
     * volatile element access semantics via Unsafe. (The null check
     * can trigger harmlessly only during deserialization.) Note:
     * because each element of segments array is set only once (using
     * fully ordered writes), some performance-sensitive methods rely
     * on this method only as a recheck upon null reads.
     */
    @SuppressWarnings("unchecked")
    static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
        // (j << SSHIFT) + SBASE is the raw byte offset of element j
        // (SSHIFT/SBASE are the array scale/base, set up elsewhere).
        long u = (j << SSHIFT) + SBASE;
        return ss == null ? null :
            (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
    }
1.665 +
    /**
     * Returns the segment for the given index, creating it and
     * recording in segment table (via CAS) if not already present.
     *
     * @param k the index
     * @return the segment
     */
    @SuppressWarnings("unchecked")
    private Segment<K,V> ensureSegment(int k) {
        final Segment<K,V>[] ss = this.segments;
        long u = (k << SSHIFT) + SBASE; // raw offset
        Segment<K,V> seg;
        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
            Segment<K,V> proto = ss[0]; // use segment 0 as prototype
            int cap = proto.table.length;
            float lf = proto.loadFactor;
            int threshold = (int)(cap * lf);
            HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
            // Recheck after the allocation above in case another thread
            // installed the segment meanwhile.
            if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
                == null) { // recheck
                Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
                // CAS loop: exactly one thread publishes the segment;
                // losers return the winner's instance.
                while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
                       == null) {
                    if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
                        break;
                }
            }
        }
        return seg;
    }
1.696 +
1.697 + // Hash-based segment and entry accesses
1.698 +
    /**
     * Gets the segment for the given hash.  The upper hash bits
     * (selected by segmentShift/segmentMask) choose the segment;
     * the read is volatile, so a lazily created segment is seen.
     */
    @SuppressWarnings("unchecked")
    private Segment<K,V> segmentForHash(int h) {
        long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
        return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
    }
1.707 +
    /**
     * Gets the table entry for the given segment and hash.  Tolerates
     * a null segment or null table (returning null), which can occur
     * while segments are being lazily constructed.
     */
    @SuppressWarnings("unchecked")
    static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
        HashEntry<K,V>[] tab;
        // (tab.length - 1) & h picks the bin; the shift/base convert it
        // to a raw byte offset for the volatile array read.
        return (seg == null || (tab = seg.table) == null) ? null :
            (HashEntry<K,V>) UNSAFE.getObjectVolatile
            (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
    }
1.718 +
1.719 + /* ---------------- Public operations -------------- */
1.720 +
    /**
     * Creates a new, empty map with the specified initial
     * capacity, load factor and concurrency level.
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements.
     * @param loadFactor  the load factor threshold, used to control resizing.
     * Resizing may be performed when the average number of elements per
     * bin exceeds this threshold.
     * @param concurrencyLevel the estimated number of concurrently
     * updating threads. The implementation performs internal sizing
     * to try to accommodate this many threads.
     * @throws IllegalArgumentException if the initial capacity is
     * negative or the load factor or concurrencyLevel are
     * nonpositive.
     */
    @SuppressWarnings("unchecked")
    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        // !(loadFactor > 0) also rejects NaN, which a simple
        // loadFactor <= 0 test would let through.
        if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (concurrencyLevel > MAX_SEGMENTS)
            concurrencyLevel = MAX_SEGMENTS;
        // Find power-of-two sizes best matching arguments:
        // ssize = smallest power of two >= concurrencyLevel,
        // sshift = log2(ssize).
        int sshift = 0;
        int ssize = 1;
        while (ssize < concurrencyLevel) {
            ++sshift;
            ssize <<= 1;
        }
        this.segmentShift = 32 - sshift; // upper hash bits pick segment
        this.segmentMask = ssize - 1;
        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;
        // c = per-segment capacity, rounded up so that
        // ssize segments together hold initialCapacity elements.
        int c = initialCapacity / ssize;
        if (c * ssize < initialCapacity)
            ++c;
        int cap = MIN_SEGMENT_TABLE_CAPACITY;
        while (cap < c)
            cap <<= 1;
        // create segments and segments[0]; segment 0 is constructed
        // eagerly to serve as the prototype for ensureSegment and to
        // preserve serialization compatibility (see class comment).
        Segment<K,V> s0 =
            new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
                             (HashEntry<K,V>[])new HashEntry[cap]);
        Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
        UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
        this.segments = ss;
    }
1.769 +
    /**
     * Creates a new, empty map with the specified initial capacity
     * and load factor and with the default concurrencyLevel (16).
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @param loadFactor  the load factor threshold, used to control resizing.
     * Resizing may be performed when the average number of elements per
     * bin exceeds this threshold.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        // Delegates to the main constructor; argument checks happen there.
        this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
    }
1.787 +
    /**
     * Creates a new, empty map with the specified initial capacity,
     * and with default load factor (0.75) and concurrencyLevel (16).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative.
     */
    public ConcurrentHashMap(int initialCapacity) {
        // Delegates to the main constructor; argument checks happen there.
        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
    }
1.800 +
    /**
     * Creates a new, empty map with a default initial capacity (16),
     * load factor (0.75) and concurrencyLevel (16).
     */
    public ConcurrentHashMap() {
        // Delegates to the main constructor with all defaults.
        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
    }
1.808 +
    /**
     * Creates a new map with the same mappings as the given map.
     * The map is created with a capacity of 1.5 times the number
     * of mappings in the given map or 16 (whichever is greater),
     * and a default load factor (0.75) and concurrencyLevel (16).
     *
     * @param m the map
     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        // size()/0.75 + 1 sizes the table so the initial putAll does
        // not immediately trigger rehashing.
        this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
                      DEFAULT_INITIAL_CAPACITY),
             DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
        putAll(m);
    }
1.823 +
1.824 + /**
1.825 + * Returns <tt>true</tt> if this map contains no key-value mappings.
1.826 + *
1.827 + * @return <tt>true</tt> if this map contains no key-value mappings
1.828 + */
    public boolean isEmpty() {
        /*
         * Sum per-segment modCounts to avoid mis-reporting when
         * elements are concurrently added and removed in one segment
         * while checking another, in which case the table was never
         * actually empty at any point. (The sum ensures accuracy up
         * through at least 1<<31 per-segment modifications before
         * recheck.) Methods size() and containsValue() use similar
         * constructions for stability checks.
         */
        long sum = 0L;
        final Segment<K,V>[] segments = this.segments;
        // First pass: any non-empty segment means the map is not empty;
        // also accumulate modCounts to detect concurrent interference.
        for (int j = 0; j < segments.length; ++j) {
            Segment<K,V> seg = segmentAt(segments, j);
            if (seg != null) {
                if (seg.count != 0)
                    return false;
                sum += seg.modCount;
            }
        }
        if (sum != 0L) { // recheck unless no modifications
            // Second pass subtracts modCounts; a nonzero residue means the
            // map was modified between the passes, so conservatively report
            // non-empty rather than claim an emptiness that may never have held.
            for (int j = 0; j < segments.length; ++j) {
                Segment<K,V> seg = segmentAt(segments, j);
                if (seg != null) {
                    if (seg.count != 0)
                        return false;
                    sum -= seg.modCount;
                }
            }
            if (sum != 0L)
                return false;
        }
        return true;
    }
1.863 +
1.864 + /**
1.865 + * Returns the number of key-value mappings in this map. If the
1.866 + * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
1.867 + * <tt>Integer.MAX_VALUE</tt>.
1.868 + *
1.869 + * @return the number of key-value mappings in this map
1.870 + */
    public int size() {
        // Try a few times to get accurate count. On failure due to
        // continuous async changes in table, resort to locking.
        final Segment<K,V>[] segments = this.segments;
        int size;
        boolean overflow; // true if size overflows 32 bits
        long sum; // sum of modCounts
        long last = 0L; // previous sum
        int retries = -1; // first iteration isn't retry
        try {
            for (;;) {
                // After RETRIES_BEFORE_LOCK optimistic passes, lock every
                // segment (creating absent ones) to obtain an exact count.
                if (retries++ == RETRIES_BEFORE_LOCK) {
                    for (int j = 0; j < segments.length; ++j)
                        ensureSegment(j).lock(); // force creation
                }
                sum = 0L;
                size = 0;
                overflow = false;
                for (int j = 0; j < segments.length; ++j) {
                    Segment<K,V> seg = segmentAt(segments, j);
                    if (seg != null) {
                        sum += seg.modCount;
                        int c = seg.count;
                        // A negative count or a negative running total signals
                        // that the true size exceeds Integer.MAX_VALUE.
                        if (c < 0 || (size += c) < 0)
                            overflow = true;
                    }
                }
                // Two consecutive passes with identical modCount sums mean no
                // concurrent modification interfered; the count is trustworthy.
                if (sum == last)
                    break;
                last = sum;
            }
        } finally {
            // Unlock only if the locking fallback above was actually taken.
            if (retries > RETRIES_BEFORE_LOCK) {
                for (int j = 0; j < segments.length; ++j)
                    segmentAt(segments, j).unlock();
            }
        }
        return overflow ? Integer.MAX_VALUE : size;
    }
1.910 +
1.911 + /**
1.912 + * Returns the value to which the specified key is mapped,
1.913 + * or {@code null} if this map contains no mapping for the key.
1.914 + *
1.915 + * <p>More formally, if this map contains a mapping from a key
1.916 + * {@code k} to a value {@code v} such that {@code key.equals(k)},
1.917 + * then this method returns {@code v}; otherwise it returns
1.918 + * {@code null}. (There can be at most one such mapping.)
1.919 + *
1.920 + * @throws NullPointerException if the specified key is null
1.921 + */
1.922 + public V get(Object key) {
1.923 + Segment<K,V> s; // manually integrate access methods to reduce overhead
1.924 + HashEntry<K,V>[] tab;
1.925 + int h = hash(key.hashCode());
1.926 + long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
1.927 + if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
1.928 + (tab = s.table) != null) {
1.929 + for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
1.930 + (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
1.931 + e != null; e = e.next) {
1.932 + K k;
1.933 + if ((k = e.key) == key || (e.hash == h && key.equals(k)))
1.934 + return e.value;
1.935 + }
1.936 + }
1.937 + return null;
1.938 + }
1.939 +
1.940 + /**
1.941 + * Tests if the specified object is a key in this table.
1.942 + *
1.943 + * @param key possible key
1.944 + * @return <tt>true</tt> if and only if the specified object
1.945 + * is a key in this table, as determined by the
1.946 + * <tt>equals</tt> method; <tt>false</tt> otherwise.
1.947 + * @throws NullPointerException if the specified key is null
1.948 + */
1.949 + @SuppressWarnings("unchecked")
1.950 + public boolean containsKey(Object key) {
1.951 + Segment<K,V> s; // same as get() except no need for volatile value read
1.952 + HashEntry<K,V>[] tab;
1.953 + int h = hash(key.hashCode());
1.954 + long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
1.955 + if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
1.956 + (tab = s.table) != null) {
1.957 + for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
1.958 + (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
1.959 + e != null; e = e.next) {
1.960 + K k;
1.961 + if ((k = e.key) == key || (e.hash == h && key.equals(k)))
1.962 + return true;
1.963 + }
1.964 + }
1.965 + return false;
1.966 + }
1.967 +
1.968 + /**
1.969 + * Returns <tt>true</tt> if this map maps one or more keys to the
1.970 + * specified value. Note: This method requires a full internal
1.971 + * traversal of the hash table, and so is much slower than
1.972 + * method <tt>containsKey</tt>.
1.973 + *
1.974 + * @param value value whose presence in this map is to be tested
1.975 + * @return <tt>true</tt> if this map maps one or more keys to the
1.976 + * specified value
1.977 + * @throws NullPointerException if the specified value is null
1.978 + */
1.979 + public boolean containsValue(Object value) {
1.980 + // Same idea as size()
1.981 + if (value == null)
1.982 + throw new NullPointerException();
1.983 + final Segment<K,V>[] segments = this.segments;
1.984 + boolean found = false;
1.985 + long last = 0;
1.986 + int retries = -1;
1.987 + try {
1.988 + outer: for (;;) {
1.989 + if (retries++ == RETRIES_BEFORE_LOCK) {
1.990 + for (int j = 0; j < segments.length; ++j)
1.991 + ensureSegment(j).lock(); // force creation
1.992 + }
1.993 + long hashSum = 0L;
1.994 + int sum = 0;
1.995 + for (int j = 0; j < segments.length; ++j) {
1.996 + HashEntry<K,V>[] tab;
1.997 + Segment<K,V> seg = segmentAt(segments, j);
1.998 + if (seg != null && (tab = seg.table) != null) {
1.999 + for (int i = 0 ; i < tab.length; i++) {
1.1000 + HashEntry<K,V> e;
1.1001 + for (e = entryAt(tab, i); e != null; e = e.next) {
1.1002 + V v = e.value;
1.1003 + if (v != null && value.equals(v)) {
1.1004 + found = true;
1.1005 + break outer;
1.1006 + }
1.1007 + }
1.1008 + }
1.1009 + sum += seg.modCount;
1.1010 + }
1.1011 + }
1.1012 + if (retries > 0 && sum == last)
1.1013 + break;
1.1014 + last = sum;
1.1015 + }
1.1016 + } finally {
1.1017 + if (retries > RETRIES_BEFORE_LOCK) {
1.1018 + for (int j = 0; j < segments.length; ++j)
1.1019 + segmentAt(segments, j).unlock();
1.1020 + }
1.1021 + }
1.1022 + return found;
1.1023 + }
1.1024 +
1.1025 + /**
1.1026 + * Legacy method testing if some key maps into the specified value
1.1027 + * in this table. This method is identical in functionality to
1.1028 + * {@link #containsValue}, and exists solely to ensure
1.1029 + * full compatibility with class {@link java.util.Hashtable},
1.1030 + * which supported this method prior to introduction of the
1.1031 + * Java Collections framework.
     *
1.1033 + * @param value a value to search for
1.1034 + * @return <tt>true</tt> if and only if some key maps to the
1.1035 + * <tt>value</tt> argument in this table as
1.1036 + * determined by the <tt>equals</tt> method;
1.1037 + * <tt>false</tt> otherwise
1.1038 + * @throws NullPointerException if the specified value is null
1.1039 + */
    public boolean contains(Object value) {
        // Legacy Hashtable-compatible alias; identical to containsValue.
        return containsValue(value);
    }
1.1043 +
1.1044 + /**
1.1045 + * Maps the specified key to the specified value in this table.
1.1046 + * Neither the key nor the value can be null.
1.1047 + *
1.1048 + * <p> The value can be retrieved by calling the <tt>get</tt> method
1.1049 + * with a key that is equal to the original key.
1.1050 + *
1.1051 + * @param key key with which the specified value is to be associated
1.1052 + * @param value value to be associated with the specified key
1.1053 + * @return the previous value associated with <tt>key</tt>, or
1.1054 + * <tt>null</tt> if there was no mapping for <tt>key</tt>
1.1055 + * @throws NullPointerException if the specified key or value is null
1.1056 + */
1.1057 + @SuppressWarnings("unchecked")
1.1058 + public V put(K key, V value) {
1.1059 + Segment<K,V> s;
1.1060 + if (value == null)
1.1061 + throw new NullPointerException();
1.1062 + int hash = hash(key.hashCode());
1.1063 + int j = (hash >>> segmentShift) & segmentMask;
1.1064 + if ((s = (Segment<K,V>)UNSAFE.getObject // nonvolatile; recheck
1.1065 + (segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment
1.1066 + s = ensureSegment(j);
1.1067 + return s.put(key, hash, value, false);
1.1068 + }
1.1069 +
1.1070 + /**
1.1071 + * {@inheritDoc}
1.1072 + *
1.1073 + * @return the previous value associated with the specified key,
1.1074 + * or <tt>null</tt> if there was no mapping for the key
1.1075 + * @throws NullPointerException if the specified key or value is null
1.1076 + */
1.1077 + @SuppressWarnings("unchecked")
1.1078 + public V putIfAbsent(K key, V value) {
1.1079 + Segment<K,V> s;
1.1080 + if (value == null)
1.1081 + throw new NullPointerException();
1.1082 + int hash = hash(key.hashCode());
1.1083 + int j = (hash >>> segmentShift) & segmentMask;
1.1084 + if ((s = (Segment<K,V>)UNSAFE.getObject
1.1085 + (segments, (j << SSHIFT) + SBASE)) == null)
1.1086 + s = ensureSegment(j);
1.1087 + return s.put(key, hash, value, true);
1.1088 + }
1.1089 +
1.1090 + /**
1.1091 + * Copies all of the mappings from the specified map to this one.
1.1092 + * These mappings replace any mappings that this map had for any of the
1.1093 + * keys currently in the specified map.
1.1094 + *
1.1095 + * @param m mappings to be stored in this map
1.1096 + */
1.1097 + public void putAll(Map<? extends K, ? extends V> m) {
1.1098 + for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1.1099 + put(e.getKey(), e.getValue());
1.1100 + }
1.1101 +
1.1102 + /**
1.1103 + * Removes the key (and its corresponding value) from this map.
1.1104 + * This method does nothing if the key is not in the map.
1.1105 + *
1.1106 + * @param key the key that needs to be removed
1.1107 + * @return the previous value associated with <tt>key</tt>, or
1.1108 + * <tt>null</tt> if there was no mapping for <tt>key</tt>
1.1109 + * @throws NullPointerException if the specified key is null
1.1110 + */
1.1111 + public V remove(Object key) {
1.1112 + int hash = hash(key.hashCode());
1.1113 + Segment<K,V> s = segmentForHash(hash);
1.1114 + return s == null ? null : s.remove(key, hash, null);
1.1115 + }
1.1116 +
1.1117 + /**
1.1118 + * {@inheritDoc}
1.1119 + *
1.1120 + * @throws NullPointerException if the specified key is null
1.1121 + */
1.1122 + public boolean remove(Object key, Object value) {
1.1123 + int hash = hash(key.hashCode());
1.1124 + Segment<K,V> s;
1.1125 + return value != null && (s = segmentForHash(hash)) != null &&
1.1126 + s.remove(key, hash, value) != null;
1.1127 + }
1.1128 +
1.1129 + /**
1.1130 + * {@inheritDoc}
1.1131 + *
1.1132 + * @throws NullPointerException if any of the arguments are null
1.1133 + */
1.1134 + public boolean replace(K key, V oldValue, V newValue) {
1.1135 + int hash = hash(key.hashCode());
1.1136 + if (oldValue == null || newValue == null)
1.1137 + throw new NullPointerException();
1.1138 + Segment<K,V> s = segmentForHash(hash);
1.1139 + return s != null && s.replace(key, hash, oldValue, newValue);
1.1140 + }
1.1141 +
1.1142 + /**
1.1143 + * {@inheritDoc}
1.1144 + *
1.1145 + * @return the previous value associated with the specified key,
1.1146 + * or <tt>null</tt> if there was no mapping for the key
1.1147 + * @throws NullPointerException if the specified key or value is null
1.1148 + */
1.1149 + public V replace(K key, V value) {
1.1150 + int hash = hash(key.hashCode());
1.1151 + if (value == null)
1.1152 + throw new NullPointerException();
1.1153 + Segment<K,V> s = segmentForHash(hash);
1.1154 + return s == null ? null : s.replace(key, hash, value);
1.1155 + }
1.1156 +
1.1157 + /**
1.1158 + * Removes all of the mappings from this map.
1.1159 + */
1.1160 + public void clear() {
1.1161 + final Segment<K,V>[] segments = this.segments;
1.1162 + for (int j = 0; j < segments.length; ++j) {
1.1163 + Segment<K,V> s = segmentAt(segments, j);
1.1164 + if (s != null)
1.1165 + s.clear();
1.1166 + }
1.1167 + }
1.1168 +
1.1169 + /**
1.1170 + * Returns a {@link Set} view of the keys contained in this map.
1.1171 + * The set is backed by the map, so changes to the map are
1.1172 + * reflected in the set, and vice-versa. The set supports element
1.1173 + * removal, which removes the corresponding mapping from this map,
1.1174 + * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
1.1175 + * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
1.1176 + * operations. It does not support the <tt>add</tt> or
1.1177 + * <tt>addAll</tt> operations.
1.1178 + *
1.1179 + * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1.1180 + * that will never throw {@link ConcurrentModificationException},
1.1181 + * and guarantees to traverse elements as they existed upon
1.1182 + * construction of the iterator, and may (but is not guaranteed to)
1.1183 + * reflect any modifications subsequent to construction.
1.1184 + */
1.1185 + public Set<K> keySet() {
1.1186 + Set<K> ks = keySet;
1.1187 + return (ks != null) ? ks : (keySet = new KeySet());
1.1188 + }
1.1189 +
1.1190 + /**
1.1191 + * Returns a {@link Collection} view of the values contained in this map.
1.1192 + * The collection is backed by the map, so changes to the map are
1.1193 + * reflected in the collection, and vice-versa. The collection
1.1194 + * supports element removal, which removes the corresponding
1.1195 + * mapping from this map, via the <tt>Iterator.remove</tt>,
1.1196 + * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
1.1197 + * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not
1.1198 + * support the <tt>add</tt> or <tt>addAll</tt> operations.
1.1199 + *
1.1200 + * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1.1201 + * that will never throw {@link ConcurrentModificationException},
1.1202 + * and guarantees to traverse elements as they existed upon
1.1203 + * construction of the iterator, and may (but is not guaranteed to)
1.1204 + * reflect any modifications subsequent to construction.
1.1205 + */
1.1206 + public Collection<V> values() {
1.1207 + Collection<V> vs = values;
1.1208 + return (vs != null) ? vs : (values = new Values());
1.1209 + }
1.1210 +
1.1211 + /**
1.1212 + * Returns a {@link Set} view of the mappings contained in this map.
1.1213 + * The set is backed by the map, so changes to the map are
1.1214 + * reflected in the set, and vice-versa. The set supports element
1.1215 + * removal, which removes the corresponding mapping from the map,
1.1216 + * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
1.1217 + * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
1.1218 + * operations. It does not support the <tt>add</tt> or
1.1219 + * <tt>addAll</tt> operations.
1.1220 + *
1.1221 + * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
1.1222 + * that will never throw {@link ConcurrentModificationException},
1.1223 + * and guarantees to traverse elements as they existed upon
1.1224 + * construction of the iterator, and may (but is not guaranteed to)
1.1225 + * reflect any modifications subsequent to construction.
1.1226 + */
1.1227 + public Set<Map.Entry<K,V>> entrySet() {
1.1228 + Set<Map.Entry<K,V>> es = entrySet;
1.1229 + return (es != null) ? es : (entrySet = new EntrySet());
1.1230 + }
1.1231 +
1.1232 + /**
1.1233 + * Returns an enumeration of the keys in this table.
1.1234 + *
1.1235 + * @return an enumeration of the keys in this table
1.1236 + * @see #keySet()
1.1237 + */
    public Enumeration<K> keys() {
        // KeyIterator implements Enumeration for Hashtable compatibility.
        return new KeyIterator();
    }
1.1241 +
1.1242 + /**
1.1243 + * Returns an enumeration of the values in this table.
1.1244 + *
1.1245 + * @return an enumeration of the values in this table
1.1246 + * @see #values()
1.1247 + */
    public Enumeration<V> elements() {
        // ValueIterator implements Enumeration for Hashtable compatibility.
        return new ValueIterator();
    }
1.1251 +
1.1252 + /* ---------------- Iterator Support -------------- */
1.1253 +
    /**
     * Base class for all iterators/enumerations over this map. Traverses
     * segments and their tables in backwards order, producing a weakly
     * consistent view: it never throws ConcurrentModificationException.
     */
    abstract class HashIterator {
        int nextSegmentIndex;           // next segment to visit (walks downward)
        int nextTableIndex;             // next bucket in currentTable (walks downward)
        HashEntry<K,V>[] currentTable;  // table of the segment being traversed
        HashEntry<K, V> nextEntry;      // next entry to return, or null when exhausted
        HashEntry<K, V> lastReturned;   // last entry returned, for remove()

        HashIterator() {
            nextSegmentIndex = segments.length - 1;
            nextTableIndex = -1;
            advance();
        }

        /**
         * Set nextEntry to first node of next non-empty table
         * (in backwards order, to simplify checks).
         */
        final void advance() {
            for (;;) {
                if (nextTableIndex >= 0) {
                    // Still buckets left in the current table; take the first
                    // non-null chain head.
                    if ((nextEntry = entryAt(currentTable,
                                             nextTableIndex--)) != null)
                        break;
                }
                else if (nextSegmentIndex >= 0) {
                    // Current table exhausted: move to the next created,
                    // non-empty segment.
                    Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
                    if (seg != null && (currentTable = seg.table) != null)
                        nextTableIndex = currentTable.length - 1;
                }
                else
                    break; // no segments left; nextEntry stays null
            }
        }

        /**
         * Returns the next entry and advances the cursor.
         *
         * @throws NoSuchElementException if the traversal is exhausted
         */
        final HashEntry<K,V> nextEntry() {
            HashEntry<K,V> e = nextEntry;
            if (e == null)
                throw new NoSuchElementException();
            lastReturned = e; // cannot assign until after null check
            if ((nextEntry = e.next) == null)
                advance();
            return e;
        }

        public final boolean hasNext() { return nextEntry != null; }
        public final boolean hasMoreElements() { return nextEntry != null; }

        /**
         * Removes the last returned entry from the map (Iterator contract).
         *
         * @throws IllegalStateException if nextEntry() has not been called,
         *         or remove() was already called after the last nextEntry()
         */
        public final void remove() {
            if (lastReturned == null)
                throw new IllegalStateException();
            ConcurrentHashMap.this.remove(lastReturned.key);
            lastReturned = null;
        }
    }
1.1308 +
1.1309 + final class KeyIterator
1.1310 + extends HashIterator
1.1311 + implements Iterator<K>, Enumeration<K>
1.1312 + {
1.1313 + public final K next() { return super.nextEntry().key; }
1.1314 + public final K nextElement() { return super.nextEntry().key; }
1.1315 + }
1.1316 +
1.1317 + final class ValueIterator
1.1318 + extends HashIterator
1.1319 + implements Iterator<V>, Enumeration<V>
1.1320 + {
1.1321 + public final V next() { return super.nextEntry().value; }
1.1322 + public final V nextElement() { return super.nextEntry().value; }
1.1323 + }
1.1324 +
1.1325 + /**
1.1326 + * Custom Entry class used by EntryIterator.next(), that relays
1.1327 + * setValue changes to the underlying map.
1.1328 + */
    final class WriteThroughEntry
        extends AbstractMap.SimpleEntry<K,V>
    {
        // Snapshot entry produced by EntryIterator.next(); holds a copy of the
        // key/value pair but forwards setValue() to the enclosing map.
        WriteThroughEntry(K k, V v) {
            super(k,v);
        }

        /**
         * Set our entry's value and write through to the map. The
         * value to return is somewhat arbitrary here. Since a
         * WriteThroughEntry does not necessarily track asynchronous
         * changes, the most recent "previous" value could be
         * different from what we return (or could even have been
         * removed in which case the put will re-establish). We do not
         * and cannot guarantee more.
         */
        public V setValue(V value) {
            if (value == null) throw new NullPointerException();
            // Update the local snapshot first, then propagate to the map.
            V v = super.setValue(value);
            ConcurrentHashMap.this.put(getKey(), value);
            return v;
        }
    }
1.1352 +
1.1353 + final class EntryIterator
1.1354 + extends HashIterator
1.1355 + implements Iterator<Entry<K,V>>
1.1356 + {
1.1357 + public Map.Entry<K,V> next() {
1.1358 + HashEntry<K,V> e = super.nextEntry();
1.1359 + return new WriteThroughEntry(e.key, e.value);
1.1360 + }
1.1361 + }
1.1362 +
    /**
     * Live view of the map's keys; every operation delegates to the
     * enclosing map, so mutations are visible in both directions.
     */
    final class KeySet extends AbstractSet<K> {
        public Iterator<K> iterator() {
            return new KeyIterator();
        }
        public int size() {
            return ConcurrentHashMap.this.size();
        }
        public boolean isEmpty() {
            return ConcurrentHashMap.this.isEmpty();
        }
        public boolean contains(Object o) {
            return ConcurrentHashMap.this.containsKey(o);
        }
        public boolean remove(Object o) {
            // Map.remove returns the previous value; non-null means removed.
            return ConcurrentHashMap.this.remove(o) != null;
        }
        public void clear() {
            ConcurrentHashMap.this.clear();
        }
    }
1.1383 +
    /**
     * Live view of the map's values; every operation delegates to the
     * enclosing map, so mutations are visible in both directions.
     */
    final class Values extends AbstractCollection<V> {
        public Iterator<V> iterator() {
            return new ValueIterator();
        }
        public int size() {
            return ConcurrentHashMap.this.size();
        }
        public boolean isEmpty() {
            return ConcurrentHashMap.this.isEmpty();
        }
        public boolean contains(Object o) {
            return ConcurrentHashMap.this.containsValue(o);
        }
        public void clear() {
            ConcurrentHashMap.this.clear();
        }
    }
1.1401 +
1.1402 + final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
1.1403 + public Iterator<Map.Entry<K,V>> iterator() {
1.1404 + return new EntryIterator();
1.1405 + }
1.1406 + public boolean contains(Object o) {
1.1407 + if (!(o instanceof Map.Entry))
1.1408 + return false;
1.1409 + Map.Entry<?,?> e = (Map.Entry<?,?>)o;
1.1410 + V v = ConcurrentHashMap.this.get(e.getKey());
1.1411 + return v != null && v.equals(e.getValue());
1.1412 + }
1.1413 + public boolean remove(Object o) {
1.1414 + if (!(o instanceof Map.Entry))
1.1415 + return false;
1.1416 + Map.Entry<?,?> e = (Map.Entry<?,?>)o;
1.1417 + return ConcurrentHashMap.this.remove(e.getKey(), e.getValue());
1.1418 + }
1.1419 + public int size() {
1.1420 + return ConcurrentHashMap.this.size();
1.1421 + }
1.1422 + public boolean isEmpty() {
1.1423 + return ConcurrentHashMap.this.isEmpty();
1.1424 + }
1.1425 + public void clear() {
1.1426 + ConcurrentHashMap.this.clear();
1.1427 + }
1.1428 + }
1.1429 +
1.1430 + /* ---------------- Serialization Support -------------- */
1.1431 +
1.1432 + /**
1.1433 + * Save the state of the <tt>ConcurrentHashMap</tt> instance to a
1.1434 + * stream (i.e., serialize it).
1.1435 + * @param s the stream
1.1436 + * @serialData
1.1437 + * the key (Object) and value (Object)
1.1438 + * for each key-value mapping, followed by a null pair.
1.1439 + * The key-value mappings are emitted in no particular order.
1.1440 + */
    private void writeObject(java.io.ObjectOutputStream s) throws IOException {
        // force all segments for serialization compatibility
        for (int k = 0; k < segments.length; ++k)
            ensureSegment(k);
        s.defaultWriteObject();

        final Segment<K,V>[] segments = this.segments;
        for (int k = 0; k < segments.length; ++k) {
            Segment<K,V> seg = segmentAt(segments, k);
            // Lock each segment while writing its entries so the serialized
            // view of that segment is internally consistent.
            seg.lock();
            try {
                HashEntry<K,V>[] tab = seg.table;
                for (int i = 0; i < tab.length; ++i) {
                    HashEntry<K,V> e;
                    for (e = entryAt(tab, i); e != null; e = e.next) {
                        s.writeObject(e.key);
                        s.writeObject(e.value);
                    }
                }
            } finally {
                seg.unlock();
            }
        }
        // A null key/value pair terminates the mapping stream; readObject
        // relies on this sentinel.
        s.writeObject(null);
        s.writeObject(null);
    }
1.1467 +
1.1468 + /**
1.1469 + * Reconstitute the <tt>ConcurrentHashMap</tt> instance from a
1.1470 + * stream (i.e., deserialize it).
1.1471 + * @param s the stream
1.1472 + */
    @SuppressWarnings("unchecked")
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException {
        s.defaultReadObject();

        // Re-initialize segments to be minimally sized, and let grow.
        int cap = MIN_SEGMENT_TABLE_CAPACITY;
        final Segment<K,V>[] segments = this.segments;
        for (int k = 0; k < segments.length; ++k) {
            Segment<K,V> seg = segments[k];
            if (seg != null) {
                // Reset each deserialized segment to an empty minimal table;
                // the puts below repopulate and resize as needed.
                seg.threshold = (int)(cap * seg.loadFactor);
                seg.table = (HashEntry<K,V>[]) new HashEntry[cap];
            }
        }

        // Read the keys and values, and put the mappings in the table
        for (;;) {
            K key = (K) s.readObject();
            V value = (V) s.readObject();
            // A null key is the end-of-stream sentinel written by writeObject.
            if (key == null)
                break;
            put(key, value);
        }
    }
1.1498 +
    // Unsafe mechanics: array base offsets and index-scale shifts used to
    // address segments[] and HashEntry[] slots via sun.misc.Unsafe.
    private static final sun.misc.Unsafe UNSAFE;
    private static final long SBASE;  // byte offset of element 0 of a Segment[]
    private static final int SSHIFT;  // log2 of the per-element scale of a Segment[]
    private static final long TBASE;  // byte offset of element 0 of a HashEntry[]
    private static final int TSHIFT;  // log2 of the per-element scale of a HashEntry[]

    static {
        int ss, ts;
        try {
            UNSAFE = sun.misc.Unsafe.getUnsafe();
            Class tc = HashEntry[].class;
            Class sc = Segment[].class;
            TBASE = UNSAFE.arrayBaseOffset(tc);
            SBASE = UNSAFE.arrayBaseOffset(sc);
            ts = UNSAFE.arrayIndexScale(tc);
            ss = UNSAFE.arrayIndexScale(sc);
        } catch (Exception e) {
            throw new Error(e);
        }
        // Element offsets are computed as (i << SHIFT) + BASE, which requires
        // each per-element scale to be an exact power of two.
        if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
            throw new Error("data type scale not a power of two");
        SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
        TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
    }
1.1524 +
1.1525 +}