/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * This file is available under and governed by the GNU General Public
 * License version 2 only, as published by the Free Software Foundation.
 * However, the following notice accompanied the original version of this
 * file:
 *
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

package java.util.concurrent;

import java.util.AbstractQueue;
import java.util.Collection;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;

/**
 * An unbounded {@link TransferQueue} based on linked nodes.
 * This queue orders elements FIFO (first-in-first-out) with respect
 * to any given producer.  The <em>head</em> of the queue is that
 * element that has been on the queue the longest time for some
 * producer.  The <em>tail</em> of the queue is that element that has
 * been on the queue the shortest time for some producer.
 *
 * <p>Beware that, unlike in most collections, the {@code size} method
 * is <em>NOT</em> a constant-time operation. Because of the
 * asynchronous nature of these queues, determining the current number
 * of elements requires a traversal of the elements, and so may report
 * inaccurate results if this collection is modified during traversal.
 * Additionally, the bulk operations {@code addAll},
 * {@code removeAll}, {@code retainAll}, {@code containsAll},
 * {@code equals}, and {@code toArray} are <em>not</em> guaranteed
 * to be performed atomically. For example, an iterator operating
 * concurrently with an {@code addAll} operation might view only some
 * of the added elements.
 *
 * <p>This class and its iterator implement all of the
 * <em>optional</em> methods of the {@link Collection} and {@link
 * Iterator} interfaces.
 *
 * <p>Memory consistency effects: As with other concurrent
 * collections, actions in a thread prior to placing an object into a
 * {@code LinkedTransferQueue}
 * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
 * actions subsequent to the access or removal of that element from
 * the {@code LinkedTransferQueue} in another thread.
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
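 *
 * <p>As an illustrative sketch only (not part of the specification;
 * exception handling elided), a producer can hand an element directly
 * to a consumer:
 *
 * <pre> {@code
 * final LinkedTransferQueue<String> queue = new LinkedTransferQueue<String>();
 *
 * // Consumer: blocks in take() until an element is available.
 * new Thread(new Runnable() {
 *   public void run() {
 *     try {
 *       String s = queue.take(); // receives "hello"
 *     } catch (InterruptedException ignore) {}
 *   }
 * }).start();
 *
 * // Producer: transfer() returns only after a consumer has received
 * // the element, unlike put(), which returns immediately.
 * queue.transfer("hello");}</pre>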
 *
 * @param <E> the type of elements held in this collection
 */
public class LinkedTransferQueue<E> extends AbstractQueue<E>
    implements TransferQueue<E>, java.io.Serializable {
    private static final long serialVersionUID = -3223113410248163686L;

    /*
     * *** Overview of Dual Queues with Slack ***
     *
     * Dual Queues, introduced by Scherer and Scott
     * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are
     * (linked) queues in which nodes may represent either data or
     * requests.  When a thread tries to enqueue a data node, but
     * encounters a request node, it instead "matches" and removes it;
     * and vice versa for enqueuing requests.  Blocking Dual Queues
     * arrange that threads enqueuing unmatched requests block until
     * other threads provide the match.  Dual Synchronous Queues (see
     * Scherer, Lea, & Scott
     * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf)
     * additionally arrange that threads enqueuing unmatched data also
     * block.  Dual Transfer Queues support all of these modes, as
     * dictated by callers.
     *
     * A FIFO dual queue may be implemented using a variation of the
     * Michael & Scott (M&S) lock-free queue algorithm
     * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf).
     * It maintains two pointer fields, "head", pointing to a
     * (matched) node that in turn points to the first actual
     * (unmatched) queue node (or null if empty); and "tail" that
     * points to the last node on the queue (or again null if
     * empty). For example, here is a possible queue with four data
     * elements:
     *
     *  head                tail
     *    |                   |
     *    v                   v
     *    M -> U -> U -> U -> U
     *
     * The M&S queue algorithm is known to be prone to scalability and
     * overhead limitations when maintaining (via CAS) these head and
     * tail pointers. This has led to the development of
     * contention-reducing variants such as elimination arrays (see
     * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and
     * optimistic back pointers (see Ladan-Mozes & Shavit
     * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf).
     * However, the nature of dual queues enables a simpler tactic for
     * improving M&S-style implementations when dual-ness is needed.
     *
     * In a dual queue, each node must atomically maintain its match
     * status. While there are other possible variants, we implement
     * this here as: for a data-mode node, matching entails CASing an
     * "item" field from a non-null data value to null upon match, and
     * vice-versa for request nodes, CASing from null to a data
     * value. (Note that the linearization properties of this style of
     * queue are easy to verify -- elements are made available by
     * linking, and unavailable by matching.) Compared to plain M&S
     * queues, this property of dual queues requires one additional
     * successful atomic operation per enq/deq pair. But it also
     * enables lower cost variants of queue maintenance mechanics. (A
     * variation of this idea applies even for non-dual queues that
     * support deletion of interior elements, such as
     * j.u.c.ConcurrentLinkedQueue.)
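     *
     * To make the item-CAS convention concrete, here is a minimal
     * sketch (not the code used below, which additionally maintains
     * head/tail slack) of a consumer matching a data node p:
     *
     *   Object x = p.item;
     *   if (x != null && x != p && p.casItem(x, null)) {
     *       LockSupport.unpark(p.waiter); // wake a blocked producer
     *       // x is the transferred element
     *   }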
     *
     * Once a node is matched, its match status can never again
     * change.  We may thus arrange that the linked list of them
     * contain a prefix of zero or more matched nodes, followed by a
     * suffix of zero or more unmatched nodes. (Note that we allow
     * both the prefix and suffix to be zero length, which in turn
     * means that we do not use a dummy header.)  If we were not
     * concerned with either time or space efficiency, we could
     * correctly perform enqueue and dequeue operations by traversing
     * from a pointer to the initial node; CASing the item of the
     * first unmatched node on match and CASing the next field of the
     * trailing node on appends. (Plus some special-casing when
     * initially empty.)  While this would be a terrible idea in
     * itself, it does have the benefit of not requiring ANY atomic
     * updates on head/tail fields.
     *
     * We introduce here an approach that lies between the extremes of
     * never versus always updating queue (head and tail) pointers.
     * This offers a tradeoff between sometimes requiring extra
     * traversal steps to locate the first and/or last unmatched
     * nodes, versus the reduced overhead and contention of fewer
     * updates to queue pointers. For example, a possible snapshot of
     * a queue is:
     *
     *  head           tail
     *    |              |
     *    v              v
     *    M -> M -> U -> U -> U -> U
     *
     * The best value for this "slack" (the targeted maximum distance
     * between the value of "head" and the first unmatched node, and
     * similarly for "tail") is an empirical matter. We have found
     * that using very small constants in the range of 1-3 works best
     * over a range of platforms. Larger values introduce increasing
     * costs of cache misses and risks of long traversal chains, while
     * smaller values increase CAS contention and overhead.
     *
     * Dual queues with slack differ from plain M&S dual queues by
     * virtue of only sometimes updating head or tail pointers when
     * matching, appending, or even traversing nodes, in order to
     * maintain a targeted slack. The idea of "sometimes" may be
     * operationalized in several ways. The simplest is to use a
     * per-operation counter incremented on each traversal step, and
     * to try (via CAS) to update the associated queue pointer
     * whenever the count exceeds a threshold. Another, that requires
     * more overhead, is to use random number generators to update
     * with a given probability per traversal step.
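     *
     * As a purely hypothetical sketch of the counter-based strategy
     * (the code below instead infers slack from pointer equality, and
     * SLACK_TARGET and steps are illustrative names only), a
     * traversal might do:
     *
     *   if (++steps > SLACK_TARGET)
     *       casHead(h, p);  // CAS failure is fine; retried later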
     *
     * In any strategy along these lines, because CASes updating
     * fields may fail, the actual slack may exceed targeted
     * slack. However, they may be retried at any time to maintain
     * targets.  Even when using very small slack values, this
     * approach works well for dual queues because it allows all
     * operations up to the point of matching or appending an item
     * (hence potentially allowing progress by another thread) to be
     * read-only, thus not introducing any further contention.  As
     * described below, we implement this by performing slack
     * maintenance retries only after these points.
     *
     * As an accompaniment to such techniques, traversal overhead can
     * be further reduced without increasing contention of head
     * pointer updates: Threads may sometimes shortcut the "next" link
     * path from the current "head" node to be closer to the currently
     * known first unmatched node, and similarly for tail. Again, this
     * may be triggered using thresholds or randomization.
     *
     * These ideas must be further extended to avoid unbounded amounts
     * of costly-to-reclaim garbage caused by the sequential "next"
     * links of nodes starting at old forgotten head nodes: As first
     * described in detail by Boehm
     * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC
     * delays noticing that any arbitrarily old node has become
     * garbage, all newer dead nodes will also be unreclaimed.
     * (Similar issues arise in non-GC environments.)  To cope with
     * this in our implementation, upon CASing to advance the head
     * pointer, we set the "next" link of the previous head to point
     * only to itself; thus limiting the length of connected dead lists.
     * (We also take similar care to wipe out possibly garbage
     * retaining values held in other Node fields.) However, doing so
     * adds some further complexity to traversal: If any "next"
     * pointer links to itself, it indicates that the current thread
     * has lagged behind a head-update, and so the traversal must
     * continue from the "head".  Traversals trying to find the
     * current tail starting from "tail" may also encounter
     * self-links, in which case they also continue at "head".
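     *
     * In code, this self-link check is a one-line idiom (the same
     * pattern used by method succ and the traversal loops below):
     *
     *   Node n = p.next;
     *   p = (p != n) ? n : head;  // p was off-list; restart at head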
     *
     * It is tempting in slack-based schemes to not even use CAS for
     * updates (similarly to Ladan-Mozes & Shavit). However, this
     * cannot be done for head updates under the above link-forgetting
     * mechanics because an update may leave head at a detached node.
     * And while direct writes are possible for tail updates, they
     * increase the risk of long retraversals, and hence long garbage
     * chains, which can be much more costly than is worthwhile
     * considering that the cost difference of performing a CAS vs
     * write is smaller when they are not triggered on each operation
     * (especially considering that writes and CASes equally require
     * additional GC bookkeeping ("write barriers") that are sometimes
     * more costly than the writes themselves because of contention).
     *
     * *** Overview of implementation ***
     *
     * We use a threshold-based approach to updates, with a slack
     * threshold of two -- that is, we update head/tail when the
     * current pointer appears to be two or more steps away from the
     * first/last node. The slack value is hard-wired: a path greater
     * than one is naturally implemented by checking equality of
     * traversal pointers except when the list has only one element,
     * in which case we keep the slack threshold at one. Avoiding
     * tracking explicit counts across method calls slightly
     * simplifies an already-messy implementation. Using randomization
     * would probably work better if there were a low-quality
     * dirt-cheap per-thread one available, but even ThreadLocalRandom
     * is too heavy for these purposes.
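     *
     * Concretely, the equality-based slack test appears below (in
     * method xfer) as a loop that tries to advance head by two nodes
     * at a time after a match; schematically:
     *
     *   if (head == h && casHead(h, n == null ? q : n))
     *       h.forgetNext();  // self-link the old head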
     *
     * With such a small slack threshold value, it is not worthwhile
     * to augment this with path short-circuiting (i.e., unsplicing
     * interior nodes) except in the case of cancellation/removal (see
     * below).
     *
     * We allow both the head and tail fields to be null before any
     * nodes are enqueued; initializing upon first append. This
     * simplifies some other logic, as well as providing more
     * efficient explicit control paths instead of letting JVMs insert
     * implicit NullPointerExceptions when they are null. While not
     * currently fully implemented, we also leave open the possibility
     * of re-nulling these fields when empty (which is complicated to
     * arrange, for little benefit.)
     *
     * All enqueue/dequeue operations are handled by the single method
     * "xfer" with parameters indicating whether to act as some form
     * of offer, put, poll, take, or transfer (each possibly with
     * timeout). The relative complexity of using one monolithic
     * method is outweighed by the code bulk and maintenance problems
     * of using separate methods for each case.
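     *
     * For reference, the public methods below reduce to xfer calls of
     * the following forms (taken directly from their bodies):
     *
     *   put(e)             -> xfer(e, true, ASYNC, 0)
     *   take()             -> xfer(null, false, SYNC, 0)
     *   poll()             -> xfer(null, false, NOW, 0)
     *   tryTransfer(e,t,u) -> xfer(e, true, TIMED, u.toNanos(t))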
     *
     * Operation consists of up to three phases. The first is
     * implemented within method xfer, the second in tryAppend, and
     * the third in method awaitMatch.
     *
     * 1. Try to match an existing node
     *
     *    Starting at head, skip already-matched nodes until finding
     *    an unmatched node of opposite mode, if one exists, in which
     *    case we match it and return, if necessary also updating
     *    head to one past the matched node (or to the node itself if
     *    the list has no other unmatched nodes). If the CAS misses,
     *    then a loop retries advancing head by two steps until either
     *    success or the slack is at most two. By requiring that each
     *    attempt advances head by two (if applicable), we ensure that
     *    the slack does not grow without bound. Traversals also check
     *    if the initial head is now off-list, in which case they
     *    start at the new head.
     *
     *    If no candidates are found and the call was untimed
     *    poll/offer (argument "how" is NOW), return.
     *
     * 2. Try to append a new node (method tryAppend)
     *
     *    Starting at current tail pointer, find the actual last node
     *    and try to append a new node (or if head was null, establish
     *    the first node). Nodes can be appended only if their
     *    predecessors are either already matched or are of the same
     *    mode. If we detect otherwise, then a new node with opposite
     *    mode must have been appended during traversal, so we must
     *    restart at phase 1. The traversal and update steps are
     *    otherwise similar to phase 1: Retrying upon CAS misses and
     *    checking for staleness. In particular, if a self-link is
     *    encountered, then we can safely jump to a node on the list
     *    by continuing the traversal at current head.
     *
     *    On successful append, if the call was ASYNC, return.
     *
     * 3. Await match or cancellation (method awaitMatch)
     *
     *    Wait for another thread to match the node, cancelling
     *    instead if the current thread was interrupted or the wait
     *    timed out. On multiprocessors, we use front-of-queue
     *    spinning: If a node appears to be the first unmatched node
     *    in the queue, it spins a bit before blocking. In either
     *    case, before blocking it tries to unsplice any nodes between
     *    the current "head" and the first unmatched node.
     *
     *    Front-of-queue spinning vastly improves performance of
     *    heavily contended queues. And so long as it is relatively
     *    brief and "quiet", spinning does not much impact performance
     *    of less-contended queues. During spins threads check their
     *    interrupt status and generate a thread-local random number
     *    to decide to occasionally perform a Thread.yield. While
     *    yield has underdefined specs, we assume it might help, and
     *    will not hurt, in limiting impact of spinning on busy
     *    systems. We also use smaller (1/2) spins for nodes that are
     *    not known to be front but whose predecessors have not
     *    blocked -- these "chained" spins avoid artifacts of
     *    front-of-queue rules which otherwise lead to alternating
     *    nodes spinning vs blocking. Further, front threads that
     *    represent phase changes (from data to request node or vice
     *    versa) compared to their predecessors receive additional
     *    chained spins, reflecting longer paths typically required to
     *    unblock threads during phase changes.
     *
     * ** Unlinking removed interior nodes **
     *
     * In addition to minimizing garbage retention via self-linking
     * described above, we also unlink removed interior nodes. These
     * may arise due to timed out or interrupted waits, or calls to
     * remove(x) or Iterator.remove.  Normally, given a node that was
     * at one time known to be the predecessor of some node s that is
     * to be removed, we can unsplice s by CASing the next field of
     * its predecessor if it still points to s (otherwise s must
     * already have been removed or is now offlist). But there are two
     * situations in which we cannot guarantee to make node s
     * unreachable in this way: (1) If s is the trailing node of list
     * (i.e., with null next), then it is pinned as the target node
     * for appends, so can only be removed later after other nodes are
     * appended. (2) We cannot necessarily unlink s given a
     * predecessor node that is matched (including the case of being
     * cancelled): the predecessor may already be unspliced, in which
     * case some previous reachable node may still point to s.
     * (For further explanation see Herlihy & Shavit "The Art of
     * Multiprocessor Programming" chapter 9.)  In both cases, though,
     * we can rule out the need for further action if either s
     * or its predecessor are (or can be made to be) at, or fall off
     * from, the head of the list.
     *
     * Without taking these into account, it would be possible for an
     * unbounded number of supposedly removed nodes to remain
     * reachable. Situations leading to such buildup are uncommon but
     * can occur in practice; for example when a series of short timed
     * calls to poll repeatedly time out but never otherwise fall off
     * the list because of an untimed call to take at the front of the
     * queue.
     *
     * When these cases arise, rather than always retraversing the
     * entire list to find an actual predecessor to unlink (which
     * won't help for case (1) anyway), we record a conservative
     * estimate of possible unsplice failures (in "sweepVotes").
     * We trigger a full sweep when the estimate exceeds a threshold
     * ("SWEEP_THRESHOLD") indicating the maximum number of estimated
     * removal failures to tolerate before sweeping through, unlinking
     * cancelled nodes that were not unlinked upon initial removal.
     * We perform sweeps by the thread hitting threshold (rather than
     * background threads or by spreading work to other threads)
     * because in the main contexts in which removal occurs, the
     * caller is already timed-out, cancelled, or performing a
     * potentially O(n) operation (e.g. remove(x)), none of which are
     * time-critical enough to warrant the overhead that alternatives
     * would impose on other threads.
     *
     * Because the sweepVotes estimate is conservative, and because
     * nodes become unlinked "naturally" as they fall off the head of
     * the queue, and because we allow votes to accumulate even while
     * sweeps are in progress, there are typically significantly fewer
     * such nodes than estimated. Choice of a threshold value
     * balances the likelihood of wasted effort and contention, versus
     * providing a worst-case bound on retention of interior nodes in
     * quiescent queues. The value defined below was chosen
     * empirically to balance these under various timeout scenarios.
     *
     * Note that we cannot self-link unlinked interior nodes during
     * sweeps. However, the associated garbage chains terminate when
     * some successor ultimately falls off the head of the list and is
     * self-linked.
     */

    /** True if on multiprocessor */
    private static final boolean MP =
        Runtime.getRuntime().availableProcessors() > 1;

    /**
     * The number of times to spin (with randomly interspersed calls
     * to Thread.yield) on multiprocessor before blocking when a node
     * is apparently the first waiter in the queue.  See above for
     * explanation. Must be a power of two. The value is empirically
     * derived -- it works pretty well across a variety of processors,
     * numbers of CPUs, and OSes.
     */
    private static final int FRONT_SPINS   = 1 << 7;

    /**
     * The number of times to spin before blocking when a node is
     * preceded by another node that is apparently spinning.  Also
     * serves as an increment to FRONT_SPINS on phase changes, and as
     * base average frequency for yielding during spins. Must be a
     * power of two.
     */
    private static final int CHAINED_SPINS = FRONT_SPINS >>> 1;

    /**
     * The maximum number of estimated removal failures (sweepVotes)
     * to tolerate before sweeping through the queue unlinking
     * cancelled nodes that were not unlinked upon initial
     * removal. See above for explanation. The value must be at least
     * two to avoid useless sweeps when removing trailing nodes.
     */
    static final int SWEEP_THRESHOLD = 32;

    /**
     * Queue nodes. Uses Object, not E, for items to allow forgetting
     * them after use.  Relies heavily on Unsafe mechanics to minimize
     * unnecessary ordering constraints: Writes that are intrinsically
     * ordered wrt other accesses or CASes use simple relaxed forms.
     */
    static final class Node {
        final boolean isData;   // false if this is a request node
        volatile Object item;   // initially non-null if isData; CASed to match
        volatile Node next;     // next node in queue; self-linked when off-list
        volatile Thread waiter; // null until waiting

        // CAS methods for fields
        final boolean casNext(Node cmp, Node val) {
            return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
        }

        final boolean casItem(Object cmp, Object val) {
            // assert cmp == null || cmp.getClass() != Node.class;
            return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
        }

        /**
         * Constructs a new node.  Uses relaxed write because item can
         * only be seen after publication via casNext.
         */
        Node(Object item, boolean isData) {
            UNSAFE.putObject(this, itemOffset, item); // relaxed write
            this.isData = isData;
        }

        /**
         * Links node to itself to avoid garbage retention.  Called
         * only after CASing head field, so uses relaxed write.
         */
        final void forgetNext() {
            UNSAFE.putObject(this, nextOffset, this);
        }

        /**
         * Sets item to self and waiter to null, to avoid garbage
         * retention after matching or cancelling. Uses relaxed writes
         * because order is already constrained in the only calling
         * contexts: item is forgotten only after volatile/atomic
         * mechanics that extract items.  Similarly, clearing waiter
         * follows either CAS or return from park (if ever parked;
         * else we don't care).
         */
        final void forgetContents() {
            UNSAFE.putObject(this, itemOffset, this);
            UNSAFE.putObject(this, waiterOffset, null);
        }

        /**
         * Returns true if this node has been matched, including the
         * case of artificial matches due to cancellation.
         */
        final boolean isMatched() {
            Object x = item;
            return (x == this) || ((x == null) == isData);
        }

        /**
         * Returns true if this is an unmatched request node.
         */
        final boolean isUnmatchedRequest() {
            return !isData && item == null;
        }

        /**
         * Returns true if a node with the given mode cannot be
         * appended to this node because this node is unmatched and
         * has opposite data mode.
         */
        final boolean cannotPrecede(boolean haveData) {
            boolean d = isData;
            Object x;
            return d != haveData && (x = item) != this && (x != null) == d;
        }

        /**
         * Tries to artificially match a data node -- used by remove.
         */
        final boolean tryMatchData() {
            // assert isData;
            Object x = item;
            if (x != null && x != this && casItem(x, null)) {
                LockSupport.unpark(waiter);
                return true;
            }
            return false;
        }

        private static final long serialVersionUID = -3375979862319811754L;

        // Unsafe mechanics
        private static final sun.misc.Unsafe UNSAFE;
        private static final long itemOffset;
        private static final long nextOffset;
        private static final long waiterOffset;
        static {
            try {
                UNSAFE = sun.misc.Unsafe.getUnsafe();
                Class k = Node.class;
                itemOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("item"));
                nextOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("next"));
                waiterOffset = UNSAFE.objectFieldOffset
                    (k.getDeclaredField("waiter"));
            } catch (Exception e) {
                throw new Error(e);
            }
        }
    }

    /** head of the queue; null until first enqueue */
    transient volatile Node head;

    /** tail of the queue; null until first append */
    private transient volatile Node tail;

    /** The number of apparent failures to unsplice removed nodes */
    private transient volatile int sweepVotes;

    // CAS methods for fields
    private boolean casTail(Node cmp, Node val) {
        return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
    }

    private boolean casHead(Node cmp, Node val) {
        return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
    }

    private boolean casSweepVotes(int cmp, int val) {
        return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val);
    }

    /*
     * Possible values for "how" argument in xfer method.
     */
    private static final int NOW   = 0; // for untimed poll, tryTransfer
    private static final int ASYNC = 1; // for offer, put, add
    private static final int SYNC  = 2; // for transfer, take
    private static final int TIMED = 3; // for timed poll, tryTransfer

    @SuppressWarnings("unchecked")
    static <E> E cast(Object item) {
        // assert item == null || item.getClass() != Node.class;
        return (E) item;
    }

    /**
     * Implements all queuing methods. See above for explanation.
     *
     * @param e the item or null for take
     * @param haveData true if this is a put, else a take
     * @param how NOW, ASYNC, SYNC, or TIMED
     * @param nanos timeout in nanosecs, used only if mode is TIMED
     * @return an item if matched, else e
     * @throws NullPointerException if haveData mode but e is null
     */
    private E xfer(E e, boolean haveData, int how, long nanos) {
        if (haveData && (e == null))
            throw new NullPointerException();
        Node s = null;                        // the node to append, if needed

        retry:
        for (;;) {                            // restart on append race

            for (Node h = head, p = h; p != null;) { // find & match first node
                boolean isData = p.isData;
                Object item = p.item;
                if (item != p && (item != null) == isData) { // unmatched
                    if (isData == haveData)   // can't match
                        break;
                    if (p.casItem(item, e)) { // match
                        for (Node q = p; q != h;) {
                            Node n = q.next;  // update by 2 unless singleton
                            if (head == h && casHead(h, n == null ? q : n)) {
                                h.forgetNext();
                                break;
                            }                 // advance and retry
                            if ((h = head) == null ||
                                (q = h.next) == null || !q.isMatched())
                                break;        // unless slack < 2
                        }
                        LockSupport.unpark(p.waiter);
                        return this.<E>cast(item);
                    }
                }
                Node n = p.next;
                p = (p != n) ? n : (h = head); // Use head if p offlist
            }

            if (how != NOW) {                 // No matches available
                if (s == null)
                    s = new Node(e, haveData);
                Node pred = tryAppend(s, haveData);
                if (pred == null)
                    continue retry;           // lost race vs opposite mode
                if (how != ASYNC)
                    return awaitMatch(s, pred, e, (how == TIMED), nanos);
            }
            return e; // not waiting
        }
    }

    /**
     * Tries to append node s as tail.
     *
     * @param s the node to append
     * @param haveData true if appending in data mode
     * @return null on failure due to losing race with append in
     * different mode, else s's predecessor, or s itself if no
     * predecessor
     */
    private Node tryAppend(Node s, boolean haveData) {
        for (Node t = tail, p = t;;) {        // move p to last node and append
            Node n, u;                        // temps for reads of next & tail
            if (p == null && (p = head) == null) {
                if (casHead(null, s))
                    return s;                 // initialize
            }
            else if (p.cannotPrecede(haveData))
                return null;                  // lost race vs opposite mode
            else if ((n = p.next) != null)    // not last; keep traversing
                p = p != t && t != (u = tail) ? (t = u) : // stale tail
                    (p != n) ? n : null;      // restart if off list
            else if (!p.casNext(null, s))
                p = p.next;                   // re-read on CAS failure
            else {
                if (p != t) {                 // update if slack now >= 2
                    while ((tail != t || !casTail(t, s)) &&
                           (t = tail) != null &&
                           (s = t.next) != null && // advance and retry
                           (s = s.next) != null && s != t);
                }
                return p;
            }
        }
    }

    /**
     * Spins/yields/blocks until node s is matched or caller gives up.
     *
     * @param s the waiting node
     * @param pred the predecessor of s, or s itself if it has no
     * predecessor, or null if unknown (the null case does not occur
     * in any current calls but may in possible future extensions)
     * @param e the comparison value for checking match
     * @param timed if true, wait only until timeout elapses
     * @param nanos timeout in nanosecs, used only if timed is true
     * @return matched item, or e if unmatched on interrupt or timeout
     */
    private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
        long lastTime = timed ? System.nanoTime() : 0L;
        Thread w = Thread.currentThread();
        int spins = -1; // initialized after first item and cancel checks
        ThreadLocalRandom randomYields = null; // bound if needed

        for (;;) {
            Object item = s.item;
            if (item != e) {                  // matched
                // assert item != s;
                s.forgetContents();           // avoid garbage
                return this.<E>cast(item);
            }
            if ((w.isInterrupted() || (timed && nanos <= 0)) &&
                    s.casItem(e, s)) {        // cancel
                unsplice(pred, s);
                return e;
            }

            if (spins < 0) {                  // establish spins at/near front
                if ((spins = spinsFor(pred, s.isData)) > 0)
                    randomYields = ThreadLocalRandom.current();
            }
            else if (spins > 0) {             // spin
                --spins;
                if (randomYields.nextInt(CHAINED_SPINS) == 0)
                    Thread.yield();           // occasionally yield
            }
            else if (s.waiter == null) {
                s.waiter = w;                 // request unpark then recheck
            }
            else if (timed) {
                long now = System.nanoTime();
                if ((nanos -= now - lastTime) > 0)
                    LockSupport.parkNanos(this, nanos);
                lastTime = now;
            }
            else {
                LockSupport.park(this);
            }
        }
    }

    /**
     * Returns spin/yield value for a node with given predecessor and
     * data mode. See above for explanation.
     */
    private static int spinsFor(Node pred, boolean haveData) {
        if (MP && pred != null) {
            if (pred.isData != haveData)      // phase change
                return FRONT_SPINS + CHAINED_SPINS;
            if (pred.isMatched())             // probably at front
                return FRONT_SPINS;
            if (pred.waiter == null)          // pred apparently spinning
                return CHAINED_SPINS;
        }
        return 0;
    }

    /* -------------- Traversal methods -------------- */

    /**
     * Returns the successor of p, or the head node if p.next has been
     * linked to self, which will only be true if traversing with a
     * stale pointer that is now off the list.
     */
    final Node succ(Node p) {
        Node next = p.next;
        return (p == next) ? head : next;
    }

    /**
     * Returns the first unmatched node of the given mode, or null if
     * none.  Used by methods isEmpty, hasWaitingConsumer.
     */
    private Node firstOfMode(boolean isData) {
        for (Node p = head; p != null; p = succ(p)) {
            if (!p.isMatched())
                return (p.isData == isData) ? p : null;
        }
        return null;
    }

    /**
     * Returns the item in the first unmatched node with isData; or
     * null if none.  Used by peek.
     */
    private E firstDataItem() {
        for (Node p = head; p != null; p = succ(p)) {
            Object item = p.item;
            if (p.isData) {
                if (item != null && item != p)
                    return this.<E>cast(item);
            }
            else if (item == null)
                return null;
        }
        return null;
    }

    /**
     * Traverses and counts unmatched nodes of the given mode.
     * Used by methods size and getWaitingConsumerCount.
     */
    private int countOfMode(boolean data) {
        int count = 0;
        for (Node p = head; p != null; ) {
            if (!p.isMatched()) {
                if (p.isData != data)
                    return 0;
                if (++count == Integer.MAX_VALUE) // saturated
                    break;
            }
            Node n = p.next;
            if (n != p)
                p = n;
            else {              // list shifted under us; restart
                count = 0;
                p = head;
            }
        }
        return count;
    }

    final class Itr implements Iterator<E> {
        private Node nextNode;   // next node to return item for
        private E nextItem;      // the corresponding item
        private Node lastRet;    // last returned node, to support remove
        private Node lastPred;   // predecessor to unlink lastRet

        /**
         * Moves to next node after prev, or first node if prev null.
         */
        private void advance(Node prev) {
            /*
             * To track and avoid buildup of deleted nodes in the face
             * of calls to both Queue.remove and Itr.remove, we must
             * include variants of unsplice and sweep upon each
             * advance: Upon Itr.remove, we may need to catch up links
             * from lastPred, and upon other removes, we might need to
             * skip ahead from stale nodes and unsplice deleted ones
             * found while advancing.
             */

            Node r, b; // reset lastPred upon possible deletion of lastRet
            if ((r = lastRet) != null && !r.isMatched())
                lastPred = r;    // next lastPred is old lastRet
            else if ((b = lastPred) == null || b.isMatched())
                lastPred = null; // at start of list
            else {
                Node s, n;       // help with removal of lastPred.next
                while ((s = b.next) != null &&
                       s != b && s.isMatched() &&
                       (n = s.next) != null && n != s)
                    b.casNext(s, n);
            }

            this.lastRet = prev;

            for (Node p = prev, s, n;;) {
                s = (p == null) ? head : p.next;
                if (s == null)
                    break;
                else if (s == p) {
                    p = null;
                    continue;
                }
                Object item = s.item;
                if (s.isData) {
                    if (item != null && item != s) {
                        nextItem = LinkedTransferQueue.<E>cast(item);
                        nextNode = s;
                        return;
                    }
                }
                else if (item == null)
                    break;
                // assert s.isMatched();
                if (p == null)
                    p = s;
                else if ((n = s.next) == null)
                    break;
                else if (s == n)
                    p = null;
                else
                    p.casNext(s, n);
            }
            nextNode = null;
            nextItem = null;
        }

        Itr() {
            advance(null);
        }

        public final boolean hasNext() {
            return nextNode != null;
        }

        public final E next() {
            Node p = nextNode;
            if (p == null) throw new NoSuchElementException();
            E e = nextItem;
            advance(p);
            return e;
        }

        public final void remove() {
            final Node lastRet = this.lastRet;
            if (lastRet == null)
                throw new IllegalStateException();
            this.lastRet = null;
            if (lastRet.tryMatchData())
                unsplice(lastPred, lastRet);
        }
    }

    /* -------------- Removal methods -------------- */

    /**
     * Unsplices (now or later) the given deleted/cancelled node with
     * the given predecessor.
     *
     * @param pred a node that was at one time known to be the
     * predecessor of s, or null or s itself if s is/was at head
     * @param s the node to be unspliced
     */
    final void unsplice(Node pred, Node s) {
        s.forgetContents(); // forget unneeded fields
        /*
         * See above for rationale. Briefly: if pred still points to
         * s, try to unlink s.  If s cannot be unlinked, because it is
         * trailing node or pred might be unlinked, and neither pred
         * nor s are head or offlist, add to sweepVotes, and if enough
         * votes have accumulated, sweep.
         */
        if (pred != null && pred != s && pred.next == s) {
            Node n = s.next;
            if (n == null ||
                (n != s && pred.casNext(s, n) && pred.isMatched())) {
                for (;;) {               // check if at, or could be, head
                    Node h = head;
                    if (h == pred || h == s || h == null)
                        return;          // at head or list empty
                    if (!h.isMatched())
                        break;
                    Node hn = h.next;
                    if (hn == null)
                        return;          // now empty
                    if (hn != h && casHead(h, hn))
                        h.forgetNext();  // advance head
                }
                if (pred.next != pred && s.next != s) { // recheck if offlist
                    for (;;) {           // sweep now if enough votes
                        int v = sweepVotes;
                        if (v < SWEEP_THRESHOLD) {
                            if (casSweepVotes(v, v + 1))
                                break;
                        }
                        else if (casSweepVotes(v, 0)) {
                            sweep();
                            break;
                        }
                    }
                }
            }
        }
    }

    /**
     * Unlinks matched (typically cancelled) nodes encountered in a
     * traversal from head.
     */
    private void sweep() {
        for (Node p = head, s, n; p != null && (s = p.next) != null; ) {
            if (!s.isMatched())
                // Unmatched nodes are never self-linked
                p = s;
            else if ((n = s.next) == null) // trailing node is pinned
                break;
            else if (s == n)    // stale
                // No need to also check for p == s, since that implies s == n
                p = head;
            else
                p.casNext(s, n);
        }
    }

    /**
     * Main implementation of remove(Object)
     */
    private boolean findAndRemove(Object e) {
        if (e != null) {
            for (Node pred = null, p = head; p != null; ) {
                Object item = p.item;
                if (p.isData) {
                    if (item != null && item != p && e.equals(item) &&
                        p.tryMatchData()) {
                        unsplice(pred, p);
                        return true;
                    }
                }
                else if (item == null)
                    break;
                pred = p;
                if ((p = p.next) == pred) { // stale
                    pred = null;
                    p = head;
                }
            }
        }
        return false;
    }

    /**
     * Creates an initially empty {@code LinkedTransferQueue}.
     */
    public LinkedTransferQueue() {
    }

    /**
     * Creates a {@code LinkedTransferQueue}
     * initially containing the elements of the given collection,
     * added in traversal order of the collection's iterator.
     *
     * @param c the collection of elements to initially contain
     * @throws NullPointerException if the specified collection or any
     *         of its elements are null
     */
    public LinkedTransferQueue(Collection<? extends E> c) {
        this();
        addAll(c);
    }

    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block.
     *
     * @throws NullPointerException if the specified element is null
     */
    public void put(E e) {
        xfer(e, true, ASYNC, 0);
    }

    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block or
     * return {@code false}.
     *
     * @return {@code true} (as specified by
     *  {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer})
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e, long timeout, TimeUnit unit) {
        xfer(e, true, ASYNC, 0);
        return true;
    }

    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never return {@code false}.
     *
     * @return {@code true} (as specified by {@link Queue#offer})
     * @throws NullPointerException if the specified element is null
     */
    public boolean offer(E e) {
        xfer(e, true, ASYNC, 0);
        return true;
    }

    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never throw
     * {@link IllegalStateException} or return {@code false}.
     *
     * @return {@code true} (as specified by {@link Collection#add})
     * @throws NullPointerException if the specified element is null
     */
    public boolean add(E e) {
        xfer(e, true, ASYNC, 0);
        return true;
    }

    /**
     * Transfers the element to a waiting consumer immediately, if possible.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * otherwise returning {@code false} without enqueuing the element.
     *
     * @throws NullPointerException if the specified element is null
     */
    public boolean tryTransfer(E e) {
        return xfer(e, true, NOW, 0) == null;
    }

    /**
     * Transfers the element to a consumer, waiting if necessary to do so.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * else inserts the specified element at the tail of this queue
     * and waits until the element is received by a consumer.
     *
     * @throws NullPointerException if the specified element is null
     */
    public void transfer(E e) throws InterruptedException {
        if (xfer(e, true, SYNC, 0) != null) {
            Thread.interrupted(); // failure possible only due to interrupt
            throw new InterruptedException();
        }
    }

    /**
     * Transfers the element to a consumer if it is possible to do so
     * before the timeout elapses.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * else inserts the specified element at the tail of this queue
     * and waits until the element is received by a consumer,
     * returning {@code false} if the specified wait time elapses
     * before the element can be transferred.
     *
     * @throws NullPointerException if the specified element is null
     */
    public boolean tryTransfer(E e, long timeout, TimeUnit unit)
        throws InterruptedException {
        if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
            return true;
        if (!Thread.interrupted())
            return false;
        throw new InterruptedException();
    }

    public E take() throws InterruptedException {
        E e = xfer(null, false, SYNC, 0);
        if (e != null)
            return e;
        Thread.interrupted();
        throw new InterruptedException();
    }

    public E poll(long timeout, TimeUnit unit) throws InterruptedException {
        E e = xfer(null, false, TIMED, unit.toNanos(timeout));
        if (e != null || !Thread.interrupted())
            return e;
        throw new InterruptedException();
    }

    public E poll() {
        return xfer(null, false, NOW, 0);
    }

    /**
     * @throws NullPointerException     {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
    public int drainTo(Collection<? super E> c) {
        if (c == null)
            throw new NullPointerException();
        if (c == this)
            throw new IllegalArgumentException();
        int n = 0;
        E e;
        while ( (e = poll()) != null) {
            c.add(e);
            ++n;
        }
        return n;
    }

    /**
     * @throws NullPointerException     {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
    public int drainTo(Collection<? super E> c, int maxElements) {
        if (c == null)
            throw new NullPointerException();
        if (c == this)
            throw new IllegalArgumentException();
        int n = 0;
        E e;
        while (n < maxElements && (e = poll()) != null) {
            c.add(e);
            ++n;
        }
        return n;
    }

    /**
     * Returns an iterator over the elements in this queue in proper sequence.
     * The elements will be returned in order from first (head) to last (tail).
     *
     * <p>The returned iterator is a "weakly consistent" iterator that
     * will never throw {@link java.util.ConcurrentModificationException
     * ConcurrentModificationException}, and guarantees to traverse
     * elements as they existed upon construction of the iterator, and
     * may (but is not guaranteed to) reflect any modifications
     * subsequent to construction.
     *
     * @return an iterator over the elements in this queue in proper sequence
     */
    public Iterator<E> iterator() {
        return new Itr();
    }

    public E peek() {
        return firstDataItem();
    }

    /**
     * Returns {@code true} if this queue contains no elements.
     *
     * @return {@code true} if this queue contains no elements
     */
    public boolean isEmpty() {
        for (Node p = head; p != null; p = succ(p)) {
            if (!p.isMatched())
                return !p.isData;
        }
        return true;
    }

    public boolean hasWaitingConsumer() {
        return firstOfMode(false) != null;
    }

    /**
     * Returns the number of elements in this queue.  If this queue
     * contains more than {@code Integer.MAX_VALUE} elements, returns
     * {@code Integer.MAX_VALUE}.
     *
     * <p>Beware that, unlike in most collections, this method is
     * <em>NOT</em> a constant-time operation. Because of the
     * asynchronous nature of these queues, determining the current
     * number of elements requires an O(n) traversal.
     *
     * @return the number of elements in this queue
     */
    public int size() {
        return countOfMode(true);
    }

    public int getWaitingConsumerCount() {
        return countOfMode(false);
    }

    /**
     * Removes a single instance of the specified element from this queue,
     * if it is present.  More formally, removes an element {@code e} such
     * that {@code o.equals(e)}, if this queue contains one or more such
     * elements.
     * Returns {@code true} if this queue contained the specified element
     * (or equivalently, if this queue changed as a result of the call).
     *
     * @param o element to be removed from this queue, if present
     * @return {@code true} if this queue changed as a result of the call
     */
    public boolean remove(Object o) {
        return findAndRemove(o);
    }

    /**
     * Returns {@code true} if this queue contains the specified element.
     * More formally, returns {@code true} if and only if this queue contains
     * at least one element {@code e} such that {@code o.equals(e)}.
     *
     * @param o object to be checked for containment in this queue
     * @return {@code true} if this queue contains the specified element
     */
    public boolean contains(Object o) {
        if (o == null) return false;
        for (Node p = head; p != null; p = succ(p)) {
            Object item = p.item;
            if (p.isData) {
                if (item != null && item != p && o.equals(item))
                    return true;
            }
            else if (item == null)
                break;
        }
        return false;
    }

    /**
     * Always returns {@code Integer.MAX_VALUE} because a
     * {@code LinkedTransferQueue} is not capacity constrained.
     *
     * @return {@code Integer.MAX_VALUE} (as specified by
     *         {@link BlockingQueue#remainingCapacity()})
     */
    public int remainingCapacity() {
        return Integer.MAX_VALUE;
    }

    /**
     * Saves the state to a stream (that is, serializes it).
     *
     * @serialData All of the elements (each an {@code E}) in
     * the proper order, followed by a null
     * @param s the stream
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        s.defaultWriteObject();
        for (E e : this)
            s.writeObject(e);
        // Use trailing null as sentinel
        s.writeObject(null);
    }

    /**
     * Reconstitutes the Queue instance from a stream (that is,
     * deserializes it).
     *
     * @param s the stream
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        for (;;) {
            @SuppressWarnings("unchecked") E item = (E) s.readObject();
            if (item == null)
                break;
            else
                offer(item);
        }
    }

    // Unsafe mechanics

    private static final sun.misc.Unsafe UNSAFE;
    private static final long headOffset;
    private static final long tailOffset;
    private static final long sweepVotesOffset;
    static {
        try {
            UNSAFE = sun.misc.Unsafe.getUnsafe();
            Class k = LinkedTransferQueue.class;
            headOffset = UNSAFE.objectFieldOffset
                (k.getDeclaredField("head"));
            tailOffset = UNSAFE.objectFieldOffset
                (k.getDeclaredField("tail"));
            sweepVotesOffset = UNSAFE.objectFieldOffset
                (k.getDeclaredField("sweepVotes"));
        } catch (Exception e) {
            throw new Error(e);
        }
    }
}