From 5120bc853d12a7e4c3624a6b918ae41a26861c7c Mon Sep 17 00:00:00 2001 From: mszabo-wikia Date: Wed, 31 Oct 2018 19:12:46 +0100 Subject: [PATCH] Cleanup old JDK collection compatibility classes Signed-off-by: mszabo-wikia --- .../jersey/internal/util/JdkVersion.java | 7 +- .../collection/ByteBufferInputStream.java | 3 +- .../util/collection/ConcurrentHashMapV8.java | 3541 ----------------- .../util/collection/DataStructures.java | 130 +- .../util/collection/LinkedTransferQueue.java | 1371 ------- .../util/collection/ThreadLocalRandom.java | 212 - .../util/collection/TransferQueue.java | 1 + .../message/internal/MessageBodyFactory.java | 13 +- .../internal/util/JdkVersionCompareTest.java | 35 + .../internal/util/JdkVersionParseTest.java | 52 + .../filtering/EntityGraphProviderImpl.java | 6 +- .../filtering/ServerScopeProvider.java | 4 +- .../filtering/spi/FilteringHelper.java | 4 +- .../mvc/spi/AbstractTemplateProcessor.java | 4 +- 14 files changed, 143 insertions(+), 5240 deletions(-) delete mode 100644 core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ConcurrentHashMapV8.java delete mode 100644 core-common/src/main/java/org/glassfish/jersey/internal/util/collection/LinkedTransferQueue.java delete mode 100644 core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ThreadLocalRandom.java create mode 100644 core-common/src/test/java/org/glassfish/jersey/internal/util/JdkVersionCompareTest.java create mode 100644 core-common/src/test/java/org/glassfish/jersey/internal/util/JdkVersionParseTest.java diff --git a/core-common/src/main/java/org/glassfish/jersey/internal/util/JdkVersion.java b/core-common/src/main/java/org/glassfish/jersey/internal/util/JdkVersion.java index 2905bfe6b2..b36aa08023 100644 --- a/core-common/src/main/java/org/glassfish/jersey/internal/util/JdkVersion.java +++ b/core-common/src/main/java/org/glassfish/jersey/internal/util/JdkVersion.java @@ -74,15 +74,20 @@ public static JdkVersion parseVersion(String versionString) { } final String[] parts = versionString.split("\\.|_"); if (parts.length == 3) { + // e.g. "1.8.0" or "9.0.1" return new JdkVersion(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]), Integer.parseInt(parts[2]), 0); - } else { + } else if (parts.length == 4) { + // e.g. "1.8.0_141" return new JdkVersion(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]), Integer.parseInt(parts[2]), Integer.parseInt(parts[3])); + } else { + // e.g "11" + return new JdkVersion(Integer.parseInt(parts[0]), 0, 0, 0); } } catch (final Exception e) { return UNKNOWN_VERSION; diff --git a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ByteBufferInputStream.java b/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ByteBufferInputStream.java index e9754e34f5..ea8eb4c3c9 100644 --- a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ByteBufferInputStream.java +++ b/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ByteBufferInputStream.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedTransferQueue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -70,7 +71,7 @@ public final class ByteBufferInputStream extends NonBlockingInputStream { * to be read. 
*/ public ByteBufferInputStream() { - this.buffers = DataStructures.createLinkedTransferQueue(); + this.buffers = new LinkedTransferQueue<>(); this.current = null; } diff --git a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ConcurrentHashMapV8.java b/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ConcurrentHashMapV8.java deleted file mode 100644 index dc435d3de3..0000000000 --- a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ConcurrentHashMapV8.java +++ /dev/null @@ -1,3541 +0,0 @@ -/* - * Copyright (c) 2013, 2018 Oracle and/or its affiliates. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v. 2.0, which is available at - * http://www.eclipse.org/legal/epl-2.0. - * - * This Source Code may also be made available under the following Secondary - * Licenses when the conditions for such availability set forth in the - * Eclipse Public License v. 2.0 are satisfied: GNU General Public License, - * version 2 with the GNU Classpath Exception, which is available at - * https://www.gnu.org/software/classpath/license.html. - * - * SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 - */ - -/** - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ -package org.glassfish.jersey.internal.util.collection; - -import java.io.ObjectStreamField; -import java.io.Serializable; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.util.AbstractMap; -import java.util.Arrays; -import java.util.Collection; -import java.util.ConcurrentModificationException; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.LockSupport; -import java.util.concurrent.locks.ReentrantLock; - -/** - * A hash table supporting full concurrency of retrievals and - * high expected concurrency for updates. This class obeys the - * same functional specification as {@link java.util.Hashtable}, and - * includes versions of methods corresponding to each method of - * {@code Hashtable}. However, even though all operations are - * thread-safe, retrieval operations do not entail locking, - * and there is not any support for locking the entire table - * in a way that prevents all access. This class is fully - * interoperable with {@code Hashtable} in programs that rely on its - * thread safety but not on its synchronization details. - * - *

Retrieval operations (including {@code get}) generally do not - * block, so may overlap with update operations (including {@code put} - * and {@code remove}). Retrievals reflect the results of the most - * recently completed update operations holding upon their - * onset. (More formally, an update operation for a given key bears a - * happens-before relation with any (non-null) retrieval for - * that key reporting the updated value.) For aggregate operations - * such as {@code putAll} and {@code clear}, concurrent retrievals may - * reflect insertion or removal of only some entries. Similarly, - * Iterators and Enumerations return elements reflecting the state of - * the hash table at some point at or since the creation of the - * iterator/enumeration. They do not throw {@link - * ConcurrentModificationException}. However, iterators are designed - * to be used by only one thread at a time. Bear in mind that the - * results of aggregate status methods including {@code size}, {@code - * isEmpty}, and {@code containsValue} are typically useful only when - * a map is not undergoing concurrent updates in other threads. - * Otherwise the results of these methods reflect transient states - * that may be adequate for monitoring or estimation purposes, but not - * for program control. - * - *

The table is dynamically expanded when there are too many - * collisions (i.e., keys that have distinct hash codes but fall into - * the same slot modulo the table size), with the expected average - * effect of maintaining roughly two bins per mapping (corresponding - * to a 0.75 load factor threshold for resizing). There may be much - * variance around this average as mappings are added and removed, but - * overall, this maintains a commonly accepted time/space tradeoff for - * hash tables. However, resizing this or any other kind of hash - * table may be a relatively slow operation. When possible, it is a - * good idea to provide a size estimate as an optional {@code - * initialCapacity} constructor argument. An additional optional - * {@code loadFactor} constructor argument provides a further means of - * customizing initial table capacity by specifying the table density - * to be used in calculating the amount of space to allocate for the - * given number of elements. Also, for compatibility with previous - * versions of this class, constructors may optionally specify an - * expected {@code concurrencyLevel} as an additional hint for - * internal sizing. Note that using many keys with exactly the same - * {@code hashCode()} is a sure way to slow down performance of any - * hash table. To ameliorate impact, when keys are {@link Comparable}, - * this class may use comparison order among keys to help break ties. - * - *

A {@link Set} projection of a ConcurrentHashMapV8 may be created - * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed - * (using {@link #keySet(Object)} when only keys are of interest, and the - * mapped values are (perhaps transiently) not used or all take the - * same mapping value. - * - *

This class and its views and iterators implement all of the - * optional methods of the {@link Map} and {@link Iterator} - * interfaces. - * - *

Like {@link java.util.Hashtable} but unlike {@link HashMap}, this class - * does not allow {@code null} to be used as a key or value. - * - *

ConcurrentHashMapV8s support a set of sequential and parallel bulk - * operations that are designed - * to be safely, and often sensibly, applied even with maps that are - * being concurrently updated by other threads; for example, when - * computing a snapshot summary of the values in a shared registry. - * There are three kinds of operation, each with four forms, accepting - * functions with Keys, Values, Entries, and (Key, Value) arguments - * and/or return values. Because the elements of a ConcurrentHashMapV8 - * are not ordered in any particular way, and may be processed in - * different orders in different parallel executions, the correctness - * of supplied functions should not depend on any ordering, or on any - * other objects or values that may transiently change while - * computation is in progress; and except for forEach actions, should - * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry} - * objects do not support method {@code setValue}. - * - *

- * - *

These bulk operations accept a {@code parallelismThreshold} - * argument. Methods proceed sequentially if the current map size is - * estimated to be less than the given threshold. Using a value of - * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value - * of {@code 1} results in maximal parallelism by partitioning into - * enough subtasks to fully utilize the {@code - * ForkJoinPool#commonPool()} that is used for all parallel - * computations. Normally, you would initially choose one of these - * extreme values, and then measure performance of using in-between - * values that trade off overhead versus throughput. - * - *

The concurrency properties of bulk operations follow - * from those of ConcurrentHashMapV8: Any non-null result returned - * from {@code get(key)} and related access methods bears a - * happens-before relation with the associated insertion or - * update. The result of any bulk operation reflects the - * composition of these per-element relations (but is not - * necessarily atomic with respect to the map as a whole unless it - * is somehow known to be quiescent). Conversely, because keys - * and values in the map are never null, null serves as a reliable - * atomic indicator of the current lack of any result. To - * maintain this property, null serves as an implicit basis for - * all non-scalar reduction operations. For the double, long, and - * int versions, the basis should be one that, when combined with - * any other value, returns that other value (more formally, it - * should be the identity element for the reduction). Most common - * reductions have these properties; for example, computing a sum - * with basis 0 or a minimum with basis MAX_VALUE. - * - *

Search and transformation functions provided as arguments - * should similarly return null to indicate the lack of any result - * (in which case it is not used). In the case of mapped - * reductions, this also enables transformations to serve as - * filters, returning null (or, in the case of primitive - * specializations, the identity basis) if the element should not - * be combined. You can create compound transformations and - * filterings by composing them yourself under this "null means - * there is nothing there now" rule before using them in search or - * reduce operations. - * - *

Methods accepting and/or returning Entry arguments maintain - * key-value associations. They may be useful for example when - * finding the key for the greatest value. Note that "plain" Entry - * arguments can be supplied using {@code new - * AbstractMap.SimpleEntry(k,v)}. - * - *

Bulk operations may complete abruptly, throwing an - * exception encountered in the application of a supplied - * function. Bear in mind when handling such exceptions that other - * concurrently executing functions could also have thrown - * exceptions, or would have done so if the first exception had - * not occurred. - * - *

Speedups for parallel compared to sequential forms are common - * but not guaranteed. Parallel operations involving brief functions - * on small maps may execute more slowly than sequential forms if the - * underlying work to parallelize the computation is more expensive - * than the computation itself. Similarly, parallelization may not - * lead to much actual parallelism if all processors are busy - * performing unrelated tasks. - * - *

All arguments to all task methods must be non-null. - * - *

jsr166e note: During transition, this class - * uses nested functional interfaces with different names but the - * same forms as those expected for JDK8. - * - *

This class is a member of the - * - * Java Collections Framework. - * - * @param the type of keys maintained by this map - * @param the type of mapped values - * @author Doug Lea - */ -class ConcurrentHashMapV8 extends AbstractMap - implements ConcurrentMap, Serializable { - private static final long serialVersionUID = 7249069246763182397L; - - /* - * Overview: - * - * The primary design goal of this hash table is to maintain - * concurrent readability (typically method get(), but also - * iterators and related methods) while minimizing update - * contention. Secondary goals are to keep space consumption about - * the same or better than java.util.HashMap, and to support high - * initial insertion rates on an empty table by many threads. - * - * This map usually acts as a binned (bucketed) hash table. Each - * key-value mapping is held in a Node. Most nodes are instances - * of the basic Node class with hash, key, value, and next - * fields. However, various subclasses exist: TreeNodes are - * arranged in balanced trees, not lists. TreeBins hold the roots - * of sets of TreeNodes. ForwardingNodes are placed at the heads - * of bins during resizing. ReservationNodes are used as - * placeholders while establishing values in computeIfAbsent and - * related methods. The types TreeBin, ForwardingNode, and - * ReservationNode do not hold normal user keys, values, or - * hashes, and are readily distinguishable during search etc - * because they have negative hash fields and null key and value - * fields. (These special nodes are either uncommon or transient, - * so the impact of carrying around some unused fields is - * insignificant.) - * - * The table is lazily initialized to a power-of-two size upon the - * first insertion. Each bin in the table normally contains a - * list of Nodes (most often, the list has only zero or one Node). - * Table accesses require volatile/atomic reads, writes, and - * CASes. Because there is no other way to arrange this without - * adding further indirections, we use intrinsics - * (sun.misc.Unsafe) operations. - * - * We use the top (sign) bit of Node hash fields for control - * purposes -- it is available anyway because of addressing - * constraints. Nodes with negative hash fields are specially - * handled or ignored in map methods. - * - * Insertion (via put or its variants) of the first node in an - * empty bin is performed by just CASing it to the bin. This is - * by far the most common case for put operations under most - * key/hash distributions. Other update operations (insert, - * delete, and replace) require locks. We do not want to waste - * the space required to associate a distinct lock object with - * each bin, so instead use the first node of a bin list itself as - * a lock. Locking support for these locks relies on builtin - * "synchronized" monitors. - * - * Using the first node of a list as a lock does not by itself - * suffice though: When a node is locked, any update must first - * validate that it is still the first node after locking it, and - * retry if not. Because new nodes are always appended to lists, - * once a node is first in a bin, it remains first until deleted - * or the bin becomes invalidated (upon resizing). - * - * The main disadvantage of per-bin locks is that other update - * operations on other nodes in a bin list protected by the same - * lock can stall, for example when user equals() or mapping - * functions take a long time. However, statistically, under - * random hash codes, this is not a common problem. 
Ideally, the - * frequency of nodes in bins follows a Poisson distribution - * (http://en.wikipedia.org/wiki/Poisson_distribution) with a - * parameter of about 0.5 on average, given the resizing threshold - * of 0.75, although with a large variance because of resizing - * granularity. Ignoring variance, the expected occurrences of - * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The - * first values are: - * - * 0: 0.60653066 - * 1: 0.30326533 - * 2: 0.07581633 - * 3: 0.01263606 - * 4: 0.00157952 - * 5: 0.00015795 - * 6: 0.00001316 - * 7: 0.00000094 - * 8: 0.00000006 - * more: less than 1 in ten million - * - * Lock contention probability for two threads accessing distinct - * elements is roughly 1 / (8 * #elements) under random hashes. - * - * Actual hash code distributions encountered in practice - * sometimes deviate significantly from uniform randomness. This - * includes the case when N > (1<<30), so some keys MUST collide. - * Similarly for dumb or hostile usages in which multiple keys are - * designed to have identical hash codes or ones that differs only - * in masked-out high bits. So we use a secondary strategy that - * applies when the number of nodes in a bin exceeds a - * threshold. These TreeBins use a balanced tree to hold nodes (a - * specialized form of red-black trees), bounding search time to - * O(log N). Each search step in a TreeBin is at least twice as - * slow as in a regular list, but given that N cannot exceed - * (1<<64) (before running out of addresses) this bounds search - * steps, lock hold times, etc, to reasonable constants (roughly - * 100 nodes inspected per operation worst case) so long as keys - * are Comparable (which is very common -- String, Long, etc). - * TreeBin nodes (TreeNodes) also maintain the same "next" - * traversal pointers as regular nodes, so can be traversed in - * iterators in the same way. - * - * The table is resized when occupancy exceeds a percentage - * threshold (nominally, 0.75, but see below). Any thread - * noticing an overfull bin may assist in resizing after the - * initiating thread allocates and sets up the replacement - * array. However, rather than stalling, these other threads may - * proceed with insertions etc. The use of TreeBins shields us - * from the worst case effects of overfilling while resizes are in - * progress. Resizing proceeds by transferring bins, one by one, - * from the table to the next table. To enable concurrency, the - * next table must be (incrementally) prefilled with place-holders - * serving as reverse forwarders to the old table. Because we are - * using power-of-two expansion, the elements from each bin must - * either stay at same index, or move with a power of two - * offset. We eliminate unnecessary node creation by catching - * cases where old nodes can be reused because their next fields - * won't change. On average, only about one-sixth of them need - * cloning when a table doubles. The nodes they replace will be - * garbage collectable as soon as they are no longer referenced by - * any reader thread that may be in the midst of concurrently - * traversing table. Upon transfer, the old table bin contains - * only a special forwarding node (with hash field "MOVED") that - * contains the next table as its key. On encountering a - * forwarding node, access and update operations restart, using - * the new table. - * - * Each bin transfer requires its bin lock, which can stall - * waiting for locks while resizing. 
However, because other - * threads can join in and help resize rather than contend for - * locks, average aggregate waits become shorter as resizing - * progresses. The transfer operation must also ensure that all - * accessible bins in both the old and new table are usable by any - * traversal. This is arranged by proceeding from the last bin - * (table.length - 1) up towards the first. Upon seeing a - * forwarding node, traversals (see class Traverser) arrange to - * move to the new table without revisiting nodes. However, to - * ensure that no intervening nodes are skipped, bin splitting can - * only begin after the associated reverse-forwarders are in - * place. - * - * The traversal scheme also applies to partial traversals of - * ranges of bins (via an alternate Traverser constructor) - * to support partitioned aggregate operations. Also, read-only - * operations give up if ever forwarded to a null table, which - * provides support for shutdown-style clearing, which is also not - * currently implemented. - * - * Lazy table initialization minimizes footprint until first use, - * and also avoids resizings when the first operation is from a - * putAll, constructor with map argument, or deserialization. - * These cases attempt to override the initial capacity settings, - * but harmlessly fail to take effect in cases of races. - * - * The element count is maintained using a specialization of - * LongAdder. We need to incorporate a specialization rather than - * just use a LongAdder in order to access implicit - * contention-sensing that leads to creation of multiple - * CounterCells. The counter mechanics avoid contention on - * updates but can encounter cache thrashing if read too - * frequently during concurrent access. To avoid reading so often, - * resizing under contention is attempted only upon adding to a - * bin already holding two or more nodes. Under uniform hash - * distributions, the probability of this occurring at threshold - * is around 13%, meaning that only about 1 in 8 puts check - * threshold (and after resizing, many fewer do so). - * - * TreeBins use a special form of comparison for search and - * related operations (which is the main reason we cannot use - * existing collections such as TreeMaps). TreeBins contain - * Comparable elements, but may contain others, as well as - * elements that are Comparable but not necessarily Comparable for - * the same T, so we cannot invoke compareTo among them. To handle - * this, the tree is ordered primarily by hash value, then by - * Comparable.compareTo order if applicable. On lookup at a node, - * if elements are not comparable or compare as 0 then both left - * and right children may need to be searched in the case of tied - * hash values. (This corresponds to the full list search that - * would be necessary if all elements were non-Comparable and had - * tied hashes.) On insertion, to keep a total ordering (or as - * close as is required here) across rebalancings, we compare - * classes and identityHashCodes as tie-breakers. The red-black - * balancing code is updated from pre-jdk-collections - * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) - * based in turn on Cormen, Leiserson, and Rivest "Introduction to - * Algorithms" (CLR). - * - * TreeBins also require an additional locking mechanism. While - * list traversal is always possible by readers even during - * updates, tree traversal is not, mainly because of tree-rotations - * that may change the root node and/or its linkages. 
TreeBins - * include a simple read-write lock mechanism parasitic on the - * main bin-synchronization strategy: Structural adjustments - * associated with an insertion or removal are already bin-locked - * (and so cannot conflict with other writers) but must wait for - * ongoing readers to finish. Since there can be only one such - * waiter, we use a simple scheme using a single "waiter" field to - * block writers. However, readers need never block. If the root - * lock is held, they proceed along the slow traversal path (via - * next-pointers) until the lock becomes available or the list is - * exhausted, whichever comes first. These cases are not fast, but - * maximize aggregate expected throughput. - * - * Maintaining API and serialization compatibility with previous - * versions of this class introduces several oddities. Mainly: We - * leave untouched but unused constructor arguments refering to - * concurrencyLevel. We accept a loadFactor constructor argument, - * but apply it only to initial table capacity (which is the only - * time that we can guarantee to honor it.) We also declare an - * unused "Segment" class that is instantiated in minimal form - * only when serializing. - * - * Also, solely for compatibility with previous versions of this - * class, it extends AbstractMap, even though all of its methods - * are overridden, so it is just useless baggage. - * - * This file is organized to make things a little easier to follow - * while reading than they might otherwise: First the main static - * declarations and utilities, then fields, then main public - * methods (with a few factorings of multiple public methods into - * internal ones), then sizing methods, trees, traversers, and - * bulk operations. - */ - - /* ---------------- Constants -------------- */ - - /** - * The largest possible table capacity. This value must be - * exactly 1<<30 to stay within Java array allocation and indexing - * bounds for power of two table sizes, and is further required - * because the top two bits of 32bit hash fields are used for - * control purposes. - */ - private static final int MAXIMUM_CAPACITY = 1 << 30; - - /** - * The default initial table capacity. Must be a power of 2 - * (i.e., at least 1) and at most MAXIMUM_CAPACITY. - */ - private static final int DEFAULT_CAPACITY = 16; - - /** - * The largest possible (non-power of two) array size. - * Needed by toArray and related methods. - */ - static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; - - /** - * The default concurrency level for this table. Unused but - * defined for compatibility with previous versions of this class. - */ - private static final int DEFAULT_CONCURRENCY_LEVEL = 16; - - /** - * The load factor for this table. Overrides of this value in - * constructors affect only the initial table capacity. The - * actual floating point value isn't normally used -- it is - * simpler to use expressions such as {@code n - (n >>> 2)} for - * the associated resizing threshold. - */ - private static final float LOAD_FACTOR = 0.75f; - - /** - * The bin count threshold for using a tree rather than list for a - * bin. Bins are converted to trees when adding an element to a - * bin with at least this many nodes. The value must be greater - * than 2, and should be at least 8 to mesh with assumptions in - * tree removal about conversion back to plain bins upon - * shrinkage. - */ - static final int TREEIFY_THRESHOLD = 8; - - /** - * The bin count threshold for untreeifying a (split) bin during a - * resize operation. 
Should be less than TREEIFY_THRESHOLD, and at - * most 6 to mesh with shrinkage detection under removal. - */ - static final int UNTREEIFY_THRESHOLD = 6; - - /** - * The smallest table capacity for which bins may be treeified. - * (Otherwise the table is resized if too many nodes in a bin.) - * The value should be at least 4 * TREEIFY_THRESHOLD to avoid - * conflicts between resizing and treeification thresholds. - */ - static final int MIN_TREEIFY_CAPACITY = 64; - - /** - * Minimum number of rebinnings per transfer step. Ranges are - * subdivided to allow multiple resizer threads. This value - * serves as a lower bound to avoid resizers encountering - * excessive memory contention. The value should be at least - * DEFAULT_CAPACITY. - */ - private static final int MIN_TRANSFER_STRIDE = 16; - - /* - * Encodings for Node hash fields. See above for explanation. - */ - static final int MOVED = -1; // hash for forwarding nodes - static final int TREEBIN = -2; // hash for roots of trees - static final int RESERVED = -3; // hash for transient reservations - static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash - - /** - * Number of CPUS, to place bounds on some sizings - */ - static final int NCPU = Runtime.getRuntime().availableProcessors(); - - /** - * For serialization compatibility. - */ - private static final ObjectStreamField[] serialPersistentFields = { - new ObjectStreamField("segments", Segment[].class), - new ObjectStreamField("segmentMask", Integer.TYPE), - new ObjectStreamField("segmentShift", Integer.TYPE) - }; - - /* ---------------- Nodes -------------- */ - - /** - * Key-value entry. This class is never exported out as a - * user-mutable Map.Entry (i.e., one supporting setValue; see - * MapEntry below), but can be used for read-only traversals used - * in bulk tasks. Subclasses of Node with a negative hash field - * are special, and contain null keys and values (but are never - * exported). Otherwise, keys and vals are never null. - */ - static class Node implements Map.Entry { - final int hash; - final K key; - volatile V val; - volatile Node next; - - Node(final int hash, final K key, final V val, final Node next) { - this.hash = hash; - this.key = key; - this.val = val; - this.next = next; - } - - public final K getKey() { - return key; - } - - public final V getValue() { - return val; - } - - public final int hashCode() { - return key.hashCode() ^ val.hashCode(); - } - - public final String toString() { - return key + "=" + val; - } - - public final V setValue(final V value) { - throw new UnsupportedOperationException(); - } - - public final boolean equals(final Object o) { - final Object k; - final Object v; - final Object u; - final Map.Entry e; - return ((o instanceof Map.Entry) && - (k = (e = (Map.Entry) o).getKey()) != null && - (v = e.getValue()) != null && - (k == key || k.equals(key)) && - (v == (u = val) || v.equals(u))); - } - - /** - * Virtualized support for map.get(); overridden in subclasses. - */ - Node find(final int h, final Object k) { - Node e = this; - if (k != null) { - do { - final K ek; - if (e.hash == h && - ((ek = e.key) == k || (ek != null && k.equals(ek)))) - return e; - } while ((e = e.next) != null); - } - return null; - } - } - - /* ---------------- Static utilities -------------- */ - - /** - * Spreads (XORs) higher bits of hash to lower and also forces top - * bit to 0. Because the table uses power-of-two masking, sets of - * hashes that vary only in bits above the current mask will - * always collide. 
(Among known examples are sets of Float keys - * holding consecutive whole numbers in small tables.) So we - * apply a transform that spreads the impact of higher bits - * downward. There is a tradeoff between speed, utility, and - * quality of bit-spreading. Because many common sets of hashes - * are already reasonably distributed (so don't benefit from - * spreading), and because we use trees to handle large sets of - * collisions in bins, we just XOR some shifted bits in the - * cheapest possible way to reduce systematic lossage, as well as - * to incorporate impact of the highest bits that would otherwise - * never be used in index calculations because of table bounds. - */ - static final int spread(final int h) { - return (h ^ (h >>> 16)) & HASH_BITS; - } - - /** - * Returns a power of two table size for the given desired capacity. - * See Hackers Delight, sec 3.2 - */ - private static final int tableSizeFor(final int c) { - int n = c - 1; - n |= n >>> 1; - n |= n >>> 2; - n |= n >>> 4; - n |= n >>> 8; - n |= n >>> 16; - return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; - } - - /** - * Returns x's Class if it is of the form "class C implements - * Comparable", else null. - */ - static Class comparableClassFor(final Object x) { - if (x instanceof Comparable) { - final Class c; - final Type[] ts; - Type[] as; - Type t; - ParameterizedType p; - if ((c = x.getClass()) == String.class) // bypass checks - return c; - if ((ts = c.getGenericInterfaces()) != null) { - for (int i = 0; i < ts.length; ++i) { - if (((t = ts[i]) instanceof ParameterizedType) && - ((p = (ParameterizedType) t).getRawType() == - Comparable.class) && - (as = p.getActualTypeArguments()) != null && - as.length == 1 && as[0] == c) // type arg is c - return c; - } - } - } - return null; - } - - /** - * Returns k.compareTo(x) if x matches kc (k's screened comparable - * class), else 0. - */ - @SuppressWarnings({"rawtypes", "unchecked"}) // for cast to Comparable - static int compareComparables(final Class kc, final Object k, final Object x) { - return (x == null || x.getClass() != kc ? 0 : - ((Comparable) k).compareTo(x)); - } - - /* ---------------- Table element access -------------- */ - - /* - * Volatile access methods are used for table elements as well as - * elements of in-progress next table while resizing. All uses of - * the tab arguments must be null checked by callers. All callers - * also paranoically precheck that tab's length is not zero (or an - * equivalent check), thus ensuring that any index argument taking - * the form of a hash value anded with (length - 1) is a valid - * index. Note that, to be correct wrt arbitrary concurrency - * errors by users, these checks must operate on local variables, - * which accounts for some odd-looking inline assignments below. - * Note that calls to setTabAt always occur within locked regions, - * and so in principle require only release ordering, not need - * full volatile semantics, but are currently coded as volatile - * writes to be conservative. 
- */ - - @SuppressWarnings("unchecked") - static final Node tabAt(final Node[] tab, final int i) { - return (Node) U.getObjectVolatile(tab, ((long) i << ASHIFT) + ABASE); - } - - static final boolean casTabAt(final Node[] tab, final int i, - final Node c, final Node v) { - return U.compareAndSwapObject(tab, ((long) i << ASHIFT) + ABASE, c, v); - } - - static final void setTabAt(final Node[] tab, final int i, final Node v) { - U.putObjectVolatile(tab, ((long) i << ASHIFT) + ABASE, v); - } - - /* ---------------- Fields -------------- */ - - /** - * The array of bins. Lazily initialized upon first insertion. - * Size is always a power of two. Accessed directly by iterators. - */ - transient volatile Node[] table; - - /** - * The next table to use; non-null only while resizing. - */ - private transient volatile Node[] nextTable; - - /** - * Base counter value, used mainly when there is no contention, - * but also as a fallback during table initialization - * races. Updated via CAS. - */ - private transient volatile long baseCount; - - /** - * Table initialization and resizing control. When negative, the - * table is being initialized or resized: -1 for initialization, - * else -(1 + the number of active resizing threads). Otherwise, - * when table is null, holds the initial table size to use upon - * creation, or 0 for default. After initialization, holds the - * next element count value upon which to resize the table. - */ - private transient volatile int sizeCtl; - - /** - * The next table index (plus one) to split while resizing. - */ - private transient volatile int transferIndex; - - /** - * The least available table index to split while resizing. - */ - private transient volatile int transferOrigin; - - /** - * Spinlock (locked via CAS) used when resizing and/or creating CounterCells. - */ - private transient volatile int cellsBusy; - - /** - * Table of counter cells. When non-null, size is a power of 2. - */ - private transient volatile CounterCell[] counterCells; - - // views - private transient KeySetView keySet; - private transient ValuesView values; - private transient EntrySetView entrySet; - - - /* ---------------- Public operations -------------- */ - - /** - * Creates a new, empty map with the default initial table size (16). - */ - ConcurrentHashMapV8() { - } - - /** - * Creates a new, empty map with an initial table size - * accommodating the specified number of elements without the need - * to dynamically resize. - * - * @param initialCapacity The implementation performs internal - * sizing to accommodate this many elements. - * @throws IllegalArgumentException if the initial capacity of - * elements is negative - */ - ConcurrentHashMapV8(final int initialCapacity) { - if (initialCapacity < 0) - throw new IllegalArgumentException(); - final int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? - MAXIMUM_CAPACITY : - tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); - this.sizeCtl = cap; - } - - /** - * Creates a new map with the same mappings as the given map. - * - * @param m the map - */ - ConcurrentHashMapV8(final Map m) { - this.sizeCtl = DEFAULT_CAPACITY; - putAll(m); - } - - /** - * Creates a new, empty map with an initial table size based on - * the given number of elements ({@code initialCapacity}) and - * initial table density ({@code loadFactor}). - * - * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements, - * given the specified load factor. 
- * @param loadFactor the load factor (table density) for - * establishing the initial table size - * @throws IllegalArgumentException if the initial capacity of - * elements is negative or the load factor is nonpositive - * @since 1.6 - */ - ConcurrentHashMapV8(final int initialCapacity, final float loadFactor) { - this(initialCapacity, loadFactor, 1); - } - - /** - * Creates a new, empty map with an initial table size based on - * the given number of elements ({@code initialCapacity}), table - * density ({@code loadFactor}), and number of concurrently - * updating threads ({@code concurrencyLevel}). - * - * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements, - * given the specified load factor. - * @param loadFactor the load factor (table density) for - * establishing the initial table size - * @param concurrencyLevel the estimated number of concurrently - * updating threads. The implementation may use this value as - * a sizing hint. - * @throws IllegalArgumentException if the initial capacity is - * negative or the load factor or concurrencyLevel are - * nonpositive - */ - ConcurrentHashMapV8(int initialCapacity, - final float loadFactor, final int concurrencyLevel) { - if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) - throw new IllegalArgumentException(); - if (initialCapacity < concurrencyLevel) // Use at least as many bins - initialCapacity = concurrencyLevel; // as estimated threads - final long size = (long) (1.0 + (long) initialCapacity / loadFactor); - final int cap = (size >= (long) MAXIMUM_CAPACITY) ? - MAXIMUM_CAPACITY : tableSizeFor((int) size); - this.sizeCtl = cap; - } - - // Original (since JDK1.2) Map methods - - /** - * {@inheritDoc} - */ - public int size() { - final long n = sumCount(); - return ((n < 0L) ? 0 : - (n > (long) Integer.MAX_VALUE) ? Integer.MAX_VALUE : - (int) n); - } - - /** - * {@inheritDoc} - */ - public boolean isEmpty() { - return sumCount() <= 0L; // ignore transient negative values - } - - /** - * Returns the value to which the specified key is mapped, - * or {@code null} if this map contains no mapping for the key. - * - *

More formally, if this map contains a mapping from a key - * {@code k} to a value {@code v} such that {@code key.equals(k)}, - * then this method returns {@code v}; otherwise it returns - * {@code null}. (There can be at most one such mapping.) - * - * @throws NullPointerException if the specified key is null - */ - public V get(final Object key) { - final Node[] tab; - Node e; - final Node p; - final int n; - final int eh; - K ek; - final int h = spread(key.hashCode()); - if ((tab = table) != null && (n = tab.length) > 0 && - (e = tabAt(tab, (n - 1) & h)) != null) { - if ((eh = e.hash) == h) { - if ((ek = e.key) == key || (ek != null && key.equals(ek))) - return e.val; - } else if (eh < 0) - return (p = e.find(h, key)) != null ? p.val : null; - while ((e = e.next) != null) { - if (e.hash == h && - ((ek = e.key) == key || (ek != null && key.equals(ek)))) - return e.val; - } - } - return null; - } - - /** - * Tests if the specified object is a key in this table. - * - * @param key possible key - * @return {@code true} if and only if the specified object - * is a key in this table, as determined by the - * {@code equals} method; {@code false} otherwise - * @throws NullPointerException if the specified key is null - */ - public boolean containsKey(final Object key) { - return get(key) != null; - } - - /** - * Returns {@code true} if this map maps one or more keys to the - * specified value. Note: This method may require a full traversal - * of the map, and is much slower than method {@code containsKey}. - * - * @param value value whose presence in this map is to be tested - * @return {@code true} if this map maps one or more keys to the - * specified value - * @throws NullPointerException if the specified value is null - */ - public boolean containsValue(final Object value) { - if (value == null) - throw new NullPointerException(); - final Node[] t; - if ((t = table) != null) { - final Traverser it = new Traverser(t, t.length, 0, t.length); - for (Node p; (p = it.advance()) != null; ) { - final V v; - if ((v = p.val) == value || (v != null && value.equals(v))) - return true; - } - } - return false; - } - - /** - * Maps the specified key to the specified value in this table. - * Neither the key nor the value can be null. - * - *

The value can be retrieved by calling the {@code get} method - * with a key that is equal to the original key. - * - * @param key key with which the specified value is to be associated - * @param value value to be associated with the specified key - * @return the previous value associated with {@code key}, or - * {@code null} if there was no mapping for {@code key} - * @throws NullPointerException if the specified key or value is null - */ - public V put(final K key, final V value) { - return putVal(key, value, false); - } - - /** - * Implementation for put and putIfAbsent - */ - final V putVal(final K key, final V value, final boolean onlyIfAbsent) { - if (key == null || value == null) throw new NullPointerException(); - final int hash = spread(key.hashCode()); - int binCount = 0; - for (Node[] tab = table; ; ) { - final Node f; - final int n; - final int i; - final int fh; - if (tab == null || (n = tab.length) == 0) - tab = initTable(); - else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) { - if (casTabAt(tab, i, null, - new Node(hash, key, value, null))) - break; // no lock when adding to empty bin - } else if ((fh = f.hash) == MOVED) - tab = helpTransfer(tab, f); - else { - V oldVal = null; - synchronized (f) { - if (tabAt(tab, i) == f) { - if (fh >= 0) { - binCount = 1; - for (Node e = f; ; ++binCount) { - final K ek; - if (e.hash == hash && - ((ek = e.key) == key || - (ek != null && key.equals(ek)))) { - oldVal = e.val; - if (!onlyIfAbsent) - e.val = value; - break; - } - final Node pred = e; - if ((e = e.next) == null) { - pred.next = new Node(hash, key, - value, null); - break; - } - } - } else if (f instanceof TreeBin) { - final Node p; - binCount = 2; - if ((p = ((TreeBin) f).putTreeVal(hash, key, - value)) != null) { - oldVal = p.val; - if (!onlyIfAbsent) - p.val = value; - } - } - } - } - if (binCount != 0) { - if (binCount >= TREEIFY_THRESHOLD) - treeifyBin(tab, i); - if (oldVal != null) - return oldVal; - break; - } - } - } - addCount(1L, binCount); - return null; - } - - /** - * Copies all of the mappings from the specified map to this one. - * These mappings replace any mappings that this map had for any of the - * keys currently in the specified map. - * - * @param m mappings to be stored in this map - */ - public void putAll(final Map m) { - tryPresize(m.size()); - for (final Map.Entry e : m.entrySet()) - putVal(e.getKey(), e.getValue(), false); - } - - /** - * Removes the key (and its corresponding value) from this map. - * This method does nothing if the key is not in the map. - * - * @param key the key that needs to be removed - * @return the previous value associated with {@code key}, or - * {@code null} if there was no mapping for {@code key} - * @throws NullPointerException if the specified key is null - */ - public V remove(final Object key) { - return replaceNode(key, null, null); - } - - /** - * Implementation for the four public remove/replace methods: - * Replaces node value with v, conditional upon match of cv if - * non-null. If resulting value is null, delete. 
- */ - final V replaceNode(final Object key, final V value, final Object cv) { - final int hash = spread(key.hashCode()); - for (Node[] tab = table; ; ) { - final Node f; - final int n; - final int i; - final int fh; - if (tab == null || (n = tab.length) == 0 || - (f = tabAt(tab, i = (n - 1) & hash)) == null) - break; - else if ((fh = f.hash) == MOVED) - tab = helpTransfer(tab, f); - else { - V oldVal = null; - boolean validated = false; - synchronized (f) { - if (tabAt(tab, i) == f) { - if (fh >= 0) { - validated = true; - for (Node e = f, pred = null; ; ) { - final K ek; - if (e.hash == hash && - ((ek = e.key) == key || - (ek != null && key.equals(ek)))) { - final V ev = e.val; - if (cv == null || cv == ev || - (ev != null && cv.equals(ev))) { - oldVal = ev; - if (value != null) - e.val = value; - else if (pred != null) - pred.next = e.next; - else - setTabAt(tab, i, e.next); - } - break; - } - pred = e; - if ((e = e.next) == null) - break; - } - } else if (f instanceof TreeBin) { - validated = true; - final TreeBin t = (TreeBin) f; - final TreeNode r; - final TreeNode p; - if ((r = t.root) != null && - (p = r.findTreeNode(hash, key, null)) != null) { - final V pv = p.val; - if (cv == null || cv == pv || - (pv != null && cv.equals(pv))) { - oldVal = pv; - if (value != null) - p.val = value; - else if (t.removeTreeNode(p)) - setTabAt(tab, i, untreeify(t.first)); - } - } - } - } - } - if (validated) { - if (oldVal != null) { - if (value == null) - addCount(-1L, -1); - return oldVal; - } - break; - } - } - } - return null; - } - - /** - * Removes all of the mappings from this map. - */ - public void clear() { - long delta = 0L; // negative number of deletions - int i = 0; - Node[] tab = table; - while (tab != null && i < tab.length) { - final int fh; - final Node f = tabAt(tab, i); - if (f == null) - ++i; - else if ((fh = f.hash) == MOVED) { - tab = helpTransfer(tab, f); - i = 0; // restart - } else { - synchronized (f) { - if (tabAt(tab, i) == f) { - Node p = (fh >= 0 ? f : - (f instanceof TreeBin) ? - ((TreeBin) f).first : null); - while (p != null) { - --delta; - p = p.next; - } - setTabAt(tab, i++, null); - } - } - } - } - if (delta != 0L) - addCount(delta, -1); - } - - /** - * Returns a {@link Set} view of the keys contained in this map. - * The set is backed by the map, so changes to the map are - * reflected in the set, and vice-versa. The set supports element - * removal, which removes the corresponding mapping from this map, - * via the {@code Iterator.remove}, {@code Set.remove}, - * {@code removeAll}, {@code retainAll}, and {@code clear} - * operations. It does not support the {@code add} or - * {@code addAll} operations. - * - *

The view's {@code iterator} is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. - * - * @return the set view - */ - public KeySetView keySet() { - final KeySetView ks; - return (ks = keySet) != null ? ks : (keySet = new KeySetView(this, null)); - } - - /** - * Returns a {@link Collection} view of the values contained in this map. - * The collection is backed by the map, so changes to the map are - * reflected in the collection, and vice-versa. The collection - * supports element removal, which removes the corresponding - * mapping from this map, via the {@code Iterator.remove}, - * {@code Collection.remove}, {@code removeAll}, - * {@code retainAll}, and {@code clear} operations. It does not - * support the {@code add} or {@code addAll} operations. - * - *

The view's {@code iterator} is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. - * - * @return the collection view - */ - public Collection values() { - final ValuesView vs; - return (vs = values) != null ? vs : (values = new ValuesView(this)); - } - - /** - * Returns a {@link Set} view of the mappings contained in this map. - * The set is backed by the map, so changes to the map are - * reflected in the set, and vice-versa. The set supports element - * removal, which removes the corresponding mapping from the map, - * via the {@code Iterator.remove}, {@code Set.remove}, - * {@code removeAll}, {@code retainAll}, and {@code clear} - * operations. - * - *

The view's {@code iterator} is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. - * - * @return the set view - */ - public Set> entrySet() { - final EntrySetView es; - return (es = entrySet) != null ? es : (entrySet = new EntrySetView(this)); - } - - /** - * Returns the hash code value for this {@link Map}, i.e., - * the sum of, for each key-value pair in the map, - * {@code key.hashCode() ^ value.hashCode()}. - * - * @return the hash code value for this map - */ - public int hashCode() { - int h = 0; - final Node[] t; - if ((t = table) != null) { - final Traverser it = new Traverser(t, t.length, 0, t.length); - for (Node p; (p = it.advance()) != null; ) - h += p.key.hashCode() ^ p.val.hashCode(); - } - return h; - } - - /** - * Returns a string representation of this map. The string - * representation consists of a list of key-value mappings (in no - * particular order) enclosed in braces ("{@code {}}"). Adjacent - * mappings are separated by the characters {@code ", "} (comma - * and space). Each key-value mapping is rendered as the key - * followed by an equals sign ("{@code =}") followed by the - * associated value. - * - * @return a string representation of this map - */ - public String toString() { - final Node[] t; - final int f = (t = table) == null ? 0 : t.length; - final Traverser it = new Traverser(t, f, 0, f); - final StringBuilder sb = new StringBuilder(); - sb.append('{'); - Node p; - if ((p = it.advance()) != null) { - for (; ; ) { - final K k = p.key; - final V v = p.val; - sb.append(k == this ? "(this Map)" : k); - sb.append('='); - sb.append(v == this ? "(this Map)" : v); - if ((p = it.advance()) == null) - break; - sb.append(',').append(' '); - } - } - return sb.append('}').toString(); - } - - /** - * Compares the specified object with this map for equality. - * Returns {@code true} if the given object is a map with the same - * mappings as this map. This operation may return misleading - * results if either map is concurrently modified during execution - * of this method. - * - * @param o object to be compared for equality with this map - * @return {@code true} if the specified object is equal to this map - */ - public boolean equals(final Object o) { - if (o != this) { - if (!(o instanceof Map)) - return false; - final Map m = (Map) o; - final Node[] t; - final int f = (t = table) == null ? 
0 : t.length; - final Traverser it = new Traverser(t, f, 0, f); - for (Node p; (p = it.advance()) != null; ) { - final V val = p.val; - final Object v = m.get(p.key); - if (v == null || (v != val && !v.equals(val))) - return false; - } - for (final Map.Entry e : m.entrySet()) { - final Object mk; - final Object mv; - final Object v; - if ((mk = e.getKey()) == null || - (mv = e.getValue()) == null || - (v = get(mk)) == null || - (mv != v && !mv.equals(v))) - return false; - } - } - return true; - } - - /** - * Stripped-down version of helper class used in previous version, - * declared for the sake of serialization compatibility - */ - static class Segment extends ReentrantLock implements Serializable { - private static final long serialVersionUID = 2249069246763182397L; - final float loadFactor; - - Segment(final float lf) { - this.loadFactor = lf; - } - } - - /** - * Saves the state of the {@code ConcurrentHashMapV8} instance to a - * stream (i.e., serializes it). - * - * @param s the stream - * @throws java.io.IOException if an I/O error occurs - * @serialData the key (Object) and value (Object) - * for each key-value mapping, followed by a null pair. - * The key-value mappings are emitted in no particular order. - */ - private void writeObject(final java.io.ObjectOutputStream s) - throws java.io.IOException { - // For serialization compatibility - // Emulate segment calculation from previous version of this class - int sshift = 0; - int ssize = 1; - while (ssize < DEFAULT_CONCURRENCY_LEVEL) { - ++sshift; - ssize <<= 1; - } - final int segmentShift = 32 - sshift; - final int segmentMask = ssize - 1; - @SuppressWarnings("unchecked") Segment[] segments = (Segment[]) - new Segment[DEFAULT_CONCURRENCY_LEVEL]; - for (int i = 0; i < segments.length; ++i) - segments[i] = new Segment(LOAD_FACTOR); - s.putFields().put("segments", segments); - s.putFields().put("segmentShift", segmentShift); - s.putFields().put("segmentMask", segmentMask); - s.writeFields(); - - final Node[] t; - if ((t = table) != null) { - final Traverser it = new Traverser(t, t.length, 0, t.length); - for (Node p; (p = it.advance()) != null; ) { - s.writeObject(p.key); - s.writeObject(p.val); - } - } - s.writeObject(null); - s.writeObject(null); - segments = null; // throw away - } - - /** - * Reconstitutes the instance from a stream (that is, deserializes it). - * - * @param s the stream - * @throws ClassNotFoundException if the class of a serialized object - * could not be found - * @throws java.io.IOException if an I/O error occurs - */ - private void readObject(final java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { - /* - * To improve performance in typical cases, we create nodes - * while reading, then place in table once size is known. - * However, we must also validate uniqueness and deal with - * overpopulated bins while doing so, which requires - * specialized versions of putVal mechanics. 
- */ - sizeCtl = -1; // force exclusion for table construction - s.defaultReadObject(); - long size = 0L; - Node p = null; - for (; ; ) { - @SuppressWarnings("unchecked") final K k = (K) s.readObject(); - @SuppressWarnings("unchecked") final V v = (V) s.readObject(); - if (k != null && v != null) { - p = new Node(spread(k.hashCode()), k, v, p); - ++size; - } else - break; - } - if (size == 0L) - sizeCtl = 0; - else { - final int n; - if (size >= (long) (MAXIMUM_CAPACITY >>> 1)) - n = MAXIMUM_CAPACITY; - else { - final int sz = (int) size; - n = tableSizeFor(sz + (sz >>> 1) + 1); - } - @SuppressWarnings({"rawtypes", "unchecked"}) final - Node[] tab = (Node[]) new Node[n]; - final int mask = n - 1; - long added = 0L; - while (p != null) { - boolean insertAtFront; - final Node next = p.next; - final Node first; - final int h = p.hash; - final int j = h & mask; - if ((first = tabAt(tab, j)) == null) - insertAtFront = true; - else { - final K k = p.key; - if (first.hash < 0) { - final TreeBin t = (TreeBin) first; - if (t.putTreeVal(h, k, p.val) == null) - ++added; - insertAtFront = false; - } else { - int binCount = 0; - insertAtFront = true; - Node q; - K qk; - for (q = first; q != null; q = q.next) { - if (q.hash == h && - ((qk = q.key) == k || - (qk != null && k.equals(qk)))) { - insertAtFront = false; - break; - } - ++binCount; - } - if (insertAtFront && binCount >= TREEIFY_THRESHOLD) { - insertAtFront = false; - ++added; - p.next = first; - TreeNode hd = null, tl = null; - for (q = p; q != null; q = q.next) { - final TreeNode t = new TreeNode - (q.hash, q.key, q.val, null, null); - if ((t.prev = tl) == null) - hd = t; - else - tl.next = t; - tl = t; - } - setTabAt(tab, j, new TreeBin(hd)); - } - } - } - if (insertAtFront) { - ++added; - p.next = first; - setTabAt(tab, j, p); - } - p = next; - } - table = tab; - sizeCtl = n - (n >>> 2); - baseCount = added; - } - } - - // ConcurrentMap methods - - /** - * {@inheritDoc} - * - * @return the previous value associated with the specified key, - * or {@code null} if there was no mapping for the key - * @throws NullPointerException if the specified key or value is null - */ - public V putIfAbsent(final K key, final V value) { - return putVal(key, value, true); - } - - /** - * {@inheritDoc} - * - * @throws NullPointerException if the specified key is null - */ - public boolean remove(final Object key, final Object value) { - if (key == null) - throw new NullPointerException(); - return value != null && replaceNode(key, null, value) != null; - } - - /** - * {@inheritDoc} - * - * @throws NullPointerException if any of the arguments are null - */ - public boolean replace(final K key, final V oldValue, final V newValue) { - if (key == null || oldValue == null || newValue == null) - throw new NullPointerException(); - return replaceNode(key, newValue, oldValue) != null; - } - - /** - * {@inheritDoc} - * - * @return the previous value associated with the specified key, - * or {@code null} if there was no mapping for the key - * @throws NullPointerException if the specified key or value is null - */ - public V replace(final K key, final V value) { - if (key == null || value == null) - throw new NullPointerException(); - return replaceNode(key, value, null); - } - - // Overrides of JDK8+ Map extension method defaults - - /** - * Returns the value to which the specified key is mapped, or the - * given default value if this map contains no mapping for the - * key. 
- * - * @param key the key whose associated value is to be returned - * @param defaultValue the value to return if this map contains - * no mapping for the given key - * @return the mapping for the key, if present; else the default value - * @throws NullPointerException if the specified key is null - */ - public V getOrDefault(final Object key, final V defaultValue) { - final V v; - return (v = get(key)) == null ? defaultValue : v; - } - - - // Hashtable legacy methods - - /** - * Legacy method testing if some key maps into the specified value - * in this table. This method is identical in functionality to - * {@link #containsValue(Object)}, and exists solely to ensure - * full compatibility with class {@link java.util.Hashtable}, - * which supported this method prior to introduction of the - * Java Collections framework. - * - * @param value a value to search for - * @return {@code true} if and only if some key maps to the - * {@code value} argument in this table as - * determined by the {@code equals} method; - * {@code false} otherwise - * @throws NullPointerException if the specified value is null - */ - @Deprecated - public boolean contains(final Object value) { - return containsValue(value); - } - - /** - * Returns an enumeration of the keys in this table. - * - * @return an enumeration of the keys in this table - * @see #keySet() - */ - public Enumeration keys() { - final Node[] t; - final int f = (t = table) == null ? 0 : t.length; - return new KeyIterator(t, f, 0, f, this); - } - - /** - * Returns an enumeration of the values in this table. - * - * @return an enumeration of the values in this table - * @see #values() - */ - public Enumeration elements() { - final Node[] t; - final int f = (t = table) == null ? 0 : t.length; - return new ValueIterator(t, f, 0, f, this); - } - - // ConcurrentHashMapV8-only methods - - /** - * Returns the number of mappings. This method should be used - * instead of {@link #size} because a ConcurrentHashMapV8 may - * contain more mappings than can be represented as an int. The - * value returned is an estimate; the actual count may differ if - * there are concurrent insertions or removals. - * - * @return the number of mappings - * @since 1.8 - */ - public long mappingCount() { - final long n = sumCount(); - return (n < 0L) ? 0L : n; // ignore transient negative values - } - - /** - * Creates a new {@link Set} backed by a ConcurrentHashMapV8 - * from the given type to {@code Boolean.TRUE}. - * - * @return the new set - * @since 1.8 - */ - public static KeySetView newKeySet() { - return new KeySetView - (new ConcurrentHashMapV8(), Boolean.TRUE); - } - - /** - * Creates a new {@link Set} backed by a ConcurrentHashMapV8 - * from the given type to {@code Boolean.TRUE}. - * - * @param initialCapacity The implementation performs internal - * sizing to accommodate this many elements. - * @return the new set - * @throws IllegalArgumentException if the initial capacity of - * elements is negative - * @since 1.8 - */ - public static KeySetView newKeySet(final int initialCapacity) { - return new KeySetView - (new ConcurrentHashMapV8(initialCapacity), Boolean.TRUE); - } - - /** - * Returns a {@link Set} view of the keys in this map, using the - * given common mapped value for any additions (i.e., {@link - * Collection#add} and {@link Collection#addAll(Collection)}). - * This is of course only appropriate if it is acceptable to use - * the same value for all additions from this view. 
- * - * @param mappedValue the mapped value to use for any additions - * @return the set view - * @throws NullPointerException if the mappedValue is null - */ - public KeySetView keySet(final V mappedValue) { - if (mappedValue == null) - throw new NullPointerException(); - return new KeySetView(this, mappedValue); - } - - /* ---------------- Special Nodes -------------- */ - - /** - * A node inserted at head of bins during transfer operations. - */ - static final class ForwardingNode extends Node { - final Node[] nextTable; - - ForwardingNode(final Node[] tab) { - super(MOVED, null, null, null); - this.nextTable = tab; - } - - Node find(final int h, final Object k) { - // loop to avoid arbitrarily deep recursion on forwarding nodes - outer: - for (Node[] tab = nextTable; ; ) { - Node e; - final int n; - if (k == null || tab == null || (n = tab.length) == 0 || - (e = tabAt(tab, (n - 1) & h)) == null) - return null; - for (; ; ) { - final int eh; - final K ek; - if ((eh = e.hash) == h && - ((ek = e.key) == k || (ek != null && k.equals(ek)))) - return e; - if (eh < 0) { - if (e instanceof ForwardingNode) { - tab = ((ForwardingNode) e).nextTable; - continue outer; - } else - return e.find(h, k); - } - if ((e = e.next) == null) - return null; - } - } - } - } - - /** - * A place-holder node used in computeIfAbsent and compute - */ - static final class ReservationNode extends Node { - ReservationNode() { - super(RESERVED, null, null, null); - } - - Node find(final int h, final Object k) { - return null; - } - } - - /* ---------------- Table Initialization and Resizing -------------- */ - - /** - * Initializes table, using the size recorded in sizeCtl. - */ - private final Node[] initTable() { - Node[] tab; - int sc; - while ((tab = table) == null || tab.length == 0) { - if ((sc = sizeCtl) < 0) - Thread.yield(); // lost initialization race; just spin - else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { - try { - if ((tab = table) == null || tab.length == 0) { - final int n = (sc > 0) ? sc : DEFAULT_CAPACITY; - @SuppressWarnings({"rawtypes", "unchecked"}) final - Node[] nt = (Node[]) new Node[n]; - table = tab = nt; - sc = n - (n >>> 2); - } - } finally { - sizeCtl = sc; - } - break; - } - } - return tab; - } - - /** - * Adds to count, and if table is too small and not already - * resizing, initiates transfer. If already resizing, helps - * perform transfer if work is available. Rechecks occupancy - * after a transfer to see if another resize is already needed - * because resizings are lagging additions. 
- * - * @param x the count to add - * @param check if <0, don't check resize, if <= 1 only check if uncontended - */ - private final void addCount(final long x, final int check) { - final CounterCell[] as; - final long b; - long s; - if ((as = counterCells) != null || - !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { - final CounterHashCode hc; - final CounterCell a; - final long v; - final int m; - boolean uncontended = true; - if ((hc = threadCounterHashCode.get()) == null || - as == null || (m = as.length - 1) < 0 || - (a = as[m & hc.code]) == null || - !(uncontended = - U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { - fullAddCount(x, hc, uncontended); - return; - } - if (check <= 1) - return; - s = sumCount(); - } - if (check >= 0) { - Node[] tab, nt; - int sc; - while (s >= (long) (sc = sizeCtl) && (tab = table) != null && - tab.length < MAXIMUM_CAPACITY) { - if (sc < 0) { - if (sc == -1 || transferIndex <= transferOrigin || - (nt = nextTable) == null) - break; - if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) - transfer(tab, nt); - } else if (U.compareAndSwapInt(this, SIZECTL, sc, -2)) - transfer(tab, null); - s = sumCount(); - } - } - } - - /** - * Helps transfer if a resize is in progress. - */ - final Node[] helpTransfer(final Node[] tab, final Node f) { - final Node[] nextTab; - final int sc; - if ((f instanceof ForwardingNode) && - (nextTab = ((ForwardingNode) f).nextTable) != null) { - if (nextTab == nextTable && tab == table && - transferIndex > transferOrigin && (sc = sizeCtl) < -1 && - U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) - transfer(tab, nextTab); - return nextTab; - } - return table; - } - - /** - * Tries to presize table to accommodate the given number of elements. - * - * @param size number of elements (doesn't need to be perfectly accurate) - */ - private final void tryPresize(final int size) { - final int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : - tableSizeFor(size + (size >>> 1) + 1); - int sc; - while ((sc = sizeCtl) >= 0) { - final Node[] tab = table; - int n; - if (tab == null || (n = tab.length) == 0) { - n = (sc > c) ? sc : c; - if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { - try { - if (table == tab) { - @SuppressWarnings({"rawtypes", "unchecked"}) final - Node[] nt = (Node[]) new Node[n]; - table = nt; - sc = n - (n >>> 2); - } - } finally { - sizeCtl = sc; - } - } - } else if (c <= sc || n >= MAXIMUM_CAPACITY) - break; - else if (tab == table && - U.compareAndSwapInt(this, SIZECTL, sc, -2)) - transfer(tab, null); - } - } - - /** - * Moves and/or copies the nodes in each bin to new table. See - * above for explanation. - */ - private final void transfer(final Node[] tab, Node[] nextTab) { - final int n = tab.length; - int stride; - if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) - stride = MIN_TRANSFER_STRIDE; // subdivide range - if (nextTab == null) { // initiating - try { - @SuppressWarnings({"rawtypes", "unchecked"}) final - Node[] nt = (Node[]) new Node[n << 1]; - nextTab = nt; - } catch (final Throwable ex) { // try to cope with OOME - sizeCtl = Integer.MAX_VALUE; - return; - } - nextTable = nextTab; - transferOrigin = n; - transferIndex = n; - final ForwardingNode rev = new ForwardingNode(tab); - for (int k = n; k > 0; ) { // progressively reveal ready slots - final int nextk = (k > stride) ? 
k - stride : 0; - for (int m = nextk; m < k; ++m) - nextTab[m] = rev; - for (int m = n + nextk; m < n + k; ++m) - nextTab[m] = rev; - U.putOrderedInt(this, TRANSFERORIGIN, k = nextk); - } - } - final int nextn = nextTab.length; - final ForwardingNode fwd = new ForwardingNode(nextTab); - boolean advance = true; - boolean finishing = false; // to ensure sweep before committing nextTab - for (int i = 0, bound = 0; ; ) { - int nextIndex; - int nextBound; - final int fh; - final Node f; - while (advance) { - if (--i >= bound || finishing) - advance = false; - else if ((nextIndex = transferIndex) <= transferOrigin) { - i = -1; - advance = false; - } else if (U.compareAndSwapInt - (this, TRANSFERINDEX, nextIndex, - nextBound = (nextIndex > stride ? - nextIndex - stride : 0))) { - bound = nextBound; - i = nextIndex - 1; - advance = false; - } - } - if (i < 0 || i >= n || i + n >= nextn) { - if (finishing) { - nextTable = null; - table = nextTab; - sizeCtl = (n << 1) - (n >>> 1); - return; - } - for (int sc; ; ) { - if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { - if (sc != -1) - return; - finishing = advance = true; - i = n; // recheck before commit - break; - } - } - } else if ((f = tabAt(tab, i)) == null) { - if (casTabAt(tab, i, null, fwd)) { - setTabAt(nextTab, i, null); - setTabAt(nextTab, i + n, null); - advance = true; - } - } else if ((fh = f.hash) == MOVED) - advance = true; // already processed - else { - synchronized (f) { - if (tabAt(tab, i) == f) { - Node ln, hn; - if (fh >= 0) { - int runBit = fh & n; - Node lastRun = f; - for (Node p = f.next; p != null; p = p.next) { - final int b = p.hash & n; - if (b != runBit) { - runBit = b; - lastRun = p; - } - } - if (runBit == 0) { - ln = lastRun; - hn = null; - } else { - hn = lastRun; - ln = null; - } - for (Node p = f; p != lastRun; p = p.next) { - final int ph = p.hash; - final K pk = p.key; - final V pv = p.val; - if ((ph & n) == 0) - ln = new Node(ph, pk, pv, ln); - else - hn = new Node(ph, pk, pv, hn); - } - setTabAt(nextTab, i, ln); - setTabAt(nextTab, i + n, hn); - setTabAt(tab, i, fwd); - advance = true; - } else if (f instanceof TreeBin) { - final TreeBin t = (TreeBin) f; - TreeNode lo = null, loTail = null; - TreeNode hi = null, hiTail = null; - int lc = 0, hc = 0; - for (Node e = t.first; e != null; e = e.next) { - final int h = e.hash; - final TreeNode p = new TreeNode - (h, e.key, e.val, null, null); - if ((h & n) == 0) { - if ((p.prev = loTail) == null) - lo = p; - else - loTail.next = p; - loTail = p; - ++lc; - } else { - if ((p.prev = hiTail) == null) - hi = p; - else - hiTail.next = p; - hiTail = p; - ++hc; - } - } - ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : - (hc != 0) ? new TreeBin(lo) : t; - hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : - (lc != 0) ? new TreeBin(hi) : t; - setTabAt(nextTab, i, ln); - setTabAt(nextTab, i + n, hn); - setTabAt(tab, i, fwd); - advance = true; - } - } - } - } - } - } - - /* ---------------- Conversion from/to TreeBins -------------- */ - - /** - * Replaces all linked nodes in bin at given index unless table is - * too small, in which case resizes instead. 
- */ - private final void treeifyBin(final Node[] tab, final int index) { - final Node b; - final int n; - final int sc; - if (tab != null) { - if ((n = tab.length) < MIN_TREEIFY_CAPACITY) { - if (tab == table && (sc = sizeCtl) >= 0 && - U.compareAndSwapInt(this, SIZECTL, sc, -2)) - transfer(tab, null); - } else if ((b = tabAt(tab, index)) != null && b.hash >= 0) { - synchronized (b) { - if (tabAt(tab, index) == b) { - TreeNode hd = null, tl = null; - for (Node e = b; e != null; e = e.next) { - final TreeNode p = - new TreeNode(e.hash, e.key, e.val, - null, null); - if ((p.prev = tl) == null) - hd = p; - else - tl.next = p; - tl = p; - } - setTabAt(tab, index, new TreeBin(hd)); - } - } - } - } - } - - /** - * Returns a list on non-TreeNodes replacing those in given list. - */ - static Node untreeify(final Node b) { - Node hd = null, tl = null; - for (Node q = b; q != null; q = q.next) { - final Node p = new Node(q.hash, q.key, q.val, null); - if (tl == null) - hd = p; - else - tl.next = p; - tl = p; - } - return hd; - } - - /* ---------------- TreeNodes -------------- */ - - /** - * Nodes for use in TreeBins - */ - static final class TreeNode extends Node { - TreeNode parent; // red-black tree links - TreeNode left; - TreeNode right; - TreeNode prev; // needed to unlink next upon deletion - boolean red; - - TreeNode(final int hash, final K key, final V val, final Node next, - final TreeNode parent) { - super(hash, key, val, next); - this.parent = parent; - } - - Node find(final int h, final Object k) { - return findTreeNode(h, k, null); - } - - /** - * Returns the TreeNode (or null if not found) for the given key - * starting at given root. - */ - final TreeNode findTreeNode(final int h, final Object k, Class kc) { - if (k != null) { - TreeNode p = this; - do { - final int ph; - final int dir; - final K pk; - final TreeNode q; - final TreeNode pl = p.left; - final TreeNode pr = p.right; - if ((ph = p.hash) > h) - p = pl; - else if (ph < h) - p = pr; - else if ((pk = p.key) == k || (pk != null && k.equals(pk))) - return p; - else if (pl == null) - p = pr; - else if (pr == null) - p = pl; - else if ((kc != null || - (kc = comparableClassFor(k)) != null) && - (dir = compareComparables(kc, k, pk)) != 0) - p = (dir < 0) ? pl : pr; - else if ((q = pr.findTreeNode(h, k, kc)) != null) - return q; - else - p = pl; - } while (p != null); - } - return null; - } - } - - /* ---------------- TreeBins -------------- */ - - /** - * TreeNodes used at the heads of bins. TreeBins do not hold user - * keys or values, but instead point to list of TreeNodes and - * their root. They also maintain a parasitic read-write lock - * forcing writers (who hold bin lock) to wait for readers (who do - * not) to complete before tree restructuring operations. - */ - static final class TreeBin extends Node { - TreeNode root; - volatile TreeNode first; - volatile Thread waiter; - volatile int lockState; - // values for lockState - static final int WRITER = 1; // set while holding write lock - static final int WAITER = 2; // set when waiting for write lock - static final int READER = 4; // increment value for setting read lock - - /** - * Tie-breaking utility for ordering insertions when equal - * hashCodes and non-comparable. We don't require a total - * order, just a consistent insertion rule to maintain - * equivalence across rebalancings. Tie-breaking further than - * necessary simplifies testing a bit. 
- */ - static int tieBreakOrder(final Object a, final Object b) { - int d; - if (a == null || b == null || - (d = a.getClass().getName(). - compareTo(b.getClass().getName())) == 0) - d = (System.identityHashCode(a) <= System.identityHashCode(b) ? - -1 : 1); - return d; - } - - /** - * Creates bin with initial set of nodes headed by b. - */ - TreeBin(final TreeNode b) { - super(TREEBIN, null, null, null); - this.first = b; - TreeNode r = null; - for (TreeNode x = b, next; x != null; x = next) { - next = (TreeNode) x.next; - x.left = x.right = null; - if (r == null) { - x.parent = null; - x.red = false; - r = x; - } else { - final K k = x.key; - final int h = x.hash; - Class kc = null; - for (TreeNode p = r; ; ) { - int dir; - final int ph; - final K pk = p.key; - if ((ph = p.hash) > h) - dir = -1; - else if (ph < h) - dir = 1; - else if ((kc == null && - (kc = comparableClassFor(k)) == null) || - (dir = compareComparables(kc, k, pk)) == 0) - dir = tieBreakOrder(k, pk); - final TreeNode xp = p; - if ((p = (dir <= 0) ? p.left : p.right) == null) { - x.parent = xp; - if (dir <= 0) - xp.left = x; - else - xp.right = x; - r = balanceInsertion(r, x); - break; - } - } - } - } - this.root = r; - assert checkInvariants(root); - } - - /** - * Acquires write lock for tree restructuring. - */ - private final void lockRoot() { - if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER)) - contendedLock(); // offload to separate method - } - - /** - * Releases write lock for tree restructuring. - */ - private final void unlockRoot() { - lockState = 0; - } - - /** - * Possibly blocks awaiting root lock. - */ - private final void contendedLock() { - boolean waiting = false; - for (int s; ; ) { - if (((s = lockState) & WRITER) == 0) { - if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) { - if (waiting) - waiter = null; - return; - } - } else if ((s & WAITER) == 0) { - if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { - waiting = true; - waiter = Thread.currentThread(); - } - } else if (waiting) - LockSupport.park(this); - } - } - - /** - * Returns matching node or null if none. Tries to search - * using tree comparisons from root, but continues linear - * search when lock not available. - */ - final Node find(final int h, final Object k) { - if (k != null) { - for (Node e = first; e != null; e = e.next) { - final int s; - final K ek; - if (((s = lockState) & (WAITER | WRITER)) != 0) { - if (e.hash == h && - ((ek = e.key) == k || (ek != null && k.equals(ek)))) - return e; - } else if (U.compareAndSwapInt(this, LOCKSTATE, s, - s + READER)) { - final TreeNode r; - TreeNode p; - try { - p = ((r = root) == null ? null : - r.findTreeNode(h, k, null)); - } finally { - final Thread w; - int ls; - do { - } while (!U.compareAndSwapInt - (this, LOCKSTATE, - ls = lockState, ls - READER)); - if (ls == (READER | WAITER) && (w = waiter) != null) - LockSupport.unpark(w); - } - return p; - } - } - } - return null; - } - - /** - * Finds or adds a node. 
- * - * @return null if added - */ - final TreeNode putTreeVal(final int h, final K k, final V v) { - Class kc = null; - boolean searched = false; - for (TreeNode p = root; ; ) { - int dir; - final int ph; - final K pk; - if (p == null) { - first = root = new TreeNode(h, k, v, null, null); - break; - } else if ((ph = p.hash) > h) - dir = -1; - else if (ph < h) - dir = 1; - else if ((pk = p.key) == k || (pk != null && k.equals(pk))) - return p; - else if ((kc == null && - (kc = comparableClassFor(k)) == null) || - (dir = compareComparables(kc, k, pk)) == 0) { - if (!searched) { - TreeNode q, ch; - searched = true; - if (((ch = p.left) != null && - (q = ch.findTreeNode(h, k, kc)) != null) || - ((ch = p.right) != null && - (q = ch.findTreeNode(h, k, kc)) != null)) - return q; - } - dir = tieBreakOrder(k, pk); - } - - final TreeNode xp = p; - if ((p = (dir <= 0) ? p.left : p.right) == null) { - final TreeNode x; - final TreeNode f = first; - first = x = new TreeNode(h, k, v, f, xp); - if (f != null) - f.prev = x; - if (dir <= 0) - xp.left = x; - else - xp.right = x; - if (!xp.red) - x.red = true; - else { - lockRoot(); - try { - root = balanceInsertion(root, x); - } finally { - unlockRoot(); - } - } - break; - } - } - assert checkInvariants(root); - return null; - } - - /** - * Removes the given node, that must be present before this - * call. This is messier than typical red-black deletion code - * because we cannot swap the contents of an interior node - * with a leaf successor that is pinned by "next" pointers - * that are accessible independently of lock. So instead we - * swap the tree linkages. - * - * @return true if now too small, so should be untreeified - */ - final boolean removeTreeNode(final TreeNode p) { - final TreeNode next = (TreeNode) p.next; - final TreeNode pred = p.prev; // unlink traversal pointers - TreeNode r; - final TreeNode rl; - if (pred == null) - first = next; - else - pred.next = next; - if (next != null) - next.prev = pred; - if (first == null) { - root = null; - return true; - } - if ((r = root) == null || r.right == null || // too small - (rl = r.left) == null || rl.left == null) - return true; - lockRoot(); - try { - final TreeNode replacement; - final TreeNode pl = p.left; - final TreeNode pr = p.right; - if (pl != null && pr != null) { - TreeNode s = pr, sl; - while ((sl = s.left) != null) // find successor - s = sl; - final boolean c = s.red; - s.red = p.red; - p.red = c; // swap colors - final TreeNode sr = s.right; - final TreeNode pp = p.parent; - if (s == pr) { // p was s's direct parent - p.parent = s; - s.right = p; - } else { - final TreeNode sp = s.parent; - if ((p.parent = sp) != null) { - if (s == sp.left) - sp.left = p; - else - sp.right = p; - } - if ((s.right = pr) != null) - pr.parent = s; - } - p.left = null; - if ((p.right = sr) != null) - sr.parent = p; - if ((s.left = pl) != null) - pl.parent = s; - if ((s.parent = pp) == null) - r = s; - else if (p == pp.left) - pp.left = s; - else - pp.right = s; - if (sr != null) - replacement = sr; - else - replacement = p; - } else if (pl != null) - replacement = pl; - else if (pr != null) - replacement = pr; - else - replacement = p; - if (replacement != p) { - final TreeNode pp = replacement.parent = p.parent; - if (pp == null) - r = replacement; - else if (p == pp.left) - pp.left = replacement; - else - pp.right = replacement; - p.left = p.right = p.parent = null; - } - - root = (p.red) ? 
r : balanceDeletion(r, replacement); - - if (p == replacement) { // detach pointers - final TreeNode pp; - if ((pp = p.parent) != null) { - if (p == pp.left) - pp.left = null; - else if (p == pp.right) - pp.right = null; - p.parent = null; - } - } - } finally { - unlockRoot(); - } - assert checkInvariants(root); - return false; - } - - /* ------------------------------------------------------------ */ - // Red-black tree methods, all adapted from CLR - - static TreeNode rotateLeft(TreeNode root, - final TreeNode p) { - final TreeNode r; - final TreeNode pp; - final TreeNode rl; - if (p != null && (r = p.right) != null) { - if ((rl = p.right = r.left) != null) - rl.parent = p; - if ((pp = r.parent = p.parent) == null) - (root = r).red = false; - else if (pp.left == p) - pp.left = r; - else - pp.right = r; - r.left = p; - p.parent = r; - } - return root; - } - - static TreeNode rotateRight(TreeNode root, - final TreeNode p) { - final TreeNode l; - final TreeNode pp; - final TreeNode lr; - if (p != null && (l = p.left) != null) { - if ((lr = p.left = l.right) != null) - lr.parent = p; - if ((pp = l.parent = p.parent) == null) - (root = l).red = false; - else if (pp.right == p) - pp.right = l; - else - pp.left = l; - l.right = p; - p.parent = l; - } - return root; - } - - static TreeNode balanceInsertion(TreeNode root, - TreeNode x) { - x.red = true; - for (TreeNode xp, xpp, xppl, xppr; ; ) { - if ((xp = x.parent) == null) { - x.red = false; - return x; - } else if (!xp.red || (xpp = xp.parent) == null) - return root; - if (xp == (xppl = xpp.left)) { - if ((xppr = xpp.right) != null && xppr.red) { - xppr.red = false; - xp.red = false; - xpp.red = true; - x = xpp; - } else { - if (x == xp.right) { - root = rotateLeft(root, x = xp); - xpp = (xp = x.parent) == null ? null : xp.parent; - } - if (xp != null) { - xp.red = false; - if (xpp != null) { - xpp.red = true; - root = rotateRight(root, xpp); - } - } - } - } else { - if (xppl != null && xppl.red) { - xppl.red = false; - xp.red = false; - xpp.red = true; - x = xpp; - } else { - if (x == xp.left) { - root = rotateRight(root, x = xp); - xpp = (xp = x.parent) == null ? null : xp.parent; - } - if (xp != null) { - xp.red = false; - if (xpp != null) { - xpp.red = true; - root = rotateLeft(root, xpp); - } - } - } - } - } - } - - static TreeNode balanceDeletion(TreeNode root, - TreeNode x) { - for (TreeNode xp, xpl, xpr; ; ) { - if (x == null || x == root) - return root; - else if ((xp = x.parent) == null) { - x.red = false; - return x; - } else if (x.red) { - x.red = false; - return root; - } else if ((xpl = xp.left) == x) { - if ((xpr = xp.right) != null && xpr.red) { - xpr.red = false; - xp.red = true; - root = rotateLeft(root, xp); - xpr = (xp = x.parent) == null ? null : xp.right; - } - if (xpr == null) - x = xp; - else { - final TreeNode sl = xpr.left; - TreeNode sr = xpr.right; - if ((sr == null || !sr.red) && - (sl == null || !sl.red)) { - xpr.red = true; - x = xp; - } else { - if (sr == null || !sr.red) { - if (sl != null) - sl.red = false; - xpr.red = true; - root = rotateRight(root, xpr); - xpr = (xp = x.parent) == null ? - null : xp.right; - } - if (xpr != null) { - xpr.red = (xp == null) ? false : xp.red; - if ((sr = xpr.right) != null) - sr.red = false; - } - if (xp != null) { - xp.red = false; - root = rotateLeft(root, xp); - } - x = root; - } - } - } else { // symmetric - if (xpl != null && xpl.red) { - xpl.red = false; - xp.red = true; - root = rotateRight(root, xp); - xpl = (xp = x.parent) == null ? 
null : xp.left; - } - if (xpl == null) - x = xp; - else { - TreeNode sl = xpl.left; - final TreeNode sr = xpl.right; - if ((sl == null || !sl.red) && - (sr == null || !sr.red)) { - xpl.red = true; - x = xp; - } else { - if (sl == null || !sl.red) { - if (sr != null) - sr.red = false; - xpl.red = true; - root = rotateLeft(root, xpl); - xpl = (xp = x.parent) == null ? - null : xp.left; - } - if (xpl != null) { - xpl.red = (xp == null) ? false : xp.red; - if ((sl = xpl.left) != null) - sl.red = false; - } - if (xp != null) { - xp.red = false; - root = rotateRight(root, xp); - } - x = root; - } - } - } - } - } - - /** - * Recursive invariant check - */ - static boolean checkInvariants(final TreeNode t) { - final TreeNode tp = t.parent; - final TreeNode tl = t.left; - final TreeNode tr = t.right; - final TreeNode tb = t.prev; - final TreeNode tn = (TreeNode) t.next; - if (tb != null && tb.next != t) - return false; - if (tn != null && tn.prev != t) - return false; - if (tp != null && t != tp.left && t != tp.right) - return false; - if (tl != null && (tl.parent != t || tl.hash > t.hash)) - return false; - if (tr != null && (tr.parent != t || tr.hash < t.hash)) - return false; - if (t.red && tl != null && tl.red && tr != null && tr.red) - return false; - if (tl != null && !checkInvariants(tl)) - return false; - if (tr != null && !checkInvariants(tr)) - return false; - return true; - } - - private static final sun.misc.Unsafe U; - private static final long LOCKSTATE; - - static { - try { - U = getUnsafe(); - final Class k = TreeBin.class; - LOCKSTATE = U.objectFieldOffset - (k.getDeclaredField("lockState")); - } catch (final Exception e) { - throw new Error(e); - } - } - } - - /* ----------------Table Traversal -------------- */ - - /** - * Encapsulates traversal for methods such as containsValue; also - * serves as a base class for other iterators and spliterators. - * - * Method advance visits once each still-valid node that was - * reachable upon iterator construction. It might miss some that - * were added to a bin after the bin was visited, which is OK wrt - * consistency guarantees. Maintaining this property in the face - * of possible ongoing resizes requires a fair amount of - * bookkeeping state that is difficult to optimize away amidst - * volatile accesses. Even so, traversal maintains reasonable - * throughput. - * - * Normally, iteration proceeds bin-by-bin traversing lists. - * However, if the table has been resized, then all future steps - * must traverse both the bin at the current index as well as at - * (index + baseSize); and so on for further resizings. To - * paranoically cope with potential sharing by users of iterators - * across threads, iteration terminates if a bounds checks fails - * for a table read. - */ - static class Traverser { - Node[] tab; // current table; updated if resized - Node next; // the next entry to use - int index; // index of bin to use next - int baseIndex; // current index of initial table - int baseLimit; // index bound for initial table - final int baseSize; // initial table size - - Traverser(final Node[] tab, final int size, final int index, final int limit) { - this.tab = tab; - this.baseSize = size; - this.baseIndex = this.index = index; - this.baseLimit = limit; - this.next = null; - } - - /** - * Advances if possible, returning next valid node, or null if none. 
- */ - final Node advance() { - Node e; - if ((e = next) != null) - e = e.next; - for (; ; ) { - final Node[] t; - final int i; - final int n; - K ek; // must use locals in checks - if (e != null) - return next = e; - if (baseIndex >= baseLimit || (t = tab) == null || - (n = t.length) <= (i = index) || i < 0) - return next = null; - if ((e = tabAt(t, index)) != null && e.hash < 0) { - if (e instanceof ForwardingNode) { - tab = ((ForwardingNode) e).nextTable; - e = null; - continue; - } else if (e instanceof TreeBin) - e = ((TreeBin) e).first; - else - e = null; - } - if ((index += baseSize) >= n) - index = ++baseIndex; // visit upper slots if present - } - } - } - - /** - * Base of key, value, and entry Iterators. Adds fields to - * Traverser to support iterator.remove. - */ - static class BaseIterator extends Traverser { - final ConcurrentHashMapV8 map; - Node lastReturned; - - BaseIterator(final Node[] tab, final int size, final int index, final int limit, - final ConcurrentHashMapV8 map) { - super(tab, size, index, limit); - this.map = map; - advance(); - } - - public final boolean hasNext() { - return next != null; - } - - public final boolean hasMoreElements() { - return next != null; - } - - public final void remove() { - final Node p; - if ((p = lastReturned) == null) - throw new IllegalStateException(); - lastReturned = null; - map.replaceNode(p.key, null, null); - } - } - - static final class KeyIterator extends BaseIterator - implements Iterator, Enumeration { - KeyIterator(final Node[] tab, final int index, final int size, final int limit, - final ConcurrentHashMapV8 map) { - super(tab, index, size, limit, map); - } - - public final K next() { - final Node p; - if ((p = next) == null) - throw new NoSuchElementException(); - final K k = p.key; - lastReturned = p; - advance(); - return k; - } - - public final K nextElement() { - return next(); - } - } - - static final class ValueIterator extends BaseIterator - implements Iterator, Enumeration { - ValueIterator(final Node[] tab, final int index, final int size, final int limit, - final ConcurrentHashMapV8 map) { - super(tab, index, size, limit, map); - } - - public final V next() { - final Node p; - if ((p = next) == null) - throw new NoSuchElementException(); - final V v = p.val; - lastReturned = p; - advance(); - return v; - } - - public final V nextElement() { - return next(); - } - } - - static final class EntryIterator extends BaseIterator - implements Iterator> { - EntryIterator(final Node[] tab, final int index, final int size, final int limit, - final ConcurrentHashMapV8 map) { - super(tab, index, size, limit, map); - } - - public final Map.Entry next() { - final Node p; - if ((p = next) == null) - throw new NoSuchElementException(); - final K k = p.key; - final V v = p.val; - lastReturned = p; - advance(); - return new MapEntry(k, v, map); - } - } - - /** - * Exported Entry for EntryIterator - */ - static final class MapEntry implements Map.Entry { - final K key; // non-null - V val; // non-null - final ConcurrentHashMapV8 map; - - MapEntry(final K key, final V val, final ConcurrentHashMapV8 map) { - this.key = key; - this.val = val; - this.map = map; - } - - public K getKey() { - return key; - } - - public V getValue() { - return val; - } - - public int hashCode() { - return key.hashCode() ^ val.hashCode(); - } - - public String toString() { - return key + "=" + val; - } - - public boolean equals(final Object o) { - final Object k; - final Object v; - final Map.Entry e; - return ((o instanceof Map.Entry) && - (k = (e = 
(Map.Entry) o).getKey()) != null && - (v = e.getValue()) != null && - (k == key || k.equals(key)) && - (v == val || v.equals(val))); - } - - /** - * Sets our entry's value and writes through to the map. The - * value to return is somewhat arbitrary here. Since we do not - * necessarily track asynchronous changes, the most recent - * "previous" value could be different from what we return (or - * could even have been removed, in which case the put will - * re-establish). We do not and cannot guarantee more. - */ - public V setValue(final V value) { - if (value == null) throw new NullPointerException(); - final V v = val; - val = value; - map.put(key, value); - return v; - } - } - - /* ----------------Views -------------- */ - - /** - * Base class for views. - */ - abstract static class CollectionView - implements Collection, java.io.Serializable { - private static final long serialVersionUID = 7249069246763182397L; - final ConcurrentHashMapV8 map; - - CollectionView(final ConcurrentHashMapV8 map) { - this.map = map; - } - - /** - * Returns the map backing this view. - * - * @return the map backing this view - */ - public ConcurrentHashMapV8 getMap() { - return map; - } - - /** - * Removes all of the elements from this view, by removing all - * the mappings from the map backing this view. - */ - public final void clear() { - map.clear(); - } - - public final int size() { - return map.size(); - } - - public final boolean isEmpty() { - return map.isEmpty(); - } - - // implementations below rely on concrete classes supplying these - // abstract methods - - /** - * Returns a "weakly consistent" iterator that will never - * throw {@link ConcurrentModificationException}, and - * guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not - * guaranteed to) reflect any modifications subsequent to - * construction. - */ - public abstract Iterator iterator(); - - public abstract boolean contains(Object o); - - public abstract boolean remove(Object o); - - private static final String oomeMsg = "Required array size too large"; - - public final Object[] toArray() { - final long sz = map.mappingCount(); - if (sz > MAX_ARRAY_SIZE) - throw new OutOfMemoryError(oomeMsg); - int n = (int) sz; - Object[] r = new Object[n]; - int i = 0; - for (final E e : this) { - if (i == n) { - if (n >= MAX_ARRAY_SIZE) - throw new OutOfMemoryError(oomeMsg); - if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) - n = MAX_ARRAY_SIZE; - else - n += (n >>> 1) + 1; - r = Arrays.copyOf(r, n); - } - r[i++] = e; - } - return (i == n) ? r : Arrays.copyOf(r, i); - } - - @SuppressWarnings("unchecked") - public final T[] toArray(final T[] a) { - final long sz = map.mappingCount(); - if (sz > MAX_ARRAY_SIZE) - throw new OutOfMemoryError(oomeMsg); - final int m = (int) sz; - T[] r = (a.length >= m) ? a : - (T[]) java.lang.reflect.Array - .newInstance(a.getClass().getComponentType(), m); - int n = r.length; - int i = 0; - for (final E e : this) { - if (i == n) { - if (n >= MAX_ARRAY_SIZE) - throw new OutOfMemoryError(oomeMsg); - if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) - n = MAX_ARRAY_SIZE; - else - n += (n >>> 1) + 1; - r = Arrays.copyOf(r, n); - } - r[i++] = (T) e; - } - if (a == r && i < n) { - r[i] = null; // null-terminate - return r; - } - return (i == n) ? r : Arrays.copyOf(r, i); - } - - /** - * Returns a string representation of this collection. 
- * The string representation consists of the string representations - * of the collection's elements in the order they are returned by - * its iterator, enclosed in square brackets ({@code "[]"}). - * Adjacent elements are separated by the characters {@code ", "} - * (comma and space). Elements are converted to strings as by - * {@link String#valueOf(Object)}. - * - * @return a string representation of this collection - */ - public final String toString() { - final StringBuilder sb = new StringBuilder(); - sb.append('['); - final Iterator it = iterator(); - if (it.hasNext()) { - for (; ; ) { - final Object e = it.next(); - sb.append(e == this ? "(this Collection)" : e); - if (!it.hasNext()) - break; - sb.append(',').append(' '); - } - } - return sb.append(']').toString(); - } - - public final boolean containsAll(final Collection c) { - if (c != this) { - for (final Object e : c) { - if (e == null || !contains(e)) - return false; - } - } - return true; - } - - public final boolean removeAll(final Collection c) { - boolean modified = false; - for (final Iterator it = iterator(); it.hasNext(); ) { - if (c.contains(it.next())) { - it.remove(); - modified = true; - } - } - return modified; - } - - public final boolean retainAll(final Collection c) { - boolean modified = false; - for (final Iterator it = iterator(); it.hasNext(); ) { - if (!c.contains(it.next())) { - it.remove(); - modified = true; - } - } - return modified; - } - - } - - /** - * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in - * which additions may optionally be enabled by mapping to a - * common value. This class cannot be directly instantiated. - * See {@link #keySet() keySet()}, - * {@link #keySet(Object) keySet(V)}, - * {@link #newKeySet() newKeySet()}, - * {@link #newKeySet(int) newKeySet(int)}. - * - * @since 1.8 - */ - public static class KeySetView extends CollectionView - implements Set, java.io.Serializable { - private static final long serialVersionUID = 7249069246763182397L; - private final V value; - - KeySetView(final ConcurrentHashMapV8 map, final V value) { // non-public - super(map); - this.value = value; - } - - /** - * Returns the default mapped value for additions, - * or {@code null} if additions are not supported. - * - * @return the default mapped value for additions, or {@code null} - * if not supported - */ - public V getMappedValue() { - return value; - } - - /** - * {@inheritDoc} - * - * @throws NullPointerException if the specified key is null - */ - public boolean contains(final Object o) { - return map.containsKey(o); - } - - /** - * Removes the key from this map view, by removing the key (and its - * corresponding value) from the backing map. This method does - * nothing if the key is not in the map. - * - * @param o the key to be removed from the backing map - * @return {@code true} if the backing map contained the specified key - * @throws NullPointerException if the specified key is null - */ - public boolean remove(final Object o) { - return map.remove(o) != null; - } - - /** - * @return an iterator over the keys of the backing map - */ - public Iterator iterator() { - final Node[] t; - final ConcurrentHashMapV8 m = map; - final int f = (t = m.table) == null ? 0 : t.length; - return new KeyIterator(t, f, 0, f, m); - } - - /** - * Adds the specified key to this set view by mapping the key to - * the default mapped value in the backing map, if defined. 
- * - * @param e key to be added - * @return {@code true} if this set changed as a result of the call - * @throws NullPointerException if the specified key is null - * @throws UnsupportedOperationException if no default mapped value - * for additions was provided - */ - public boolean add(final K e) { - final V v; - if ((v = value) == null) - throw new UnsupportedOperationException(); - return map.putVal(e, v, true) == null; - } - - /** - * Adds all of the elements in the specified collection to this set, - * as if by calling {@link #add} on each one. - * - * @param c the elements to be inserted into this set - * @return {@code true} if this set changed as a result of the call - * @throws NullPointerException if the collection or any of its - * elements are {@code null} - * @throws UnsupportedOperationException if no default mapped value - * for additions was provided - */ - public boolean addAll(final Collection c) { - boolean added = false; - final V v; - if ((v = value) == null) - throw new UnsupportedOperationException(); - for (final K e : c) { - if (map.putVal(e, v, true) == null) - added = true; - } - return added; - } - - public int hashCode() { - int h = 0; - for (final K e : this) - h += e.hashCode(); - return h; - } - - public boolean equals(final Object o) { - final Set c; - return ((o instanceof Set) && - ((c = (Set) o) == this || - (containsAll(c) && c.containsAll(this)))); - } - } - - /** - * A view of a ConcurrentHashMapV8 as a {@link Collection} of - * values, in which additions are disabled. This class cannot be - * directly instantiated. See {@link #values()}. - */ - static final class ValuesView extends CollectionView - implements Collection, java.io.Serializable { - private static final long serialVersionUID = 2249069246763182397L; - - ValuesView(final ConcurrentHashMapV8 map) { - super(map); - } - - public final boolean contains(final Object o) { - return map.containsValue(o); - } - - public final boolean remove(final Object o) { - if (o != null) { - for (final Iterator it = iterator(); it.hasNext(); ) { - if (o.equals(it.next())) { - it.remove(); - return true; - } - } - } - return false; - } - - public final Iterator iterator() { - final ConcurrentHashMapV8 m = map; - final Node[] t; - final int f = (t = m.table) == null ? 0 : t.length; - return new ValueIterator(t, f, 0, f, m); - } - - public final boolean add(final V e) { - throw new UnsupportedOperationException(); - } - - public final boolean addAll(final Collection c) { - throw new UnsupportedOperationException(); - } - } - - /** - * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) - * entries. This class cannot be directly instantiated. See - * {@link #entrySet()}. 
- */ - static final class EntrySetView extends CollectionView> - implements Set>, java.io.Serializable { - private static final long serialVersionUID = 2249069246763182397L; - - EntrySetView(final ConcurrentHashMapV8 map) { - super(map); - } - - public boolean contains(final Object o) { - final Object k; - final Object v; - final Object r; - final Map.Entry e; - return ((o instanceof Map.Entry) && - (k = (e = (Map.Entry) o).getKey()) != null && - (r = map.get(k)) != null && - (v = e.getValue()) != null && - (v == r || v.equals(r))); - } - - public boolean remove(final Object o) { - final Object k; - final Object v; - final Map.Entry e; - return ((o instanceof Map.Entry) && - (k = (e = (Map.Entry) o).getKey()) != null && - (v = e.getValue()) != null && - map.remove(k, v)); - } - - /** - * @return an iterator over the entries of the backing map - */ - public Iterator> iterator() { - final ConcurrentHashMapV8 m = map; - final Node[] t; - final int f = (t = m.table) == null ? 0 : t.length; - return new EntryIterator(t, f, 0, f, m); - } - - public boolean add(final Entry e) { - return map.putVal(e.getKey(), e.getValue(), false) == null; - } - - public boolean addAll(final Collection> c) { - boolean added = false; - for (final Entry e : c) { - if (add(e)) - added = true; - } - return added; - } - - public final int hashCode() { - int h = 0; - final Node[] t; - if ((t = map.table) != null) { - final Traverser it = new Traverser(t, t.length, 0, t.length); - for (Node p; (p = it.advance()) != null; ) { - h += p.hashCode(); - } - } - return h; - } - - public final boolean equals(final Object o) { - final Set c; - return ((o instanceof Set) && - ((c = (Set) o) == this || - (containsAll(c) && c.containsAll(this)))); - } - } - - // ------------------------------------------------------- - - /* ---------------- Counters -------------- */ - - // Adapted from LongAdder and Striped64. - // See their internal docs for explanation. - - // A padded cell for distributing counts - static final class CounterCell { - volatile long p0, p1, p2, p3, p4, p5, p6; - volatile long value; - volatile long q0, q1, q2, q3, q4, q5, q6; - - CounterCell(final long x) { - value = x; - } - } - - /** - * Holder for the thread-local hash code determining which - * CounterCell to use. The code is initialized via the - * counterHashCodeGenerator, but may be moved upon collisions. - */ - static final class CounterHashCode { - int code; - } - - /** - * Generates initial value for per-thread CounterHashCodes. - */ - static final AtomicInteger counterHashCodeGenerator = new AtomicInteger(); - - /** - * Increment for counterHashCodeGenerator. See class ThreadLocal - * for explanation. - */ - static final int SEED_INCREMENT = 0x61c88647; - - /** - * Per-thread counter hash codes. Shared across all instances. - */ - static final ThreadLocal threadCounterHashCode = - new ThreadLocal(); - - - final long sumCount() { - final CounterCell[] as = counterCells; - CounterCell a; - long sum = baseCount; - if (as != null) { - for (int i = 0; i < as.length; ++i) { - if ((a = as[i]) != null) - sum += a.value; - } - } - return sum; - } - - // See LongAdder version for explanation - private final void fullAddCount(final long x, CounterHashCode hc, - boolean wasUncontended) { - int h; - if (hc == null) { - hc = new CounterHashCode(); - final int s = counterHashCodeGenerator.addAndGet(SEED_INCREMENT); - h = hc.code = (s == 0) ? 
1 : s; // Avoid zero - threadCounterHashCode.set(hc); - } else - h = hc.code; - boolean collide = false; // True if last slot nonempty - for (; ; ) { - final CounterCell[] as; - final CounterCell a; - final int n; - final long v; - if ((as = counterCells) != null && (n = as.length) > 0) { - if ((a = as[(n - 1) & h]) == null) { - if (cellsBusy == 0) { // Try to attach new Cell - final CounterCell r = new CounterCell(x); // Optimistic create - if (cellsBusy == 0 && - U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { - boolean created = false; - try { // Recheck under lock - final CounterCell[] rs; - final int m; - final int j; - if ((rs = counterCells) != null && - (m = rs.length) > 0 && - rs[j = (m - 1) & h] == null) { - rs[j] = r; - created = true; - } - } finally { - cellsBusy = 0; - } - if (created) - break; - continue; // Slot is now non-empty - } - } - collide = false; - } else if (!wasUncontended) // CAS already known to fail - wasUncontended = true; // Continue after rehash - else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) - break; - else if (counterCells != as || n >= NCPU) - collide = false; // At max size or stale - else if (!collide) - collide = true; - else if (cellsBusy == 0 && - U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { - try { - if (counterCells == as) {// Expand table unless stale - final CounterCell[] rs = new CounterCell[n << 1]; - System.arraycopy(as, 0, rs, 0, n); - counterCells = rs; - } - } finally { - cellsBusy = 0; - } - collide = false; - continue; // Retry with expanded table - } - h ^= h << 13; // Rehash - h ^= h >>> 17; - h ^= h << 5; - } else if (cellsBusy == 0 && counterCells == as && - U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { - boolean init = false; - try { // Initialize table - if (counterCells == as) { - final CounterCell[] rs = new CounterCell[2]; - rs[h & 1] = new CounterCell(x); - counterCells = rs; - init = true; - } - } finally { - cellsBusy = 0; - } - if (init) - break; - } else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) - break; // Fall back on using base - } - hc.code = h; // Record index for next time - } - - // Unsafe mechanics - private static final sun.misc.Unsafe U; - private static final long SIZECTL; - private static final long TRANSFERINDEX; - private static final long TRANSFERORIGIN; - private static final long BASECOUNT; - private static final long CELLSBUSY; - private static final long CELLVALUE; - private static final long ABASE; - private static final int ASHIFT; - - static { - try { - U = getUnsafe(); - final Class k = ConcurrentHashMapV8.class; - SIZECTL = U.objectFieldOffset - (k.getDeclaredField("sizeCtl")); - TRANSFERINDEX = U.objectFieldOffset - (k.getDeclaredField("transferIndex")); - TRANSFERORIGIN = U.objectFieldOffset - (k.getDeclaredField("transferOrigin")); - BASECOUNT = U.objectFieldOffset - (k.getDeclaredField("baseCount")); - CELLSBUSY = U.objectFieldOffset - (k.getDeclaredField("cellsBusy")); - final Class ck = CounterCell.class; - CELLVALUE = U.objectFieldOffset - (ck.getDeclaredField("value")); - final Class ak = Node[].class; - ABASE = U.arrayBaseOffset(ak); - final int scale = U.arrayIndexScale(ak); - if ((scale & (scale - 1)) != 0) - throw new Error("data type scale not a power of two"); - ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); - } catch (final Exception e) { - throw new Error(e); - } - } - - /** - * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. - * Replace with a simple call to Unsafe.getUnsafe when integrating - * into a jdk. 
- * - * @return a sun.misc.Unsafe - */ - private static sun.misc.Unsafe getUnsafe() { - try { - return sun.misc.Unsafe.getUnsafe(); - } catch (final SecurityException tryReflectionInstead) { - } - try { - return java.security.AccessController.doPrivileged - (new java.security.PrivilegedExceptionAction() { - public sun.misc.Unsafe run() throws Exception { - final Class k = sun.misc.Unsafe.class; - for (final java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - final Object x = f.get(null); - if (k.isInstance(x)) - return k.cast(x); - } - throw new NoSuchFieldError("the Unsafe"); - } - }); - } catch (final java.security.PrivilegedActionException e) { - throw new RuntimeException("Could not initialize intrinsics", - e.getCause()); - } - } -} diff --git a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/DataStructures.java b/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/DataStructures.java index 164f357262..468125ff3d 100644 --- a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/DataStructures.java +++ b/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/DataStructures.java @@ -16,18 +16,11 @@ package org.glassfish.jersey.internal.util.collection; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; import java.util.Map; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.logging.Level; -import java.util.logging.Logger; - -import org.glassfish.jersey.internal.util.JdkVersion; +import java.util.concurrent.LinkedTransferQueue; /** * Utility class, which tries to pickup the best collection implementation depending @@ -39,33 +32,6 @@ */ public final class DataStructures { - private static final Class LTQ_CLASS; - - static { - String className = null; - - Class c; - try { - final JdkVersion jdkVersion = JdkVersion.getJdkVersion(); - final JdkVersion minimumVersion = JdkVersion.parseVersion("1.7.0"); - - className = (minimumVersion.compareTo(jdkVersion) <= 0) - ? "java.util.concurrent.LinkedTransferQueue" - : "org.glassfish.jersey.internal.util.collection.LinkedTransferQueue"; - - c = getAndVerify(className); - Logger.getLogger(DataStructures.class.getName()).log(Level.FINE, "USING LTQ class:{0}", c); - } catch (final Throwable t) { - Logger.getLogger(DataStructures.class.getName()).log(Level.FINE, - "failed loading data structure class:" + className - + " fallback to embedded one", t); - - c = LinkedBlockingQueue.class; // fallback to LinkedBlockingQueue - } - - LTQ_CLASS = c; - } - /** * Default concurrency level calculated based on the number of available CPUs. */ @@ -76,100 +42,69 @@ private static int ceilingNextPowerOfTwo(final int x) { return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(x - 1)); } - private static Class getAndVerify(final String cn) throws Throwable { - try { - return AccessController.doPrivileged(new PrivilegedExceptionAction>() { - @Override - public Class run() throws Exception { - return DataStructures.class.getClassLoader().loadClass(cn).newInstance().getClass(); - } - }); - } catch (final PrivilegedActionException ex) { - throw ex.getCause(); - } - } - /** * Create an instance of a {@link BlockingQueue} that is based on - * {@code LinkedTransferQueue} implementation from JDK 7. 
+ * {@code LinkedTransferQueue} implementation, available in JDK 7 and above. *

- * When running on JDK 7 or higher, JDK {@code LinkedTransferQueue} implementation is used,
- * on JDK 6 an internal Jersey implementation class is used.
+ * Originally, this method was used to provide backwards compatibility for JDK versions 6 and below.
+ * As those versions are no longer supported, callers should instantiate a
+ * {@link LinkedTransferQueue} directly instead of using this method.
 *
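+ * <p>
+ * A minimal migration sketch (illustrative only; the {@code String} element type is a
+ * placeholder):
+ * <pre>{@code
+ * // before: BlockingQueue<String> queue = DataStructures.createLinkedTransferQueue();
+ * // after (direct instantiation):
+ * BlockingQueue<String> queue = new LinkedTransferQueue<>();
+ * }</pre>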

 *
 * @param <E> the type of elements held in the queue.
 * @return new instance of a {@link BlockingQueue} that is based on {@code LinkedTransferQueue}
 * implementation from JDK 7.
 */
- @SuppressWarnings("unchecked")
+ @Deprecated
 public static <E> BlockingQueue<E> createLinkedTransferQueue() {
- try {
- return AccessController.doPrivileged(new PrivilegedExceptionAction<BlockingQueue<E>>() {
- @Override
- public BlockingQueue<E> run() throws Exception {
- return (BlockingQueue<E>) LTQ_CLASS.newInstance();
- }
- });
- } catch (final PrivilegedActionException ex) {
- final Throwable cause = ex.getCause();
- if (cause instanceof RuntimeException) {
- throw (RuntimeException) cause;
- } else {
- throw new RuntimeException(cause);
- }
- }
+ return new LinkedTransferQueue<>();
 }

 /**
 * Creates a new, empty map with a default initial capacity (16),
 * load factor (0.75) and concurrencyLevel (16).
 *

- * On Oracle JDK, the factory method will return an instance of
- * {@code ConcurrentHashMapV8}
- * that is supposed to be available in JDK 8 and provides better performance and memory characteristics than
- * {@link ConcurrentHashMap} implementation from JDK 7 or earlier. On non-Oracle JDK,
- * the factory instantiates the standard {@code ConcurrentHashMap} from JDK.
+ * This method was originally used to provide {@code ConcurrentHashMapV8}, a backport of the
+ * JDK 8 {@code ConcurrentHashMap}, on JDK 7 or earlier. As those versions are no longer
+ * supported, callers should instantiate a {@link ConcurrentHashMap} directly instead of
+ * using this method.
 *
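+ * <p>
+ * A minimal migration sketch (illustrative only; the {@code String} type parameters are
+ * placeholders):
+ * <pre>{@code
+ * // before: ConcurrentMap<String, String> map = DataStructures.createConcurrentMap();
+ * // after (direct instantiation):
+ * ConcurrentMap<String, String> map = new ConcurrentHashMap<>();
+ * }</pre>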

 *
 * @return the map.
 */
+ @Deprecated
 public static <K, V> ConcurrentMap<K, V> createConcurrentMap() {
- return JdkVersion.getJdkVersion().isUnsafeSupported()
- ? new ConcurrentHashMapV8<K, V>()
- : new ConcurrentHashMap<K, V>();
+ return new ConcurrentHashMap<>();
 }

 /**
 * Creates a new map with the same mappings as the given map.
 *

- * On Oracle JDK, the factory method will return an instance of
- * {@code ConcurrentHashMapV8}
- * that is supposed to be available in JDK 8 and provides better performance and memory characteristics than
- * {@link ConcurrentHashMap} implementation from JDK 7 or earlier. On non-Oracle JDK,
- * the factory instantiates the standard {@code ConcurrentHashMap} from JDK.
+ * This method was originally used to provide {@code ConcurrentHashMapV8}, a backport of the
+ * JDK 8 {@code ConcurrentHashMap}, on JDK 7 or earlier. As those versions are no longer
+ * supported, callers should instantiate a {@link ConcurrentHashMap} directly instead of
+ * using this method.
 *
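+ * <p>
+ * A minimal migration sketch (illustrative only; {@code source} is a hypothetical
+ * {@code Map<String, String>}):
+ * <pre>{@code
+ * // before: ConcurrentMap<String, String> copy = DataStructures.createConcurrentMap(source);
+ * // after (direct instantiation):
+ * ConcurrentMap<String, String> copy = new ConcurrentHashMap<>(source);
+ * }</pre>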

 *
 * @param map the map.
 */
+ @Deprecated
 public static <K, V> ConcurrentMap<K, V> createConcurrentMap(
 final Map<? extends K, ? extends V> map) {
- return JdkVersion.getJdkVersion().isUnsafeSupported()
- ? new ConcurrentHashMapV8<K, V>(map)
- : new ConcurrentHashMap<K, V>(map);
+ return new ConcurrentHashMap<>(map);
 }

 /**
 * Creates a new, empty map with an initial table size accommodating the specified
 * number of elements without the need to dynamically resize.
 *

- * On Oracle JDK, the factory method will return an instance of
- * {@code ConcurrentHashMapV8}
- * that is supposed to be available in JDK 8 and provides better performance and memory characteristics than
- * {@link ConcurrentHashMap} implementation from JDK 7 or earlier. On non-Oracle JDK,
- * the factory instantiates the standard {@code ConcurrentHashMap} from JDK.
+ * This method was originally used to provide {@code ConcurrentHashMapV8}, a backport of the
+ * JDK 8 {@code ConcurrentHashMap}, on JDK 7 or earlier. As those versions are no longer
+ * supported, callers should instantiate a {@link ConcurrentHashMap} directly instead of
+ * using this method.
 *
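+ * <p>
+ * A minimal migration sketch (illustrative only; the capacity value is a placeholder):
+ * <pre>{@code
+ * // before: ConcurrentMap<String, String> map = DataStructures.createConcurrentMap(64);
+ * // after (direct instantiation):
+ * ConcurrentMap<String, String> map = new ConcurrentHashMap<>(64);
+ * }</pre>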

* * @param initialCapacity The implementation performs internal @@ -177,11 +112,10 @@ public static <K, V> ConcurrentMap<K, V> createConcurrentMap( * @throws IllegalArgumentException if the initial capacity of * elements is negative. */ + @Deprecated public static <K, V> ConcurrentMap<K, V> createConcurrentMap( final int initialCapacity) { - return JdkVersion.getJdkVersion().isUnsafeSupported() - ? new ConcurrentHashMapV8<K, V>(initialCapacity) - : new ConcurrentHashMap<K, V>(initialCapacity); + return new ConcurrentHashMap<>(initialCapacity); } /** @@ -189,12 +123,11 @@ public static <K, V> ConcurrentMap<K, V> createConcurrentMap( * ({@code initialCapacity}), table density ({@code loadFactor}), and number of concurrently * updating threads ({@code concurrencyLevel}). *

- * On Oracle JDK, the factory method will return an instance of + * The method was originally used to provide the * - * {@code ConcurrentHashMapV8} - * that is supposed to be available in JDK 8 and provides better performance and memory characteristics than - * {@link ConcurrentHashMap} implementation from JDK 7 or earlier. On non-Oracle JDK, - * the factory instantiates the standard {@code ConcurrentHashMap} from JDK. + * {@code ConcurrentHashMapV8} implementation, introduced in JDK 8, to code running on JDK 7 or earlier. + * As those versions are no longer supported, callers should instantiate a {@link ConcurrentHashMap} + * directly instead of using this method. *

* * @param initialCapacity the initial capacity. The implementation @@ -209,11 +142,10 @@ public static <K, V> ConcurrentMap<K, V> createConcurrentMap( * negative or the load factor or concurrencyLevel are * not positive. */ + @Deprecated public static <K, V> ConcurrentMap<K, V> createConcurrentMap( final int initialCapacity, final float loadFactor, final int concurrencyLevel) { - return JdkVersion.getJdkVersion().isUnsafeSupported() - ? new ConcurrentHashMapV8<K, V>(initialCapacity, loadFactor, concurrencyLevel) - : new ConcurrentHashMap<K, V>(initialCapacity, loadFactor, concurrencyLevel); + return new ConcurrentHashMap<>(initialCapacity, loadFactor, concurrencyLevel); } } diff --git a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/LinkedTransferQueue.java b/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/LinkedTransferQueue.java deleted file mode 100644 index 8d98734841..0000000000 --- a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/LinkedTransferQueue.java +++ /dev/null @@ -1,1371 +0,0 @@ -/* - * Copyright (c) 2009, 2018 Oracle and/or its affiliates. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v. 2.0, which is available at - * http://www.eclipse.org/legal/epl-2.0. - * - * This Source Code may also be made available under the following Secondary - * Licenses when the conditions for such availability set forth in the - * Eclipse Public License v. 2.0 are satisfied: GNU General Public License, - * version 2 with the GNU Classpath Exception, which is available at - * https://www.gnu.org/software/classpath/license.html. - * - * SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 - */ - -/** - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ -package org.glassfish.jersey.internal.util.collection; - -import java.util.AbstractQueue; -import java.util.Collection; -import java.util.Iterator; -import java.util.NoSuchElementException; -import java.util.Queue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.LockSupport; - -/** - * An unbounded {@link TransferQueue} based on linked nodes. - * This queue orders elements FIFO (first-in-first-out) with respect - * to any given producer. The head of the queue is that - * element that has been on the queue the longest time for some - * producer. The tail of the queue is that element that has - * been on the queue the shortest time for some producer. - * - *
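The deleted class is a copy of java.util.concurrent.LinkedTransferQueue, so the per-producer FIFO ordering documented above can still be exercised against the JDK class. A minimal, self-contained sketch (all names are local to the example):

    import java.util.concurrent.LinkedTransferQueue;

    public class FifoSketch {
        public static void main(String[] args) {
            LinkedTransferQueue<String> q = new LinkedTransferQueue<>();
            q.offer("first");  // on the queue the longest -> becomes the head
            q.offer("second");
            q.offer("third");  // on the queue the shortest -> stays at the tail
            System.out.println(q.poll()); // first
            System.out.println(q.poll()); // second
            System.out.println(q.poll()); // third
        }
    }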

Beware that, unlike in most collections, the {@code size} method - * is NOT a constant-time operation. Because of the - * asynchronous nature of these queues, determining the current number - * of elements requires a traversal of the elements, and so may report - * inaccurate results if this collection is modified during traversal. - * Additionally, the bulk operations {@code addAll}, - * {@code removeAll}, {@code retainAll}, {@code containsAll}, - * {@code equals}, and {@code toArray} are not guaranteed - * to be performed atomically. For example, an iterator operating - * concurrently with an {@code addAll} operation might view only some - * of the added elements. - * - *

This class and its iterator implement all of the - * optional methods of the {@link Collection} and {@link - * Iterator} interfaces. - * - *

Memory consistency effects: As with other concurrent - * collections, actions in a thread prior to placing an object into a - * {@code LinkedTransferQueue} - * happen-before - * actions subsequent to the access or removal of that element from - * the {@code LinkedTransferQueue} in another thread. - * - *
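A minimal sketch of that happens-before guarantee, written against the JDK's own LinkedTransferQueue (the guarantee is part of the java.util.concurrent memory-consistency contract; the class and field names here are illustrative):

    import java.util.concurrent.LinkedTransferQueue;

    public class VisibilityDemo {
        static String message; // deliberately not volatile

        public static void main(String[] args) throws InterruptedException {
            LinkedTransferQueue<Integer> q = new LinkedTransferQueue<>();
            Thread producer = new Thread(() -> {
                message = "hello"; // ordinary write, happens-before the put...
                q.put(1);
            });
            producer.start();
            q.take();                    // ...which happens-before this take
            System.out.println(message); // guaranteed to print "hello"
            producer.join();
        }
    }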

This class is a member of the - * - * Java Collections Framework. - * - * TODO: Do NOT remove this class. It's referenced from {@link org.glassfish.jersey.internal.util.collection.DataStructures}. - * - * @param the type of elements held in this collection. - * @author Doug Lea - */ -class LinkedTransferQueue extends AbstractQueue - implements TransferQueue, java.io.Serializable { - private static final long serialVersionUID = -3223113410248163686L; - - /* - * *** Overview of Dual Queues with Slack *** - * - * Dual Queues, introduced by Scherer and Scott - * (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are - * (linked) queues in which nodes may represent either data or - * requests. When a thread tries to enqueue a data node, but - * encounters a request node, it instead "matches" and removes it; - * and vice versa for enqueuing requests. Blocking Dual Queues - * arrange that threads enqueuing unmatched requests block until - * other threads provide the match. Dual Synchronous Queues (see - * Scherer, Lea, & Scott - * http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf) - * additionally arrange that threads enqueuing unmatched data also - * block. Dual Transfer Queues support all of these modes, as - * dictated by callers. - * - * A FIFO dual queue may be implemented using a variation of the - * Michael & Scott (M&S) lock-free queue algorithm - * (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf). - * It maintains two pointer fields, "head", pointing to a - * (matched) node that in turn points to the first actual - * (unmatched) queue node (or null if empty); and "tail" that - * points to the last node on the queue (or again null if - * empty). For example, here is a possible queue with four data - * elements: - * - * head tail - * | | - * v v - * M -> U -> U -> U -> U - * - * The M&S queue algorithm is known to be prone to scalability and - * overhead limitations when maintaining (via CAS) these head and - * tail pointers. This has led to the development of - * contention-reducing variants such as elimination arrays (see - * Moir et al http://portal.acm.org/citation.cfm?id=1074013) and - * optimistic back pointers (see Ladan-Mozes & Shavit - * http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf). - * However, the nature of dual queues enables a simpler tactic for - * improving M&S-style implementations when dual-ness is needed. - * - * In a dual queue, each node must atomically maintain its match - * status. While there are other possible variants, we implement - * this here as: for a data-mode node, matching entails CASing an - * "item" field from a non-null data value to null upon match, and - * vice-versa for request nodes, CASing from null to a data - * value. (Note that the linearization properties of this style of - * queue are easy to verify -- elements are made available by - * linking, and unavailable by matching.) Compared to plain M&S - * queues, this property of dual queues requires one additional - * successful atomic operation per enq/deq pair. But it also - * enables lower cost variants of queue maintenance mechanics. (A - * variation of this idea applies even for non-dual queues that - * support deletion of interior elements, such as - * j.u.c.ConcurrentLinkedQueue.) - * - * Once a node is matched, its match status can never again - * change. We may thus arrange that the linked list of them - * contain a prefix of zero or more matched nodes, followed by a - * suffix of zero or more unmatched nodes. 
(Note that we allow - * both the prefix and suffix to be zero length, which in turn - * means that we do not use a dummy header.) If we were not - * concerned with either time or space efficiency, we could - * correctly perform enqueue and dequeue operations by traversing - * from a pointer to the initial node; CASing the item of the - * first unmatched node on match and CASing the next field of the - * trailing node on appends. (Plus some special-casing when - * initially empty). While this would be a terrible idea in - * itself, it does have the benefit of not requiring ANY atomic - * updates on head/tail fields. - * - * We introduce here an approach that lies between the extremes of - * never versus always updating queue (head and tail) pointers. - * This offers a tradeoff between sometimes requiring extra - * traversal steps to locate the first and/or last unmatched - * nodes, versus the reduced overhead and contention of fewer - * updates to queue pointers. For example, a possible snapshot of - * a queue is: - * - * head tail - * | | - * v v - * M -> M -> U -> U -> U -> U - * - * The best value for this "slack" (the targeted maximum distance - * between the value of "head" and the first unmatched node, and - * similarly for "tail") is an empirical matter. We have found - * that using very small constants in the range of 1-3 work best - * over a range of platforms. Larger values introduce increasing - * costs of cache misses and risks of long traversal chains, while - * smaller values increase CAS contention and overhead. - * - * Dual queues with slack differ from plain M&S dual queues by - * virtue of only sometimes updating head or tail pointers when - * matching, appending, or even traversing nodes; in order to - * maintain a targeted slack. The idea of "sometimes" may be - * operationalized in several ways. The simplest is to use a - * per-operation counter incremented on each traversal step, and - * to try (via CAS) to update the associated queue pointer - * whenever the count exceeds a threshold. Another, that requires - * more overhead, is to use random number generators to update - * with a given probability per traversal step. - * - * In any strategy along these lines, because CASes updating - * fields may fail, the actual slack may exceed targeted - * slack. However, they may be retried at any time to maintain - * targets. Even when using very small slack values, this - * approach works well for dual queues because it allows all - * operations up to the point of matching or appending an item - * (hence potentially allowing progress by another thread) to be - * read-only, thus not introducing any further contention. As - * described below, we implement this by performing slack - * maintenance retries only after these points. - * - * As an accompaniment to such techniques, traversal overhead can - * be further reduced without increasing contention of head - * pointer updates: Threads may sometimes shortcut the "next" link - * path from the current "head" node to be closer to the currently - * known first unmatched node, and similarly for tail. Again, this - * may be triggered with using thresholds or randomization. 
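The matching step described above -- CASing a data node's item from a non-null value to null, so that exactly one consumer can win -- can be modelled in isolation with an AtomicReference. This is a toy illustration of the idea, not the deleted implementation:

    import java.util.concurrent.atomic.AtomicReference;

    final class MatchDemo {
        // A data-mode node is "matched" once its item has been CASed to null.
        static final class Node<E> {
            final AtomicReference<E> item;
            Node(E e) { item = new AtomicReference<>(e); }
            E tryMatch() {
                E e = item.get();
                return (e != null && item.compareAndSet(e, null)) ? e : null;
            }
        }

        public static void main(String[] args) {
            Node<String> n = new Node<>("payload");
            System.out.println(n.tryMatch()); // "payload" -- the first match wins
            System.out.println(n.tryMatch()); // null -- already matched
        }
    }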
- * - * These ideas must be further extended to avoid unbounded amounts - * of costly-to-reclaim garbage caused by the sequential "next" - * links of nodes starting at old forgotten head nodes: As first - * described in detail by Boehm - * (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC - * delays noticing that any arbitrarily old node has become - * garbage, all newer dead nodes will also be unreclaimed. - * (Similar issues arise in non-GC environments.) To cope with - * this in our implementation, upon CASing to advance the head - * pointer, we set the "next" link of the previous head to point - * only to itself; thus limiting the length of connected dead lists. - * (We also take similar care to wipe out possibly garbage - * retaining values held in other Node fields.) However, doing so - * adds some further complexity to traversal: If any "next" - * pointer links to itself, it indicates that the current thread - * has lagged behind a head-update, and so the traversal must - * continue from the "head". Traversals trying to find the - * current tail starting from "tail" may also encounter - * self-links, in which case they also continue at "head". - * - * It is tempting in slack-based scheme to not even use CAS for - * updates (similarly to Ladan-Mozes & Shavit). However, this - * cannot be done for head updates under the above link-forgetting - * mechanics because an update may leave head at a detached node. - * And while direct writes are possible for tail updates, they - * increase the risk of long retraversals, and hence long garbage - * chains, which can be much more costly than is worthwhile - * considering that the cost difference of performing a CAS vs - * write is smaller when they are not triggered on each operation - * (especially considering that writes and CASes equally require - * additional GC bookkeeping ("write barriers") that are sometimes - * more costly than the writes themselves because of contention). - * - * *** Overview of implementation *** - * - * We use a threshold-based approach to updates, with a slack - * threshold of two -- that is, we update head/tail when the - * current pointer appears to be two or more steps away from the - * first/last node. The slack value is hard-wired: a path greater - * than one is naturally implemented by checking equality of - * traversal pointers except when the list has only one element, - * in which case we keep slack threshold at one. Avoiding tracking - * explicit counts across method calls slightly simplifies an - * already-messy implementation. Using randomization would - * probably work better if there were a low-quality dirt-cheap - * per-thread one available, but even ThreadLocalRandom is too - * heavy for these purposes. - * - * With such a small slack threshold value, it is not worthwhile - * to augment this with path short-circuiting (i.e., unsplicing - * interior nodes) except in the case of cancellation/removal (see - * below). - * - * We allow both the head and tail fields to be null before any - * nodes are enqueued; initializing upon first append. This - * simplifies some other logic, as well as providing more - * efficient explicit control paths instead of letting JVMs insert - * implicit NullPointerExceptions when they are null. While not - * currently fully implemented, we also leave open the possibility - * of re-nulling these fields when empty (which is complicated to - * arrange, for little benefit.) 
- * - * All enqueue/dequeue operations are handled by the single method - * "xfer" with parameters indicating whether to act as some form - * of offer, put, poll, take, or transfer (each possibly with - * timeout). The relative complexity of using one monolithic - * method outweighs the code bulk and maintenance problems of - * using separate methods for each case. - * - * Operation consists of up to three phases. The first is - * implemented within method xfer, the second in tryAppend, and - * the third in method awaitMatch. - * - * 1. Try to match an existing node - * - * Starting at head, skip already-matched nodes until finding - * an unmatched node of opposite mode, if one exists, in which - * case matching it and returning, also if necessary updating - * head to one past the matched node (or the node itself if the - * list has no other unmatched nodes). If the CAS misses, then - * a loop retries advancing head by two steps until either - * success or the slack is at most two. By requiring that each - * attempt advances head by two (if applicable), we ensure that - * the slack does not grow without bound. Traversals also check - * if the initial head is now off-list, in which case they - * start at the new head. - * - * If no candidates are found and the call was untimed - * poll/offer, (argument "how" is NOW) return. - * - * 2. Try to append a new node (method tryAppend) - * - * Starting at current tail pointer, find the actual last node - * and try to append a new node (or if head was null, establish - * the first node). Nodes can be appended only if their - * predecessors are either already matched or are of the same - * mode. If we detect otherwise, then a new node with opposite - * mode must have been appended during traversal, so we must - * restart at phase 1. The traversal and update steps are - * otherwise similar to phase 1: Retrying upon CAS misses and - * checking for staleness. In particular, if a self-link is - * encountered, then we can safely jump to a node on the list - * by continuing the traversal at current head. - * - * On successful append, if the call was ASYNC, return. - * - * 3. Await match or cancellation (method awaitMatch) - * - * Wait for another thread to match node; instead cancelling if - * the current thread was interrupted or the wait timed out. On - * multiprocessors, we use front-of-queue spinning: If a node - * appears to be the first unmatched node in the queue, it - * spins a bit before blocking. In either case, before blocking - * it tries to unsplice any nodes between the current "head" - * and the first unmatched node. - * - * Front-of-queue spinning vastly improves performance of - * heavily contended queues. And so long as it is relatively - * brief and "quiet", spinning does not much impact performance - * of less-contended queues. During spins threads check their - * interrupt status and generate a thread-local random number - * to decide to occasionally perform a Thread.yield. While - * yield has underdefined specs, we assume that it might help, - * and will not hurt, in limiting impact of spinning on busy - * systems. We also use smaller (1/2) spins for nodes that are - * not known to be front but whose predecessors have not - * blocked -- these "chained" spins avoid artifacts of - * front-of-queue rules which otherwise lead to alternating - * nodes spinning vs blocking. 
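The front-of-queue spin-then-block strategy sketched above can be illustrated with LockSupport directly. A toy, single-waiter version under an arbitrary FRONT_SPINS-style budget (all constants and names here are illustrative, not the deleted code):

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.LockSupport;

    public class SpinThenParkDemo {
        static final AtomicBoolean matched = new AtomicBoolean(false);

        public static void main(String[] args) {
            final Thread waiter = Thread.currentThread();
            new Thread(() -> {
                matched.set(true);          // publish the "match"...
                LockSupport.unpark(waiter); // ...then wake the waiter if it parked
            }).start();

            int spins = 1 << 7;             // cf. FRONT_SPINS above
            while (!matched.get()) {
                if (spins > 0) {
                    --spins;
                    if ((spins & 63) == 0) {
                        Thread.yield();     // occasionally yield while spinning
                    }
                } else {
                    LockSupport.park();     // block; spurious wakeups re-check the loop
                }
            }
            System.out.println("matched");
        }
    }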
Further, front threads that - * represent phase changes (from data to request node or vice - * versa) compared to their predecessors receive additional - * chained spins, reflecting longer paths typically required to - * unblock threads during phase changes. - * - * - * ** Unlinking removed interior nodes ** - * - * In addition to minimizing garbage retention via self-linking - * described above, we also unlink removed interior nodes. These - * may arise due to timed out or interrupted waits, or calls to - * remove(x) or Iterator.remove. Normally, given a node that was - * at one time known to be the predecessor of some node s that is - * to be removed, we can unsplice s by CASing the next field of - * its predecessor if it still points to s (otherwise s must - * already have been removed or is now offlist). But there are two - * situations in which we cannot guarantee to make node s - * unreachable in this way: (1) If s is the trailing node of list - * (i.e., with null next), then it is pinned as the target node - * for appends, so can only be removed later after other nodes are - * appended. (2) We cannot necessarily unlink s given a - * predecessor node that is matched (including the case of being - * cancelled): the predecessor may already be unspliced, in which - * case some previous reachable node may still point to s. - * (For further explanation see Herlihy & Shavit "The Art of - * Multiprocessor Programming" chapter 9). Although, in both - * cases, we can rule out the need for further action if either s - * or its predecessor are (or can be made to be) at, or fall off - * from, the head of list. - * - * Without taking these into account, it would be possible for an - * unbounded number of supposedly removed nodes to remain - * reachable. Situations leading to such buildup are uncommon but - * can occur in practice; for example when a series of short timed - * calls to poll repeatedly time out but never otherwise fall off - * the list because of an untimed call to take at the front of the - * queue. - * - * When these cases arise, rather than always retraversing the - * entire list to find an actual predecessor to unlink (which - * won't help for case (1) anyway), we record a conservative - * estimate of possible unsplice failures (in "sweepVotes"). - * We trigger a full sweep when the estimate exceeds a threshold - * ("SWEEP_THRESHOLD") indicating the maximum number of estimated - * removal failures to tolerate before sweeping through, unlinking - * cancelled nodes that were not unlinked upon initial removal. - * We perform sweeps by the thread hitting threshold (rather than - * background threads or by spreading work to other threads) - * because in the main contexts in which removal occurs, the - * caller is already timed-out, cancelled, or performing a - * potentially O(n) operation (e.g. remove(x)), none of which are - * time-critical enough to warrant the overhead that alternatives - * would impose on other threads. - * - * Because the sweepVotes estimate is conservative, and because - * nodes become unlinked "naturally" as they fall off the head of - * the queue, and because we allow votes to accumulate even while - * sweeps are in progress, there are typically significantly fewer - * such nodes than estimated. Choice of a threshold value - * balances the likelihood of wasted effort and contention, versus - * providing a worst-case bound on retention of interior nodes in - * quiescent queues. 
The value defined below was chosen - * empirically to balance these under various timeout scenarios. - * - * Note that we cannot self-link unlinked interior nodes during - * sweeps. However, the associated garbage chains terminate when - * some successor ultimately falls off the head of the list and is - * self-linked. - */ - - /** - * True if on multiprocessor - */ - private static final boolean MP = - Runtime.getRuntime().availableProcessors() > 1; - - /** - * The number of times to spin (with randomly interspersed calls - * to Thread.yield) on multiprocessor before blocking when a node - * is apparently the first waiter in the queue. See above for - * explanation. Must be a power of two. The value is empirically - * derived -- it works pretty well across a variety of processors, - * numbers of CPUs, and OSes. - */ - private static final int FRONT_SPINS = 1 << 7; - - /** - * The number of times to spin before blocking when a node is - * preceded by another node that is apparently spinning. Also - * serves as an increment to FRONT_SPINS on phase changes, and as - * base average frequency for yielding during spins. Must be a - * power of two. - */ - private static final int CHAINED_SPINS = FRONT_SPINS >>> 1; - - /** - * The maximum number of estimated removal failures (sweepVotes) - * to tolerate before sweeping through the queue unlinking - * cancelled nodes that were not unlinked upon initial - * removal. See above for explanation. The value must be at least - * two to avoid useless sweeps when removing trailing nodes. - */ - static final int SWEEP_THRESHOLD = 32; - - /** - * Queue nodes. Uses Object, not E, for items to allow forgetting - * them after use. Relies heavily on Unsafe mechanics to minimize - * unnecessary ordering constraints: Writes that are intrinsically - * ordered wrt other accesses or CASes use simple relaxed forms. - */ - static final class Node { - final boolean isData; // false if this is a request node - volatile Object item; // initially non-null if isData; CASed to match - volatile Node next; - volatile Thread waiter; // null until waiting - - // CAS methods for fields - final boolean casNext(Node cmp, Node val) { - return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val); - } - - final boolean casItem(Object cmp, Object val) { - // assert cmp == null || cmp.getClass() != Node.class; - return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val); - } - - /** - * Constructs a new node. Uses relaxed write because item can - * only be seen after publication via casNext. - */ - Node(Object item, boolean isData) { - UNSAFE.putObject(this, itemOffset, item); // relaxed write - this.isData = isData; - } - - /** - * Links node to itself to avoid garbage retention. Called - * only after CASing head field, so uses relaxed write. - */ - final void forgetNext() { - UNSAFE.putObject(this, nextOffset, this); - } - - /** - * Sets item to self and waiter to null, to avoid garbage - * retention after matching or cancelling. Uses relaxed writes - * because order is already constrained in the only calling - * contexts: item is forgotten only after volatile/atomic - * mechanics that extract items. Similarly, clearing waiter - * follows either CAS or return from park (if ever parked; - * else we don't care). - */ - final void forgetContents() { - UNSAFE.putObject(this, itemOffset, this); - UNSAFE.putObject(this, waiterOffset, null); - } - - /** - * Returns true if this node has been matched, including the - * case of artificial matches due to cancellation. 
- */ - final boolean isMatched() { - Object x = item; - return (x == this) || ((x == null) == isData); - } - - /** - * Returns true if this is an unmatched request node. - */ - final boolean isUnmatchedRequest() { - return !isData && item == null; - } - - /** - * Returns true if a node with the given mode cannot be - * appended to this node because this node is unmatched and - * has opposite data mode. - */ - final boolean cannotPrecede(boolean haveData) { - boolean d = isData; - Object x; - return d != haveData && (x = item) != this && (x != null) == d; - } - - /** - * Tries to artificially match a data node -- used by remove. - */ - final boolean tryMatchData() { - // assert isData; - Object x = item; - if (x != null && x != this && casItem(x, null)) { - LockSupport.unpark(waiter); - return true; - } - return false; - } - - private static final long serialVersionUID = -3375979862319811754L; - - // Unsafe mechanics - private static final sun.misc.Unsafe UNSAFE; - private static final long itemOffset; - private static final long nextOffset; - private static final long waiterOffset; - - static { - try { - UNSAFE = getUnsafe(); - Class k = Node.class; - itemOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("item")); - nextOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("next")); - waiterOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("waiter")); - } catch (Exception e) { - throw new Error(e); - } - } - } - - /** - * head of the queue; null until first enqueue - */ - transient volatile Node head; - - /** - * tail of the queue; null until first append - */ - private transient volatile Node tail; - - /** - * The number of apparent failures to unsplice removed nodes - */ - private transient volatile int sweepVotes; - - // CAS methods for fields - private boolean casTail(Node cmp, Node val) { - return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val); - } - - private boolean casHead(Node cmp, Node val) { - return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val); - } - - private boolean casSweepVotes(int cmp, int val) { - return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val); - } - - /* - * Possible values for "how" argument in xfer method. - */ - private static final int NOW = 0; // for untimed poll, tryTransfer - private static final int ASYNC = 1; // for offer, put, add - private static final int SYNC = 2; // for transfer, take - private static final int TIMED = 3; // for timed poll, tryTransfer - - @SuppressWarnings("unchecked") - static E cast(Object item) { - // assert item == null || item.getClass() != Node.class; - return (E) item; - } - - /** - * Implements all queuing methods. See above for explanation. 
- * - * @param e the item or null for take - * @param haveData true if this is a put, else a take - * @param how NOW, ASYNC, SYNC, or TIMED - * @param nanos timeout in nanosecs, used only if mode is TIMED - * @return an item if matched, else e - * @throws NullPointerException if haveData mode but e is null - */ - private E xfer(E e, boolean haveData, int how, long nanos) { - if (haveData && (e == null)) - throw new NullPointerException(); - Node s = null; // the node to append, if needed - - retry: - for (; ; ) { // restart on append race - - for (Node h = head, p = h; p != null; ) { // find & match first node - boolean isData = p.isData; - Object item = p.item; - if (item != p && (item != null) == isData) { // unmatched - if (isData == haveData) // can't match - break; - if (p.casItem(item, e)) { // match - for (Node q = p; q != h; ) { - Node n = q.next; // update by 2 unless singleton - if (head == h && casHead(h, n == null ? q : n)) { - h.forgetNext(); - break; - } // advance and retry - if ((h = head) == null || - (q = h.next) == null || !q.isMatched()) - break; // unless slack < 2 - } - LockSupport.unpark(p.waiter); - return LinkedTransferQueue.cast(item); - } - } - Node n = p.next; - p = (p != n) ? n : (h = head); // Use head if p offlist - } - - if (how != NOW) { // No matches available - if (s == null) - s = new Node(e, haveData); - Node pred = tryAppend(s, haveData); - if (pred == null) - continue retry; // lost race vs opposite mode - if (how != ASYNC) - return awaitMatch(s, pred, e, (how == TIMED), nanos); - } - return e; // not waiting - } - } - - /** - * Tries to append node s as tail. - * - * @param s the node to append - * @param haveData true if appending in data mode - * @return null on failure due to losing race with append in - * different mode, else s's predecessor, or s itself if no - * predecessor - */ - private Node tryAppend(Node s, boolean haveData) { - for (Node t = tail, p = t; ; ) { // move p to last node and append - Node n, u; // temps for reads of next & tail - if (p == null && (p = head) == null) { - if (casHead(null, s)) - return s; // initialize - } else if (p.cannotPrecede(haveData)) - return null; // lost race vs opposite mode - else if ((n = p.next) != null) // not last; keep traversing - p = p != t && t != (u = tail) ? (t = u) : // stale tail - (p != n) ? n : null; // restart if off list - else if (!p.casNext(null, s)) - p = p.next; // re-read on CAS failure - else { - if (p != t) { // update if slack now >= 2 - while ((tail != t || !casTail(t, s)) && - (t = tail) != null && - (s = t.next) != null && // advance and retry - (s = s.next) != null && s != t) ; - } - return p; - } - } - } - - /** - * Spins/yields/blocks until node s is matched or caller gives up. - * - * @param s the waiting node - * @param pred the predecessor of s, or s itself if it has no - * predecessor, or null if unknown (the null case does not occur - * in any current calls but may in possible future extensions) - * @param e the comparison value for checking match - * @param timed if true, wait only until timeout elapses - * @param nanos timeout in nanosecs, used only if timed is true - * @return matched item, or e if unmatched on interrupt or timeout - */ - private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) { - long lastTime = timed ? 
System.nanoTime() : 0L; - Thread w = Thread.currentThread(); - int spins = -1; // initialized after first item and cancel checks - ThreadLocalRandom randomYields = null; // bound if needed - - for (; ; ) { - Object item = s.item; - if (item != e) { // matched - // assert item != s; - s.forgetContents(); // avoid garbage - return LinkedTransferQueue.cast(item); - } - if ((w.isInterrupted() || (timed && nanos <= 0)) && - s.casItem(e, s)) { // cancel - unsplice(pred, s); - return e; - } - - if (spins < 0) { // establish spins at/near front - if ((spins = spinsFor(pred, s.isData)) > 0) - randomYields = ThreadLocalRandom.current(); - } else if (spins > 0) { // spin - --spins; - if (randomYields.nextInt(CHAINED_SPINS) == 0) - Thread.yield(); // occasionally yield - } else if (s.waiter == null) { - s.waiter = w; // request unpark then recheck - } else if (timed) { - long now = System.nanoTime(); - if ((nanos -= now - lastTime) > 0) - LockSupport.parkNanos(this, nanos); - lastTime = now; - } else { - LockSupport.park(this); - } - } - } - - /** - * Returns spin/yield value for a node with given predecessor and - * data mode. See above for explanation. - */ - private static int spinsFor(Node pred, boolean haveData) { - if (MP && pred != null) { - if (pred.isData != haveData) // phase change - return FRONT_SPINS + CHAINED_SPINS; - if (pred.isMatched()) // probably at front - return FRONT_SPINS; - if (pred.waiter == null) // pred apparently spinning - return CHAINED_SPINS; - } - return 0; - } - - /* -------------- Traversal methods -------------- */ - - /** - * Returns the successor of p, or the head node if p.next has been - * linked to self, which will only be true if traversing with a - * stale pointer that is now off the list. - */ - final Node succ(Node p) { - Node next = p.next; - return (p == next) ? head : next; - } - - /** - * Returns the first unmatched node of the given mode, or null if - * none. Used by methods isEmpty, hasWaitingConsumer. - */ - private Node firstOfMode(boolean isData) { - for (Node p = head; p != null; p = succ(p)) { - if (!p.isMatched()) - return (p.isData == isData) ? p : null; - } - return null; - } - - /** - * Returns the item in the first unmatched node with isData; or - * null if none. Used by peek. - */ - private E firstDataItem() { - for (Node p = head; p != null; p = succ(p)) { - Object item = p.item; - if (p.isData) { - if (item != null && item != p) - return LinkedTransferQueue.cast(item); - } else if (item == null) - return null; - } - return null; - } - - /** - * Traverses and counts unmatched nodes of the given mode. - * Used by methods size and getWaitingConsumerCount. - */ - private int countOfMode(boolean data) { - int count = 0; - for (Node p = head; p != null; ) { - if (!p.isMatched()) { - if (p.isData != data) - return 0; - if (++count == Integer.MAX_VALUE) // saturated - break; - } - Node n = p.next; - if (n != p) - p = n; - else { - count = 0; - p = head; - } - } - return count; - } - - final class Itr implements Iterator { - private Node nextNode; // next node to return item for - private E nextItem; // the corresponding item - private Node lastRet; // last returned node, to support remove - private Node lastPred; // predecessor to unlink lastRet - - /** - * Moves to next node after prev, or first node if prev null. 
- */ - private void advance(Node prev) { - /* - * To track and avoid buildup of deleted nodes in the face - * of calls to both Queue.remove and Itr.remove, we must - * include variants of unsplice and sweep upon each - * advance: Upon Itr.remove, we may need to catch up links - * from lastPred, and upon other removes, we might need to - * skip ahead from stale nodes and unsplice deleted ones - * found while advancing. - */ - - Node r, b; // reset lastPred upon possible deletion of lastRet - if ((r = lastRet) != null && !r.isMatched()) - lastPred = r; // next lastPred is old lastRet - else if ((b = lastPred) == null || b.isMatched()) - lastPred = null; // at start of list - else { - Node s, n; // help with removal of lastPred.next - while ((s = b.next) != null && - s != b && s.isMatched() && - (n = s.next) != null && n != s) - b.casNext(s, n); - } - - this.lastRet = prev; - - for (Node p = prev, s, n; ; ) { - s = (p == null) ? head : p.next; - if (s == null) - break; - else if (s == p) { - p = null; - continue; - } - Object item = s.item; - if (s.isData) { - if (item != null && item != s) { - nextItem = LinkedTransferQueue.cast(item); - nextNode = s; - return; - } - } else if (item == null) - break; - // assert s.isMatched(); - if (p == null) - p = s; - else if ((n = s.next) == null) - break; - else if (s == n) - p = null; - else - p.casNext(s, n); - } - nextNode = null; - nextItem = null; - } - - Itr() { - advance(null); - } - - public final boolean hasNext() { - return nextNode != null; - } - - public final E next() { - Node p = nextNode; - if (p == null) throw new NoSuchElementException(); - E e = nextItem; - advance(p); - return e; - } - - public final void remove() { - final Node lastRet = this.lastRet; - if (lastRet == null) - throw new IllegalStateException(); - this.lastRet = null; - if (lastRet.tryMatchData()) - unsplice(lastPred, lastRet); - } - } - - /* -------------- Removal methods -------------- */ - - /** - * Unsplices (now or later) the given deleted/cancelled node with - * the given predecessor. - * - * @param pred a node that was at one time known to be the - * predecessor of s, or null or s itself if s is/was at head - * @param s the node to be unspliced - */ - final void unsplice(Node pred, Node s) { - s.forgetContents(); // forget unneeded fields - /* - * See above for rationale. Briefly: if pred still points to - * s, try to unlink s. If s cannot be unlinked, because it is - * trailing node or pred might be unlinked, and neither pred - * nor s are head or offlist, add to sweepVotes, and if enough - * votes have accumulated, sweep. - */ - if (pred != null && pred != s && pred.next == s) { - Node n = s.next; - if (n == null || - (n != s && pred.casNext(s, n) && pred.isMatched())) { - for (; ; ) { // check if at, or could be, head - Node h = head; - if (h == pred || h == s || h == null) - return; // at head or list empty - if (!h.isMatched()) - break; - Node hn = h.next; - if (hn == null) - return; // now empty - if (hn != h && casHead(h, hn)) - h.forgetNext(); // advance head - } - if (pred.next != pred && s.next != s) { // recheck if offlist - for (; ; ) { // sweep now if enough votes - int v = sweepVotes; - if (v < SWEEP_THRESHOLD) { - if (casSweepVotes(v, v + 1)) - break; - } else if (casSweepVotes(v, 0)) { - sweep(); - break; - } - } - } - } - } - } - - /** - * Unlinks matched (typically cancelled) nodes encountered in a - * traversal from head. 
- */ - private void sweep() { - for (Node p = head, s, n; p != null && (s = p.next) != null; ) { - if (!s.isMatched()) - // Unmatched nodes are never self-linked - p = s; - else if ((n = s.next) == null) // trailing node is pinned - break; - else if (s == n) // stale - // No need to also check for p == s, since that implies s == n - p = head; - else - p.casNext(s, n); - } - } - - /** - * Main implementation of remove(Object) - */ - private boolean findAndRemove(Object e) { - if (e != null) { - for (Node pred = null, p = head; p != null; ) { - Object item = p.item; - if (p.isData) { - if (item != null && item != p && e.equals(item) && - p.tryMatchData()) { - unsplice(pred, p); - return true; - } - } else if (item == null) - break; - pred = p; - if ((p = p.next) == pred) { // stale - pred = null; - p = head; - } - } - } - return false; - } - - - /** - * Creates an initially empty {@code LinkedTransferQueue}. - */ - public LinkedTransferQueue() { - } - - /** - * Creates a {@code LinkedTransferQueue} - * initially containing the elements of the given collection, - * added in traversal order of the collection's iterator. - * - * @param c the collection of elements to initially contain - * @throws NullPointerException if the specified collection or any - * of its elements are null - */ - public LinkedTransferQueue(Collection c) { - this(); - addAll(c); - } - - /** - * Inserts the specified element at the tail of this queue. - * As the queue is unbounded, this method will never block. - * - * @throws NullPointerException if the specified element is null - */ - public void put(E e) { - xfer(e, true, ASYNC, 0); - } - - /** - * Inserts the specified element at the tail of this queue. - * As the queue is unbounded, this method will never block or - * return {@code false}. - * - * @return {@code true} (as specified by - * {@link java.util.concurrent.BlockingQueue#offer(Object, long, TimeUnit) - * BlockingQueue.offer}) - * @throws NullPointerException if the specified element is null - */ - public boolean offer(E e, long timeout, TimeUnit unit) { - xfer(e, true, ASYNC, 0); - return true; - } - - /** - * Inserts the specified element at the tail of this queue. - * As the queue is unbounded, this method will never return {@code false}. - * - * @return {@code true} (as specified by {@link Queue#offer}) - * @throws NullPointerException if the specified element is null - */ - public boolean offer(E e) { - xfer(e, true, ASYNC, 0); - return true; - } - - /** - * Inserts the specified element at the tail of this queue. - * As the queue is unbounded, this method will never throw - * {@link IllegalStateException} or return {@code false}. - * - * @return {@code true} (as specified by {@link Collection#add}) - * @throws NullPointerException if the specified element is null - */ - public boolean add(E e) { - xfer(e, true, ASYNC, 0); - return true; - } - - /** - * Transfers the element to a waiting consumer immediately, if possible. - * - *

More precisely, transfers the specified element immediately - * if there exists a consumer already waiting to receive it (in - * {@link #take} or timed {@link #poll(long, TimeUnit) poll}), - * otherwise returning {@code false} without enqueuing the element. - * - * @throws NullPointerException if the specified element is null - */ - public boolean tryTransfer(E e) { - return xfer(e, true, NOW, 0) == null; - } - - /** - * Transfers the element to a consumer, waiting if necessary to do so. - * - *

More precisely, transfers the specified element immediately - * if there exists a consumer already waiting to receive it (in - * {@link #take} or timed {@link #poll(long, TimeUnit) poll}), - * else inserts the specified element at the tail of this queue - * and waits until the element is received by a consumer. - * - * @throws NullPointerException if the specified element is null - */ - public void transfer(E e) throws InterruptedException { - if (xfer(e, true, SYNC, 0) != null) { - Thread.interrupted(); // failure possible only due to interrupt - throw new InterruptedException(); - } - } - - /** - * Transfers the element to a consumer if it is possible to do so - * before the timeout elapses. - * - *
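The hand-off semantics documented for tryTransfer and transfer can be exercised against the JDK's LinkedTransferQueue, which this deleted copy mirrors. A minimal sketch (all names are local to the example):

    import java.util.concurrent.LinkedTransferQueue;
    import java.util.concurrent.TransferQueue;

    public class TransferDemo {
        public static void main(String[] args) throws InterruptedException {
            TransferQueue<String> q = new LinkedTransferQueue<>();

            // No consumer is waiting, so tryTransfer refuses to enqueue.
            System.out.println(q.tryTransfer("a")); // false
            System.out.println(q.size());           // 0

            // transfer() enqueues and blocks until a consumer receives the element.
            Thread consumer = new Thread(() -> {
                try {
                    System.out.println("took: " + q.take());
                } catch (InterruptedException ignored) {
                }
            });
            consumer.start();
            q.transfer("b"); // returns only once the consumer has taken "b"
            consumer.join();
        }
    }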

More precisely, transfers the specified element immediately - * if there exists a consumer already waiting to receive it (in - * {@link #take} or timed {@link #poll(long, TimeUnit) poll}), - * else inserts the specified element at the tail of this queue - * and waits until the element is received by a consumer, - * returning {@code false} if the specified wait time elapses - * before the element can be transferred. - * - * @throws NullPointerException if the specified element is null - */ - public boolean tryTransfer(E e, long timeout, TimeUnit unit) - throws InterruptedException { - if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) - return true; - if (!Thread.interrupted()) - return false; - throw new InterruptedException(); - } - - public E take() throws InterruptedException { - E e = xfer(null, false, SYNC, 0); - if (e != null) - return e; - Thread.interrupted(); - throw new InterruptedException(); - } - - public E poll(long timeout, TimeUnit unit) throws InterruptedException { - E e = xfer(null, false, TIMED, unit.toNanos(timeout)); - if (e != null || !Thread.interrupted()) - return e; - throw new InterruptedException(); - } - - public E poll() { - return xfer(null, false, NOW, 0); - } - - /** - * @throws NullPointerException {@inheritDoc} - * @throws IllegalArgumentException {@inheritDoc} - */ - public int drainTo(Collection c) { - if (c == null) - throw new NullPointerException(); - if (c == this) - throw new IllegalArgumentException(); - int n = 0; - for (E e; (e = poll()) != null; ) { - c.add(e); - ++n; - } - return n; - } - - /** - * @throws NullPointerException {@inheritDoc} - * @throws IllegalArgumentException {@inheritDoc} - */ - public int drainTo(Collection c, int maxElements) { - if (c == null) - throw new NullPointerException(); - if (c == this) - throw new IllegalArgumentException(); - int n = 0; - for (E e; n < maxElements && (e = poll()) != null; ) { - c.add(e); - ++n; - } - return n; - } - - /** - * Returns an iterator over the elements in this queue in proper sequence. - * The elements will be returned in order from first (head) to last (tail). - * - *

The returned iterator is a "weakly consistent" iterator that - * will never throw {@link java.util.ConcurrentModificationException - * ConcurrentModificationException}, and guarantees to traverse - * elements as they existed upon construction of the iterator, and - * may (but is not guaranteed to) reflect any modifications - * subsequent to construction. - * - * @return an iterator over the elements in this queue in proper sequence - */ - public Iterator iterator() { - return new Itr(); - } - - public E peek() { - return firstDataItem(); - } - - /** - * Returns {@code true} if this queue contains no elements. - * - * @return {@code true} if this queue contains no elements - */ - public boolean isEmpty() { - for (Node p = head; p != null; p = succ(p)) { - if (!p.isMatched()) - return !p.isData; - } - return true; - } - - public boolean hasWaitingConsumer() { - return firstOfMode(false) != null; - } - - /** - * Returns the number of elements in this queue. If this queue - * contains more than {@code Integer.MAX_VALUE} elements, returns - * {@code Integer.MAX_VALUE}. - * - *

Beware that, unlike in most collections, this method is - * NOT a constant-time operation. Because of the - * asynchronous nature of these queues, determining the current - * number of elements requires an O(n) traversal. - * - * @return the number of elements in this queue - */ - public int size() { - return countOfMode(true); - } - - public int getWaitingConsumerCount() { - return countOfMode(false); - } - - /** - * Removes a single instance of the specified element from this queue, - * if it is present. More formally, removes an element {@code e} such - * that {@code o.equals(e)}, if this queue contains one or more such - * elements. - * Returns {@code true} if this queue contained the specified element - * (or equivalently, if this queue changed as a result of the call). - * - * @param o element to be removed from this queue, if present - * @return {@code true} if this queue changed as a result of the call - */ - public boolean remove(Object o) { - return findAndRemove(o); - } - - /** - * Returns {@code true} if this queue contains the specified element. - * More formally, returns {@code true} if and only if this queue contains - * at least one element {@code e} such that {@code o.equals(e)}. - * - * @param o object to be checked for containment in this queue - * @return {@code true} if this queue contains the specified element - */ - public boolean contains(Object o) { - if (o == null) return false; - for (Node p = head; p != null; p = succ(p)) { - Object item = p.item; - if (p.isData) { - if (item != null && item != p && o.equals(item)) - return true; - } else if (item == null) - break; - } - return false; - } - - /** - * Always returns {@code Integer.MAX_VALUE} because a - * {@code LinkedTransferQueue} is not capacity constrained. - * - * @return {@code Integer.MAX_VALUE} (as specified by - * {@link java.util.concurrent.BlockingQueue#remainingCapacity() - * BlockingQueue.remainingCapacity}) - */ - public int remainingCapacity() { - return Integer.MAX_VALUE; - } - - /** - * Saves the state to a stream (that is, serializes it). - * - * @param s the stream - * @serialData All of the elements (each an {@code E}) in - * the proper order, followed by a null - */ - private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { - s.defaultWriteObject(); - for (E e : this) - s.writeObject(e); - // Use trailing null as sentinel - s.writeObject(null); - } - - /** - * Reconstitutes the Queue instance from a stream (that is, - * deserializes it). - * - * @param s the stream - */ - private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { - s.defaultReadObject(); - for (; ; ) { - @SuppressWarnings("unchecked") - E item = (E) s.readObject(); - if (item == null) - break; - else - offer(item); - } - } - - // Unsafe mechanics - - private static final sun.misc.Unsafe UNSAFE; - private static final long headOffset; - private static final long tailOffset; - private static final long sweepVotesOffset; - - static { - try { - UNSAFE = getUnsafe(); - Class k = LinkedTransferQueue.class; - headOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("head")); - tailOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("tail")); - sweepVotesOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("sweepVotes")); - } catch (Exception e) { - throw new Error(e); - } - } - - /** - * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. - * Replace with a simple call to Unsafe.getUnsafe when integrating - * into a jdk. 
- * - * @return a sun.misc.Unsafe - */ - static sun.misc.Unsafe getUnsafe() { - try { - return sun.misc.Unsafe.getUnsafe(); - } catch (SecurityException tryReflectionInstead) { - } - try { - return java.security.AccessController.doPrivileged - (new java.security.PrivilegedExceptionAction() { - public sun.misc.Unsafe run() throws Exception { - Class k = sun.misc.Unsafe.class; - for (java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - Object x = f.get(null); - if (k.isInstance(x)) - return k.cast(x); - } - throw new NoSuchFieldError("the Unsafe"); - } - }); - } catch (java.security.PrivilegedActionException e) { - throw new RuntimeException("Could not initialize intrinsics", - e.getCause()); - } - } -} diff --git a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ThreadLocalRandom.java b/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ThreadLocalRandom.java deleted file mode 100644 index aaf5017731..0000000000 --- a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/ThreadLocalRandom.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (c) 2009, 2018 Oracle and/or its affiliates. All rights reserved. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License v. 2.0, which is available at - * http://www.eclipse.org/legal/epl-2.0. - * - * This Source Code may also be made available under the following Secondary - * Licenses when the conditions for such availability set forth in the - * Eclipse Public License v. 2.0 are satisfied: GNU General Public License, - * version 2 with the GNU Classpath Exception, which is available at - * https://www.gnu.org/software/classpath/license.html. - * - * SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 - */ - -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ -package org.glassfish.jersey.internal.util.collection; - -import java.util.Random; - -/** - * A random number generator isolated to the current thread. Like the - * global {@link java.util.Random} generator used by the {@link - * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized - * with an internally generated seed that may not otherwise be - * modified. When applicable, use of {@code ThreadLocalRandom} rather - * than shared {@code Random} objects in concurrent programs will - * typically encounter much less overhead and contention. Use of - * {@code ThreadLocalRandom} is particularly appropriate when multiple - * tasks (for example, each a {@link ForkJoinTask}) use random numbers - * in parallel in thread pools. - * - *

Usages of this class should typically be of the form: - * {@code ThreadLocalRandom.current().nextX(...)} (where - * {@code X} is {@code Int}, {@code Long}, etc). - * When all usages are of this form, it is never possible to - * accidently share a {@code ThreadLocalRandom} across multiple threads. - * - *
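Since JDK 7, java.util.concurrent.ThreadLocalRandom has shipped with the same contract, which is why this copy can be deleted; the usage form described above works unchanged against the JDK class. For instance:

    import java.util.concurrent.ThreadLocalRandom;

    public class RandomDemo {
        public static void main(String[] args) {
            int die = ThreadLocalRandom.current().nextInt(1, 7);        // 1..6
            long id = ThreadLocalRandom.current().nextLong(1_000_000L); // 0..999_999
            double scale = ThreadLocalRandom.current().nextDouble(0.5, 2.0);
            System.out.println(die + " " + id + " " + scale);
        }
    }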

This class also provides additional commonly used bounded random - * generation methods. - * - * @author Doug Lea - */ -class ThreadLocalRandom extends Random { - // same constants as Random, but must be redeclared because private - private static final long multiplier = 0x5DEECE66DL; - private static final long addend = 0xBL; - private static final long mask = (1L << 48) - 1; - - /** - * The random seed. We can't use super.seed. - */ - private long rnd; - - /** - * Initialization flag to permit calls to setSeed to succeed only - * while executing the Random constructor. We can't allow others - * since it would cause setting seed in one part of a program to - * unintentionally impact other usages by the thread. - */ - boolean initialized; - - // Padding to help avoid memory contention among seed updates in - // different TLRs in the common case that they are located near - // each other. - private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; - - /** - * The actual ThreadLocal - */ - private static final ThreadLocal localRandom = - new ThreadLocal() { - protected ThreadLocalRandom initialValue() { - return new ThreadLocalRandom(); - } - }; - - - /** - * Constructor called only by localRandom.initialValue. - */ - ThreadLocalRandom() { - super(); - initialized = true; - } - - /** - * Returns the current thread's {@code ThreadLocalRandom}. - * - * @return the current thread's {@code ThreadLocalRandom} - */ - public static ThreadLocalRandom current() { - return localRandom.get(); - } - - /** - * Throws {@code UnsupportedOperationException}. Setting seeds in - * this generator is not supported. - * - * @throws UnsupportedOperationException always - */ - public void setSeed(long seed) { - if (initialized) - throw new UnsupportedOperationException(); - rnd = (seed ^ multiplier) & mask; - } - - protected int next(int bits) { - rnd = (rnd * multiplier + addend) & mask; - return (int) (rnd >>> (48 - bits)); - } - - /** - * Returns a pseudorandom, uniformly distributed value between the - * given least value (inclusive) and bound (exclusive). - * - * @param least the least value returned - * @param bound the upper bound (exclusive) - * @return the next value - * @throws IllegalArgumentException if least greater than or equal - * to bound - */ - public int nextInt(int least, int bound) { - if (least >= bound) - throw new IllegalArgumentException(); - return nextInt(bound - least) + least; - } - - /** - * Returns a pseudorandom, uniformly distributed value - * between 0 (inclusive) and the specified value (exclusive). - * - * @param n the bound on the random number to be returned. Must be - * positive. - * @return the next value - * @throws IllegalArgumentException if n is not positive - */ - public long nextLong(long n) { - if (n <= 0) - throw new IllegalArgumentException("n must be positive"); - // Divide n by two until small enough for nextInt. On each - // iteration (at most 31 of them but usually much less), - // randomly choose both whether to include high bit in result - // (offset) and whether to continue with the lower vs upper - // half (which makes a difference only if odd). - long offset = 0; - while (n >= Integer.MAX_VALUE) { - int bits = next(2); - long half = n >>> 1; - long nextn = ((bits & 2) == 0) ? half : n - half; - if ((bits & 1) == 0) - offset += n - nextn; - n = nextn; - } - return offset + nextInt((int) n); - } - - /** - * Returns a pseudorandom, uniformly distributed value between the - * given least value (inclusive) and bound (exclusive). 
- * - * @param least the least value returned - * @param bound the upper bound (exclusive) - * @return the next value - * @throws IllegalArgumentException if least greater than or equal - * to bound - */ - public long nextLong(long least, long bound) { - if (least >= bound) - throw new IllegalArgumentException(); - return nextLong(bound - least) + least; - } - - /** - * Returns a pseudorandom, uniformly distributed {@code double} value - * between 0 (inclusive) and the specified value (exclusive). - * - * @param n the bound on the random number to be returned. Must be - * positive. - * @return the next value - * @throws IllegalArgumentException if n is not positive - */ - public double nextDouble(double n) { - if (n <= 0) - throw new IllegalArgumentException("n must be positive"); - return nextDouble() * n; - } - - /** - * Returns a pseudorandom, uniformly distributed value between the - * given least value (inclusive) and bound (exclusive). - * - * @param least the least value returned - * @param bound the upper bound (exclusive) - * @return the next value - * @throws IllegalArgumentException if least greater than or equal - * to bound - */ - public double nextDouble(double least, double bound) { - if (least >= bound) - throw new IllegalArgumentException(); - return nextDouble() * (bound - least) + least; - } - - private static final long serialVersionUID = -5851777807851030925L; -} - diff --git a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/TransferQueue.java b/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/TransferQueue.java index ce9e3e961d..265a0dc9aa 100644 --- a/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/TransferQueue.java +++ b/core-common/src/main/java/org/glassfish/jersey/internal/util/collection/TransferQueue.java @@ -52,6 +52,7 @@ * @param the type of elements held in this collection * @author Doug Lea */ +@Deprecated public interface TransferQueue extends BlockingQueue { /** * Transfers the specified element if there exists a consumer diff --git a/core-common/src/main/java/org/glassfish/jersey/message/internal/MessageBodyFactory.java b/core-common/src/main/java/org/glassfish/jersey/message/internal/MessageBodyFactory.java index 5b78763dd4..3c16126983 100644 --- a/core-common/src/main/java/org/glassfish/jersey/message/internal/MessageBodyFactory.java +++ b/core-common/src/main/java/org/glassfish/jersey/message/internal/MessageBodyFactory.java @@ -36,6 +36,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; import java.util.logging.Level; import java.util.logging.Logger; @@ -180,19 +181,19 @@ private int compare(List mediaTypeList1, List mediaTypeLis private static final int LOOKUP_CACHE_INITIAL_CAPACITY = 32; private static final float LOOKUP_CACHE_LOAD_FACTOR = 0.75f; - private final Map, List> mbrTypeLookupCache = DataStructures.createConcurrentMap( + private final Map, List> mbrTypeLookupCache = new ConcurrentHashMap<>( LOOKUP_CACHE_INITIAL_CAPACITY, LOOKUP_CACHE_LOAD_FACTOR, DataStructures.DEFAULT_CONCURENCY_LEVEL); - private final Map, List> mbwTypeLookupCache = DataStructures.createConcurrentMap( + private final Map, List> mbwTypeLookupCache = new ConcurrentHashMap<>( LOOKUP_CACHE_INITIAL_CAPACITY, LOOKUP_CACHE_LOAD_FACTOR, DataStructures.DEFAULT_CONCURENCY_LEVEL); - private final Map, List> typeToMediaTypeReadersCache = DataStructures.createConcurrentMap( + private final Map, List> 
             LOOKUP_CACHE_INITIAL_CAPACITY, LOOKUP_CACHE_LOAD_FACTOR, DataStructures.DEFAULT_CONCURENCY_LEVEL);
-    private final Map, List> typeToMediaTypeWritersCache = DataStructures.createConcurrentMap(
+    private final Map, List> typeToMediaTypeWritersCache = new ConcurrentHashMap<>(
             LOOKUP_CACHE_INITIAL_CAPACITY, LOOKUP_CACHE_LOAD_FACTOR, DataStructures.DEFAULT_CONCURENCY_LEVEL);
-    private final Map> mbrLookupCache = DataStructures.createConcurrentMap(
+    private final Map> mbrLookupCache = new ConcurrentHashMap<>(
             LOOKUP_CACHE_INITIAL_CAPACITY, LOOKUP_CACHE_LOAD_FACTOR, DataStructures.DEFAULT_CONCURENCY_LEVEL);
-    private final Map> mbwLookupCache = DataStructures.createConcurrentMap(
+    private final Map> mbwLookupCache = new ConcurrentHashMap<>(
             LOOKUP_CACHE_INITIAL_CAPACITY, LOOKUP_CACHE_LOAD_FACTOR, DataStructures.DEFAULT_CONCURENCY_LEVEL);
 
     /**
diff --git a/core-common/src/test/java/org/glassfish/jersey/internal/util/JdkVersionCompareTest.java b/core-common/src/test/java/org/glassfish/jersey/internal/util/JdkVersionCompareTest.java
new file mode 100644
index 0000000000..62c1021999
--- /dev/null
+++ b/core-common/src/test/java/org/glassfish/jersey/internal/util/JdkVersionCompareTest.java
@@ -0,0 +1,35 @@
+package org.glassfish.jersey.internal.util;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+public class JdkVersionCompareTest {
+
+    @Test
+    public void testCompareJava8WithJava9() {
+        JdkVersion java8 = JdkVersion.parseVersion("1.8.0_141");
+        JdkVersion java9 = JdkVersion.parseVersion("9");
+
+        assertEquals(1, java9.compareTo(java8));
+        assertEquals(-1, java8.compareTo(java9));
+    }
+
+    @Test
+    public void testCompareJava8Updates() {
+        JdkVersion java8u141 = JdkVersion.parseVersion("1.8.0_141");
+        JdkVersion java8u152 = JdkVersion.parseVersion("1.8.0_152");
+
+        assertEquals(1, java8u152.compareTo(java8u141));
+        assertEquals(-1, java8u141.compareTo(java8u152));
+    }
+
+    @Test
+    public void testCompareJava9Versions() {
+        JdkVersion java900 = JdkVersion.parseVersion("9.0.0");
+        JdkVersion java901 = JdkVersion.parseVersion("9.0.1");
+
+        assertEquals(1, java901.compareTo(java900));
+        assertEquals(-1, java900.compareTo(java901));
+    }
+}
diff --git a/core-common/src/test/java/org/glassfish/jersey/internal/util/JdkVersionParseTest.java b/core-common/src/test/java/org/glassfish/jersey/internal/util/JdkVersionParseTest.java
new file mode 100644
index 0000000000..fc14fbbd34
--- /dev/null
+++ b/core-common/src/test/java/org/glassfish/jersey/internal/util/JdkVersionParseTest.java
@@ -0,0 +1,52 @@
+package org.glassfish.jersey.internal.util;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+@RunWith(Parameterized.class)
+public class JdkVersionParseTest {
+
+    @Parameterized.Parameter
+    public String rawVersionString;
+    @Parameterized.Parameter(1)
+    public int expectedMajorVersion;
+    @Parameterized.Parameter(2)
+    public int expectedMinorVersion;
+    @Parameterized.Parameter(3)
+    public int expectedMaintenanceVersion;
+    @Parameterized.Parameter(4)
+    public int expectedUpdateVersion;
+
+    @Parameterized.Parameters
+    public static Collection<Object[]> provideVersions() {
+        return Arrays.asList(new Object[][]{
+                // Java 8
+                {"1.8.0_141-b15", 1, 8, 0, 141},
+                {"1.8.0_141", 1, 8, 0, 141},
+
+                // Java 9 and above
+                {"9", 9, 0, 0, 0},
+                {"9.0.3", 9, 0, 3, 0},
+                {"11", 11, 0, 0, 0},
+
+                // malformed version
+                {"invalid version", -1, -1, -1, -1}
+        });
+    }
+
+    @Test
+    public void testParseVersion() {
+        JdkVersion version = JdkVersion.parseVersion(rawVersionString);
+
+        assertEquals(expectedMajorVersion, version.getMajor());
+        assertEquals(expectedMinorVersion, version.getMinor());
+        assertEquals(expectedMaintenanceVersion, version.getMaintenance());
+        assertEquals(expectedUpdateVersion, version.getUpdate());
+    }
+}
diff --git a/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/EntityGraphProviderImpl.java b/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/EntityGraphProviderImpl.java
index ec67e9ee3f..ee07977e7e 100644
--- a/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/EntityGraphProviderImpl.java
+++ b/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/EntityGraphProviderImpl.java
@@ -19,9 +19,9 @@
 import java.util.Collections;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.glassfish.jersey.internal.util.collection.DataStructures;
 import org.glassfish.jersey.message.filtering.spi.EntityGraph;
 import org.glassfish.jersey.message.filtering.spi.EntityGraphProvider;
 import org.glassfish.jersey.message.filtering.spi.ObjectGraph;
@@ -33,8 +33,8 @@
  */
 final class EntityGraphProviderImpl implements EntityGraphProvider {
 
-    private final ConcurrentMap<Class<?>, EntityGraph> writerClassToGraph = DataStructures.createConcurrentMap();
-    private final ConcurrentMap<Class<?>, EntityGraph> readerClassToGraph = DataStructures.createConcurrentMap();
+    private final ConcurrentMap<Class<?>, EntityGraph> writerClassToGraph = new ConcurrentHashMap<>();
+    private final ConcurrentMap<Class<?>, EntityGraph> readerClassToGraph = new ConcurrentHashMap<>();
 
     @Override
     public EntityGraph getOrCreateEntityGraph(final Class<?> entityClass, final boolean forWriter) {
diff --git a/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/ServerScopeProvider.java b/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/ServerScopeProvider.java
index 872317b4ff..753acb4167 100644
--- a/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/ServerScopeProvider.java
+++ b/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/ServerScopeProvider.java
@@ -21,6 +21,7 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
 import javax.ws.rs.ConstrainedTo;
@@ -34,7 +35,6 @@
 import javax.inject.Singleton;
 
 import org.glassfish.jersey.internal.inject.InjectionManager;
-import org.glassfish.jersey.internal.util.collection.DataStructures;
 import org.glassfish.jersey.server.ExtendedUriInfo;
 import org.glassfish.jersey.server.model.Invocable;
 import org.glassfish.jersey.server.model.ResourceMethod;
@@ -64,7 +64,7 @@ class ServerScopeProvider extends CommonScopeProvider {
     @Inject
     public ServerScopeProvider(final Configuration config, final InjectionManager injectionManager) {
         super(config, injectionManager);
-        this.uriToContexts = DataStructures.createConcurrentMap();
+        this.uriToContexts = new ConcurrentHashMap<>();
     }
 
     @Override
diff --git a/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/spi/FilteringHelper.java b/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/spi/FilteringHelper.java
index a0f2fa1843..6704754891 100644
--- a/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/spi/FilteringHelper.java
+++ b/ext/entity-filtering/src/main/java/org/glassfish/jersey/message/filtering/spi/FilteringHelper.java
@@ -28,12 +28,12 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
 import javax.xml.bind.JAXBElement;
 
 import org.glassfish.jersey.internal.util.ReflectionHelper;
-import org.glassfish.jersey.internal.util.collection.DataStructures;
 
 /**
  * SPI utility methods for entity filtering.
@@ -47,7 +47,7 @@ public final class FilteringHelper {
      */
     public static final Annotation[] EMPTY_ANNOTATIONS = new Annotation[0];
 
-    private static final ConcurrentMap> ENTITY_CLASSES = DataStructures.createConcurrentMap();
+    private static final ConcurrentMap> ENTITY_CLASSES = new ConcurrentHashMap<>();
 
     /**
      * Determine whether given class is filterable by entity-filtering. Filterable classes are all classes that are not primitives
diff --git a/ext/mvc/src/main/java/org/glassfish/jersey/server/mvc/spi/AbstractTemplateProcessor.java b/ext/mvc/src/main/java/org/glassfish/jersey/server/mvc/spi/AbstractTemplateProcessor.java
index 049ea9a533..55d67c2517 100644
--- a/ext/mvc/src/main/java/org/glassfish/jersey/server/mvc/spi/AbstractTemplateProcessor.java
+++ b/ext/mvc/src/main/java/org/glassfish/jersey/server/mvc/spi/AbstractTemplateProcessor.java
@@ -30,6 +30,7 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.function.Function;
 import java.util.logging.Level;
@@ -45,7 +46,6 @@
 
 import org.glassfish.jersey.internal.util.PropertiesHelper;
 import org.glassfish.jersey.internal.util.ReflectionHelper;
-import org.glassfish.jersey.internal.util.collection.DataStructures;
 import org.glassfish.jersey.internal.util.collection.Value;
 import org.glassfish.jersey.server.mvc.MvcFeature;
 import org.glassfish.jersey.server.mvc.internal.LocalizationMessages;
@@ -116,7 +116,7 @@ public AbstractTemplateProcessor(final Configuration config, final ServletContex
         if (cacheEnabled == null) {
             cacheEnabled = PropertiesHelper.getValue(properties, MvcFeature.CACHE_TEMPLATES, false, null);
         }
-        this.cache = cacheEnabled ? DataStructures.createConcurrentMap() : null;
+        this.cache = cacheEnabled ? new ConcurrentHashMap<>() : null;
         this.encoding = TemplateHelper.getTemplateOutputEncoding(config, suffix);
     }
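
Note on the migration pattern (an illustrative sketch, not part of the diff
above): every DataStructures call site touched by this patch changes in the
same mechanical way. The CacheHolder class below is hypothetical -- its name,
fields, and type arguments are placeholders, not code from any file in this
patch:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.LinkedTransferQueue;

    class CacheHolder {
        // Before: DataStructures.createConcurrentMap() picked between the
        // bundled ConcurrentHashMapV8 backport and the platform class at
        // runtime, based on the detected JDK version:
        //     private final ConcurrentMap<String, Object> cache =
        //             DataStructures.createConcurrentMap();

        // After: on a JDK 8+ baseline the platform class is always
        // available, so the factory indirection is dropped. The sizing
        // overload maps onto the matching platform constructor unchanged;
        // since JDK 8 the concurrencyLevel argument is only a sizing hint.
        private final ConcurrentMap<String, Object> cache =
                new ConcurrentHashMap<>(32, 0.75f, 16);

        // The queue replacement is analogous: java.util.concurrent has
        // shipped LinkedTransferQueue since JDK 7, which is why the bundled
        // copy is deleted and the local TransferQueue interface deprecated.
        private final BlockingQueue<Object> buffers = new LinkedTransferQueue<>();
    }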