ConcurrentHashMap Source Code in JDK 8

Background

The previous post, "HashMap Source Code in JDK 8", walked through HashMap; this time we look at ConcurrentHashMap.

ConcurrentHashMap Source Code
/**
 * Maps the specified key to the specified value in this table.
 * Neither the key nor the value can be null.
 *
 * <p>The value can be retrieved by calling the {@code get} method
 * with a key that is equal to the original key.
 *
 * @param key key with which the specified value is to be associated
 * @param value value to be associated with the specified key
 * @return the previous value associated with {@code key}, or
 *         {@code null} if there was no mapping for {@code key}
 * @throws NullPointerException if the specified key or value is null
 */
public V put(K key, V value) {
    return putVal(key, value, false);
}

/** Implementation for put and putIfAbsent */
final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException();
    int hash = spread(key.hashCode());
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        // Table is empty: lazily initialize it.
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            // The bin is empty: try to CAS the new node in without locking.
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break;                   // no lock when adding to empty bin
        }
        else if ((fh = f.hash) == MOVED)
            // The bin is being moved (resize in progress): help with the transfer.
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            synchronized (f) { // lock the first node of the bin
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    addCount(1L, binCount);
    return null;
}

/**
 * Returns the value to which the specified key is mapped,
 * or {@code null} if this map contains no mapping for the key.
 *
 * <p>More formally, if this map contains a mapping from a key
 * {@code k} to a value {@code v} such that {@code key.equals(k)},
 * then this method returns {@code v}; otherwise it returns
 * {@code null}. (There can be at most one such mapping.)
 *
 * @throws NullPointerException if the specified key is null
 */
public V get(Object key) {
    Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
    // Compute the spread hash.
    int h = spread(key.hashCode());
    // Table is non-empty and the target bin is non-empty.
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (e = tabAt(tab, (n - 1) & h)) != null) {
        if ((eh = e.hash) == h) {
            // Check the first node in the bin.
            if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                return e.val;
        }
        else if (eh < 0)
            // eh < 0: a special node (tree bin or forwarding node); delegate to find.
            return (p = e.find(h, key)) != null ? p.val : null;
        // Otherwise walk the rest of the list.
        while ((e = e.next) != null) {
            if (e.hash == h &&
                ((ek = e.key) == key ||
                 (ek != null && key.equals(ek))))
                return e.val;
        }
    }
    return null;
}
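To see these paths in action, here is a small, self-contained usage sketch (the class name and values are mine, for illustration only):

import java.util.concurrent.ConcurrentHashMap;

public class CHMPutGetDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);                  // empty bin: inserted via CAS, no lock taken
        map.put("a", 2);                  // same key: value replaced, returns 1
        map.putIfAbsent("a", 3);          // onlyIfAbsent == true: keeps 2
        System.out.println(map.get("a")); // prints 2; the read takes no lock
        // map.put(null, 1);              // would throw NullPointerException
    }
}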
ConcurrentHashMap is far too much code to paste in full; after several failed attempts, only a few methods are annotated above.
Takeaways
ConcurrentHashMap minimizes locking by leaning on a few atomic operations: volatile reads (tabAt), CAS writes into empty bins (casTabAt), and a per-bin synchronized block only when an occupied bin must be modified.
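A minimal sketch of that lock-free fast path, using AtomicReferenceArray in place of the Unsafe intrinsics the real class uses (the class, its names, and the single-value "bins" are my simplifications, not JDK code):

import java.util.concurrent.atomic.AtomicReferenceArray;

// Simplified: one entry per slot, no lists, no resizing. Only meant to show
// how "CAS into an empty slot, lock only on collision" avoids most locking.
class CasFirstTable<V> {
    private final AtomicReferenceArray<V> slots = new AtomicReferenceArray<>(16);

    boolean tryInsert(int hash, V value) {
        int i = (slots.length() - 1) & hash;
        // Fast path: publish the value atomically if the slot is still empty.
        if (slots.compareAndSet(i, null, value))
            return true;
        // Slow path: the real map would now lock the first node of the bin.
        return false;
    }

    V read(int hash) {
        return slots.get((slots.length() - 1) & hash); // volatile read, no lock
    }
}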
I have not fully understood the resizing code yet, especially how get keeps working while a resize is in progress; I will keep studying it.
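The part I can piece together so far comes from the class's own overview comment: once a bin has been transferred, its slot in the old table holds a forwarding node (hash == MOVED) that carries a reference to the new table, and reads follow it there. A rough sketch of that idea (my own simplification, not the JDK code):

// Sketch only: how a read can be redirected mid-resize. The real
// ForwardingNode keeps a reference to nextTable and re-runs the lookup
// there; a chain of forwardings just makes it loop again.
class ForwardingSketch {
    static final int MOVED = -1;

    static <K, V> V lookup(Node<K, V>[] tab, int h, Object key) {
        Node<K, V> e = tab[(tab.length - 1) & h];
        while (e != null) {
            if (e.hash == MOVED)                       // bin already moved:
                return lookup(e.nextTable, h, key);    // retry in the new table
            if (e.hash == h && key.equals(e.key))
                return e.val;
            e = e.next;
        }
        return null;
    }

    static class Node<K, V> {
        int hash; K key; V val; Node<K, V> next;
        Node<K, V>[] nextTable; // only set on the forwarding placeholder
    }
}

The rest of the source listing I did manage to paste follows, for reference.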
/*
 * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
 */

/*
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

package java.util.concurrent;

import java.io.ObjectStreamField;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
import java.util.function.DoubleBinaryOperator;
import java.util.function.Function;
import java.util.function.IntBinaryOperator;
import java.util.function.LongBinaryOperator;
import java.util.function.ToDoubleBiFunction;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntBiFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongBiFunction;
import java.util.function.ToLongFunction;
import java.util.stream.Stream;
/**
 * A hash table supporting full concurrency of retrievals and high expected
 * concurrency for updates. This class obeys the same functional specification as
 * {@link java.util.Hashtable}, and includes versions of methods corresponding to
 * each method of {@code Hashtable}. However, even though all operations are
 * thread-safe, retrieval operations do <em>not</em> entail locking, and there is
 * <em>not</em> any support for locking the entire table in a way that prevents all
 * access. This class is fully interoperable with {@code Hashtable} in programs that
 * rely on its thread safety but not on its synchronization details.
 *
 * <p>Retrieval operations (including {@code get}) generally do not block, so may
 * overlap with update operations (including {@code put} and {@code remove}).
 * Retrievals reflect the results of the most recently <em>completed</em> update
 * operations holding upon their onset. (More formally, an update operation for a
 * given key bears a <em>happens-before</em> relation with any (non-null) retrieval
 * for that key reporting the updated value.) For aggregate operations such as
 * {@code putAll} and {@code clear}, concurrent retrievals may reflect insertion or
 * removal of only some entries. Similarly, Iterators, Spliterators and Enumerations
 * return elements reflecting the state of the hash table at some point at or since
 * the creation of the iterator/enumeration. They do <em>not</em> throw
 * {@link java.util.ConcurrentModificationException ConcurrentModificationException}.
 * However, iterators are designed to be used by only one thread at a time. Bear in
 * mind that the results of aggregate status methods including {@code size},
 * {@code isEmpty}, and {@code containsValue} are typically useful only when a map
 * is not undergoing concurrent updates in other threads. Otherwise the results of
 * these methods reflect transient states that may be adequate for monitoring or
 * estimation purposes, but not for program control.
 *
 * <p>The table is dynamically expanded when there are too many collisions (i.e.,
 * keys that have distinct hash codes but fall into the same slot modulo the table
 * size), with the expected average effect of maintaining roughly two bins per
 * mapping (corresponding to a 0.75 load factor threshold for resizing). There may
 * be much variance around this average as mappings are added and removed, but
 * overall, this maintains a commonly accepted time/space tradeoff for hash tables.
 * However, resizing this or any other kind of hash table may be a relatively slow
 * operation. When possible, it is a good idea to provide a size estimate as an
 * optional {@code initialCapacity} constructor argument. An additional optional
 * {@code loadFactor} constructor argument provides a further means of customizing
 * initial table capacity by specifying the table density to be used in calculating
 * the amount of space to allocate for the given number of elements. Also, for
 * compatibility with previous versions of this class, constructors may optionally
 * specify an expected {@code concurrencyLevel} as an additional hint for internal
 * sizing. Note that using many keys with exactly the same {@code hashCode()} is a
 * sure way to slow down performance of any hash table. To ameliorate impact, when
 * keys are {@link Comparable}, this class may use comparison order among keys to
 * help break ties.
 *
 * <p>A {@link Set} projection of a ConcurrentHashMap may be created (using
 * {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed (using
 * {@link #keySet(Object)}) when only keys are of interest, and the mapped values
 * are (perhaps transiently) not used or all take the same mapping value.
 *
 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a form of
 * histogram or multiset) by using {@link java.util.concurrent.atomic.LongAdder}
 * values and initializing via {@link #computeIfAbsent computeIfAbsent}. For
 * example, to add a count to a {@code ConcurrentHashMap<String,LongAdder> freqs},
 * you can use {@code freqs.computeIfAbsent(k, x -> new LongAdder()).increment();}
 *
 * <p>This class and its views and iterators implement all of the <em>optional</em>
 * methods of the {@link Map} and {@link Iterator} interfaces.
 *
 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class does
 * <em>not</em> allow {@code null} to be used as a key or value.
 *
 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk operations
 * that, unlike most {@link Stream} methods, are designed to be safely, and often
 * sensibly, applied even with maps that are being concurrently updated by other
 * threads; for example, when computing a snapshot summary of the values in a shared
 * registry. There are three kinds of operation, each with four forms, accepting
 * functions with Keys, Values, Entries, and (Key, Value) arguments and/or return
 * values. Because the elements of a ConcurrentHashMap are not ordered in any
 * particular way, and may be processed in different orders in different parallel
 * executions, the correctness of supplied functions should not depend on any
 * ordering, or on any other objects or values that may transiently change while
 * computation is in progress; and except for forEach actions, should ideally be
 * side-effect-free. Bulk operations on {@link java.util.Map.Entry} objects do not
 * support method {@code setValue}.
 *
 * <ul>
 * <li> forEach: Perform a given action on each element.
 * A variant form applies a given transformation on each element
 * before performing the action.</li>
 *
 * <li> search: Return the first available non-null result of
 * applying a given function on each element; skipping further
 * search when a result is found.</li>
 *
 * <li> reduce: Accumulate each element. The supplied reduction
 * function cannot rely on ordering (more formally, it should be
 * both associative and commutative). There are five variants:
 *
 * <ul>
 * <li> Plain reductions. (There is not a form of this method for
 * (key, value) function arguments since there is no corresponding
 * return type.)</li>
 *
 * <li> Mapped reductions that accumulate the results of a given
 * function applied to each element.</li>
 *
 * <li> Reductions to scalar doubles, longs, and ints, using a
 * given basis value.</li>
 * </ul>
 * </li>
 * </ul>
 *
 * <p>These bulk operations accept a {@code parallelismThreshold} argument. Methods
 * proceed sequentially if the current map size is estimated to be less than the
 * given threshold. Using a value of {@code Long.MAX_VALUE} suppresses all
 * parallelism. Using a value of {@code 1} results in maximal parallelism by
 * partitioning into enough subtasks to fully utilize the
 * {@link ForkJoinPool#commonPool()} that is used for all parallel computations.
 * Normally, you would initially choose one of these extreme values, and then
 * measure performance of using in-between values that trade off overhead versus
 * throughput.
 *
 * <p>The concurrency properties of bulk operations follow from those of
 * ConcurrentHashMap: any non-null result returned from {@code get(key)} and related
 * access methods bears a happens-before relation with the associated insertion or
 * update. The result of any bulk operation reflects the composition of these
 * per-element relations (but is not necessarily atomic with respect to the map as a
 * whole unless it is somehow known to be quiescent). Conversely, because keys and
 * values in the map are never null, null serves as a reliable atomic indicator of
 * the current lack of any result. To maintain this property, null serves as an
 * implicit basis for all non-scalar reduction operations. For the double, long, and
 * int versions, the basis should be one that, when combined with any other value,
 * returns that other value (more formally, it should be the identity element for
 * the reduction). Most common reductions have these properties; for example,
 * computing a sum with basis 0 or a minimum with basis MAX_VALUE.
 *
 * <p>Search and transformation functions provided as arguments should similarly
 * return null to indicate the lack of any result (in which case it is not used).
 * In the case of mapped reductions, this also enables transformations to serve as
 * filters, returning null (or, in the case of primitive specializations, the
 * identity basis) if the element should not be combined. You can create compound
 * transformations and filterings by composing them yourself under this "null means
 * there is nothing there now" rule before using them in search or reduce
 * operations.
 *
 * <p>Methods accepting and/or returning Entry arguments maintain key-value
 * associations. They may be useful for example when finding the key for the
 * greatest value. Note that "plain" Entry arguments can be supplied using
 * {@code new AbstractMap.SimpleEntry(k,v)}.
 *
 * <p>Bulk operations may complete abruptly, throwing an exception encountered in
 * the application of a supplied function. Bear in mind when handling such
 * exceptions that other concurrently executing functions could also have thrown
 * exceptions, or would have done so if the first exception had not occurred.
 *
 * <p>Speedups for parallel compared to sequential forms are common but not
 * guaranteed. Parallel operations involving brief functions on small maps may
 * execute more slowly than sequential forms if the underlying work to parallelize
 * the computation is more expensive than the computation itself. Similarly,
 * parallelization may not lead to much actual parallelism if all processors are
 * busy performing unrelated tasks.
 *
 * <p>All arguments to all task methods must be non-null.
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @since 1.5
 * @author Doug Lea
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 */
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
    implements ConcurrentMap<K,V>, Serializable {
    private static final long serialVersionUID = 7249069246763182397L;
    /*
     * Overview:
     *
     * The primary design goal of this hash table is to maintain
     * concurrent readability (typically method get(), but also
     * iterators and related methods) while minimizing update
     * contention. Secondary goals are to keep space consumption about
     * the same or better than java.util.HashMap, and to support high
     * initial insertion rates on an empty table by many threads.
     *
     * This map usually acts as a binned (bucketed) hash table. Each
     * key-value mapping is held in a Node. Most nodes are instances
     * of the basic Node class with hash, key, value, and next
     * fields. However, various subclasses exist: TreeNodes are
     * arranged in balanced trees, not lists. TreeBins hold the roots
     * of sets of TreeNodes. ForwardingNodes are placed at the heads
     * of bins during resizing. ReservationNodes are used as
     * placeholders while establishing values in computeIfAbsent and
     * related methods. The types TreeBin, ForwardingNode, and
     * ReservationNode do not hold normal user keys, values, or
     * hashes, and are readily distinguishable during search etc
     * because they have negative hash fields and null key and value
     * fields. (These special nodes are either uncommon or transient,
     * so the impact of carrying around some unused fields is
     * insignificant.)
     *
     * The table is lazily initialized to a power-of-two size upon the
     * first insertion. Each bin in the table normally contains a
     * list of Nodes (most often, the list has only zero or one Node).
     * Table accesses require volatile/atomic reads, writes, and
     * CASes. Because there is no other way to arrange this without
     * adding further indirections, we use intrinsics
     * (sun.misc.Unsafe) operations.
     *
     * We use the top (sign) bit of Node hash fields for control
     * purposes -- it is available anyway because of addressing
     * constraints. Nodes with negative hash fields are specially
     * handled or ignored in map methods.
     *
     * Insertion (via put or its variants) of the first node in an
     * empty bin is performed by just CASing it to the bin. This is
     * by far the most common case for put operations under most
     * key/hash distributions. Other update operations (insert,
     * delete, and replace) require locks. We do not want to waste
     * the space required to associate a distinct lock object with
     * each bin, so instead use the first node of a bin list itself as
     * a lock. Locking support for these locks relies on builtin
     * "synchronized" monitors.
     *
     * Using the first node of a list as a lock does not by itself
     * suffice though: When a node is locked, any update must first
     * validate that it is still the first node after locking it, and
     * retry if not. Because new nodes are always appended to lists,
     * once a node is first in a bin, it remains first until deleted
     * or the bin becomes invalidated (upon resizing).
     *
     * The main disadvantage of per-bin locks is that other update
     * operations on other nodes in a bin list protected by the same
     * lock can stall, for example when user equals() or mapping
     * functions take a long time. However, statistically, under
     * random hash codes, this is not a common problem. Ideally, the
     * frequency of nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average, given the resizing threshold
     * of 0.75, although with a large variance because of resizing
     * granularity. Ignoring variance, the expected occurrences of
     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
     * first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
     *
     * Lock contention probability for two threads accessing distinct
     * elements is roughly 1 / (8 * #elements) under random hashes.
     *
     * Actual hash code distributions encountered in practice
     * sometimes deviate significantly from uniform randomness. This
     * includes the case when N > (1<<30), so some keys MUST collide.
     * Similarly for dumb or hostile usages in which multiple keys are
     * designed to have identical hash codes or ones that differs only
     * in masked-out high bits. So we use a secondary strategy that
     * applies when the number of nodes in a bin exceeds a
     * threshold. These TreeBins use a balanced tree to hold nodes (a
     * specialized form of red-black trees), bounding search time to
     * O(log N). Each search step in a TreeBin is at least twice as
     * slow as in a regular list, but given that N cannot exceed
     * (1<<64) (before running out of addresses) this bounds search
     * steps, lock hold times, etc, to reasonable constants (roughly
     * 100 nodes inspected per operation worst case) so long as keys
     * are Comparable (which is very common -- String, Long, etc).
     * TreeBin nodes (TreeNodes) also maintain the same "next"
     * traversal pointers as regular nodes, so can be traversed in
     * iterators in the same way.
     *
     * The table is resized when occupancy exceeds a percentage
     * threshold (nominally, 0.75, but see below). Any thread
     * noticing an overfull bin may assist in resizing after the
     * initiating thread allocates and sets up the replacement array.
     * However, rather than stalling, these other threads may proceed
     * with insertions etc. The use of TreeBins shields us from the
     * worst case effects of overfilling while resizes are in
     * progress. Resizing proceeds by transferring bins, one by one,
     * from the table to the next table. However, threads claim small
     * blocks of indices to transfer (via field transferIndex) before
     * doing so, reducing contention. A generation stamp in field
     * sizeCtl ensures that resizings do not overlap. Because we are
     * using power-of-two expansion, the elements from each bin must
     * either stay at same index, or move with a power of two
     * offset. We eliminate unnecessary node creation by catching
     * cases where old nodes can be reused because their next fields
     * won't change. On average, only about one-sixth of them need
     * cloning when a table doubles. The nodes they replace will be
     * garbage collectable as soon as they are no longer referenced by
     * any reader thread that may be in the midst of concurrently
     * traversing table. Upon transfer, the old table bin contains
     * only a special forwarding node (with hash field "MOVED") that
     * contains the next table as its key. On encountering a
     * forwarding node, access and update operations restart, using
     * the new table.
     *
     * Each bin transfer requires its bin lock, which can stall
     * waiting for locks while resizing. However, because other
     * threads can join in and help resize rather than contend for
     * locks, average aggregate waits become shorter as resizing
     * progresses. The transfer operation must also ensure that all
     * accessible bins in both the old and new table are usable by any
     * traversal. This is arranged in part by proceeding from the
     * last bin (table.length - 1) up towards the first. Upon seeing
     * a forwarding node, traversals (see class Traverser) arrange to
     * move to the new table without revisiting nodes. To ensure that
     * no intervening nodes are skipped even when moved out of order,
     * a stack (see class TableStack) is created on first encounter of
     * a forwarding node during a traversal, to maintain its place if
     * later processing the current table. The need for these
     * save/restore mechanics is relatively rare, but when one
     * forwarding node is encountered, typically many more will be.
     * So Traversers use a simple caching scheme to avoid creating so
     * many new TableStack nodes. (Thanks to Peter Levart for
     * suggesting use of a stack here.)
     *
     * The traversal scheme also applies to partial traversals of
     * ranges of bins (via an alternate Traverser constructor)
     * to support partitioned aggregate operations. Also, read-only
     * operations give up if ever forwarded to a null table, which
     * provides support for shutdown-style clearing, which is also not
     * currently implemented.
     *
     * Lazy table initialization minimizes footprint until first use,
     * and also avoids resizings when the first operation is from a
     * putAll, constructor with map argument, or deserialization.
     * These cases attempt to override the initial capacity settings,
     * but harmlessly fail to take effect in cases of races.
     *
     * The element count is maintained using a specialization of
     * LongAdder. We need to incorporate a specialization rather than
     * just use a LongAdder in order to access implicit
     * contention-sensing that leads to creation of multiple
     * CounterCells. The counter mechanics avoid contention on
     * updates but can encounter cache thrashing if read too
     * frequently during concurrent access. To avoid reading so often,
     * resizing under contention is attempted only upon adding to a
     * bin already holding two or more nodes. Under uniform hash
     * distributions, the probability of this occurring at threshold
     * is around 13%, meaning that only about 1 in 8 puts check
     * threshold (and after resizing, many fewer do so).
     *
     * TreeBins use a special form of comparison for search and
     * related operations (which is the main reason we cannot use
     * existing collections such as TreeMaps). TreeBins contain
     * Comparable elements, but may contain others, as well as
     * elements that are Comparable but not necessarily Comparable for
     * the same T, so we cannot invoke compareTo among them. To handle
     * this, the tree is ordered primarily by hash value, then by
     * Comparable.compareTo order if applicable. On lookup at a node,
     * if elements are not comparable or compare as 0 then both left
     * and right children may need to be searched in the case of tied
     * hash values. (This corresponds to the full list search that
     * would be necessary if all elements were non-Comparable and had
     * tied hashes.) On insertion, to keep a total ordering (or as
     * close as is required here) across rebalancings, we compare
     * classes and identityHashCodes as tie-breakers. The red-black
     * balancing code is updated from pre-jdk-collections
     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
     * Algorithms" (CLR).
     *
     * TreeBins also require an additional locking mechanism. While
     * list traversal is always possible by readers even during
     * updates, tree traversal is not, mainly because of tree-rotations
     * that may change the root node and/or its linkages. TreeBins
     * include a simple read-write lock mechanism parasitic on the
     * main bin-synchronization strategy: Structural adjustments
     * associated with an insertion or removal are already bin-locked
     * (and so cannot conflict with other writers) but must wait for
     * ongoing readers to finish. Since there can be only one such
     * waiter, we use a simple scheme using a single "waiter" field to
     * block writers. However, readers need never block. If the root
     * lock is held, they proceed along the slow traversal path (via
     * next-pointers) until the lock becomes available or the list is
     * exhausted, whichever comes first. These cases are not fast, but
     * maximize aggregate expected throughput.
     *
     * Maintaining API and serialization compatibility with previous
     * versions of this class introduces several oddities. Mainly: We
     * leave untouched but unused constructor arguments referring to
     * concurrencyLevel. We accept a loadFactor constructor argument,
     * but apply it only to initial table capacity (which is the only
     * time that we can guarantee to honor it.) We also declare an
     * unused "Segment" class that is instantiated in minimal form
     * only when serializing.
     *
     * Also, solely for compatibility with previous versions of this
     * class, it extends AbstractMap, even though all of its methods
     * are overridden, so it is just useless baggage.
     *
     * This file is organized to make things a little easier to follow
     * while reading than they might otherwise: First the main static
     * declarations and utilities, then fields, then main public
     * methods (with a few factorings of multiple public methods into
     * internal ones), then sizing methods, trees, traversers, and
     * bulk operations.
     */
    /* ---------------- Constants -------------- */

    /**
     * The largest possible table capacity.  This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default initial table capacity.  Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2, and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
     * conflicts between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads.  This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention.  The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;

    /**
     * The number of bits used for generation stamp in sizeCtl.
     * Must be at least 6 for 32bit arrays.
     */
    private static int RESIZE_STAMP_BITS = 16;

    /**
     * The maximum number of threads that can help resize.
     * Must fit in 32 - RESIZE_STAMP_BITS bits.
     */
    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;

    /**
     * The bit shift for recording size stamp in sizeCtl.
     */
    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

    /*
     * Encodings for Node hash fields. See above for explanation.
     */
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();

    /** For serialization compatibility. */
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE)
    };
    /* ---------------- Nodes -------------- */

    /**
     * Key-value entry.  This class is never exported out as a
     * user-mutable Map.Entry (i.e., one supporting setValue; see
     * MapEntry below), but can be used for read-only traversals used
     * in bulk tasks.  Subclasses of Node with a negative hash field
     * are special, and contain null keys and values (but are never
     * exported).  Otherwise, keys and vals are never null.
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;
        final K key;
        volatile V val;
        volatile Node<K,V> next;

        Node(int hash, K key, V val, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        public final K getKey()       { return key; }
        public final V getValue()     { return val; }
        public final int hashCode()   { return key.hashCode() ^ val.hashCode(); }
        public final String toString(){ return key + "=" + val; }
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        public final boolean equals(Object o) {
            Object k, v, u; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == (u = val) || v.equals(u)));
        }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         */
        Node<K,V> find(int h, Object k) {
            Node<K,V> e = this;
            if (k != null) {
                do {
                    K ek;
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                } while ((e = e.next) != null);
            }
            return null;
        }
    }
    /* ---------------- Static utilities -------------- */

    /**
     * Spreads (XORs) higher bits of hash to lower and also forces top
     * bit to 0. Because the table uses power-of-two masking, sets of
     * hashes that vary only in bits above the current mask will
     * always collide. (Among known examples are sets of Float keys
     * holding consecutive whole numbers in small tables.)  So we
     * apply a transform that spreads the impact of higher bits
     * downward. There is a tradeoff between speed, utility, and
     * quality of bit-spreading. Because many common sets of hashes
     * are already reasonably distributed (so don't benefit from
     * spreading), and because we use trees to handle large sets of
     * collisions in bins, we just XOR some shifted bits in the
     * cheapest possible way to reduce systematic lossage, as well as
     * to incorporate impact of the highest bits that would otherwise
     * never be used in index calculations because of table bounds.
     */
    static final int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }
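    // Worked example (my annotation, not part of the JDK source):
    //   h            = 0x12345678
    //   h >>> 16     = 0x00001234
    //   h ^ (h>>>16) = 0x1234444C, and & HASH_BITS keeps it non-negative,
    //   so spread(0x12345678) == 0x1234444C. The high half of the hash now
    //   also influences the low bits used by (table.length - 1) & hash.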
    /**
     * Returns a power of two table size for the given desired capacity.
     * See Hackers Delight, sec 3.2
     */
    private static final int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }
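    // Worked examples (my annotation, not part of the JDK source):
    //   tableSizeFor(10) == 16, tableSizeFor(16) == 16, tableSizeFor(17) == 32.
    //   The shifted ORs smear the highest set bit of (c - 1) into every lower
    //   position, so n + 1 is the next power of two >= c.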
    /**
     * Returns x's Class if it is of the form "class C implements
     * Comparable<C>", else null.
     */
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
            if ((c = x.getClass()) == String.class) // bypass checks
                return c;
            if ((ts = c.getGenericInterfaces()) != null) {
                for (int i = 0; i < ts.length; ++i) {
                    if (((t = ts[i]) instanceof ParameterizedType) &&
                        ((p = (ParameterizedType)t).getRawType() ==
                         Comparable.class) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }

    /**
     * Returns k.compareTo(x) if x matches kc (k's screened comparable
     * class), else 0.
     */
    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc ? 0 :
                ((Comparable)k).compareTo(x));
    }
    /* ---------------- Table element access -------------- */

    /*
     * Volatile access methods are used for table elements as well as
     * elements of in-progress next table while resizing.  All uses of
     * the tab arguments must be null checked by callers.  All callers
     * also paranoically precheck that tab's length is not zero (or an
     * equivalent check), thus ensuring that any index argument taking
     * the form of a hash value anded with (length - 1) is a valid
     * index.  Note that, to be correct wrt arbitrary concurrency
     * errors by users, these checks must operate on local variables,
     * which accounts for some odd-looking inline assignments below.
     * Note that calls to setTabAt always occur within locked regions,
     * and so in principle require only release ordering, not
     * full volatile semantics, but are currently coded as volatile
     * writes to be conservative.
     */

    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }

    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }
    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads).  Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // views
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;
    /* ---------------- Public operations -------------- */

    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMap() {
    }

    /**
     * Creates a new, empty map with an initial table size
     * accommodating the specified number of elements without the need
     * to dynamically resize.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     */
    public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        this.sizeCtl = cap;
    }
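    // Worked example (my annotation, not part of the JDK source):
    //   new ConcurrentHashMap<>(16) computes 16 + (16 >>> 1) + 1 = 25,
    //   and tableSizeFor(25) == 32, so sizeCtl starts at 32 -- unlike
    //   HashMap, the requested capacity is padded before rounding up.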
    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}), table
     * density ({@code loadFactor}), and number of concurrently
     * updating threads ({@code concurrencyLevel}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @param concurrencyLevel the estimated number of concurrently
     * updating threads. The implementation may use this value as
     * a sizing hint.
     * @throws IllegalArgumentException if the initial capacity is
     * negative or the load factor or concurrencyLevel are
     * nonpositive
     */
    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }
    // Original (since JDK1.2) Map methods

    /**
     * {@inheritDoc}
     */
    public int size() {
        long n = sumCount();
        return ((n < 0L) ? 0 :
                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
                (int)n);
    }

    /**
     * {@inheritDoc}
     */
    public boolean isEmpty() {
        return sumCount() <= 0L; // ignore transient negative values
    }
    // get(Object) is identical to the annotated version shown earlier in this post.
    /**
     * Tests if the specified object is a key in this table.
     *
     * @param  key possible key
     * @return {@code true} if and only if the specified object
     *         is a key in this table, as determined by the
     *         {@code equals} method; {@code false} otherwise
     * @throws NullPointerException if the specified key is null
     */
    public boolean containsKey(Object key) {
        return get(key) != null;
    }

    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value. Note: This method may require a full traversal
     * of the map, and is much slower than method {@code containsKey}.
     *
     * @param value value whose presence in this map is to be tested
     * @return {@code true} if this map maps one or more keys to the
     *         specified value
     * @throws NullPointerException if the specified value is null
     */
    public boolean containsValue(Object value) {
        if (value == null)
            throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V v;
                if ((v = p.val) == value || (v != null && value.equals(v)))
                    return true;
            }
        }
        return false;
    }
    // put(K,V) and putVal(K,V,boolean) are identical to the annotated
    // versions shown earlier in this post.
    /**
     * Copies all of the mappings from the specified map to this one.
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size());
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            putVal(e.getKey(), e.getValue(), false);
    }

    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param  key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        return replaceNode(key, null, null);
    }

    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null.  If resulting value is null, delete.
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break;
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                boolean validated = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;
                                        else if (pred != null)
                                            pred.next = e.next;
                                        else
                                            setTabAt(tab, i, e.next);
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;
                            }
                        }
                        else if (f instanceof TreeBin) {
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1);
                        return oldVal;
                    }
                    break;
                }
            }
        }
        return null;
    }

    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;
            else if ((fh = f.hash) == MOVED) {
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }
    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa. The set supports element
     * removal, which removes the corresponding mapping from this map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.  It does not support the {@code add} or
     * {@code addAll} operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
     *
     * @return the set view
     */
    public KeySetView<K,V> keySet() {
        KeySetView<K,V> ks;
        return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  The collection
     * supports element removal, which removes the corresponding
     * mapping from this map, via the {@code Iterator.remove},
     * {@code Collection.remove}, {@code removeAll},
     * {@code retainAll}, and {@code clear} operations.  It does not
     * support the {@code add} or {@code addAll} operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
     * and {@link Spliterator#NONNULL}.
     *
     * @return the collection view
     */
    public Collection<V> values() {
        ValuesView<K,V> vs;
        return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  The set supports element
     * removal, which removes the corresponding mapping from the map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.
     *
     * <p>The view's iterators and spliterators are
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
     *
     * @return the set view
     */
    public Set<Map.Entry<K,V>> entrySet() {
        EntrySetView<K,V> es;
        return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
    }

    /**
     * Returns the hash code value for this {@link Map}, i.e.,
     * the sum of, for each key-value pair in the map,
     * {@code key.hashCode() ^ value.hashCode()}.
     *
     * @return the hash code value for this map
     */
    public int hashCode() {
        int h = 0;
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                h += p.key.hashCode() ^ p.val.hashCode();
        }
        return h;
    }

    /**
     * Returns a string representation of this map.  The string
     * representation consists of a list of key-value mappings (in no
     * particular order) enclosed in braces ("{@code {}}").  Adjacent
     * mappings are separated by the characters {@code ", "} (comma
     * and space).  Each key-value mapping is rendered as the key
     * followed by an equals sign ("{@code =}") followed by the
     * associated value.
     *
     * @return a string representation of this map
     */
    public String toString() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
        StringBuilder sb = new StringBuilder();
        sb.append('{');
        Node<K,V> p;
        if ((p = it.advance()) != null) {
            for (;;) {
                K k = p.key;
                V v = p.val;
                sb.append(k == this ? "(this Map)" : k);
                sb.append('=');
                sb.append(v == this ? "(this Map)" : v);
                if ((p = it.advance()) == null)
                    break;
                sb.append(',').append(' ');
            }
        }
        return sb.append('}').toString();
    }

    /**
     * Compares the specified object with this map for equality.
     * Returns {@code true} if the given object is a map with the same
     * mappings as this map.  This operation may return misleading
     * results if either map is concurrently modified during execution
     * of this method.
     *
     * @param o object to be compared for equality with this map
     * @return {@code true} if the specified object is equal to this map
     */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            Node<K,V>[] t;
            int f = (t = table) == null ? 0 : t.length;
            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V val = p.val;
                Object v = m.get(p.key);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = get(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }
    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor;
        Segment(float lf) { this.loadFactor = lf; }
    }

    /**
     * Saves the state of the {@code ConcurrentHashMap} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        @SuppressWarnings("unchecked")
        Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        s.putFields().put("segments", segments);
        s.putFields().put("segmentShift", segmentShift);
        s.putFields().put("segmentMask", segmentMask);
        s.writeFields();

        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        s.writeObject(null);
        s.writeObject(null);
        segments = null; // throw away
    }

    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        /*
         * To improve performance in typical cases, we create nodes
         * while reading, then place in table once size is known.
         * However, we must also validate uniqueness and deal with
         * overpopulated bins while doing so, which requires
         * specialized versions of putVal mechanics.
         */
        sizeCtl = -1; // force exclusion for table construction
        s.defaultReadObject();
        long size = 0L;
        Node<K,V> p = null;
        for (;;) {
            @SuppressWarnings("unchecked")
            K k = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V v = (V) s.readObject();
            if (k != null && v != null) {
                p = new Node<K,V>(spread(k.hashCode()), k, v, p);
                ++size;
            }
            else
                break;
        }
        if (size == 0L)
            sizeCtl = 0;
        else {
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            @SuppressWarnings("unchecked")
            Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
            int mask = n - 1;
            long added = 0L;
            while (p != null) {
                boolean insertAtFront;
                Node<K,V> next = p.next, first;
                int h = p.hash, j = h & mask;
                if ((first = tabAt(tab, j)) == null)
                    insertAtFront = true;
                else {
                    K k = p.key;
                    if (first.hash < 0) {
                        TreeBin<K,V> t = (TreeBin<K,V>)first;
                        if (t.putTreeVal(h, k, p.val) == null)
                            ++added;
                        insertAtFront = false;
                    }
                    else {
                        int binCount = 0;
                        insertAtFront = true;
                        Node<K,V> q; K qk;
                        for (q = first; q != null; q = q.next) {
                            if (q.hash == h &&
                                ((qk = q.key) == k ||
                                 (qk != null && k.equals(qk)))) {
                                insertAtFront = false;
                                break;
                            }
                            ++binCount;
                        }
                        if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
                            insertAtFront = false;
                            ++added;
                            p.next = first;
                            TreeNode<K,V> hd = null, tl = null;
                            for (q = p; q != null; q = q.next) {
                                TreeNode<K,V> t = new TreeNode<K,V>
                                    (q.hash, q.key, q.val, null, null);
                                if ((t.prev = tl) == null)
                                    hd = t;
                                else
                                    tl.next = t;
                                tl = t;
                            }
                            setTabAt(tab, j, new TreeBin<K,V>(hd));
                        }
                    }
                }
                if (insertAtFront) {
                    ++added;
                    p.next = first;
                    setTabAt(tab, j, p);
                }
                p = next;
            }
            table = tab;
            sizeCtl = n - (n >>> 2);
            baseCount = added;
        }
    }
// concurrentmap methods
/** * {@inheritdoc} * * @return the previous value associated with the specified key, * or {@code null} if there was no mapping for the key * @throws nullpointerexception if the specified key or value is null */ public v putifabsent(k key, v value) { return putval(key, value, true); }
/**
 * {@inheritDoc}
 *
 * @throws NullPointerException if the specified key is null
 */
public boolean remove(Object key, Object value) {
    if (key == null)
        throw new NullPointerException();
    return value != null && replaceNode(key, null, value) != null;
}

/**
 * {@inheritDoc}
 *
 * @throws NullPointerException if any of the arguments are null
 */
public boolean replace(K key, V oldValue, V newValue) {
    if (key == null || oldValue == null || newValue == null)
        throw new NullPointerException();
    return replaceNode(key, newValue, oldValue) != null;
}

/**
 * {@inheritDoc}
 *
 * @return the previous value associated with the specified key,
 *         or {@code null} if there was no mapping for the key
 * @throws NullPointerException if the specified key or value is null
 */
public V replace(K key, V value) {
    if (key == null || value == null)
        throw new NullPointerException();
    return replaceNode(key, value, null);
}
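These are the conditional (CAS-style) variants from ConcurrentMap: they only act when the current mapping matches what the caller expects. A small sketch of my own showing the difference:

import java.util.concurrent.ConcurrentHashMap;

public class ConditionalOpsDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        m.put("a", 1);
        System.out.println(m.replace("a", 2, 9)); // false: current value is 1, not 2
        System.out.println(m.replace("a", 1, 9)); // true: 1 -> 9
        System.out.println(m.remove("a", 1));     // false: current value is 9
        System.out.println(m.remove("a", 9));     // true: entry removed
    }
}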
// Overrides of JDK8+ Map extension method defaults
/**
 * Returns the value to which the specified key is mapped, or the
 * given default value if this map contains no mapping for the
 * key.
 *
 * @param key the key whose associated value is to be returned
 * @param defaultValue the value to return if this map contains
 *        no mapping for the given key
 * @return the mapping for the key, if present; else the default value
 * @throws NullPointerException if the specified key is null
 */
public V getOrDefault(Object key, V defaultValue) {
    V v;
    return (v = get(key)) == null ? defaultValue : v;
}
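Since ConcurrentHashMap never stores null values, a null from get() always means "absent", so getOrDefault avoids the usual get-then-null-check dance. A one-line example of mine:

import java.util.concurrent.ConcurrentHashMap;

public class GetOrDefaultDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        System.out.println(m.getOrDefault("missing", 0)); // 0 instead of null
    }
}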
public void forEach(BiConsumer<? super K, ? super V> action) {
    if (action == null) throw new NullPointerException();
    Node<K,V>[] t;
    if ((t = table) != null) {
        Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
        for (Node<K,V> p; (p = it.advance()) != null; ) {
            action.accept(p.key, p.val);
        }
    }
}

public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
    if (function == null) throw new NullPointerException();
    Node<K,V>[] t;
    if ((t = table) != null) {
        Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
        for (Node<K,V> p; (p = it.advance()) != null; ) {
            V oldValue = p.val;
            for (K key = p.key;;) {
                V newValue = function.apply(key, oldValue);
                if (newValue == null)
                    throw new NullPointerException();
                if (replaceNode(key, newValue, oldValue) != null ||
                    (oldValue = get(key)) == null)
                    break;
            }
        }
    }
}
/**
 * If the specified key is not already associated with a value,
 * attempts to compute its value using the given mapping function
 * and enters it into this map unless {@code null}. The entire
 * method invocation is performed atomically, so the function is
 * applied at most once per key. Some attempted update operations
 * on this map by other threads may be blocked while computation
 * is in progress, so the computation should be short and simple,
 * and must not attempt to update any other mappings of this map.
 *
 * @param key key with which the specified value is to be associated
 * @param mappingFunction the function to compute a value
 * @return the current (existing or computed) value associated with
 *         the specified key, or null if the computed value is null
 * @throws NullPointerException if the specified key or mappingFunction
 *         is null
 * @throws IllegalStateException if the computation detectably
 *         attempts a recursive update to this map that would
 *         otherwise never complete
 * @throws RuntimeException or Error if the mappingFunction does so,
 *         in which case the mapping is left unestablished
 */
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
    if (key == null || mappingFunction == null)
        throw new NullPointerException();
    int h = spread(key.hashCode());
    V val = null;
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
            Node<K,V> r = new ReservationNode<K,V>();
            synchronized (r) {
                if (casTabAt(tab, i, null, r)) {
                    binCount = 1;
                    Node<K,V> node = null;
                    try {
                        if ((val = mappingFunction.apply(key)) != null)
                            node = new Node<K,V>(h, key, val, null);
                    } finally {
                        setTabAt(tab, i, node);
                    }
                }
            }
            if (binCount != 0)
                break;
        }
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            boolean added = false;
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek; V ev;
                            if (e.hash == h &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                val = e.val;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                if ((val = mappingFunction.apply(key)) != null) {
                                    added = true;
                                    pred.next = new Node<K,V>(h, key, val, null);
                                }
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        binCount = 2;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        if ((r = t.root) != null &&
                            (p = r.findTreeNode(h, key, null)) != null)
                            val = p.val;
                        else if ((val = mappingFunction.apply(key)) != null) {
                            added = true;
                            t.putTreeVal(h, key, val);
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (!added)
                    return val;
                break;
            }
        }
    }
    if (val != null)
        addCount(1L, binCount);
    return val;
}
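computeIfAbsent is the classic way to build a small cache: the mapping function runs at most once per key, under the bin lock (or a ReservationNode parked in an empty bin). A sketch of my own, with a made-up square() computation:

import java.util.concurrent.ConcurrentHashMap;

public class MemoDemo {
    static final ConcurrentHashMap<Integer, Long> CACHE = new ConcurrentHashMap<>();

    static long square(int n) {
        // Computed at most once per key, even under concurrent calls.
        return CACHE.computeIfAbsent(n, k -> (long) k * k);
    }

    public static void main(String[] args) {
        System.out.println(square(12)); // 144, computed
        System.out.println(square(12)); // 144, served from the cache
    }
}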
/**
 * If the value for the specified key is present, attempts to
 * compute a new mapping given the key and its current mapped
 * value. The entire method invocation is performed atomically.
 * Some attempted update operations on this map by other threads
 * may be blocked while computation is in progress, so the
 * computation should be short and simple, and must not attempt to
 * update any other mappings of this map.
 *
 * @param key key with which a value may be associated
 * @param remappingFunction the function to compute a value
 * @return the new value associated with the specified key, or null if none
 * @throws NullPointerException if the specified key or remappingFunction
 *         is null
 * @throws IllegalStateException if the computation detectably
 *         attempts a recursive update to this map that would
 *         otherwise never complete
 * @throws RuntimeException or Error if the remappingFunction does so,
 *         in which case the mapping is unchanged
 */
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (key == null || remappingFunction == null)
        throw new NullPointerException();
    int h = spread(key.hashCode());
    V val = null;
    int delta = 0;
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
            break;
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f, pred = null;; ++binCount) {
                            K ek;
                            if (e.hash == h &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                val = remappingFunction.apply(key, e.val);
                                if (val != null)
                                    e.val = val;
                                else {
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null)
                                break;
                        }
                    }
                    else if (f instanceof TreeBin) {
                        binCount = 2;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        if ((r = t.root) != null &&
                            (p = r.findTreeNode(h, key, null)) != null) {
                            val = remappingFunction.apply(key, p.val);
                            if (val != null)
                                p.val = val;
                            else {
                                delta = -1;
                                if (t.removeTreeNode(p))
                                    setTabAt(tab, i, untreeify(t.first));
                            }
                        }
                    }
                }
            }
            if (binCount != 0)
                break;
        }
    }
    if (delta != 0)
        addCount((long)delta, binCount);
    return val;
}
/**
 * Attempts to compute a mapping for the specified key and its
 * current mapped value (or {@code null} if there is no current
 * mapping). The entire method invocation is performed atomically.
 * Some attempted update operations on this map by other threads
 * may be blocked while computation is in progress, so the
 * computation should be short and simple, and must not attempt to
 * update any other mappings of this map.
 *
 * @param key key with which the specified value is to be associated
 * @param remappingFunction the function to compute a value
 * @return the new value associated with the specified key, or null if none
 * @throws NullPointerException if the specified key or remappingFunction
 *         is null
 * @throws IllegalStateException if the computation detectably
 *         attempts a recursive update to this map that would
 *         otherwise never complete
 * @throws RuntimeException or Error if the remappingFunction does so,
 *         in which case the mapping is unchanged
 */
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (key == null || remappingFunction == null)
        throw new NullPointerException();
    int h = spread(key.hashCode());
    V val = null;
    int delta = 0;
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
            Node<K,V> r = new ReservationNode<K,V>();
            synchronized (r) {
                if (casTabAt(tab, i, null, r)) {
                    binCount = 1;
                    Node<K,V> node = null;
                    try {
                        if ((val = remappingFunction.apply(key, null)) != null) {
                            delta = 1;
                            node = new Node<K,V>(h, key, val, null);
                        }
                    } finally {
                        setTabAt(tab, i, node);
                    }
                }
            }
            if (binCount != 0)
                break;
        }
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f, pred = null;; ++binCount) {
                            K ek;
                            if (e.hash == h &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                val = remappingFunction.apply(key, e.val);
                                if (val != null)
                                    e.val = val;
                                else {
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) {
                                val = remappingFunction.apply(key, null);
                                if (val != null) {
                                    delta = 1;
                                    pred.next = new Node<K,V>(h, key, val, null);
                                }
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        binCount = 1;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        if ((r = t.root) != null)
                            p = r.findTreeNode(h, key, null);
                        else
                            p = null;
                        V pv = (p == null) ? null : p.val;
                        val = remappingFunction.apply(key, pv);
                        if (val != null) {
                            if (p != null)
                                p.val = val;
                            else {
                                delta = 1;
                                t.putTreeVal(h, key, val);
                            }
                        }
                        else if (p != null) {
                            delta = -1;
                            if (t.removeTreeNode(p))
                                setTabAt(tab, i, untreeify(t.first));
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                break;
            }
        }
    }
    if (delta != 0)
        addCount((long)delta, binCount);
    return val;
}
/**
 * If the specified key is not already associated with a
 * (non-null) value, associates it with the given value.
 * Otherwise, replaces the value with the results of the given
 * remapping function, or removes if {@code null}. The entire
 * method invocation is performed atomically. Some attempted
 * update operations on this map by other threads may be blocked
 * while computation is in progress, so the computation should be
 * short and simple, and must not attempt to update any other
 * mappings of this map.
 *
 * @param key key with which the specified value is to be associated
 * @param value the value to use if absent
 * @param remappingFunction the function to recompute a value if present
 * @return the new value associated with the specified key, or null if none
 * @throws NullPointerException if the specified key or the
 *         remappingFunction is null
 * @throws RuntimeException or Error if the remappingFunction does so,
 *         in which case the mapping is unchanged
 */
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
    if (key == null || value == null || remappingFunction == null)
        throw new NullPointerException();
    int h = spread(key.hashCode());
    V val = null;
    int delta = 0;
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
            if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
                delta = 1;
                val = value;
                break;
            }
        }
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f, pred = null;; ++binCount) {
                            K ek;
                            if (e.hash == h &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                val = remappingFunction.apply(e.val, value);
                                if (val != null)
                                    e.val = val;
                                else {
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) {
                                delta = 1;
                                val = value;
                                pred.next = new Node<K,V>(h, key, val, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        binCount = 2;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r = t.root;
                        TreeNode<K,V> p = (r == null) ? null :
                            r.findTreeNode(h, key, null);
                        val = (p == null) ? value :
                            remappingFunction.apply(p.val, value);
                        if (val != null) {
                            if (p != null)
                                p.val = val;
                            else {
                                delta = 1;
                                t.putTreeVal(h, key, val);
                            }
                        }
                        else if (p != null) {
                            delta = -1;
                            if (t.removeTreeNode(p))
                                setTabAt(tab, i, untreeify(t.first));
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                break;
            }
        }
    }
    if (delta != 0)
        addCount((long)delta, binCount);
    return val;
}
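merge is the idiomatic JDK8 way to do an atomic read-modify-write per key. A classic use is a concurrent word count; this is my own example, not JDK code:

import java.util.concurrent.ConcurrentHashMap;

public class WordCountDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<>();
        for (String w : "a b a c a b".split(" "))
            counts.merge(w, 1, Integer::sum); // insert 1 if absent, else add under the bin lock
        System.out.println(counts); // {a=3, b=2, c=1}
    }
}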
// Hashtable legacy methods

/**
 * Legacy method testing if some key maps into the specified value
 * in this table. This method is identical in functionality to
 * {@link #containsValue(Object)}, and exists solely to ensure
 * full compatibility with class {@link java.util.Hashtable},
 * which supported this method prior to introduction of the
 * Java Collections Framework.
 *
 * @param value a value to search for
 * @return {@code true} if and only if some key maps to the
 *         {@code value} argument in this table as
 *         determined by the {@code equals} method;
 *         {@code false} otherwise
 * @throws NullPointerException if the specified value is null
 */
public boolean contains(Object value) {
    return containsValue(value);
}

/**
 * Returns an enumeration of the keys in this table.
 *
 * @return an enumeration of the keys in this table
 * @see #keySet()
 */
public Enumeration<K> keys() {
    Node<K,V>[] t;
    int f = (t = table) == null ? 0 : t.length;
    return new KeyIterator<K,V>(t, f, 0, f, this);
}

/**
 * Returns an enumeration of the values in this table.
 *
 * @return an enumeration of the values in this table
 * @see #values()
 */
public Enumeration<V> elements() {
    Node<K,V>[] t;
    int f = (t = table) == null ? 0 : t.length;
    return new ValueIterator<K,V>(t, f, 0, f, this);
}
// ConcurrentHashMap-only methods

/**
 * Returns the number of mappings. This method should be used
 * instead of {@link #size} because a ConcurrentHashMap may
 * contain more mappings than can be represented as an int. The
 * value returned is an estimate; the actual count may differ if
 * there are concurrent insertions or removals.
 *
 * @return the number of mappings
 * @since 1.8
 */
public long mappingCount() {
    long n = sumCount();
    return (n < 0L) ? 0L : n; // ignore transient negative values
}
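Because size() is capped at Integer.MAX_VALUE, JDK8 recommends mappingCount() instead. A tiny usage note (my own example):

import java.util.concurrent.ConcurrentHashMap;

public class MappingCountDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, String> m = new ConcurrentHashMap<>();
        m.put("k", "v");
        long n = m.mappingCount(); // preferred over size(): a long, and explicitly an estimate
        System.out.println(n);     // 1
    }
}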
/**
 * Creates a new {@link Set} backed by a ConcurrentHashMap
 * from the given type to {@code Boolean.TRUE}.
 *
 * @param <K> the element type of the returned set
 * @return the new set
 * @since 1.8
 */
public static <K> KeySetView<K,Boolean> newKeySet() {
    return new KeySetView<K,Boolean>
        (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
}

/**
 * Creates a new {@link Set} backed by a ConcurrentHashMap
 * from the given type to {@code Boolean.TRUE}.
 *
 * @param initialCapacity The implementation performs internal
 *        sizing to accommodate this many elements.
 * @param <K> the element type of the returned set
 * @return the new set
 * @throws IllegalArgumentException if the initial capacity of
 *         elements is negative
 * @since 1.8
 */
public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
    return new KeySetView<K,Boolean>
        (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
}

/**
 * Returns a {@link Set} view of the keys in this map, using the
 * given common mapped value for any additions (i.e., {@link
 * Collection#add} and {@link Collection#addAll(Collection)}).
 * This is of course only appropriate if it is acceptable to use
 * the same value for all additions from this view.
 *
 * @param mappedValue the mapped value to use for any additions
 * @return the set view
 * @throws NullPointerException if the mappedValue is null
 */
public KeySetView<K,V> keySet(V mappedValue) {
    if (mappedValue == null)
        throw new NullPointerException();
    return new KeySetView<K,V>(this, mappedValue);
}
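newKeySet() and keySet(mappedValue) give you a concurrent Set without a separate class: additions through the view store the fixed mapped value. A short sketch of my own:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class KeySetDemo {
    public static void main(String[] args) {
        Set<String> seen = ConcurrentHashMap.newKeySet(); // backed by a map to Boolean.TRUE
        seen.add("x");
        System.out.println(seen.contains("x")); // true

        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        Set<String> keys = m.keySet(1); // additions through this view map to the value 1
        keys.add("y");
        System.out.println(m.get("y")); // 1
    }
}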
/* ---------------- Special Nodes -------------- */

/**
 * A node inserted at head of bins during transfer operations.
 */
static final class ForwardingNode<K,V> extends Node<K,V> {
    final Node<K,V>[] nextTable;
    ForwardingNode(Node<K,V>[] tab) {
        super(MOVED, null, null, null);
        this.nextTable = tab;
    }

    Node<K,V> find(int h, Object k) {
        // loop to avoid arbitrarily deep recursion on forwarding nodes
        outer: for (Node<K,V>[] tab = nextTable;;) {
            Node<K,V> e; int n;
            if (k == null || tab == null || (n = tab.length) == 0 ||
                (e = tabAt(tab, (n - 1) & h)) == null)
                return null;
            for (;;) {
                int eh; K ek;
                if ((eh = e.hash) == h &&
                    ((ek = e.key) == k || (ek != null && k.equals(ek))))
                    return e;
                if (eh < 0) {
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        continue outer;
                    }
                    else
                        return e.find(h, k);
                }
                if ((e = e.next) == null)
                    return null;
            }
        }
    }
}

/**
 * A place-holder node used in computeIfAbsent and compute
 */
static final class ReservationNode<K,V> extends Node<K,V> {
    ReservationNode() {
        super(RESERVED, null, null, null);
    }

    Node<K,V> find(int h, Object k) {
        return null;
    }
}
/* ---------------- Table Initialization and Resizing -------------- */

/**
 * Returns the stamp bits for resizing a table of size n.
 * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
 */
static final int resizeStamp(int n) {
    return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}
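A minimal sketch of mine showing why sizeCtl goes negative during a resize (the constants RESIZE_STAMP_BITS = 16 and RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS are copied from the JDK8 source; the demo class itself is hypothetical): the stamp's high bit is set, so stamp << 16 has the sign bit set, and the low bits then count resizing threads.

public class ResizeStampDemo {
    static final int RESIZE_STAMP_BITS = 16;
    static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

    static int resizeStamp(int n) {
        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
    }

    public static void main(String[] args) {
        int n = 16;                               // current table size
        int rs = resizeStamp(n);                  // 27 | 0x8000 = 32795
        int sc = (rs << RESIZE_STAMP_SHIFT) + 2;  // first resizer sets this as sizeCtl
        System.out.println(rs + " " + sc);        // sc < 0 signals "resizing in progress"
    }
}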
/**
 * Initializes table, using the size recorded in sizeCtl.
 */
private final Node<K,V>[] initTable() {
    Node<K,V>[] tab; int sc;
    while ((tab = table) == null || tab.length == 0) {
        if ((sc = sizeCtl) < 0)
            Thread.yield(); // lost initialization race; just spin
        else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
            try {
                if ((tab = table) == null || tab.length == 0) {
                    int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                    @SuppressWarnings("unchecked")
                    Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                    table = tab = nt;
                    sc = n - (n >>> 2);
                }
            } finally {
                sizeCtl = sc;
            }
            break;
        }
    }
    return tab;
}
/**
 * Adds to count, and if table is too small and not already
 * resizing, initiates transfer. If already resizing, helps
 * perform transfer if work is available. Rechecks occupancy
 * after a transfer to see if another resize is already needed
 * because resizings are lagging additions.
 *
 * @param x the count to add
 * @param check if <0, don't check resize, if <= 1 only check if uncontended
 */
private final void addCount(long x, int check) {
    CounterCell[] as; long b, s;
    if ((as = counterCells) != null ||
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
        CounterCell a; long v; int m;
        boolean uncontended = true;
        if (as == null || (m = as.length - 1) < 0 ||
            (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
            !(uncontended =
              U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
            fullAddCount(x, uncontended);
            return;
        }
        if (check <= 1)
            return;
        s = sumCount();
    }
    if (check >= 0) {
        Node<K,V>[] tab, nt; int n, sc;
        while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
               (n = tab.length) < MAXIMUM_CAPACITY) {
            int rs = resizeStamp(n);
            if (sc < 0) {
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0)
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                    transfer(tab, nt);
            }
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2))
                transfer(tab, null);
            s = sumCount();
        }
    }
}
/**
 * Helps transfer if a resize is in progress.
 */
final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
    Node<K,V>[] nextTab; int sc;
    if (tab != null && (f instanceof ForwardingNode) &&
        (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
        int rs = resizeStamp(tab.length);
        while (nextTab == nextTable && table == tab &&
               (sc = sizeCtl) < 0) {
            if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                sc == rs + MAX_RESIZERS || transferIndex <= 0)
                break;
            if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                transfer(tab, nextTab);
                break;
            }
        }
        return nextTab;
    }
    return table;
}
/**
 * Tries to presize table to accommodate the given number of elements.
 *
 * @param size number of elements (doesn't need to be perfectly accurate)
 */
private final void tryPresize(int size) {
    int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
        tableSizeFor(size + (size >>> 1) + 1);
    int sc;
    while ((sc = sizeCtl) >= 0) {
        Node<K,V>[] tab = table; int n;
        if (tab == null || (n = tab.length) == 0) {
            n = (sc > c) ? sc : c;
            if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if (table == tab) {
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = nt;
                        sc = n - (n >>> 2);
                    }
                } finally {
                    sizeCtl = sc;
                }
            }
        }
        else if (c <= sc || n >= MAXIMUM_CAPACITY)
            break;
        else if (tab == table) {
            int rs = resizeStamp(n);
            if (sc < 0) {
                Node<K,V>[] nt;
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0)
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                    transfer(tab, nt);
            }
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2))
                transfer(tab, null);
        }
    }
}
/**
 * Moves and/or copies the nodes in each bin to new table. See
 * above for explanation.
 */
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
    int n = tab.length, stride;
    if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
        stride = MIN_TRANSFER_STRIDE; // subdivide range
    if (nextTab == null) {            // initiating
        try {
            @SuppressWarnings("unchecked")
            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
            nextTab = nt;
        } catch (Throwable ex) {      // try to cope with OOME
            sizeCtl = Integer.MAX_VALUE;
            return;
        }
        nextTable = nextTab;
        transferIndex = n;
    }
    int nextn = nextTab.length;
    ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
    boolean advance = true;
    boolean finishing = false; // to ensure sweep before committing nextTab
    for (int i = 0, bound = 0;;) {
        Node<K,V> f; int fh;
        while (advance) {
            int nextIndex, nextBound;
            if (--i >= bound || finishing)
                advance = false;
            else if ((nextIndex = transferIndex) <= 0) {
                i = -1;
                advance = false;
            }
            else if (U.compareAndSwapInt
                     (this, TRANSFERINDEX, nextIndex,
                      nextBound = (nextIndex > stride ?
                                   nextIndex - stride : 0))) {
                bound = nextBound;
                i = nextIndex - 1;
                advance = false;
            }
        }
        if (i < 0 || i >= n || i + n >= nextn) {
            int sc;
            if (finishing) {
                nextTable = null;
                table = nextTab;
                sizeCtl = (n << 1) - (n >>> 1);
                return;
            }
            if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                    return;
                finishing = advance = true;
                i = n; // recheck before commit
            }
        }
        else if ((f = tabAt(tab, i)) == null)
            advance = casTabAt(tab, i, null, fwd);
        else if ((fh = f.hash) == MOVED)
            advance = true; // already processed
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    Node<K,V> ln, hn;
                    if (fh >= 0) {
                        int runBit = fh & n;
                        Node<K,V> lastRun = f;
                        for (Node<K,V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                lastRun = p;
                            }
                        }
                        if (runBit == 0) {
                            ln = lastRun;
                            hn = null;
                        }
                        else {
                            hn = lastRun;
                            ln = null;
                        }
                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash; K pk = p.key; V pv = p.val;
                            if ((ph & n) == 0)
                                ln = new Node<K,V>(ph, pk, pv, ln);
                            else
                                hn = new Node<K,V>(ph, pk, pv, hn);
                        }
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                    else if (f instanceof TreeBin) {
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> lo = null, loTail = null;
                        TreeNode<K,V> hi = null, hiTail = null;
                        int lc = 0, hc = 0;
                        for (Node<K,V> e = t.first; e != null; e = e.next) {
                            int h = e.hash;
                            TreeNode<K,V> p = new TreeNode<K,V>
                                (h, e.key, e.val, null, null);
                            if ((h & n) == 0) {
                                if ((p.prev = loTail) == null)
                                    lo = p;
                                else
                                    loTail.next = p;
                                loTail = p;
                                ++lc;
                            }
                            else {
                                if ((p.prev = hiTail) == null)
                                    hi = p;
                                else
                                    hiTail.next = p;
                                hiTail = p;
                                ++hc;
                            }
                        }
                        ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                            (hc != 0) ? new TreeBin<K,V>(lo) : t;
                        hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                            (lc != 0) ? new TreeBin<K,V>(hi) : t;
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                }
            }
        }
    }
}
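A standalone illustration of mine for the bin-splitting rule transfer() relies on: when the table doubles from n to 2n, a node with hash h stays at index i if (h & n) == 0 and moves to i + n otherwise, because only the new high bit of the index mask changes.

public class BinSplitDemo {
    public static void main(String[] args) {
        int n = 16; // old capacity
        for (int h : new int[]{5, 21, 37}) {
            int oldIdx = h & (n - 1);
            int newIdx = h & (2 * n - 1);
            System.out.printf("hash=%d old=%d new=%d (%s)%n",
                h, oldIdx, newIdx, (h & n) == 0 ? "stays" : "moves to i+n");
        }
    }
}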
/* ---------------- Counter support -------------- */

/**
 * A padded cell for distributing counts. Adapted from LongAdder
 * and Striped64. See their internal docs for explanation.
 */
@sun.misc.Contended static final class CounterCell {
    volatile long value;
    CounterCell(long x) { value = x; }
}

final long sumCount() {
    CounterCell[] as = counterCells; CounterCell a;
    long sum = baseCount;
    if (as != null) {
        for (int i = 0; i < as.length; ++i) {
            if ((a = as[i]) != null)
                sum += a.value;
        }
    }
    return sum;
}
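The baseCount/CounterCell pair is essentially an inlined LongAdder. A heavily simplified sketch of the same idea, in a hypothetical StripedCounter class of mine (fixed stripe count, no dynamic growth, unlike the real fullAddCount): updates try the base first and fall back to a per-thread stripe on contention; the total is the sum of everything, exactly like sumCount().

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongArray;

class StripedCounter {
    private final AtomicLong base = new AtomicLong();
    private final AtomicLongArray cells = new AtomicLongArray(8); // fixed stripes for simplicity

    void add(long x) {
        long b = base.get();
        if (!base.compareAndSet(b, b + x)) {              // contended: fall back to a stripe
            int i = (int) (Thread.currentThread().getId() & 7);
            cells.addAndGet(i, x);
        }
    }

    long sum() {                                          // like sumCount(): base + all cells
        long s = base.get();
        for (int i = 0; i < cells.length(); ++i)
            s += cells.get(i);
        return s;
    }
}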
// See LongAdder version for explanation
private final void fullAddCount(long x, boolean wasUncontended) {
    int h;
    if ((h = ThreadLocalRandom.getProbe()) == 0) {
        ThreadLocalRandom.localInit(); // force initialization
        h = ThreadLocalRandom.getProbe();
        wasUncontended = true;
    }
    boolean collide = false; // true if last slot nonempty
    for (;;) {
        CounterCell[] as; CounterCell a; int n; long v;
        if ((as = counterCells) != null && (n = as.length) > 0) {
            if ((a = as[(n - 1) & h]) == null) {
                if (cellsBusy == 0) { // try to attach new Cell
                    CounterCell r = new CounterCell(x); // optimistic create
                    if (cellsBusy == 0 &&
                        U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                        boolean created = false;
                        try { // recheck under lock
                            CounterCell[] rs; int m, j;
                            if ((rs = counterCells) != null &&
                                (m = rs.length) > 0 &&
                                rs[j = (m - 1) & h] == null) {
                                rs[j] = r;
                                created = true;
                            }
                        } finally {
                            cellsBusy = 0;
                        }
                        if (created)
                            break;
                        continue; // slot is now non-empty
                    }
                }
                collide = false;
            }
            else if (!wasUncontended)  // CAS already known to fail
                wasUncontended = true; // continue after rehash
            else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                break;
            else if (counterCells != as || n >= NCPU)
                collide = false; // at max size or stale
            else if (!collide)
                collide = true;
            else if (cellsBusy == 0 &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                try {
                    if (counterCells == as) { // expand table unless stale
                        CounterCell[] rs = new CounterCell[n << 1];
                        for (int i = 0; i < n; ++i)
                            rs[i] = as[i];
                        counterCells = rs;
                    }
                } finally {
                    cellsBusy = 0;
                }
                collide = false;
                continue; // retry with expanded table
            }
            h = ThreadLocalRandom.advanceProbe(h);
        }
        else if (cellsBusy == 0 && counterCells == as &&
                 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
            boolean init = false;
            try { // initialize table
                if (counterCells == as) {
                    CounterCell[] rs = new CounterCell[2];
                    rs[h & 1] = new CounterCell(x);
                    counterCells = rs;
                    init = true;
                }
            } finally {
                cellsBusy = 0;
            }
            if (init)
                break;
        }
        else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
            break; // fall back on using base
    }
}
/* ---------------- Conversion from/to TreeBins -------------- */

/**
 * Replaces all linked nodes in bin at given index unless table is
 * too small, in which case resizes instead.
 */
private final void treeifyBin(Node<K,V>[] tab, int index) {
    Node<K,V> b; int n, sc;
    if (tab != null) {
        if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
            tryPresize(n << 1);
        else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
            synchronized (b) {
                if (tabAt(tab, index) == b) {
                    TreeNode<K,V> hd = null, tl = null;
                    for (Node<K,V> e = b; e != null; e = e.next) {
                        TreeNode<K,V> p =
                            new TreeNode<K,V>(e.hash, e.key, e.val,
                                              null, null);
                        if ((p.prev = tl) == null)
                            hd = p;
                        else
                            tl.next = p;
                        tl = p;
                    }
                    setTabAt(tab, index, new TreeBin<K,V>(hd));
                }
            }
        }
    }
}

/**
 * Returns a list on non-TreeNodes replacing those in given list.
 */
static <K,V> Node<K,V> untreeify(Node<K,V> b) {
    Node<K,V> hd = null, tl = null;
    for (Node<K,V> q = b; q != null; q = q.next) {
        Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
        if (tl == null)
            hd = p;
        else
            tl.next = p;
        tl = p;
    }
    return hd;
}
/* ---------------- TreeNodes -------------- */

/**
 * Nodes for use in TreeBins
 */
static final class TreeNode<K,V> extends Node<K,V> {
    TreeNode<K,V> parent; // red-black tree links
    TreeNode<K,V> left;
    TreeNode<K,V> right;
    TreeNode<K,V> prev;   // needed to unlink next upon deletion
    boolean red;

    TreeNode(int hash, K key, V val, Node<K,V> next,
             TreeNode<K,V> parent) {
        super(hash, key, val, next);
        this.parent = parent;
    }

    Node<K,V> find(int h, Object k) {
        return findTreeNode(h, k, null);
    }

    /**
     * Returns the TreeNode (or null if not found) for the given key
     * starting at given root.
     */
    final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
        if (k != null) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk; TreeNode<K,V> q;
                TreeNode<K,V> pl = p.left, pr = p.right;
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                    return p;
                else if (pl == null)
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                else if ((q = pr.findTreeNode(h, k, kc)) != null)
                    return q;
                else
                    p = pl;
            } while (p != null);
        }
        return null;
    }
}
/* ---------------- TreeBins -------------- */

/**
 * TreeNodes used at the heads of bins. TreeBins do not hold user
 * keys or values, but instead point to list of TreeNodes and
 * their root. They also maintain a parasitic read-write lock
 * forcing writers (who hold bin lock) to wait for readers (who do
 * not) to complete before tree restructuring operations.
 */
static final class TreeBin<K,V> extends Node<K,V> {
    TreeNode<K,V> root;
    volatile TreeNode<K,V> first;
    volatile Thread waiter;
    volatile int lockState;
    // values for lockState
    static final int WRITER = 1; // set while holding write lock
    static final int WAITER = 2; // set when waiting for write lock
    static final int READER = 4; // increment value for setting read lock

    /**
     * Tie-breaking utility for ordering insertions when equal
     * hashCodes and non-comparable. We don't require a total
     * order, just a consistent insertion rule to maintain
     * equivalence across rebalancings. Tie-breaking further than
     * necessary simplifies testing a bit.
     */
    static int tieBreakOrder(Object a, Object b) {
        int d;
        if (a == null || b == null ||
            (d = a.getClass().getName().
             compareTo(b.getClass().getName())) == 0)
            d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                 -1 : 1);
        return d;
    }

    /**
     * Creates bin with initial set of nodes headed by b.
     */
    TreeBin(TreeNode<K,V> b) {
        super(TREEBIN, null, null, null);
        this.first = b;
        TreeNode<K,V> r = null;
        for (TreeNode<K,V> x = b, next; x != null; x = next) {
            next = (TreeNode<K,V>)x.next;
            x.left = x.right = null;
            if (r == null) {
                x.parent = null;
                x.red = false;
                r = x;
            }
            else {
                K k = x.key;
                int h = x.hash;
                Class<?> kc = null;
                for (TreeNode<K,V> p = r;;) {
                    int dir, ph;
                    K pk = p.key;
                    if ((ph = p.hash) > h)
                        dir = -1;
                    else if (ph < h)
                        dir = 1;
                    else if ((kc == null &&
                              (kc = comparableClassFor(k)) == null) ||
                             (dir = compareComparables(kc, k, pk)) == 0)
                        dir = tieBreakOrder(k, pk);
                    TreeNode<K,V> xp = p;
                    if ((p = (dir <= 0) ? p.left : p.right) == null) {
                        x.parent = xp;
                        if (dir <= 0)
                            xp.left = x;
                        else
                            xp.right = x;
                        r = balanceInsertion(r, x);
                        break;
                    }
                }
            }
        }
        this.root = r;
        assert checkInvariants(root);
    }
    /**
     * Acquires write lock for tree restructuring.
     */
    private final void lockRoot() {
        if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
            contendedLock(); // offload to separate method
    }

    /**
     * Releases write lock for tree restructuring.
     */
    private final void unlockRoot() {
        lockState = 0;
    }

    /**
     * Possibly blocks awaiting root lock.
     */
    private final void contendedLock() {
        boolean waiting = false;
        for (int s;;) {
            if (((s = lockState) & ~WAITER) == 0) {
                if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
                    if (waiting)
                        waiter = null;
                    return;
                }
            }
            else if ((s & WAITER) == 0) {
                if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
                    waiting = true;
                    waiter = Thread.currentThread();
                }
            }
            else if (waiting)
                LockSupport.park(this);
        }
    }

    /**
     * Returns matching node or null if none. Tries to search
     * using tree comparisons from root, but continues linear
     * search when lock not available.
     */
    final Node<K,V> find(int h, Object k) {
        if (k != null) {
            for (Node<K,V> e = first; e != null; ) {
                int s; K ek;
                if (((s = lockState) & (WAITER|WRITER)) != 0) {
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    e = e.next;
                }
                else if (U.compareAndSwapInt(this, LOCKSTATE, s,
                                             s + READER)) {
                    TreeNode<K,V> r, p;
                    try {
                        p = ((r = root) == null ? null :
                             r.findTreeNode(h, k, null));
                    } finally {
                        Thread w;
                        if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
                            (READER|WAITER) && (w = waiter) != null)
                            LockSupport.unpark(w);
                    }
                    return p;
                }
            }
        }
        return null;
    }
    /**
     * Finds or adds a node.
     * @return null if added
     */
    final TreeNode<K,V> putTreeVal(int h, K k, V v) {
        Class<?> kc = null;
        boolean searched = false;
        for (TreeNode<K,V> p = root;;) {
            int dir, ph; K pk;
            if (p == null) {
                first = root = new TreeNode<K,V>(h, k, v, null, null);
                break;
            }
            else if ((ph = p.hash) > h)
                dir = -1;
            else if (ph < h)
                dir = 1;
            else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                return p;
            else if ((kc == null &&
                      (kc = comparableClassFor(k)) == null) ||
                     (dir = compareComparables(kc, k, pk)) == 0) {
                if (!searched) {
                    TreeNode<K,V> q, ch;
                    searched = true;
                    if (((ch = p.left) != null &&
                         (q = ch.findTreeNode(h, k, kc)) != null) ||
                        ((ch = p.right) != null &&
                         (q = ch.findTreeNode(h, k, kc)) != null))
                        return q;
                }
                dir = tieBreakOrder(k, pk);
            }

            TreeNode<K,V> xp = p;
            if ((p = (dir <= 0) ? p.left : p.right) == null) {
                TreeNode<K,V> x, f = first;
                first = x = new TreeNode<K,V>(h, k, v, f, xp);
                if (f != null)
                    f.prev = x;
                if (dir <= 0)
                    xp.left = x;
                else
                    xp.right = x;
                if (!xp.red)
                    x.red = true;
                else {
                    lockRoot();
                    try {
                        root = balanceInsertion(root, x);
                    } finally {
                        unlockRoot();
                    }
                }
                break;
            }
        }
        assert checkInvariants(root);
        return null;
    }
    /**
     * Removes the given node, that must be present before this
     * call. This is messier than typical red-black deletion code
     * because we cannot swap the contents of an interior node
     * with a leaf successor that is pinned by "next" pointers
     * that are accessible independently of lock. So instead we
     * swap the tree linkages.
     *
     * @return true if now too small, so should be untreeified
     */
    final boolean removeTreeNode(TreeNode<K,V> p) {
        TreeNode<K,V> next = (TreeNode<K,V>)p.next;
        TreeNode<K,V> pred = p.prev; // unlink traversal pointers
        TreeNode<K,V> r, rl;
        if (pred == null)
            first = next;
        else
            pred.next = next;
        if (next != null)
            next.prev = pred;
        if (first == null) {
            root = null;
            return true;
        }
        if ((r = root) == null || r.right == null || // too small
            (rl = r.left) == null || rl.left == null)
            return true;
        lockRoot();
        try {
            TreeNode<K,V> replacement;
            TreeNode<K,V> pl = p.left;
            TreeNode<K,V> pr = p.right;
            if (pl != null && pr != null) {
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    r = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;
            if (replacement != p) {
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    r = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }

            root = (p.red) ? r : balanceDeletion(r, replacement);

            if (p == replacement) { // detach pointers
                TreeNode<K,V> pp;
                if ((pp = p.parent) != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                    p.parent = null;
                }
            }
        } finally {
            unlockRoot();
        }
        assert checkInvariants(root);
        return false;
    }
    /* ------------------------------------------------------------ */
    // Red-black tree methods, all adapted from CLR

    static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                          TreeNode<K,V> p) {
        TreeNode<K,V> r, pp, rl;
        if (p != null && (r = p.right) != null) {
            if ((rl = p.right = r.left) != null)
                rl.parent = p;
            if ((pp = r.parent = p.parent) == null)
                (root = r).red = false;
            else if (pp.left == p)
                pp.left = r;
            else
                pp.right = r;
            r.left = p;
            p.parent = r;
        }
        return root;
    }

    static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                           TreeNode<K,V> p) {
        TreeNode<K,V> l, pp, lr;
        if (p != null && (l = p.left) != null) {
            if ((lr = p.left = l.right) != null)
                lr.parent = p;
            if ((pp = l.parent = p.parent) == null)
                (root = l).red = false;
            else if (pp.right == p)
                pp.right = l;
            else
                pp.left = l;
            l.right = p;
            p.parent = l;
        }
        return root;
    }

    static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                TreeNode<K,V> x) {
        x.red = true;
        for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
            if ((xp = x.parent) == null) {
                x.red = false;
                return x;
            }
            else if (!xp.red || (xpp = xp.parent) == null)
                return root;
            if (xp == (xppl = xpp.left)) {
                if ((xppr = xpp.right) != null && xppr.red) {
                    xppr.red = false;
                    xp.red = false;
                    xpp.red = true;
                    x = xpp;
                }
                else {
                    if (x == xp.right) {
                        root = rotateLeft(root, x = xp);
                        xpp = (xp = x.parent) == null ? null : xp.parent;
                    }
                    if (xp != null) {
                        xp.red = false;
                        if (xpp != null) {
                            xpp.red = true;
                            root = rotateRight(root, xpp);
                        }
                    }
                }
            }
            else {
                if (xppl != null && xppl.red) {
                    xppl.red = false;
                    xp.red = false;
                    xpp.red = true;
                    x = xpp;
                }
                else {
                    if (x == xp.left) {
                        root = rotateRight(root, x = xp);
                        xpp = (xp = x.parent) == null ? null : xp.parent;
                    }
                    if (xp != null) {
                        xp.red = false;
                        if (xpp != null) {
                            xpp.red = true;
                            root = rotateLeft(root, xpp);
                        }
                    }
                }
            }
        }
    }

    static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                               TreeNode<K,V> x) {
        for (TreeNode<K,V> xp, xpl, xpr;;) {
            if (x == null || x == root)
                return root;
            else if ((xp = x.parent) == null) {
                x.red = false;
                return x;
            }
            else if (x.red) {
                x.red = false;
                return root;
            }
            else if ((xpl = xp.left) == x) {
                if ((xpr = xp.right) != null && xpr.red) {
                    xpr.red = false;
                    xp.red = true;
                    root = rotateLeft(root, xp);
                    xpr = (xp = x.parent) == null ? null : xp.right;
                }
                if (xpr == null)
                    x = xp;
                else {
                    TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                    if ((sr == null || !sr.red) &&
                        (sl == null || !sl.red)) {
                        xpr.red = true;
                        x = xp;
                    }
                    else {
                        if (sr == null || !sr.red) {
                            if (sl != null)
                                sl.red = false;
                            xpr.red = true;
                            root = rotateRight(root, xpr);
                            xpr = (xp = x.parent) == null ?
                                null : xp.right;
                        }
                        if (xpr != null) {
                            xpr.red = (xp == null) ? false : xp.red;
                            if ((sr = xpr.right) != null)
                                sr.red = false;
                        }
                        if (xp != null) {
                            xp.red = false;
                            root = rotateLeft(root, xp);
                        }
                        x = root;
                    }
                }
            }
            else { // symmetric
                if (xpl != null && xpl.red) {
                    xpl.red = false;
                    xp.red = true;
                    root = rotateRight(root, xp);
                    xpl = (xp = x.parent) == null ? null : xp.left;
                }
                if (xpl == null)
                    x = xp;
                else {
                    TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                    if ((sl == null || !sl.red) &&
                        (sr == null || !sr.red)) {
                        xpl.red = true;
                        x = xp;
                    }
                    else {
                        if (sl == null || !sl.red) {
                            if (sr != null)
                                sr.red = false;
                            xpl.red = true;
                            root = rotateLeft(root, xpl);
                            xpl = (xp = x.parent) == null ?
                                null : xp.left;
                        }
                        if (xpl != null) {
                            xpl.red = (xp == null) ? false : xp.red;
                            if ((sl = xpl.left) != null)
                                sl.red = false;
                        }
                        if (xp != null) {
                            xp.red = false;
                            root = rotateRight(root, xp);
                        }
                        x = root;
                    }
                }
            }
        }
    }
    /**
     * Recursive invariant check
     */
    static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
        TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
            tb = t.prev, tn = (TreeNode<K,V>)t.next;
        if (tb != null && tb.next != t)
            return false;
        if (tn != null && tn.prev != t)
            return false;
        if (tp != null && t != tp.left && t != tp.right)
            return false;
        if (tl != null && (tl.parent != t || tl.hash > t.hash))
            return false;
        if (tr != null && (tr.parent != t || tr.hash < t.hash))
            return false;
        if (t.red && tl != null && tl.red && tr != null && tr.red)
            return false;
        if (tl != null && !checkInvariants(tl))
            return false;
        if (tr != null && !checkInvariants(tr))
            return false;
        return true;
    }

    private static final sun.misc.Unsafe U;
    private static final long LOCKSTATE;
    static {
        try {
            U = sun.misc.Unsafe.getUnsafe();
            Class<?> k = TreeBin.class;
            LOCKSTATE = U.objectFieldOffset
                (k.getDeclaredField("lockState"));
        } catch (Exception e) {
            throw new Error(e);
        }
    }
}
/* ---------------- Table Traversal -------------- */

/**
 * Records the table, its length, and current traversal index for a
 * traverser that must process a region of a forwarded table before
 * proceeding with current table.
 */
static final class TableStack<K,V> {
    int length;
    int index;
    Node<K,V>[] tab;
    TableStack<K,V> next;
}

/**
 * Encapsulates traversal for methods such as containsValue; also
 * serves as a base class for other iterators and spliterators.
 *
 * Method advance visits once each still-valid node that was
 * reachable upon iterator construction. It might miss some that
 * were added to a bin after the bin was visited, which is OK wrt
 * consistency guarantees. Maintaining this property in the face
 * of possible ongoing resizes requires a fair amount of
 * bookkeeping state that is difficult to optimize away amidst
 * volatile accesses. Even so, traversal maintains reasonable
 * throughput.
 *
 * Normally, iteration proceeds bin-by-bin traversing lists.
 * However, if the table has been resized, then all future steps
 * must traverse both the bin at the current index as well as at
 * (index + baseSize); and so on for further resizings. To
 * paranoically cope with potential sharing by users of iterators
 * across threads, iteration terminates if a bounds check fails
 * for a table read.
 */
static class Traverser<K,V> {
    Node<K,V>[] tab;               // current table; updated if resized
    Node<K,V> next;                // the next entry to use
    TableStack<K,V> stack, spare;  // to save/restore on ForwardingNodes
    int index;                     // index of bin to use next
    int baseIndex;                 // current index of initial table
    int baseLimit;                 // index bound for initial table
    final int baseSize;            // initial table size

    Traverser(Node<K,V>[] tab, int size, int index, int limit) {
        this.tab = tab;
        this.baseSize = size;
        this.baseIndex = this.index = index;
        this.baseLimit = limit;
        this.next = null;
    }

    /**
     * Advances if possible, returning next valid node, or null if none.
     */
    final Node<K,V> advance() {
        Node<K,V> e;
        if ((e = next) != null)
            e = e.next;
        for (;;) {
            Node<K,V>[] t; int i, n; // must use locals in checks
            if (e != null)
                return next = e;
            if (baseIndex >= baseLimit || (t = tab) == null ||
                (n = t.length) <= (i = index) || i < 0)
                return next = null;
            if ((e = tabAt(t, i)) != null && e.hash < 0) {
                if (e instanceof ForwardingNode) {
                    tab = ((ForwardingNode<K,V>)e).nextTable;
                    e = null;
                    pushState(t, i, n);
                    continue;
                }
                else if (e instanceof TreeBin)
                    e = ((TreeBin<K,V>)e).first;
                else
                    e = null;
            }
            if (stack != null)
                recoverState(n);
            else if ((index = i + baseSize) >= n)
                index = ++baseIndex; // visit upper slots if present
        }
    }

    /**
     * Saves traversal state upon encountering a forwarding node.
     */
    private void pushState(Node<K,V>[] t, int i, int n) {
        TableStack<K,V> s = spare; // reuse if possible
        if (s != null)
            spare = s.next;
        else
            s = new TableStack<K,V>();
        s.tab = t;
        s.length = n;
        s.index = i;
        s.next = stack;
        stack = s;
    }

    /**
     * Possibly pops traversal state.
     *
     * @param n length of current table
     */
    private void recoverState(int n) {
        TableStack<K,V> s; int len;
        while ((s = stack) != null && (index += (len = s.length)) >= n) {
            n = len;
            index = s.index;
            tab = s.tab;
            s.tab = null;
            TableStack<K,V> next = s.next;
            s.next = spare; // save for reuse
            stack = next;
            spare = s;
        }
        if (s == null && (index += baseSize) >= n)
            index = ++baseIndex;
    }
}
/**
 * Base of key, value, and entry Iterators. Adds fields to
 * Traverser to support iterator.remove.
 */
static class BaseIterator<K,V> extends Traverser<K,V> {
    final ConcurrentHashMap<K,V> map;
    Node<K,V> lastReturned;
    BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
                 ConcurrentHashMap<K,V> map) {
        super(tab, size, index, limit);
        this.map = map;
        advance();
    }

    public final boolean hasNext() { return next != null; }
    public final boolean hasMoreElements() { return next != null; }

    public final void remove() {
        Node<K,V> p;
        if ((p = lastReturned) == null)
            throw new IllegalStateException();
        lastReturned = null;
        map.replaceNode(p.key, null, null);
    }
}
static final class KeyIterator<K,V> extends BaseIterator<K,V>
    implements Iterator<K>, Enumeration<K> {
    KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
                ConcurrentHashMap<K,V> map) {
        super(tab, index, size, limit, map);
    }

    public final K next() {
        Node<K,V> p;
        if ((p = next) == null)
            throw new NoSuchElementException();
        K k = p.key;
        lastReturned = p;
        advance();
        return k;
    }

    public final K nextElement() { return next(); }
}

static final class ValueIterator<K,V> extends BaseIterator<K,V>
    implements Iterator<V>, Enumeration<V> {
    ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
                  ConcurrentHashMap<K,V> map) {
        super(tab, index, size, limit, map);
    }

    public final V next() {
        Node<K,V> p;
        if ((p = next) == null)
            throw new NoSuchElementException();
        V v = p.val;
        lastReturned = p;
        advance();
        return v;
    }

    public final V nextElement() { return next(); }
}

static final class EntryIterator<K,V> extends BaseIterator<K,V>
    implements Iterator<Map.Entry<K,V>> {
    EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
                  ConcurrentHashMap<K,V> map) {
        super(tab, index, size, limit, map);
    }

    public final Map.Entry<K,V> next() {
        Node<K,V> p;
        if ((p = next) == null)
            throw new NoSuchElementException();
        K k = p.key;
        V v = p.val;
        lastReturned = p;
        advance();
        return new MapEntry<K,V>(k, v, map);
    }
}
/**
 * Exported Entry for EntryIterator
 */
static final class MapEntry<K,V> implements Map.Entry<K,V> {
    final K key; // non-null
    V val;       // non-null
    final ConcurrentHashMap<K,V> map;
    MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
        this.key = key;
        this.val = val;
        this.map = map;
    }
    public K getKey()        { return key; }
    public V getValue()      { return val; }
    public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
    public String toString() { return key + "=" + val; }

    public boolean equals(Object o) {
        Object k, v; Map.Entry<?,?> e;
        return ((o instanceof Map.Entry) &&
                (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                (v = e.getValue()) != null &&
                (k == key || k.equals(key)) &&
                (v == val || v.equals(val)));
    }

    /**
     * Sets our entry's value and writes through to the map. The
     * value to return is somewhat arbitrary here. Since we do not
     * necessarily track asynchronous changes, the most recent
     * "previous" value could be different from what we return (or
     * could even have been removed, in which case the put will
     * re-establish). We do not and cannot guarantee more.
     */
    public V setValue(V value) {
        if (value == null) throw new NullPointerException();
        V v = val;
        val = value;
        map.put(key, value);
        return v;
    }
}
static final class KeySpliterator<K,V> extends Traverser<K,V>
    implements Spliterator<K> {
    long est; // size estimate
    KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                   long est) {
        super(tab, size, index, limit);
        this.est = est;
    }

    public Spliterator<K> trySplit() {
        int i, f, h;
        return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
            new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                    f, est >>>= 1);
    }

    public void forEachRemaining(Consumer<? super K> action) {
        if (action == null) throw new NullPointerException();
        for (Node<K,V> p; (p = advance()) != null;)
            action.accept(p.key);
    }

    public boolean tryAdvance(Consumer<? super K> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V> p;
        if ((p = advance()) == null)
            return false;
        action.accept(p.key);
        return true;
    }

    public long estimateSize() { return est; }

    public int characteristics() {
        return Spliterator.DISTINCT | Spliterator.CONCURRENT |
            Spliterator.NONNULL;
    }
}
static final class ValueSpliterator<K,V> extends Traverser<K,V>
    implements Spliterator<V> {
    long est; // size estimate
    ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
                     long est) {
        super(tab, size, index, limit);
        this.est = est;
    }

    public Spliterator<V> trySplit() {
        int i, f, h;
        return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
            new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
                                      f, est >>>= 1);
    }

    public void forEachRemaining(Consumer<? super V> action) {
        if (action == null) throw new NullPointerException();
        for (Node<K,V> p; (p = advance()) != null;)
            action.accept(p.val);
    }

    public boolean tryAdvance(Consumer<? super V> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V> p;
        if ((p = advance()) == null)
            return false;
        action.accept(p.val);
        return true;
    }

    public long estimateSize() { return est; }

    public int characteristics() {
        return Spliterator.CONCURRENT | Spliterator.NONNULL;
    }
}
static final class EntrySpliterator<K,V> extends Traverser<K,V>
    implements Spliterator<Map.Entry<K,V>> {
    final ConcurrentHashMap<K,V> map; // to export MapEntry
    long est; // size estimate
    EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                     long est, ConcurrentHashMap<K,V> map) {
        super(tab, size, index, limit);
        this.map = map;
        this.est = est;
    }

    public Spliterator<Map.Entry<K,V>> trySplit() {
        int i, f, h;
        return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
            new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                      f, est >>>= 1, map);
    }

    public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
        if (action == null) throw new NullPointerException();
        for (Node<K,V> p; (p = advance()) != null; )
            action.accept(new MapEntry<K,V>(p.key, p.val, map));
    }

    public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V> p;
        if ((p = advance()) == null)
            return false;
        action.accept(new MapEntry<K,V>(p.key, p.val, map));
        return true;
    }

    public long estimateSize() { return est; }

    public int characteristics() {
        return Spliterator.DISTINCT | Spliterator.CONCURRENT |
            Spliterator.NONNULL;
    }
}
// Parallel bulk operations

/**
 * Computes initial batch value for bulk tasks. The returned value
 * is approximately exp2 of the number of times (minus one) to
 * split task by two before executing leaf action. This value is
 * faster to compute and more convenient to use as a guide to
 * splitting than is the depth, since it is used while dividing by
 * two anyway.
 */
final int batchFor(long b) {
    long n;
    if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
        return 0;
    int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
    return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
}
/**
 * Performs the given action for each (key, value).
 *
 * @param parallelismThreshold the (estimated) number of elements
 *        needed for this operation to be executed in parallel
 * @param action the action
 * @since 1.8
 */
public void forEach(long parallelismThreshold,
                    BiConsumer<? super K,? super V> action) {
    if (action == null) throw new NullPointerException();
    new ForEachMappingTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         action).invoke();
}
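With a parallelismThreshold of 1 the action runs across the common ForkJoinPool; with Long.MAX_VALUE it stays single-threaded. A small usage sketch of my own:

import java.util.concurrent.ConcurrentHashMap;

public class BulkForEachDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        m.put("a", 1);
        m.put("b", 2);
        // threshold 1: parallelize eagerly; Long.MAX_VALUE would force sequential
        m.forEach(1L, (k, v) -> System.out.println(k + "=" + v));
    }
}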
/**
 * Performs the given action for each non-null transformation
 * of each (key, value).
 *
 * @param parallelismThreshold the (estimated) number of elements
 *        needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 *        for an element, or null if there is no transformation (in
 *        which case the action is not applied)
 * @param action the action
 * @param <U> the return type of the transformer
 * @since 1.8
 */
public <U> void forEach(long parallelismThreshold,
                        BiFunction<? super K, ? super V, ? extends U> transformer,
                        Consumer<? super U> action) {
    if (transformer == null || action == null)
        throw new NullPointerException();
    new ForEachTransformedMappingTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         transformer, action).invoke();
}
/**
 * Returns a non-null result from applying the given search
 * function on each (key, value), or null if none. Upon
 * success, further element processing is suppressed and the
 * results of any other parallel invocations of the search
 * function are ignored.
 *
 * @param parallelismThreshold the (estimated) number of elements
 *        needed for this operation to be executed in parallel
 * @param searchFunction a function returning a non-null
 *        result on success, else null
 * @param <U> the return type of the search function
 * @return a non-null result from applying the given search
 *         function on each (key, value), or null if none
 * @since 1.8
 */
public <U> U search(long parallelismThreshold,
                    BiFunction<? super K, ? super V, ? extends U> searchFunction) {
    if (searchFunction == null) throw new NullPointerException();
    return new SearchMappingsTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         searchFunction, new AtomicReference<U>()).invoke();
}
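search stops as soon as any worker produces a non-null result, which makes it a parallel "find first match". A sketch of my own:

import java.util.concurrent.ConcurrentHashMap;

public class SearchDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        m.put("a", 1);
        m.put("b", 20);
        // The first non-null result wins and suppresses further processing.
        String hit = m.search(1L, (k, v) -> v > 10 ? k : null);
        System.out.println(hit); // b
    }
}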
/**
 * Returns the result of accumulating the given transformation
 * of all (key, value) pairs using the given reducer to
 * combine values, or null if none.
 *
 * @param parallelismThreshold the (estimated) number of elements
 *        needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 *        for an element, or null if there is no transformation (in
 *        which case it is not combined)
 * @param reducer a commutative associative combining function
 * @param <U> the return type of the transformer
 * @return the result of accumulating the given transformation
 *         of all (key, value) pairs
 * @since 1.8
 */
public <U> U reduce(long parallelismThreshold,
                    BiFunction<? super K, ? super V, ? extends U> transformer,
                    BiFunction<? super U, ? super U, ? extends U> reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceMappingsTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, reducer).invoke();
}
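reduce is a map-then-combine over all entries; the reducer must be commutative and associative because combination order is unspecified. A small example of my own:

import java.util.concurrent.ConcurrentHashMap;

public class ReduceDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> m = new ConcurrentHashMap<>();
        m.put("a", 1);
        m.put("b", 2);
        m.put("c", 3);
        // Transform each (key, value) to its value, then sum pairwise.
        Integer total = m.reduce(1L, (k, v) -> v, Integer::sum);
        System.out.println(total); // 6
    }
}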
/**
 * Returns the result of accumulating the given transformation
 * of all (key, value) pairs using the given reducer to
 * combine values, and the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 *        needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 *        for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 *         of all (key, value) pairs
 * @since 1.8
 */
public double reduceToDouble(long parallelismThreshold,
                             ToDoubleBiFunction<? super K, ? super V> transformer,
                             double basis,
                             DoubleBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceMappingsToDoubleTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all (key, value) pairs using the given reducer to
 * combine values, and the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 *        needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 *        for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 *         of all (key, value) pairs
 * @since 1.8
 */
public long reduceToLong(long parallelismThreshold,
                         ToLongBiFunction<? super K, ? super V> transformer,
                         long basis,
                         LongBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceMappingsToLongTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all (key, value) pairs using the given reducer to
 * combine values, and the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 *        needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 *        for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 *         of all (key, value) pairs
 * @since 1.8
 */
public int reduceToInt(long parallelismThreshold,
                       ToIntBiFunction<? super K, ? super V> transformer,
                       int basis,
                       IntBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceMappingsToIntTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}
/**
 * Performs the given action for each key.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param action the action
 * @since 1.8
 */
public void forEachKey(long parallelismThreshold,
                       Consumer<? super K> action) {
    if (action == null) throw new NullPointerException();
    new ForEachKeyTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         action).invoke();
}

/**
 * Performs the given action for each non-null transformation
 * of each key.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element, or null if there is no transformation (in
 * which case the action is not applied)
 * @param action the action
 * @param <U> the return type of the transformer
 * @since 1.8
 */
public <U> void forEachKey(long parallelismThreshold,
                           Function<? super K, ? extends U> transformer,
                           Consumer<? super U> action) {
    if (transformer == null || action == null)
        throw new NullPointerException();
    new ForEachTransformedKeyTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         transformer, action).invoke();
}

/**
 * Returns a non-null result from applying the given search
 * function on each key, or null if none. Upon success,
 * further element processing is suppressed and the results of
 * any other parallel invocations of the search function are
 * ignored.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param searchFunction a function returning a non-null
 * result on success, else null
 * @param <U> the return type of the search function
 * @return a non-null result from applying the given search
 * function on each key, or null if none
 * @since 1.8
 */
public <U> U searchKeys(long parallelismThreshold,
                        Function<? super K, ? extends U> searchFunction) {
    if (searchFunction == null) throw new NullPointerException();
    return new SearchKeysTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         searchFunction, new AtomicReference<U>()).invoke();
}
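forEachKey and searchKeys in action; a small sketch with made-up data. The search variant short-circuits: once some task publishes a non-null result, remaining probes are abandoned:

import java.util.concurrent.ConcurrentHashMap;

public class KeyOpsDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("apple", 1); map.put("banana", 2);

        // May run on several ForkJoin workers; output order is unspecified.
        map.forEachKey(1, k -> System.out.println(k));

        // Some key longer than 5 characters, or null if none matches.
        String hit = map.searchKeys(1, k -> k.length() > 5 ? k : null);
        System.out.println(hit); // banana
    }
}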
/**
 * Returns the result of accumulating all keys using the given
 * reducer to combine values, or null if none.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param reducer a commutative associative combining function
 * @return the result of accumulating all keys using the given
 * reducer to combine values, or null if none
 * @since 1.8
 */
public K reduceKeys(long parallelismThreshold,
                    BiFunction<? super K, ? super K, ? extends K> reducer) {
    if (reducer == null) throw new NullPointerException();
    return new ReduceKeysTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all keys using the given reducer to combine values, or
 * null if none.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element, or null if there is no transformation (in
 * which case it is not combined)
 * @param reducer a commutative associative combining function
 * @param <U> the return type of the transformer
 * @return the result of accumulating the given transformation
 * of all keys
 * @since 1.8
 */
public <U> U reduceKeys(long parallelismThreshold,
                        Function<? super K, ? extends U> transformer,
                        BiFunction<? super U, ? super U, ? extends U> reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceKeysTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all keys using the given reducer to combine values, and
 * the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 * of all keys
 * @since 1.8
 */
public double reduceKeysToDouble(long parallelismThreshold,
                                 ToDoubleFunction<? super K> transformer,
                                 double basis,
                                 DoubleBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceKeysToDoubleTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all keys using the given reducer to combine values, and
 * the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 * of all keys
 * @since 1.8
 */
public long reduceKeysToLong(long parallelismThreshold,
                             ToLongFunction<? super K> transformer,
                             long basis,
                             LongBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceKeysToLongTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all keys using the given reducer to combine values, and
 * the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 * of all keys
 * @since 1.8
 */
public int reduceKeysToInt(long parallelismThreshold,
                           ToIntFunction<? super K> transformer,
                           int basis,
                           IntBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceKeysToIntTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}
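The key-only family mirrors the mapping family. A quick sketch (sample data mine):

import java.util.concurrent.ConcurrentHashMap;

public class ReduceKeysDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("x", 1); map.put("yy", 2); map.put("zzz", 3);

        // Smallest key in natural order, or null for an empty map.
        String minKey = map.reduceKeys(1,
            (k1, k2) -> k1.compareTo(k2) <= 0 ? k1 : k2);

        // Total length of all keys; 0 is the basis (identity) value.
        int totalLen = map.reduceKeysToInt(1, String::length, 0, Integer::sum);
        System.out.println(minKey + " / " + totalLen); // x / 6
    }
}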
/**
 * Performs the given action for each value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param action the action
 * @since 1.8
 */
public void forEachValue(long parallelismThreshold,
                         Consumer<? super V> action) {
    if (action == null) throw new NullPointerException();
    new ForEachValueTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         action).invoke();
}

/**
 * Performs the given action for each non-null transformation
 * of each value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element, or null if there is no transformation (in
 * which case the action is not applied)
 * @param action the action
 * @param <U> the return type of the transformer
 * @since 1.8
 */
public <U> void forEachValue(long parallelismThreshold,
                             Function<? super V, ? extends U> transformer,
                             Consumer<? super U> action) {
    if (transformer == null || action == null)
        throw new NullPointerException();
    new ForEachTransformedValueTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         transformer, action).invoke();
}

/**
 * Returns a non-null result from applying the given search
 * function on each value, or null if none. Upon success,
 * further element processing is suppressed and the results of
 * any other parallel invocations of the search function are
 * ignored.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param searchFunction a function returning a non-null
 * result on success, else null
 * @param <U> the return type of the search function
 * @return a non-null result from applying the given search
 * function on each value, or null if none
 * @since 1.8
 */
public <U> U searchValues(long parallelismThreshold,
                          Function<? super V, ? extends U> searchFunction) {
    if (searchFunction == null) throw new NullPointerException();
    return new SearchValuesTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         searchFunction, new AtomicReference<U>()).invoke();
}
/**
 * Returns the result of accumulating all values using the
 * given reducer to combine values, or null if none.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param reducer a commutative associative combining function
 * @return the result of accumulating all values
 * @since 1.8
 */
public V reduceValues(long parallelismThreshold,
                      BiFunction<? super V, ? super V, ? extends V> reducer) {
    if (reducer == null) throw new NullPointerException();
    return new ReduceValuesTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all values using the given reducer to combine values, or
 * null if none.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element, or null if there is no transformation (in
 * which case it is not combined)
 * @param reducer a commutative associative combining function
 * @param <U> the return type of the transformer
 * @return the result of accumulating the given transformation
 * of all values
 * @since 1.8
 */
public <U> U reduceValues(long parallelismThreshold,
                          Function<? super V, ? extends U> transformer,
                          BiFunction<? super U, ? super U, ? extends U> reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceValuesTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all values using the given reducer to combine values,
 * and the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 * of all values
 * @since 1.8
 */
public double reduceValuesToDouble(long parallelismThreshold,
                                   ToDoubleFunction<? super V> transformer,
                                   double basis,
                                   DoubleBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceValuesToDoubleTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all values using the given reducer to combine values,
 * and the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 * of all values
 * @since 1.8
 */
public long reduceValuesToLong(long parallelismThreshold,
                               ToLongFunction<? super V> transformer,
                               long basis,
                               LongBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceValuesToLongTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all values using the given reducer to combine values,
 * and the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 * of all values
 * @since 1.8
 */
public int reduceValuesToInt(long parallelismThreshold,
                             ToIntFunction<? super V> transformer,
                             int basis,
                             IntBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceValuesToIntTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}
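A corresponding sketch for the value family (sample data mine):

import java.util.concurrent.ConcurrentHashMap;

public class ReduceValuesDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 10); map.put("b", 30); map.put("c", 20);

        // Largest value, or null if the map is empty.
        Integer max = map.reduceValues(1, (v1, v2) -> v1 >= v2 ? v1 : v2);

        // Sum as a double, starting from the basis 0.0.
        double sum = map.reduceValuesToDouble(1, v -> v, 0.0, Double::sum);
        System.out.println(max + " / " + sum); // 30 / 60.0
    }
}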
/**
 * Performs the given action for each entry.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param action the action
 * @since 1.8
 */
public void forEachEntry(long parallelismThreshold,
                         Consumer<? super Map.Entry<K,V>> action) {
    if (action == null) throw new NullPointerException();
    new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
                              action).invoke();
}

/**
 * Performs the given action for each non-null transformation
 * of each entry.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element, or null if there is no transformation (in
 * which case the action is not applied)
 * @param action the action
 * @param <U> the return type of the transformer
 * @since 1.8
 */
public <U> void forEachEntry(long parallelismThreshold,
                             Function<Map.Entry<K,V>, ? extends U> transformer,
                             Consumer<? super U> action) {
    if (transformer == null || action == null)
        throw new NullPointerException();
    new ForEachTransformedEntryTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         transformer, action).invoke();
}

/**
 * Returns a non-null result from applying the given search
 * function on each entry, or null if none. Upon success,
 * further element processing is suppressed and the results of
 * any other parallel invocations of the search function are
 * ignored.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param searchFunction a function returning a non-null
 * result on success, else null
 * @param <U> the return type of the search function
 * @return a non-null result from applying the given search
 * function on each entry, or null if none
 * @since 1.8
 */
public <U> U searchEntries(long parallelismThreshold,
                           Function<Map.Entry<K,V>, ? extends U> searchFunction) {
    if (searchFunction == null) throw new NullPointerException();
    return new SearchEntriesTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         searchFunction, new AtomicReference<U>()).invoke();
}
/**
 * Returns the result of accumulating all entries using the
 * given reducer to combine values, or null if none.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param reducer a commutative associative combining function
 * @return the result of accumulating all entries
 * @since 1.8
 */
public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
                                    BiFunction<Map.Entry<K,V>, Map.Entry<K,V>,
                                               ? extends Map.Entry<K,V>> reducer) {
    if (reducer == null) throw new NullPointerException();
    return new ReduceEntriesTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all entries using the given reducer to combine values,
 * or null if none.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element, or null if there is no transformation (in
 * which case it is not combined)
 * @param reducer a commutative associative combining function
 * @param <U> the return type of the transformer
 * @return the result of accumulating the given transformation
 * of all entries
 * @since 1.8
 */
public <U> U reduceEntries(long parallelismThreshold,
                           Function<Map.Entry<K,V>, ? extends U> transformer,
                           BiFunction<? super U, ? super U, ? extends U> reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceEntriesTask<K,V,U>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all entries using the given reducer to combine values,
 * and the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 * of all entries
 * @since 1.8
 */
public double reduceEntriesToDouble(long parallelismThreshold,
                                    ToDoubleFunction<Map.Entry<K,V>> transformer,
                                    double basis,
                                    DoubleBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceEntriesToDoubleTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all entries using the given reducer to combine values,
 * and the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 * of all entries
 * @since 1.8
 */
public long reduceEntriesToLong(long parallelismThreshold,
                                ToLongFunction<Map.Entry<K,V>> transformer,
                                long basis,
                                LongBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceEntriesToLongTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}

/**
 * Returns the result of accumulating the given transformation
 * of all entries using the given reducer to combine values,
 * and the given basis as an identity value.
 *
 * @param parallelismThreshold the (estimated) number of elements
 * needed for this operation to be executed in parallel
 * @param transformer a function returning the transformation
 * for an element
 * @param basis the identity (initial default value) for the reduction
 * @param reducer a commutative associative combining function
 * @return the result of accumulating the given transformation
 * of all entries
 * @since 1.8
 */
public int reduceEntriesToInt(long parallelismThreshold,
                              ToIntFunction<Map.Entry<K,V>> transformer,
                              int basis,
                              IntBinaryOperator reducer) {
    if (transformer == null || reducer == null)
        throw new NullPointerException();
    return new MapReduceEntriesToIntTask<K,V>
        (null, batchFor(parallelismThreshold), 0, 0, table,
         null, transformer, basis, reducer).invoke();
}
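And the entry family; note the reducer works on Map.Entry objects directly. A sketch (sample data mine):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ReduceEntriesDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 10); map.put("b", 30);

        // Entry with the largest value.
        Map.Entry<String, Integer> best = map.reduceEntries(1,
            (e1, e2) -> e1.getValue() >= e2.getValue() ? e1 : e2);
        System.out.println(best); // b=30

        // Key of some entry whose value exceeds 15, else null.
        String hit = map.searchEntries(1,
            e -> e.getValue() > 15 ? e.getKey() : null);
        System.out.println(hit); // b
    }
}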
/* ---------------- Views -------------- */
/**
 * Base class for views.
 */
abstract static class CollectionView<K,V,E>
    implements Collection<E>, java.io.Serializable {
    private static final long serialVersionUID = 7249069246763182397L;
    final ConcurrentHashMap<K,V> map;
    CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }

    /**
     * Returns the map backing this view.
     *
     * @return the map backing this view
     */
    public ConcurrentHashMap<K,V> getMap() { return map; }

    /**
     * Removes all of the elements from this view, by removing all
     * the mappings from the map backing this view.
     */
    public final void clear()      { map.clear(); }
    public final int size()        { return map.size(); }
    public final boolean isEmpty() { return map.isEmpty(); }

    // implementations below rely on concrete classes supplying these
    // abstract methods
    /**
     * Returns an iterator over the elements in this collection.
     *
     * <p>The returned iterator is
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * @return an iterator over the elements in this collection
     */
    public abstract Iterator<E> iterator();
    public abstract boolean contains(Object o);
    public abstract boolean remove(Object o);

    private static final String oomeMsg = "Required array size too large";
    public final Object[] toArray() {
        long sz = map.mappingCount();
        if (sz > MAX_ARRAY_SIZE)
            throw new OutOfMemoryError(oomeMsg);
        int n = (int)sz;
        Object[] r = new Object[n];
        int i = 0;
        for (E e : this) {
            if (i == n) {
                if (n >= MAX_ARRAY_SIZE)
                    throw new OutOfMemoryError(oomeMsg);
                if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                    n = MAX_ARRAY_SIZE;
                else
                    n += (n >>> 1) + 1;
                r = Arrays.copyOf(r, n);
            }
            r[i++] = e;
        }
        return (i == n) ? r : Arrays.copyOf(r, i);
    }

    @SuppressWarnings("unchecked")
    public final <T> T[] toArray(T[] a) {
        long sz = map.mappingCount();
        if (sz > MAX_ARRAY_SIZE)
            throw new OutOfMemoryError(oomeMsg);
        int m = (int)sz;
        T[] r = (a.length >= m) ? a :
            (T[])java.lang.reflect.Array
            .newInstance(a.getClass().getComponentType(), m);
        int n = r.length;
        int i = 0;
        for (E e : this) {
            if (i == n) {
                if (n >= MAX_ARRAY_SIZE)
                    throw new OutOfMemoryError(oomeMsg);
                if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                    n = MAX_ARRAY_SIZE;
                else
                    n += (n >>> 1) + 1;
                r = Arrays.copyOf(r, n);
            }
            r[i++] = (T)e;
        }
        if (a == r && i < n) {
            r[i] = null; // null-terminate
            return r;
        }
        return (i == n) ? r : Arrays.copyOf(r, i);
    }
    /**
     * Returns a string representation of this collection.
     * The string representation consists of the string representations
     * of the collection's elements in the order they are returned by
     * its iterator, enclosed in square brackets ({@code "[]"}).
     * Adjacent elements are separated by the characters {@code ", "}
     * (comma and space). Elements are converted to strings as by
     * {@link String#valueOf(Object)}.
     *
     * @return a string representation of this collection
     */
    public final String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append('[');
        Iterator<E> it = iterator();
        if (it.hasNext()) {
            for (;;) {
                Object e = it.next();
                sb.append(e == this ? "(this Collection)" : e);
                if (!it.hasNext())
                    break;
                sb.append(',').append(' ');
            }
        }
        return sb.append(']').toString();
    }

    public final boolean containsAll(Collection<?> c) {
        if (c != this) {
            for (Object e : c) {
                if (e == null || !contains(e))
                    return false;
            }
        }
        return true;
    }

    public final boolean removeAll(Collection<?> c) {
        if (c == null) throw new NullPointerException();
        boolean modified = false;
        for (Iterator<E> it = iterator(); it.hasNext();) {
            if (c.contains(it.next())) {
                it.remove();
                modified = true;
            }
        }
        return modified;
    }

    public final boolean retainAll(Collection<?> c) {
        if (c == null) throw new NullPointerException();
        boolean modified = false;
        for (Iterator<E> it = iterator(); it.hasNext();) {
            if (!c.contains(it.next())) {
                it.remove();
                modified = true;
            }
        }
        return modified;
    }
}
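The point of CollectionView is that every view writes through to the backing map: size(), clear() and friends simply delegate, and iterator removal removes real mappings. A small sketch of that behavior (example values mine):

import java.util.concurrent.ConcurrentHashMap;

public class ViewDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1); map.put("b", 2);

        map.keySet().remove("a");          // removes the whole mapping
        System.out.println(map);           // {b=2}

        map.values().clear();              // delegates to map.clear()
        System.out.println(map.isEmpty()); // true
    }
}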
/**
 * A view of a ConcurrentHashMap as a {@link Set} of keys, in
 * which additions may optionally be enabled by mapping to a
 * common value. This class cannot be directly instantiated.
 * See {@link #keySet() keySet()},
 * {@link #keySet(Object) keySet(V)},
 * {@link #newKeySet() newKeySet()},
 * {@link #newKeySet(int) newKeySet(int)}.
 *
 * @since 1.8
 */
public static class KeySetView<K,V> extends CollectionView<K,V,K>
    implements Set<K>, java.io.Serializable {
    private static final long serialVersionUID = 7249069246763182397L;
    private final V value;
    KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
        super(map);
        this.value = value;
    }

    /**
     * Returns the default mapped value for additions,
     * or {@code null} if additions are not supported.
     *
     * @return the default mapped value for additions, or {@code null}
     * if not supported
     */
    public V getMappedValue() { return value; }

    /**
     * {@inheritDoc}
     * @throws NullPointerException if the specified key is null
     */
    public boolean contains(Object o) { return map.containsKey(o); }

    /**
     * Removes the key from this map view, by removing the key (and its
     * corresponding value) from the backing map. This method does
     * nothing if the key is not in the map.
     *
     * @param o the key to be removed from the backing map
     * @return {@code true} if the backing map contained the specified key
     * @throws NullPointerException if the specified key is null
     */
    public boolean remove(Object o) { return map.remove(o) != null; }

    /**
     * @return an iterator over the keys of the backing map
     */
    public Iterator<K> iterator() {
        Node<K,V>[] t;
        ConcurrentHashMap<K,V> m = map;
        int f = (t = m.table) == null ? 0 : t.length;
        return new KeyIterator<K,V>(t, f, 0, f, m);
    }

    /**
     * Adds the specified key to this set view by mapping the key to
     * the default mapped value in the backing map, if defined.
     *
     * @param e key to be added
     * @return {@code true} if this set changed as a result of the call
     * @throws NullPointerException if the specified key is null
     * @throws UnsupportedOperationException if no default mapped value
     * for additions was provided
     */
    public boolean add(K e) {
        V v;
        if ((v = value) == null)
            throw new UnsupportedOperationException();
        return map.putVal(e, v, true) == null;
    }

    /**
     * Adds all of the elements in the specified collection to this set,
     * as if by calling {@link #add} on each one.
     *
     * @param c the elements to be inserted into this set
     * @return {@code true} if this set changed as a result of the call
     * @throws NullPointerException if the collection or any of its
     * elements are {@code null}
     * @throws UnsupportedOperationException if no default mapped value
     * for additions was provided
     */
    public boolean addAll(Collection<? extends K> c) {
        boolean added = false;
        V v;
        if ((v = value) == null)
            throw new UnsupportedOperationException();
        for (K e : c) {
            if (map.putVal(e, v, true) == null)
                added = true;
        }
        return added;
    }
    public int hashCode() {
        int h = 0;
        for (K e : this)
            h += e.hashCode();
        return h;
    }

    public boolean equals(Object o) {
        Set<?> c;
        return ((o instanceof Set) &&
                ((c = (Set<?>)o) == this ||
                 (containsAll(c) && c.containsAll(this))));
    }

    public Spliterator<K> spliterator() {
        Node<K,V>[] t;
        ConcurrentHashMap<K,V> m = map;
        long n = m.sumCount();
        int f = (t = m.table) == null ? 0 : t.length;
        return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
    }

    public void forEach(Consumer<? super K> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                action.accept(p.key);
        }
    }
}
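KeySetView is also what backs ConcurrentHashMap.newKeySet(); add() only works when a default mapped value exists, as the source above shows. A sketch (names and values mine):

import java.util.concurrent.ConcurrentHashMap;

public class KeySetViewDemo {
    public static void main(String[] args) {
        // A concurrent Set: keys of a hidden map with Boolean.TRUE values.
        ConcurrentHashMap.KeySetView<String, Boolean> set = ConcurrentHashMap.newKeySet();
        set.add("x");

        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        ConcurrentHashMap.KeySetView<String, Integer> keys = map.keySet(0);
        keys.add("k");            // behaves like map.putIfAbsent("k", 0)
        System.out.println(map);  // {k=0}
        // map.keySet().add("j"); // no default value -> UnsupportedOperationException
    }
}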
/**
 * A view of a ConcurrentHashMap as a {@link Collection} of
 * values, in which additions are disabled. This class cannot be
 * directly instantiated. See {@link #values()}.
 */
static final class ValuesView<K,V> extends CollectionView<K,V,V>
    implements Collection<V>, java.io.Serializable {
    private static final long serialVersionUID = 2249069246763182397L;
    ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
    public final boolean contains(Object o) {
        return map.containsValue(o);
    }

    public final boolean remove(Object o) {
        if (o != null) {
            for (Iterator<V> it = iterator(); it.hasNext();) {
                if (o.equals(it.next())) {
                    it.remove();
                    return true;
                }
            }
        }
        return false;
    }

    public final Iterator<V> iterator() {
        ConcurrentHashMap<K,V> m = map;
        Node<K,V>[] t;
        int f = (t = m.table) == null ? 0 : t.length;
        return new ValueIterator<K,V>(t, f, 0, f, m);
    }

    public final boolean add(V e) {
        throw new UnsupportedOperationException();
    }
    public final boolean addAll(Collection<? extends V> c) {
        throw new UnsupportedOperationException();
    }

    public Spliterator<V> spliterator() {
        Node<K,V>[] t;
        ConcurrentHashMap<K,V> m = map;
        long n = m.sumCount();
        int f = (t = m.table) == null ? 0 : t.length;
        return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
    }

    public void forEach(Consumer<? super V> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                action.accept(p.val);
        }
    }
}
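Unlike the key view, the values view never supports add; remove(o) walks the iterator and deletes the first mapping whose value matches. For instance (data mine):

import java.util.concurrent.ConcurrentHashMap;

public class ValuesViewDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1); map.put("b", 1);

        map.values().remove(1);          // removes exactly one matching mapping
        System.out.println(map.size());  // 1
        // map.values().add(3);          // always throws UnsupportedOperationException
    }
}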
/**
 * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
 * entries. This class cannot be directly instantiated. See
 * {@link #entrySet()}.
 */
static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
    implements Set<Map.Entry<K,V>>, java.io.Serializable {
    private static final long serialVersionUID = 2249069246763182397L;
    EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

    public boolean contains(Object o) {
        Object k, v, r; Map.Entry<?,?> e;
        return ((o instanceof Map.Entry) &&
                (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                (r = map.get(k)) != null &&
                (v = e.getValue()) != null &&
                (v == r || v.equals(r)));
    }

    public boolean remove(Object o) {
        Object k, v; Map.Entry<?,?> e;
        return ((o instanceof Map.Entry) &&
                (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                (v = e.getValue()) != null &&
                map.remove(k, v));
    }

    /**
     * @return an iterator over the entries of the backing map
     */
    public Iterator<Map.Entry<K,V>> iterator() {
        ConcurrentHashMap<K,V> m = map;
        Node<K,V>[] t;
        int f = (t = m.table) == null ? 0 : t.length;
        return new EntryIterator<K,V>(t, f, 0, f, m);
    }

    public boolean add(Entry<K,V> e) {
        return map.putVal(e.getKey(), e.getValue(), false) == null;
    }

    public boolean addAll(Collection<? extends Entry<K,V>> c) {
        boolean added = false;
        for (Entry<K,V> e : c) {
            if (add(e))
                added = true;
        }
        return added;
    }
    public final int hashCode() {
        int h = 0;
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                h += p.hashCode();
            }
        }
        return h;
    }

    public final boolean equals(Object o) {
        Set<?> c;
        return ((o instanceof Set) &&
                ((c = (Set<?>)o) == this ||
                 (containsAll(c) && c.containsAll(this))));
    }

    public Spliterator<Map.Entry<K,V>> spliterator() {
        Node<K,V>[] t;
        ConcurrentHashMap<K,V> m = map;
        long n = m.sumCount();
        int f = (t = m.table) == null ? 0 : t.length;
        return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
    }

    public void forEach(Consumer<? super Map.Entry<K,V>> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                action.accept(new MapEntry<K,V>(p.key, p.val, map));
        }
    }
}
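EntrySetView.add simply forwards to putVal, so adding an entry is an ordinary put. A sketch (AbstractMap.SimpleEntry is used here only as a convenient Map.Entry implementation; it is not part of this source):

import java.util.AbstractMap;
import java.util.concurrent.ConcurrentHashMap;

public class EntrySetDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.entrySet().add(new AbstractMap.SimpleEntry<>("k", 1)); // same as map.put("k", 1)
        System.out.println(map.entrySet().contains(
            new AbstractMap.SimpleEntry<>("k", 1)));               // true
    }
}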
// -------------------------------------------------------
/**
 * Base class for bulk tasks. Repeats some fields and code from
 * class Traverser, because we need to subclass CountedCompleter.
 */
@SuppressWarnings("serial")
abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
    Node<K,V>[] tab;        // same as Traverser
    Node<K,V> next;
    TableStack<K,V> stack, spare;
    int index;
    int baseIndex;
    int baseLimit;
    final int baseSize;
    int batch;              // split control

    BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
        super(par);
        this.batch = b;
        this.index = this.baseIndex = i;
        if ((this.tab = t) == null)
            this.baseSize = this.baseLimit = 0;
        else if (par == null)
            this.baseSize = this.baseLimit = t.length;
        else {
            this.baseLimit = f;
            this.baseSize = par.baseSize;
        }
    }

    /**
     * Same as Traverser version
     */
    final Node<K,V> advance() {
        Node<K,V> e;
        if ((e = next) != null)
            e = e.next;
        for (;;) {
            Node<K,V>[] t; int i, n;
            if (e != null)
                return next = e;
            if (baseIndex >= baseLimit || (t = tab) == null ||
                (n = t.length) <= (i = index) || i < 0)
                return next = null;
            if ((e = tabAt(t, i)) != null && e.hash < 0) {
                if (e instanceof ForwardingNode) {
                    tab = ((ForwardingNode<K,V>)e).nextTable;
                    e = null;
                    pushState(t, i, n);
                    continue;
                }
                else if (e instanceof TreeBin)
                    e = ((TreeBin<K,V>)e).first;
                else
                    e = null;
            }
            if (stack != null)
                recoverState(n);
            else if ((index = i + baseSize) >= n)
                index = ++baseIndex;
        }
    }

    private void pushState(Node<K,V>[] t, int i, int n) {
        TableStack<K,V> s = spare;
        if (s != null)
            spare = s.next;
        else
            s = new TableStack<K,V>();
        s.tab = t;
        s.length = n;
        s.index = i;
        s.next = stack;
        stack = s;
    }

    private void recoverState(int n) {
        TableStack<K,V> s; int len;
        while ((s = stack) != null && (index += (len = s.length)) >= n) {
            n = len;
            index = s.index;
            tab = s.tab;
            s.tab = null;
            TableStack<K,V> next = s.next;
            s.next = spare; // save for reuse
            stack = next;
            spare = s;
        }
        if (s == null && (index += baseSize) >= n)
            index = ++baseIndex;
    }
}
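The splitting logic itself lives in each task's compute(): while the batch budget lasts, the bin range [baseIndex, baseLimit) is halved, the right half is forked as a subtask, and the current task keeps the left half. The following is my own standalone sketch of just that loop, not the JDK code itself:

public class SplitSketch {
    // Imitates the "batch > 0 && (h = (f + i) >>> 1) > i" loop in compute().
    static void split(int lo, int hi, int batch) {
        while (batch > 0) {
            int mid = (lo + hi) >>> 1;
            if (mid <= lo)
                break;
            batch >>>= 1;             // each fork halves the remaining budget
            System.out.println("fork subtask for bins [" + mid + ", " + hi + ")");
            hi = mid;                 // this task keeps [lo, mid)
        }
        System.out.println("process bins [" + lo + ", " + hi + ") in this task");
    }

    public static void main(String[] args) {
        split(0, 16, 4);              // a 16-bin table with a batch budget of 4
    }
}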
/*
 * Task classes. Coded in a regular but ugly format/style to
 * simplify checks that each variant differs in the right way from
 * others. The null screenings exist because compilers cannot tell
 * that we've already null-checked task arguments, so we force
 * simplest hoisted bypass to help avoid convoluted traps.
 */
@SuppressWarnings("serial")
static final class ForEachKeyTask<K,V>
    extends BulkTask<K,V,Void> {
    final Consumer<? super K> action;
    ForEachKeyTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Consumer<? super K> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final Consumer<? super K> action;
        if ((action = this.action) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachKeyTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.key);
            propagateCompletion();
        }
    }
}
@suppresswarnings("serial") static final class foreachvaluetask<k,v> extends bulktask<k,v,void> { final consumer<? super v> action; foreachvaluetask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, consumer<? super v> action) { super(p, b, i, f, t); this.action = action; } public final void compute() { final consumer<? super v> action; if ((action = this.action) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); new foreachvaluetask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, action).fork(); } for (node<k,v> p; (p = advance()) != null;) action.accept(p.val); propagatecompletion(); } } }
@suppresswarnings("serial") static final class foreachentrytask<k,v> extends bulktask<k,v,void> { final consumer<? super entry<k,v>> action; foreachentrytask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, consumer<? super entry<k,v>> action) { super(p, b, i, f, t); this.action = action; } public final void compute() { final consumer<? super entry<k,v>> action; if ((action = this.action) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); new foreachentrytask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, action).fork(); } for (node<k,v> p; (p = advance()) != null; ) action.accept(p); propagatecompletion(); } } }
@suppresswarnings("serial") static final class foreachmappingtask<k,v> extends bulktask<k,v,void> { final biconsumer<? super k, ? super v> action; foreachmappingtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, biconsumer<? super k,? super v> action) { super(p, b, i, f, t); this.action = action; } public final void compute() { final biconsumer<? super k, ? super v> action; if ((action = this.action) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); new foreachmappingtask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, action).fork(); } for (node<k,v> p; (p = advance()) != null; ) action.accept(p.key, p.val); propagatecompletion(); } } }
@suppresswarnings("serial") static final class foreachtransformedkeytask<k,v,u> extends bulktask<k,v,void> { final function<? super k, ? extends u> transformer; final consumer<? super u> action; foreachtransformedkeytask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, function<? super k, ? extends u> transformer, consumer<? super u> action) { super(p, b, i, f, t); this.transformer = transformer; this.action = action; } public final void compute() { final function<? super k, ? extends u> transformer; final consumer<? super u> action; if ((transformer = this.transformer) != null && (action = this.action) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); new foreachtransformedkeytask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, transformer, action).fork(); } for (node<k,v> p; (p = advance()) != null; ) { u u; if ((u = transformer.apply(p.key)) != null) action.accept(u); } propagatecompletion(); } } }
@suppresswarnings("serial") static final class foreachtransformedvaluetask<k,v,u> extends bulktask<k,v,void> { final function<? super v, ? extends u> transformer; final consumer<? super u> action; foreachtransformedvaluetask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, function<? super v, ? extends u> transformer, consumer<? super u> action) { super(p, b, i, f, t); this.transformer = transformer; this.action = action; } public final void compute() { final function<? super v, ? extends u> transformer; final consumer<? super u> action; if ((transformer = this.transformer) != null && (action = this.action) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); new foreachtransformedvaluetask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, transformer, action).fork(); } for (node<k,v> p; (p = advance()) != null; ) { u u; if ((u = transformer.apply(p.val)) != null) action.accept(u); } propagatecompletion(); } } }
@suppresswarnings("serial") static final class foreachtransformedentrytask<k,v,u> extends bulktask<k,v,void> { final function<map.entry<k,v>, ? extends u> transformer; final consumer<? super u> action; foreachtransformedentrytask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, function<map.entry<k,v>, ? extends u> transformer, consumer<? super u> action) { super(p, b, i, f, t); this.transformer = transformer; this.action = action; } public final void compute() { final function<map.entry<k,v>, ? extends u> transformer; final consumer<? super u> action; if ((transformer = this.transformer) != null && (action = this.action) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); new foreachtransformedentrytask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, transformer, action).fork(); } for (node<k,v> p; (p = advance()) != null; ) { u u; if ((u = transformer.apply(p)) != null) action.accept(u); } propagatecompletion(); } } }
@suppresswarnings("serial") static final class foreachtransformedmappingtask<k,v,u> extends bulktask<k,v,void> { final bifunction<? super k, ? super v, ? extends u> transformer; final consumer<? super u> action; foreachtransformedmappingtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, bifunction<? super k, ? super v, ? extends u> transformer, consumer<? super u> action) { super(p, b, i, f, t); this.transformer = transformer; this.action = action; } public final void compute() { final bifunction<? super k, ? super v, ? extends u> transformer; final consumer<? super u> action; if ((transformer = this.transformer) != null && (action = this.action) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); new foreachtransformedmappingtask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, transformer, action).fork(); } for (node<k,v> p; (p = advance()) != null; ) { u u; if ((u = transformer.apply(p.key, p.val)) != null) action.accept(u); } propagatecompletion(); } } }
@suppresswarnings("serial") static final class searchkeystask<k,v,u> extends bulktask<k,v,u> { final function<? super k, ? extends u> searchfunction; final atomicreference<u> result; searchkeystask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, function<? super k, ? extends u> searchfunction, atomicreference<u> result) { super(p, b, i, f, t); this.searchfunction = searchfunction; this.result = result; } public final u getrawresult() { return result.get(); } public final void compute() { final function<? super k, ? extends u> searchfunction; final atomicreference<u> result; if ((searchfunction = this.searchfunction) != null && (result = this.result) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { if (result.get() != null) return; addtopendingcount(1); new searchkeystask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, searchfunction, result).fork(); } while (result.get() == null) { u u; node<k,v> p; if ((p = advance()) == null) { propagatecompletion(); break; } if ((u = searchfunction.apply(p.key)) != null) { if (result.compareandset(null, u)) quietlycompleteroot(); break; } } } } }
@suppresswarnings("serial") static final class searchvaluestask<k,v,u> extends bulktask<k,v,u> { final function<? super v, ? extends u> searchfunction; final atomicreference<u> result; searchvaluestask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, function<? super v, ? extends u> searchfunction, atomicreference<u> result) { super(p, b, i, f, t); this.searchfunction = searchfunction; this.result = result; } public final u getrawresult() { return result.get(); } public final void compute() { final function<? super v, ? extends u> searchfunction; final atomicreference<u> result; if ((searchfunction = this.searchfunction) != null && (result = this.result) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { if (result.get() != null) return; addtopendingcount(1); new searchvaluestask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, searchfunction, result).fork(); } while (result.get() == null) { u u; node<k,v> p; if ((p = advance()) == null) { propagatecompletion(); break; } if ((u = searchfunction.apply(p.val)) != null) { if (result.compareandset(null, u)) quietlycompleteroot(); break; } } } } }
@suppresswarnings("serial") static final class searchentriestask<k,v,u> extends bulktask<k,v,u> { final function<entry<k,v>, ? extends u> searchfunction; final atomicreference<u> result; searchentriestask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, function<entry<k,v>, ? extends u> searchfunction, atomicreference<u> result) { super(p, b, i, f, t); this.searchfunction = searchfunction; this.result = result; } public final u getrawresult() { return result.get(); } public final void compute() { final function<entry<k,v>, ? extends u> searchfunction; final atomicreference<u> result; if ((searchfunction = this.searchfunction) != null && (result = this.result) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { if (result.get() != null) return; addtopendingcount(1); new searchentriestask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, searchfunction, result).fork(); } while (result.get() == null) { u u; node<k,v> p; if ((p = advance()) == null) { propagatecompletion(); break; } if ((u = searchfunction.apply(p)) != null) { if (result.compareandset(null, u)) quietlycompleteroot(); return; } } } } }
@suppresswarnings("serial") static final class searchmappingstask<k,v,u> extends bulktask<k,v,u> { final bifunction<? super k, ? super v, ? extends u> searchfunction; final atomicreference<u> result; searchmappingstask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, bifunction<? super k, ? super v, ? extends u> searchfunction, atomicreference<u> result) { super(p, b, i, f, t); this.searchfunction = searchfunction; this.result = result; } public final u getrawresult() { return result.get(); } public final void compute() { final bifunction<? super k, ? super v, ? extends u> searchfunction; final atomicreference<u> result; if ((searchfunction = this.searchfunction) != null && (result = this.result) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { if (result.get() != null) return; addtopendingcount(1); new searchmappingstask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, searchfunction, result).fork(); } while (result.get() == null) { u u; node<k,v> p; if ((p = advance()) == null) { propagatecompletion(); break; } if ((u = searchfunction.apply(p.key, p.val)) != null) { if (result.compareandset(null, u)) quietlycompleteroot(); break; } } } } }
@suppresswarnings("serial") static final class reducekeystask<k,v> extends bulktask<k,v,k> { final bifunction<? super k, ? super k, ? extends k> reducer; k result; reducekeystask<k,v> rights, nextright; reducekeystask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, reducekeystask<k,v> nextright, bifunction<? super k, ? super k, ? extends k> reducer) { super(p, b, i, f, t); this.nextright = nextright; this.reducer = reducer; } public final k getrawresult() { return result; } public final void compute() { final bifunction<? super k, ? super k, ? extends k> reducer; if ((reducer = this.reducer) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new reducekeystask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, reducer)).fork(); } k r = null; for (node<k,v> p; (p = advance()) != null; ) { k u = p.key; r = (r == null) ? u : u == null ? r : reducer.apply(r, u); } result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") reducekeystask<k,v> t = (reducekeystask<k,v>)c, s = t.rights; while (s != null) { k tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class reducevaluestask<k,v> extends bulktask<k,v,v> { final bifunction<? super v, ? super v, ? extends v> reducer; v result; reducevaluestask<k,v> rights, nextright; reducevaluestask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, reducevaluestask<k,v> nextright, bifunction<? super v, ? super v, ? extends v> reducer) { super(p, b, i, f, t); this.nextright = nextright; this.reducer = reducer; } public final v getrawresult() { return result; } public final void compute() { final bifunction<? super v, ? super v, ? extends v> reducer; if ((reducer = this.reducer) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new reducevaluestask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, reducer)).fork(); } v r = null; for (node<k,v> p; (p = advance()) != null; ) { v v = p.val; r = (r == null) ? v : reducer.apply(r, v); } result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") reducevaluestask<k,v> t = (reducevaluestask<k,v>)c, s = t.rights; while (s != null) { v tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class reduceentriestask<k,v> extends bulktask<k,v,map.entry<k,v>> { final bifunction<map.entry<k,v>, map.entry<k,v>, ? extends map.entry<k,v>> reducer; map.entry<k,v> result; reduceentriestask<k,v> rights, nextright; reduceentriestask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, reduceentriestask<k,v> nextright, bifunction<entry<k,v>, map.entry<k,v>, ? extends map.entry<k,v>> reducer) { super(p, b, i, f, t); this.nextright = nextright; this.reducer = reducer; } public final map.entry<k,v> getrawresult() { return result; } public final void compute() { final bifunction<map.entry<k,v>, map.entry<k,v>, ? extends map.entry<k,v>> reducer; if ((reducer = this.reducer) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new reduceentriestask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, reducer)).fork(); } map.entry<k,v> r = null; for (node<k,v> p; (p = advance()) != null; ) r = (r == null) ? p : reducer.apply(r, p); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") reduceentriestask<k,v> t = (reduceentriestask<k,v>)c, s = t.rights; while (s != null) { map.entry<k,v> tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducekeystask<k,v,u> extends bulktask<k,v,u> { final function<? super k, ? extends u> transformer; final bifunction<? super u, ? super u, ? extends u> reducer; u result; mapreducekeystask<k,v,u> rights, nextright; mapreducekeystask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducekeystask<k,v,u> nextright, function<? super k, ? extends u> transformer, bifunction<? super u, ? super u, ? extends u> reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.reducer = reducer; } public final u getrawresult() { return result; } public final void compute() { final function<? super k, ? extends u> transformer; final bifunction<? super u, ? super u, ? extends u> reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducekeystask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, reducer)).fork(); } u r = null; for (node<k,v> p; (p = advance()) != null; ) { u u; if ((u = transformer.apply(p.key)) != null) r = (r == null) ? u : reducer.apply(r, u); } result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducekeystask<k,v,u> t = (mapreducekeystask<k,v,u>)c, s = t.rights; while (s != null) { u tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducevaluestask<k,v,u> extends bulktask<k,v,u> { final function<? super v, ? extends u> transformer; final bifunction<? super u, ? super u, ? extends u> reducer; u result; mapreducevaluestask<k,v,u> rights, nextright; mapreducevaluestask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducevaluestask<k,v,u> nextright, function<? super v, ? extends u> transformer, bifunction<? super u, ? super u, ? extends u> reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.reducer = reducer; } public final u getrawresult() { return result; } public final void compute() { final function<? super v, ? extends u> transformer; final bifunction<? super u, ? super u, ? extends u> reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducevaluestask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, reducer)).fork(); } u r = null; for (node<k,v> p; (p = advance()) != null; ) { u u; if ((u = transformer.apply(p.val)) != null) r = (r == null) ? u : reducer.apply(r, u); } result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducevaluestask<k,v,u> t = (mapreducevaluestask<k,v,u>)c, s = t.rights; while (s != null) { u tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreduceentriestask<k,v,u> extends bulktask<k,v,u> { final function<map.entry<k,v>, ? extends u> transformer; final bifunction<? super u, ? super u, ? extends u> reducer; u result; mapreduceentriestask<k,v,u> rights, nextright; mapreduceentriestask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreduceentriestask<k,v,u> nextright, function<map.entry<k,v>, ? extends u> transformer, bifunction<? super u, ? super u, ? extends u> reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.reducer = reducer; } public final u getrawresult() { return result; } public final void compute() { final function<map.entry<k,v>, ? extends u> transformer; final bifunction<? super u, ? super u, ? extends u> reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreduceentriestask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, reducer)).fork(); } u r = null; for (node<k,v> p; (p = advance()) != null; ) { u u; if ((u = transformer.apply(p)) != null) r = (r == null) ? u : reducer.apply(r, u); } result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreduceentriestask<k,v,u> t = (mapreduceentriestask<k,v,u>)c, s = t.rights; while (s != null) { u tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducemappingstask<k,v,u> extends bulktask<k,v,u> { final bifunction<? super k, ? super v, ? extends u> transformer; final bifunction<? super u, ? super u, ? extends u> reducer; u result; mapreducemappingstask<k,v,u> rights, nextright; mapreducemappingstask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducemappingstask<k,v,u> nextright, bifunction<? super k, ? super v, ? extends u> transformer, bifunction<? super u, ? super u, ? extends u> reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.reducer = reducer; } public final u getrawresult() { return result; } public final void compute() { final bifunction<? super k, ? super v, ? extends u> transformer; final bifunction<? super u, ? super u, ? extends u> reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducemappingstask<k,v,u> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, reducer)).fork(); } u r = null; for (node<k,v> p; (p = advance()) != null; ) { u u; if ((u = transformer.apply(p.key, p.val)) != null) r = (r == null) ? u : reducer.apply(r, u); } result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducemappingstask<k,v,u> t = (mapreducemappingstask<k,v,u>)c, s = t.rights; while (s != null) { u tr, sr; if ((sr = s.result) != null) t.result = (((tr = t.result) == null) ? sr : reducer.apply(tr, sr)); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducekeystodoubletask<k,v> extends bulktask<k,v,double> { final todoublefunction<? super k> transformer; final doublebinaryoperator reducer; final double basis; double result; mapreducekeystodoubletask<k,v> rights, nextright; mapreducekeystodoubletask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducekeystodoubletask<k,v> nextright, todoublefunction<? super k> transformer, double basis, doublebinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final double getrawresult() { return result; } public final void compute() { final todoublefunction<? super k> transformer; final doublebinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { double r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducekeystodoubletask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyasdouble(r, transformer.applyasdouble(p.key)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducekeystodoubletask<k,v> t = (mapreducekeystodoubletask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyasdouble(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducevaluestodoubletask<k,v> extends bulktask<k,v,double> { final todoublefunction<? super v> transformer; final doublebinaryoperator reducer; final double basis; double result; mapreducevaluestodoubletask<k,v> rights, nextright; mapreducevaluestodoubletask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducevaluestodoubletask<k,v> nextright, todoublefunction<? super v> transformer, double basis, doublebinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final double getrawresult() { return result; } public final void compute() { final todoublefunction<? super v> transformer; final doublebinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { double r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducevaluestodoubletask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyasdouble(r, transformer.applyasdouble(p.val)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducevaluestodoubletask<k,v> t = (mapreducevaluestodoubletask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyasdouble(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreduceentriestodoubletask<k,v> extends bulktask<k,v,double> { final todoublefunction<map.entry<k,v>> transformer; final doublebinaryoperator reducer; final double basis; double result; mapreduceentriestodoubletask<k,v> rights, nextright; mapreduceentriestodoubletask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreduceentriestodoubletask<k,v> nextright, todoublefunction<map.entry<k,v>> transformer, double basis, doublebinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final double getrawresult() { return result; } public final void compute() { final todoublefunction<map.entry<k,v>> transformer; final doublebinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { double r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreduceentriestodoubletask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyasdouble(r, transformer.applyasdouble(p)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreduceentriestodoubletask<k,v> t = (mapreduceentriestodoubletask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyasdouble(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducemappingstodoubletask<k,v> extends bulktask<k,v,double> { final todoublebifunction<? super k, ? super v> transformer; final doublebinaryoperator reducer; final double basis; double result; mapreducemappingstodoubletask<k,v> rights, nextright; mapreducemappingstodoubletask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducemappingstodoubletask<k,v> nextright, todoublebifunction<? super k, ? super v> transformer, double basis, doublebinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final double getrawresult() { return result; } public final void compute() { final todoublebifunction<? super k, ? super v> transformer; final doublebinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { double r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducemappingstodoubletask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyasdouble(r, transformer.applyasdouble(p.key, p.val)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducemappingstodoubletask<k,v> t = (mapreducemappingstodoubletask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyasdouble(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducekeystolongtask<k,v> extends bulktask<k,v,long> { final tolongfunction<? super k> transformer; final longbinaryoperator reducer; final long basis; long result; mapreducekeystolongtask<k,v> rights, nextright; mapreducekeystolongtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducekeystolongtask<k,v> nextright, tolongfunction<? super k> transformer, long basis, longbinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final long getrawresult() { return result; } public final void compute() { final tolongfunction<? super k> transformer; final longbinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { long r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducekeystolongtask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyaslong(r, transformer.applyaslong(p.key)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducekeystolongtask<k,v> t = (mapreducekeystolongtask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyaslong(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducevaluestolongtask<k,v> extends bulktask<k,v,long> { final tolongfunction<? super v> transformer; final longbinaryoperator reducer; final long basis; long result; mapreducevaluestolongtask<k,v> rights, nextright; mapreducevaluestolongtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducevaluestolongtask<k,v> nextright, tolongfunction<? super v> transformer, long basis, longbinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final long getrawresult() { return result; } public final void compute() { final tolongfunction<? super v> transformer; final longbinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { long r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducevaluestolongtask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyaslong(r, transformer.applyaslong(p.val)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducevaluestolongtask<k,v> t = (mapreducevaluestolongtask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyaslong(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreduceentriestolongtask<k,v> extends bulktask<k,v,long> { final tolongfunction<map.entry<k,v>> transformer; final longbinaryoperator reducer; final long basis; long result; mapreduceentriestolongtask<k,v> rights, nextright; mapreduceentriestolongtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreduceentriestolongtask<k,v> nextright, tolongfunction<map.entry<k,v>> transformer, long basis, longbinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final long getrawresult() { return result; } public final void compute() { final tolongfunction<map.entry<k,v>> transformer; final longbinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { long r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreduceentriestolongtask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyaslong(r, transformer.applyaslong(p)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreduceentriestolongtask<k,v> t = (mapreduceentriestolongtask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyaslong(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducemappingstolongtask<k,v> extends bulktask<k,v,long> { final tolongbifunction<? super k, ? super v> transformer; final longbinaryoperator reducer; final long basis; long result; mapreducemappingstolongtask<k,v> rights, nextright; mapreducemappingstolongtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducemappingstolongtask<k,v> nextright, tolongbifunction<? super k, ? super v> transformer, long basis, longbinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final long getrawresult() { return result; } public final void compute() { final tolongbifunction<? super k, ? super v> transformer; final longbinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { long r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducemappingstolongtask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyaslong(r, transformer.applyaslong(p.key, p.val)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducemappingstolongtask<k,v> t = (mapreducemappingstolongtask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyaslong(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducekeystointtask<k,v> extends bulktask<k,v,integer> { final tointfunction<? super k> transformer; final intbinaryoperator reducer; final int basis; int result; mapreducekeystointtask<k,v> rights, nextright; mapreducekeystointtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducekeystointtask<k,v> nextright, tointfunction<? super k> transformer, int basis, intbinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final integer getrawresult() { return result; } public final void compute() { final tointfunction<? super k> transformer; final intbinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { int r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducekeystointtask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyasint(r, transformer.applyasint(p.key)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducekeystointtask<k,v> t = (mapreducekeystointtask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyasint(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducevaluestointtask<k,v> extends bulktask<k,v,integer> { final tointfunction<? super v> transformer; final intbinaryoperator reducer; final int basis; int result; mapreducevaluestointtask<k,v> rights, nextright; mapreducevaluestointtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducevaluestointtask<k,v> nextright, tointfunction<? super v> transformer, int basis, intbinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final integer getrawresult() { return result; } public final void compute() { final tointfunction<? super v> transformer; final intbinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { int r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducevaluestointtask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyasint(r, transformer.applyasint(p.val)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducevaluestointtask<k,v> t = (mapreducevaluestointtask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyasint(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreduceentriestointtask<k,v> extends bulktask<k,v,integer> { final tointfunction<map.entry<k,v>> transformer; final intbinaryoperator reducer; final int basis; int result; mapreduceentriestointtask<k,v> rights, nextright; mapreduceentriestointtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreduceentriestointtask<k,v> nextright, tointfunction<map.entry<k,v>> transformer, int basis, intbinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final integer getrawresult() { return result; } public final void compute() { final tointfunction<map.entry<k,v>> transformer; final intbinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { int r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreduceentriestointtask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyasint(r, transformer.applyasint(p)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreduceentriestointtask<k,v> t = (mapreduceentriestointtask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyasint(t.result, s.result); s = t.rights = s.nextright; } } } } }
@suppresswarnings("serial") static final class mapreducemappingstointtask<k,v> extends bulktask<k,v,integer> { final tointbifunction<? super k, ? super v> transformer; final intbinaryoperator reducer; final int basis; int result; mapreducemappingstointtask<k,v> rights, nextright; mapreducemappingstointtask (bulktask<k,v,?> p, int b, int i, int f, node<k,v>[] t, mapreducemappingstointtask<k,v> nextright, tointbifunction<? super k, ? super v> transformer, int basis, intbinaryoperator reducer) { super(p, b, i, f, t); this.nextright = nextright; this.transformer = transformer; this.basis = basis; this.reducer = reducer; } public final integer getrawresult() { return result; } public final void compute() { final tointbifunction<? super k, ? super v> transformer; final intbinaryoperator reducer; if ((transformer = this.transformer) != null && (reducer = this.reducer) != null) { int r = this.basis; for (int i = baseindex, f, h; batch > 0 && (h = ((f = baselimit) + i) >>> 1) > i;) { addtopendingcount(1); (rights = new mapreducemappingstointtask<k,v> (this, batch >>>= 1, baselimit = h, f, tab, rights, transformer, r, reducer)).fork(); } for (node<k,v> p; (p = advance()) != null; ) r = reducer.applyasint(r, transformer.applyasint(p.key, p.val)); result = r; countedcompleter<?> c; for (c = firstcomplete(); c != null; c = c.nextcomplete()) { @suppresswarnings("unchecked") mapreducemappingstointtask<k,v> t = (mapreducemappingstointtask<k,v>)c, s = t.rights; while (s != null) { t.result = reducer.applyasint(t.result, s.result); s = t.rights = s.nextright; } } } } }
// Unsafe mechanics: raw memory offsets used by the CAS-based operations above.
// SIZECTL..CELLVALUE are field offsets; ABASE/ASHIFT address slots of Node[].
private static final sun.misc.Unsafe U;
private static final long SIZECTL;
private static final long TRANSFERINDEX;
private static final long BASECOUNT;
private static final long CELLSBUSY;
private static final long CELLVALUE;
private static final long ABASE;
private static final int ASHIFT;
static {
    try {
        U = sun.misc.Unsafe.getUnsafe();
        Class<?> k = ConcurrentHashMap.class;
        // byte offsets of the fields that are updated via CAS
        SIZECTL = U.objectFieldOffset
            (k.getDeclaredField("sizeCtl"));
        TRANSFERINDEX = U.objectFieldOffset
            (k.getDeclaredField("transferIndex"));
        BASECOUNT = U.objectFieldOffset
            (k.getDeclaredField("baseCount"));
        CELLSBUSY = U.objectFieldOffset
            (k.getDeclaredField("cellsBusy"));
        Class<?> ck = CounterCell.class;
        CELLVALUE = U.objectFieldOffset
            (ck.getDeclaredField("value"));
        // base offset and element shift for addressing slots of the table array
        Class<?> ak = Node[].class;
        ABASE = U.arrayBaseOffset(ak);
        int scale = U.arrayIndexScale(ak);
        if ((scale & (scale - 1)) != 0)
            // scale must be a power of two so a shift can replace multiplication
            throw new Error("data type scale not a power of two");
        ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
    } catch (Exception e) {
        throw new Error(e);
    }
}
} // end of class ConcurrentHashMap
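ABASE and ASHIFT together turn a table index into the raw byte offset that Unsafe needs for volatile array reads and CAS writes: slot i of the Node[] table lives at ABASE + ((long)i << ASHIFT). The power-of-two check is what makes the shift valid; for example, when arrayIndexScale reports 4 bytes per slot (typical on a 64-bit JVM with compressed oops), ASHIFT = 31 - numberOfLeadingZeros(4) = 2, so tab[i] is at ABASE + i * 4. A tiny sketch of the arithmetic (slotOffset is my own illustrative name, not a JDK method):

// Hypothetical helper recomputing the offset exactly as tabAt/casTabAt do.
static long slotOffset(long abase, int ashift, int i) {
    return ((long) i << ashift) + abase; // e.g. ABASE + i * 4 when scale == 4
}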