JDK容器类Map源码解读
java.util.map接口是jdk1.2开始提供的一个基于键值对的散列表接口,其设计的初衷是为了替换jdk1.0中的java.util.dictionary抽象类。dictionary是jdk最初的键值对类,它不可以存储null作为key和value,目前这个类早已不被使用了。目前都是在使用map接口,其部分实现类(如hashmap)可以存储null值作为key和value(而concurrenthashmap等则不允许null),但map的key是不可以重复的。其常用的实现类主要有hashmap,treemap,concurrenthashmap等
hashmap源码解读
目前jdk已经发布到jdk12,主流的jdk版本是jdk8, 但是如果阅读hashmap的源码建议先看jdk7的源码。jdk7和jdk8的源码中hashmap的实现原理大体相同,只不过是在jdk8中做了部分优化。但是jdk8的源码可读性非常差。
hashmap 是一个存储键值对(key-value)映射的散列表,继承于abstractmap,实现了map、cloneable、java.io.serializable接口,hashmap是线程不安全的,它存储的映射也是无序的。
hashmap的底层主要是基于数组和链表来实现的(jdk8之后又引入了红黑树),数据存储时会通过对key进行哈希操作取到哈希值,然后将哈希值对数组长度取模,得到的值就是该键值对在数组中的索引index值,如果数组该位置没有值则直接将该键值对放在该位置,如果该位置已经有值则将其插入相应链表的位置,jdk8开始为优化链表长度过长导致的性能问题从而引入了红黑树,当链表的长度大于8且数组容量不小于64时会自动将链表转成红黑树(若数组容量小于64则只进行扩容而不树化)。
jdk7中hashmap的源码解读
jdk7中hashmap采用entry数组来存储键值对,每一个键值对组成了一个entry实体,entry类实际上是一个单向的链表结构,它具有next指针,可以连接下一个entry实体组成链表。
jdk7中hashmap源码中的主要字段
// 数组默认的大小 // 1 << 4,表示1,左移4位,变成10000,即16,以二进制形式运行,效率更高 static final int default_initial_capacity = 1 << 4; // 数组最大值 static final int maximum_capacity = 1 << 30; // 默认的负载因子 static final float default_load_factor = 0.75f; // 真正存放数据的数组 transient entry<k,v>[] table = (entry<k,v>[]) empty_table;
hashmap中默认的数组容量为 16,负载因子为 0.75。map 在使用过程中不断的往里面存放数据,当数量达到了 16 * 0.75 = 12 就需要将当前 16 的容量进行扩容,而扩容这个过程涉及到 rehash、复制数据等操作,所以非常消耗性能。因此通常建议能提前预估 hashmap 的大小最好,尽量的减少扩容带来的性能损耗。
jdk7中hashmap源码中的构造器
/** 默认的初始化容量、默认的加载因子 * constructs an empty <tt>hashmap</tt> with the default initial capacity * (16) and the default load factor (0.75). */ public hashmap() { //16 0.75 this(default_initial_capacity, default_load_factor); } /** * constructs an empty <tt>hashmap</tt> with the specified initial * capacity and the default load factor (0.75). * * @param initialcapacity the initial capacity. * @throws illegalargumentexception if the initial capacity is negative. */ public hashmap(int initialcapacity) { this(initialcapacity, default_load_factor); } /** 做了两件事:1、为threshold、loadfactor赋值 2、调用init() * constructs an empty <tt>hashmap</tt> with the specified initial * capacity and load factor. * * @param initialcapacity the initial capacity * @param loadfactor the load factor * @throws illegalargumentexception if the initial capacity is negative * or the load factor is nonpositive */ public hashmap(int initialcapacity, float loadfactor) { if (initialcapacity < 0) throw new illegalargumentexception("illegal initial capacity: " + initialcapacity); if (initialcapacity > maximum_capacity) //限制最大容量 initialcapacity = maximum_capacity; if (loadfactor <= 0 || float.isnan(loadfactor)) //检查 loadfactor throw new illegalargumentexception("illegal load factor: " + loadfactor); //真正在做的,只是记录下loadfactor、initialcpacity的值 this.loadfactor = loadfactor; //记录下loadfactor threshold = initialcapacity; //初始的 阈值threshold=initialcapacity=16 init(); } /** * constructs a new <tt>hashmap</tt> with the same mappings as the * specified <tt>map</tt>. the <tt>hashmap</tt> is created with * default load factor (0.75) and an initial capacity sufficient to * hold the mappings in the specified <tt>map</tt>. * * @param m the map whose mappings are to be placed in this map * @throws nullpointerexception if the specified map is null */ public hashmap(map<? extends k, ? 
extends v> m) { this(math.max((int) (m.size() / default_load_factor) + 1, default_initial_capacity), default_load_factor); inflatetable(threshold); putallforcreate(m); }
jdk7中hashmap源码中的put方法
/** * associates the specified value with the specified key in this map. * if the map previously contained a mapping for the key, the old * value is replaced. * * @param key key with which the specified value is to be associated * @param value value to be associated with the specified key * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt>. * (a <tt>null</tt> return can also indicate that the map * previously associated <tt>null</tt> with <tt>key</tt>.) */ public v put(k key, v value) { if (table == empty_table) { inflatetable(threshold); //初始化表 (初始化、扩容 合并为了一个方法) } if (key == null) //对key为null做特殊处理 return putfornullkey(value); int hash = hash(key); //计算hash值 int i = indexfor(hash, table.length); //根据hash值计算出index下标 for (entry<k,v> e = table[i]; e != null; e = e.next) { //遍历下标为i处的链表 object k; if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { //如果key值相同,覆盖旧值,返回新值 v oldvalue = e.value; e.value = value; //新值 覆盖 旧值 e.recordaccess(this); //do nothing return oldvalue; //返回旧值 } } modcount++; //修改次数+1,类似于一个version number addentry(hash, key, value, i); return null; } /** * adds a new entry with the specified key, value and hash code to * the specified bucket. it is the responsibility of this * method to resize the table if appropriate. * * subclass overrides this to alter the behavior of put method. */ void addentry(int hash, k key, v value, int bucketindex) { if ((size >= threshold) && (null != table[bucketindex])) { //如果size大于threshold && table在下标为index的地方已经有entry了 resize(2 * table.length); //扩容,将数组长度变为原来两倍 hash = (null != key) ? hash(key) : 0; //重新计算 hash 值 bucketindex = indexfor(hash, table.length); //重新计算下标 } createentry(hash, key, value, bucketindex); //创建entry } /** * rehashes the contents of this map into a new array with a * larger capacity. this method is called automatically when the * number of keys in this map reaches its threshold. 
* * if current capacity is maximum_capacity, this method does not * resize the map, but sets threshold to integer.max_value. * this has the effect of preventing future calls. * * @param newcapacity the new capacity, must be a power of two; * must be greater than current capacity unless current * capacity is maximum_capacity (in which case value * is irrelevant). */ void resize(int newcapacity) { entry[] oldtable = table; int oldcapacity = oldtable.length; if (oldcapacity == maximum_capacity) { //状态检查 threshold = integer.max_value; return; } entry[] newtable = new entry[newcapacity]; //实例化新的table transfer(newtable, inithashseedasneeded(newcapacity)); //赋值数组元素到新的数组 table = newtable; threshold = (int)math.min(newcapacity * loadfactor, maximum_capacity + 1); } /** * transfers all entries from current table to newtable. */ void transfer(entry[] newtable, boolean rehash) { int newcapacity = newtable.length; for (entry<k,v> e : table) { while(null != e) { entry<k,v> next = e.next; if (rehash) { e.hash = null == e.key ? 0 : hash(e.key); //对key进行hash } int i = indexfor(e.hash, newcapacity); //用新的index来取模 e.next = newtable[i]; newtable[i] = e; //把元素存入新table新的新的index处 e = next; } } } /** * like addentry except that this version is used when creating entries * as part of map construction or "pseudo-construction" (cloning, * deserialization). this version needn't worry about resizing the table. * * subclass overrides this to alter the behavior of hashmap(map), * clone, and readobject. */ void createentry(int hash, k key, v value, int bucketindex) { entry<k,v> e = table[bucketindex]; //获取table中存的entry table[bucketindex] = new entry<>(hash, key, value, e); //将新的entry放到数组中,next指向旧的table[i] size++; //修改map中元素个数 }
jdk7中hashmap源码中的get方法
/** * returns the value to which the specified key is mapped, * or {@code null} if this map contains no mapping for the key. * * <p>more formally, if this map contains a mapping from a key * {@code k} to a value {@code v} such that {@code (key==null ? k==null : * key.equals(k))}, then this method returns {@code v}; otherwise * it returns {@code null}. (there can be at most one such mapping.) * * <p>a return value of {@code null} does not <i>necessarily</i> * indicate that the map contains no mapping for the key; it's also * possible that the map explicitly maps the key to {@code null}. * the {@link #containskey containskey} operation may be used to * distinguish these two cases. * * @see #put(object, object) */ public v get(object key) { if (key == null) return getfornullkey(); entry<k,v> entry = getentry(key); return null == entry ? null : entry.getvalue(); } /** * returns the entry associated with the specified key in the * hashmap. returns null if the hashmap contains no mapping * for the key. */ final entry<k,v> getentry(object key) { if (size == 0) { return null; } int hash = (key == null) ? 0 : hash(key); for (entry<k,v> e = table[indexfor(hash, table.length)]; e != null; e = e.next) { object k; if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) return e; } return null; }
jdk8中hashmap的源码解读
jdk8中hashmap采用node数组来存储键值对,node其实就是jdk7中的entry,只不过是换了一个名字,同样每一个键值对组成了一个node实体,然后组成链表。当 hash 冲突严重时,链表会变的越来越长,这样在查询时的效率就会越来越低,jdk8所做的优化就是,当链表的长度达到8的时候会转变成红黑树treenode。
jdk8中hashmap源码中的主要字段
static final int default_initial_capacity = 1 << 4; static final int maximum_capacity = 1 << 30; static final float default_load_factor = 0.75f; // 用于判断是否需要将链表转换为红黑树的阈值 static final int treeify_threshold = 8; // 用于判断是否需要将红黑树转换为链表的阈值 static final int untreeify_threshold = 6; static final int min_treeify_capacity = 64; // 存放数据的数组 transient node<k,v>[] table;
jdk8中hashmap源码中的构造器
/** * constructs an empty <tt>hashmap</tt> with the default initial capacity * (16) and the default load factor (0.75). */ public hashmap() { this.loadfactor = default_load_factor; // all other fields defaulted } /** * constructs an empty <tt>hashmap</tt> with the specified initial * capacity and the default load factor (0.75). * * @param initialcapacity the initial capacity. * @throws illegalargumentexception if the initial capacity is negative. */ public hashmap(int initialcapacity) { this(initialcapacity, default_load_factor); } /** * constructs an empty <tt>hashmap</tt> with the specified initial * capacity and load factor. * * @param initialcapacity the initial capacity * @param loadfactor the load factor * @throws illegalargumentexception if the initial capacity is negative * or the load factor is nonpositive */ public hashmap(int initialcapacity, float loadfactor) { if (initialcapacity < 0) throw new illegalargumentexception("illegal initial capacity: " + initialcapacity); if (initialcapacity > maximum_capacity) initialcapacity = maximum_capacity; if (loadfactor <= 0 || float.isnan(loadfactor)) throw new illegalargumentexception("illegal load factor: " + loadfactor); this.loadfactor = loadfactor; this.threshold = tablesizefor(initialcapacity); } /** * constructs a new <tt>hashmap</tt> with the same mappings as the * specified <tt>map</tt>. the <tt>hashmap</tt> is created with * default load factor (0.75) and an initial capacity sufficient to * hold the mappings in the specified <tt>map</tt>. * * @param m the map whose mappings are to be placed in this map * @throws nullpointerexception if the specified map is null */ public hashmap(map<? extends k, ? extends v> m) { this.loadfactor = default_load_factor; putmapentries(m, false); }
jdk8中hashmap源码中的put方法
/** * associates the specified value with the specified key in this map. * if the map previously contained a mapping for the key, the old * value is replaced. * * @param key key with which the specified value is to be associated * @param value value to be associated with the specified key * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt>. * (a <tt>null</tt> return can also indicate that the map * previously associated <tt>null</tt> with <tt>key</tt>.) */ public v put(k key, v value) { return putval(hash(key), key, value, false, true); } /** * implements map.put and related methods. 添加元素 * * @param hash hash for key * @param key the key * @param value the value to put * @param onlyifabsent if true, don't change existing value * @param evict if false, the table is in creation mode. * @return previous value, or null if none */ final v putval(int hash, k key, v value, boolean onlyifabsent, boolean evict) { node<k,v>[] tab; node<k,v> p; int n, i; if ((tab = table) == null || (n = tab.length) == 0) //若table为null n = (tab = resize()).length; //resize if ((p = tab[i = (n - 1) & hash]) == null) //计算下标i,取出i处的元素为p,如果p为null tab[i] = newnode(hash, key, value, null); //创建新的node,放到数组中 else { //若 p!=null node<k,v> e; k k; if (p.hash == hash && ((k = p.key) == key || (key != null && key.equals(k)))) //若key相同 e = p; //直接覆盖 else if (p instanceof treenode) //如果为 树节点 e = ((treenode<k,v>)p).puttreeval(this, tab, hash, key, value); //放到树中 else { //如果key不相同,也不是treenode for (int bincount = 0; ; ++bincount) { //遍历i处的链表 if ((e = p.next) == null) { //找到尾部 p.next = newnode(hash, key, value, null); //在末尾添加一个node if (bincount >= treeify_threshold - 1) // -1 for 1st //如果链表长度 >= 8 treeifybin(tab, hash); //将链表转成共黑树 break; } if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) //若果key相同,直接退出循环 break; p = e; } } if (e != null) { // existing mapping for key v oldvalue = e.value; if (!onlyifabsent || oldvalue 
== null) e.value = value; afternodeaccess(e); return oldvalue; } } ++modcount; if (++size > threshold) resize(); afternodeinsertion(evict); return null; } /** * replaces all linked nodes in bin at index for given hash unless * table is too small, in which case resizes instead. */ final void treeifybin(node<k,v>[] tab, int hash) { int n, index; node<k,v> e; if (tab == null || (n = tab.length) < min_treeify_capacity) resize(); else if ((e = tab[index = (n - 1) & hash]) != null) { treenode<k,v> hd = null, tl = null; do { treenode<k,v> p = replacementtreenode(e, null); if (tl == null) hd = p; else { p.prev = tl; tl.next = p; } tl = p; } while ((e = e.next) != null); if ((tab[index] = hd) != null) hd.treeify(tab); } } /** * initializes or doubles table size. if null, allocates in * accord with initial capacity target held in field threshold. * otherwise, because we are using power-of-two expansion, the * elements from each bin must either stay at same index, or move * with a power of two offset in the new table. * * @return the table */ final node<k,v>[] resize() { node<k,v>[] oldtab = table; int oldcap = (oldtab == null) ? 0 : oldtab.length; // 如果 旧数组为null就讲旧的容量看做是0,否则用旧的table长度当做容量 int oldthr = threshold; int newcap, newthr = 0; if (oldcap > 0) { if (oldcap >= maximum_capacity) { threshold = integer.max_value; return oldtab; } else if ((newcap = oldcap << 1) < maximum_capacity && oldcap >= default_initial_capacity) newthr = oldthr << 1; // double threshold } else if (oldthr > 0) // initial capacity was placed in threshold newcap = oldthr; else { // zero initial threshold signifies using defaults newcap = default_initial_capacity; newthr = (int)(default_load_factor * default_initial_capacity); } if (newthr == 0) { float ft = (float)newcap * loadfactor; newthr = (newcap < maximum_capacity && ft < (float)maximum_capacity ? 
(int)ft : integer.max_value); } threshold = newthr; @suppresswarnings({"rawtypes","unchecked"}) node<k,v>[] newtab = (node<k,v>[])new node[newcap]; //创建新的数组 table = newtab; //赋值给table if (oldtab != null) { for (int j = 0; j < oldcap; ++j) { node<k,v> e; if ((e = oldtab[j]) != null) { oldtab[j] = null; if (e.next == null) newtab[e.hash & (newcap - 1)] = e; else if (e instanceof treenode) ((treenode<k,v>)e).split(this, newtab, j, oldcap); else { // preserve order node<k,v> lohead = null, lotail = null; node<k,v> hihead = null, hitail = null; node<k,v> next; do { next = e.next; if ((e.hash & oldcap) == 0) { if (lotail == null) lohead = e; else lotail.next = e; lotail = e; } else { if (hitail == null) hihead = e; else hitail.next = e; hitail = e; } } while ((e = next) != null); if (lotail != null) { lotail.next = null; newtab[j] = lohead; } if (hitail != null) { hitail.next = null; newtab[j + oldcap] = hihead; } } } } } return newtab; }
jdk8中hashmap源码中的get方法
/** * returns the value to which the specified key is mapped, * or {@code null} if this map contains no mapping for the key. * * <p>more formally, if this map contains a mapping from a key * {@code k} to a value {@code v} such that {@code (key==null ? k==null : * key.equals(k))}, then this method returns {@code v}; otherwise * it returns {@code null}. (there can be at most one such mapping.) * * <p>a return value of {@code null} does not <i>necessarily</i> * indicate that the map contains no mapping for the key; it's also * possible that the map explicitly maps the key to {@code null}. * the {@link #containskey containskey} operation may be used to * distinguish these two cases. * * @see #put(object, object) */ public v get(object key) { node<k,v> e; return (e = getnode(hash(key), key)) == null ? null : e.value; } /** * implements map.get and related methods. * * @param hash hash for key * @param key the key * @return the node, or null if none */ final node<k,v> getnode(int hash, object key) { node<k,v>[] tab; node<k,v> first, e; int n; k k; if ((tab = table) != null && (n = tab.length) > 0 && (first = tab[(n - 1) & hash]) != null) { if (first.hash == hash && // always check first node ((k = first.key) == key || (key != null && key.equals(k)))) return first; if ((e = first.next) != null) { if (first instanceof treenode) return ((treenode<k,v>)first).gettreenode(hash, key); do { if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) return e; } while ((e = e.next) != null); } } return null; }
concurrenthashmap源码解读
concurrenthashmap是一个线程安全的hashmap实现,concurrenthashmap在jdk7和jdk8中的实现差别比较大,jdk7中concurrenthashmap是使用segment数组来存放数据,一个segment就相当于一个hashmap的数据结构,每个segment使用一个锁。jdk8之后segment虽保留,但仅是为了兼容旧版本,已经不再使用,jdk8中concurrenthashmap使用和hashmap一样的数据结构node数组来存储数据,每个数组位置使用一个锁。
jdk7中的concurrenthashmap源码解读
jdk7中concurrenthashmap的底层是segment数组,而segment其实就是特殊的hashmap,segment的数据结构跟hashmap一样,同时它继承了reentrantlock,通过reentrantlock提供的锁实现了线程的安全。concurrenthashmap使用分段锁技术,将数据分成一段一段的存储,每个segment就是一段,然后给每一段数据配一把锁,当一个线程占用锁访问其中一个段数据的时候,其他段的数据也能被其他线程访问,能够实现并发访问,segment数组的长度就是concurrenthashmap的线程并行级别,segment数组默认的长度为16,也就是说最多同时可以有16个线程去访问concurrenthashmap。segment 数组不能扩容,而是对 segment 数组某个位置的segment内部的数组hashentry[] 进行扩容,扩容后容量为原来的 2 倍,该方法没有考虑并发,因为执行该方法之前已经获取了锁。
jdk7中的concurrenthashmap源码中的主要字段
// 数组默认大小 static final int default_initial_capacity = 16; // 默认的负载因子 static final float default_load_factor = 0.75f; // 默认线程并发度 static final int default_concurrency_level = 16; static final int min_segment_table_capacity = 2; static final int max_segments = 1 << 16; // 数组最大大小 static final int maximum_capacity = 1 << 30; static final int retries_before_lock = 2;
jdk7中的concurrenthashmap源码中的构造器
/** * creates a new, empty map with a default initial capacity (16), * load factor (0.75) and concurrencylevel (16). */ public concurrenthashmap() { this(default_initial_capacity, default_load_factor, default_concurrency_level); } /** * creates a new, empty map with the specified initial capacity, * and with default load factor (0.75) and concurrencylevel (16). * * @param initialcapacity the initial capacity. the implementation * performs internal sizing to accommodate this many elements. * @throws illegalargumentexception if the initial capacity of * elements is negative. */ public concurrenthashmap(int initialcapacity) { this(initialcapacity, default_load_factor, default_concurrency_level); } /** * creates a new, empty map with the specified initial capacity * and load factor and with the default concurrencylevel (16). * * @param initialcapacity the implementation performs internal * sizing to accommodate this many elements. * @param loadfactor the load factor threshold, used to control resizing. * resizing may be performed when the average number of elements per * bin exceeds this threshold. * @throws illegalargumentexception if the initial capacity of * elements is negative or the load factor is nonpositive * * @since 1.6 */ public concurrenthashmap(int initialcapacity, float loadfactor) { this(initialcapacity, loadfactor, default_concurrency_level); } /** * creates a new, empty map with the specified initial * capacity, load factor and concurrency level. * * @param initialcapacity the initial capacity. the implementation * performs internal sizing to accommodate this many elements. * @param loadfactor the load factor threshold, used to control resizing. * resizing may be performed when the average number of elements per * bin exceeds this threshold. * @param concurrencylevel the estimated number of concurrently * updating threads. the implementation performs internal sizing * to try to accommodate this many threads. 
* @throws illegalargumentexception if the initial capacity is * negative or the load factor or concurrencylevel are * nonpositive. */ @suppresswarnings("unchecked") public concurrenthashmap(int initialcapacity, float loadfactor, int concurrencylevel) { if (!(loadfactor > 0) || initialcapacity < 0 || concurrencylevel <= 0) //参数检查 throw new illegalargumentexception(); if (concurrencylevel > max_segments) //concurrentcylevel实际上就是最大并发数 concurrencylevel = max_segments; // find power-of-two sizes best matching arguments int sshift = 0; int ssize = 1; while (ssize < concurrencylevel) { ++sshift; ssize <<= 1; } this.segmentshift = 32 - sshift; this.segmentmask = ssize - 1; if (initialcapacity > maximum_capacity) initialcapacity = maximum_capacity; int c = initialcapacity / ssize; if (c * ssize < initialcapacity) ++c; int cap = min_segment_table_capacity; while (cap < c) cap <<= 1; // create segments and segments[0] segment<k,v> s0 = new segment<k,v>(loadfactor, (int)(cap * loadfactor), (hashentry<k,v>[])new hashentry[cap]); //创建一个segment segment<k,v>[] ss = (segment<k,v>[])new segment[ssize]; //创建一个segment数组 unsafe.putorderedobject(ss, sbase, s0); // ordered write of segments[0] //将s0设置为ss的第一个元素 this.segments = ss; //将ss作为segments }
jdk7中的concurrenthashmap源码中put方法
/** * maps the specified key to the specified value in this table. * neither the key nor the value can be null. * * <p> the value can be retrieved by calling the <tt>get</tt> method * with a key that is equal to the original key. * * @param key key with which the specified value is to be associated * @param value value to be associated with the specified key * @return the previous value associated with <tt>key</tt>, or * <tt>null</tt> if there was no mapping for <tt>key</tt> * @throws nullpointerexception if the specified key or value is null */ @suppresswarnings("unchecked") public v put(k key, v value) { segment<k,v> s; if (value == null) throw new nullpointerexception(); int hash = hash(key); // 计算hash值 int j = (hash >>> segmentshift) & segmentmask; //计算下标j if ((s = (segment<k,v>)unsafe.getobject // nonvolatile; recheck (segments, (j << sshift) + sbase)) == null) // in ensuresegment s = ensuresegment(j); //若j处有segment就返回,若没有就创建并返回 return s.put(key, hash, value, false); //将值put到segment中去 } // segment 中put数据的方法 final v put(k key, int hash, v value, boolean onlyifabsent) { hashentry<k,v> node = trylock() ? 
null : scanandlockforput(key, hash, value); //如果trylock成功,就返回null,否则。。。 v oldvalue; try { hashentry<k,v>[] tab = table; int index = (tab.length - 1) & hash; //根据table数组的长度 和 hash值计算index小标 hashentry<k,v> first = entryat(tab, index); //找到table数组在 index处链表的头部 for (hashentry<k,v> e = first;;) { //从first开始遍历链表 if (e != null) { //若e!=null k k; if ((k = e.key) == key || (e.hash == hash && key.equals(k))) { //如果key相同 oldvalue = e.value; //获取旧值 if (!onlyifabsent) { //若absent=false e.value = value; //覆盖旧值 ++modcount; // } break; //若已经找到,就退出链表遍历 } e = e.next; //若key不相同,继续遍历 } else { //直到e为null if (node != null) //将元素放到链表头部 node.setnext(first); else node = new hashentry<k,v>(hash, key, value, first); //创建新的entry int c = count + 1; //count 用来记录元素个数 if (c > threshold && tab.length < maximum_capacity) //如果hashmap元素个数超过threshold,并且table长度小于最大容量 rehash(node); //rehash跟resize的功能差不多,将table的长度变为原来的两倍,重新打包entries,并将给定的node添加到新的table else //如果还有容量 setentryat(tab, index, node); //就在index处添加链表节点 ++modcount; //修改操作数 count = c; //将count+1 oldvalue = null; // break; } } } finally { unlock(); //执行完操作后,释放锁 } return oldvalue; //返回oldvalue } /** 将table的长度变为原来的两倍,重新打包entries,并将给定的node添加到新的table * doubles size of table and repacks entries, also adding the * given node to new table */ @suppresswarnings("unchecked") private void rehash(hashentry<k,v> node) { /* * reclassify nodes in each list to new table. because we * are using power-of-two expansion, the elements from * each bin must either stay at same index, or move with a * power of two offset. we eliminate unnecessary node * creation by catching cases where old nodes can be * reused because their next fields won't change. * statistically, at the default threshold, only about * one-sixth of them need cloning when a table * doubles. the nodes they replace will be garbage * collectable as soon as they are no longer referenced by * any reader thread that may be in the midst of * concurrently traversing table. 
entry accesses use plain * array indexing because they are followed by volatile * table write. */ hashentry<k,v>[] oldtable = table; int oldcapacity = oldtable.length; int newcapacity = oldcapacity << 1; threshold = (int)(newcapacity * loadfactor); hashentry<k,v>[] newtable = (hashentry<k,v>[]) new hashentry[newcapacity]; int sizemask = newcapacity - 1; for (int i = 0; i < oldcapacity ; i++) { hashentry<k,v> e = oldtable[i]; if (e != null) { hashentry<k,v> next = e.next; int idx = e.hash & sizemask; if (next == null) // single node on list newtable[idx] = e; else { // reuse consecutive sequence at same slot hashentry<k,v> lastrun = e; int lastidx = idx; for (hashentry<k,v> last = next; last != null; last = last.next) { int k = last.hash & sizemask; if (k != lastidx) { lastidx = k; lastrun = last; } } newtable[lastidx] = lastrun; // clone remaining nodes for (hashentry<k,v> p = e; p != lastrun; p = p.next) { v v = p.value; int h = p.hash; int k = h & sizemask; hashentry<k,v> n = newtable[k]; newtable[k] = new hashentry<k,v>(h, p.key, v, n); } } } } int nodeindex = node.hash & sizemask; // add the new node node.setnext(newtable[nodeindex]); newtable[nodeindex] = node; table = newtable; }
jdk7中的concurrenthashmap源码中get方法
/** * returns the value to which the specified key is mapped, * or {@code null} if this map contains no mapping for the key. * * <p>more formally, if this map contains a mapping from a key * {@code k} to a value {@code v} such that {@code key.equals(k)}, * then this method returns {@code v}; otherwise it returns * {@code null}. (there can be at most one such mapping.) * * @throws nullpointerexception if the specified key is null */ public v get(object key) { segment<k,v> s; // manually integrate access methods to reduce overhead hashentry<k,v>[] tab; int h = hash(key); long u = (((h >>> segmentshift) & segmentmask) << sshift) + sbase; if ((s = (segment<k,v>)unsafe.getobjectvolatile(segments, u)) != null && (tab = s.table) != null) { for (hashentry<k,v> e = (hashentry<k,v>) unsafe.getobjectvolatile (tab, ((long)(((tab.length - 1) & h)) << tshift) + tbase); e != null; e = e.next) { k k; if ((k = e.key) == key || (e.hash == h && key.equals(k))) return e.value; } } return null; }
jdk8中的concurrenthashmap源码解读
jdk8中的concurrenthashmap取消了基于 segment 的分段锁思想,改用 cas + synchronized 控制并发操作,锁的粒度变得更小,并发度更高。并且追随jdk8的hashmap底层实现,使用数组+链表+红黑树进行数据存储。
jdk8中的concurrenthashmap源码中的主要字段
private static final int maximum_capacity = 1 << 30; private static final int default_capacity = 16; private static final float load_factor = 0.75f; static final int treeify_threshold = 8; static final int untreeify_threshold = 6; static final int min_treeify_capacity = 64; private static final int min_transfer_stride = 16; static final int moved = -1; // hash for forwarding nodes //转发节点的hash值 static final int treebin = -2; // hash for roots of trees //树的根节点的hash值 static final int reserved = -3; // hash for transient reservations //临时节点的 hash值 static final int hash_bits = 0x7fffffff; // usable bits of normal node hash //正常节点的hash值
jdk8中的concurrenthashmap源码中构造器
/** * creates a new, empty map with the default initial table size (16). */ public concurrenthashmap() { } /** * creates a new, empty map with an initial table size * accommodating the specified number of elements without the need * to dynamically resize. * * @param initialcapacity the implementation performs internal * sizing to accommodate this many elements. * @throws illegalargumentexception if the initial capacity of * elements is negative */ public concurrenthashmap(int initialcapacity) { if (initialcapacity < 0) throw new illegalargumentexception(); int cap = ((initialcapacity >= (maximum_capacity >>> 1)) ? maximum_capacity : tablesizefor(initialcapacity + (initialcapacity >>> 1) + 1)); this.sizectl = cap; } /** * creates a new, empty map with an initial table size based on * the given number of elements ({@code initialcapacity}) and * initial table density ({@code loadfactor}). * * @param initialcapacity the initial capacity. the implementation * performs internal sizing to accommodate this many elements, * given the specified load factor. * @param loadfactor the load factor (table density) for * establishing the initial table size * @throws illegalargumentexception if the initial capacity of * elements is negative or the load factor is nonpositive * * @since 1.6 */ public concurrenthashmap(int initialcapacity, float loadfactor) { this(initialcapacity, loadfactor, 1); } /** * creates a new, empty map with an initial table size based on * the given number of elements ({@code initialcapacity}), table * density ({@code loadfactor}), and number of concurrently * updating threads ({@code concurrencylevel}). * * @param initialcapacity the initial capacity. the implementation * performs internal sizing to accommodate this many elements, * given the specified load factor. * @param loadfactor the load factor (table density) for * establishing the initial table size * @param concurrencylevel the estimated number of concurrently * updating threads. 
the implementation may use this value as * a sizing hint. * @throws illegalargumentexception if the initial capacity is * negative or the load factor or concurrencylevel are * nonpositive */ public concurrenthashmap(int initialcapacity, float loadfactor, int concurrencylevel) { if (!(loadfactor > 0.0f) || initialcapacity < 0 || concurrencylevel <= 0) throw new illegalargumentexception(); if (initialcapacity < concurrencylevel) // use at least as many bins initialcapacity = concurrencylevel; // as estimated threads long size = (long)(1.0 + (long)initialcapacity / loadfactor); int cap = (size >= (long)maximum_capacity) ? maximum_capacity : tablesizefor((int)size); this.sizectl = cap; }
jdk8中的concurrenthashmap源码中的put方法
/** * maps the specified key to the specified value in this table. * neither the key nor the value can be null. * * <p>the value can be retrieved by calling the {@code get} method * with a key that is equal to the original key. * * @param key key with which the specified value is to be associated * @param value value to be associated with the specified key * @return the previous value associated with {@code key}, or * {@code null} if there was no mapping for {@code key} * @throws nullpointerexception if the specified key or value is null */ public v put(k key, v value) { return putval(key, value, false); } /** implementation for put and putifabsent */ final v putval(k key, v value, boolean onlyifabsent) { if (key == null || value == null) throw new nullpointerexception(); int hash = spread(key.hashcode()); //计算hash值 int bincount = 0; for (node<k,v>[] tab = table;;) { //自旋 node<k,v> f; int n, i, fh; if (tab == null || (n = tab.length) == 0) //table==null || table.length==0 tab = inittable(); //就inittable else if ((f = tabat(tab, i = (n - 1) & hash)) == null) { //若下标 i 处的元素为null if (castabat(tab, i, null, //直接用cas操作,i处的元素 new node<k,v>(hash, key, value, null))) break; // no lock when adding to empty bin 想emptybin中假如元素的时候,不需要加锁 } else if ((fh = f.hash) == moved) //若下标 i 处的元素不为null,且f.hash==moved moved为常量值-1 tab = helptransfer(tab, f); // else { //如果是一般的节点 v oldval = null; synchronized (f) { //当头部元素不为null,且不需要转换成树时,需要进行同步操作 if (tabat(tab, i) == f) { if (fh >= 0) { //若 链表头部hash值 >=0 bincount = 1; for (node<k,v> e = f;; ++bincount) { k ek; if (e.hash == hash && ((ek = e.key) == key || (ek != null && key.equals(ek)))) { //如果key相同 oldval = e.val; if (!onlyifabsent) //且不为absent e.val = value; //旧值覆盖新值 break; } node<k,v> pred = e; if ((e = e.next) == null), { //如果链表遍历完成,还没退出,说明没有相同的key存在,在尾部添加节点 pred.next = new node<k,v>(hash, key, value, null); break; } } } else if (f instanceof treebin) { //如果f是tree的节点 node<k,v> p; bincount = 2; if ((p = ((treebin<k,v>)f).puttreeval(hash, 
key, value)) != null) { oldval = p.val; if (!onlyifabsent) p.val = value; } } } } if (bincount != 0) { if (bincount >= treeify_threshold) treeifybin(tab, i); if (oldval != null) return oldval; break; } } } addcount(1l, bincount); return null; } /** * initializes table, using the size recorded in sizectl. *///通过cas抢sizectl,来抢占inittable的资格,其他线程自旋等待,直到table不为null private final node<k,v>[] inittable() { node<k,v>[] tab; int sc; while ((tab = table) == null || tab.length == 0) { if ((sc = sizectl) < 0) thread.yield(); // lost initialization race; just spin //线程让步,让其他线程优先执行 else if (u.compareandswapint(this, sizectl, sc, -1)) { try { if ((tab = table) == null || tab.length == 0) { int n = (sc > 0) ? sc : default_capacity; @suppresswarnings("unchecked") node<k,v>[] nt = (node<k,v>[])new node<?,?>[n]; //初始化数组 table = tab = nt; //将nt赋值给table sc = n - (n >>> 2); } } finally { sizectl = sc; } break; } } return tab; }
jdk8中的concurrenthashmap源码中的get方法
/** * returns the value to which the specified key is mapped, * or {@code null} if this map contains no mapping for the key. * * <p>more formally, if this map contains a mapping from a key * {@code k} to a value {@code v} such that {@code key.equals(k)}, * then this method returns {@code v}; otherwise it returns * {@code null}. (there can be at most one such mapping.) * * @throws nullpointerexception if the specified key is null */ public v get(object key) { node<k,v>[] tab; node<k,v> e, p; int n, eh; k ek; int h = spread(key.hashcode()); if ((tab = table) != null && (n = tab.length) > 0 && (e = tabat(tab, (n - 1) & h)) != null) { if ((eh = e.hash) == h) { if ((ek = e.key) == key || (ek != null && key.equals(ek))) return e.val; } else if (eh < 0) return (p = e.find(h, key)) != null ? p.val : null; while ((e = e.next) != null) { if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek)))) return e.val; } } return null; }
上一篇: 详解Vue源码之数据的代理访问
下一篇: 高仙芝是高丽人?真相是什么