/**
 *
 * Copyright (c) 2014, the Railo Company Ltd. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library. If not, see <http://www.gnu.org/licenses/>.
 *
 **/
package lucee.commons.collection.concurrent;

import java.io.IOException;
import java.io.Serializable;
import java.util.AbstractCollection;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;

import lucee.commons.collection.AbstractMapPro;
import lucee.commons.collection.HashMapPro;
import lucee.runtime.exp.PageException;
import lucee.runtime.type.KeyImpl;

/**
 * <p>Concurrent hash map and linked list implementation of the
 * <tt>ConcurrentMap</tt> interface, with predictable iteration order.
 * This implementation differs from <tt>ConcurrentHashMap</tt> in that it
 * maintains a doubly-linked list running through all of its entries.
 * This linked list defines the iteration ordering, which is normally the
 * order in which keys were inserted into the map (<i>insertion-order</i>).
 * Note that insertion order is not affected if a key is <i>re-inserted</i>
 * into the map. (A key <tt>k</tt> is reinserted into a map <tt>m</tt> if
 * <tt>m.put(k, v)</tt> is invoked when <tt>m.containsKey(k)</tt> would
 * return <tt>true</tt> immediately prior to the invocation.)
 *
 * <p>This implementation spares its clients from the unspecified, generally
 * chaotic ordering provided by {@link ConcurrentHashMap} (and {@link Hashtable}),
 * without incurring the increased cost associated with {@link TreeMap}. It
 * can be used to produce a copy of a map that has the same order as the
 * original, regardless of the original map's implementation:
 * <pre>
 * void foo(Map m) {
 *     Map copy = new ConcurrentLinkedHashMapPro(m);
 *     ...
 * }
 * </pre>
 * This technique is particularly useful if a module takes a map on input,
 * copies it, and later returns results whose order is determined by that of
 * the copy. (Clients generally appreciate having things returned in the same
 * order they were presented.)
 *
 * <p>A special {@link #ConcurrentLinkedHashMapPro(int, float, int, int, EvictionPolicy)
 * constructor} is provided to create a concurrent linked hash map whose order of
 * iteration is the order designated by the supplied eviction policy. Invoking the
 * <tt>put</tt> or <tt>get</tt> method results in an access to the corresponding
 * entry (assuming it exists after the invocation completes). The <tt>putAll</tt>
 * method generates one entry access for each mapping in the specified map, in the
 * order that key-value mappings are provided by the specified map's entry set iterator.
 * <i>No other methods generate entry accesses.</i> In particular, operations on
 * collection-views do <i>not</i> affect the order of iteration of the backing
 * map.
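 *
 * <p>For illustration, a bounded map with an explicit policy might be created as
 * follows (a minimal sketch; it assumes the {@link FIFOPolicy} used by the default
 * constructors, which is taken here to evict entries in insertion order):
 * <pre>
 * // initialCapacity, loadFactor, concurrencyLevel, maxSize, eviction policy
 * ConcurrentLinkedHashMapPro&lt;String, Object&gt; cache =
 *     new ConcurrentLinkedHashMapPro&lt;String, Object&gt;(16, 0.75f, 16, 1000, new FIFOPolicy());
 * cache.put("key", "value"); // once 1000 entries are held, the eldest entry is evicted first
 * </pre>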
 *
 * <p>The {@link #removeEldestEntry(Map.Entry)} method may be overridden to
 * impose a policy for removing stale mappings automatically when new mappings
 * are added to the map.
 *
 * <p>Performance is likely to be just slightly below that of <tt>ConcurrentHashMap</tt>,
 * due to the added expense of maintaining the linked list, with one exception:
 * Iteration over the collection-views of a <tt>ConcurrentLinkedHashMapPro</tt> requires
 * time proportional to the <i>size</i> of the map, regardless of its capacity.
 * Iteration over a <tt>ConcurrentHashMap</tt> is likely to be more expensive,
 * requiring time proportional to its <i>capacity</i>.
 *
 * @author Justin Cater - Original code by Doug Lea
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 */
public class ConcurrentLinkedHashMapPro<K,V> extends AbstractMapPro<K, V>
        implements ConcurrentMap<K, V>, Serializable {

    private static final long serialVersionUID = -6894959298396386516L;

    /*
     * The basic strategy is to subdivide the table among Segments,
     * each of which itself is a concurrently readable hash table.
     */

    /* ---------------- Constants -------------- */

    /**
     * The default initial capacity for this table,
     * used when not otherwise specified in a constructor.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 16;

    /**
     * The default load factor for this table, used when not
     * otherwise specified in a constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * The default concurrency level for this table, used when not
     * otherwise specified in a constructor.
     */
    static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The maximum capacity, used if a higher value is implicitly
     * specified by either of the constructors with arguments. MUST
     * be a power of two <= 1<<30 to ensure that entries are indexable
     * using ints.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The maximum number of segments to allow; used to bound
     * constructor arguments.
     */
    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

    /**
     * Number of unsynchronized retries in size and containsValue
     * methods before resorting to locking. This is used to avoid
     * unbounded retries if tables undergo continuous modification
     * which would make it impossible to obtain an accurate result.
     */
    static final int RETRIES_BEFORE_LOCK = 2;

    /**
     * The maxSize attribute defines the maximum number of name/value
     * pairs the map will hold. A value of Integer.MAX_VALUE disables
     * this upper bound.
     */
    static final int UNLIMITED_SIZE = Integer.MAX_VALUE;

    /* ---------------- Fields -------------- */

    /**
     * Mask value for indexing into segments.
     * The upper bits of a key's hash code are used to choose the segment.
     */
    final int segmentMask;

    /**
     * Shift value for indexing within segments.
     */
    final int segmentShift;

    /**
     * The segments, each of which is a specialized hash table.
     */
    final Segment<K,V>[] segments;

    /**
     * The eviction policy to be used.
     */
    final EvictionPolicy evictionPolicy;

    /**
     * The maxSize attribute defines the maximum number of name/value
     * pairs the map will hold. A value of UNLIMITED_SIZE disables
     * this upper bound.
     */
    final int maxSize;

    /**
     * The head of the doubly linked list.
     */
    transient HashEntry<K,V> header;

    /**
     * The lock for atomic access to the doubly linked list.
     */
    transient ReentrantLock modifyListLock;

    transient Set<K> keySet;
    transient Set<Map.Entry<K,V>> entrySet;
    transient Collection<V> values;

    /* ---------------- Small Utilities -------------- */

    /**
     * Applies a supplemental hash function to a given hashCode, which
     * defends against poor quality hash functions. This is critical
     * because ConcurrentHashMap uses power-of-two length hash tables,
     * that otherwise encounter collisions for hashCodes that do not
     * differ in lower or upper bits.
     */
    private static int hash(Object k) {
        if(k instanceof KeyImpl) return ((KeyImpl) k).wangJenkinsHash();
        // Spread bits to regularize both segment and index locations,
        // using variant of single-word Wang/Jenkins hash.
        int h = k.hashCode();

        h += (h << 15) ^ 0xffffcd7d;
        h ^= (h >>> 10);
        h += (h << 3);
        h ^= (h >>> 6);
        h += (h << 2) + (h << 14);
        return h ^ (h >>> 16);
    }

    /**
     * Returns the segment that should be used for a key with the given hash.
     * @param hash the hash code for the key
     * @return the segment
     */
    final Segment<K,V> segmentFor(int hash) {
        return segments[(hash >>> segmentShift) & segmentMask];
    }

    /* ---------------- Inner Classes -------------- */

    /**
     * ConcurrentHashMap list entry. Note that this is never exported
     * out as a user-visible Map.Entry.
     *
     * Because the value field is volatile, not final, it is legal wrt
     * the Java Memory Model for an unsynchronized reader to see null
     * instead of the initial value when read via a data race. Although a
     * reordering leading to this is not likely to ever actually
     * occur, the Segment.readValueUnderLock method is used as a
     * backup in case a null (pre-initialized) value is ever seen in
     * an unsynchronized access method.
     */
    static final class HashEntry<K,V> implements Entry<K,V> {
        final K key;
        final int hash;
        volatile V value;
        HashEntry<K,V> next;
        HashEntry<K,V> after;
        HashEntry<K,V> before;
        long accessCount;
        final long creationTime;
        long lastAccessedTime;
        ReentrantLock modifyListLock;
        AtomicInteger cloneAllFlag;

        HashEntry(K key, int hash, HashEntry<K,V> next, V value, long accessCount, long creationTime, long lastAccessedTime) {
            this.key = key;
            this.hash = hash;
            this.next = next;
            this.value = value;
            this.accessCount = accessCount;
            this.creationTime = creationTime;
            this.lastAccessedTime = lastAccessedTime;
        }

        @SuppressWarnings("unchecked")
        static final <K,V> HashEntry<K,V>[] newArray(int i) {
            return new HashEntry[i];
        }
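        /*
         * Each entry participates in two structures: the per-bin singly linked
         * chain (via "next") used by the hash table, and the circular doubly
         * linked list (via "before"/"after") threaded through all entries of
         * the map, with the map's "header" entry acting as sentinel. The
         * methods below maintain the doubly linked list.
         */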
        /**
         * Removes this entry from the linked list.
         */
        public void remove() {
            before.after = after;
            after.before = before;
        }

        /**
         * Inserts this entry before the specified existing entry in the list.
         */
        public void addBefore(HashEntry<K,V> existingEntry) {
            after = existingEntry;
            before = existingEntry.before;
            before.after = this;
            after.before = this;
        }

        /**
         * This method is invoked whenever the value of a pre-existing entry
         * is read by Map.get or modified by Map.put. If the eviction policy
         * is access-ordered, it moves the entry to the position designated
         * by the policy; otherwise, it does nothing.
         */
        void recordAccess(HashEntry<K,V> header, EvictionPolicy evictionPolicy) {
            waitForModifyPermition(header);
            remove();
            addBefore((HashEntry<K, V>)evictionPolicy.recordAccess(header, this));
            accessCount++;
            lastAccessedTime = System.currentTimeMillis();
            grandModifyAndCloneAllPermition(header);
        }

        /**
         * This method is invoked whenever a new entry is inserted by Map.put.
         */
        void recordInsertion(HashEntry<K,V> header, EvictionPolicy evictionPolicy) {
            waitForModifyPermition(header);
            addBefore((HashEntry<K, V>)evictionPolicy.recordInsertion(header, this));
            grandModifyAndCloneAllPermition(header);
        }

        void recordRemoval(HashEntry<K,V> header) {
            waitForModifyPermition(header);
            remove();
            grandModifyAndCloneAllPermition(header);
        }

        public HashEntry<K,V> clone(HashEntry<K,V> next, HashEntry<K,V> header) {
            waitForModifyPermition(header);
            HashEntry<K,V> nextEntry = after;
            remove();
            HashEntry<K,V> theClone = new HashEntry<K, V>(key, hash, next, value, accessCount, creationTime, lastAccessedTime);
            theClone.addBefore(nextEntry);
            grandModifyAndCloneAllPermition(header);
            return theClone;
        }

        public HashEntry<K,V> cloneAll(HashEntry<K,V> header) {
            waitForCloneAllPermition(header);
            HashEntry<K,V> rootClone = new HashEntry<K, V>(key, hash, next, value, accessCount, creationTime, lastAccessedTime);
            rootClone.before = rootClone.after = rootClone;

            HashEntry<K,V> pointer = after;
            while(pointer != header) {
                HashEntry<K,V> nextClone = new HashEntry<K, V>(pointer.key, pointer.hash, pointer.next, pointer.value, pointer.accessCount, pointer.creationTime, pointer.lastAccessedTime);
                nextClone.addBefore(rootClone);
                pointer = pointer.after;
            }
            grandModifyPermition(header);
            return rootClone;
        }

        private void waitForModifyPermition(HashEntry<K,V> header) {
            while(!checkForModifyPermition(header)) {
                try {
                    Thread.sleep(0,1);
                } catch (InterruptedException e) {}
            }
        }

        private boolean checkForModifyPermition(HashEntry<K,V> header) {
            if(header.cloneAllFlag.getAndDecrement() <= 0) {
                header.modifyListLock.lock();
                return true;
            }
            header.cloneAllFlag.getAndIncrement();
            return false;
        }

        private void grandModifyAndCloneAllPermition(HashEntry<K,V> header) {
            header.modifyListLock.unlock();
            header.cloneAllFlag.getAndIncrement();
        }

        private void waitForCloneAllPermition(HashEntry<K,V> header) {
            while(!checkForCloneAllPermition(header)) {
                try {
                    Thread.sleep(0,1);
                } catch (InterruptedException e) {}
            }
        }

        private boolean checkForCloneAllPermition(HashEntry<K,V> header) {
            if(header.cloneAllFlag.getAndIncrement() >= 0)
                return true;
            grandModifyPermition(header);
            return false;
        }
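        /*
         * The permit methods above, together with grandModifyPermition below,
         * implement a small permit protocol around the doubly linked list: the
         * header's cloneAllFlag acts as a counter that is decremented by list
         * modifications (which additionally serialize on modifyListLock) and
         * incremented by cloneAll. A modification may proceed only while no
         * cloneAll holds the counter above zero, and cloneAll may proceed only
         * while no modification holds it below zero; a thread that cannot
         * obtain its permit undoes its counter change and retries after a
         * short sleep.
         */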
        private void grandModifyPermition(HashEntry<K,V> header) {
            header.cloneAllFlag.getAndDecrement();
        }

        public K getKey() {
            return key;
        }

        public V getValue() {
            return value;
        }

        public V setValue(V value) {
            V oldValue = this.value;
            this.value = value;
            return oldValue;
        }

        public Entry<K, V> getAfter() {
            return after;
        }

        public Entry<K, V> getBefore() {
            return before;
        }

        public long getAccessCount() {
            return accessCount;
        }

        public long getCreationTime() {
            return creationTime;
        }

        public long getLastAccessTime() {
            return lastAccessedTime;
        }

    }

    /**
     * Segments are specialized versions of hash tables. This
     * subclasses from ReentrantLock opportunistically, just to
     * simplify some locking and avoid separate construction.
     */
    static final class Segment<K,V> extends ReentrantLock implements Serializable {
        /*
         * Segments maintain a table of entry lists that are ALWAYS
         * kept in a consistent state, so can be read without locking.
         * Next fields of nodes are immutable (final). All list
         * additions are performed at the front of each bin. This
         * makes it easy to check changes, and also fast to traverse.
         * When nodes would otherwise be changed, new nodes are
         * created to replace them. This works well for hash tables
         * since the bin lists tend to be short. (The average length
         * is less than two for the default load factor threshold.)
         *
         * Read operations can thus proceed without locking, but rely
         * on selected uses of volatiles to ensure that completed
         * write operations performed by other threads are
         * noticed. For most purposes, the "count" field, tracking the
         * number of elements, serves as that volatile variable
         * ensuring visibility. This is convenient because this field
         * needs to be read in many read operations anyway:
         *
         * - All (unsynchronized) read operations must first read the
         *   "count" field, and should not look at table entries if
         *   it is 0.
         *
         * - All (synchronized) write operations should write to
         *   the "count" field after structurally changing any bin.
         *   The operations must not take any action that could even
         *   momentarily cause a concurrent read operation to see
         *   inconsistent data. This is made easier by the nature of
         *   the read operations in Map. For example, no operation
         *   can reveal that the table has grown but the threshold
         *   has not yet been updated, so there are no atomicity
         *   requirements for this with respect to reads.
         *
         * As a guide, all critical volatile reads and writes to the
         * count field are marked in code comments.
         */

        private static final long serialVersionUID = 2249069246763182397L;

        /**
         * The number of elements in this segment's region.
         */
        transient volatile int count;

        /**
         * Number of updates that alter the size of the table. This is
         * used during bulk-read methods to make sure they see a
         * consistent snapshot: If modCounts change during a traversal
         * of segments computing size or checking containsValue, then
         * we might have an inconsistent view of state so (usually)
         * must retry.
         */
        transient int modCount;

        /**
         * The table is rehashed when its size exceeds this threshold.
         * (The value of this field is always <tt>(int)(capacity *
         * loadFactor)</tt>.)
         */
        transient int threshold;

        /**
         * The per-segment table.
         */
        transient volatile HashEntry<K,V>[] table;

        /**
         * The load factor for the hash table. Even though this value
         * is the same for all segments, it is replicated to avoid needing
         * links to the outer object.
         *
         * @serial
         */
        final float loadFactor;

        /**
         * The eviction policy for this linked hash map. Even though this value
         * is the same for all segments, it is replicated to avoid needing
         * links to the outer object.
         *
         * @serial
         */
        final EvictionPolicy evictionPolicy;

        Segment(int initialCapacity, float lf, EvictionPolicy ep) {
            loadFactor = lf;
            evictionPolicy = ep;
            setTable(HashEntry.<K,V>newArray(initialCapacity));
        }

        @SuppressWarnings("unchecked")
        static final <K,V> Segment<K,V>[] newArray(int i) {
            return new Segment[i];
        }

        /**
         * Sets table to new HashEntry array.
         * Call only while holding lock or in constructor.
         */
        void setTable(HashEntry<K,V>[] newTable) {
            threshold = (int)(newTable.length * loadFactor);
            table = newTable;
        }

        /**
         * Returns properly casted first entry of bin for given hash.
         */
        HashEntry<K,V> getFirst(int hash) {
            HashEntry<K,V>[] tab = table;
            return tab[hash & (tab.length - 1)];
        }

        /**
         * Reads value field of an entry under lock. Called if value
         * field ever appears to be null. This is possible only if a
         * compiler happens to reorder a HashEntry initialization with
         * its table assignment, which is legal under the memory model
         * but is not known to ever occur.
         */
        V readValueUnderLock(HashEntry<K,V> e) {
            lock();
            try {
                return e.value;
            } finally {
                unlock();
            }
        }

        /* Specialized implementations of map methods */

        V get(Object key, int hash, HashEntry<K, V> header, V defaultValue) {
            if (count != 0) { // read-volatile
                HashEntry<K,V> e = getFirst(hash);
                while (e != null) {
                    if (e.hash == hash && key.equals(e.key)) {
                        V v = e.value;
                        if (v != null) {
                            if(evictionPolicy.accessOrder())
                                e.recordAccess(header, evictionPolicy);
                            return v;
                        }
                        return readValueUnderLock(e); // recheck
                    }
                    e = e.next;
                }
            }
            return defaultValue;
        }

        boolean containsKey(Object key, int hash) {
            if (count != 0) { // read-volatile
                HashEntry<K,V> e = getFirst(hash);
                while (e != null) {
                    if (e.hash == hash && key.equals(e.key))
                        return true;
                    e = e.next;
                }
            }
            return false;
        }

        boolean containsValue(Object value) {
            if (count != 0) { // read-volatile
                HashEntry<K,V>[] tab = table;
                int len = tab.length;
                for (int i = 0; i < len; i++) {
                    for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
                        V v = e.value;
                        if (v == null) // recheck
                            v = readValueUnderLock(e);
                        if (value.equals(v))
                            return true;
                    }
                }
            }
            return false;
        }

        boolean replace(K key, int hash, V oldValue, V newValue) {
            lock();
            try {
                HashEntry<K,V> e = getFirst(hash);
                while (e != null && (e.hash != hash || !key.equals(e.key)))
                    e = e.next;

                boolean replaced = false;
                if (e != null && oldValue.equals(e.value)) {
                    replaced = true;
                    e.value = newValue;
                }
                return replaced;
            } finally {
                unlock();
            }
        }

        V replace(K key, int hash, V newValue) {
            lock();
            try {
                HashEntry<K,V> e = getFirst(hash);
                while (e != null &&
                        (e.hash != hash || !key.equals(e.key)))
                    e = e.next;

                V oldValue = null;
                if (e != null) {
                    oldValue = e.value;
                    e.value = newValue;
                }
                return oldValue;
            } finally {
                unlock();
            }
        }

        V put(K key, int hash, V value, boolean onlyIfAbsent, HashEntry<K, V> header) {
            lock();
            try {
                int c = count;
                if (c++ > threshold) // ensure capacity
                    rehash(header);
                HashEntry<K,V>[] tab = table;
                int index = hash & (tab.length - 1);
                HashEntry<K,V> first = tab[index];
                HashEntry<K,V> e = first;
                while (e != null && (e.hash != hash || !key.equals(e.key)))
                    e = e.next;

                V oldValue;
                if (e != null) {
                    oldValue = e.value;
                    if (!onlyIfAbsent)
                        e.value = value;
                }
                else {
                    oldValue = null;
                    ++modCount;
                    long now = System.currentTimeMillis();
                    e = new HashEntry<K,V>(key, hash, first, value, 1, now, now);
                    if(evictionPolicy.insertionOrder())
                        e.recordInsertion(header, evictionPolicy);
                    else
                        e.addBefore(header);
                    tab[index] = e;
                    count = c; // write-volatile
                }
                return oldValue;
            } finally {
                unlock();
            }
        }

        void rehash(HashEntry<K, V> header) {
            HashEntry<K,V>[] oldTable = table;
            int oldCapacity = oldTable.length;
            if (oldCapacity >= MAXIMUM_CAPACITY)
                return;

            /*
             * Reclassify nodes in each list to new Map. Because we are
             * using power-of-two expansion, the elements from each bin
             * must either stay at same index, or move with a power of two
             * offset. We eliminate unnecessary node creation by catching
             * cases where old nodes can be reused because their next
             * fields won't change. Statistically, at the default
             * threshold, only about one-sixth of them need cloning when
             * a table doubles. The nodes they replace will be garbage
             * collectable as soon as they are no longer referenced by any
             * reader thread that may be in the midst of traversing table
             * right now.
             */

            HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity << 1);
            threshold = (int)(newTable.length * loadFactor);
            int sizeMask = newTable.length - 1;
            for (int i = 0; i < oldCapacity; i++) {
                // We need to guarantee that any existing reads of old Map can
                // proceed. So we cannot yet null out each bin.
                HashEntry<K,V> e = oldTable[i];

                if (e != null) {
                    HashEntry<K,V> next = e.next;
                    int idx = e.hash & sizeMask;

                    // Single node on list
                    if (next == null)
                        newTable[idx] = e;

                    else {
                        // Reuse trailing consecutive sequence at same slot
                        HashEntry<K,V> lastRun = e;
                        int lastIdx = idx;
                        for (HashEntry<K,V> last = next;
                             last != null;
                             last = last.next) {
                            int k = last.hash & sizeMask;
                            if (k != lastIdx) {
                                lastIdx = k;
                                lastRun = last;
                            }
                        }
                        newTable[lastIdx] = lastRun;

                        // Clone all remaining nodes
                        for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
                            int k = p.hash & sizeMask;
                            HashEntry<K,V> n = newTable[k];
                            newTable[k] = p.clone(n, header);
                        }
                    }
                }
            }
            table = newTable;
        }

        /**
         * Remove; match on key only if value null, else match both.
         */
        V remove(Object key, int hash, Object value, HashEntry<K, V> header, V defaultValue) {
            lock();
            try {
                int c = count - 1;
                HashEntry<K,V>[] tab = table;
                int index = hash & (tab.length - 1);
                HashEntry<K,V> first = tab[index];
                HashEntry<K,V> e = first;
                while (e != null && (e.hash != hash || !key.equals(e.key)))
                    e = e.next;

                V old = defaultValue;
                if (e != null) {
                    if (value == null || value.equals(e.value)) {
                        old = e.value;
                        e.recordRemoval(header);
                        // All entries following removed node can stay
                        // in list, but all preceding ones need to be
                        // cloned.
                        ++modCount;
                        HashEntry<K,V> newFirst = e.next;
                        for (HashEntry<K,V> p = first; p != e; p = p.next)
                            newFirst = p.clone(newFirst, header);
                        tab[index] = newFirst;
                        count = c; // write-volatile
                    }
                }
                return old;
            } finally {
                unlock();
            }
        }

        /**
         * Remove; match on key only if value null, else match both.
         * @throws PageException
         */
        V removeE(Map<K,V> m, Object key, int hash, Object value, HashEntry<K, V> header) throws PageException {
            lock();
            try {
                int c = count - 1;
                HashEntry<K,V>[] tab = table;
                int index = hash & (tab.length - 1);
                HashEntry<K,V> first = tab[index];
                HashEntry<K,V> e = first;
                while (e != null && (e.hash != hash || !key.equals(e.key)))
                    e = e.next;

                if (e != null) {
                    if (value == null || value.equals(e.value)) {
                        e.recordRemoval(header);
                        // All entries following removed node can stay
                        // in list, but all preceding ones need to be
                        // cloned.
                        ++modCount;
                        HashEntry<K,V> newFirst = e.next;
                        for (HashEntry<K,V> p = first; p != e; p = p.next)
                            newFirst = p.clone(newFirst, header);
                        tab[index] = newFirst;
                        count = c; // write-volatile
                        return e.value;
                    }
                }
                throw HashMapPro.invalidKey(m, key, true);
            } finally {
                unlock();
            }
        }

        void clear(HashEntry<K,V> header) {
            if (count != 0) {
                lock();
                try {
                    HashEntry<K,V>[] tab = table;
                    for (int i = 0; i < tab.length; i++) {
                        if(tab[i] != null) {
                            tab[i].recordRemoval(header);
                            tab[i] = null;
                        }
                    }
                    ++modCount;
                    count = 0; // write-volatile
                } finally {
                    unlock();
                }
            }
        }
    }

    /* ---------------- Public operations -------------- */

    /**
     * Creates a new, empty map with the specified initial
     * capacity, load factor, concurrency level, max size and eviction policy.
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements.
     * @param loadFactor the load factor threshold, used to control resizing.
     * Resizing may be performed when the average number of elements per
     * bin exceeds this threshold.
     * @param concurrencyLevel the estimated number of concurrently
     * updating threads. The implementation performs internal sizing
     * to try to accommodate this many threads.
     * @param maxSize the maximum number of name/value pairs this map
     * will hold.
     * @param evictionPolicy the eviction policy to be used
     * @throws IllegalArgumentException if the initial capacity is
     * negative or the load factor or concurrencyLevel are
     * nonpositive.
     */
    public ConcurrentLinkedHashMapPro(int initialCapacity,
            float loadFactor, int concurrencyLevel, int maxSize, EvictionPolicy evictionPolicy) {
        if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();

        this.maxSize = maxSize;
        this.evictionPolicy = evictionPolicy;

        if (concurrencyLevel > MAX_SEGMENTS)
            concurrencyLevel = MAX_SEGMENTS;

        // Find power-of-two sizes best matching arguments
        int sshift = 0;
        int ssize = 1;
        while (ssize < concurrencyLevel) {
            ++sshift;
            ssize <<= 1;
        }
        segmentShift = 32 - sshift;
        segmentMask = ssize - 1;
        this.segments = Segment.newArray(ssize);

        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;
        int c = initialCapacity / ssize;
        if (c * ssize < initialCapacity)
            ++c;
        int cap = 1;
        while (cap < c)
            cap <<= 1;

        for (int i = 0; i < this.segments.length; ++i)
            this.segments[i] = new Segment<K,V>(cap, loadFactor, evictionPolicy);

        header = new HashEntry<K,V>(null, -1, null, null, -1, -1, -1);
        header.before = header.after = header;
        header.modifyListLock = new ReentrantLock();
        header.cloneAllFlag = new AtomicInteger();
    }

    /**
     * Creates a new, empty map with the specified initial capacity
     * and load factor and with the default concurrencyLevel (16).
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @param loadFactor the load factor threshold, used to control resizing.
     * Resizing may be performed when the average number of elements per
     * bin exceeds this threshold.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentLinkedHashMapPro(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL, UNLIMITED_SIZE, new FIFOPolicy());
    }

    /**
     * Creates a new, empty map with the specified initial capacity,
     * and with default load factor (0.75) and concurrencyLevel (16).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative.
     */
    public ConcurrentLinkedHashMapPro(int initialCapacity) {
        this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, UNLIMITED_SIZE, new FIFOPolicy());
    }

    /**
     * Creates a new, empty map with a default initial capacity (16),
     * load factor (0.75) and concurrencyLevel (16).
     */
    public ConcurrentLinkedHashMapPro() {
        this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, UNLIMITED_SIZE, new FIFOPolicy());
    }

    /**
     * Creates a new map with the same mappings as the given map.
     * The map is created with a capacity of 1.5 times the number
     * of mappings in the given map or 16 (whichever is greater),
     * and a default load factor (0.75) and concurrencyLevel (16).
     *
     * @param m the map
     */
    public ConcurrentLinkedHashMapPro(Map<? extends K, ? extends V> m) {
        this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
                DEFAULT_INITIAL_CAPACITY),
                DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, UNLIMITED_SIZE, new FIFOPolicy());
        putAll(m);
    }

    /**
     * Returns <tt>true</tt> if this map contains no key-value mappings.
     *
     * @return <tt>true</tt> if this map contains no key-value mappings
     */
    public boolean isEmpty() {
        final Segment<K,V>[] segments = this.segments;
        /*
         * We keep track of per-segment modCounts to avoid ABA
         * problems in which an element in one segment was added and
         * in another removed during traversal, in which case the
         * table was never actually empty at any point. Note the
         * similar use of modCounts in the size() and containsValue()
         * methods, which are the only other methods also susceptible
         * to ABA problems.
         */
        int[] mc = new int[segments.length];
        int mcsum = 0;
        for (int i = 0; i < segments.length; ++i) {
            if (segments[i].count != 0)
                return false;
            mcsum += mc[i] = segments[i].modCount;
        }
        // If mcsum happens to be zero, then we know we got a snapshot
        // before any modifications at all were made. This is
        // probably common enough to bother tracking.
        if (mcsum != 0) {
            for (int i = 0; i < segments.length; ++i) {
                if (segments[i].count != 0 ||
                        mc[i] != segments[i].modCount)
                    return false;
            }
        }
        return true;
    }

    /**
     * Returns the number of key-value mappings in this map. If the
     * map contains more than <tt>Integer.MAX_VALUE</tt> elements, returns
     * <tt>Integer.MAX_VALUE</tt>.
     *
     * @return the number of key-value mappings in this map
     */
    public int size() {
        final Segment<K,V>[] segments = this.segments;
        long sum = 0;
        long check = 0;
        int[] mc = new int[segments.length];
        // Try a few times to get accurate count. On failure due to
        // continuous async changes in table, resort to locking.
        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
            check = 0;
            sum = 0;
            int mcsum = 0;
            for (int i = 0; i < segments.length; ++i) {
                sum += segments[i].count;
                mcsum += mc[i] = segments[i].modCount;
            }
            if (mcsum != 0) {
                for (int i = 0; i < segments.length; ++i) {
                    check += segments[i].count;
                    if (mc[i] != segments[i].modCount) {
                        check = -1; // force retry
                        break;
                    }
                }
            }
            if (check == sum)
                break;
        }
        if (check != sum) { // Resort to locking all segments
            sum = 0;
            for (int i = 0; i < segments.length; ++i)
                segments[i].lock();
            for (int i = 0; i < segments.length; ++i)
                sum += segments[i].count;
            for (int i = 0; i < segments.length; ++i)
                segments[i].unlock();
        }
        if (sum > Integer.MAX_VALUE)
            return Integer.MAX_VALUE;
        return (int)sum;
    }

    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}. (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        int hash = hash(key);
        return segmentFor(hash).get(key, hash, header, null);
    }

    @Override
    public V g(K key) throws PageException {
        int hash = hash(key);
        Segment<K, V> seg = segmentFor(hash);

        if (seg.count != 0) { // read-volatile
            HashEntry<K,V> e = seg.getFirst(hash);
            while (e != null) {
                if (e.hash == hash && key.equals(e.key)) {
                    V v = e.value;
                    if (v != null) {
                        if(evictionPolicy.accessOrder())
                            e.recordAccess(header, evictionPolicy);
                        return v;
                    }
                    return seg.readValueUnderLock(e); // recheck
                }
                e = e.next;
            }
        }
        throw HashMapPro.invalidKey(this, key, false);
    }

    @Override
    public V g(K key, V defaultValue) {
        int hash = hash(key);
        return segmentFor(hash).get(key, hash, header, defaultValue);
    }

    /**
     * Tests if the specified object is a key in this table.
     *
     * @param key possible key
     * @return <tt>true</tt> if and only if the specified object
     * is a key in this table, as determined by the
     * <tt>equals</tt> method; <tt>false</tt> otherwise.
     * @throws NullPointerException if the specified key is null
     */
    public boolean containsKey(Object key) {
        int hash = hash(key);
        return segmentFor(hash).containsKey(key, hash);
    }

    /**
     * Returns <tt>true</tt> if this map maps one or more keys to the
     * specified value. Note: This method requires a full internal
     * traversal of the hash table, and so is much slower than
     * method <tt>containsKey</tt>.
     *
     * @param value value whose presence in this map is to be tested
     * @return <tt>true</tt> if this map maps one or more keys to the
     * specified value
     * @throws NullPointerException if the specified value is null
     */
    public boolean containsValue(Object value) {
        if (value == null)
            throw new NullPointerException();

        // See explanation of modCount use above

        final Segment<K,V>[] segments = this.segments;
        int[] mc = new int[segments.length];

        // Try a few times without locking
        for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
            int mcsum = 0;
            for (int i = 0; i < segments.length; ++i) {
                mcsum += mc[i] = segments[i].modCount;
                if (segments[i].containsValue(value))
                    return true;
            }
            boolean cleanSweep = true;
            if (mcsum != 0) {
                for (int i = 0; i < segments.length; ++i) {
                    if (mc[i] != segments[i].modCount) {
                        cleanSweep = false;
                        break;
                    }
                }
            }
            if (cleanSweep)
                return false;
        }
        // Resort to locking all segments
        for (int i = 0; i < segments.length; ++i)
            segments[i].lock();
        boolean found = false;
        try {
            for (int i = 0; i < segments.length; ++i) {
                if (segments[i].containsValue(value)) {
                    found = true;
                    break;
                }
            }
        } finally {
            for (int i = 0; i < segments.length; ++i)
                segments[i].unlock();
        }
        return found;
    }

    /**
     * Legacy method testing if some key maps into the specified value
     * in this table. This method is identical in functionality to
     * {@link #containsValue}, and exists solely to ensure
     * full compatibility with class {@link java.util.Hashtable},
     * which supported this method prior to introduction of the
     * Java Collections framework.
     *
     * @param value a value to search for
     * @return <tt>true</tt> if and only if some key maps to the
     * <tt>value</tt> argument in this table as
     * determined by the <tt>equals</tt> method;
     * <tt>false</tt> otherwise
     * @throws NullPointerException if the specified value is null
     */
    public boolean contains(Object value) {
        return containsValue(value);
    }

    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the <tt>get</tt> method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with <tt>key</tt>, or
     * <tt>null</tt> if there was no mapping for <tt>key</tt>
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        HashEntry<K, V> evictEntry = (HashEntry<K, V>) evictionPolicy.evictElement(header);
        if(evictEntry != null && size() >= maxSize)
            segmentFor(evictEntry.hash).remove(evictEntry.key, evictEntry.hash, evictEntry.value, header, null);

        int hash = hash(key);
        return segmentFor(hash).put(key, hash, value, false, header);
    }

    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     * or <tt>null</tt> if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V putIfAbsent(K key, V value) {
        if (value == null)
            throw new NullPointerException();

        HashEntry<K, V> evictEntry = (HashEntry<K, V>) evictionPolicy.evictElement(header);
        if(evictEntry != null && size() >= maxSize)
            segmentFor(evictEntry.hash).remove(evictEntry.key, evictEntry.hash, evictEntry.value, header, null);

        int hash = hash(key);
        return segmentFor(hash).put(key, hash, value, true, header);
    }

    /**
     * Copies all of the mappings from the specified map to this one.
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
            put(e.getKey(), e.getValue());
    }
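    /*
     * Bounded-map sketch (illustrative only, not taken from the original
     * sources): with maxSize = 2 and the FIFO policy used by the default
     * constructors (assumed here to evict the least recently inserted
     * entry), put() drops the eldest mapping before inserting a new one
     * once the limit has been reached.
     *
     *   ConcurrentLinkedHashMapPro<String,Integer> m =
     *       new ConcurrentLinkedHashMapPro<String,Integer>(16, 0.75f, 16, 2, new FIFOPolicy());
     *   m.put("a", 1);
     *   m.put("b", 2);
     *   m.put("c", 3); // evicts "a"; iteration order is now b, c
     */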
    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param key the key that needs to be removed
     * @return the previous value associated with <tt>key</tt>, or
     * <tt>null</tt> if there was no mapping for <tt>key</tt>
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        int hash = hash(key);
        return segmentFor(hash).remove(key, hash, null, header, null);
    }

    @Override
    public V r(K key) throws PageException {
        int hash = hash(key);
        return segmentFor(hash).removeE(this, key, hash, null, header);
    }

    @Override
    public V r(K key, V defaultValue) {
        int hash = hash(key);
        return segmentFor(hash).remove(key, hash, null, header, defaultValue);
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if the specified key is null
     */
    public boolean remove(Object key, Object value) {
        int hash = hash(key);
        if (value == null)
            return false;
        return segmentFor(hash).remove(key, hash, value, header, null) != null;
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if any of the arguments are null
     */
    public boolean replace(K key, V oldValue, V newValue) {
        if (oldValue == null || newValue == null)
            throw new NullPointerException();
        int hash = hash(key);
        return segmentFor(hash).replace(key, hash, oldValue, newValue);
    }

    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     * or <tt>null</tt> if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V replace(K key, V value) {
        if (value == null)
            throw new NullPointerException();
        int hash = hash(key);
        return segmentFor(hash).replace(key, hash, value);
    }

    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        for (int i = 0; i < segments.length; ++i)
            segments[i].clear(header);
        header.before = header.after = header;
    }

    /**
     * Returns <tt>true</tt> if this map should remove its eldest entry.
     * This method is invoked by <tt>put</tt> and <tt>putAll</tt> after
     * inserting a new entry into the map. It provides the implementor
     * with the opportunity to remove the eldest entry each time a new one
     * is added. This is useful if the map represents a cache: it allows
     * the map to reduce memory consumption by deleting stale entries.
     *
     * <p>Sample use: this override will allow the map to grow up to 100
     * entries and then delete the eldest entry each time a new entry is
     * added, maintaining a steady state of 100 entries.
     * <pre>
     *     private static final int MAX_ENTRIES = 100;
     *
     *     protected boolean removeEldestEntry(Map.Entry eldest) {
     *         return size() > MAX_ENTRIES;
     *     }
     * </pre>
     *
     * <p>This method typically does not modify the map in any way,
     * instead allowing the map to modify itself as directed by its
     * return value. It <i>is</i> permitted for this method to modify
     * the map directly, but if it does so, it <i>must</i> return
     * <tt>false</tt> (indicating that the map should not attempt any
     * further modification). The effects of returning <tt>true</tt>
     * after modifying the map from within this method are unspecified.
     *
     * <p>This implementation merely returns <tt>false</tt> (so that this
     * map acts like a normal map - the eldest element is never removed).
     *
     * @param eldest The least recently inserted entry in the map, or if
     * this is an access-ordered map, the least recently accessed
     * entry. This is the entry that will be removed if this
     * method returns <tt>true</tt>. If the map was empty prior
     * to the <tt>put</tt> or <tt>putAll</tt> invocation resulting
     * in this invocation, this will be the entry that was just
     * inserted; in other words, if the map contains a single
     * entry, the eldest entry is also the newest.
     * @return <tt>true</tt> if the eldest entry should be removed
     * from the map; <tt>false</tt> if it should be retained.
     */
    protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
        return false;
    }

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa. The set supports element
     * removal, which removes the corresponding mapping from this map,
     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
     * operations. It does not support the <tt>add</tt> or
     * <tt>addAll</tt> operations.
     *
     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     */
    public Set<K> keySet() {
        Set<K> ks = keySet;
        return (ks != null) ? ks : (keySet = new KeySet());
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa. The collection
     * supports element removal, which removes the corresponding
     * mapping from this map, via the <tt>Iterator.remove</tt>,
     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
     * <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not
     * support the <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     */
    public Collection<V> values() {
        Collection<V> vs = values;
        return (vs != null) ? vs : (values = new Values());
    }
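    /*
     * Iteration sketch (illustrative only): the view iterators are weakly
     * consistent and never throw ConcurrentModificationException, and
     * removal through an iterator is written through to the map. The map
     * "cache" and the predicate "shouldDrop" below are hypothetical.
     *
     *   Iterator<String> it = cache.keySet().iterator();
     *   while (it.hasNext()) {
     *       if (shouldDrop(it.next())) it.remove();
     *   }
     */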
    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa. The set supports element
     * removal, which removes the corresponding mapping from the map,
     * via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
     * operations. It does not support the <tt>add</tt> or
     * <tt>addAll</tt> operations.
     *
     * <p>The view's <tt>iterator</tt> is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     */
    public Set<Map.Entry<K,V>> entrySet() {
        Set<Map.Entry<K,V>> es = entrySet;
        return (es != null) ? es : (entrySet = new EntrySet());
    }

    /**
     * Returns an enumeration of the keys in this table.
     *
     * @return an enumeration of the keys in this table
     * @see #keySet()
     */
    public Enumeration<K> keys() {
        return new KeyIterator();
    }

    /**
     * Returns an enumeration of the values in this table.
     *
     * @return an enumeration of the values in this table
     * @see #values()
     */
    public Enumeration<V> elements() {
        return new ValueIterator();
    }

    /* ---------------- Iterator Support -------------- */

    abstract class HashIterator {
        HashEntry<K,V> nextEntry = null;
        HashEntry<K,V> lastReturned = null;

        HashEntry<K,V> snapshotHeader = null;

        HashIterator() {
            if(evictionPolicy.recordAccess(header, header) != null)
                snapshotHeader = header.cloneAll(header);
            else
                snapshotHeader = header;

            nextEntry = snapshotHeader.after;
        }

        public boolean hasMoreElements() { return hasNext(); }

        public boolean hasNext() { return nextEntry != snapshotHeader; }

        HashEntry<K,V> nextEntry() {
            if (nextEntry == snapshotHeader)
                throw new NoSuchElementException();
            HashEntry<K,V> e = lastReturned = nextEntry;
            nextEntry = e.after;

            return e;
        }

        public void remove() {
            if (lastReturned == null)
                throw new IllegalStateException();
            ConcurrentLinkedHashMapPro.this.remove(lastReturned.key);
            lastReturned = null;
        }

    }

    final class KeyIterator
            extends HashIterator
            implements Iterator<K>, Enumeration<K>
    {
        public K next() { return super.nextEntry().key; }
        public K nextElement() { return super.nextEntry().key; }
    }

    final class ValueIterator
            extends HashIterator
            implements Iterator<V>, Enumeration<V>
    {
        public V next() { return super.nextEntry().value; }
        public V nextElement() { return super.nextEntry().value; }
    }

    /**
     * Custom Entry class used by EntryIterator.next(), that relays
     * setValue changes to the underlying map.
     */
    final class WriteThroughEntry
            extends AbstractMap.SimpleEntry<K,V> implements Map.Entry<K,V> {

        private static final long serialVersionUID = 1573332674915851631L;

        WriteThroughEntry(K k, V v) {
            super(k,v);
        }

        /**
         * Set our entry's value and write through to the map. The
         * value to return is somewhat arbitrary here. Since a
         * WriteThroughEntry does not necessarily track asynchronous
         * changes, the most recent "previous" value could be
         * different from what we return (or could even have been
         * removed in which case the put will re-establish). We do not
         * and cannot guarantee more.
         */
        public V setValue(V value) {
            if (value == null) throw new NullPointerException();
            V v = super.setValue(value);
            ConcurrentLinkedHashMapPro.this.put(getKey(), value);
            return v;
        }
    }

    final class EntryIterator
            extends HashIterator
            implements Iterator<Map.Entry<K,V>>
    {
        public Map.Entry<K,V> next() {
            HashEntry<K,V> e = super.nextEntry();
            return new WriteThroughEntry(e.key, e.value);
        }
    }

    final class KeySet extends AbstractSet<K> {
        public Iterator<K> iterator() {
            return new KeyIterator();
        }
        public int size() {
            return ConcurrentLinkedHashMapPro.this.size();
        }
        public boolean isEmpty() {
            return ConcurrentLinkedHashMapPro.this.isEmpty();
        }
        public boolean contains(Object o) {
            return ConcurrentLinkedHashMapPro.this.containsKey(o);
        }
        public boolean remove(Object o) {
            return ConcurrentLinkedHashMapPro.this.remove(o) != null;
        }
        public void clear() {
            ConcurrentLinkedHashMapPro.this.clear();
        }
    }

    final class Values extends AbstractCollection<V> {
        public Iterator<V> iterator() {
            return new ValueIterator();
        }
        public int size() {
            return ConcurrentLinkedHashMapPro.this.size();
        }
        public boolean isEmpty() {
            return ConcurrentLinkedHashMapPro.this.isEmpty();
        }
        public boolean contains(Object o) {
            return ConcurrentLinkedHashMapPro.this.containsValue(o);
        }
        public void clear() {
            ConcurrentLinkedHashMapPro.this.clear();
        }
    }

    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
        public Iterator<Map.Entry<K,V>> iterator() {
            return new EntryIterator();
        }
        public boolean contains(Object o) {
            if (!(o instanceof Map.Entry<?,?>))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
            V v = ConcurrentLinkedHashMapPro.this.get(e.getKey());
            return v != null && v.equals(e.getValue());
        }
        public boolean remove(Object o) {
            if (!(o instanceof Map.Entry<?,?>))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
            return ConcurrentLinkedHashMapPro.this.remove(e.getKey(), e.getValue());
        }
        public int size() {
            return ConcurrentLinkedHashMapPro.this.size();
        }
        public boolean isEmpty() {
            return ConcurrentLinkedHashMapPro.this.isEmpty();
        }
        public void clear() {
            ConcurrentLinkedHashMapPro.this.clear();
        }
    }

    /* ---------------- Serialization Support -------------- */

    /**
     * Save the state of the <tt>ConcurrentLinkedHashMapPro</tt> instance to a
     * stream (i.e., serialize it).
     * @param s the stream
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s) throws IOException {
        s.defaultWriteObject();

        for (int k = 0; k < segments.length; ++k) {
            Segment<K,V> seg = segments[k];
            seg.lock();
            try {
                HashEntry<K,V>[] tab = seg.table;
                for (int i = 0; i < tab.length; ++i) {
                    for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
                        s.writeObject(e.key);
                        s.writeObject(e.value);
                    }
                }
            } finally {
                seg.unlock();
            }
        }
        s.writeObject(null);
        s.writeObject(null);
    }

    /**
     * Reconstitute the <tt>ConcurrentLinkedHashMapPro</tt> instance from a
     * stream (i.e., deserialize it).
     * @param s the stream
     */
    @SuppressWarnings("unchecked")
    private void readObject(java.io.ObjectInputStream s)
            throws IOException, ClassNotFoundException {
        s.defaultReadObject();

        // Initialize each segment to be minimally sized, and let grow.
        for (int i = 0; i < segments.length; ++i) {
            segments[i].setTable(new HashEntry[1]);
        }

        // Read the keys and values, and put the mappings in the table
        for (;;) {
            K key = (K) s.readObject();
            V value = (V) s.readObject();
            if (key == null)
                break;
            put(key, value);
        }
    }

    public interface Entry<K,V> extends Map.Entry<K, V> {

        /**
         * Returns the entry before this entry in the entry list.
         */
        Entry<K,V> getBefore();

        /**
         * Returns the entry after this entry in the entry list.
         */
        Entry<K,V> getAfter();

        /**
         * Returns the entry's access count.
         */
        long getAccessCount();

        /**
         * Returns the entry's creation time in milliseconds.
         */
        long getCreationTime();

        /**
         * Returns the entry's last access time in milliseconds.
         */
        long getLastAccessTime();

    }
}