Clover coverage report
Coverage timestamp: Sat Jul 7 2007 09:11:40 CEST
File stats: LOC: 2,157   NCLOC: 972   Methods: 90   Classes: 5
 
 Source file                        Conditionals  Statements  Methods  TOTAL
AbstractConcurrentReadCache.java    64.7%         66.5%       61.1%    65.4%
 1    /*
 2    * Copyright (c) 2002-2003 by OpenSymphony
 3    * All rights reserved.
 4    */
 5    /*
 6    File: AbstractConcurrentReadCache
 7   
 8    Written by Doug Lea. Adapted from JDK1.2 HashMap.java and Hashtable.java
 9    which carries the following copyright:
 10   
 11    * Copyright 1997 by Sun Microsystems, Inc.,
 12    * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A.
 13    * All rights reserved.
 14    *
 15    * This software is the confidential and proprietary information
 16    * of Sun Microsystems, Inc. ("Confidential Information"). You
 17    * shall not disclose such Confidential Information and shall use
 18    * it only in accordance with the terms of the license agreement
 19    * you entered into with Sun.
 20   
 21    This class is a modified version of ConcurrentReaderHashMap, which was written
 22    by Doug Lea (http://gee.cs.oswego.edu/dl/). The modifications were done
 23    by Pyxis Technologies. This is a base class for the OSCache module of the
 24    OpenSymphony project (www.opensymphony.com).
 25   
 26    History:
 27    Date Who What
 28    28oct1999 dl Created
 29    14dec1999 dl jmm snapshot
 30    19apr2000 dl use barrierLock
 31    12jan2001 dl public release
 32    Oct2001 abergevin@pyxis-tech.com
 33    Integrated persistence and outer algorithm support
 34    */
 35    package com.opensymphony.oscache.base.algorithm;
 36   
 37   
 38    /** OpenSymphony BEGIN */
 39    import com.opensymphony.oscache.base.CacheEntry;
 40    import com.opensymphony.oscache.base.persistence.CachePersistenceException;
 41    import com.opensymphony.oscache.base.persistence.PersistenceListener;
 42   
 43    import org.apache.commons.logging.Log;
 44    import org.apache.commons.logging.LogFactory;
 45   
 46    import java.io.IOException;
 47    import java.io.Serializable;
 48   
 49    import java.util.*;
 50   
 51    /**
 52    * A version of Hashtable that supports mostly-concurrent reading, but exclusive writing.
 53    * Because reads are not limited to periods
 54    * without writes, a concurrent reader policy is weaker than a classic
 55    * reader/writer policy, but is generally faster and allows more
 56    * concurrency. This class is a good choice especially for tables that
 57    * are mainly created by one thread during the start-up phase of a
 58    * program, and from then on, are mainly read (with perhaps occasional
 59    * additions or removals) in many threads. If you also need concurrency
 60    * among writes, consider instead using ConcurrentHashMap.
 61    * <p>
 62    *
 63    * Successful retrievals using get(key) and containsKey(key) usually
 64    * run without locking. Unsuccessful ones (i.e., when the key is not
 65    * present) do involve brief synchronization (locking). Also, the
 66    * size and isEmpty methods are always synchronized.
 67    *
 68    * <p> Because retrieval operations can ordinarily overlap with
 69    * writing operations (i.e., put, remove, and their derivatives),
 70    * retrievals can only be guaranteed to return the results of the most
 71    * recently <em>completed</em> operations holding upon their
 72    * onset. Retrieval operations may or may not return results
 73    * reflecting in-progress writing operations. However, the retrieval
 74    * operations do always return consistent results -- either those
 75    * holding before any single modification or after it, but never a
 76    * nonsense result. For aggregate operations such as putAll and
 77    * clear, concurrent reads may reflect insertion or removal of only
 78    * some entries. In those rare contexts in which you use a hash table
 79    * to synchronize operations across threads (for example, to prevent
 80    * reads until after clears), you should either encase operations
 81    * in synchronized blocks, or instead use java.util.Hashtable.
 82    *
 83    * <p>
 84    *
 85    * This class also supports optional guaranteed
 86    * exclusive reads, simply by surrounding a call within a synchronized
 87    * block, as in <br>
 88    * <code>AbstractConcurrentReadCache t; ... Object v; <br>
 89    * synchronized(t) { v = t.get(k); } </code> <br>
 90    *
 91    * But this is not usually necessary in practice. For
 92    * example, it is generally inefficient to write:
 93    *
 94    * <pre>
 95    * AbstractConcurrentReadCache t; ... // Inefficient version
 96    * Object key; ...
 97    * Object value; ...
 98    * synchronized(t) {
 99    *   if (!t.containsKey(key)) {
 100    *     t.put(key, value);
 101    *     // other code if not previously present
 102    *   }
 103    *   else {
 104    *     // other code if it was previously present
 105    *   }
 106    * }
 107    * </pre>
 108    * Instead, just take advantage of the fact that put returns
 109    * null if the key was not previously present:
 110    * <pre>
 111    * AbstractConcurrentReadCache t; ... // Use this instead
 112    * Object key; ...
 113    * Object value; ...
 114    * Object oldValue = t.put(key, value);
 115    * if (oldValue == null) {
 116    *   // other code if not previously present
 117    * }
 118    * else {
 119    *   // other code if it was previously present
 120    * }
 121    * </pre>
 122    * <p>
 123    *
 124    * Iterators and Enumerations (i.e., those returned by
 125    * keySet().iterator(), entrySet().iterator(), values().iterator(),
 126    * keys(), and elements()) return elements reflecting the state of the
 127    * hash table at some point at or since the creation of the
 128    * iterator/enumeration. They will return at most one instance of
 129    * each element (via next()/nextElement()), but might or might not
 130    * reflect puts and removes that have been processed since they were
 131    * created. They do <em>not</em> throw ConcurrentModificationException.
 132    * However, these iterators are designed to be used by only one
 133    * thread at a time. Sharing an iterator across multiple threads may
 134    * lead to unpredictable results if the table is being concurrently
 135    * modified. Again, you can ensure interference-free iteration by
 136    * enclosing the iteration in a synchronized block. <p>
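    * <p>
    * For example, an editor's sketch (not from the original docs;
    * <code>cache</code> stands for any concrete subclass instance):
    * <pre>
    * synchronized (cache) {
    *   for (Iterator it = cache.keySet().iterator(); it.hasNext();) {
    *     Object key = it.next();
    *     // no put or remove can interleave while the lock is held
    *   }
    * }
    * </pre>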
 137    *
 138    * This class may be used as a direct replacement for any use of
 139    * java.util.Hashtable that does not depend on readers being blocked
 140    * during updates. Like Hashtable but unlike java.util.HashMap,
 141    * this class does NOT allow <tt>null</tt> to be used as a key or
 142    * value. This class is also typically faster than ConcurrentHashMap
 143    * when there is usually only one thread updating the table, but
 144    * possibly many retrieving values from it.
 145    * <p>
 146    *
 147    * Implementation note: A slightly faster implementation of
 148    * this class will be possible once planned Java Memory Model
 149    * revisions are in place.
 150    *
 151    * <p>[<a href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>]
 152    **/
 153    public abstract class AbstractConcurrentReadCache extends AbstractMap implements Map, Cloneable, Serializable {
 154    /**
 155    * The default initial number of table slots for this table (32).
 156    * Used when not otherwise specified in constructor.
 157    **/
 158    public static final int DEFAULT_INITIAL_CAPACITY = 32;
 159   
 160    /**
 161    * The minimum capacity.
 162    * Used if a lower value is implicitly specified
 163    * by either of the constructors with arguments.
 164    * MUST be a power of two.
 165    */
 166    private static final int MINIMUM_CAPACITY = 4;
 167   
 168    /**
 169    * The maximum capacity.
 170    * Used if a higher value is implicitly specified
 171    * by either of the constructors with arguments.
 172    * MUST be a power of two <= 1<<30.
 173    */
 174    private static final int MAXIMUM_CAPACITY = 1 << 30;
 175   
 176    /**
 177    * The default load factor for this table.
 178    * Used when not otherwise specified in a constructor; the default is 0.75f.
 179    **/
 180    public static final float DEFAULT_LOAD_FACTOR = 0.75f;
 181   
 182    //OpenSymphony BEGIN (pretty long!)
 183    protected static final String NULL = "_nul!~";
 184   
 185    private static final Log log = LogFactory.getLog(AbstractConcurrentReadCache.class);
 186   
 187    /*
 188    The basic strategy is an optimistic-style scheme based on
 189    the guarantee that the hash table and its lists are always
 190    kept in a consistent enough state to be read without locking:
 191   
 192    * Read operations first proceed without locking, by traversing the
 193    apparently correct list of the apparently correct bin. If an
 194    entry is found, but not invalidated (value field null), it is
 195    returned. If not found, operations must recheck (after a memory
 196    barrier) to make sure they are using both the right list and
 197    the right table (which can change under resizes). If
 198    invalidated, reads must acquire main update lock to wait out
 199    the update, and then re-traverse.
 200   
 201    * All list additions are at the front of each bin, making it easy
 202    to check changes, and also fast to traverse. Entry next
 203    pointers are never assigned. Remove() builds new nodes when
 204    necessary to preserve this.
 205   
 206    * Remove() (also clear()) invalidates removed nodes to alert read
 207    operations that they must wait out the full modifications.
 208   
 209    */
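    /*
    Editor's sketch of the optimistic read path described above (condensed
    from get() below; illustrative only, not the actual implementation):

        Entry[] tab = table;                        // optimistic, unsynchronized
        Entry first = tab[hash & (tab.length - 1)];
        for (Entry e = first; e != null; e = e.next) {
            if (e.hash == hash && key.equals(e.key)) {
                return e.value;                     // null here means invalidated
            }
        }
        tab = getTableForReading();                 // memory barrier, then recheck bin
    */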
 210   
 211    /**
 212    * Lock used only for its memory effects. We use a Boolean
 213    * because it is serializable, and we create a new one because
 214    * we need a unique object for each cache instance.
 215    **/
 216    protected final Boolean barrierLock = new Boolean(true);
 217   
 218    /**
 219    * field written to only to guarantee lock ordering.
 220    **/
 221    protected transient Object lastWrite;
 222   
 223    /**
 224    * The hash table data.
 225    */
 226    protected transient Entry[] table;
 227   
 228    /**
 229    * The total number of mappings in the hash table.
 230    */
 231    protected transient int count;
 232   
 233    /**
 234    * Persistence listener.
 235    */
 236    protected transient PersistenceListener persistenceListener = null;
 237   
 238    /**
 239    * Use memory cache or not.
 240    */
 241    protected boolean memoryCaching = true;
 242   
 243    /**
 244    * Use unlimited disk caching.
 245    */
 246    protected boolean unlimitedDiskCache = false;
 247   
 248    /**
 249    * The load factor for the hash table.
 250    *
 251    * @serial
 252    */
 253    protected float loadFactor;
 254   
 255    /**
 256    * Default cache capacity (number of entries).
 257    */
 258    protected final int DEFAULT_MAX_ENTRIES = 100;
 259   
 260    /**
 261    * Max number of elements in the cache when it is considered unlimited.
 262    */
 263    protected final int UNLIMITED = 2147483646;
 264    protected transient Collection values = null;
 265   
 266    /**
 267    * A HashMap containing the group information.
 268    * Each entry uses the group name as the key, and holds a
 269    * <code>Set</code> containing the keys of all
 270    * the cache entries that belong to that particular group.
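    * <p>
    * For example (editor's illustration; group and key names hypothetical):
    * <pre>
    * "news"   -> { "newsItem1", "newsItem2" }
    * "sports" -> { "sportsItem1" }
    * </pre>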
 271    */
 272    protected HashMap groups = new HashMap();
 273    protected transient Set entrySet = null;
 274   
 275    // Views
 276    protected transient Set keySet = null;
 277   
 278    /**
 279    * Cache capacity (number of entries).
 280    */
 281    protected int maxEntries = DEFAULT_MAX_ENTRIES;
 282   
 283    /**
 284    * The table is rehashed when its size exceeds this threshold.
 285    * (The value of this field is always (int)(capacity * loadFactor).)
 286    *
 287    * @serial
 288    */
 289    protected int threshold;
 290   
 291    /**
 292    * Use overflow persistence caching.
 293    */
 294    private boolean overflowPersistence = false;
 295   
 296    /**
 297    * Constructs a new, empty map with the specified initial capacity and the specified load factor.
 298    *
 299    * @param initialCapacity the initial capacity
 300    * The actual initial capacity is rounded up to the next power of two.
 301    * @param loadFactor the load factor of the AbstractConcurrentReadCache
 302    * @throws IllegalArgumentException if the initial maximum number
 303    * of elements is less
 304    * than zero, or if the load factor is nonpositive.
 305    */
 306  265 public AbstractConcurrentReadCache(int initialCapacity, float loadFactor) {
 307  265 if (loadFactor <= 0) {
 308  0 throw new IllegalArgumentException("Illegal Load factor: " + loadFactor);
 309    }
 310   
 311  265 this.loadFactor = loadFactor;
 312   
 313  265 int cap = p2capacity(initialCapacity);
 314  265 table = new Entry[cap];
 315  265 threshold = (int) (cap * loadFactor);
 316    }
 317   
 318    /**
 319    * Constructs a new, empty map with the specified initial capacity and default load factor.
 320    *
 321    * @param initialCapacity the initial capacity of the
 322    * AbstractConcurrentReadCache.
 323    * @throws IllegalArgumentException if the initial maximum number
 324    * of elements is less
 325    * than zero.
 326    */
 327  0 public AbstractConcurrentReadCache(int initialCapacity) {
 328  0 this(initialCapacity, DEFAULT_LOAD_FACTOR);
 329    }
 330   
 331    /**
 332    * Constructs a new, empty map with a default initial capacity and load factor.
 333    */
 334  265 public AbstractConcurrentReadCache() {
 335  265 this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
 336    }
 337   
 338    /**
 339    * Constructs a new map with the same mappings as the given map.
 340    * The map is created with a capacity of twice the number of mappings in
 341    * the given map or 11 (whichever is greater), and a default load factor.
 342    */
 343  0 public AbstractConcurrentReadCache(Map t) {
 344  0 this(Math.max(2 * t.size(), 11), DEFAULT_LOAD_FACTOR);
 345  0 putAll(t);
 346    }
 347   
 348    /**
 349    * Returns <tt>true</tt> if this map contains no key-value mappings.
 350    *
 351    * @return <tt>true</tt> if this map contains no key-value mappings.
 352    */
 353  0 public synchronized boolean isEmpty() {
 354  0 return count == 0;
 355    }
 356   
 357    /**
 358    * Returns a set of the cache keys that reside in a particular group.
 359    *
 360    * @param groupName The name of the group to retrieve.
 361    * @return a set containing all of the keys of cache entries that belong
 362    * to this group, or <code>null</code> if the group was not found.
 363    * @exception NullPointerException if the groupName is <code>null</code>.
 364    */
 365  45 public Set getGroup(String groupName) {
 366  45 if (log.isDebugEnabled()) {
 367  0 log.debug("getGroup called (group=" + groupName + ")");
 368    }
 369   
 370  45 Set groupEntries = null;
 371   
 372  45 if (memoryCaching && (groups != null)) {
 373  27 groupEntries = (Set) getGroupForReading(groupName);
 374    }
 375   
 376  45 if (groupEntries == null) {
 377    // Not in the map, try the persistence layer
 378  21 groupEntries = persistRetrieveGroup(groupName);
 379    }
 380   
 381  45 return groupEntries;
 382    }
 383   
 384    /**
 385    * Set the cache capacity
 386    */
 387  120 public void setMaxEntries(int newLimit) {
 388  120 if (newLimit > 0) {
 389  100 maxEntries = newLimit;
 390   
 391  100 synchronized (this) { // because remove() isn't synchronized
 392   
 393  100 while (size() > maxEntries) {
 394  20 remove(removeItem(), false, false);
 395    }
 396    }
 397    } else {
 398    // Capacity must be at least 1
 399  20 throw new IllegalArgumentException("Cache maximum number of entries must be at least 1");
 400    }
 401    }
 402   
 403    /**
 404    * Retrieve the cache capacity (number of entries).
 405    */
 406  40 public int getMaxEntries() {
 407  40 return maxEntries;
 408    }
 409   
 410    /**
 411    * Sets the memory caching flag.
 412    */
 413  265 public void setMemoryCaching(boolean memoryCaching) {
 414  265 this.memoryCaching = memoryCaching;
 415    }
 416   
 417    /**
 418    * Check if memory caching is used.
 419    */
 420  60 public boolean isMemoryCaching() {
 421  60 return memoryCaching;
 422    }
 423   
 424    /**
 425    * Set the persistence listener to use.
 426    */
 427  196 public void setPersistenceListener(PersistenceListener listener) {
 428  196 this.persistenceListener = listener;
 429    }
 430   
 431    /**
 432    * Get the persistence listener.
 433    */
 434  80 public PersistenceListener getPersistenceListener() {
 435  80 return persistenceListener;
 436    }
 437   
 438    /**
 439    * Sets the unlimited disk caching flag.
 440    */
 441  235 public void setUnlimitedDiskCache(boolean unlimitedDiskCache) {
 442  235 this.unlimitedDiskCache = unlimitedDiskCache;
 443    }
 444   
 445    /**
 446    * Check if we use unlimited disk cache.
 447    */
 448  0 public boolean isUnlimitedDiskCache() {
 449  0 return unlimitedDiskCache;
 450    }
 451   
 452    /**
 453    * Check if we use overflowPersistence
 454    *
 455    * @return Returns the overflowPersistence.
 456    */
 457  20 public boolean isOverflowPersistence() {
 458  20 return this.overflowPersistence;
 459    }
 460   
 461    /**
 462    * Sets the overflowPersistence flag
 463    *
 464    * @param overflowPersistence The overflowPersistence to set.
 465    */
 466  305 public void setOverflowPersistence(boolean overflowPersistence) {
 467  305 this.overflowPersistence = overflowPersistence;
 468    }
 469   
 470    /**
 471    * Return the number of slots in this table.
 472    **/
 473  30 public synchronized int capacity() {
 474  30 return table.length;
 475    }
 476   
 477    /**
 478    * Removes all mappings from this map.
 479    */
 480  345 public synchronized void clear() {
 481  345 Entry[] tab = table;
 482   
 483  345 for (int i = 0; i < tab.length; ++i) {
 484    // must invalidate all to force concurrent get() calls to wait and then retry
 485  11040 for (Entry e = tab[i]; e != null; e = e.next) {
 486  335 e.value = null;
 487   
 488    /** OpenSymphony BEGIN */
 489  335 itemRemoved(e.key);
 490   
 491    /** OpenSymphony END */
 492    }
 493   
 494  11040 tab[i] = null;
 495    }
 496   
 497    // Clean out the entire disk cache
 498  345 persistClear();
 499   
 500  345 count = 0;
 501  345 recordModification(tab);
 502    }
 503   
 504    /**
 505    * Returns a shallow copy of this
 506    * <tt>AbstractConcurrentReadCache</tt> instance: the keys and
 507    * values themselves are not cloned.
 508    *
 509    * @return a shallow copy of this map.
 510    */
 511  0 public synchronized Object clone() {
 512  0 try {
 513  0 AbstractConcurrentReadCache t = (AbstractConcurrentReadCache) super.clone();
 514  0 t.keySet = null;
 515  0 t.entrySet = null;
 516  0 t.values = null;
 517   
 518  0 Entry[] tab = table;
 519  0 t.table = new Entry[tab.length];
 520   
 521  0 Entry[] ttab = t.table;
 522   
 523  0 for (int i = 0; i < tab.length; ++i) {
 524  0 Entry first = tab[i];
 525   
 526  0 if (first != null) {
 527  0 ttab[i] = (Entry) (first.clone());
 528    }
 529    }
 530   
 531  0 return t;
 532    } catch (CloneNotSupportedException e) {
 533    // this shouldn't happen, since we are Cloneable
 534  0 throw new InternalError();
 535    }
 536    }
 537   
 538    /**
 539    * Tests if some key maps into the specified value in this table.
 540    * This operation is more expensive than the <code>containsKey</code>
 541    * method.<p>
 542    *
 543    * Note that this method is identical in functionality to containsValue,
 544    * (which is part of the Map interface in the collections framework).
 545    *
 546    * @param value a value to search for.
 547    * @return <code>true</code> if and only if some key maps to the
 548    * <code>value</code> argument in this table as
 549    * determined by the <tt>equals</tt> method;
 550    * <code>false</code> otherwise.
 551    * @exception NullPointerException if the value is <code>null</code>.
 552    * @see #containsKey(Object)
 553    * @see #containsValue(Object)
 554    * @see Map
 555    */
 556  60 public boolean contains(Object value) {
 557  60 return containsValue(value);
 558    }
 559   
 560    /**
 561    * Tests if the specified object is a key in this table.
 562    *
 563    * @param key possible key.
 564    * @return <code>true</code> if and only if the specified object
 565    * is a key in this table, as determined by the
 566    * <tt>equals</tt> method; <code>false</code> otherwise.
 567    * @exception NullPointerException if the key is
 568    * <code>null</code>.
 569    * @see #contains(Object)
 570    */
 571  30 public boolean containsKey(Object key) {
 572  30 return get(key) != null;
 573   
 574    /** OpenSymphony BEGIN */
 575   
 576    // TODO: Also check the persistence?
 577   
 578    /** OpenSymphony END */
 579    }
 580   
 581    /**
 582    * Returns <tt>true</tt> if this map maps one or more keys to the
 583    * specified value. Note: This method requires a full internal
 584    * traversal of the hash table, and so is much slower than
 585    * method <tt>containsKey</tt>.
 586    *
 587    * @param value value whose presence in this map is to be tested.
 588    * @return <tt>true</tt> if this map maps one or more keys to the
 589    * specified value.
 590    * @exception NullPointerException if the value is <code>null</code>.
 591    */
 592  60 public boolean containsValue(Object value) {
 593  60 if (value == null) {
 594  0 throw new NullPointerException();
 595    }
 596   
 597  60 Entry[] tab = getTableForReading();
 598   
 599  60 for (int i = 0; i < tab.length; ++i) {
 600  1650 for (Entry e = tab[i]; e != null; e = e.next) {
 601  30 Object v = e.value;
 602   
 603  30 if ((v != null) && value.equals(v)) {
 604  30 return true;
 605    }
 606    }
 607    }
 608   
 609  30 return false;
 610    }
 611   
 612    /**
 613    * Returns an enumeration of the values in this table.
 614    * Use the Enumeration methods on the returned object to fetch the elements
 615    * sequentially.
 616    *
 617    * @return an enumeration of the values in this table.
 618    * @see java.util.Enumeration
 619    * @see #keys()
 620    * @see #values()
 621    * @see Map
 622    */
 623  30 public Enumeration elements() {
 624  30 return new ValueIterator();
 625    }
 626   
 627    /**
 628    * Returns a collection view of the mappings contained in this map.
 629    * Each element in the returned collection is a <tt>Map.Entry</tt>. The
 630    * collection is backed by the map, so changes to the map are reflected in
 631    * the collection, and vice-versa. The collection supports element
 632    * removal, which removes the corresponding mapping from the map, via the
 633    * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
 634    * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
 635    * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
 636    *
 637    * @return a collection view of the mappings contained in this map.
 638    */
 639  30 public Set entrySet() {
 640  30 Set es = entrySet;
 641   
 642  30 if (es != null) {
 643  0 return es;
 644    } else {
 645  30 return entrySet = new AbstractSet() {
 646  30 public Iterator iterator() {
 647  30 return new HashIterator();
 648    }
 649   
 650  0 public boolean contains(Object o) {
 651  0 if (!(o instanceof Map.Entry)) {
 652  0 return false;
 653    }
 654   
 655  0 Map.Entry entry = (Map.Entry) o;
 656  0 Object key = entry.getKey();
 657  0 Object v = AbstractConcurrentReadCache.this.get(key);
 658   
 659  0 return (v != null) && v.equals(entry.getValue());
 660    }
 661   
 662  0 public boolean remove(Object o) {
 663  0 if (!(o instanceof Map.Entry)) {
 664  0 return false;
 665    }
 666   
 667  0 return AbstractConcurrentReadCache.this.findAndRemoveEntry((Map.Entry) o);
 668    }
 669   
 670  0 public int size() {
 671  0 return AbstractConcurrentReadCache.this.size();
 672    }
 673   
 674  0 public void clear() {
 675  0 AbstractConcurrentReadCache.this.clear();
 676    }
 677    };
 678    }
 679    }
 680   
 681    /**
 682    * Returns the value to which the specified key is mapped in this table.
 683    *
 684    * @param key a key in the table.
 685    * @return the value to which the key is mapped in this table;
 686    * <code>null</code> if the key is not mapped to any value in
 687    * this table.
 688    * @exception NullPointerException if the key is
 689    * <code>null</code>.
 690    * @see #put(Object, Object)
 691    */
 692  4628714 public Object get(Object key) {
 693  4631277 if (log.isDebugEnabled()) {
 694  0 log.debug("get called (key=" + key + ")");
 695    }
 696   
 697    // throw null pointer exception if key null
 698  4631277 int hash = hash(key);
 699   
 700    /*
 701    Start off at the apparently correct bin. If entry is found, we
 702    need to check after a barrier anyway. If not found, we need a
 703    barrier to check if we are actually in right bin. So either
 704    way, we encounter only one barrier unless we need to retry.
 705    And we only need to fully synchronize if there have been
 706    concurrent modifications.
 707    */
 708  4631247 Entry[] tab = table;
 709  4631247 int index = hash & (tab.length - 1);
 710  4630432 Entry first = tab[index];
 711  4631247 Entry e = first;
 712   
 713  4631247 for (;;) {
 714  4687757 if (e == null) {
 715    // If key apparently not there, check to
 716    // make sure this was a valid read
 717  120461 tab = getTableForReading();
 718   
 719  120461 if (first == tab[index]) {
 720    /** OpenSymphony BEGIN */
 721   
 722    /* Previous code
 723    return null;*/
 724   
 725    // Not in the table, try persistence
 726  120461 Object value = persistRetrieve(key);
 727   
 728  120461 if (value != null) {
 729    // Update the map, but don't persist the data
 730  43 put(key, value, false);
 731    }
 732   
 733  120461 return value;
 734   
 735    /** OpenSymphony END */
 736    } else {
 737    // Wrong list -- must restart traversal at new first
 738  0 e = first = tab[index = hash & (tab.length - 1)];
 739    }
 740    }
 741    // checking for pointer equality first wins in most applications
 742  4567296 else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
 743  4510786 Object value = e.value;
 744   
 745  4510786 if (value != null) {
 746    /** OpenSymphony BEGIN */
 747   
 748    /* Previous code
 749    return value;*/
 750  4510786 if (NULL.equals(value)) {
 751    // Memory cache disabled, use disk
 752  256 value = persistRetrieve(e.key);
 753   
 754  256 if (value != null) {
 755  254 itemRetrieved(key);
 756    }
 757   
 758  256 return value; // fix [CACHE-13]
 759    } else {
 760  4510530 itemRetrieved(key);
 761   
 762  4510530 return value;
 763    }
 764   
 765    /** OpenSymphony END */
 766    }
 767   
 768    // Entry was invalidated during deletion. But it could
 769    // have been re-inserted, so we must retraverse.
 770    // To avoid useless contention, get lock to wait out modifications
 771    // before retraversing.
 772  0 synchronized (this) {
 773  0 tab = table;
 774    }
 775   
 776  0 e = first = tab[index = hash & (tab.length - 1)];
 777    } else {
 778  56510 e = e.next;
 779    }
 780    }
 781    }
 782   
 783    /**
 784    * Returns a set view of the keys contained in this map.
 785    * The set is backed by the map, so changes to the map are reflected in the set, and
 786    * vice-versa. The set supports element removal, which removes the
 787    * corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
 788    * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
 789    * <tt>clear</tt> operations. It does not support the <tt>add</tt> or
 790    * <tt>addAll</tt> operations.
 791    *
 792    * @return a set view of the keys contained in this map.
 793    */
 794  30 public Set keySet() {
 795  30 Set ks = keySet;
 796   
 797  30 if (ks != null) {
 798  10 return ks;
 799    } else {
 800  20 return keySet = new AbstractSet() {
 801  30 public Iterator iterator() {
 802  30 return new KeyIterator();
 803    }
 804   
 805  0 public int size() {
 806  0 return AbstractConcurrentReadCache.this.size();
 807    }
 808   
 809  0 public boolean contains(Object o) {
 810  0 return AbstractConcurrentReadCache.this.containsKey(o);
 811    }
 812   
 813  0 public boolean remove(Object o) {
 814  0 return AbstractConcurrentReadCache.this.remove(o) != null;
 815    }
 816   
 817  0 public void clear() {
 818  0 AbstractConcurrentReadCache.this.clear();
 819    }
 820    };
 821    }
 822    }
 823   
 824    /**
 825    * Returns an enumeration of the keys in this table.
 826    *
 827    * @return an enumeration of the keys in this table.
 828    * @see Enumeration
 829    * @see #elements()
 830    * @see #keySet()
 831    * @see Map
 832    */
 833  0 public Enumeration keys() {
 834  0 return new KeyIterator();
 835    }
 836   
 837    /**
 838    * Return the load factor
 839    **/
 840  0 public float loadFactor() {
 841  0 return loadFactor;
 842    }
 843   
 844    /**
 845    * Maps the specified <code>key</code> to the specified <code>value</code> in this table.
 846    * Neither the key nor the
 847    * value can be <code>null</code>. <p>
 848    *
 849    * The value can be retrieved by calling the <code>get</code> method
 850    * with a key that is equal to the original key.
 851    *
 852    * @param key the table key.
 853    * @param value the value.
 854    * @return the previous value of the specified key in this table,
 855    * or <code>null</code> if it did not have one.
 856    * @exception NullPointerException if the key or value is
 857    * <code>null</code>.
 858    * @see Object#equals(Object)
 859    * @see #get(Object)
 860    */
 861    /** OpenSymphony BEGIN */
 862  66054 public Object put(Object key, Object value) {
 863    // Call the internal put using persistence
 864  66054 return put(key, value, true);
 865    }
 866   
 867    /**
 868    * Copies all of the mappings from the specified map to this one.
 869    *
 870    * These mappings replace any mappings that this map had for any of the
 871    * keys currently in the specified Map.
 872    *
 873    * @param t Mappings to be stored in this map.
 874    */
 875  0 public synchronized void putAll(Map t) {
 876  0 for (Iterator it = t.entrySet().iterator(); it.hasNext();) {
 877  0 Map.Entry entry = (Map.Entry) it.next();
 878  0 Object key = entry.getKey();
 879  0 Object value = entry.getValue();
 880  0 put(key, value);
 881    }
 882    }
 883   
 884    /**
 885    * Removes the key (and its corresponding value) from this table.
 886    * This method does nothing if the key is not in the table.
 887    *
 888    * @param key the key that needs to be removed.
 889    * @return the value to which the key had been mapped in this table,
 890    * or <code>null</code> if the key did not have a mapping.
 891    */
 892    /** OpenSymphony BEGIN */
 893  70 public Object remove(Object key) {
 894  70 return remove(key, true, false);
 895    }
 896   
 897    /**
 898    * Like <code>remove(Object)</code>, but ensures that the entry will be removed from the persistent store, too,
 899    * even if overflowPersistence or unlimitedDiskCache are true.
 900    *
 901    * @param key the key that needs to be removed.
 902    * @return the value to which the key had been mapped in this table,
 903    * or <code>null</code> if the key did not have a mapping.
 904    */
 905  0 public Object removeForce(Object key) {
 906  0 return remove(key, true, true);
 907    }
 908   
 909    /**
 910    * Returns the total number of cache entries held in this map.
 911    *
 912    * @return the number of key-value mappings in this map.
 913    */
 914  61288 public synchronized int size() {
 915  61288 return count;
 916    }
 917   
 918    /**
 919    * Returns a collection view of the values contained in this map.
 920    * The collection is backed by the map, so changes to the map are reflected in
 921    * the collection, and vice-versa. The collection supports element
 922    * removal, which removes the corresponding mapping from this map, via the
 923    * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
 924    * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
 925    * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
 926    *
 927    * @return a collection view of the values contained in this map.
 928    */
 929  0 public Collection values() {
 930  0 Collection vs = values;
 931   
 932  0 if (vs != null) {
 933  0 return vs;
 934    } else {
 935  0 return values = new AbstractCollection() {
 936  0 public Iterator iterator() {
 937  0 return new ValueIterator();
 938    }
 939   
 940  0 public int size() {
 941  0 return AbstractConcurrentReadCache.this.size();
 942    }
 943   
 944  0 public boolean contains(Object o) {
 945  0 return AbstractConcurrentReadCache.this.containsValue(o);
 946    }
 947   
 948  0 public void clear() {
 949  0 AbstractConcurrentReadCache.this.clear();
 950    }
 951    };
 952    }
 953    }
 954   
 955    /**
 956    * Get ref to group.
 957    * CACHE-127 Synchronized copying of the group entry set since
 958    * the new HashSet(Collection c) constructor uses the iterator.
 959    * This may slow things down but it is better than a
 960    * ConcurrentModificationException. We might have to revisit the
 961    * code if performance is too adversely impacted.
 962    **/
 963  27 protected synchronized final Set getGroupForReading(String groupName) {
 964  27 Set group = (Set) getGroupsForReading().get(groupName);
 965  3 if (group == null) return null;
 966  24 return new HashSet(group);
 967    }
 968   
 969    /**
 970    * Get ref to groups.
 971    * The reference and the cells it
 972    * accesses will be at least as fresh as from last
 973    * use of barrierLock
 974    **/
 975  67 protected final Map getGroupsForReading() {
 976  67 synchronized (barrierLock) {
 977  67 return groups;
 978    }
 979    }
 980   
 981    /**
 982    * Get ref to table; the reference and the cells it
 983    * accesses will be at least as fresh as from last
 984    * use of barrierLock
 985    **/
 986  120611 protected final Entry[] getTableForReading() {
 987  120611 synchronized (barrierLock) {
 988  120611 return table;
 989    }
 990    }
 991   
 992    /**
 993    * Force a memory synchronization that will cause
 994    * all readers to see table. Call only when already
 995    * holding main synch lock.
 996    **/
 997  70433 protected final void recordModification(Object x) {
 998  70433 synchronized (barrierLock) {
 999  70433 lastWrite = x;
 1000    }
 1001    }
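    /*
    Editor's note: recordModification() and getTableForReading() form a
    publish/acquire pair built from synchronized blocks on barrierLock.
    A writer, already holding the main lock, publishes with e.g.

        table = newMap;
        recordModification(newMap);   // flushes writes for readers

    and an unsynchronized reader that misses can call getTableForReading()
    to obtain a table reference at least as fresh as that last write.
    */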
 1002   
 1003    /**
 1004    * Helper method for entrySet remove.
 1005    **/
 1006  0 protected synchronized boolean findAndRemoveEntry(Map.Entry entry) {
 1007  0 Object key = entry.getKey();
 1008  0 Object v = get(key);
 1009   
 1010  0 if ((v != null) && v.equals(entry.getValue())) {
 1011  0 remove(key);
 1012   
 1013  0 return true;
 1014    } else {
 1015  0 return false;
 1016    }
 1017    }
 1018   
 1019    /**
 1020    * Remove an object from the persistence layer.
 1021    * @param key The key of the object to remove
 1022    */
 1023  9072 protected void persistRemove(Object key) {
 1024  9072 if (log.isDebugEnabled()) {
 1025  0 log.debug("PersistRemove called (key=" + key + ")");
 1026    }
 1027   
 1028  9072 if (persistenceListener != null) {
 1029  42 try {
 1030  42 persistenceListener.remove((String) key);
 1031    } catch (CachePersistenceException e) {
 1032  0 log.error("[oscache] Exception removing cache entry with key '" + key + "' from persistence", e);
 1033    }
 1034    }
 1035    }
 1036   
 1037    /**
 1038    * Removes a cache group using the persistence listener.
 1039    * @param groupName The name of the group to remove
 1040    */
 1041  20 protected void persistRemoveGroup(String groupName) {
 1042  20 if (log.isDebugEnabled()) {
 1043  0 log.debug("persistRemoveGroup called (groupName=" + groupName + ")");
 1044    }
 1045   
 1046  20 if (persistenceListener != null) {
 1047  20 try {
 1048  20 persistenceListener.removeGroup(groupName);
 1049    } catch (CachePersistenceException e) {
 1050  0 log.error("[oscache] Exception removing group " + groupName, e);
 1051    }
 1052    }
 1053    }
 1054   
 1055    /**
 1056    * Retrieve an object from the persistence listener.
 1057    * @param key The key of the object to retrieve
 1058    */
 1059  120785 protected Object persistRetrieve(Object key) {
 1060  120785 if (log.isDebugEnabled()) {
 1061  0 log.debug("persistRetrieve called (key=" + key + ")");
 1062    }
 1063   
 1064  120785 Object entry = null;
 1065   
 1066  120785 if (persistenceListener != null) {
 1067  647 try {
 1068  647 entry = persistenceListener.retrieve((String) key);
 1069    } catch (CachePersistenceException e) {
 1070    /**
 1071    * It is normal that we get an exception occasionally.
 1072    * It happens when the item is invalidated (written or removed)
 1073    * during read. The logic is constructed so that read is retried.
 1074    */
 1075    }
 1076    }
 1077   
 1078  120785 return entry;
 1079    }
 1080   
 1081    /**
 1082    * Retrieves a cache group using the persistence listener.
 1083    * @param groupName The name of the group to retrieve
 1084    */
 1085  307 protected Set persistRetrieveGroup(String groupName) {
 1086  307 if (log.isDebugEnabled()) {
 1087  0 log.debug("persistRetrieveGroup called (groupName=" + groupName + ")");
 1088    }
 1089   
 1090  307 if (persistenceListener != null) {
 1091  248 try {
 1092  248 return persistenceListener.retrieveGroup(groupName);
 1093    } catch (CachePersistenceException e) {
 1094  0 log.error("[oscache] Exception retrieving group " + groupName, e);
 1095    }
 1096    }
 1097   
 1098  59 return null;
 1099    }
 1100   
 1101    /**
 1102    * Store an object in the cache using the persistence listener.
 1103    * @param key The object key
 1104    * @param obj The object to store
 1105    */
 1106  65708 protected void persistStore(Object key, Object obj) {
 1107  65708 if (log.isDebugEnabled()) {
 1108  0 log.debug("persistStore called (key=" + key + ")");
 1109    }
 1110   
 1111  65708 if (persistenceListener != null) {
 1112  362 try {
 1113  362 persistenceListener.store((String) key, obj);
 1114    } catch (CachePersistenceException e) {
 1115  0 log.error("[oscache] Exception persisting " + key, e);
 1116    }
 1117    }
 1118    }
 1119   
 1120    /**
 1121    * Creates or updates a cache group using the persistence listener.
 1122    * @param groupName The name of the group to update
 1123    * @param group The entries for the group
 1124    */
 1125  260 protected void persistStoreGroup(String groupName, Set group) {
 1126  260 if (log.isDebugEnabled()) {
 1127  0 log.debug("persistStoreGroup called (groupName=" + groupName + ")");
 1128    }
 1129   
 1130  260 if (persistenceListener != null) {
 1131  208 try {
 1132  208 if ((group == null) || group.isEmpty()) {
 1133  0 persistenceListener.removeGroup(groupName);
 1134    } else {
 1135  208 persistenceListener.storeGroup(groupName, group);
 1136    }
 1137    } catch (CachePersistenceException e) {
 1138  0 log.error("[oscache] Exception persisting group " + groupName, e);
 1139    }
 1140    }
 1141    }
 1142   
 1143    /**
 1144    * Removes the entire cache from persistent storage.
 1145    */
 1146  345 protected void persistClear() {
 1147  345 if (log.isDebugEnabled()) {
 1148  0 log.debug("persistClear called");
 1150    }
 1151   
 1152  345 if (persistenceListener != null) {
 1153  168 try {
 1154  168 persistenceListener.clear();
 1155    } catch (CachePersistenceException e) {
 1156  0 log.error("[oscache] Exception clearing persistent cache", e);
 1157    }
 1158    }
 1159    }
 1160   
 1161    /**
 1162    * Notify the underlying implementation that an item was put in the cache.
 1163    *
 1164    * @param key The cache key of the item that was put.
 1165    */
 1166    protected abstract void itemPut(Object key);
 1167   
 1168    /**
 1169    * Notify any underlying algorithm that an item has been retrieved from the cache.
 1170    *
 1171    * @param key The cache key of the item that was retrieved.
 1172    */
 1173    protected abstract void itemRetrieved(Object key);
 1174   
 1175    /**
 1176    * Notify the underlying implementation that an item was removed from the cache.
 1177    *
 1178    * @param key The cache key of the item that was removed.
 1179    */
 1180    protected abstract void itemRemoved(Object key);
 1181   
 1182    /**
 1183    * The cache has reached its capacity and an item needs to be removed
 1184    * (typically according to an algorithm such as LRU or FIFO).
 1185    *
 1186    * @return The key of whichever item was removed.
 1187    */
 1188    protected abstract Object removeItem();
 1189   
 1190    /**
 1191    * Reconstitute the <tt>AbstractConcurrentReadCache</tt>
 1192    * instance from a stream (i.e.,
 1193    * deserialize it).
 1194    */
 1195  0 private synchronized void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
 1196    // Read in the threshold, loadfactor, and any hidden stuff
 1197  0 s.defaultReadObject();
 1198   
 1199    // Read in number of buckets and allocate the bucket array;
 1200  0 int numBuckets = s.readInt();
 1201  0 table = new Entry[numBuckets];
 1202   
 1203    // Read in size (number of Mappings)
 1204  0 int size = s.readInt();
 1205   
 1206    // Read the keys and values, and put the mappings in the table
 1207  0 for (int i = 0; i < size; i++) {
 1208  0 Object key = s.readObject();
 1209  0 Object value = s.readObject();
 1210  0 put(key, value);
 1211    }
 1212    }
 1213   
 1214    /**
 1215    * Rehashes the contents of this map into a new table with a larger capacity.
 1216    * This method is called automatically when the
 1217    * number of keys in this map exceeds its capacity and load factor.
 1218    */
 1219  80 protected void rehash() {
 1220  80 Entry[] oldMap = table;
 1221  80 int oldCapacity = oldMap.length;
 1222   
 1223  80 if (oldCapacity >= MAXIMUM_CAPACITY) {
 1224  0 return;
 1225    }
 1226   
 1227  80 int newCapacity = oldCapacity << 1;
 1228  80 Entry[] newMap = new Entry[newCapacity];
 1229  80 threshold = (int) (newCapacity * loadFactor);
 1230   
 1231    /*
 1232    We need to guarantee that any existing reads of oldMap can
 1233    proceed. So we cannot yet null out each oldMap bin.
 1234   
 1235    Because we are using power-of-two expansion, the elements
 1236    from each bin must either stay at same index, or move
 1237    to oldCapacity+index. We also minimize new node creation by
 1238    catching cases where old nodes can be reused because their
 1239    .next fields won't change. (This is checked only for sequences
 1240    of one and two. It is not worth checking longer ones.)
 1241    */
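    // Editor's worked example (illustrative): with oldCapacity = 4, an
    // entry whose hash is 6 (binary 110) had old index 6 & 3 = 2; since
    // (6 & 4) != 0 it moves to newMap[2 + 4] = newMap[6]. An entry with
    // hash 2 (binary 010) has (2 & 4) == 0 and stays at newMap[2].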
 1242  80 for (int i = 0; i < oldCapacity; ++i) {
 1243  84160 Entry l = null;
 1244  84160 Entry h = null;
 1245  84160 Entry e = oldMap[i];
 1246   
 1247  84160 while (e != null) {
 1248  55321 int hash = e.hash;
 1249  55321 Entry next = e.next;
 1250   
 1251  55321 if ((hash & oldCapacity) == 0) {
 1252    // stays at newMap[i]
 1253  26866 if (l == null) {
 1254    // try to reuse node
 1255  24239 if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) == 0))) {
 1256  19738 l = e;
 1257   
 1258  19738 break;
 1259    }
 1260    }
 1261   
 1262  7128 l = new Entry(hash, e.key, e.value, l);
 1263    } else {
 1264    // moves to newMap[oldCapacity+i]
 1265  28455 if (h == null) {
 1266  25976 if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) != 0))) {
 1267  21513 h = e;
 1268   
 1269  21513 break;
 1270    }
 1271    }
 1272   
 1273  6942 h = new Entry(hash, e.key, e.value, h);
 1274    }
 1275   
 1276  14070 e = next;
 1277    }
 1278   
 1279  84160 newMap[i] = l;
 1280  84160 newMap[oldCapacity + i] = h;
 1281    }
 1282   
 1283  80 table = newMap;
 1284  80 recordModification(newMap);
 1285    }
 1286   
 1287    /**
 1288    * Continuation of put(), called only when synch lock is
 1289    * held and interference has been detected.
 1290    **/
 1291    /** OpenSymphony BEGIN */
 1292   
 1293    /* Previous code
 1294    protected Object sput(Object key, Object value, int hash) {*/
 1295  33 protected Object sput(Object key, Object value, int hash, boolean persist) {
 1296    /** OpenSymphony END */
 1297  33 Entry[] tab = table;
 1298  33 int index = hash & (tab.length - 1);
 1299  33 Entry first = tab[index];
 1300  33 Entry e = first;
 1301   
 1302  33 for (;;) {
 1303  63 if (e == null) {
 1304    /** OpenSymphony BEGIN */
 1305   
 1306    // Previous code
 1307    // Entry newEntry = new Entry(hash, key, value, first);
 1308  33 Entry newEntry;
 1309   
 1310  33 if (memoryCaching) {
 1311  31 newEntry = new Entry(hash, key, value, first);
 1312    } else {
 1313  2 newEntry = new Entry(hash, key, NULL, first);
 1314    }
 1315   
 1316  33 itemPut(key);
 1317   
 1318    // Persist if required
 1319  33 if (persist && !overflowPersistence) {
 1320  33 persistStore(key, value);
 1321    }
 1322   
 1323    // If we have a CacheEntry, update the group lookups
 1324  33 if (value instanceof CacheEntry) {
 1325  33 updateGroups(null, (CacheEntry) value, persist);
 1326    }
 1327   
 1328    /** OpenSymphony END */
 1329  33 tab[index] = newEntry;
 1330   
 1331  33 if (++count >= threshold) {
 1332  0 rehash();
 1333    } else {
 1334  33 recordModification(newEntry);
 1335    }
 1336   
 1337  33 return null;
 1338  30 } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
 1339  0 Object oldValue = e.value;
 1340   
 1341    /** OpenSymphony BEGIN */
 1342   
 1343    /* Previous code
 1344    e.value = value; */
 1345  0 if (memoryCaching) {
 1346  0 e.value = value;
 1347    }
 1348   
 1349    // Persist if required
 1350  0 if (persist && overflowPersistence) {
 1351  0 persistRemove(key);
 1352  0 } else if (persist) {
 1353  0 persistStore(key, value);
 1354    }
 1355   
 1356  0 updateGroups(oldValue, value, persist);
 1357   
 1358  0 itemPut(key);
 1359   
 1360    /** OpenSymphony END */
 1361  0 return oldValue;
 1362    } else {
 1363  30 e = e.next;
 1364    }
 1365    }
 1366    }
 1367   
 1368    /**
 1369    * Continuation of remove(), called only when synch lock is
 1370    * held and interference has been detected.
 1371    **/
 1372    /** OpenSymphony BEGIN */
 1373   
 1374    /* Previous code
 1375    protected Object sremove(Object key, int hash) { */
 1376  0 protected Object sremove(Object key, int hash, boolean invokeAlgorithm) {
 1377    /** OpenSymphony END */
 1378  0 Entry[] tab = table;
 1379  0 int index = hash & (tab.length - 1);
 1380  0 Entry first = tab[index];
 1381  0 Entry e = first;
 1382   
 1383  0 for (;;) {
 1384  0 if (e == null) {
 1385  0 return null;
 1386  0 } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
 1387  0 Object oldValue = e.value;
 1388  0 if (persistenceListener != null && (oldValue == NULL)) {
 1389  0 oldValue = persistRetrieve(key);
 1390    }
 1391   
 1392  0 e.value = null;
 1393  0 count--;
 1394   
 1395    /** OpenSymphony BEGIN */
 1396  0 if (!unlimitedDiskCache && !overflowPersistence) {
 1397  0 persistRemove(e.key);
 1398    // If we have a CacheEntry, update the groups
 1399  0 if (oldValue instanceof CacheEntry) {
 1400  0 CacheEntry oldEntry = (CacheEntry)oldValue;
 1401  0 removeGroupMappings(oldEntry.getKey(),
 1402    oldEntry.getGroups(), true);
 1403    }
 1404    } else {
 1405    // only remove from memory groups
 1406  0 if (oldValue instanceof CacheEntry) {
 1407  0 CacheEntry oldEntry = (CacheEntry)oldValue;
 1408  0 removeGroupMappings(oldEntry.getKey(),
 1409    oldEntry.getGroups(), false);
 1410    }
 1411    }
 1412   
 1413  0 if (overflowPersistence && ((size() + 1) >= maxEntries)) {
 1414  0 persistStore(key, oldValue);
 1415    // add key to persistent groups but NOT to the memory groups
 1416  0 if (oldValue instanceof CacheEntry) {
 1417  0 CacheEntry oldEntry = (CacheEntry)oldValue;
 1418  0 addGroupMappings(oldEntry.getKey(), oldEntry.getGroups(), true, false);
 1419    }
 1420    }
 1421   
 1422  0 if (invokeAlgorithm) {
 1423  0 itemRemoved(key);
 1424    }
 1425   
 1426    /** OpenSymphony END */
 1427  0 Entry head = e.next;
 1428   
 1429  0 for (Entry p = first; p != e; p = p.next) {
 1430  0 head = new Entry(p.hash, p.key, p.value, head);
 1431    }
 1432   
 1433  0 tab[index] = head;
 1434  0 recordModification(head);
 1435   
 1436  0 return oldValue;
 1437    } else {
 1438  0 e = e.next;
 1439    }
 1440    }
 1441    }
 1442   
 1443    /**
 1444    * Save the state of the <tt>AbstractConcurrentReadCache</tt> instance to a stream
 1445    * (i.e., serialize it).
 1446    *
 1447    * @serialData The <i>capacity</i> of the
 1448    * AbstractConcurrentReadCache (the length of the
 1449    * bucket array) is emitted (int), followed by the
 1450    * <i>size</i> of the AbstractConcurrentReadCache (the number of key-value
 1451    * mappings), followed by the key (Object) and value (Object)
 1452    * for each key-value mapping represented by the AbstractConcurrentReadCache.
 1453    * The key-value mappings are emitted in no particular order.
 1454    */
 1455  0 private synchronized void writeObject(java.io.ObjectOutputStream s) throws IOException {
 1456    // Write out the threshold, loadfactor, and any hidden stuff
 1457  0 s.defaultWriteObject();
 1458   
 1459    // Write out number of buckets
 1460  0 s.writeInt(table.length);
 1461   
 1462    // Write out size (number of Mappings)
 1463  0 s.writeInt(count);
 1464   
 1465    // Write out keys and values (alternating)
 1466  0 for (int index = table.length - 1; index >= 0; index--) {
 1467  0 Entry entry = table[index];
 1468   
 1469  0 while (entry != null) {
 1470  0 s.writeObject(entry.key);
 1471  0 s.writeObject(entry.value);
 1472  0 entry = entry.next;
 1473    }
 1474    }
 1475    }
 1476   
 1477    /**
 1478    * Return hash code for Object x.
 1479    * Since we are using power-of-two
 1480    * tables, it is worth the effort to improve hashcode via
 1481    * the same multiplicative scheme as used in IdentityHashMap.
 1482    */
 1483  4704882 private static int hash(Object x) {
 1484  4706524 int h = x.hashCode();
 1485   
 1486    // Multiply by 127 (quickly, via shifts), and mix in some high
 1487    // bits to help guard against bunching of codes that are
 1488    // consecutive or equally spaced.
 1489  4706494 return ((h << 7) - h + (h >>> 9) + (h >>> 17));
 1490    }
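    /*
    Editor's note: callers reduce this spread hash to a bucket index with a
    power-of-two mask, as in the lookup code above:

        int index = hash(key) & (table.length - 1);

    The shift/add mixing matters because the mask keeps only low-order bits.
    */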
 1491   
 1492    /**
 1493    * Add this cache key to the specified groups.
 1494    * We have to treat the
 1495    * memory and disk group mappings separately so they remain valid for their
 1496    * corresponding memory/disk caches. (e.g. if memory is limited to 100 entries
 1497    * and disk is unlimited, the group mappings will be different).
 1498    *
 1499    * @param key The cache key that we are adding to the groups.
 1500    * @param newGroups the set of groups we want to add this cache entry to.
 1501    * @param persist A flag to indicate whether the keys should be added to
 1502    * the persistent cache layer.
 1503    * @param memory A flag to indicate whether the key should be added to
 1504    * the memory groups (important for overflow-to-disk)
 1505    */
 1506  317 private void addGroupMappings(String key, Set newGroups, boolean persist, boolean memory) {
 1507  317 if (newGroups == null) {
 1508  20 return;
 1509    }
 1510   
 1511    // Add this CacheEntry to the groups that it is now a member of
 1512  297 for (Iterator it = newGroups.iterator(); it.hasNext();) {
 1513  282 String groupName = (String) it.next();
 1514   
 1515    // Update the in-memory groups
 1516  282 if (memoryCaching && memory) {
 1517  182 if (groups == null) {
 1518  0 groups = new HashMap();
 1519    }
 1520   
 1521  182 Set memoryGroup = (Set) groups.get(groupName);
 1522   
 1523  182 if (memoryGroup == null) {
 1524  113 memoryGroup = new HashSet();
 1525  113 groups.put(groupName, memoryGroup);
 1526    }
 1527   
 1528  182 memoryGroup.add(key);
 1529    }
 1530   
 1531    // Update the persistent group maps
 1532  282 if (persist) {
 1533  236 Set persistentGroup = persistRetrieveGroup(groupName);
 1534   
 1535  236 if (persistentGroup == null) {
 1536  112 persistentGroup = new HashSet();
 1537    }
 1538   
 1539  236 persistentGroup.add(key);
 1540  236 persistStoreGroup(groupName, persistentGroup);
 1541    }
 1542    }
 1543    }
 1544   
 1545    /** OpenSymphony END (pretty long!) */
 1546    /**
 1547    * Returns the appropriate capacity (power of two) for the specified
 1548    * initial capacity argument.
 1549    */
 1550  265 private int p2capacity(int initialCapacity) {
 1551  265 int cap = initialCapacity;
 1552   
 1553    // Compute the appropriate capacity
 1554  265 int result;
 1555   
 1556  265 if ((cap > MAXIMUM_CAPACITY) || (cap < 0)) {
 1557  0 result = MAXIMUM_CAPACITY;
 1558    } else {
 1559  265 result = MINIMUM_CAPACITY;
 1560   
 1561  265 while (result < cap) {
 1562  795 result <<= 1;
 1563    }
 1564    }
 1565   
 1566  265 return result;
 1567    }
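    // Editor's worked example (illustrative): p2capacity(33) doubles the
    // result 4 -> 8 -> 16 -> 32 -> 64 and returns 64; p2capacity(0) returns
    // MINIMUM_CAPACITY (4); a negative or over-large argument yields
    // MAXIMUM_CAPACITY (1 << 30).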
 1568   
 1569    /* Previous code
 1570    public Object put(Object key, Object value)*/
 1571  66097 private Object put(Object key, Object value, boolean persist) {
 1572    /** OpenSymphony END */
 1573  66097 if (value == null) {
 1574  30 throw new NullPointerException();
 1575    }
 1576   
 1577  66067 int hash = hash(key);
 1578  66067 Entry[] tab = table;
 1579  66067 int index = hash & (tab.length - 1);
 1580  66067 Entry first = tab[index];
 1581  66067 Entry e = first;
 1582   
 1583  66067 for (;;) {
 1584  96367 if (e == null) {
 1585  60908 synchronized (this) {
 1586  60908 tab = table;
 1587   
 1588    /** OpenSymphony BEGIN */
 1589   
 1590    // Previous code
 1591   
 1592    /* if (first == tab[index]) {
 1593    // Add to front of list
 1594    Entry newEntry = new Entry(hash, key, value, first);
 1595    tab[index] = newEntry;
 1596    if (++count >= threshold) rehash();
 1597    else recordModification(newEntry);
 1598    return null; */
 1599   
 1600  60908 Object oldValue = null;
 1601   
 1602    // Remove an item if the cache is full
 1603  60908 if (size() >= maxEntries) {
 1604    // part of fix CACHE-255: method should return old value
 1605  9090 oldValue = remove(removeItem(), false, false);
 1606    }
 1607   
 1608  60908 if (first == tab[index]) {
 1609    // Add to front of list
 1610  60875 Entry newEntry = null;
 1611   
 1612  60875 if (memoryCaching) {
 1613  60739 newEntry = new Entry(hash, key, value, first);
 1614    } else {
 1615  136 newEntry = new Entry(hash, key, NULL, first);
 1616    }
 1617   
 1618  60875 tab[index] = newEntry;
 1619  60875 itemPut(key);
 1620   
 1621    // Persist if required
 1622  60875 if (persist && !overflowPersistence) {
 1623  60458 persistStore(key, value);
 1624    }
 1625   
 1626    // If we have a CacheEntry, update the group lookups
 1627  60875 if (value instanceof CacheEntry) {
 1628  60475 updateGroups(null, (CacheEntry) value, persist);
 1629    }
 1630   
 1631  60875 if (++count >= threshold) {
 1632  80 rehash();
 1633    } else {
 1634  60795 recordModification(newEntry);
 1635    }
 1636   
 1637  60875 return oldValue;
 1638   
 1639    /** OpenSymphony END */
 1640    } else {
 1641    // wrong list -- retry
 1642   
 1643    /** OpenSymphony BEGIN */
 1644   
 1645    /* Previous code
 1646    return sput(key, value, hash);*/
 1647  33 return sput(key, value, hash, persist);
 1648   
 1649    /** OpenSymphony END */
 1650    }
 1651    }
 1652  35459 } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
 1653    // synch to avoid race with remove and to
 1654    // ensure proper serialization of multiple replaces
 1655  5159 synchronized (this) {
 1656  5159 tab = table;
 1657   
 1658  5159 Object oldValue = e.value;
 1659   
 1660    // [CACHE-118] - get the old cache entry even if there's no memory cache
 1661  5159 if (persist && (oldValue == NULL)) {
 1662  68 oldValue = persistRetrieve(key);
 1663    }
 1664   
 1665  5159 if ((first == tab[index]) && (oldValue != null)) {
 1666    /** OpenSymphony BEGIN */
 1667   
 1668    /* Previous code
 1669    e.value = value;
 1670    return oldValue; */
 1671  5159 if (memoryCaching) {
 1672  5091 e.value = value;
 1673    }
 1674   
 1675    // Persist if required
 1676  5159 if (persist && overflowPersistence) {
 1677  22 persistRemove(key);
 1678  5137 } else if (persist) {
 1679  5137 persistStore(key, value);
 1680    }
 1681   
 1682  5159 updateGroups(oldValue, value, persist);
 1683  5159 itemPut(key);
 1684   
 1685  5159 return oldValue;
 1686   
 1687    /** OpenSymphony END */
 1688    } else {
 1689    // retry if wrong list or lost race against concurrent remove
 1690   
 1691    /** OpenSymphony BEGIN */
 1692   
 1693    /* Previous code
 1694    return sput(key, value, hash);*/
 1695  0 return sput(key, value, hash, persist);
 1696   
 1697    /** OpenSymphony END */
 1698    }
 1699    }
 1700    } else {
 1701  30300 e = e.next;
 1702    }
 1703    }
 1704    }
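Note the shape of put above: the bucket chain is traversed with no lock held, and only at a decision point does the code synchronize and re-check first == tab[index]. If another writer changed the bucket head in the interim, the method falls back to the fully synchronized sput. A self-contained sketch of that optimistic-read / locked-recheck idiom (the Node type and slowPut are illustrative, not this class's API):

    // Illustrative only: optimistic unlocked traversal, then a locked re-check.
    class RecheckDemo {
        static final class Node {
            final Object key;
            volatile Object value;
            final Node next;

            Node(Object k, Object v, Node n) { key = k; value = v; next = n; }
        }

        private Node[] table = new Node[16];            // length is a power of two

        Object put(Object key, Object value) {
            Node[] tab = table;                         // snapshot without locking
            int i = key.hashCode() & (tab.length - 1);  // power-of-two mask
            Node first = tab[i];

            synchronized (this) {
                if (first == tab[i]) {                  // head unchanged: safe to link
                    tab[i] = new Node(key, value, first);
                    return null;
                }

                return slowPut(key, value);             // head moved: redo under lock
            }
        }

        private synchronized Object slowPut(Object key, Object value) {
            int i = key.hashCode() & (table.length - 1);
            table[i] = new Node(key, value, table[i]);
            return null;
        }
    }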
 1705   
 1706  9180 private synchronized Object remove(Object key, boolean invokeAlgorithm, boolean forcePersist)
 1707    /* Previous code
 1708    public Object remove(Object key) */
 1709   
 1710    /** OpenSymphony END */ {
 1711    /*
 1712    Strategy:
 1713   
 1714    Find the entry, then
 1715    1. Set value field to null, to force get() to retry
 1716    2. Rebuild the list without this entry.
 1717    All entries following the removed node can stay in the list, but
 1718    all preceding ones need to be cloned. Traversals rely
 1719    on this strategy to ensure that elements will not be
 1720    repeated during iteration.
 1721    */
 1722   
 1723    /** OpenSymphony BEGIN */
 1724  9180 if (key == null) {
 1725  0 return null;
 1726    }
 1727   
 1728    /** OpenSymphony END */
 1729  9180 int hash = hash(key);
 1730  9180 Entry[] tab = table;
 1731  9180 int index = hash & (tab.length - 1);
 1732  9180 Entry first = tab[index];
 1733  9180 Entry e = first;
 1734   
 1735  9180 for (;;) {
 1736  12370 if (e == null) {
 1737  0 tab = getTableForReading();
 1738   
 1739  0 if (first == tab[index]) {
 1740  0 return null;
 1741    } else {
 1742    // Wrong list -- must restart traversal at new first
 1743   
 1744    /** OpenSymphony BEGIN */
 1745   
 1746    /* Previous Code
 1747    return sremove(key, hash); */
 1748  0 return sremove(key, hash, invokeAlgorithm);
 1749   
 1750    /** OpenSymphony END */
 1751    }
 1752  12370 } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
 1753  9180 synchronized (this) {
 1754  9180 tab = table;
 1755   
 1756  9180 Object oldValue = e.value;
 1757  9180 if (persistenceListener != null && (oldValue == NULL)) {
 1758  0 oldValue = persistRetrieve(key);
 1759    }
 1760   
 1761    // re-find under synch if wrong list
 1762  9180 if ((first != tab[index]) || (oldValue == null)) {
 1763    /** OpenSymphony BEGIN */
 1764   
 1765    /* Previous Code
 1766    return sremove(key, hash); */
 1767  0 return sremove(key, hash, invokeAlgorithm);
 1768    }
 1769   
 1770    /** OpenSymphony END */
 1771  9180 e.value = null;
 1772  9180 count--;
 1773   
 1774    /** OpenSymphony BEGIN */
 1775  9180 if (forcePersist || (!unlimitedDiskCache && !overflowPersistence)) {
 1776  9050 persistRemove(e.key);
 1777    // If we have a CacheEntry, update the group lookups
 1778  9050 if (oldValue instanceof CacheEntry) {
 1779  9020 CacheEntry oldEntry = (CacheEntry) oldValue;
 1780  9020 removeGroupMappings(oldEntry.getKey(),
 1781    oldEntry.getGroups(), true);
 1782    }
 1783    } else {
 1784    // only remove from memory groups
 1785  130 if (oldValue instanceof CacheEntry) {
 1786  60 CacheEntry oldEntry = (CacheEntry) oldValue;
 1787  60 removeGroupMappings(oldEntry.getKey(), oldEntry
 1788    .getGroups(), false);
 1789    }
 1790    }
 1791   
 1792  9180 if (!forcePersist && overflowPersistence && ((size() + 1) >= maxEntries)) {
 1793  80 persistStore(key, oldValue);
 1794    // add key to persistent groups but NOT to the memory groups
 1795  80 if (oldValue instanceof CacheEntry) {
 1796  40 CacheEntry oldEntry = (CacheEntry) oldValue;
 1797  40 addGroupMappings(oldEntry.getKey(), oldEntry.getGroups(), true, false);
 1798    }
 1799    }
 1800   
 1801  9180 if (invokeAlgorithm) {
 1802  70 itemRemoved(key);
 1803    }
 1804   
 1805    // introduced to fix bug CACHE-255
 1806  9180 if (oldValue instanceof CacheEntry) {
 1807  9080 CacheEntry oldEntry = (CacheEntry) oldValue;
 1808  9080 oldValue = oldEntry.getContent();
 1809    }
 1810   
 1811    /** OpenSymphony END */
 1812  9180 Entry head = e.next;
 1813   
 1814  9180 for (Entry p = first; p != e; p = p.next) {
 1815  3190 head = new Entry(p.hash, p.key, p.value, head);
 1816    }
 1817   
 1818  9180 tab[index] = head;
 1819  9180 recordModification(head);
 1820   
 1821  9180 return oldValue;
 1822    }
 1823    } else {
 1824  3190 e = e.next;
 1825    }
 1826    }
 1827    }
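The rebuild loop at the end of remove is the heart of the strategy comment above: every entry preceding the removed node is cloned, while the tail after it is shared unchanged, so readers still traversing the old chain never see a half-edited list. A standalone sketch of that rebuild (the Node type is hypothetical):

    // Sketch only: remove a node from a singly linked chain by cloning its
    // predecessors and reusing its tail, as the loop above does.
    class ChainRebuildDemo {
        static final class Node {
            final String key;
            final Node next;

            Node(String key, Node next) { this.key = key; this.next = next; }
        }

        static Node removeFrom(Node first, String victimKey) {
            Node victim = first;

            while ((victim != null) && !victim.key.equals(victimKey)) {
                victim = victim.next;
            }

            if (victim == null) {
                return first;                  // key not present
            }

            Node head = victim.next;           // tail is shared, not copied

            for (Node p = first; p != victim; p = p.next) {
                head = new Node(p.key, head);  // clone each predecessor
            }

            return head;                       // old chain stays intact for readers
        }

        public static void main(String[] args) {
            Node chain = new Node("a", new Node("b", new Node("c", null)));

            for (Node n = removeFrom(chain, "b"); n != null; n = n.next) {
                System.out.print(n.key);       // prints "ac"
            }
        }
    }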
 1828   
 1829    /**
 1830    * Remove this CacheEntry from the groups it no longer belongs to.
 1831    * We have to treat the memory and disk group mappings separately so they remain
 1832    * valid for their corresponding memory/disk caches (e.g., if memory is
 1833    * limited to 100 entries and disk is unlimited, the group mappings will
 1834    * differ).
 1835    *
 1836    * @param key The cache key that we are removing from the groups.
 1837    * @param oldGroups the set of groups we want to remove the cache entry
 1838    * from.
 1839    * @param persist A flag to indicate whether the keys should be removed
 1840    * from the persistent cache layer.
 1841    */
 1842  9184 private void removeGroupMappings(String key, Set oldGroups, boolean persist) {
 1843  9184 if (oldGroups == null) {
 1844  9020 return;
 1845    }
 1846   
 1847  164 for (Iterator it = oldGroups.iterator(); it.hasNext();) {
 1848  90 String groupName = (String) it.next();
 1849   
 1850    // Update the in-memory groups
 1851  90 if (memoryCaching && (this.groups != null)) {
 1852  78 Set memoryGroup = (Set) groups.get(groupName);
 1853   
 1854  78 if (memoryGroup != null) {
 1855  78 memoryGroup.remove(key);
 1856   
 1857  78 if (memoryGroup.isEmpty()) {
 1858  60 groups.remove(groupName);
 1859    }
 1860    }
 1861    }
 1862   
 1863    // Update the persistent group maps
 1864  90 if (persist) {
 1865  50 Set persistentGroup = persistRetrieveGroup(groupName);
 1866   
 1867  50 if (persistentGroup != null) {
 1868  44 persistentGroup.remove(key);
 1869   
 1870  44 if (persistentGroup.isEmpty()) {
 1871  20 persistRemoveGroup(groupName);
 1872    } else {
 1873  24 persistStoreGroup(groupName, persistentGroup);
 1874    }
 1875    }
 1876    }
 1877    }
 1878    }
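Both branches above use the same remove-and-prune pattern: drop the key from the group's member set, then either discard the set once it empties or write the shrunken set back. A minimal generic sketch of that pattern, written against modern java.util for brevity (the class and method names are hypothetical):

    import java.util.*;

    // Sketch only: keep a secondary index free of empty member sets.
    class GroupIndexDemo {
        private final Map<String, Set<String>> groups = new HashMap<>();

        void add(String group, String key) {
            groups.computeIfAbsent(group, g -> new HashSet<>()).add(key);
        }

        void remove(String group, String key) {
            Set<String> members = groups.get(group);

            if (members != null) {
                members.remove(key);

                if (members.isEmpty()) {
                    groups.remove(group);   // prune the group eagerly
                }
            }
        }
    }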
 1879   
 1880    /**
 1881    * Updates the groups to reflect the differences between the old and new
 1882    * cache entries. Either of the old or new values can be <code>null</code>
 1883    * or contain a <code>null</code> group list, in which case the entry's
 1884    * groups will all be added or removed, respectively.
 1885    *
 1886    * @param oldValue The old CacheEntry that is being replaced.
 1887    * @param newValue The new CacheEntry that is being inserted.
 1888    */
 1889  5159 private void updateGroups(Object oldValue, Object newValue, boolean persist) {
 1890    // If we have/had a CacheEntry, update the group lookups
 1891  5159 boolean oldIsCE = oldValue instanceof CacheEntry;
 1892  5159 boolean newIsCE = newValue instanceof CacheEntry;
 1893   
 1894  5159 if (newIsCE && oldIsCE) {
 1895  5159 updateGroups((CacheEntry) oldValue, (CacheEntry) newValue, persist);
 1896  0 } else if (newIsCE) {
 1897  0 updateGroups(null, (CacheEntry) newValue, persist);
 1898  0 } else if (oldIsCE) {
 1899  0 updateGroups((CacheEntry) oldValue, null, persist);
 1900    }
 1901    }
 1902   
 1903    /**
 1904    * Updates the groups to reflect the differences between the old and new cache entries.
 1905    * Either of the old or new values can be <code>null</code>
 1906    * or contain a <code>null</code> group list, in which case the entry's
 1907    * groups will all be added or removed, respectively.
 1908    *
 1909    * @param oldValue The old CacheEntry that is being replaced.
 1910    * @param newValue The new CacheEntry that is being inserted.
 1911    */
 1912  65667 private void updateGroups(CacheEntry oldValue, CacheEntry newValue, boolean persist) {
 1913  65667 Set oldGroups = null;
 1914  65667 Set newGroups = null;
 1915   
 1916  65667 if (oldValue != null) {
 1917  5159 oldGroups = oldValue.getGroups();
 1918    }
 1919   
 1920  65667 if (newValue != null) {
 1921  65667 newGroups = newValue.getGroups();
 1922    }
 1923   
 1924    // Get the names of the groups to remove
 1925  65667 if (oldGroups != null) {
 1926  104 Set removeFromGroups = new HashSet();
 1927   
 1928  104 for (Iterator it = oldGroups.iterator(); it.hasNext();) {
 1929  173 String groupName = (String) it.next();
 1930   
 1931  173 if ((newGroups == null) || !newGroups.contains(groupName)) {
 1932    // We need to remove this group
 1933  30 removeFromGroups.add(groupName);
 1934    }
 1935    }
 1936   
 1937  104 removeGroupMappings(oldValue.getKey(), removeFromGroups, persist);
 1938    }
 1939   
 1940    // Get the names of the groups to add
 1941  65667 if (newGroups != null) {
 1942  277 Set addToGroups = new HashSet();
 1943   
 1944  277 for (Iterator it = newGroups.iterator(); it.hasNext();) {
 1945  405 String groupName = (String) it.next();
 1946   
 1947  405 if ((oldGroups == null) || !oldGroups.contains(groupName)) {
 1948    // We need to add this group
 1949  262 addToGroups.add(groupName);
 1950    }
 1951    }
 1952   
 1953  277 addGroupMappings(newValue.getKey(), addToGroups, persist, true);
 1954    }
 1955    }
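In effect, updateGroups computes two set differences: oldGroups minus newGroups (the groups to leave) and newGroups minus oldGroups (the groups to join). A compact equivalent using java.util.Set.removeAll, shown only to make the diffing explicit:

    import java.util.*;

    // Sketch only: the same two set differences via removeAll.
    class GroupDiffDemo {
        public static void main(String[] args) {
            Set<String> oldGroups = new HashSet<>(Arrays.asList("a", "b"));
            Set<String> newGroups = new HashSet<>(Arrays.asList("b", "c"));

            Set<String> toRemove = new HashSet<>(oldGroups);
            toRemove.removeAll(newGroups);   // only in the old entry: [a]

            Set<String> toAdd = new HashSet<>(newGroups);
            toAdd.removeAll(oldGroups);      // only in the new entry: [c]

            System.out.println(toRemove + " " + toAdd);   // prints [a] [c]
        }
    }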
 1956   
 1957    /**
 1958    * AbstractConcurrentReadCache collision list entry.
 1959    */
 1960    protected static class Entry implements Map.Entry {
 1961    protected final Entry next;
 1962    protected final Object key;
 1963   
 1964    /*
 1965    The use of volatile for value field ensures that
 1966    we can detect status changes without synchronization.
 1967    The other fields are never changed, and are
 1968    marked as final.
 1969    */
 1970    protected final int hash;
 1971    protected volatile Object value;
 1972   
 1973  78168 Entry(int hash, Object key, Object value, Entry next) {
 1974  78168 this.hash = hash;
 1975  78168 this.key = key;
 1976  78168 this.next = next;
 1977  78168 this.value = value;
 1978    }
 1979   
 1980    // Map.Entry Ops
 1981  0 public Object getKey() {
 1982  0 return key;
 1983    }
 1984   
 1985    /**
 1986    * Set the value of this entry.
 1987    * Note: In an entrySet or
 1988    * entrySet.iterator(), unless the set or iterator is used under
 1989    * synchronization of the table as a whole (or you can otherwise
 1990    * guarantee lack of concurrent modification), <tt>setValue</tt>
 1991    * is not strictly guaranteed to actually replace the value field
 1992    * obtained via the <tt>get</tt> operation of the underlying hash
 1993    * table in multithreaded applications. If iterator-wide
 1994    * synchronization is not used, and any other concurrent
 1995    * <tt>put</tt> or <tt>remove</tt> operations occur, sometimes
 1996    * even to <em>other</em> entries, then this change is not
 1997    * guaranteed to be reflected in the hash table. (It might, or it
 1998    * might not. There are no assurances either way.)
 1999    *
 2000    * @param value the new value.
 2001    * @return the previous value, or null if entry has been detectably
 2002    * removed.
 2003    * @exception NullPointerException if the value is <code>null</code>.
 2004    *
 2005    **/
 2006  0 public Object setValue(Object value) {
 2007  0 if (value == null) {
 2008  0 throw new NullPointerException();
 2009    }
 2010   
 2011  0 Object oldValue = this.value;
 2012  0 this.value = value;
 2013   
 2014  0 return oldValue;
 2015    }
 2016   
 2017    /**
 2018    * Get the value.
 2019    * Note: In an entrySet or entrySet.iterator(),
 2020    * unless the set or iterator is used under synchronization of the
 2021    * table as a whole (or you can otherwise guarantee lack of
 2022    * concurrent modification), <tt>getValue</tt> <em>might</em>
 2023    * return null, reflecting the fact that the entry has been
 2024    * concurrently removed. However, there are no assurances that
 2025    * concurrent removals will be reflected using this method.
 2026    *
 2027    * @return the current value, or null if the entry has been
 2028    * detectably removed.
 2029    **/
 2030  0 public Object getValue() {
 2031  0 return value;
 2032    }
 2033   
 2034  0 public boolean equals(Object o) {
 2035  0 if (!(o instanceof Map.Entry)) {
 2036  0 return false;
 2037    }
 2038   
 2039  0 Map.Entry e = (Map.Entry) o;
 2040   
 2041  0 if (!key.equals(e.getKey())) {
 2042  0 return false;
 2043    }
 2044   
 2045  0 Object v = value;
 2046   
 2047  0 return (v == null) ? (e.getValue() == null) : v.equals(e.getValue());
 2048    }
 2049   
 2050  0 public int hashCode() {
 2051  0 Object v = value;
 2052   
 2053  0 return hash ^ ((v == null) ? 0 : v.hashCode());
 2054    }
 2055   
 2056  0 public String toString() {
 2057  0 return key + "=" + value;
 2058    }
 2059   
 2060  0 protected Object clone() {
 2061  0 return new Entry(hash, key, value, ((next == null) ? null : (Entry) next.clone()));
 2062    }
 2063    }
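The volatile value field is what lets an unsynchronized reader observe a removal (remove sets value to null, as seen above) without taking the table lock, while the final hash/key/next fields are safely published at construction. A tiny sketch of that visibility contract, with hypothetical reader and writer threads:

    // Sketch only: a volatile write in one thread is visible to unlocked
    // reads in another; null doubles as the "removed" signal.
    class VolatileSignalDemo {
        static final class Holder {
            volatile Object value = "live";
        }

        public static void main(String[] args) throws InterruptedException {
            final Holder h = new Holder();

            Thread reader = new Thread(() -> {
                while (h.value != null) {
                    // spin: no lock needed to see the change
                }
                System.out.println("observed removal");
            });

            reader.start();
            Thread.sleep(10);
            h.value = null;   // volatile write: guaranteed visible to the reader
            reader.join();
        }
    }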
 2064   
 2065    protected class HashIterator implements Iterator, Enumeration {
 2066    protected final Entry[] tab; // snapshot of table
 2067    protected Entry entry = null; // current node of slot
 2068    protected Entry lastReturned = null; // last node returned by next
 2069    protected Object currentKey; // key for current node
 2070    protected Object currentValue; // value for current node
 2071    protected int index; // current slot
 2072   
 2073  90 protected HashIterator() {
 2074  90 tab = AbstractConcurrentReadCache.this.getTableForReading();
 2075  90 index = tab.length - 1;
 2076    }
 2077   
 2078  100 public boolean hasMoreElements() {
 2079  100 return hasNext();
 2080    }
 2081   
 2082  250 public boolean hasNext() {
 2083    /*
 2084    currentKey and currentValue are set here to ensure that next()
 2085    returns normally if hasNext() returns true. This avoids
 2086    surprises, especially when the final element is removed during
 2087    traversal -- instead, we just ignore the removal during
 2088    current traversal.
 2089    */
 2090  250 for (;;) {
 2091  380 if (entry != null) {
 2092  160 Object v = entry.value;
 2093   
 2094  160 if (v != null) {
 2095  160 currentKey = entry.key;
 2096  160 currentValue = v;
 2097   
 2098  160 return true;
 2099    } else {
 2100  0 entry = entry.next;
 2101    }
 2102    }
 2103   
 2104  220 while ((entry == null) && (index >= 0)) {
 2105  2880 entry = tab[index--];
 2106    }
 2107   
 2108  220 if (entry == null) {
 2109  90 currentKey = currentValue = null;
 2110   
 2111  90 return false;
 2112    }
 2113    }
 2114    }
 2115   
 2116  130 public Object next() {
 2117  130 if ((currentKey == null) && !hasNext()) {
 2118  0 throw new NoSuchElementException();
 2119    }
 2120   
 2121  130 Object result = returnValueOfNext();
 2122  130 lastReturned = entry;
 2123  130 currentKey = currentValue = null;
 2124  130 entry = entry.next;
 2125   
 2126  130 return result;
 2127    }
 2128   
 2129  40 public Object nextElement() {
 2130  40 return next();
 2131    }
 2132   
 2133  0 public void remove() {
 2134  0 if (lastReturned == null) {
 2135  0 throw new IllegalStateException();
 2136    }
 2137   
 2138  0 AbstractConcurrentReadCache.this.remove(lastReturned.key);
 2139    }
 2140   
 2141  0 protected Object returnValueOfNext() {
 2142  0 return entry;
 2143    }
 2144    }
 2145   
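hasNext() above deliberately latches currentKey and currentValue before answering, so a removal that races with the traversal cannot make the following next() fail. A self-contained sketch of that latching idiom over a plain array snapshot (hypothetical class, not part of this file):

    import java.util.*;

    // Sketch only: hasNext() captures the element so next() cannot miss it.
    class LatchingIterator implements Iterator {
        private final Object[] snapshot;   // like 'tab': taken once at creation
        private int index = 0;
        private Object current = null;     // latched by hasNext()

        LatchingIterator(Object[] snapshot) { this.snapshot = snapshot; }

        public boolean hasNext() {
            while ((current == null) && (index < snapshot.length)) {
                current = snapshot[index++];   // skip empty slots, latch a live one
            }

            return current != null;
        }

        public Object next() {
            if ((current == null) && !hasNext()) {
                throw new NoSuchElementException();
            }

            Object result = current;
            current = null;

            return result;
        }

        public void remove() {
            throw new UnsupportedOperationException();
        }
    }
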
 2146    protected class KeyIterator extends HashIterator {
 2147  90 protected Object returnValueOfNext() {
 2148  90 return currentKey;
 2149    }
 2150    }
 2151   
 2152    protected class ValueIterator extends HashIterator {
 2153  40 protected Object returnValueOfNext() {
 2154  40 return currentValue;
 2155    }
 2156    }
 2157    }