001    /*
002     *  This file is part of the Jikes RVM project (http://jikesrvm.org).
003     *
004     *  This file is licensed to You under the Eclipse Public License (EPL);
005     *  You may not use this file except in compliance with the License. You
006     *  may obtain a copy of the License at
007     *
008     *      http://www.opensource.org/licenses/eclipse-1.0.php
009     *
010     *  See the COPYRIGHT.txt file distributed with this work for information
011     *  regarding copyright ownership.
012     */
013    package org.jikesrvm.scheduler;
014    
015    import org.jikesrvm.Constants;
016    import org.jikesrvm.VM;
017    import org.jikesrvm.runtime.Magic;
018    import org.jikesrvm.runtime.Entrypoints;
019    import org.vmmagic.unboxed.Address;
020    import org.vmmagic.unboxed.Offset;
021    import org.vmmagic.pragma.Uninterruptible;
022    import org.vmmagic.pragma.Entrypoint;
023    import org.vmmagic.pragma.Untraced;
024    import org.vmmagic.pragma.NoInline;
025    
026    /**
027     *
028     * <p> Alternative (to Java monitors) light-weight synchronization
029     * mechanism to implement Java monitors {@link Lock}.  These locks
030     * should not be used where Java monitors would suffice, or where
031     * an adaptive mutex is required.  They are
032     * intended to be held only briefly!
033     *
034     * <p> Normally, contending <code>RVMThread</code>s will spin on
035     * this processor lock's <code>latestContender</code> field.  If
036     * <code>MCS_Locking</code> is set, the processors spin on processor
037     * local data.  This is loosely based on an idea in Mellor-Crummey and
038     * Scott's paper in ASPLOS-IV (1991).
039     * 1.  Possible project: determine those conditions under which MCS
040     * locking performs better than spinning on a global address.
041     *
 * <p> Acquiring or releasing a lock involves atomically reading and
 * setting the lock's <code>latestContender</code> field.  If this
 * field is null, the lock is unowned.  Otherwise, the field points to
 * the thread that owns the lock, or, if MCS locking is being used, to
 * the last thread on a circular queue of threads spinning until they
 * get the lock, or, if MCS locking is being used and the circular
 * spin queue is being updated, to <code>IN_FLUX</code>.
050     *
051     * <p> Contention is best handled by doing something else.  To support
052     * this, <code>tryLock</code> obtains the lock (and returns true) if
053     * the lock is unowned (and there is no spurious microcontention).
054     * Otherwise, it returns false.
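 *
 * <p> An illustrative sketch of that pattern (<code>lk</code>,
 * <code>updateSharedState()</code>, and <code>doSomethingElse()</code>
 * are hypothetical):
 * <pre>
 *   if (lk.tryLock()) {
 *     updateSharedState(); // brief, uninterruptible critical section
 *     lk.unlock();
 *   } else {
 *     doSomethingElse();   // make progress elsewhere; retry, or fall back to lock()
 *   }
 * </pre>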
055     *
056     * <p> Only when "doing something else" is not an attractive option
057     * (locking global thread queues, unlocking a thick lock with threads
058     * waiting, avoiding starvation on a thread that has issued several
059     * tryLocks, etc.) should lock() be called.  Here, any remaining
060     * contention is handled by spinning on a local flag.
061     *
 * <p> To add itself to the circular waiting queue, a thread must
 * succeed in setting the latestContender field to IN_FLUX.  A backoff
 * strategy is used to reduce contention for this field.  This
 * strategy has both a pseudo-random component (to prevent two or more
 * threads from backing off in lock step) and an exponential component
 * (to deal with really high contention).
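 *
 * <p> Concretely, with the constants defined below, the delay for the
 * <code>n</code>th failed attempt is
 * <code>delayCount[i] * delayMultiplier + (delayBase &lt;&lt; (n - 1))</code>
 * spin iterations; for <code>n == 3</code> that is between
 * <code>10 + 256 = 266</code> and <code>130 + 256 = 386</code>
 * iterations, depending on the pseudo-random index <code>i</code>.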
068     *
 * <p> Releasing a lock entails either atomically setting the
 * latestContender field to null (if this thread is the
 * latestContender), or releasing the first thread on the circular
 * spin queue.  In the latter case, the latestContender field must
 * first be set to IN_FLUX.  To give unlock() priority over lock(),
 * the backoff strategy is not used for unlocking: if a thread fails
 * to set the field to IN_FLUX, it retries immediately.
076     *
 * <p> Usage: system locks should only be used when synchronized
 * methods cannot be.  Do not do anything (such as trigger a type
 * cast, allocate an object, or call any method of a class that does
 * not implement Uninterruptible) that might allow a thread switch or
 * trigger a garbage collection between lock and unlock.
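 *
 * <p> A minimal sketch of correct usage (the names here are
 * hypothetical; the critical section must itself be uninterruptible):
 * <pre>
 *   counterLock.lock();
 *   sharedCounter++;     // no allocation, casts, or interruptible calls here
 *   counterLock.unlock();
 * </pre>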
082     *
083     * @see RVMThread
084     * @see Monitor
 * @see Lock
 */
086    @Uninterruptible
087    public final class SpinLock implements Constants {
088      /**
   * Should contending <code>RVMThread</code>s spin on thread-local addresses (true)
   * or on a globally shared address (false)?
091       */
092      private static final boolean MCS_Locking = false;
093    
094      /**
095       * The state of the processor lock.
096       * <ul>
097       * <li> <code>null</code>, if the lock is not owned;
098       * <li> the processor that owns the lock, if no processors are waiting;
099       * <li> the last in a circular chain of processors waiting to own the lock; or
100       * <li> <code>IN_FLUX</code>, if the circular chain is being edited.
101       * </ul>
102       * Only the first two states are possible unless MCS locking is implemented.
103       */
104      @Entrypoint
105      @Untraced
106      RVMThread latestContender;
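  /**
   * @return whether the lock is currently held by some thread
   */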
  public boolean lockHeld() { return latestContender != null; }
108      // FIXME: save the string somewhere.
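  /**
   * Acquire the lock.
   * @param s a diagnostic label for the acquisition; currently ignored (see the FIXME above)
   */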
109      public void lock(String s) {
110        lock();
111      }
112      /**
   * Acquire the lock, spinning with backoff until it is available.
114       */
115      public void lock() {
116        if (!VM.runningVM) return;
117        VM.disableYieldpoints();
118        RVMThread i = RVMThread.getCurrentThread();
119        RVMThread p;
120        int attempts = 0;
121        Offset latestContenderOffset = Entrypoints.latestContenderField.getOffset();
122        do {
123          p = Magic.objectAsThread(Magic.addressAsObject(Magic.prepareAddress(this, latestContenderOffset)));
124          if (p == null) { // nobody owns the lock
125            if (Magic.attemptAddress(this, latestContenderOffset, Address.zero(), Magic.objectAsAddress(i))) {
          Magic.isync(); // so subsequent instructions won't see stale values
127              return;
128            } else {
129              continue; // don't handle contention
130            }
131          } else if (MCS_Locking && Magic.objectAsAddress(p).NE(IN_FLUX)) { // lock is owned, but not being changed
132            if (Magic.attemptAddress(this, latestContenderOffset, Magic.objectAsAddress(p), IN_FLUX)) {
          Magic.isync(); // so subsequent instructions won't see stale values
134              break;
135            }
136          }
137          handleMicrocontention(attempts++);
138        } while (true);
    // reached only on the MCS path: i has set latestContender to IN_FLUX,
    // which grants it the right to edit the contender chain (it does not
    // yet own the lock itself)
    if (VM.VerifyAssertions && !MCS_Locking) VM._assert(VM.NOT_REACHED);
141        i.awaitingSpinLock = this;
142        if (p.awaitingSpinLock != this) { // make i first (and only) waiter on the contender chain
143          i.contenderLink = i;
144        } else {                               // make i last waiter on the contender chain
145          i.contenderLink = p.contenderLink;
146          p.contenderLink = i;
147        }
148        Magic.sync(); // so other contender will see updated contender chain
149        Magic.setObjectAtOffset(this, latestContenderOffset, i);  // other processors can get at the lock
150        do { // spin, waiting for the lock
151          Magic.isync(); // to make new value visible as soon as possible
    } while (i.awaitingSpinLock == this); // cleared by unlock() when i is granted the lock
153      }
154    
155      /**
   * Conditionally acquire the lock: succeed only if it is currently
   * unowned.  Yieldpoints remain disabled only if acquisition succeeds.
   * @return whether acquisition succeeded
158       */
159      public boolean tryLock() {
160        if (!VM.runningVM) return true;
161        VM.disableYieldpoints();
162        Offset latestContenderOffset = Entrypoints.latestContenderField.getOffset();
163        if (Magic.prepareAddress(this, latestContenderOffset).isZero()) {
164          Address cp = Magic.objectAsAddress(RVMThread.getCurrentThread());
165          if (Magic.attemptAddress(this, latestContenderOffset, Address.zero(), cp)) {
        Magic.isync(); // so subsequent instructions won't see stale values
167            return true;
168          }
169        }
170        VM.enableYieldpoints();
171        return false;
172      }
173    
174      /**
   * Release the lock.
176       */
177      public void unlock() {
178        if (!VM.runningVM) return;
    Magic.sync(); // commit changes made while the lock was held so they are visible to the next thread that acquires the lock
180        Offset latestContenderOffset = Entrypoints.latestContenderField.getOffset();
181        RVMThread i = RVMThread.getCurrentThread();
182        if (!MCS_Locking) {
183          Magic.setObjectAtOffset(this, latestContenderOffset, null);  // latestContender = null;
184          VM.enableYieldpoints();
185          return;
186        }
187        RVMThread p;
188        do {
189          p = Magic.objectAsThread(Magic.addressAsObject(Magic.prepareAddress(this, latestContenderOffset)));
190          if (p == i) { // nobody is waiting for the lock
191            if (Magic.attemptAddress(this, latestContenderOffset, Magic.objectAsAddress(p), Address.zero())) {
192              break;
193            }
      } else if (Magic.objectAsAddress(p).NE(IN_FLUX)) { // there are waiters, but the contender chain is not being changed
196            if (Magic.attemptAddress(this, latestContenderOffset, Magic.objectAsAddress(p), IN_FLUX)) {
197              break;
198            }
199          } else { // in flux
200            handleMicrocontention(-1); // wait a little before trying again
201          }
202        } while (true);
    if (p != i) { // p is the last thread on the chain of threads contending for the lock
      RVMThread q = p.contenderLink; // q is the first thread on the chain
      if (p == q) { // only one thread waiting for the lock
        q.awaitingSpinLock = null; // q now owns the lock
        Magic.sync(); // make sure the chain of waiting threads gets updated before another thread accesses the chain
        // other contenders can get at the lock:
        Magic.setObjectAtOffset(this, latestContenderOffset, q); // latestContender = q;
      } else { // more than one thread waiting for the lock
        p.contenderLink = q.contenderLink; // remove q from the chain
        q.awaitingSpinLock = null; // q now owns the lock
        Magic.sync(); // make sure the chain of waiting threads gets updated before another thread accesses the chain
        Magic.setObjectAtOffset(this, latestContenderOffset, p); // other contenders can get at the lock
      }
    }
215          }
216        }
217        VM.enableYieldpoints();
218      }
219    
220      /**
   * An attempt to lock or unlock this lock has failed, presumably due
   * to contention with another thread.  Back off a little to increase
   * the likelihood that a subsequent retry will succeed.
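   * @param n the number of failed attempts so far, or a non-positive
   *          value to request only a minimal pause (as unlock() does)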
225       */
226      @NoInline
227      private void handleMicrocontention(int n) {
228        Magic.pause();    // reduce overhead of spin wait on IA
229        if (n <= 0) return;  // method call overhead is delay enough
230        if (n > 100) {
231          // PNT: FIXME: we're dying here ... maybe we're deadlocking?
232          VM.sysWriteln("Unexpectedly large spin lock contention on ",Magic.objectAsAddress(this));
      RVMThread t = latestContender;
      if (t == null) {
235            VM.sysWriteln("Unexpectedly large spin lock contention in ",RVMThread.getCurrentThreadSlot(),"; lock held by nobody");
236          } else {
237            VM.sysWriteln("Unexpectedly large spin lock contention in ",RVMThread.getCurrentThreadSlot(),"; lock held by ",t.getThreadSlot());
        if (t != RVMThread.getCurrentThread()) {
239              VM.sysWriteln("But -- at least the spin lock is held by a different thread.");
240            }
241          }
242          RVMThread.dumpStack();
243          VM.sysFail("Unexpectedly large spin lock contention");
244        }
245        // PNT: this is weird.
246        int pid = RVMThread.getCurrentThread().getThreadSlot(); // delay a different amount in each thread
247        delayIndex = (delayIndex + pid) % delayCount.length;
248        int delay = delayCount[delayIndex] * delayMultiplier; // pseudorandom backoff component
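    // note: Java masks int shift counts to 5 bits (JLS 15.19), so the
    // exponential term below wraps for n > 32; n can reach 100 here
    // before the failure check above fires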
249        delay += delayBase << (n - 1);                     // exponential backoff component
    for (int i = delay; i > 0; i--) ;                        // busy-wait for the computed number of iterations
251      }
252    
  /** Multiplier for the pseudo-random backoff component. */
  private static final int delayMultiplier = 10;
  /** Base of the exponential backoff component. */
  private static final int delayBase = 64;
  /** Rotating index into {@link #delayCount}, advanced on each backoff. */
  private static int delayIndex;
  /** Pseudo-random delay factors, cycled through to vary backoff across threads. */
  private static final int[] delayCount = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};
257    
258      /**
   * For MCS locking, indicates that some thread is changing the
   * state of the circular waiting queue.
261       */
262      private static final Address IN_FLUX = Address.max();
263    
264    }
265