/*
 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 *  This file is licensed to You under the Eclipse Public License (EPL);
 *  You may not use this file except in compliance with the License. You
 *  may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 *  See the COPYRIGHT.txt file distributed with this work for information
 *  regarding copyright ownership.
 */
package org.jikesrvm.scheduler;

import org.jikesrvm.VM;
import org.jikesrvm.Callbacks;
import org.jikesrvm.Constants;
import org.jikesrvm.Services;
import org.jikesrvm.objectmodel.ObjectModel;
import org.jikesrvm.objectmodel.ThinLockConstants;
import org.jikesrvm.runtime.Magic;
import org.vmmagic.pragma.Inline;
import org.vmmagic.pragma.Interruptible;
import org.vmmagic.pragma.Uninterruptible;
import org.vmmagic.pragma.UnpreemptibleNoWarn;
import org.vmmagic.pragma.Unpreemptible;
import org.vmmagic.unboxed.Word;
import org.vmmagic.unboxed.Offset;

/**
 Lock provides RVM support for monitors and Java level
 synchronization.

 <p>
 This class may be decomposed into four sections:
 <OL>
 <LI> support for synchronization methods of java.lang.Object,
 <LI> heavy weight locking mechanism,
 <LI> management of heavy weight locks, and
 <LI> debugging and performance tuning support.
 </OL>
 </p>

 <p><STRONG>Requirement 1:</STRONG>
 It must be possible to lock an object when allocations are not
 allowed.
 </p>

 <p><STRONG>Requirement 2:</STRONG>
 After a lock has been obtained, the code of this class must return
 without allowing a thread switch.  (The {@link
 org.jikesrvm.ArchitectureSpecific.BaselineExceptionDeliverer#unwindStackFrame(org.jikesrvm.compilers.common.CompiledMethod, org.jikesrvm.ArchitectureSpecific.Registers)
 exception handler}
 of the baseline compiler assumes that until lock() returns the lock
 has not been obtained.)
 </p>

 <p><STRONG>Section 1:</STRONG>
 support for {@link java.lang.Object#notify}, {@link
 java.lang.Object#notifyAll}, and {@link java.lang.Object#wait()}.
 When these methods are called, the indicated object must be locked
 by the current thread.
 </p>

 <p><STRONG>Section 2:</STRONG>
 has two parts.  <EM>Section 2a:</EM> locking (and unlocking)
 objects with heavy-weight locks associated with them.  <EM>Section
 2b:</EM> associating (and disassociating) heavy-weight locks with
 objects.
 </p>

 <p><STRONG>Section 3:</STRONG>
 Allocates (and frees) heavy weight locks consistent with Requirement
 1.
 </p>

 <p><STRONG>Section 4:</STRONG>
 debugging and performance tuning support.
 </p>

 <p>
 The following performance tuning issues have not yet been addressed
 adequately:
 <OL>
 <LI> <EM>What to do if the attempt to lock an object fails?</EM>  There
 are three choices: try again (busy-wait), yield and then try again,
 or inflate the lock and yield to the heavy-weight lock's entering
 queue.  Currently, the code yields n times and then inflates.
 (This seemed to be best for the portBOB benchmark on a 12-way AIX
 SMP in the Fall of '99.)
 <LI> <EM>When should a heavy-weight lock be deflated?</EM>  Currently,
 deflation happens when the lock is unlocked with nothing on either
 of its queues.  It would probably be better to periodically (at what
 period?) examine heavy-weight locks and deflate any that haven't
 been held for a while (for how long?).
 <LI> <EM>How many heavy-weight locks are needed? and how should they be
 managed?</EM>  Currently, each processor maintains a pool of free
 locks.  When a lock is inflated by a processor it is taken from
 this pool and when a lock is deflated by a processor it gets added
 to the processor's pool.  Since inflation can happen on one processor
 and deflation on another, this can create an imbalance.  It might
 be worth investigating a scheme for balancing these local pools.
 <LI> <EM>Is there any advantage to using the {@link SpinLock#tryLock}
 method?</EM>
 </OL>
 Once these questions, and the issue of using MCS locking in {@link SpinLock},
 have been investigated, a larger performance issue
 comes into view.  A number of different light-weight locking schemes have
 been proposed over the years (see the last several OOPSLA proceedings).  It
 should be possible to implement each of them in RVM and compare their
 performance.
 </p>
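
 <p>
 A hedged sketch of the heavy-lock lifecycle as exposed by this class
 (names are from this file; in practice inflation and these calls are
 driven from {@link ThinLock} rather than written by hand):
 <pre>
   Lock l = Lock.allocate();      // obtain a free heavy-weight lock
   l.setLockedObject(o);          // associate it with the object o
   if (l.lockHeavy(o)) {          // false means the attempt must be retried
     // ... critical section ...
     l.unlockHeavy(o);            // may deflate the lock and free it
   }
 </pre>
 </p>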

 @see java.lang.Object
 @see ThinLock
 @see SpinLock
 */

@Uninterruptible
public final class Lock implements Constants {
  /****************************************************************************
   * Constants
   */

  /** do debug tracing? */
  protected static final boolean trace = false;
  /** Control the gathering of statistics */
  public static final boolean STATS = false;

  /** The (fixed) number of entries in the lock table spine */
  protected static final int LOCK_SPINE_SIZE = 128;
  /** The log size of each chunk in the spine */
  protected static final int LOG_LOCK_CHUNK_SIZE = 11;
  /** The size of each chunk in the spine */
  protected static final int LOCK_CHUNK_SIZE = 1 << LOG_LOCK_CHUNK_SIZE;
  /** The mask used to get the chunk-level index */
  protected static final int LOCK_CHUNK_MASK = LOCK_CHUNK_SIZE - 1;
  /** The maximum possible number of locks */
  protected static final int MAX_LOCKS = LOCK_SPINE_SIZE * LOCK_CHUNK_SIZE;
  /** The number of chunks to allocate on startup */
  protected static final int INITIAL_CHUNKS = 1;
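
  // With the sizes above, the table can address LOCK_SPINE_SIZE * LOCK_CHUNK_SIZE
  // = 128 * 2048 = 262144 locks: a lock id splits into a spine index
  // (id >> LOG_LOCK_CHUNK_SIZE) and a chunk index (id & LOCK_CHUNK_MASK).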

  /**
   * Should we give up or persist in the attempt to get a heavy-weight lock,
   * if its <code>mutex</code> microlock is held by another processor?
   */
  private static final boolean tentativeMicrolocking = false;

  // Heavy lock table.

  /** The table of locks. */
  private static Lock[][] locks;
  /** Used during allocation of locks within the table. */
  private static final SpinLock lockAllocationMutex = new SpinLock();
  /** The number of chunks in the spine that have been physically allocated */
  private static int chunksAllocated;
  /** The number of locks allocated (these may either be in use, on a global
   * freelist, or on a thread's freelist). */
  private static int nextLockIndex;

  // Global free list.

  /** The head of the global free-lock list */
  private static Lock globalFreeLock;
  /** The number of locks held on the global free list. */
  private static int globalFreeLocks;
  /** The total number of allocation operations. */
  private static int globalLocksAllocated;
  /** The total number of free operations. */
  private static int globalLocksFreed;

  // Statistics

  /** Number of lock operations */
  public static int lockOperations;
  /** Number of unlock operations */
  public static int unlockOperations;
  /** Number of deflations */
  public static int deflations;

  /****************************************************************************
   * Instance
   */

  /** The object being locked (if any). */
  protected Object lockedObject;
  /** The id of the thread that owns this lock (if any). */
  protected int ownerId;
  /** The number of times the owning thread (if any) has acquired this lock. */
  protected int recursionCount;
  /** A spin lock to handle contention for the data structures of this lock. */
  public final SpinLock mutex;
  /** Is this lock currently being used? */
  protected boolean active;
  /** The next free lock on the free lock list */
  private Lock nextFreeLock;
  /** This lock's index in the lock table */
  protected int index;
  /** Queue for entering the lock, guarded by mutex. */
  ThreadQueue entering;
  /** Queue for waiting on a notify, guarded by mutex as well. */
  ThreadQueue waiting;

  /**
   * A heavy weight lock to handle extreme contention and wait/notify
   * synchronization.
   */
  public Lock() {
    mutex = new SpinLock();
    entering = new ThreadQueue();
    waiting = new ThreadQueue();
  }

  /**
   * Acquires this heavy-weight lock on the indicated object.
   *
   * @param o the object to be locked
   * @return true if the lock succeeds, false otherwise
   */
  @Unpreemptible
  public boolean lockHeavy(Object o) {
    if (tentativeMicrolocking) {
      if (!mutex.tryLock()) {
        return false;
      }
    } else {
      mutex.lock();  // Note: thread switching is not allowed while mutex is held.
    }
    return lockHeavyLocked(o);
  }

  /**
   * Completes the task of acquiring the heavy lock, assuming that the mutex
   * is already acquired (locked).
   *
   * @param o the object to be locked
   * @return true if the lock succeeds, false if the attempt must be retried
   */
  @Unpreemptible
  public boolean lockHeavyLocked(Object o) {
    if (lockedObject != o) { // lock disappeared before we got here
      mutex.unlock(); // thread switching benign
      return false;
    }
    if (STATS) lockOperations++;
    RVMThread me = RVMThread.getCurrentThread();
    int threadId = me.getLockingId();
    if (ownerId == threadId) {
      recursionCount++;
    } else if (ownerId == 0) {
      ownerId = threadId;
      recursionCount = 1;
    } else {
      // the lock is held by another thread: queue up, release the mutex, and
      // park until the unlocker dequeues us; then report failure so the
      // caller retries
      entering.enqueue(me);
      mutex.unlock();
      me.monitor().lockNoHandshake();
      while (entering.isQueued(me)) {
        me.monitor().waitWithHandshake(); // this may spuriously return
      }
      me.monitor().unlock();
      return false;
    }
    mutex.unlock(); // thread-switching benign
    return true;
  }
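
  // Note for callers: a false return from lockHeavy does not mean the
  // synchronization failed, only that the attempt must be retried (the lock
  // may have been deflated first, or this thread was queued and has just
  // been woken).  The retry loop is assumed to live in ThinLock's inflation
  // path rather than here.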

  @UnpreemptibleNoWarn
  private static void raiseIllegalMonitorStateException(String msg, Object o) {
    throw new IllegalMonitorStateException(msg + o);
  }

  /**
   * Releases this heavy-weight lock on the indicated object.
   *
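   * If, after this release, no thread is entering or waiting on the lock,
   * the heavy lock is deflated back to a thin lock (when the object's class
   * supports thin locks) and freed.
   *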
   * @param o the object to be unlocked
   */
  @Unpreemptible
  public void unlockHeavy(Object o) {
    boolean deflated = false;
    mutex.lock(); // Note: thread switching is not allowed while mutex is held.
    RVMThread me = RVMThread.getCurrentThread();
    if (ownerId != me.getLockingId()) {
      mutex.unlock(); // thread-switching benign
      raiseIllegalMonitorStateException("heavy unlocking", o);
    }
    recursionCount--;
    if (0 < recursionCount) {
      mutex.unlock(); // thread-switching benign
      return;
    }
    if (STATS) unlockOperations++;
    ownerId = 0;
    RVMThread toAwaken = entering.dequeue();
    if (toAwaken == null && entering.isEmpty() && waiting.isEmpty()) { // heavy lock can be deflated
      // Possible project: decide on a heuristic to control when lock should be deflated
      Offset lockOffset = Magic.getObjectType(o).getThinLockOffset();
      if (!lockOffset.isMax()) { // deflate heavy lock
        deflate(o, lockOffset);
        deflated = true;
      }
    }
    mutex.unlock(); // does a Magic.sync();  (thread-switching benign)
    if (toAwaken != null) {
      toAwaken.monitor().lockedBroadcastNoHandshake();
    }
  }

  /**
   * Disassociates this heavy-weight lock from the indicated object.
   * This lock is not held, nor are any threads on its queues.  Note:
   * the mutex for this lock is held when deflate is called.
   *
   * @param o the object from which this lock is to be disassociated
   * @param lockOffset the offset of the thin lock word within the object
   */
  private void deflate(Object o, Offset lockOffset) {
    if (VM.VerifyAssertions) {
      VM._assert(lockedObject == o);
      VM._assert(recursionCount == 0);
      VM._assert(entering.isEmpty());
      VM._assert(waiting.isEmpty());
    }
    if (STATS) deflations++;
    ThinLock.markDeflated(o, lockOffset, index);
    lockedObject = null;
    free(this);
  }

  /**
   * Set the owner of a lock.
   *
   * @param id The thread id of the owner.
   */
  public void setOwnerId(int id) {
    ownerId = id;
  }

  /**
   * Get the thread id of the current owner of the lock.
   *
   * @return the owner's thread id
   */
  public int getOwnerId() {
    return ownerId;
  }

  /**
   * Update the lock's recursion count.
   *
   * @param c the new recursion count
   */
  public void setRecursionCount(int c) {
    recursionCount = c;
  }

  /**
   * Get the lock's recursion count.
   *
   * @return the number of times the owner has acquired this lock
   */
  public int getRecursionCount() {
    return recursionCount;
  }

  /**
   * Set the object that this lock is referring to.
   *
   * @param o the object this lock protects
   */
  public void setLockedObject(Object o) {
    lockedObject = o;
  }

  /**
   * Get the object that this lock is referring to.
   *
   * @return the object this lock protects
   */
  public Object getLockedObject() {
    return lockedObject;
  }

  /**
   * Dump threads blocked trying to get this lock
   */
  protected void dumpBlockedThreads() {
    VM.sysWrite(" entering: ");
    entering.dump();
  }

  /**
   * Dump threads waiting to be notified on this lock
   */
  protected void dumpWaitingThreads() {
    VM.sysWrite(" waiting: ");
    waiting.dump();
  }

  /**
   * Reports the state of a heavy-weight lock, via {@link VM#sysWrite}.
   */
  private void dump() {
    if (!active) {
      return;
    }
    VM.sysWrite("Lock ");
    VM.sysWriteInt(index);
    VM.sysWrite(":\n");
    VM.sysWrite(" lockedObject: ");
    VM.sysWriteHex(Magic.objectAsAddress(lockedObject));
    VM.sysWrite("   thin lock = ");
    VM.sysWriteHex(Magic.objectAsAddress(lockedObject).loadAddress(ObjectModel.defaultThinLockOffset()));
    VM.sysWrite(" object type = ");
    VM.sysWrite(Magic.getObjectType(lockedObject).getDescriptor());
    VM.sysWriteln();

    VM.sysWrite(" ownerId: ");
    VM.sysWriteInt(ownerId);
    VM.sysWrite(" (");
    VM.sysWriteInt(ownerId >>> ThinLockConstants.TL_THREAD_ID_SHIFT);
    VM.sysWrite(") recursionCount: ");
    VM.sysWriteInt(recursionCount);
    VM.sysWriteln();
    dumpBlockedThreads();
    dumpWaitingThreads();

    VM.sysWrite(" mutexLatestContender: ");
    if (mutex.latestContender == null) {
      VM.sysWrite("<null>");
    } else {
      VM.sysWriteInt(mutex.latestContender.getThreadSlot());
    }
    VM.sysWrite("\n");
  }

  /**
   * Is this lock blocking thread t?
   *
   * @param t the thread to check
   * @return true if t is on this lock's entering queue
   */
  protected boolean isBlocked(RVMThread t) {
    return entering.isQueued(t);
  }

  /**
   * Is thread t waiting for notification on this lock?
   *
   * @param t the thread to check
   * @return true if t is on this lock's waiting queue
   */
  protected boolean isWaiting(RVMThread t) {
    return waiting.isQueued(t);
  }

  /****************************************************************************
   * Static Lock Table
   */

  /**
   * Sets up the data structures for holding heavy-weight locks.
   */
  @Interruptible
  public static void init() {
    nextLockIndex = 1;
    locks = new Lock[LOCK_SPINE_SIZE][];
    for (int i = 0; i < INITIAL_CHUNKS; i++) {
      chunksAllocated++;
      locks[i] = new Lock[LOCK_CHUNK_SIZE];
    }
    if (VM.VerifyAssertions) {
      // check that each potential lock is addressable
      VM._assert(((MAX_LOCKS - 1) <=
                  ThinLockConstants.TL_LOCK_ID_MASK.rshl(ThinLockConstants.TL_LOCK_ID_SHIFT).toInt()) ||
                  ThinLockConstants.TL_LOCK_ID_MASK.EQ(Word.fromIntSignExtend(-1)));
    }
  }

  /**
   * Delivers up an unassigned heavy-weight lock.  Locks are allocated from
   * the current thread's cached free lock when available, then from the
   * global free list, and are freshly created as a last resort, so normally
   * little synchronization is required to obtain a lock.
   * <p>
   * Collector threads cannot use heavy-weight locks.
   *
   * @return a free Lock
   */
  @UnpreemptibleNoWarn("The caller is prepared to lose control when it allocates a lock")
  static Lock allocate() {
    RVMThread me = RVMThread.getCurrentThread();
    if (me.cachedFreeLock != null) {
      Lock l = me.cachedFreeLock;
      me.cachedFreeLock = null;
      if (trace) {
        VM.sysWriteln("Lock.allocate: returning ", Magic.objectAsAddress(l),
                      ", a cached free lock from Thread #", me.getThreadSlot());
      }
      return l;
    }

    Lock l = null;
    while (l == null) {
      if (globalFreeLock != null) {
        lockAllocationMutex.lock();
        l = globalFreeLock;
        if (l != null) {
          globalFreeLock = l.nextFreeLock;
          l.nextFreeLock = null;
          l.active = true;
          globalFreeLocks--;
        }
        lockAllocationMutex.unlock();
        if (trace && l != null) {
          VM.sysWriteln("Lock.allocate: returning ", Magic.objectAsAddress(l),
                        " from the global freelist for Thread #", me.getThreadSlot());
        }
      } else {
        l = new Lock(); // may cause thread switch (and processor loss)
        lockAllocationMutex.lock();
        if (globalFreeLock == null) {
          // ok, it's still correct for us to be adding a new lock
          if (nextLockIndex >= MAX_LOCKS) {
            VM.sysWriteln("Too many fat locks"); // make MAX_LOCKS bigger? we can keep going??
            VM.sysFail("Exiting VM with fatal error");
          }
          l.index = nextLockIndex++;
          globalLocksAllocated++;
        } else {
          l = null; // someone added to the freelist, try again
        }
        lockAllocationMutex.unlock();
        if (l != null) {
          if (l.index >= numLocks()) {
            /* We need to grow the table */
            growLocks(l.index);
          }
          addLock(l);
          l.active = true;
          /* make sure other processors see lock initialization.
           * Note: Derek and I BELIEVE that an isync is not required in the other processor because the lock is newly allocated - Bowen */
          Magic.sync();
        }
        if (trace && l != null) {
          VM.sysWriteln("Lock.allocate: returning ", Magic.objectAsAddress(l),
                        ", a freshly allocated lock for Thread #",
                        me.getThreadSlot());
        }
      }
    }
    return l;
  }

  /**
   * Recycles an unused heavy-weight lock.  Locks are freed to the current
   * thread's cached free lock when that slot is empty, otherwise to the
   * global free list, so normally little synchronization is required to
   * release a lock.
   *
   * @param l the lock to free
   */
  protected static void free(Lock l) {
    l.active = false;
    RVMThread me = RVMThread.getCurrentThread();
    if (me.cachedFreeLock == null) {
      if (trace) {
        VM.sysWriteln("Lock.free: setting ", Magic.objectAsAddress(l),
                      " as the cached free lock for Thread #",
                      me.getThreadSlot());
      }
      me.cachedFreeLock = l;
    } else {
      if (trace) {
        VM.sysWriteln("Lock.free: returning ", Magic.objectAsAddress(l),
                      " to the global freelist for Thread #",
                      me.getThreadSlot());
      }
      returnLock(l);
    }
  }

  /**
   * Return a lock to the global free list.
   *
   * @param l the lock to return
   */
  static void returnLock(Lock l) {
    if (trace) {
      VM.sysWriteln("Lock.returnLock: returning ", Magic.objectAsAddress(l),
                    " to the global freelist for Thread #",
                    RVMThread.getCurrentThreadSlot());
    }
    lockAllocationMutex.lock();
    l.nextFreeLock = globalFreeLock;
    globalFreeLock = l;
    globalFreeLocks++;
    globalLocksFreed++;
    lockAllocationMutex.unlock();
  }

  /**
   * Grow the locks table by allocating a new spine chunk.
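   * For example, if only the first chunk (lock ids 0..2047) has been
   * allocated, growing for a lock with index 5000 fills in spine slots 1
   * and 2.
   *
   * @param id the lock id that must become addressable in the table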
   */
  @UnpreemptibleNoWarn("The caller is prepared to lose control when it allocates a lock")
  static void growLocks(int id) {
    int spineId = id >> LOG_LOCK_CHUNK_SIZE;
    if (spineId >= LOCK_SPINE_SIZE) {
      VM.sysFail("Cannot grow lock array greater than maximum possible index");
    }
    for (int i = chunksAllocated; i <= spineId; i++) {
      if (locks[i] != null) {
        /* We were beaten to it */
        continue;
      }

      /* Allocate the chunk */
      Lock[] newChunk = new Lock[LOCK_CHUNK_SIZE];

      lockAllocationMutex.lock();
      if (locks[i] == null) {
        /* We got here first */
        locks[i] = newChunk;
        chunksAllocated++;
      }
      lockAllocationMutex.unlock();
    }
  }

  /**
   * Return the number of lock slots that have been allocated. This provides
   * the range of valid lock ids.
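   * Initially this is INITIAL_CHUNKS * LOCK_CHUNK_SIZE = 2048 slots.
   *
   * @return the number of allocated lock slots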
   */
  public static int numLocks() {
    return chunksAllocated * LOCK_CHUNK_SIZE;
  }

  /**
   * Read a lock from the lock table by id.
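   * For example, {@code getLock(5000)} reads spine slot {@code 5000 >> 11 == 2}
   * and chunk slot {@code 5000 & 2047 == 904}.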
   *
   * @param id The lock id
   * @return The lock object.
   */
  @Inline
  public static Lock getLock(int id) {
    return locks[id >> LOG_LOCK_CHUNK_SIZE][id & LOCK_CHUNK_MASK];
  }

  /**
   * Add a lock to the lock table.
   *
   * @param l The lock object
   */
  @Uninterruptible
  public static void addLock(Lock l) {
    Lock[] chunk = locks[l.index >> LOG_LOCK_CHUNK_SIZE];
    int index = l.index & LOCK_CHUNK_MASK;
    Services.setArrayUninterruptible(chunk, index, l);
  }

  /**
   * Dump the lock table.
   */
  public static void dumpLocks() {
    for (int i = 0; i < numLocks(); i++) {
      Lock l = getLock(i);
      if (l != null) {
        l.dump();
      }
    }
    VM.sysWrite("\n");
    VM.sysWrite("lock availability stats: ");
    VM.sysWriteInt(globalLocksAllocated);
    VM.sysWrite(" locks allocated, ");
    VM.sysWriteInt(globalLocksFreed);
    VM.sysWrite(" locks freed, ");
    VM.sysWriteInt(globalFreeLocks);
    VM.sysWrite(" free locks\n");
  }

  /**
   * Count the number of locks held by a thread.
   *
   * @param id the thread locking ID we're counting for
   * @return number of locks held
   */
  public static int countLocksHeldByThread(int id) {
    int count = 0;
    for (int i = 0; i < numLocks(); i++) {
      Lock l = getLock(i);
      if (l != null && l.active && l.ownerId == id && l.recursionCount > 0) {
        count++;
      }
    }
    return count;
  }

  /**
   * Scan the lock queues for a thread and report its state.
   *
   * @param t the thread to look for
   * @return a description of the thread's state, or null if the thread is
   *         not found on any lock queue
   */
  @Interruptible
  public static String getThreadState(RVMThread t) {
    for (int i = 0; i < numLocks(); i++) {
      Lock l = getLock(i);
      if (l == null || !l.active) continue;
      if (l.isBlocked(t)) return ("waitingForLock(blocked)" + i);
      if (l.isWaiting(t)) return "waitingForNotification(waiting)";
    }
    return null;
  }

  /****************************************************************************
   * Statistics
   */

  /**
   * Set up callbacks to report statistics.
   */
  @Interruptible
  public static void boot() {
    if (STATS) {
      Callbacks.addExitMonitor(new Lock.ExitMonitor());
      Callbacks.addAppRunStartMonitor(new Lock.AppRunStartMonitor());
    }
  }

  /**
   * Initialize counts in preparation for gathering statistics
   */
  private static final class AppRunStartMonitor implements Callbacks.AppRunStartMonitor {
    @Override
    public void notifyAppRunStart(String app, int value) {
      lockOperations = 0;
      unlockOperations = 0;
      deflations = 0;

      ThinLock.notifyAppRunStart("", 0);
    }
  }

  /**
   * Report statistics at the end of execution.
   */
  private static final class ExitMonitor implements Callbacks.ExitMonitor {
    @Override
    public void notifyExit(int value) {
      int totalLocks = lockOperations + ThinLock.fastLocks + ThinLock.slowLocks;

      RVMThread.dumpStats();
      VM.sysWrite(" notifyAll operations\n");
      VM.sysWrite("FatLocks: ");
      VM.sysWrite(lockOperations);
      VM.sysWrite(" locks");
      Services.percentage(lockOperations, totalLocks, "all lock operations");
      VM.sysWrite("FatLocks: ");
      VM.sysWrite(unlockOperations);
      VM.sysWrite(" unlock operations\n");
      VM.sysWrite("FatLocks: ");
      VM.sysWrite(deflations);
      VM.sysWrite(" deflations\n");

      ThinLock.notifyExit(totalLocks);
      VM.sysWriteln();

      VM.sysWrite("lock availability stats: ");
      VM.sysWriteInt(globalLocksAllocated);
      VM.sysWrite(" locks allocated, ");
      VM.sysWriteInt(globalLocksFreed);
      VM.sysWrite(" locks freed, ");
      VM.sysWriteInt(globalFreeLocks);
      VM.sysWrite(" free locks\n");
    }
  }
}