001    /*
002     *  This file is part of the Jikes RVM project (http://jikesrvm.org).
003     *
004     *  This file is licensed to You under the Eclipse Public License (EPL);
005     *  You may not use this file except in compliance with the License. You
006     *  may obtain a copy of the License at
007     *
008     *      http://www.opensource.org/licenses/eclipse-1.0.php
009     *
010     *  See the COPYRIGHT.txt file distributed with this work for information
011     *  regarding copyright ownership.
012     */
013    package org.mmtk.policy;
014    
015    import org.mmtk.utility.alloc.BlockAllocator;
016    import org.mmtk.utility.alloc.EmbeddedMetaData;
017    import org.mmtk.utility.heap.FreeListPageResource;
018    import org.mmtk.utility.heap.Map;
019    import org.mmtk.utility.heap.VMRequest;
020    import org.mmtk.utility.Constants;
021    import org.mmtk.utility.Conversions;
022    import org.mmtk.utility.Memory;
023    
024    import org.mmtk.vm.Lock;
025    import org.mmtk.vm.VM;
026    
027    import org.vmmagic.pragma.*;
028    import org.vmmagic.unboxed.*;
029    
030    /**
031     * Each instance of this class corresponds to one mark-sweep *space*.
032     * Each of the instance methods of this class may be called by any
033     * thread (i.e. synchronization must be explicit in any instance or
 * class method).  This contrasts with MarkSweepLocal, whose instances
 * correspond to *plan* instances and therefore to kernel threads.
 * Thus, unlike this class, synchronization is not necessary in the
 * instance methods of MarkSweepLocal.
038     */
039    @Uninterruptible
040    public abstract class SegregatedFreeListSpace extends Space implements Constants {
041    
042      /****************************************************************************
   *
   * Class variables
   */
046    
047      /**
048       *
049       */
050      protected static final boolean LAZY_SWEEP = true;
051      private static final boolean COMPACT_SIZE_CLASSES = false;
052      protected static final int MIN_CELLS = 6;
053      protected static final int MAX_CELLS = 99; // (1<<(INUSE_BITS-1))-1;
054      protected static final int MAX_CELL_SIZE = 8<<10;
055      public static final int MAX_FREELIST_OBJECT_BYTES = MAX_CELL_SIZE;
056    
057      // live bits etc
058      private static final int OBJECT_LIVE_SHIFT = LOG_MIN_ALIGNMENT; // 4 byte resolution
059      private static final int LOG_BIT_COVERAGE = OBJECT_LIVE_SHIFT;
060      private static final int LOG_LIVE_COVERAGE = LOG_BIT_COVERAGE + LOG_BITS_IN_BYTE;
061      private static final int LIVE_BYTES_PER_REGION = 1 << (EmbeddedMetaData.LOG_BYTES_IN_REGION - LOG_LIVE_COVERAGE);
062      private static final Word WORD_SHIFT_MASK = Word.one().lsh(LOG_BITS_IN_WORD).minus(Extent.one());
063      private static final int LOG_LIVE_WORD_STRIDE = LOG_LIVE_COVERAGE + LOG_BYTES_IN_WORD;
064      private static final Extent LIVE_WORD_STRIDE = Extent.fromIntSignExtend(1<<LOG_LIVE_WORD_STRIDE);
065      private static final Word LIVE_WORD_STRIDE_MASK = LIVE_WORD_STRIDE.minus(1).toWord().not();
066      private static final int NET_META_DATA_BYTES_PER_REGION = BlockAllocator.META_DATA_BYTES_PER_REGION + LIVE_BYTES_PER_REGION;
067      protected static final int META_DATA_PAGES_PER_REGION_WITH_BITMAP = Conversions.bytesToPages(Extent.fromIntSignExtend(NET_META_DATA_BYTES_PER_REGION));
068      protected static final int META_DATA_PAGES_PER_REGION_NO_BITMAP = Conversions.bytesToPages(Extent.fromIntSignExtend(BlockAllocator.META_DATA_BYTES_PER_REGION));
069      private static final Extent META_DATA_OFFSET = BlockAllocator.META_DATA_EXTENT;
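
  /*
   * Illustrative arithmetic for the live-bit constants above (not normative;
   * it assumes a typical 32-bit configuration with LOG_MIN_ALIGNMENT == 2,
   * LOG_BITS_IN_BYTE == 3, LOG_BYTES_IN_WORD == 2, and 4MB metadata regions,
   * i.e. EmbeddedMetaData.LOG_BYTES_IN_REGION == 22):
   *
   *   OBJECT_LIVE_SHIFT     = 2             -> one live bit per 4 bytes of heap
   *   LOG_LIVE_COVERAGE     = 2 + 3 = 5     -> one live byte covers 32 bytes of heap
   *   LIVE_BYTES_PER_REGION = 1 << (22 - 5) -> 128KB of live bits per region
   *   LOG_LIVE_WORD_STRIDE  = 5 + 2 = 7     -> one 32-bit live word covers 128 bytes
   */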
070    
071    
072      // calculate worst case fragmentation very conservatively
073      private static final int NEW_SIZECLASS_OVERHEAD = sizeClassCount();  // one page wasted per size class
074      private static final int METADATA_OVERHEAD = META_DATA_PAGES_PER_REGION_WITH_BITMAP; // worst case scenario
075      public static final float WORST_CASE_FRAGMENTATION = 1 + ((NEW_SIZECLASS_OVERHEAD + METADATA_OVERHEAD)/(float) EmbeddedMetaData.BYTES_IN_REGION);
076    
077      /****************************************************************************
078       *
079       * Instance variables
080       */
081    
082      /**
083       *
084       */
085      protected final Lock lock = VM.newLock("SegregatedFreeListGlobal");
086      protected final AddressArray consumedBlockHead = AddressArray.create(sizeClassCount());
087      protected final AddressArray flushedBlockHead = AddressArray.create(sizeClassCount());
088      protected final AddressArray availableBlockHead = AddressArray.create(sizeClassCount());
089    
090      private final int[] cellSize = new int[sizeClassCount()];
091      private final byte[] blockSizeClass = new byte[sizeClassCount()];
092      private final int[] blockHeaderSize = new int[sizeClassCount()];
093    
094      /**
095       * The caller specifies the region of virtual memory to be used for
096       * this space.  If this region conflicts with an existing space,
097       * then the constructor will fail.
098       *
099       * @param name The name of this space (used when printing error messages etc)
   * @param additionalMetadata The number of additional metadata pages per region required by the subclass.
101       * @param vmRequest An object describing the virtual memory requested.
102       */
103      public SegregatedFreeListSpace(String name, int additionalMetadata, VMRequest vmRequest) {
104        super(name, false, false, true, vmRequest);
105        initSizeClasses();
106        int totalMetadata = additionalMetadata;
107        if (maintainSideBitmap()) {
108          totalMetadata += META_DATA_PAGES_PER_REGION_WITH_BITMAP;
109        } else {
110          totalMetadata += META_DATA_PAGES_PER_REGION_NO_BITMAP;
111        }
112        if (vmRequest.isDiscontiguous()) {
113          pr = new FreeListPageResource(this, totalMetadata);
114        } else {
115          pr = new FreeListPageResource(this, start, extent, totalMetadata);
116        }
117      }
118    
119      /**
   * Should SegregatedFreeListSpace manage a side bitmap to keep track of live objects?
   *
   * @return {@code true} if a side bitmap of live bits should be maintained
   */
122      protected abstract boolean maintainSideBitmap();
123    
124      /**
   * Do we need to preserve free lists as we move blocks around?
   *
   * @return {@code true} if the free list for each block must be preserved
   */
127      protected abstract boolean preserveFreeList();
128    
129      /****************************************************************************
130       *
131       * Allocation
132       */
133    
134      /**
135       * Return a block to the global pool.
136       *
137       * @param block The block to return
138       * @param sizeClass The size class
139       */
140      public void returnConsumedBlock(Address block, int sizeClass) {
141        returnBlock(block, sizeClass, Address.zero());
142      }
143    
144      /**
145       * Return a block to the global pool.
146       *
147       * @param block The block to return
148       * @param sizeClass The size class
149       * @param freeCell The first free cell in the block.
150       */
151      public void returnBlock(Address block, int sizeClass, Address freeCell) {
152        if (VM.VERIFY_ASSERTIONS) {
153          VM.assertions._assert(BlockAllocator.getNext(block).isZero());
154        }
155        if (preserveFreeList()) {
156          setFreeList(block, freeCell);
157        }
158        lock.acquire();
159        BlockAllocator.setNext(block, consumedBlockHead.get(sizeClass));
160        consumedBlockHead.set(sizeClass, block);
161        lock.release();
162      }
163    
164      /**
   * Acquire a new block from the global pool to allocate into. This method
   * will either return a block with a non-empty free list, or zero when
   * allocation fails.
168       *
169       * This method will populate the passed in free list for the given size
170       * class and return the address of the block.
171       *
172       * @param sizeClass The size class to allocate into
173       * @param freeList The free list to populate
   * @return The address of the block, or zero if allocation failed
175       */
176      public Address getAllocationBlock(int sizeClass, AddressArray freeList) {
177        lock.acquire();
178        Address block;
179        while(!(block = availableBlockHead.get(sizeClass)).isZero()) {
180          availableBlockHead.set(sizeClass, BlockAllocator.getNext(block));
181          lock.release();
182    
183          /* This block is no longer on any list */
184          BlockAllocator.setNext(block, Address.zero());
185    
186          /* Can we allocate into this block? */
187          Address cell = advanceToBlock(block, sizeClass);
188          if (!cell.isZero()) {
189            freeList.set(sizeClass, cell);
190            return block;
191          }
192    
193          /* Block was full */
194          lock.acquire();
195          BlockAllocator.setNext(block, consumedBlockHead.get(sizeClass));
196          consumedBlockHead.set(sizeClass, block);
197        }
198        lock.release();
199        return expandSizeClass(sizeClass, freeList);
200      }
201    
202      /**
203       * Expand a particular size class, allocating a new block, breaking
204       * the block into cells and placing those cells on a free list for
   * that block.  The free list for this size class is set to point at
   * the first cell and the address of the new block is returned.<p>
207       *
208       * <b>This is guaranteed to return pre-zeroed cells</b>
209       *
210       * @param sizeClass The size class to be expanded
211       * @param freeList The free list to populate.
   * @return The block that was just allocated, or zero if allocation failed.
213       */
214      @Inline
215      private Address expandSizeClass(int sizeClass, AddressArray freeList) {
216        Address block = BlockAllocator.alloc(this, blockSizeClass[sizeClass]);
217    
218        if (block.isZero()) {
219          return Address.zero();
220        }
221    
222        BlockAllocator.setNext(block, Address.zero());
223        BlockAllocator.setAllClientSizeClass(block, blockSizeClass[sizeClass], (byte) sizeClass);
224    
225        notifyNewBlock(block, sizeClass);
226    
227        int cellExtent = cellSize[sizeClass];
228        int blockSize = BlockAllocator.blockSize(blockSizeClass[sizeClass]);
229        int useableBlockSize = blockSize - blockHeaderSize[sizeClass];
230        Address firstCell = block.plus(blockHeaderSize[sizeClass]);
231        Address sentinel = block.plus(blockSize);
232    
233        /* pre-zero the block */
234        VM.memory.zero(false, firstCell, Extent.fromIntZeroExtend(useableBlockSize));
235    
236        /* construct the free list */
237        Address nextCell;
238        Address cell = firstCell;
239        while ((nextCell = cell.plus(cellExtent)).LT(sentinel)) {
240          cell.store(nextCell);
241          cell = nextCell;
242        }
243    
244        /* Populate the free list */
245        freeList.set(sizeClass, firstCell);
246        return block;
247      }
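
  /*
   * Sketch of the block layout produced by expandSizeClass() above: after the
   * block header the block is divided into equal-sized cells, and each free
   * cell's first word holds the address of the next free cell.  Because the
   * block is pre-zeroed, the last cell's word is left zero, terminating the
   * free list:
   *
   *   block -> [ header | cell0 -> cell1 -> cell2 -> ... -> cellN -> 0 ]
   */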
248    
249      /****************************************************************************
250       *
251       * Block management
252       */
253    
254      /**
255       * Initialize the size class data structures.
256       */
257      private void initSizeClasses() {
258        for (int sc = 0; sc < sizeClassCount(); sc++) {
259          cellSize[sc] = getBaseCellSize(sc);
260          for (byte blk = 0; blk < BlockAllocator.BLOCK_SIZE_CLASSES; blk++) {
261            int usableBytes = BlockAllocator.blockSize(blk);
262            int cells = usableBytes / cellSize[sc];
263            blockSizeClass[sc] = blk;
        /* cells must start at a multiple of MIN_ALIGNMENT; since
           cellSize is also a multiple of MIN_ALIGNMENT, letting the
           header absorb the remainder does the trick: */
267            blockHeaderSize[sc] = BlockAllocator.blockSize(blk) - cells * cellSize[sc];
268            if (((usableBytes < BYTES_IN_PAGE) && (cells*2 > MAX_CELLS)) ||
269                ((usableBytes > (BYTES_IN_PAGE>>1)) && (cells > MIN_CELLS)))
270              break;
271          }
272        }
273      }
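
  /*
   * Worked example for the header-size computation above (illustrative only;
   * the concrete numbers assume 4KB pages and a 4096-byte block size class,
   * which may not hold for every configuration): with cellSize[sc] == 48,
   * cells = 4096 / 48 = 85 and blockHeaderSize[sc] = 4096 - 85 * 48 = 16.
   * Since both the block size and the cell size are multiples of
   * MIN_ALIGNMENT, the header, and hence the first cell, is aligned too.
   */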
274    
275      /**
276       * Get the size class for a given number of bytes.<p>
277       *
278       * We use size classes based on a worst case internal fragmentation
   * loss target of 1/8.  In fact, across sizes from 8 bytes to 512 bytes,
   * the average worst case loss is 13.3%, giving an expected loss
281       * (assuming uniform distribution) of about 7%.  We avoid using the
282       * Lea class sizes because they were so numerous and therefore
283       * liable to lead to excessive inter-class-size fragmentation.<p>
284       *
285       * This method may segregate arrays and scalars (currently it does
286       * not).<p>
287       *
288       * This method should be more intelligent and take alignment requests
   * into consideration. The issue is that the block header, which can
   * be varied by subclasses, can change the alignment of the cells.<p>
292       *
293       * @param bytes The number of bytes required to accommodate the object
294       * to be allocated.
295       * @return The size class capable of accommodating the allocation request.
296       */
297      @Inline
298      public final int getSizeClass(int bytes) {
299        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert((bytes > 0) && (bytes <= MAX_CELL_SIZE));
300    
301        int sz1 = bytes - 1;
302    
303        if (BYTES_IN_ADDRESS == 4) { // 32-bit
304          if (COMPACT_SIZE_CLASSES)
305            return (
306                (sz1 <=  31) ?      (sz1 >>  2) : //    4 bytes apart
307                (sz1 <=  63) ?  4 + (sz1 >>  3) : //    8 bytes apart
308                (sz1 <=  95) ?  8 + (sz1 >>  4) : //   16 bytes apart
309                (sz1 <= 223) ? 14 + (sz1 >>  6) : //   64 bytes apart
310                (sz1 <= 734) ? 17 + (sz1 >>  8) : //  256 bytes apart
311                               20 + (sz1 >> 10)); // 1024 bytes apart
312          else
313            return (
314                (sz1 <=   63) ?      (sz1 >>  2) : //    4 bytes apart
315                (sz1 <=  127) ? 12 + (sz1 >>  4) : //   16 bytes apart
316                (sz1 <=  255) ? 16 + (sz1 >>  5) : //   32 bytes apart
317                (sz1 <=  511) ? 20 + (sz1 >>  6) : //   64 bytes apart
318                (sz1 <= 2047) ? 26 + (sz1 >>  8) : //  256 bytes apart
319                                32 + (sz1 >> 10)); // 1024 bytes apart
320        } else { // 64-bit
321          if (COMPACT_SIZE_CLASSES)
322            return (
323                (sz1 <=   95) ?      (sz1 >>  3) : //    8 bytes apart
324                (sz1 <=  127) ?  6 + (sz1 >>  4) : //   16 bytes apart
325                (sz1 <=  191) ? 10 + (sz1 >>  5) : //   32 bytes apart
326                (sz1 <=  383) ? 13 + (sz1 >>  6) : //   64 bytes apart
327                (sz1 <=  511) ? 16 + (sz1 >>  7) : //  128 bytes apart
328                (sz1 <= 1023) ? 19 + (sz1 >>  9) : //  512 bytes apart
329                                20 + (sz1 >> 10)); // 1024 bytes apart
330          else
331            return (
332                (sz1 <=  111) ?      (sz1 >>  3) : //    8 bytes apart
333                (sz1 <=  223) ?  7 + (sz1 >>  4) : //   16 bytes apart
334                (sz1 <=  319) ? 14 + (sz1 >>  5) : //   32 bytes apart
335                (sz1 <=  575) ? 19 + (sz1 >>  6) : //   64 bytes apart
336                (sz1 <= 2047) ? 26 + (sz1 >>  8) : //  256 bytes apart
337                                32 + (sz1 >> 10)); // 1024 bytes apart
338        }
339      }
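
  /*
   * Worked examples (illustrative only, for the default configuration in this
   * file: COMPACT_SIZE_CLASSES == false on a 32-bit target):
   *
   *   getSizeClass(48)  -> sz1 = 47, 47 <= 63,  class = 47 >> 2        = 11
   *   getSizeClass(100) -> sz1 = 99, 99 <= 127, class = 12 + (99 >> 4) = 18
   *
   * The corresponding cell sizes from getBaseCellSize() are 48 and 112 bytes,
   * so a 100-byte request loses 12 bytes to internal fragmentation.
   */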
340    
341      /**
342       * Return the size of a basic cell (i.e. not including any cell
343       * header) for a given size class.
344       *
345       * @param sc The size class in question
346       * @return The size of a basic cell (i.e. not including any cell
347       * header).
348       */
349      @Inline
350      public final int getBaseCellSize(int sc) {
351        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert((sc >= 0) && (sc < sizeClassCount()));
352    
353        if (BYTES_IN_ADDRESS == 4) { // 32-bit
354          if (COMPACT_SIZE_CLASSES)
355            return ((sc <  8) ? (sc +  1) <<  2:
356                    (sc < 12) ? (sc -  3) <<  3:
357                    (sc < 16) ? (sc -  7) <<  4:
358                    (sc < 18) ? (sc - 13) <<  6:
359                    (sc < 21) ? (sc - 16) <<  8:
360                                (sc - 19) << 10);
361          else
362            return ((sc < 16) ? (sc +  1) <<  2:
363                    (sc < 20) ? (sc - 11) <<  4:
364                    (sc < 24) ? (sc - 15) <<  5:
365                    (sc < 28) ? (sc - 19) <<  6:
366                    (sc < 34) ? (sc - 25) <<  8:
367                                (sc - 31) << 10);
368        } else { // 64-bit
369          if (COMPACT_SIZE_CLASSES)
370            return ((sc < 12) ? (sc +  1) <<  3:
371                    (sc < 14) ? (sc -  5) <<  4:
372                    (sc < 16) ? (sc -  9) <<  5:
373                    (sc < 19) ? (sc - 12) <<  6:
374                    (sc < 20) ? (sc - 15) <<  7:
375                    (sc < 21) ? (sc - 18) <<  9:
376                                (sc - 19) << 10);
377          else
378            return ((sc < 14) ? (sc +  1) <<  3:
379                    (sc < 21) ? (sc -  6) <<  4:
380                    (sc < 24) ? (sc - 13) <<  5:
381                    (sc < 28) ? (sc - 18) <<  6:
382                    (sc < 34) ? (sc - 25) <<  8:
383                                (sc - 31) << 10);
384        }
385      }
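
  /*
   * Consistency check (illustrative only, again assuming the 32-bit,
   * non-compact tables): getSizeClass(getBaseCellSize(sc)) == sc at the class
   * boundaries, e.g. getBaseCellSize(15) = 64 with getSizeClass(64) = 63 >> 2
   * = 15, and getBaseCellSize(16) = 80 with getSizeClass(80) = 12 + (79 >> 4)
   * = 16.
   */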
386    
387      /**
388       * The number of distinct size classes.
389       */
390      @Inline
391      public static int sizeClassCount() {
392        return (COMPACT_SIZE_CLASSES) ? 28 : 40;
393      }
394    
395      /****************************************************************************
396       *
397       * Preserving (saving & restoring) free lists
398       */
399    
400      /**
401       * Prepare a block for allocation, returning a free list into the block.
402       *
403       * @param block The new block
   * @param sizeClass The block's sizeclass.
   * @return The head of the block's free list, or zero if the block has no free cells
   */
406      protected abstract Address advanceToBlock(Address block, int sizeClass);
407    
408      /**
409       * Notify that a new block has been installed. This is to ensure that
410       * appropriate collection state can be initialized for the block
411       *
412       * @param block The new block
413       * @param sizeClass The block's sizeclass.
414       */
415      protected void notifyNewBlock(Address block, int sizeClass) {}
416    
417      /**
   * Should the sweep reclaim the cell containing this object (i.e. is the
   * object no longer live)?  This is only used when maintainSideBitmap() is false.
420       *
421       * @param object The object to query
422       * @return {@code true} if the cell should be reclaimed
423       */
424      protected boolean reclaimCellForObject(ObjectReference object) {
425        VM.assertions.fail("Must implement reclaimCellForObject if not maintaining side bitmap");
426        return false;
427      }
428    
429      /****************************************************************************
430       *
431       * Metadata manipulation
432       */
433    
434      /**
435       * In the case where free lists associated with each block are
436       * preserved, get the free list for a given block.
437       *
438       * @param block The block whose free list is to be found
439       * @return The free list for this block
440       */
441      @Inline
442      protected final Address getFreeList(Address block) {
443        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(preserveFreeList());
444        return BlockAllocator.getFreeListMeta(block);
445      }
446    
447      /**
448       * In the case where free lists associated with each block are
449       * preserved, set the free list for a given block.
450       *
   * @param block The block whose free list is to be set
452       * @param cell The head of the free list (i.e. the first cell in the
453       * free list).
454       */
455      @Inline
456      protected final void setFreeList(Address block, Address cell) {
457        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(preserveFreeList());
458        BlockAllocator.setFreeListMeta(block, cell);
459      }
460    
461      /****************************************************************************
462       *
463       * Collection
464       */
465    
466      /**
   * Clear all block marks for this space.  This method is important when
   * it is desirable to do partial collections, which may require that
   * block marks be explicitly cleared.
470       */
471      protected final void clearAllBlockMarks() {
472        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!maintainSideBitmap());
473        for (int sizeClass = 0; sizeClass < sizeClassCount(); sizeClass++) {
474          Extent blockSize = Extent.fromIntSignExtend(BlockAllocator.blockSize(blockSizeClass[sizeClass]));
475          /* Flushed blocks */
476          Address block = flushedBlockHead.get(sizeClass);
477          while (!block.isZero()) {
478            Address next = BlockAllocator.getNext(block);
479            clearBlockMark(block, blockSize);
480            block = next;
481          }
      /* Consumed blocks */
483          block = consumedBlockHead.get(sizeClass);
484          while (!block.isZero()) {
485            Address next = BlockAllocator.getNext(block);
486            clearBlockMark(block, blockSize);
487            block = next;
488          }
489        }
490      }
491    
492      /**
493       * Sweep all blocks for free objects.
494       *
495       * @param clearMarks should we clear block mark bits as we process.
496       */
497      protected final void sweepConsumedBlocks(boolean clearMarks) {
498        for (int sizeClass = 0; sizeClass < sizeClassCount(); sizeClass++) {
499          Extent blockSize = Extent.fromIntSignExtend(BlockAllocator.blockSize(blockSizeClass[sizeClass]));
500          Address availableHead = Address.zero();
501          /* Flushed blocks */
502          Address block = flushedBlockHead.get(sizeClass);
503          flushedBlockHead.set(sizeClass, Address.zero());
504          while (!block.isZero()) {
505            Address next = BlockAllocator.getNext(block);
506            availableHead = sweepBlock(block, sizeClass, blockSize, availableHead, clearMarks);
507            block = next;
508          }
509          /* Consumed blocks */
510          block = consumedBlockHead.get(sizeClass);
511          consumedBlockHead.set(sizeClass, Address.zero());
512          while (!block.isZero()) {
513            Address next = BlockAllocator.getNext(block);
514            availableHead = sweepBlock(block, sizeClass, blockSize, availableHead, clearMarks);
515            block = next;
516          }
517          /* Make blocks available */
518          availableBlockHead.set(sizeClass, availableHead);
519        }
520      }
521    
522      /**
   * Sweep a block, freeing it if it contains no live cells, and otherwise
   * adding it to the list given by availableHead.
   *
   * @param clearMarks should we clear block mark bits as we process.
   * @return The (possibly updated) head of the available block list.
   */
528      protected final Address sweepBlock(Address block, int sizeClass, Extent blockSize, Address availableHead, boolean clearMarks) {
529        boolean liveBlock = containsLiveCell(block, blockSize, clearMarks);
530        if (!liveBlock) {
531          BlockAllocator.setNext(block, Address.zero());
532          BlockAllocator.free(this, block);
533        } else {
534          BlockAllocator.setNext(block, availableHead);
535          availableHead = block;
536          if (!LAZY_SWEEP) {
537            setFreeList(block, makeFreeList(block, sizeClass));
538          }
539        }
540        return availableHead;
541      }
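
  /*
   * Note on lazy sweeping: when LAZY_SWEEP is true (the default above), a
   * surviving block's free list is not rebuilt here.  Subclasses are instead
   * expected to construct it on demand (e.g. via makeFreeList()) when the
   * block is next handed out through getAllocationBlock()/advanceToBlock().
   */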
542    
543      /**
544       * Eagerly consume all remaining blocks.
545       */
546      protected final void consumeBlocks() {
547        for (int sizeClass = 0; sizeClass < sizeClassCount(); sizeClass++) {
548          while (!availableBlockHead.get(sizeClass).isZero()) {
549            Address block = availableBlockHead.get(sizeClass);
550            availableBlockHead.set(sizeClass, BlockAllocator.getNext(block));
551            advanceToBlock(block, sizeClass);
552            BlockAllocator.setNext(block, consumedBlockHead.get(sizeClass));
553            consumedBlockHead.set(sizeClass, block);
554          }
555        }
556      }
557    
558      /**
   * Move all available blocks onto the flushed block list.
560       */
561      protected final void flushAvailableBlocks() {
562        for (int sizeClass = 0; sizeClass < sizeClassCount(); sizeClass++) {
563          flushedBlockHead.set(sizeClass, availableBlockHead.get(sizeClass));
564          availableBlockHead.set(sizeClass, Address.zero());
565        }
566      }
567    
568      /**
   * Does this block contain any live cells?
570       *
571       * @param block The block
572       * @param blockSize The size of the block
573       * @param clearMarks should we clear block mark bits as we process.
574       * @return {@code true} if any cells in the block are live
575       */
576      @Inline
577      protected boolean containsLiveCell(Address block, Extent blockSize, boolean clearMarks) {
578        if (maintainSideBitmap()) {
579          if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(alignToLiveStride(block).EQ(block));
580          Address cursor = getLiveWordAddress(block);
581          Address sentinel = getLiveWordAddress(block.plus(blockSize.minus(1)));
582          while (cursor.LE(sentinel)) {
583            Word live = cursor.loadWord();
584            if (!live.isZero()) {
585              return true;
586            }
587            cursor = cursor.plus(BYTES_IN_WORD);
588          }
589          return false;
590        } else {
591          boolean live = false;
592          Address cursor = block;
593          while(cursor.LT(block.plus(blockSize))) {
594            live |= BlockAllocator.checkBlockMeta(cursor);
595            if (clearMarks)
596              BlockAllocator.clearBlockMeta(cursor);
597            cursor = cursor.plus(1 << BlockAllocator.LOG_MIN_BLOCK);
598          }
599          return live;
600        }
601      }
602    
603    
604      /**
605       * Clear block marks for a block
606       *
607       * @param block The block
608       * @param blockSize The size of the block
609       */
610      @Inline
611      protected void clearBlockMark(Address block, Extent blockSize) {
612        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!maintainSideBitmap());
613        Address cursor = block;
614        while(cursor.LT(block.plus(blockSize))) {
615          BlockAllocator.clearBlockMeta(cursor);
616          cursor = cursor.plus(1 << BlockAllocator.LOG_MIN_BLOCK);
617        }
618      }
619    
620      /**
   * Is the cell containing this object live?
622       *
623       * @param object The object
624       * @return {@code true} if the cell is live
625       */
626      @Inline
627      protected boolean isCellLive(ObjectReference object) {
628        /* Must override if not using the side bitmap */
629        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(maintainSideBitmap());
630        return liveBitSet(object);
631      }
632    
633      /**
634       * Use the live bits for a block to infer free cells and thus
635       * construct a free list for the block.
636       *
637       * @param block The block to be processed
638       * @param sizeClass The size class for the block
639       * @return The head of the new free list
640       */
641      @Inline
642      protected final Address makeFreeList(Address block, int sizeClass) {
643        Extent blockSize = Extent.fromIntSignExtend(BlockAllocator.blockSize(blockSizeClass[sizeClass]));
644        Address cursor = block.plus(blockHeaderSize[sizeClass]);
645        Address lastFree = Address.zero();
646        Address firstFree = Address.zero();
647        Address end = block.plus(blockSize);
648        Extent cellExtent = Extent.fromIntSignExtend(cellSize[sizeClass]);
649        while (cursor.LT(end)) {
650          ObjectReference current = VM.objectModel.getObjectFromStartAddress(cursor);
651          boolean free = true;
652          if (!current.isNull()) {
653            free = !isCellLive(current);
654          }
655          if (free) {
656            if (firstFree.isZero()) {
657              firstFree = cursor;
658            } else {
659              lastFree.store(cursor);
660            }
661            Memory.zeroSmall(cursor, cellExtent);
662            lastFree = cursor;
663          }
664          cursor = cursor.plus(cellExtent);
665        }
666        return firstFree;
667      }
668    
669      /**
   * Sweep all blocks, applying the given sweeper to each live cell.
   *
   * @param sweeper The sweeper to apply to each live cell
   */
672      public void sweepCells(Sweeper sweeper) {
673        for (int sizeClass = 0; sizeClass < sizeClassCount(); sizeClass++) {
674          Address availableHead = Address.zero();
675          /* Flushed blocks */
676          Address block = flushedBlockHead.get(sizeClass);
677          flushedBlockHead.set(sizeClass, Address.zero());
678          while (!block.isZero()) {
679            Address next = BlockAllocator.getNext(block);
680            availableHead = sweepCells(sweeper, block, sizeClass, availableHead);
681            block = next;
682          }
683          /* Consumed blocks */
684          block = consumedBlockHead.get(sizeClass);
685          consumedBlockHead.set(sizeClass, Address.zero());
686          while (!block.isZero()) {
687            Address next = BlockAllocator.getNext(block);
688            availableHead = sweepCells(sweeper, block, sizeClass, availableHead);
689            block = next;
690          }
691          /* Make blocks available */
692          availableBlockHead.set(sizeClass, availableHead);
693        }
694      }
695    
696      /**
   * Sweep a block, freeing it if it contains no live cells, and otherwise
   * adding it to the list given by availableHead.
699       */
700      private Address sweepCells(Sweeper sweeper, Address block, int sizeClass, Address availableHead) {
701        boolean liveBlock = sweepCells(sweeper, block, sizeClass);
702        if (!liveBlock) {
703          BlockAllocator.setNext(block, Address.zero());
704          BlockAllocator.free(this, block);
705        } else {
706          BlockAllocator.setNext(block, availableHead);
707          availableHead = block;
708        }
709        return availableHead;
710      }
711    
712      /**
   * Sweep all blocks, freeing those that contain no live cells and making
   * the remainder available for allocation.<p>
715       *
716       * This is designed to be called in parallel by multiple collector threads.
717       */
718      public void parallelSweepCells(Sweeper sweeper) {
719        for (int sizeClass = 0; sizeClass < sizeClassCount(); sizeClass++) {
720          Address block;
721          while(!(block = getSweepBlock(sizeClass)).isZero()) {
722            boolean liveBlock = sweepCells(sweeper, block, sizeClass);
723            if (!liveBlock) {
724              BlockAllocator.setNext(block, Address.zero());
725              BlockAllocator.free(this, block);
726            } else {
727              lock.acquire();
728              BlockAllocator.setNext(block, availableBlockHead.get(sizeClass));
729              availableBlockHead.set(sizeClass, block);
730              lock.release();
731            }
732          }
733        }
734      }
735    
736      /**
737       * Get a block for a parallel sweep.
738       *
739       * @param sizeClass The size class of the block to sweep.
740       * @return The block or zero if no blocks remain to be swept.
741       */
742      private Address getSweepBlock(int sizeClass) {
743        lock.acquire();
744        Address block;
745    
746        /* Flushed blocks */
747        block = flushedBlockHead.get(sizeClass);
748        if (!block.isZero()) {
749          flushedBlockHead.set(sizeClass, BlockAllocator.getNext(block));
750          lock.release();
751          BlockAllocator.setNext(block, Address.zero());
752          return block;
753        }
754    
755        /* Consumed blocks */
756        block = consumedBlockHead.get(sizeClass);
757        if (!block.isZero()) {
758          consumedBlockHead.set(sizeClass, BlockAllocator.getNext(block));
759          lock.release();
760          BlockAllocator.setNext(block, Address.zero());
761          return block;
762        }
763    
764        /* All swept! */
765        lock.release();
766        return Address.zero();
767      }
768    
769      /**
   * Sweep the cells in a block, applying the sweeper to each live cell.
   *
   * @return {@code true} if the block contains any live cells
   */
772      @Inline
773      public boolean sweepCells(Sweeper sweeper, Address block, int sizeClass) {
774        Extent blockSize = Extent.fromIntSignExtend(BlockAllocator.blockSize(blockSizeClass[sizeClass]));
775        Address cursor = block.plus(blockHeaderSize[sizeClass]);
776        Address end = block.plus(blockSize);
777        Extent cellExtent = Extent.fromIntSignExtend(cellSize[sizeClass]);
778        boolean containsLive = false;
779        while (cursor.LT(end)) {
780          ObjectReference current = VM.objectModel.getObjectFromStartAddress(cursor);
781          boolean free = true;
782          if (!current.isNull()) {
783            free = !liveBitSet(current);
784            if (!free) {
785              free = sweeper.sweepCell(current);
786              if (free) unsyncClearLiveBit(current);
787            }
788          }
789          if (!free) {
790            containsLive = true;
791          }
792          cursor = cursor.plus(cellExtent);
793        }
794        return containsLive;
795      }
796    
797      /**
798       * A callback used to perform sweeping of a free list space.
799       */
800      @Uninterruptible
801      public abstract static class Sweeper {
802        public abstract boolean sweepCell(ObjectReference object);
803      }
804    
805      /****************************************************************************
806       *
807       * Live bit manipulation
808       */
809    
810      /**
811       * Atomically set the live bit for a given object
812       *
813       * @param object The object whose live bit is to be set.
814       * @return {@code true} if the bit was changed to true.
815       */
816      @Inline
817      public static boolean testAndSetLiveBit(ObjectReference object) {
818        return updateLiveBit(VM.objectModel.objectStartRef(object), true, true);
819      }
820    
821      /**
   * Mark the block containing the given object as live.
   *
   * @param object The object whose block's liveness is to be set.
825       */
826      @Inline
827      protected static void markBlock(ObjectReference object) {
828        BlockAllocator.markBlockMeta(object);
829      }
830    
831      /**
   * Mark the given block as live.
833       *
834       * @param block The block whose liveness is to be set.
835       */
836      @Inline
837      protected static void markBlock(Address block) {
838        BlockAllocator.markBlockMeta(block);
839      }
840    
841      /**
842       * Set the live bit for a given object, without using
843       * synchronization primitives---must only be used when contention
844       * for live bit is strictly not possible
845       *
   * @param object The object whose live bit is to be set.
   * @return {@code true} if the bit was changed to true.
   */
848      @Inline
849      public static boolean unsyncSetLiveBit(ObjectReference object) {
850        return updateLiveBit(VM.objectModel.refToAddress(object), true, false);
851      }
852    
853      /**
   * Set or clear the live bit for a given address.
   *
   * @param address The address whose live bit is to be updated.
   * @param set {@code true} if the bit is to be set, as opposed to cleared
   * @param atomic {@code true} if we want to perform this operation atomically
   * @return {@code true} if the live bit was clear before this operation
   */
860      @Inline
861      private static boolean updateLiveBit(Address address, boolean set, boolean atomic) {
862        Word oldValue, newValue;
863        Address liveWord = getLiveWordAddress(address);
864        Word mask = getMask(address, true);
865        if (atomic) {
866          do {
867            oldValue = liveWord.prepareWord();
868            newValue = (set) ? oldValue.or(mask) : oldValue.and(mask.not());
869          } while (!liveWord.attempt(oldValue, newValue));
870        } else {
871          oldValue = liveWord.loadWord();
872          liveWord.store(set ? oldValue.or(mask) : oldValue.and(mask.not()));
873        }
874        return oldValue.and(mask).NE(mask);
875      }
876    
877      /**
878       * Test the live bit for a given object
879       *
   * @param object The object whose live bit is to be tested.
   * @return {@code true} if the live bit is set
   */
882      @Inline
883      protected static boolean liveBitSet(ObjectReference object) {
884        return liveBitSet(VM.objectModel.refToAddress(object));
885      }
886    
887      /**
   * Test the live bit for a given address
   *
   * @param address The address whose live bit is to be tested.
   * @return {@code true} if the live bit is set
892       */
893      @Inline
894      protected static boolean liveBitSet(Address address) {
895        Address liveWord = getLiveWordAddress(address);
896        Word mask = getMask(address, true);
897        Word value = liveWord.loadWord();
898        return value.and(mask).EQ(mask);
899      }
900    
901      /**
902       * Clear the live bit for a given object
903       *
904       * @param object The object whose live bit is to be cleared.
905       */
906      @Inline
907      protected static void clearLiveBit(ObjectReference object) {
908        clearLiveBit(VM.objectModel.refToAddress(object));
909      }
910    
911      /**
912       * Clear the live bit for a given address
913       *
914       * @param address The address whose live bit is to be cleared.
915       */
916      @Inline
917      protected static void clearLiveBit(Address address) {
918        updateLiveBit(address, false, true);
919      }
920    
921      /**
   * Clear the live bit for a given object, without using synchronization primitives.
923       *
924       * @param object The object whose live bit is to be cleared.
925       */
926      @Inline
927      protected static void unsyncClearLiveBit(ObjectReference object) {
928        unsyncClearLiveBit(VM.objectModel.refToAddress(object));
929      }
930    
931      /**
   * Clear the live bit for a given address, without using synchronization primitives.
933       *
934       * @param address The address whose live bit is to be cleared.
935       */
936      @Inline
937      protected static void unsyncClearLiveBit(Address address) {
938        updateLiveBit(address, false, false);
939      }
940    
941      /**
   * Clear all live bits for a block
   *
   * @param block The block
   * @param sizeClass The size class of the block
   */
944      protected void clearLiveBits(Address block, int sizeClass) {
945        int blockSize = BlockAllocator.blockSize(blockSizeClass[sizeClass]);
946        Address cursor = getLiveWordAddress(block);
947        Address sentinel = getLiveWordAddress(block.plus(blockSize - 1));
948        while (cursor.LE(sentinel)) {
949          cursor.store(Word.zero());
950          cursor = cursor.plus(BYTES_IN_WORD);
951        }
952      }
953    
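  /**
   * Zero the live bit metadata for every metadata region in this space.
   */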
954      protected void zeroLiveBits() {
955        Extent bytes = Extent.fromIntSignExtend(EmbeddedMetaData.BYTES_IN_REGION>>LOG_LIVE_COVERAGE);
    if (contiguous) {
957          Address end = ((FreeListPageResource)pr).getHighWater();
958          Address cursor = start;
959          while (cursor.LT(end)) {
960            Address metadata = EmbeddedMetaData.getMetaDataBase(cursor).plus(META_DATA_OFFSET);
961            VM.memory.zero(false, metadata, bytes);
962            cursor = cursor.plus(EmbeddedMetaData.BYTES_IN_REGION);
963          }
964        } else {
965          for(Address cursor = headDiscontiguousRegion; !cursor.isZero(); cursor = Map.getNextContiguousRegion(cursor)) {
966            Address metadata = EmbeddedMetaData.getMetaDataBase(cursor).plus(META_DATA_OFFSET);
967            VM.memory.zero(false, metadata, bytes);
968          }
969        }
970      }
971    
972      /**
973       * Align an address so that it corresponds to a live word boundary.
974       * In other words, if the live bit for the given address is not the
975       * zeroth bit of a live word, round the address down such that it
976       * does.
977       *
978       * @param address The address to be aligned to a live word
979       * @return The given address, aligned down so that it corresponds to
980       * an address on a live word boundary.
981       */
982      private static Address alignToLiveStride(Address address) {
983        return address.toWord().and(LIVE_WORD_STRIDE_MASK).toAddress();
984      }
985    
986      /**
987       * Given an address, produce a bit mask for the live table
988       *
989       * @param address The address whose live bit mask is to be established
990       * @param set True if we want the mask for <i>setting</i> the bit,
991       * false if we want the mask for <i>clearing</i> the bit.
   * @return The appropriate bit mask for the live table.
993       */
994      @Inline
995      private static Word getMask(Address address, boolean set) {
996        int shift = address.toWord().rshl(OBJECT_LIVE_SHIFT).and(WORD_SHIFT_MASK).toInt();
997        Word rtn = Word.one().lsh(shift);
998        return (set) ? rtn : rtn.not();
999      }
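
  /*
   * Worked example (illustrative only, assuming OBJECT_LIVE_SHIFT == 2 and
   * 32-bit words): for an address whose low byte is 0x48,
   * shift = (0x48 >> 2) & 31 = 18, so getMask() returns the word 1 << 18
   * (or its complement when clearing).
   */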
1000    
1001      /**
1002       * Given an address, return the address of the live word for
1003       * that address.
1004       *
1005       * @param address The address whose live word address is to be returned
1006       * @return The address of the live word for this object
1007       */
1008      @Inline
1009      private static Address getLiveWordAddress(Address address) {
1010        Address rtn = EmbeddedMetaData.getMetaDataBase(address);
1011        return rtn.plus(META_DATA_OFFSET).plus(EmbeddedMetaData.getMetaDataOffset(address, LOG_LIVE_COVERAGE, LOG_BYTES_IN_WORD));
1012      }
1013    }