/*
 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 *  This file is licensed to You under the Eclipse Public License (EPL);
 *  You may not use this file except in compliance with the License. You
 *  may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 *  See the COPYRIGHT.txt file distributed with this work for information
 *  regarding copyright ownership.
 */
package org.mmtk.utility.heap;

import org.mmtk.plan.Plan;
import org.mmtk.policy.Space;

import static org.mmtk.policy.Space.PAGES_IN_CHUNK;
import org.mmtk.utility.alloc.EmbeddedMetaData;
import org.mmtk.utility.Conversions;
import org.mmtk.utility.GenericFreeList;
import org.mmtk.vm.VM;
import org.mmtk.utility.Constants;

import org.vmmagic.unboxed.*;
import org.vmmagic.pragma.*;

/**
 * This class manages the allocation of pages for a space.  When a
 * page is requested by the space, both a page budget and the use of
 * virtual address space are checked.  If the request for space can't
 * be satisfied (for either reason) a GC may be triggered.
 */
@Uninterruptible
public final class FreeListPageResource extends PageResource implements Constants {

  private final GenericFreeList freeList;
  private int highWaterMark = 0;
  private final int metaDataPagesPerRegion;
  private int pagesCurrentlyOnFreeList = 0;

  /**
   * Constructor
   *
   * Contiguous free list resource. The address range is pre-defined at
   * initialization time and is immutable.
   *
   * @param space The space to which this resource is attached
   * @param start The start of the address range allocated to this resource
   * @param bytes The size of the address range allocated to this resource
   */
  public FreeListPageResource(Space space, Address start, Extent bytes) {
    super(space, start);
    int pages = Conversions.bytesToPages(bytes);
    freeList = new GenericFreeList(pages);
    pagesCurrentlyOnFreeList = pages;
    this.metaDataPagesPerRegion = 0;
  }

  /**
   * Constructor
   *
   * Contiguous free list resource. The address range is pre-defined at
   * initialization time and is immutable.
   *
   * @param space The space to which this resource is attached
   * @param start The start of the address range allocated to this resource
   * @param bytes The size of the address range allocated to this resource
   * @param metaDataPagesPerRegion The number of pages of meta data
   * that are embedded in each region.
   */
  public FreeListPageResource(Space space, Address start, Extent bytes, int metaDataPagesPerRegion) {
    super(space, start);
    this.metaDataPagesPerRegion = metaDataPagesPerRegion;
    int pages = Conversions.bytesToPages(bytes);
    freeList = new GenericFreeList(pages, EmbeddedMetaData.PAGES_IN_REGION);
    pagesCurrentlyOnFreeList = pages;
    reserveMetaData(space.getExtent());
  }

  /**
   * Constructor
   *
   * Discontiguous free list resource. The address range is <i>not</i>
   * pre-defined at initialization time and is dynamically defined to
   * be some set of pages, according to demand and availability.
   *
   * @param space The space to which this resource is attached
   * @param metaDataPagesPerRegion The number of pages of meta data
   * that are embedded in each region.
   */
  public FreeListPageResource(Space space, int metaDataPagesPerRegion) {
    super(space);
    this.metaDataPagesPerRegion = metaDataPagesPerRegion;
    this.start = Space.AVAILABLE_START;
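    /* Discontiguous resources share the global page map as their free list;
     * this resource's units are identified by its ordinal within that map. */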
    freeList = new GenericFreeList(Map.globalPageMap, Map.getDiscontigFreeListPROrdinal(this));
    pagesCurrentlyOnFreeList = 0;
  }

  /**
   * Return the number of available physical pages for this resource.
   * This includes all pages currently free on the resource's free list.
   * If the resource is using discontiguous space it also includes
   * currently unassigned discontiguous space.<p>
   *
   * Note: This just considers physical pages (i.e. virtual memory pages
   * allocated for use by this resource). This calculation is orthogonal
   * to and does not consider any restrictions on the number of pages
   * this resource may actually use at any time (i.e. the number of
   * committed and reserved pages).<p>
   *
   * Note: The calculation is made on the assumption that all space that
   * could be assigned to this resource would be assigned to this resource
   * (i.e. the unused discontiguous space could just as likely be assigned
   * to another competing resource).
   *
   * @return The number of available physical pages for this resource.
   */
  @Override
  public int getAvailablePhysicalPages() {
    int rtn = pagesCurrentlyOnFreeList;
    if (!contiguous) {
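      /* Also count chunks in the global pool not yet assigned to any space,
       * conservatively leaving one chunk per registered chunk consumer, and
       * excluding each chunk's embedded meta-data pages. */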
      int chunks = Map.getAvailableDiscontiguousChunks()-Map.getChunkConsumerCount();
      if (chunks < 0) chunks = 0;
      rtn += chunks*(Space.PAGES_IN_CHUNK-metaDataPagesPerRegion);
    }
    return rtn;
  }

  /**
   * Allocate <code>pages</code> pages from this resource.<p>
   *
   * If the request can be satisfied, then ensure the pages are
   * mmapped and zeroed before returning the address of the start of
   * the region.  If the request cannot be satisfied, return zero.
   *
   * @param reservedPages The number of pages reserved due to the initial request.
   * @param requiredPages The number of pages required to be allocated.
   * @param zeroed If true, allocated pages are zeroed.
   * @return The start of the first page if successful, zero on
   * failure.
   */
  @Override
  @Inline
  protected Address allocPages(int reservedPages, int requiredPages, boolean zeroed) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(metaDataPagesPerRegion == 0 || requiredPages <= PAGES_IN_CHUNK - metaDataPagesPerRegion);
    lock();
    boolean newChunk = false;
    int pageOffset = freeList.alloc(requiredPages);
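    /* If the free list cannot satisfy the request and this is a discontiguous
     * resource, try to acquire more chunks from the global virtual memory pool. */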
    if (pageOffset == GenericFreeList.FAILURE && !contiguous) {
      pageOffset = allocateContiguousChunks(requiredPages);
      newChunk = true;
    }
    if (pageOffset == GenericFreeList.FAILURE) {
      unlock();
      return Address.zero();
    } else {
      pagesCurrentlyOnFreeList -= requiredPages;
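      /* If this allocation pushes the high-water mark into one or more
       * previously untouched meta-data regions, account for the embedded
       * meta-data pages that become reserved and committed as a result. */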
      if (pageOffset > highWaterMark) {
        if (highWaterMark == 0 || (pageOffset ^ highWaterMark) > EmbeddedMetaData.PAGES_IN_REGION) {
          int regions = 1 + ((pageOffset - highWaterMark) >> EmbeddedMetaData.LOG_PAGES_IN_REGION);
          int metapages = regions * metaDataPagesPerRegion;
          reserved += metapages;
          committed += metapages;
          newChunk = true;
        }
        highWaterMark = pageOffset;
      }
      Address rtn = start.plus(Conversions.pagesToBytes(pageOffset));
      Extent bytes = Conversions.pagesToBytes(requiredPages);
      // The meta-data portion of reserved Pages was committed above.
      commitPages(reservedPages, requiredPages);
      space.growSpace(rtn, bytes, newChunk);
      unlock();
      Mmapper.ensureMapped(rtn, requiredPages);
      if (zeroed)
        VM.memory.zero(zeroNT, rtn, bytes);
      VM.events.tracePageAcquired(space, rtn, requiredPages);
      return rtn;
    }
  }

  /**
   * Release a group of pages that were allocated together through this page
   * resource, optionally zeroing them on release and optionally memory
   * protecting them on release.
   *
   * @param first The first page in the group of pages that were
   * allocated together.
   */
  @Inline
  public void releasePages(Address first) {
    if (VM.VERIFY_ASSERTIONS)
      VM.assertions._assert(Conversions.isPageAligned(first));

    int pageOffset = Conversions.bytesToPages(first.diff(start));

    int pages = freeList.size(pageOffset);
    if (ZERO_ON_RELEASE)
      VM.memory.zero(false, first, Conversions.pagesToBytes(pages));
    /* Can't use protect here because of the chunk sizes involved!
    if (protectOnRelease.getValue())
      LazyMmapper.protect(first, pages);
     */
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(pages <= committed);

    lock();
    reserved -= pages;
    committed -= pages;
    int freed = freeList.free(pageOffset, true);
    pagesCurrentlyOnFreeList += pages;

    if (!contiguous) // only discontiguous spaces use chunks
      releaseFreeChunks(first, freed);

    unlock();

    VM.events.tracePageReleased(space, first, pages);
  }

  /**
   * The release of a page may have freed up an entire chunk or
   * set of chunks.  We need to check whether any chunks can be
   * freed, and if so, free them.
   *
   * @param freedPage The address of the page that was just freed.
   * @param pagesFreed The number of pages made available when the page was freed.
   */
  private void releaseFreeChunks(Address freedPage, int pagesFreed) {
    int pageOffset = Conversions.bytesToPages(freedPage.diff(start));

    if (metaDataPagesPerRegion > 0) {       // can only be a single chunk
      if (pagesFreed == (PAGES_IN_CHUNK - metaDataPagesPerRegion)) {
        freeContiguousChunk(Space.chunkAlign(freedPage, true));
      }
    } else {                                // may be multiple chunks
      if (pagesFreed % PAGES_IN_CHUNK == 0) {    // necessary, but not sufficient condition
        /* grow a region of chunks, starting with the chunk containing the freed page */
        int regionStart = pageOffset & ~(PAGES_IN_CHUNK - 1);
        int nextRegionStart = regionStart + PAGES_IN_CHUNK;
        /* now try to grow (end point pages are marked as non-coalescing) */
        while (regionStart >= 0 && freeList.isCoalescable(regionStart))
          regionStart -= PAGES_IN_CHUNK;
        while (nextRegionStart < GenericFreeList.MAX_UNITS && freeList.isCoalescable(nextRegionStart))
          nextRegionStart += PAGES_IN_CHUNK;
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(regionStart >= 0 && nextRegionStart < GenericFreeList.MAX_UNITS);
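        /* Only free the chunks if every page between the bounding
         * non-coalescing markers is free, i.e. the entire run of chunks is empty. */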
        if (pagesFreed == nextRegionStart - regionStart) {
          freeContiguousChunk(start.plus(Conversions.pagesToBytes(regionStart)));
        }
      }
    }
  }

  /**
   * Allocate sufficient contiguous chunks within a discontiguous region to
   * satisfy the pending request.  Note that this is purely about address space
   * allocation within a discontiguous region.  This method does not reserve
   * individual pages; it merely assigns a suitably large region of virtual
   * memory from within the discontiguous region for use by a particular space.
   *
   * @param pages The number of pages currently being requested
   * @return The offset of the first allocated page, or GenericFreeList.FAILURE
   */
  private int allocateContiguousChunks(int pages) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(metaDataPagesPerRegion == 0 || pages <= PAGES_IN_CHUNK - metaDataPagesPerRegion);
    int rtn = GenericFreeList.FAILURE;
    int requiredChunks = Space.requiredChunks(pages);
    Address region = space.growDiscontiguousSpace(requiredChunks);
    if (!region.isZero()) {
      int regionStart = Conversions.bytesToPages(region.diff(start));
      int regionEnd = regionStart + (requiredChunks*Space.PAGES_IN_CHUNK) - 1;
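      /* Mark the boundaries of the new region as non-coalescing, so pages in
       * this region can never merge with those of adjacent regions in the
       * shared free list. */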
      freeList.setUncoalescable(regionStart);
      freeList.setUncoalescable(regionEnd + 1);
      for (int p = regionStart; p < regionEnd; p += Space.PAGES_IN_CHUNK) {
        int liberated;
        if (p != regionStart)
          freeList.clearUncoalescable(p);
        liberated = freeList.free(p, true); // add chunk to our free list
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(liberated == Space.PAGES_IN_CHUNK + (p - regionStart));
        if (metaDataPagesPerRegion > 1)
          freeList.alloc(metaDataPagesPerRegion, p); // carve out space for metadata
        pagesCurrentlyOnFreeList += Space.PAGES_IN_CHUNK - metaDataPagesPerRegion;
      }
      rtn = freeList.alloc(pages); // re-do the request which triggered this call
    }
    return rtn;
  }

  /**
   * Release a single chunk from a discontiguous region.  All this does is
   * release a chunk from the virtual address space associated with this
   * discontiguous space.
   *
   * @param chunk The chunk to be freed
   */
  private void freeContiguousChunk(Address chunk) {
    int numChunks = Map.getContiguousRegionChunks(chunk);
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(numChunks == 1 || metaDataPagesPerRegion == 0);

    /* nail down all pages associated with the chunk, so it is no longer on our free list */
    int chunkStart = Conversions.bytesToPages(chunk.diff(start));
    int chunkEnd = chunkStart + (numChunks*Space.PAGES_IN_CHUNK);
    while (chunkStart < chunkEnd) {
      freeList.setUncoalescable(chunkStart);
      if (metaDataPagesPerRegion > 0)
        freeList.free(chunkStart);  // first free any metadata pages
      int tmp = freeList.alloc(Space.PAGES_IN_CHUNK, chunkStart); // then alloc the entire chunk
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(tmp == chunkStart);
      chunkStart += Space.PAGES_IN_CHUNK;
      pagesCurrentlyOnFreeList -= (Space.PAGES_IN_CHUNK - metaDataPagesPerRegion);
    }
    /* now return the address space associated with the chunk for global reuse */
    space.releaseDiscontiguousChunks(chunk);
  }

  /**
   * Reserve virtual address space for meta-data.
   *
   * @param extent The size of this space
   */
  private void reserveMetaData(Extent extent) {
    highWaterMark = 0;
    if (metaDataPagesPerRegion > 0) {
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(start.toWord().rshl(EmbeddedMetaData.LOG_BYTES_IN_REGION).lsh(EmbeddedMetaData.LOG_BYTES_IN_REGION).toAddress().EQ(start));
      Extent size = extent.toWord().rshl(EmbeddedMetaData.LOG_BYTES_IN_REGION).lsh(EmbeddedMetaData.LOG_BYTES_IN_REGION).toExtent();
      Address cursor = start.plus(size);
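      /* Walk backwards over the space, one meta-data region at a time,
       * pre-allocating the meta-data pages at the start of each region so
       * that they are never handed out by the free list. */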
      while (cursor.GT(start)) {
        cursor = cursor.minus(EmbeddedMetaData.BYTES_IN_REGION);
        int unit = cursor.diff(start).toWord().rshl(LOG_BYTES_IN_PAGE).toInt();
        int tmp = freeList.alloc(metaDataPagesPerRegion, unit);
        pagesCurrentlyOnFreeList -= metaDataPagesPerRegion;
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(tmp == unit);
      }
    }
  }

  /**
   * {@inheritDoc}<p>
   * In the case of a free-list allocator, meta-data is pre-allocated, so
   * simply return the un-adjusted request size.
   *
   * @param pages The size of the pending allocation in pages
   * @return The (unadjusted) request size, since metadata is pre-allocated
   */
  @Override
  public int adjustForMetaData(int pages) { return pages; }

  /**
   * @return The address corresponding to the current high-water mark (the
   * highest page offset yet allocated by this resource).
   */
  public Address getHighWater() {
    return start.plus(Extent.fromIntSignExtend(highWaterMark<<LOG_BYTES_IN_PAGE));
  }

  /**
   * Return the size of the super page
   *
   * @param first the Address of the first word in the superpage
   * @return the size in bytes
   */
  @Inline
  public Extent getSize(Address first) {
    if (VM.VERIFY_ASSERTIONS)
      VM.assertions._assert(Conversions.isPageAligned(first));

    int pageOffset = Conversions.bytesToPages(first.diff(start));
    int pages = freeList.size(pageOffset);
    return Conversions.pagesToBytes(pages);
  }

  /**
   * Resize the free list associated with this resource and nail down
   * its start address. This method is called to reset the free list
   * once the global free list (which it shares) and the base address
   * are finalized.  There's a circular dependency, so we need an
   * explicit call-back to reset the free list size and start.
   *
   * @param startAddress The final start address for the discontiguous space.
   */
  @Interruptible
  public void resizeFreeList(Address startAddress) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!contiguous && !Plan.isInitialized());
    start = startAddress;
    freeList.resizeFreeList();
  }
}