/*
 *  This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 *  This file is licensed to You under the Eclipse Public License (EPL);
 *  You may not use this file except in compliance with the License. You
 *  may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 *  See the COPYRIGHT.txt file distributed with this work for information
 *  regarding copyright ownership.
 */
package org.mmtk.utility.alloc;

import org.mmtk.vm.Lock;
import org.mmtk.plan.Plan;
import org.mmtk.policy.Space;
import org.mmtk.utility.*;

import org.mmtk.vm.VM;

import org.vmmagic.unboxed.*;
import org.vmmagic.pragma.*;

/**
 * This abstract base class provides the basis for processor-local
 * allocation.  The key functionality provided is the retry mechanism
 * that is necessary to correctly handle the fact that a "slow-path"
 * allocation can cause a GC, which violates the uninterruptibility
 * assumption.  A GC can result in the thread being moved to a different
 * processor, so that the allocator object it was using is not actually
 * the one for the processor it is now running on.<p>
 *
 * This class also includes functionality to assist allocators with
 * ensuring that requests are aligned as requested.<p>
 *
 * Failing to handle this properly will lead to very hard to trace bugs
 * where an allocation that caused a GC, or allocations immediately
 * following a GC, are performed incorrectly.
 */
@Uninterruptible
public abstract class Allocator implements Constants {

  /** Lock used for out of memory handling */
  private static Lock oomLock = VM.newLock("OOM Lock");
  /** Has an allocation succeeded since the emergency collection? */
  private static volatile boolean allocationSuccess;
  /** Count of consecutive failed collection attempts (reset after an allocation success) */
  private static int collectionAttempts;

  /**
   * @return a consecutive failure count for any allocating thread.
   */
  public static int determineCollectionAttempts() {
    if (!allocationSuccess) {
      collectionAttempts++;
    } else {
      allocationSuccess = false;
      collectionAttempts = 1;
    }
    return collectionAttempts;
  }

  /**
   * Return the space this allocator is currently bound to.
   *
   * @return The Space.
   */
  protected abstract Space getSpace();

  /**
   * Aligns up an allocation request. The request specifies a region,
   * which must be at least particle aligned, an alignment request
   * (some power of two number of particles) and an offset (a number
   * of particles). There is also a knownAlignment parameter to allow
   * a more optimised check when the particular allocator in use always
   * aligns at a coarser grain than individual particles, such as some
   * free lists.
   *
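   * <p>As an illustrative sketch (the concrete values are hypothetical, not
   * drawn from any particular plan): with 4-byte particles, {@code alignment
   * = 8} and {@code offset = 4}, the returned address {@code r} satisfies
   * {@code (r + offset) % alignment == 0}:
   * <pre>{@code
   * // region = 0x1008 is particle aligned, but (0x1008 + 4) % 8 != 0, so the
   * // request is bumped by delta = 4 bytes: r = 0x100C, and 0x100C + 4 = 0x1010.
   * Address r = Allocator.alignAllocation(region, 8, 4, 4, false);
   * }</pre>
   *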
   * @param region The region to align up.
   * @param alignment The requested alignment
   * @param offset The offset from the alignment
   * @param knownAlignment The statically known minimum alignment.
   * @param fillAlignmentGap Whether to fill the skipped bytes with the alignment value.
   * @return The aligned up address.
   */
  @Inline
  public static Address alignAllocation(Address region, int alignment, int offset, int knownAlignment, boolean fillAlignmentGap) {
    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(knownAlignment >= MIN_ALIGNMENT);
      VM.assertions._assert(MIN_ALIGNMENT >= BYTES_IN_INT);
      VM.assertions._assert(!(fillAlignmentGap && region.isZero()));
      VM.assertions._assert(alignment <= MAX_ALIGNMENT);
      VM.assertions._assert(offset >= 0);
      VM.assertions._assert(region.toWord().and(Word.fromIntSignExtend(MIN_ALIGNMENT - 1)).isZero());
      VM.assertions._assert((alignment & (MIN_ALIGNMENT - 1)) == 0);
      VM.assertions._assert((offset & (MIN_ALIGNMENT - 1)) == 0);
    }

    // No alignment ever required.
    if (alignment <= knownAlignment || MAX_ALIGNMENT <= MIN_ALIGNMENT)
      return region;

    // May require an alignment
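    // delta is the smallest non-negative displacement such that
    // (region + delta + offset) is a multiple of alignment, i.e.
    // delta = (-offset - region) mod alignment.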
    Word mask = Word.fromIntSignExtend(alignment - 1);
    Word negOff = Word.fromIntSignExtend(-offset);
    Offset delta = negOff.minus(region.toWord()).and(mask).toOffset();

    if (fillAlignmentGap && ALIGNMENT_VALUE != 0) {
      if ((MAX_ALIGNMENT - MIN_ALIGNMENT) == BYTES_IN_INT) {
        // At most a single hole
        if (delta.toInt() == (BYTES_IN_INT)) {
          region.store(ALIGNMENT_VALUE);
          region = region.plus(delta);
          return region;
        }
      } else {
        while (delta.toInt() >= (BYTES_IN_INT)) {
          region.store(ALIGNMENT_VALUE);
          region = region.plus(BYTES_IN_INT);
          delta = delta.minus(BYTES_IN_INT);
        }
      }
    }

    return region.plus(delta);
  }

  /**
   * Fill the specified region with the alignment value.
   *
   * @param start The start of the region.
   * @param end A pointer past the end of the region.
   */
  @Inline
  public static void fillAlignmentGap(Address start, Address end) {
    if ((MAX_ALIGNMENT - MIN_ALIGNMENT) == BYTES_IN_INT) {
      // At most a single hole
      if (!end.diff(start).isZero()) {
        start.store(ALIGNMENT_VALUE);
      }
    } else {
      while (start.LT(end)) {
        start.store(ALIGNMENT_VALUE);
        start = start.plus(BYTES_IN_INT);
      }
    }
  }

  /**
   * Aligns up an allocation request. The request specifies a region,
   * which must be at least particle aligned, an alignment request
   * (some power of two number of particles) and an offset (a number
   * of particles). Any alignment gap is filled with the alignment value.
   *
   * @param region The region to align up.
   * @param alignment The requested alignment
   * @param offset The offset from the alignment
   * @return The aligned up address.
   */
  @Inline
  public static Address alignAllocation(Address region, int alignment, int offset) {
    return alignAllocation(region, alignment, offset, MIN_ALIGNMENT, true);
  }

  /**
   * Aligns up an allocation request. The request specifies a region,
   * which must be at least particle aligned, an alignment request
   * (some power of two number of particles) and an offset (a number
   * of particles). Unlike {@link #alignAllocation(Address, int, int)},
   * the alignment gap is <i>not</i> filled.
   *
   * @param region The region to align up.
   * @param alignment The requested alignment
   * @param offset The offset from the alignment
   * @return The aligned up address.
   */
  @Inline
  public static Address alignAllocationNoFill(Address region, int alignment, int offset) {
    return alignAllocation(region, alignment, offset, MIN_ALIGNMENT, false);
  }

  /**
   * This method calculates the minimum size that will guarantee the allocation
   * of a specified number of bytes at the specified alignment.
   *
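   * <p>As an illustrative example (with hypothetical constants): if
   * {@code size = 24}, {@code alignment = 8} and {@code MIN_ALIGNMENT = 4},
   * the allocation cursor may need to be bumped by up to
   * {@code alignment - knownAlignment = 4} bytes, so this method returns
   * {@code 28}.
   *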
   * @param size The number of bytes (not aligned).
   * @param alignment The requested alignment (some power of 2).
   * @return The minimum number of bytes to request to guarantee {@code size}
   * bytes at {@code alignment}.
   */
  @Inline
  public static int getMaximumAlignedSize(int size, int alignment) {
    return getMaximumAlignedSize(size, alignment, MIN_ALIGNMENT);
  }

  /**
   * This method calculates the minimum size that will guarantee the allocation
   * of a specified number of bytes at the specified alignment.
   *
   * @param size The number of bytes (not aligned).
   * @param alignment The requested alignment (some power of 2).
   * @param knownAlignment The known minimum alignment. Specifically for use in
   * allocators that enforce greater than particle alignment. It is a <b>precondition</b>
   * that size is aligned to knownAlignment, and that knownAlignment >= MIN_ALIGNMENT.
   * @return The minimum number of bytes to request to guarantee {@code size}
   * bytes at {@code alignment}.
   */
  @Inline
  public static int getMaximumAlignedSize(int size, int alignment, int knownAlignment) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(size == Conversions.roundDown(size, knownAlignment));
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(knownAlignment >= MIN_ALIGNMENT);

    if (MAX_ALIGNMENT <= MIN_ALIGNMENT || alignment <= knownAlignment) {
      return size;
    } else {
      return size + alignment - knownAlignment;
    }
  }

  /**
   * Single slow path allocation attempt. This is called by allocSlow.
   *
   * @param bytes The size of the allocation request
   * @param alignment The required alignment
   * @param offset The alignment offset
   * @return The start address of the region, or zero if allocation fails
   */
  protected abstract Address allocSlowOnce(int bytes, int alignment, int offset);

  /**
   * <b>Out-of-line</b> slow path allocation. This method forces slow path
   * allocation to be out of line (typically desirable, but not when the
   * calling context is already explicitly out-of-line).
   *
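   * <p>A minimal sketch of how a subclass's bump-pointer fast path might fall
   * back to this method (the {@code cursor} and {@code limit} fields are
   * hypothetical, not part of this class):
   * <pre>{@code
   * Address start = alignAllocation(cursor, align, offset); // fills any gap
   * Address end = start.plus(bytes);
   * if (end.GT(limit)) return allocSlow(bytes, align, offset); // out of line
   * cursor = end;
   * return start;
   * }</pre>
   *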
   * @param bytes The size of the allocation request
   * @param alignment The required alignment
   * @param offset The alignment offset
   * @return The start address of the region, or zero if allocation fails
   */
  @NoInline
  public final Address allocSlow(int bytes, int alignment, int offset) {
    return allocSlowInline(bytes, alignment, offset);
  }

  /**
   * <b>Inline</b> slow path allocation. This method attempts allocSlowOnce
   * several times, allowing collection to occur between attempts, and
   * ensures that execution safely resumes by taking care of potential
   * thread/mutator context affinity changes. All allocators should use
   * this as the trampoline for slow path allocation.
   *
   * @param bytes The size of the allocation request
   * @param alignment The required alignment
   * @param offset The alignment offset
   * @return The start address of the region, or zero if allocation fails
   */
  @Inline
  public final Address allocSlowInline(int bytes, int alignment, int offset) {
    Allocator current = this;
    Space space = current.getSpace();
    while (true) {
      // Try to allocate using the slow path
      Address result = current.allocSlowOnce(bytes, alignment, offset);

      // Collector allocation always succeeds (or fails inside allocSlow).
      if (!VM.activePlan.isMutator()) {
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!result.isZero());
        return result;
      }

      // Information about the previous collection.
      boolean emergencyCollection = Plan.isEmergencyCollection();

      if (!result.isZero()) {
        // Report allocation success to assist OutOfMemory handling.
        if (!allocationSuccess) {
          oomLock.acquire();
          allocationSuccess = true;
          oomLock.release();
        }
        return result;
      }

      if (emergencyCollection) {
        // Check if we are in an OutOfMemory situation
        oomLock.acquire();
        boolean failWithOOM = !allocationSuccess;
        // This seems odd, but we must allow each OOM to run its course (and maybe give us back memory)
        allocationSuccess = true;
        oomLock.release();
        if (failWithOOM) {
          // Nobody has successfully allocated since an emergency collection: OutOfMemory
          VM.collection.outOfMemory();
          VM.assertions.fail("Not Reached");
          return Address.zero();
        }
      }

      /* This is in case a GC occurs, and our mutator context is stale.
       * In some VMs the scheduler can change the affinity between the
       * current thread and the mutator context. This is possible for
       * VMs that dynamically multiplex Java threads onto multiple mutator
       * contexts. */
      current = VM.activePlan.mutator().getAllocatorFromSpace(space);
    }
  }
}