/*
 * This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 * This file is licensed to You under the Eclipse Public License (EPL);
 * You may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 * See the COPYRIGHT.txt file distributed with this work for information
 * regarding copyright ownership.
 */
package org.mmtk.utility.heap;

import org.mmtk.utility.*;

import org.mmtk.vm.Lock;
import org.mmtk.vm.VM;

import org.vmmagic.unboxed.*;
import org.vmmagic.pragma.*;

/**
 * This class implements mmapping and protection of virtual memory.
 */
@Uninterruptible public final class Mmapper implements Constants {

  /****************************************************************************
   * Constants
   */

  /**
   *
   */
  public static final byte UNMAPPED = 0;
  public static final byte MAPPED = 1;
  public static final byte PROTECTED = 2; // mapped but not accessible
  public static final int LOG_MMAP_CHUNK_BYTES = 20;
  public static final int MMAP_CHUNK_BYTES = 1 << LOG_MMAP_CHUNK_BYTES; // the granularity VMResource operates at
  // TODO: 64-bit: this is not OK: the value does not fit in an int, but should; we do not want to create such a big array
  private static final int MMAP_CHUNK_MASK = MMAP_CHUNK_BYTES - 1;
  private static final int MMAP_NUM_CHUNKS = 1 << (Constants.LOG_BYTES_IN_ADDRESS_SPACE - LOG_MMAP_CHUNK_BYTES);
  public static final boolean verbose = false;

  /****************************************************************************
   * Class variables
   */

  /**
   *
   */
  public static final Lock lock = VM.newLock("Mmapper");
  private static byte[] mapped;


  /****************************************************************************
   * Initialization
   */

  /**
   * Class initializer. This is executed <i>prior</i> to bootstrap
   * (i.e. at "build" time).
   */
  static {
    mapped = new byte[MMAP_NUM_CHUNKS];
    for (int c = 0; c < MMAP_NUM_CHUNKS; c++) {
      mapped[c] = UNMAPPED;
    }
  }

  /****************************************************************************
   * Generic mmap and protection functionality
   */

  /**
   * Given an address array describing the regions of virtual memory to be used
   * by MMTk, demand zero map all of them if they are not already mapped.
   *
   * @param spaceMap An address array containing pairs of start and end
   * addresses for each of the regions to be mapped
   */
  public static void eagerlyMmapAllSpaces(AddressArray spaceMap) {

    /*for (int i = 0; i < spaceMap.length() / 2; i++) {
      Address regionStart = spaceMap.get(i * 2);
      Address regionEnd = spaceMap.get(i * 2 + 1);
      Log.write(regionStart); Log.write(" "); Log.writeln(regionEnd);
      if (regionEnd.EQ(Address.zero()) || regionStart.EQ(Address.fromIntSignExtend(-1)) || regionEnd.EQ(Address.fromIntSignExtend(-1)))
        break;
      if (VM.VERIFY_ASSERTIONS) {
        VM.assertions._assert(regionStart.EQ(chunkAlignDown(regionStart)));
        VM.assertions._assert(regionEnd.EQ(chunkAlignDown(regionEnd)));
      }
      int pages = Conversions.bytesToPages(regionEnd.diff(regionStart));
      ensureMapped(regionStart, pages);
    }*/
  }

  /**
   * Mark a range of pages as having (already) been mapped. This is useful
   * where the VM has performed the mapping of the pages itself.
   *
   * @param start The start of the range to be marked as mapped
   * @param bytes The size of the range, in bytes.
   */
  public static void markAsMapped(Address start, int bytes) {
    int startChunk = Conversions.addressToMmapChunksDown(start);
    int endChunk = Conversions.addressToMmapChunksUp(start.plus(bytes));
    for (int i = startChunk; i <= endChunk; i++)
      mapped[i] = MAPPED;
  }

  /**
   * Ensure that a range of pages is mmapped (or equivalent). If the
   * pages are not yet mapped, demand-zero map them. Note that mapping
   * occurs at chunk granularity, not page granularity.<p>
   *
   * NOTE: There is a monotonicity assumption so that only updates require lock
   * acquisition.
   * TODO: Fix the above to support unmapping.
   *
   * @param start The start of the range to be mapped.
   * @param pages The size of the range to be mapped, in pages
   */
  public static void ensureMapped(Address start, int pages) {
    int startChunk = Conversions.addressToMmapChunksDown(start);
    int endChunk = Conversions.addressToMmapChunksUp(start.plus(Conversions.pagesToBytes(pages)));
    for (int chunk = startChunk; chunk < endChunk; chunk++) {
      if (mapped[chunk] == MAPPED) continue;
      Address mmapStart = Conversions.mmapChunksToAddress(chunk);
      lock.acquire();
      // Log.writeln(mmapStart);
      // might have become MAPPED here
      if (mapped[chunk] == UNMAPPED) {
        int errno = VM.memory.dzmmap(mmapStart, MMAP_CHUNK_BYTES);
        if (errno != 0) {
          lock.release();
          Log.write("ensureMapped failed with errno "); Log.write(errno);
          Log.write(" on address "); Log.writeln(mmapStart);
          VM.assertions.fail("Can't get more space with mmap()");
        } else {
          if (verbose) {
            Log.write("mmap succeeded at chunk "); Log.write(chunk); Log.write(" "); Log.write(mmapStart);
            Log.write(" with len = "); Log.writeln(MMAP_CHUNK_BYTES);
          }
        }
      }
      if (mapped[chunk] == PROTECTED) {
        if (!VM.memory.munprotect(mmapStart, MMAP_CHUNK_BYTES)) {
          lock.release();
          VM.assertions.fail("Mmapper.ensureMapped (unprotect) failed");
        } else {
          if (verbose) {
            Log.write("munprotect succeeded at chunk "); Log.write(chunk); Log.write(" "); Log.write(mmapStart);
            Log.write(" with len = "); Log.writeln(MMAP_CHUNK_BYTES);
          }
        }
      }
      mapped[chunk] = MAPPED;
      lock.release();
    }

  }

  /**
   * Memory protect a range of pages (using mprotect or equivalent). Note
   * that protection occurs at chunk granularity, not page granularity.
   *
   * @param start The start of the range to be protected.
   * @param pages The size of the range to be protected, in pages
   */
  public static void protect(Address start, int pages) {
    int startChunk = Conversions.addressToMmapChunksDown(start);
    int chunks = Conversions.pagesToMmapChunksUp(pages);
    int endChunk = startChunk + chunks;
    lock.acquire();
    for (int chunk = startChunk; chunk < endChunk; chunk++) {
      if (mapped[chunk] == MAPPED) {
        Address mmapStart = Conversions.mmapChunksToAddress(chunk);
        if (!VM.memory.mprotect(mmapStart, MMAP_CHUNK_BYTES)) {
          lock.release();
          VM.assertions.fail("Mmapper.mprotect failed");
        } else {
          if (verbose) {
            Log.write("mprotect succeeded at chunk "); Log.write(chunk); Log.write(" "); Log.write(mmapStart);
            Log.write(" with len = "); Log.writeln(MMAP_CHUNK_BYTES);
          }
        }
        mapped[chunk] = PROTECTED;
      } else {
        if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(mapped[chunk] == PROTECTED);
      }
    }
    lock.release();
  }

  /****************************************************************************
   * Utility functions
   */

  /**
   * Return {@code true} if the given address has been mmapped
   *
   * @param addr The address in question.
   * @return {@code true} if the given address has been mmapped
   */
  @Uninterruptible
  public static boolean addressIsMapped(Address addr) {
    int chunk = Conversions.addressToMmapChunksDown(addr);
    return mapped[chunk] == MAPPED;
  }

  /**
   * Return {@code true} if the given object has been mmapped
   *
   * @param object The object in question.
   * @return {@code true} if the given object has been mmapped
   */
  @Uninterruptible
  public static boolean objectIsMapped(ObjectReference object) {
    return addressIsMapped(VM.objectModel.refToAddress(object));
  }

  /**
   * Return a given address rounded up to an mmap chunk size
   *
   * @param addr The address to be aligned
   * @return The given address rounded up to an mmap chunk size
   */
  @SuppressWarnings("unused") // but might be useful someday
  private static Address chunkAlignUp(Address addr) {
    return chunkAlignDown(addr.plus(MMAP_CHUNK_MASK));
  }

  /**
   * Return a given address rounded down to an mmap chunk size
   *
   * @param addr The address to be aligned
   * @return The given address rounded down to an mmap chunk size
   */
  private static Address chunkAlignDown(Address addr) {
    return addr.toWord().and(Word.fromIntSignExtend(MMAP_CHUNK_MASK).not()).toAddress();
  }
}
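
// A minimal usage sketch, assuming a caller that has just reserved a
// chunk-aligned region of `pages` pages starting at `start`. The class name
// `MmapperUsageSketch`, its method, and its parameters are hypothetical and
// not part of the MMTk sources; they only illustrate the calling protocol
// implied above: ensure a range is mapped before it is touched, then protect
// it once it should become inaccessible again.
class MmapperUsageSketch {
  static void mapThenProtect(Address start, int pages) {
    // Demand-zero map every mmap chunk covering [start, start + pages pages)
    Mmapper.ensureMapped(start, pages);
    if (VM.VERIFY_ASSERTIONS)
      VM.assertions._assert(Mmapper.addressIsMapped(start));
    // Later: make the same chunks inaccessible (chunk granularity, not page)
    Mmapper.protect(start, pages);
  }
}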