/*
 * This file is part of the Jikes RVM project (http://jikesrvm.org).
 *
 * This file is licensed to You under the Eclipse Public License (EPL);
 * You may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 *      http://www.opensource.org/licenses/eclipse-1.0.php
 *
 * See the COPYRIGHT.txt file distributed with this work for information
 * regarding copyright ownership.
 */
package org.mmtk.policy;

import org.mmtk.plan.Plan;
import org.mmtk.plan.TransitiveClosure;
import org.mmtk.utility.heap.Map;
import org.mmtk.utility.heap.Mmapper;
import org.mmtk.utility.heap.PageResource;
import org.mmtk.utility.heap.SpaceDescriptor;
import org.mmtk.utility.heap.VMRequest;
import org.mmtk.utility.options.Options;
import org.mmtk.utility.Log;
import org.mmtk.utility.Constants;

import org.mmtk.vm.VM;

import org.vmmagic.pragma.*;
import org.vmmagic.unboxed.*;

/**
 * This class defines and manages spaces. Each policy is implemented by a
 * subclass of this class, and each space is an instance of a policy. A
 * space is a region of virtual memory (contiguous or discontiguous) which
 * is subject to the same memory management regime. Multiple spaces
 * (instances of this class or its descendants) may have the same policy
 * (e.g., there could be numerous instances of CopySpace, each with a
 * different role). Spaces are defined in terms of a unique region of
 * virtual memory, so no two space instances ever share any virtual
 * memory.<p>
 *
 * In addition to tracking virtual memory use and the mapping to policy,
 * spaces also manage memory consumption (<i>used</i> virtual memory).
 */
@Uninterruptible
public abstract class Space implements Constants {

  /****************************************************************************
   *
   * Class variables
   */

  /**
   *
   */
  private static boolean DEBUG = false;

  // the following is somewhat arbitrary for the 64-bit system at this stage
  public static final int LOG_ADDRESS_SPACE = (BYTES_IN_ADDRESS == 4) ? 32 : 40;
  public static final int LOG_BYTES_IN_CHUNK = 22;
  public static final int BYTES_IN_CHUNK = 1 << LOG_BYTES_IN_CHUNK;
  public static final int PAGES_IN_CHUNK = 1 << (LOG_BYTES_IN_CHUNK - LOG_BYTES_IN_PAGE);
  private static final int LOG_MAX_CHUNKS = LOG_ADDRESS_SPACE - LOG_BYTES_IN_CHUNK;
  public static final int MAX_CHUNKS = 1 << LOG_MAX_CHUNKS;
  public static final int MAX_SPACES = 20; // quite arbitrary

  public static final Address HEAP_START = chunkAlign(VM.HEAP_START, true);
  public static final Address AVAILABLE_START = chunkAlign(VM.AVAILABLE_START, false);
  public static final Address AVAILABLE_END = chunkAlign(VM.AVAILABLE_END, true);
  public static final Extent AVAILABLE_BYTES = AVAILABLE_END.toWord().minus(AVAILABLE_START.toWord()).toExtent();
  public static final int AVAILABLE_PAGES = AVAILABLE_BYTES.toWord().rshl(LOG_BYTES_IN_PAGE).toInt();
  public static final Address HEAP_END = chunkAlign(VM.HEAP_END, false);

  private static final boolean FORCE_SLOW_MAP_LOOKUP = false;

  private static final int PAGES = 0;
  private static final int MB = 1;
  private static final int PAGES_MB = 2;
  private static final int MB_PAGES = 3;

  private static int spaceCount = 0;
  private static Space[] spaces = new Space[MAX_SPACES];
  private static Address heapCursor = HEAP_START;
  private static Address heapLimit = HEAP_END;
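
  /*
   * Illustrative geometry implied by the constants above (a sketch, not part
   * of the code proper; it assumes the usual 4 KB page size, i.e.
   * LOG_BYTES_IN_PAGE == 12):
   *
   *   BYTES_IN_CHUNK = 1 << 22          -> 4 MB per chunk
   *   PAGES_IN_CHUNK = 1 << (22 - 12)   -> 1024 pages per chunk
   *   MAX_CHUNKS     = 1 << (32 - 22)   -> 1024 chunks in a 32-bit address space,
   *                    1 << (40 - 22)   -> 262144 chunks under the 40-bit limit
   *                                        assumed above for 64-bit systems
   */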

  /****************************************************************************
   *
   * Instance variables
   */

  /**
   *
   */
  private final String name;
  private final int nameLength;
  protected final int descriptor;
  private final int index;
  private final VMRequest vmRequest;

  protected final boolean immortal;
  protected final boolean movable;
  protected final boolean contiguous;
  protected final boolean zeroed;

  protected PageResource pr;
  protected final Address start;
  protected final Extent extent;
  protected Address headDiscontiguousRegion;

  /****************************************************************************
   *
   * Initialization
   */

  {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(PAGES_IN_CHUNK > 1);
  }

  /**
   * This is the base constructor for <i>all</i> spaces.<p>
   *
   * @param name The name of this space (used when printing error messages etc.)
   * @param movable Are objects in this space movable?
   * @param immortal Are objects in this space immortal (uncollected)?
   * @param zeroed If {@code true}, allocated memory is zeroed.
   * @param vmRequest An object describing the virtual memory requested.
   */
  protected Space(String name, boolean movable, boolean immortal, boolean zeroed, VMRequest vmRequest) {
    this.name = name;
    this.nameLength = name.length();  // necessary to avoid calling length() in uninterruptible code
    this.movable = movable;
    this.immortal = immortal;
    this.zeroed = zeroed;
    this.vmRequest = vmRequest;
    this.index = spaceCount++;
    spaces[index] = this;

    if (vmRequest.type == VMRequest.REQUEST_DISCONTIGUOUS) {
      this.contiguous = false;
      this.descriptor = SpaceDescriptor.createDescriptor();
      this.start = Address.zero();
      this.extent = Extent.zero();
      this.headDiscontiguousRegion = Address.zero();
      VM.memory.setHeapRange(index, HEAP_START, HEAP_END); // this should really be refined!
      // Once we have a code space, we can be a lot more specific about what is a valid code heap area.
      return;
    }

    Address start;
    Extent extent;

    if (vmRequest.type == VMRequest.REQUEST_FRACTION) {
      extent = getFracAvailable(vmRequest.frac);
    } else {
      extent = vmRequest.extent;
    }

    if (extent.NE(chunkAlign(extent, false))) {
      VM.assertions.fail(name + " requested non-aligned extent: " + extent.toLong() + " bytes");
    }

    if (vmRequest.type == VMRequest.REQUEST_FIXED) {
      start = vmRequest.start;
      if (start.NE(chunkAlign(start, false))) {
        VM.assertions.fail(name + " starting on non-aligned boundary: " + start.toLong() + " bytes");
      }
    } else if (vmRequest.top) {
      heapLimit = heapLimit.minus(extent);
      start = heapLimit;
    } else {
      start = heapCursor;
      heapCursor = heapCursor.plus(extent);
    }

    if (heapCursor.GT(heapLimit)) {
      Log.write("Out of virtual address space allocating \"");
      Log.write(name); Log.write("\" at ");
      Log.write(heapCursor.minus(extent)); Log.write(" (");
      Log.write(heapCursor); Log.write(" > ");
      Log.write(heapLimit); Log.writeln(")");
      VM.assertions.fail("exiting");
    }

    this.contiguous = true;
    this.start = start;
    this.extent = extent;
    this.descriptor = SpaceDescriptor.createDescriptor(start, start.plus(extent));

    VM.memory.setHeapRange(index, start, start.plus(extent));
    Map.insert(start, extent, descriptor, this);

    if (DEBUG) {
      Log.write(name); Log.write(" ");
      Log.write(start); Log.write(" ");
      Log.write(start.plus(extent)); Log.write(" ");
      Log.writeln(extent.toWord());
    }
  }
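
  /*
   * Sketch of how a REQUEST_FRACTION request is sized by the constructor
   * above (hypothetical numbers; the actual figure depends on AVAILABLE_BYTES
   * for the build in question):
   *
   *   frac = 0.25f, AVAILABLE_BYTES = 2 GB
   *   getFracAvailable(0.25f) -> 512 MB: the raw byte count is truncated to a
   *   whole number of megabytes and then rounded up to a 4 MB chunk boundary,
   *   so the space is created with a 512 MB extent starting at heapCursor
   *   (or at the top of the heap if vmRequest.top is set).
   */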

  /****************************************************************************
   *
   * Accessor methods
   */

  /** Start of discontig getter @return The start of the discontiguous space */
  public static Address getDiscontigStart() { return heapCursor; }

  /** End of discontig getter @return The end of the discontiguous space */
  public static Address getDiscontigEnd() { return heapLimit.minus(1); }

  /** Name getter @return The name of this space */
  public final String getName() { return name; }

  /** Start getter @return The start address of this space */
  public final Address getStart() { return start; }

  /** Extent getter @return The size (extent) of this space */
  public final Extent getExtent() { return extent; }

  /** Descriptor method @return The integer descriptor for this space */
  public final int getDescriptor() { return descriptor; }

  /** Index getter @return The index (ordinal number) of this space */
  public final int getIndex() { return index; }

  /** Immortal getter @return {@code true} if this space is never collected */
  public final boolean isImmortal() { return immortal; }

  /** Movable getter @return {@code true} if objects in this space may move */
  public boolean isMovable() { return movable; }

  /** ReservedPages getter @return The number of reserved pages */
  public final int reservedPages() { return pr.reservedPages(); }

  /** CommittedPages getter @return The number of committed pages */
  public final int committedPages() { return pr.committedPages(); }

  /** AvailablePages getter @return The number of pages available for allocation */
  public final int availablePhysicalPages() { return pr.getAvailablePhysicalPages(); }

  /** Cumulative committed pages getter @return Cumulative committed pages. */
  public static long cumulativeCommittedPages() {
    return PageResource.cumulativeCommittedPages();
  }

  /****************************************************************************
   *
   * Object and address tests / accessors
   */

  /**
   * Return {@code true} if the given object is in an immortal (uncollected) space.
   *
   * @param object The object in question
   * @return {@code true} if the given object is in an immortal (uncollected) space.
   */
  public static boolean isImmortal(ObjectReference object) {
    Space space = getSpaceForObject(object);
    if (space == null)
      return true;
    else
      return space.isImmortal();
  }

  /**
   * Return {@code true} if the given object is in a space that moves objects.
   *
   * @param object The object in question
   * @return {@code true} if the given object is in a space that moves objects.
   */
  @Inline
  public static boolean isMovable(ObjectReference object) {
    Space space = getSpaceForObject(object);
    if (space == null)
      return true;
    else
      return space.isMovable();
  }

  /**
   * Return {@code true} if the given object is in a space managed by MMTk.
   *
   * @param object The object in question
   * @return {@code true} if the given object is in a space managed by MMTk.
   */
  @Inline
  public static boolean isMappedObject(ObjectReference object) {
    return !object.isNull() && (getSpaceForObject(object) != null) && Mmapper.objectIsMapped(object);
  }

  /**
   * Return {@code true} if the given address is in a space managed by MMTk.
   *
   * @param address The address in question
   * @return {@code true} if the given address is in a space managed by MMTk.
   */
  @Inline
  public static boolean isMappedAddress(Address address) {
    return Map.getSpaceForAddress(address) != null && Mmapper.addressIsMapped(address);
  }

  /**
   * Return {@code true} if the given object is in the space associated with
   * the given descriptor.
   *
   * @param descriptor The descriptor for a space
   * @param object The object in question
   * @return {@code true} if the given object is in the space associated with
   * the descriptor.
   */
  @Inline
  public static boolean isInSpace(int descriptor, ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!object.isNull());
    return isInSpace(descriptor, VM.objectModel.refToAddress(object));
  }

  /**
   * Return {@code true} if the given address is in the space associated with
   * the given descriptor.
   *
   * @param descriptor The descriptor for a space
   * @param address The address in question.
   * @return {@code true} if the given address is in the space associated with
   * the descriptor.
   */
  @Inline
  public static boolean isInSpace(int descriptor, Address address) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!address.isZero());
    if (FORCE_SLOW_MAP_LOOKUP || !SpaceDescriptor.isContiguous(descriptor)) {
      return Map.getDescriptorForAddress(address) == descriptor;
    } else {
      Address start = SpaceDescriptor.getStart(descriptor);
      if (!VM.VERIFY_ASSERTIONS &&
          SpaceDescriptor.isContiguousHi(descriptor))
        return address.GE(start);
      else {
        Extent size = Word.fromIntSignExtend(SpaceDescriptor.getChunks(descriptor)).lsh(LOG_BYTES_IN_CHUNK).toExtent();
        Address end = start.plus(size);
        return address.GE(start) && address.LT(end);
      }
    }
  }

  /**
   * Return the space for a given object
   *
   * @param object The object in question
   * @return The space containing the object
   */
  @Inline
  public static Space getSpaceForObject(ObjectReference object) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!object.isNull());
    return Map.getSpaceForAddress(VM.objectModel.refToAddress(object));
  }

  /**
   * Return the space for a given address, not necessarily the
   * start address of an object.
   *
   * @param addr The address in question
   * @return The space containing the address
   */
  public static Space getSpaceForAddress(Address addr) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!addr.isZero());
    return Map.getSpaceForAddress(addr);
  }
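
  /*
   * Typical usage sketch for the tests above (hypothetical caller code; the
   * trace object and the point at which this runs are assumptions, not part
   * of this class):
   *
   *   if (Space.isMappedObject(object) && !Space.isImmortal(object)) {
   *     Space s = Space.getSpaceForObject(object);
   *     // dispatch to the policy that owns the object, e.g. during a trace:
   *     // s.traceObject(trace, object);
   *   }
   */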

  /****************************************************************************
   *
   * Page management
   */

  /**
   * Update the zeroing approach for this space.
   */
  @Interruptible
  public void setZeroingApproach(boolean useNT, boolean concurrent) {
    pr.updateZeroingApproach(useNT, concurrent);
  }

  /**
   * Skip concurrent zeroing (fall back to bulk zeroing).
   */
  public void skipConcurrentZeroing() {
    pr.skipConcurrentZeroing();
  }

  /**
   * Trigger concurrent zeroing.
   */
  public void triggerConcurrentZeroing() {
    pr.triggerConcurrentZeroing();
  }

  /**
   * Acquire a number of pages from the page resource, returning
   * either the address of the first page, or zero on failure.<p>
   *
   * This may trigger a GC if necessary.<p>
   *
   * First the page budget is checked to see whether polling the GC is
   * necessary. If so, the GC is polled. If a GC is required then the
   * request fails and zero is returned.<p>
   *
   * If the check of the page budget does not lead to GC being
   * triggered, then a request is made for specific pages in virtual
   * memory. If the page manager cannot satisfy this request, then
   * the request fails, a GC is forced, and zero is returned.
   * Otherwise the address of the first page is returned.<p>
   *
   * @param pages The number of pages requested
   * @return The start of the first page if successful, zero on
   * failure.
   */
  @LogicallyUninterruptible
  public final Address acquire(int pages) {
    boolean allowPoll = VM.activePlan.isMutator() && Plan.isInitialized();

    /* Check page budget */
    int pagesReserved = pr.reservePages(pages);

    /* Poll, either fixing budget or requiring GC */
    if (allowPoll && VM.activePlan.global().poll(false, this)) {
      pr.clearRequest(pagesReserved);
      VM.collection.blockForGC();
      return Address.zero(); // GC required, return failure
    }

    /* Page budget is ok, try to acquire virtual memory */
    Address rtn = pr.getNewPages(pagesReserved, pages, zeroed);
    if (rtn.isZero()) {
      /* Failed, so force a GC */
      if (!allowPoll) VM.assertions.fail("Physical allocation failed when polling not allowed!");
      boolean gcPerformed = VM.activePlan.global().poll(true, this);
      if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(gcPerformed, "GC not performed when forced.");
      pr.clearRequest(pagesReserved);
      VM.collection.blockForGC();
      return Address.zero();
    }

    return rtn;
  }
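
  /*
   * Usage sketch for acquire() (a hypothetical allocator slow path, not a
   * verbatim copy of any MMTk allocator): a zero return means a GC was
   * required and this thread has already blocked for it, so the caller
   * simply retries.
   *
   *   Address region = space.acquire(pagesRequired);
   *   while (region.isZero()) {
   *     region = space.acquire(pagesRequired);  // retry after the GC
   *   }
   */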

  /**
   * Extend the virtual memory associated with a particular discontiguous
   * space. This simply involves requesting a suitable number of chunks
   * from the pool of chunks available to discontiguous spaces.
   *
   * @param chunks The number of chunks by which the space needs to be extended
   * @return The address of the new discontiguous space.
   */
  public Address growDiscontiguousSpace(int chunks) {
    Address newHead = Map.allocateContiguousChunks(descriptor, this, chunks, headDiscontiguousRegion);
    if (newHead.isZero()) {
      return Address.zero();
    }
    return headDiscontiguousRegion = newHead;
  }

  /**
   * Return the number of chunks required to satisfy a request for a certain number of pages
   *
   * @param pages The number of pages desired
   * @return The number of chunks needed to satisfy the request
   */
  public static int requiredChunks(int pages) {
    Extent extent = chunkAlign(Extent.fromIntZeroExtend(pages << LOG_BYTES_IN_PAGE), false);
    return extent.toWord().rshl(LOG_BYTES_IN_CHUNK).toInt();
  }
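
  /*
   * Worked example for requiredChunks(), assuming 4 KB pages (1024 pages per
   * 4 MB chunk):
   *
   *   requiredChunks(1)    -> 1   (4 KB rounds up to one whole chunk)
   *   requiredChunks(1024) -> 1   (exactly one chunk's worth of pages)
   *   requiredChunks(1025) -> 2   (spills into a second chunk)
   */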

  /**
   * This hook is called by page resources each time a space grows. The space may
   * tap into the hook to monitor heap growth. The call is made from within the
   * page resources' critical region, immediately before yielding the lock.
   *
   * @param start The start of the newly allocated space
   * @param bytes The size of the newly allocated space
   * @param newChunk {@code true} if the new space encroached upon or started a new chunk or chunks.
   */
  public void growSpace(Address start, Extent bytes, boolean newChunk) {}

  /**
   * Release one or more contiguous chunks associated with a discontiguous
   * space.
   *
   * @param chunk The address of the start of the contiguous chunk or chunks
   * @return The number of chunks freed
   */
  public int releaseDiscontiguousChunks(Address chunk) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(chunk.EQ(chunkAlign(chunk, true)));
    if (chunk.EQ(headDiscontiguousRegion)) {
      headDiscontiguousRegion = Map.getNextContiguousRegion(chunk);
    }
    return Map.freeContiguousChunks(chunk);
  }

  /**
   * @return The address of the head of the discontiguous chunk map.
   */
  public Address getHeadDiscontiguousRegion() {
    return headDiscontiguousRegion;
  }

  public void releaseAllChunks() {
    Map.freeAllChunks(headDiscontiguousRegion);
    headDiscontiguousRegion = Address.zero();
  }

  /**
   * Release a unit of allocation (a page or pages)
   *
   * @param start The address of the start of the region to be released
   */
  public abstract void release(Address start);

  /**
   * Get the total number of pages reserved by all of the spaces
   *
   * @return the total number of pages reserved by all of the spaces
   */
  private static int getPagesReserved() {
    int pages = 0;
    for (int i = 0; i < spaceCount; i++) {
      pages += spaces[i].reservedPages();
    }
    return pages;
  }

  /****************************************************************************
   *
   * Debugging / printing
   */

  /**
   * Print out the memory used by all spaces, in megabytes
   */
  public static void printUsageMB() { printUsage(MB); }

  /**
   * Print out the memory used by all spaces, in pages
   */
  public static void printUsagePages() { printUsage(PAGES); }

  /**
   * Print out a map of virtual memory usage by all spaces
   */
  public static void printVMMap() {
    Log.writeln("Key: (I)mmortal (N)onmoving (D)iscontiguous (E)xtent (F)raction");
    Log.write(" HEAP_START "); Log.writeln(HEAP_START);
    Log.write("AVAILABLE_START "); Log.writeln(AVAILABLE_START);
    for (int i = 0; i < spaceCount; i++) {
      Space space = spaces[i];

      for (int s = 0; s < 11 - space.nameLength; s++)
        Log.write(" ");
      Log.write(space.name); Log.write(" ");
      Log.write(space.immortal ? "I" : " ");
      Log.write(space.movable ? " " : "N");

      if (space.contiguous) {
        Log.write(" ");
        Log.write(space.start); Log.write("->");
        Log.write(space.start.plus(space.extent.minus(1)));
        if (space.vmRequest.type == VMRequest.REQUEST_EXTENT) {
          Log.write(" E "); Log.write(space.vmRequest.extent);
        } else if (space.vmRequest.type == VMRequest.REQUEST_FRACTION) {
          Log.write(" F "); Log.write(space.vmRequest.frac);
        }
        Log.writeln();
      } else {
        Log.write("D [");
        for (Address a = space.headDiscontiguousRegion; !a.isZero(); a = Map.getNextContiguousRegion(a)) {
          Log.write(a); Log.write("->");
          Log.write(a.plus(Map.getContiguousRegionSize(a).minus(1)));
          if (Map.getNextContiguousRegion(a) != Address.zero())
            Log.write(", ");
        }
        Log.writeln("]");
      }
    }
    Log.write(" AVAILABLE_END "); Log.writeln(AVAILABLE_END);
    Log.write(" HEAP_END "); Log.writeln(HEAP_END);
  }

  /**
   * Interface to use to implement the Visitor Pattern for Spaces.
   */
  public static interface SpaceVisitor {
    void visit(Space s);
  }

  /**
   * Implement the Visitor Pattern for Spaces.
   * @param v The visitor to perform on each Space instance
   */
  @Interruptible
  public static void visitSpaces(SpaceVisitor v) {
    for (int i = 0; i < spaceCount; i++) {
      v.visit(spaces[i]);
    }
  }
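
  /*
   * Example use of the visitor hook above (an illustrative sketch; the body
   * of the visitor is arbitrary):
   *
   *   Space.visitSpaces(new Space.SpaceVisitor() {
   *     public void visit(Space s) {
   *       Log.write(s.getName()); Log.write(": ");
   *       Log.write(s.reservedPages()); Log.writeln(" pages reserved");
   *     }
   *   });
   */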

  /**
   * Ensure that all MMTk spaces (all spaces aside from the VM space)
   * are mapped. Demand zero map all of them if they are not already
   * mapped.
   */
  @Interruptible
  public static void eagerlyMmapMMTkSpaces() {
    eagerlyMmapMMTkContiguousSpaces();
    eagerlyMmapMMTkDiscontiguousSpaces();
  }

  /**
   * Ensure that all contiguous MMTk spaces are mapped. Demand zero map
   * all of them if they are not already mapped.
   */
  @Interruptible
  public static void eagerlyMmapMMTkContiguousSpaces() {
    for (int i = 0; i < spaceCount; i++) {
      Space space = spaces[i];
      if (space != VM.memory.getVMSpace()) {
        if (Options.verbose.getValue() > 2) {
          Log.write("Mapping ");
          Log.write(space.name);
          Log.write(" ");
          Log.write(space.start);
          Log.write("->");
          Log.writeln(space.start.plus(space.extent.minus(1)));
        }
        Mmapper.ensureMapped(space.start, space.extent.toInt() >> LOG_BYTES_IN_PAGE);
      }
    }
  }

  /**
   * Ensure that all discontiguous MMTk spaces are mapped. Demand zero map
   * all of them if they are not already mapped.
   */
  @Interruptible
  public static void eagerlyMmapMMTkDiscontiguousSpaces() {
    Address regionStart = Space.getDiscontigStart();
    Address regionEnd = Space.getDiscontigEnd();
    int pages = regionEnd.diff(regionStart).toInt() >> LOG_BYTES_IN_PAGE;
    Log.write("Mapping discontiguous spaces ");
    Log.write(regionStart);
    Log.write("->");
    Log.writeln(regionEnd.minus(1));
    Mmapper.ensureMapped(getDiscontigStart(), pages);
  }

  /**
   * Print out the memory used by all spaces in either megabytes or
   * pages.
   *
   * @param mode An enumeration type that specifies the format for the
   * printing (PAGES, MB, PAGES_MB, or MB_PAGES).
   */
  private static void printUsage(int mode) {
    Log.write("used = ");
    printPages(getPagesReserved(), mode);
    boolean first = true;
    for (int i = 0; i < spaceCount; i++) {
      Space space = spaces[i];
      Log.write(first ? " = " : " + ");
      first = false;
      Log.write(space.name); Log.write(" ");
      printPages(space.reservedPages(), mode);
    }
    Log.writeln();
  }

  /**
   * Print out the number of pages and/or megabytes, depending on the mode.
   *
   * @param pages The number of pages
   * @param mode An enumeration type that specifies the format for the
   * printing (PAGES, MB, PAGES_MB, or MB_PAGES).
   */
  private static void printPages(int pages, int mode) {
    double mb = (double) (pages << LOG_BYTES_IN_PAGE) / (double) (1 << 20);
    switch (mode) {
    case PAGES: Log.write(pages); Log.write(" pgs"); break;
    case MB: Log.write(mb); Log.write(" Mb"); break;
    case PAGES_MB: Log.write(pages); Log.write(" pgs ("); Log.write(mb); Log.write(" Mb)"); break;
    case MB_PAGES: Log.write(mb); Log.write(" Mb ("); Log.write(pages); Log.write(" pgs)"); break;
    default: VM.assertions.fail("printPages passed illegal printing mode");
    }
  }

  /****************************************************************************
   *
   * Miscellaneous
   */

  /**
   * Trace an object as part of a collection and return the object,
   * which may have been forwarded (if a copying collector).
   *
   * @param trace The trace being conducted.
   * @param object The object to trace
   * @return The object, forwarded, if appropriate
   */
  public abstract ObjectReference traceObject(TransitiveClosure trace, ObjectReference object);

  /**
   * Has the object in this space been reached during the current collection?
   * This is used for GC tracing.
   *
   * @param object The object reference.
   * @return {@code true} if the object is reachable.
   */
  public boolean isReachable(ObjectReference object) {
    return isLive(object);
  }

  /**
   * Is the object in this space alive?
   *
   * @param object The object reference.
   * @return {@code true} if the object is live.
   */
  public abstract boolean isLive(ObjectReference object);

  /**
   * Align an address to a space chunk
   *
   * @param addr The address to be aligned
   * @param down If {@code true} the address will be rounded down, otherwise
   * it will be rounded up.
   * @return The chunk-aligned address
   */
  public static Address chunkAlign(Address addr, boolean down) {
    if (!down) addr = addr.plus(BYTES_IN_CHUNK - 1);
    return addr.toWord().rshl(LOG_BYTES_IN_CHUNK).lsh(LOG_BYTES_IN_CHUNK).toAddress();
  }

  /**
   * Align an extent to a space chunk
   *
   * @param bytes The extent to be aligned
   * @param down If {@code true} the extent will be rounded down, otherwise
   * it will be rounded up.
   * @return The chunk-aligned extent
   */
  public static Extent chunkAlign(Extent bytes, boolean down) {
    if (!down) bytes = bytes.plus(BYTES_IN_CHUNK - 1);
    return bytes.toWord().rshl(LOG_BYTES_IN_CHUNK).lsh(LOG_BYTES_IN_CHUNK).toExtent();
  }
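
  /*
   * Worked example for chunkAlign() with the 4 MB (1 << 22 byte) chunks
   * defined at the top of this class:
   *
   *   chunkAlign(0x00403000, true)  -> 0x00400000   (round down)
   *   chunkAlign(0x00403000, false) -> 0x00800000   (round up)
   *   an already chunk-aligned value is returned unchanged in either case
   */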

  /**
   * Convert a fraction into a number of bytes according to the
   * fraction of available bytes.
   *
   * @param frac The fraction of available virtual memory desired
   * @return The corresponding number of bytes, chunk-aligned.
   */
  public static Extent getFracAvailable(float frac) {
    long bytes = (long) (frac * AVAILABLE_BYTES.toLong());
    Word mb = Word.fromIntSignExtend((int) (bytes >> LOG_BYTES_IN_MBYTE));
    Extent rtn = mb.lsh(LOG_BYTES_IN_MBYTE).toExtent();
    return chunkAlign(rtn, false);
  }
}