Use of org.vmmagic.pragma.Inline in project JikesRVM by JikesRVM.
The class FreeListPageResource, method allocPages.
/**
* Allocate <code>requiredPages</code> pages from this resource.<p>
*
* If the request can be satisfied, then ensure the pages are
* mmapped and zeroed before returning the address of the start of
* the region. If the request cannot be satisfied, return zero.
*
* @param reservedPages The number of pages reserved due to the initial request.
* @param requiredPages The number of pages required to be allocated.
* @param zeroed If true, allocated pages are zeroed.
* @return The start of the first page if successful, zero on
* failure.
*/
@Override
@Inline
protected Address allocPages(int reservedPages, int requiredPages, boolean zeroed) {
  if (VM.VERIFY_ASSERTIONS)
    VM.assertions._assert(metaDataPagesPerRegion == 0 || requiredPages <= PAGES_IN_CHUNK - metaDataPagesPerRegion);
  lock();
  boolean newChunk = false;
  int pageOffset = freeList.alloc(requiredPages);
  if (pageOffset == GenericFreeList.FAILURE && growable) {
    pageOffset = allocateContiguousChunks(requiredPages);
    newChunk = true;
  }
  if (pageOffset == GenericFreeList.FAILURE) {
    unlock();
    return Address.zero();
  } else {
    pagesCurrentlyOnFreeList -= requiredPages;
    if (pageOffset > highWaterMark) {
      if (highWaterMark == 0 || (pageOffset ^ highWaterMark) > EmbeddedMetaData.PAGES_IN_REGION) {
        int regions = 1 + ((pageOffset - highWaterMark) >> EmbeddedMetaData.LOG_PAGES_IN_REGION);
        int metapages = regions * metaDataPagesPerRegion;
        reserved += metapages;
        committed += metapages;
        newChunk = true;
      }
      highWaterMark = pageOffset;
    }
    Address rtn = start.plus(Conversions.pagesToBytes(pageOffset));
    Extent bytes = Conversions.pagesToBytes(requiredPages);
    // The meta-data portion of the reserved pages was committed above.
    commitPages(reservedPages, requiredPages);
    space.growSpace(rtn, bytes, newChunk);
    unlock();
    HeapLayout.mmapper.ensureMapped(rtn, requiredPages);
    if (zeroed)
      VM.memory.zero(zeroNT, rtn, bytes);
    VM.events.tracePageAcquired(space, rtn, requiredPages);
    return rtn;
  }
}
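The heart of this path is freeList.alloc(requiredPages), which hands back a page offset for a contiguous run, or GenericFreeList.FAILURE. As a rough illustration of that contract, here is a minimal first-fit sketch over a bitmap; the class SimplePageFreeList and its internals are hypothetical and are not MMTk's GenericFreeList implementation.

// A minimal, hypothetical stand-in for the freeList.alloc() contract:
// first-fit allocation of contiguous page runs over a bitmap. This is
// not MMTk's GenericFreeList implementation.
import java.util.BitSet;

final class SimplePageFreeList {
  static final int FAILURE = -1;   // mirrors GenericFreeList.FAILURE
  private final BitSet used;       // one bit per page; set = allocated
  private final int pages;

  SimplePageFreeList(int pages) {
    this.pages = pages;
    this.used = new BitSet(pages);
  }

  /** Return the page offset of a free run of n pages, or FAILURE. */
  int alloc(int n) {
    int start = 0;
    while (start + n <= pages) {
      int nextUsed = used.nextSetBit(start);
      if (nextUsed == -1 || nextUsed - start >= n) {
        used.set(start, start + n);   // claim [start, start + n)
        return start;
      }
      start = nextUsed + 1;           // skip past the allocated page and retry
    }
    return FAILURE;
  }

  void free(int offset, int n) {
    used.clear(offset, offset + n);
  }

  public static void main(String[] args) {
    SimplePageFreeList fl = new SimplePageFreeList(16);
    int a = fl.alloc(4);               // 0
    fl.alloc(4);                       // 4
    fl.free(a, 4);
    System.out.println(fl.alloc(2));   // 0: the freed run is reused
  }
}

The page offset such an allocator returns is exactly what the method above converts to an address with start.plus(Conversions.pagesToBytes(pageOffset)).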
Use of org.vmmagic.pragma.Inline in project JikesRVM by JikesRVM.
The class MonotonePageResource, method allocPages.
/**
* Allocate <code>requiredPages</code> pages from this resource. Simply
* bump the cursor, and fail if we hit the sentinel.<p>
*
* If the request can be satisfied, then ensure the pages are
* mmapped and zeroed before returning the address of the start of
* the region. If the request cannot be satisfied, return zero.
*
* @param reservedPages The number of pages reserved due to the initial request.
* @param requiredPages The number of pages required to be allocated.
* @param zeroed If true, allocated pages are zeroed.
* @return The start of the first page if successful, zero on
* failure.
*/
@Override
@Inline
protected Address allocPages(int reservedPages, int requiredPages, boolean zeroed) {
  boolean newChunk = false;
  lock();
  Address rtn = cursor;
  if (VM.VERIFY_ASSERTIONS) {
    /*
     * Cursor should always be zero, or somewhere in the current chunk. If we have just
     * allocated exactly enough pages to exhaust the current chunk, then cursor can point
     * to the next chunk.
     */
    if (currentChunk.GT(cursor) || (Conversions.chunkAlign(cursor, true).NE(currentChunk) && Conversions.chunkAlign(cursor, true).NE(currentChunk.plus(VMLayoutConstants.BYTES_IN_CHUNK)))) {
      logChunkFields("MonotonePageResource.allocPages:fail");
    }
    VM.assertions._assert(currentChunk.LE(cursor));
    VM.assertions._assert(cursor.isZero() || Conversions.chunkAlign(cursor, true).EQ(currentChunk) || Conversions.chunkAlign(cursor, true).EQ(currentChunk.plus(VMLayoutConstants.BYTES_IN_CHUNK)));
  }
  if (metaDataPagesPerRegion != 0) {
    /* adjust allocation for metadata */
    Address regionStart = getRegionStart(cursor.plus(Conversions.pagesToBytes(requiredPages)));
    Offset regionDelta = regionStart.diff(cursor);
    if (regionDelta.sGE(Offset.zero())) {
      /* start new region, so adjust pages and return address accordingly */
      requiredPages += Conversions.bytesToPages(regionDelta) + metaDataPagesPerRegion;
      rtn = regionStart.plus(Conversions.pagesToBytes(metaDataPagesPerRegion));
    }
  }
  Extent bytes = Conversions.pagesToBytes(requiredPages);
  Address tmp = cursor.plus(bytes);
  if (!contiguous && tmp.GT(sentinel)) {
    /* we're out of virtual memory within our discontiguous region, so ask for more */
    int requiredChunks = Space.requiredChunks(requiredPages);
    // Returns zero on failure
    currentChunk = space.growDiscontiguousSpace(requiredChunks);
    cursor = currentChunk;
    sentinel = cursor.plus(currentChunk.isZero() ? 0 : requiredChunks << VMLayoutConstants.LOG_BYTES_IN_CHUNK);
    rtn = cursor;
    tmp = cursor.plus(bytes);
    newChunk = true;
  }
  if (VM.VERIFY_ASSERTIONS)
    VM.assertions._assert(rtn.GE(cursor) && rtn.LT(cursor.plus(bytes)));
  if (tmp.GT(sentinel)) {
    unlock();
    return Address.zero();
  } else {
    Address old = cursor;
    cursor = tmp;
    /* In a contiguous space we can bump along into the next chunk, so preserve the currentChunk invariant */
    if (contiguous && Conversions.chunkAlign(cursor, true).NE(currentChunk)) {
      currentChunk = Conversions.chunkAlign(cursor, true);
    }
    commitPages(reservedPages, requiredPages);
    space.growSpace(old, bytes, newChunk);
    unlock();
    HeapLayout.mmapper.ensureMapped(old, requiredPages);
    if (zeroed) {
      if (!zeroConcurrent) {
        VM.memory.zero(zeroNT, old, bytes);
      } else {
        /* spin until the concurrent zeroing thread has zeroed past our allocation */
        while (cursor.GT(zeroingCursor)) ;
      }
    }
    VM.events.tracePageAcquired(space, rtn, requiredPages);
    return rtn;
  }
}
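Stripped of locking, assertions, and the discontiguous-space path, the monotone policy is a cursor bump plus a skip over per-region metadata whenever a request opens a new region. The following sketch mirrors that arithmetic with plain longs in place of Address/Offset/Extent; the class and its constants (PAGE_BYTES, REGION_BYTES, META_PAGES) are illustrative assumptions, not MMTk values.

// Sketch of the monotone bump with a per-region metadata skip, using
// plain longs in place of Address/Offset/Extent. Constants are
// illustrative, not MMTk's real values.
final class MonotoneBumpSketch {
  static final long PAGE_BYTES = 4096;
  static final long REGION_BYTES = 4 * 1024 * 1024;   // power of two
  static final long META_PAGES = 2;                   // metaDataPagesPerRegion

  long cursor;    // next free byte
  long sentinel;  // first byte past the space

  /** Round an address down to the start of its region. */
  static long regionStart(long addr) {
    return addr & ~(REGION_BYTES - 1);
  }

  /** Returns the start of the allocated run, or 0 on exhaustion. */
  long allocPages(long requiredPages) {
    long rtn = cursor;
    // If the request reaches into a new region, pad it so the region's
    // metadata pages are skipped and the result lands just past them.
    long newRegion = regionStart(cursor + requiredPages * PAGE_BYTES);
    if (newRegion - cursor >= 0) {
      requiredPages += (newRegion - cursor) / PAGE_BYTES + META_PAGES;
      rtn = newRegion + META_PAGES * PAGE_BYTES;
    }
    long tmp = cursor + requiredPages * PAGE_BYTES;
    if (tmp > sentinel) return 0;   // fail, like Address.zero() above
    cursor = tmp;
    return rtn;
  }

  public static void main(String[] args) {
    MonotoneBumpSketch pr = new MonotoneBumpSketch();
    pr.sentinel = 4 * REGION_BYTES;
    System.out.println(pr.allocPages(8));   // 8192: two metadata pages skipped
  }
}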
Use of org.vmmagic.pragma.Inline in project JikesRVM by JikesRVM.
The class BumpPointer, method scanRegion.
/**
* Perform a linear scan through a single contiguous region
*
* @param scanner The scan object to delegate to.
* @param start The start of this region
*/
@Inline
private void scanRegion(LinearScan scanner, Address start) {
  /* Get the end of this region */
  Address dataEnd = start.plus(DATA_END_OFFSET).loadAddress();
  /* A dataEnd of zero denotes the current (still open) region. */
  Address currentLimit = (dataEnd.isZero() ? cursor : dataEnd);
  if (currentLimit.EQ(start.plus(DATA_END_OFFSET).plus(BYTES_IN_ADDRESS))) {
    /* Empty region, so we cannot call getObjectFromStartAddress() */
    return;
  }
  ObjectReference current = VM.objectModel.getObjectFromStartAddress(start.plus(DATA_START_OFFSET));
  /* Loop through each object up to the limit */
  do {
    /* Read end address first, as scan may be destructive */
    Address currentObjectEnd = VM.objectModel.getObjectEndAddress(current);
    scanner.scan(current);
    if (currentObjectEnd.GE(currentLimit)) {
      /* We have scanned the last object */
      break;
    }
    /* Find the next object from the start address (dealing with alignment gaps, etc.) */
    ObjectReference next = VM.objectModel.getObjectFromStartAddress(currentObjectEnd);
    if (VM.VERIFY_ASSERTIONS) {
      /* Must be monotonically increasing */
      VM.assertions._assert(next.toAddress().GT(current.toAddress()));
    }
    current = next;
  } while (true);
}
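The scanner parameter is an org.mmtk.utility.alloc.LinearScan, whose single scan(ObjectReference) callback is invoked once per object found. A minimal sketch of such a callback, assuming only that abstract method (the ObjectCounter class itself is hypothetical):

// A hypothetical LinearScan callback that counts the objects in a region.
// It relies only on LinearScan's abstract scan(ObjectReference) method.
import org.mmtk.utility.alloc.LinearScan;
import org.vmmagic.pragma.Uninterruptible;
import org.vmmagic.unboxed.ObjectReference;

@Uninterruptible
final class ObjectCounter extends LinearScan {
  private int count = 0;

  @Override
  public void scan(ObjectReference object) {
    count++;   // inspect the object here; do not touch objects beyond it
  }

  public int getCount() {
    return count;
  }
}

Since scanRegion reads each object's end address before invoking scan, a callback may be destructive to the object it is handed, but it must leave the objects further up the region intact; scanRegion itself is private, so a counter like this would be passed to the public traversal that drives it.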
Use of org.vmmagic.pragma.Inline in project JikesRVM by JikesRVM.
The class BumpPointer, method alloc.
/**
* Allocate space for a new object. This is frequently executed code and
* the coding is deliberately sensitive to the optimizing compiler.
* After changing this, always check the IR/MC that is generated.
*
* @param bytes The number of bytes allocated
* @param align The requested alignment
* @param offset The offset from the alignment
* @return The address of the first byte of the allocated region
*/
@Inline
public final Address alloc(int bytes, int align, int offset) {
  Address start = alignAllocationNoFill(cursor, align, offset);
  Address end = start.plus(bytes);
  if (end.GT(internalLimit))
    return allocSlow(start, end, align, offset);
  fillAlignmentGap(cursor, start);
  cursor = end;
  end.plus(SIZE_OF_TWO_X86_CACHE_LINES_IN_BYTES).prefetch();
  return start;
}
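The only non-obvious step here is alignAllocationNoFill, which advances the cursor to the smallest address at which address + offset is align-aligned, using a branch-free mask trick (align must be a power of two). Here is a sketch of that arithmetic on plain ints; the standalone class is illustrative, not the MMTk helper itself.

// Sketch of the branch-free alignment step behind alignAllocationNoFill,
// on plain ints rather than Address. Illustrative only; align must be a
// power of two for the mask trick to work.
final class AlignSketch {
  /** Smallest a >= addr such that (a + offset) is a multiple of align. */
  static int alignAllocation(int addr, int align, int offset) {
    int mask = align - 1;
    return addr + ((-offset - addr) & mask);
  }

  public static void main(String[] args) {
    // Align to 8 with offset 4: 9 advances to 12, since 12 + 4 = 16.
    System.out.println(alignAllocation(9, 8, 4));   // prints 12
  }
}

The explicit prefetch of the cache lines just past end is part of the hand-tuning the javadoc warns about: after any change to this method, check the generated IR/MC.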
Use of org.vmmagic.pragma.Inline in project JikesRVM by JikesRVM.
The class SharedDeque, method alloc.
@Inline
final Address alloc() {
  Address rtn = rps.acquire(PAGES_PER_BUFFER);
  if (rtn.isZero()) {
    Space.printUsageMB();
    VM.assertions.fail("Failed to allocate space for queue. Is metadata virtual memory exhausted?");
  }
  if (VM.VERIFY_ASSERTIONS)
    VM.assertions._assert(rtn.EQ(bufferStart(rtn)));
  return rtn;
}
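The trailing assertion checks that the raw page resource returned a buffer-aligned address: bufferStart rounds an address down to a buffer boundary, so an aligned address equals its own buffer start. A sketch of that check with plain longs, assuming a power-of-two buffer size (all names and constants here are illustrative):

// Sketch of the buffer-alignment check: bufferStart rounds down to a
// power-of-two buffer boundary, so an aligned address equals its own
// buffer start. Names and constants are illustrative.
final class BufferAlignSketch {
  static final long BYTES_IN_PAGE = 4096;
  static final long PAGES_PER_BUFFER = 1;
  static final long BUFFER_BYTES = PAGES_PER_BUFFER * BYTES_IN_PAGE;

  static long bufferStart(long addr) {
    return addr & ~(BUFFER_BYTES - 1);   // clear the low-order bits
  }

  public static void main(String[] args) {
    long aligned = 3 * BUFFER_BYTES;
    System.out.println(aligned == bufferStart(aligned));   // true for any aligned result
    System.out.println(bufferStart(aligned + 100));        // rounds back down
  }
}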