
Example 16 with Extent

Use of org.vmmagic.unboxed.Extent in the JikesRVM project.

From the class RawMemoryFreeList, the raiseHighWater method.

/**
 * Raise the high water mark by requesting more pages from the OS
 * @param blocks the number of blocks by which to raise the high water mark
 */
private void raiseHighWater(int blocks) {
    Extent growExtent = Conversions.pagesToBytes(pagesPerBlock * blocks);
    if (highWater.EQ(limit)) {
        Log.write("limit=", limit);
        Log.write(", highWater=", highWater);
        Log.writeln(", growExtent=", growExtent);
        VM.assertions.fail("Attempt to grow FreeList beyond limit");
    }
    if (highWater.plus(growExtent).GT(limit)) {
        // Clamp growth so we do not map past the limit
        growExtent = limit.diff(highWater).toWord().toExtent();
    }
    mmap(highWater, growExtent);
    highWater = highWater.plus(growExtent);
}
Also used: Extent (org.vmmagic.unboxed.Extent)
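
For illustration, here is a minimal long-based model of the clamping logic above (the class and field names are hypothetical; the real code operates on unboxed Address and Extent values rather than raw longs):

// Hypothetical model: clamp growth to the remaining space below the limit.
final class HighWaterModel {
    private final long limit;   // hard upper bound of the region
    private long highWater;     // current top of the mapped region

    HighWaterModel(long start, long limit) {
        this.highWater = start;
        this.limit = limit;
    }

    /** Raise the high water mark by growBytes, clamping to the limit. */
    void raiseHighWater(long growBytes) {
        if (highWater == limit)
            throw new IllegalStateException("Attempt to grow FreeList beyond limit");
        if (highWater + growBytes > limit)
            growBytes = limit - highWater; // clamp, mirroring limit.diff(highWater)
        // the real code mmaps [highWater, highWater + growBytes) here
        highWater += growBytes;
    }
}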

Example 17 with Extent

Use of org.vmmagic.unboxed.Extent in the JikesRVM project.

From the class Map64, the createFreeList method.

@Override
@Interruptible
public GenericFreeList createFreeList(FreeListPageResource pr, int units, int grain) {
    Space space = pr.getSpace();
    Address start = space.getStart();
    Extent extent = space.getExtent();
    int index = spaceIndex(start);
    units = (int) (units * NON_MAP_FRACTION);
    Extent listExtent = Conversions.pagesToBytes(RawMemoryFreeList.sizeInPages(units, 1));
    if (VMLayoutConstants.VERBOSE_BUILD) {
        Log.write("Allocating free list for space ");
        Log.write(space.getName());
        Log.write(", start = ", start);
        Log.write(", extent = ", extent);
        Log.write(", units = ", units);
        Log.write("  listPages = ", RawMemoryFreeList.sizeInPages(units, 1));
        Log.writeln(", listExtent = ", listExtent);
    }
    RawMemoryFreeList list = new RawMemoryFreeList(start, start.plus(listExtent), units, grain);
    flPageResources[index] = pr;
    flMap[index] = list;
    /* Adjust the base address and highwater to account for the allocated chunks for the map */
    Address base = Conversions.chunkAlign(start.plus(listExtent), false);
    highWater.set(index, base);
    baseAddress.set(index, base);
    return list;
}
Also used: Space (org.mmtk.policy.Space), RawMemoryFreeList (org.mmtk.utility.RawMemoryFreeList), Address (org.vmmagic.unboxed.Address), Extent (org.vmmagic.unboxed.Extent), Interruptible (org.vmmagic.pragma.Interruptible)
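
The key step above is Conversions.chunkAlign(start.plus(listExtent), false), which aligns the address to a chunk boundary so that the space's data pages begin beyond the free-list metadata. A minimal sketch of that alignment arithmetic, assuming (as the usage above suggests) the boolean selects round-down versus round-up, with a hypothetical 4 MB chunk size:

// Hypothetical sketch of chunk alignment; CHUNK_BYTES is an assumed value
// standing in for 1 << VMLayoutConstants.LOG_BYTES_IN_CHUNK.
final class ChunkAlignSketch {
    static final long CHUNK_BYTES = 4L * 1024 * 1024; // assumed 4 MB chunks

    /** Round addr to a chunk boundary: down if 'down' is true, else up. */
    static long chunkAlign(long addr, boolean down) {
        long mask = CHUNK_BYTES - 1;
        return down ? (addr & ~mask) : ((addr + mask) & ~mask);
    }

    public static void main(String[] args) {
        long start = 0x40000000L;  // hypothetical space start
        long listExtent = 123456;  // hypothetical free-list metadata size
        // As in createFreeList above: data pages begin at the next chunk
        // boundary past the free-list metadata.
        long base = chunkAlign(start + listExtent, false);
        System.out.println("base = 0x" + Long.toHexString(base));
    }
}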

Example 18 with Extent

Use of org.vmmagic.unboxed.Extent in the JikesRVM project.

From the class Map64, the allocateContiguousChunks method.

/**
 * Allocate some number of contiguous chunks within a discontiguous region.  In a 64-bit
 * model, this involves extending a contiguous region, using 'head' as the address
 * of the highest chunk allocated.
 *
 * @param descriptor The descriptor for the space to which these chunks will be assigned
 * @param space The space to which these chunks will be assigned
 * @param chunks The number of chunks required
 * @param head The previous contiguous set of chunks for this space (to create a linked list of contiguous regions for each space)
 * @return The address of the assigned memory.  If the request fails we return Address.zero().
 */
@Override
public Address allocateContiguousChunks(int descriptor, Space space, int chunks, Address head) {
    if (VM.VERIFY_ASSERTIONS)
        VM.assertions._assert(spaceIndex(space.getStart()) == SpaceDescriptor.getIndex(descriptor));
    int index = SpaceDescriptor.getIndex(descriptor);
    Address rtn = highWater.get(index);
    Extent extent = Extent.fromIntZeroExtend(chunks << LOG_BYTES_IN_CHUNK);
    highWater.set(index, rtn.plus(extent));
    /* Grow the free list to accommodate the new chunks */
    RawMemoryFreeList freeList = flMap[spaceIndex(space.getStart())];
    if (freeList != null) {
        freeList.growFreeList(Conversions.bytesToPages(extent));
        int basePage = Conversions.bytesToPages(rtn.diff(baseAddress.get(index)));
        for (int offset = 0; offset < chunks * PAGES_IN_CHUNK; offset += PAGES_IN_CHUNK) {
            freeList.setUncoalescable(basePage + offset);
            /* The 32-bit implementation requires that pages are returned allocated to the caller */
            freeList.alloc(PAGES_IN_CHUNK, basePage + offset);
        }
    }
    return rtn;
}
Also used: RawMemoryFreeList (org.mmtk.utility.RawMemoryFreeList), Address (org.vmmagic.unboxed.Address), Extent (org.vmmagic.unboxed.Extent)
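
A plain-int model of the bookkeeping above, converting a chunk count to a byte extent by shifting and then visiting the first page of each chunk; the constants are assumptions for illustration (the real values live in VMLayoutConstants):

// Hypothetical model of the per-chunk loop above, with assumed constants.
final class ChunkBookkeepingSketch {
    static final int LOG_BYTES_IN_CHUNK = 22; // assumed 4 MB chunks
    static final int LOG_BYTES_IN_PAGE = 12;  // assumed 4 KB pages
    static final int PAGES_IN_CHUNK = 1 << (LOG_BYTES_IN_CHUNK - LOG_BYTES_IN_PAGE);

    public static void main(String[] args) {
        int chunks = 3;
        long extentBytes = ((long) chunks) << LOG_BYTES_IN_CHUNK;
        System.out.println("extent = " + extentBytes + " bytes");
        int basePage = 0; // (rtn - baseAddress) in pages in the real code
        // Visit the first page of each new chunk, as the
        // setUncoalescable/alloc calls do above.
        for (int offset = 0; offset < chunks * PAGES_IN_CHUNK; offset += PAGES_IN_CHUNK) {
            System.out.println("chunk starts at page " + (basePage + offset));
        }
    }
}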

Example 19 with Extent

Use of org.vmmagic.unboxed.Extent in the JikesRVM project.

From the class FreeListPageResource, the allocPages method.

/**
 * Allocate <code>requiredPages</code> pages from this resource.<p>
 *
 * If the request can be satisfied, then ensure the pages are
 * mmapped and zeroed before returning the address of the start of
 * the region.  If the request cannot be satisfied, return zero.
 *
 * @param reservedPages The number of pages reserved due to the initial request.
 * @param requiredPages The number of pages required to be allocated.
 * @param zeroed If true, allocated pages are zeroed.
 * @return The start of the first page if successful, zero on
 * failure.
 */
@Override
@Inline
protected Address allocPages(int reservedPages, int requiredPages, boolean zeroed) {
    if (VM.VERIFY_ASSERTIONS)
        VM.assertions._assert(metaDataPagesPerRegion == 0 || requiredPages <= PAGES_IN_CHUNK - metaDataPagesPerRegion);
    lock();
    boolean newChunk = false;
    int pageOffset = freeList.alloc(requiredPages);
    if (pageOffset == GenericFreeList.FAILURE && growable) {
        pageOffset = allocateContiguousChunks(requiredPages);
        newChunk = true;
    }
    if (pageOffset == GenericFreeList.FAILURE) {
        unlock();
        return Address.zero();
    } else {
        pagesCurrentlyOnFreeList -= requiredPages;
        if (pageOffset > highWaterMark) {
            if (highWaterMark == 0 || (pageOffset ^ highWaterMark) > EmbeddedMetaData.PAGES_IN_REGION) {
                int regions = 1 + ((pageOffset - highWaterMark) >> EmbeddedMetaData.LOG_PAGES_IN_REGION);
                int metapages = regions * metaDataPagesPerRegion;
                reserved += metapages;
                committed += metapages;
                newChunk = true;
            }
            highWaterMark = pageOffset;
        }
        Address rtn = start.plus(Conversions.pagesToBytes(pageOffset));
        Extent bytes = Conversions.pagesToBytes(requiredPages);
        // The meta-data portion of reserved Pages was committed above.
        commitPages(reservedPages, requiredPages);
        space.growSpace(rtn, bytes, newChunk);
        unlock();
        HeapLayout.mmapper.ensureMapped(rtn, requiredPages);
        if (zeroed)
            VM.memory.zero(zeroNT, rtn, bytes);
        VM.events.tracePageAcquired(space, rtn, requiredPages);
        return rtn;
    }
}
Also used: Address (org.vmmagic.unboxed.Address), Extent (org.vmmagic.unboxed.Extent), Inline (org.vmmagic.pragma.Inline)
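
The high-water-mark branch above commits extra metadata pages whenever an allocation raises the high water mark across a region boundary; the XOR test is a cheap boundary-crossing check. A hypothetical stand-alone model of that accounting, with an assumed region size:

// Hypothetical model of the metadata accounting above;
// LOG_PAGES_IN_REGION is an assumed value, not the real constant.
final class MetaDataAccountingSketch {
    static final int LOG_PAGES_IN_REGION = 8; // assumed 256-page regions
    static final int PAGES_IN_REGION = 1 << LOG_PAGES_IN_REGION;

    /** Metadata pages newly committed when pageOffset raises the high water mark. */
    static int newMetaPages(int pageOffset, int highWaterMark, int metaDataPagesPerRegion) {
        if (pageOffset <= highWaterMark)
            return 0; // not a new high water mark
        // The XOR exceeds PAGES_IN_REGION only when the two offsets differ
        // in their region-index bits, i.e. a region boundary was crossed.
        if (highWaterMark == 0 || (pageOffset ^ highWaterMark) > PAGES_IN_REGION) {
            int regions = 1 + ((pageOffset - highWaterMark) >> LOG_PAGES_IN_REGION);
            return regions * metaDataPagesPerRegion;
        }
        return 0;
    }

    public static void main(String[] args) {
        System.out.println(newMetaPages(300, 100, 1)); // prints 1: one boundary crossed
    }
}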

Example 20 with Extent

Use of org.vmmagic.unboxed.Extent in the JikesRVM project.

From the class MonotonePageResource, the allocPages method.

/**
 * Allocate <code>requiredPages</code> pages from this resource.  Simply
 * bump the cursor, and fail if we hit the sentinel.<p>
 *
 * If the request can be satisfied, then ensure the pages are
 * mmapped and zeroed before returning the address of the start of
 * the region.  If the request cannot be satisfied, return zero.
 *
 * @param reservedPages The number of pages reserved due to the initial request.
 * @param requiredPages The number of pages required to be allocated.
 * @param zeroed If true, allocated pages are zeroed.
 * @return The start of the first page if successful, zero on
 * failure.
 */
@Override
@Inline
protected Address allocPages(int reservedPages, int requiredPages, boolean zeroed) {
    boolean newChunk = false;
    lock();
    Address rtn = cursor;
    if (VM.VERIFY_ASSERTIONS) {
        /*
         * Cursor should always be zero, or somewhere in the current chunk.  If we have just
         * allocated exactly enough pages to exhaust the current chunk, then cursor can point
         * to the next chunk.
         */
        if (currentChunk.GT(cursor) || (Conversions.chunkAlign(cursor, true).NE(currentChunk) && Conversions.chunkAlign(cursor, true).NE(currentChunk.plus(VMLayoutConstants.BYTES_IN_CHUNK)))) {
            logChunkFields("MonotonePageResource.allocPages:fail");
        }
        VM.assertions._assert(currentChunk.LE(cursor));
        VM.assertions._assert(cursor.isZero() || Conversions.chunkAlign(cursor, true).EQ(currentChunk) || Conversions.chunkAlign(cursor, true).EQ(currentChunk.plus(VMLayoutConstants.BYTES_IN_CHUNK)));
    }
    if (metaDataPagesPerRegion != 0) {
        /* adjust allocation for metadata */
        Address regionStart = getRegionStart(cursor.plus(Conversions.pagesToBytes(requiredPages)));
        Offset regionDelta = regionStart.diff(cursor);
        if (regionDelta.sGE(Offset.zero())) {
            /* start new region, so adjust pages and return address accordingly */
            requiredPages += Conversions.bytesToPages(regionDelta) + metaDataPagesPerRegion;
            rtn = regionStart.plus(Conversions.pagesToBytes(metaDataPagesPerRegion));
        }
    }
    Extent bytes = Conversions.pagesToBytes(requiredPages);
    Address tmp = cursor.plus(bytes);
    if (!contiguous && tmp.GT(sentinel)) {
        /* we're out of virtual memory within our discontiguous region, so ask for more */
        int requiredChunks = Space.requiredChunks(requiredPages);
        // Returns zero on failure
        currentChunk = space.growDiscontiguousSpace(requiredChunks);
        cursor = currentChunk;
        sentinel = cursor.plus(currentChunk.isZero() ? 0 : requiredChunks << VMLayoutConstants.LOG_BYTES_IN_CHUNK);
        rtn = cursor;
        tmp = cursor.plus(bytes);
        newChunk = true;
    }
    if (VM.VERIFY_ASSERTIONS)
        VM.assertions._assert(rtn.GE(cursor) && rtn.LT(cursor.plus(bytes)));
    if (tmp.GT(sentinel)) {
        unlock();
        return Address.zero();
    } else {
        Address old = cursor;
        cursor = tmp;
        /* In a contiguous space we can bump along into the next chunk, so preserve the currentChunk invariant */
        if (contiguous && Conversions.chunkAlign(cursor, true).NE(currentChunk)) {
            currentChunk = Conversions.chunkAlign(cursor, true);
        }
        commitPages(reservedPages, requiredPages);
        space.growSpace(old, bytes, newChunk);
        unlock();
        HeapLayout.mmapper.ensureMapped(old, requiredPages);
        if (zeroed) {
            if (!zeroConcurrent) {
                VM.memory.zero(zeroNT, old, bytes);
            } else {
                /* busy-wait until the concurrent zeroing thread has zeroed past our allocation */
                while (cursor.GT(zeroingCursor)) ;
            }
        }
        VM.events.tracePageAcquired(space, rtn, requiredPages);
        return rtn;
    }
}
Also used: Address (org.vmmagic.unboxed.Address), Extent (org.vmmagic.unboxed.Extent), Offset (org.vmmagic.unboxed.Offset), Inline (org.vmmagic.pragma.Inline)
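
At its core this is a bump-pointer allocator: advance the cursor and fail once it would pass the sentinel. A minimal long-based sketch of just that core path (hypothetical class; the real method also handles metadata regions, locking, discontiguous growth, and zeroing):

// Hypothetical bump-pointer sketch of the core allocation path above.
final class BumpAllocatorSketch {
    private long cursor;         // next free byte
    private final long sentinel; // one past the last usable byte

    BumpAllocatorSketch(long start, long sentinel) {
        this.cursor = start;
        this.sentinel = sentinel;
    }

    /** Bump the cursor by 'bytes'; return 0 (like Address.zero()) on exhaustion. */
    long allocBytes(long bytes) {
        long tmp = cursor + bytes;
        if (tmp > sentinel)
            return 0L; // out of space: the caller must grow the space or fail
        long old = cursor;
        cursor = tmp;
        return old;
    }
}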

Aggregations

Extent (org.vmmagic.unboxed.Extent) 21
Address (org.vmmagic.unboxed.Address) 11
Inline (org.vmmagic.pragma.Inline) 3
Offset (org.vmmagic.unboxed.Offset) 3
Test (org.junit.Test) 2
BaseMMTkTest (org.mmtk.harness.tests.BaseMMTkTest) 2
RawMemoryFreeList (org.mmtk.utility.RawMemoryFreeList) 2
Interruptible (org.vmmagic.pragma.Interruptible) 2
Word (org.vmmagic.unboxed.Word) 2
Field (java.lang.reflect.Field) 1
RVMField (org.jikesrvm.classloader.RVMField) 1
RVMType (org.jikesrvm.classloader.RVMType) 1
TypeReference (org.jikesrvm.classloader.TypeReference) 1
AddressConstantOperand (org.jikesrvm.compilers.opt.ir.operand.AddressConstantOperand) 1
DoubleConstantOperand (org.jikesrvm.compilers.opt.ir.operand.DoubleConstantOperand) 1
FloatConstantOperand (org.jikesrvm.compilers.opt.ir.operand.FloatConstantOperand) 1
IntConstantOperand (org.jikesrvm.compilers.opt.ir.operand.IntConstantOperand) 1
LongConstantOperand (org.jikesrvm.compilers.opt.ir.operand.LongConstantOperand) 1
NullConstantOperand (org.jikesrvm.compilers.opt.ir.operand.NullConstantOperand) 1
ObjectConstantOperand (org.jikesrvm.compilers.opt.ir.operand.ObjectConstantOperand) 1