Use of org.vmmagic.pragma.Unpreemptible in project JikesRVM by JikesRVM.
The class VM, method disableGC.
/**
* disableGC: Disable GC if it hasn't already been disabled. This
* enforces a stack discipline; we need it for the JNI Get*Critical and
* Release*Critical functions. Should be matched with a subsequent call to
* enableGC().
*
* @param recursiveOK whether recursion is allowed.
*/
@Inline
@Unpreemptible("We may boost the size of the stack with GC disabled and may get preempted doing this")
public static void disableGC(boolean recursiveOK) {
  // current (non-GC) thread is going to be holding raw addresses, therefore we must:
  //
  // 1. make sure we have enough stack space to run until GC is re-enabled
  //    (otherwise we might trigger a stack reallocation)
  //    (We can't resize the stack if there's a native frame, so don't
  //     do it and hope for the best)
  //
  // 2. force all other threads that need GC to wait until this thread
  //    is done with the raw addresses
  //
  // 3. ensure that this thread doesn't try to allocate any objects
  //    (because an allocation attempt might trigger a collection that
  //     would invalidate the addresses we're holding)
  //
  RVMThread myThread = RVMThread.getCurrentThread();
  // 0. sanity check and recursion bookkeeping
  int gcDepth = myThread.getDisableGCDepth();
  if (VM.VerifyAssertions)
    VM._assert(gcDepth >= 0);
  gcDepth++;
  myThread.setDisableGCDepth(gcDepth);
  if (gcDepth > 1) {
    // GC is already disabled by this thread.
    return;
  }
  // 1. grow the stack if it may be too small, unless a native frame prevents resizing
  if (Magic.getFramePointer().minus(StackFrameLayout.getStackSizeGCDisabled()).LT(myThread.stackLimit) && !myThread.hasNativeStackFrame()) {
    RVMThread.resizeCurrentStack(myThread.getStackLength() + StackFrameLayout.getStackSizeGCDisabled(), null);
  }
  // 2. make GC wait on this thread: stop taking yieldpoints
  myThread.disableYieldpoints();
  // 3. forbid allocation by this thread while GC is disabled
  if (VM.VerifyAssertions) {
    if (!recursiveOK) {
      // recursion not allowed
      VM._assert(!myThread.getDisallowAllocationsByThisThread());
    }
    myThread.setDisallowAllocationsByThisThread();
  }
}
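Every disableGC() must be balanced by a VM.enableGC() on the same thread; the depth counter makes nesting safe. A minimal sketch of the stack discipline this enforces, in the style of the JNI Get*Critical/Release*Critical pair the javadoc mentions (the helper below is hypothetical; only VM.disableGC, VM.enableGC, Magic.objectAsAddress, and Address.loadByte come from JikesRVM/vmmagic):

// Hypothetical helper: read through a raw heap address while GC is disabled.
static byte firstByteCritical(byte[] src) {
  VM.disableGC(false);                        // pin: the collector must not move 'src'
  Address raw = Magic.objectAsAddress(src);   // raw address, valid only while GC is off
  byte b = raw.loadByte();                    // use the raw pointer
  VM.enableGC();                              // matching call; 'raw' is now invalid
  return b;
}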
Use of org.vmmagic.pragma.Unpreemptible in project JikesRVM by JikesRVM.
The class OptExceptionDeliverer, method unwindStackFrame.
/**
 * Unwind a stack frame, restoring the non-volatile registers it saved.
 */
@Override
@Unpreemptible("Deliver exception possibly from unpreemptible code")
public void unwindStackFrame(CompiledMethod cm, AbstractRegisters registers) {
  Address fp = registers.getInnermostFramePointer();
  OptCompiledMethod compiledMethod = (OptCompiledMethod) cm;
  // restore non-volatile general-purpose registers saved in this frame
  Offset frameOffset = Offset.fromIntSignExtend(compiledMethod.getUnsignedNonVolatileOffset());
  int firstInteger = compiledMethod.getFirstNonVolatileGPR();
  if (firstInteger >= 0) {
    if (VM.BuildFor64Addr) {
      // round the offset up to the next 8-byte boundary
      frameOffset = frameOffset.plus(7).toWord().and(Word.fromIntSignExtend(~7)).toOffset();
    }
    for (int i = firstInteger; i < 32; i++) {
      registers.getGPRs().set(i, fp.loadWord(frameOffset));
      frameOffset = frameOffset.plus(BYTES_IN_ADDRESS);
    }
  }
  // restore non-volatile floating-point registers saved in this frame
  int firstFloat = compiledMethod.getFirstNonVolatileFPR();
  if (firstFloat >= 0) {
    frameOffset = frameOffset.plus(7).toWord().and(Word.fromIntSignExtend(~7)).toOffset();
    for (int i = firstFloat; i < 32; i++) {
      long temp = Magic.getLongAtOffset(Magic.addressAsObject(fp), frameOffset);
      registers.getFPRs()[i] = Magic.longBitsAsDouble(temp);
      frameOffset = frameOffset.plus(BYTES_IN_DOUBLE);
    }
  }
  registers.unwindStackFrame();
}
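The plus(7) ... and(~7) expression rounds frameOffset up to the next 8-byte boundary so the 64-bit register slots are read aligned. A minimal standalone sketch of the same idiom on plain ints (alignUp8 is a hypothetical helper, not JikesRVM API):

// Round an offset up to the next multiple of 8, as the unwinder does above.
static int alignUp8(int offset) {
  return (offset + 7) & ~7;  // 0 -> 0, 1 -> 8, 8 -> 8, 13 -> 16
}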
Use of org.vmmagic.pragma.Unpreemptible in project JikesRVM by JikesRVM.
The class BaselineExceptionDeliverer, method deliverException.
/**
* Pass control to a catch block.
*/
@Override
@Unpreemptible("Unwind stack possibly from unpreemptible code")
public void deliverException(CompiledMethod compiledMethod, Address catchBlockInstructionAddress, Throwable exceptionObject, AbstractRegisters registers) {
  Address fp = registers.getInnermostFramePointer();
  // reset sp to "empty expression stack" state
  Address sp = fp.plus(((ArchBaselineCompiledMethod) compiledMethod).getEmptyStackOffset());
  // push exception object as argument to the catch block
  sp = sp.minus(BYTES_IN_ADDRESS);
  sp.store(Magic.objectAsAddress(exceptionObject));
  // set address at which to resume executing the frame
  registers.setIP(catchBlockInstructionAddress);
  // branch to the catch block; re-enable GC, which was disabled right
  // before Runtime.deliverException was called
  VM.enableGC();
  if (VM.VerifyAssertions)
    VM._assert(registers.getInUse());
  registers.setInUse(false);
  // transfers control to the catch block; execution never returns here
  Magic.restoreHardwareExceptionState(registers);
  if (VM.VerifyAssertions)
    VM._assert(NOT_REACHED);
}
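The expression stack grows toward lower addresses, which is why the code decrements sp before storing. A minimal sketch of that push as a standalone helper (the helper is hypothetical; BYTES_IN_ADDRESS, Address.store, and Magic.objectAsAddress are the calls used above):

// Push one reference onto a downward-growing stack and return the new sp.
static Address push(Address sp, Object value) {
  sp = sp.minus(BYTES_IN_ADDRESS);          // make room: stack grows downward
  sp.store(Magic.objectAsAddress(value));   // the catch block finds its operand here
  return sp;
}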
Use of org.vmmagic.pragma.Unpreemptible in project JikesRVM by JikesRVM.
The class ThinLock, method lock.
@NoInline
@NoNullCheck
@Unpreemptible
public static void lock(Object o, Offset lockOffset) {
  if (STATS)
    fastLocks++;
  Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());
  for (int cnt = 0; ; cnt++) {
    Word old = Magic.getWordAtOffset(o, lockOffset);
    Word stat = old.and(TL_STAT_MASK);
    boolean tryToInflate = false;
    if (stat.EQ(TL_STAT_BIASABLE)) {
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.isZero()) {
        if (ENABLE_BIASED_LOCKING) {
          // lock is unbiased; bias it in our favor and grab it
          if (Synchronization.tryCompareAndSwap(o, lockOffset, old, old.or(threadId).plus(TL_LOCK_COUNT_UNIT))) {
            if (!VM.MagicAttemptImpliesStoreLoadBarrier)
              Magic.fence();
            return;
          }
        } else {
          // biasing is disabled; grab the lock as a thin lock
          if (Synchronization.tryCompareAndSwap(o, lockOffset, old, old.or(threadId).or(TL_STAT_THIN))) {
            if (!VM.MagicAttemptImpliesStoreLoadBarrier)
              Magic.fence();
            return;
          }
        }
      } else if (id.EQ(threadId)) {
        // lock is biased in our favor; bump the recursion count
        Word changed = old.plus(TL_LOCK_COUNT_UNIT);
        if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
          setDedicatedU16(o, lockOffset, changed);
          Magic.combinedLoadBarrier();
          return;
        } else {
          // recursion count overflowed; inflate instead
          tryToInflate = true;
        }
      } else {
        // lock is biased to another thread; revoke the bias by CASing to thin
        if (casFromBiased(o, lockOffset, old, biasBitsToThinBits(old), cnt)) {
          // don't spin, since it's thin now
          continue;
        }
      }
    } else if (stat.EQ(TL_STAT_THIN)) {
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.isZero()) {
        // thin lock is unheld; try to grab it
        if (Synchronization.tryCompareAndSwap(o, lockOffset, old, old.or(threadId))) {
          if (!VM.MagicAttemptImpliesStoreLoadBarrier)
            Magic.fence();
          return;
        }
      } else if (id.EQ(threadId)) {
        // we hold it as a thin lock; bump the recursion count
        Word changed = old.plus(TL_LOCK_COUNT_UNIT);
        if (changed.and(TL_LOCK_COUNT_MASK).isZero()) {
          // recursion count overflowed; inflate instead
          tryToInflate = true;
        } else if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
          if (!VM.MagicAttemptImpliesStoreLoadBarrier)
            Magic.fence();
          return;
        }
      } else if (cnt > retryLimit) {
        // another thread holds it and we've spun long enough; inflate
        tryToInflate = true;
      }
    } else {
      if (VM.VerifyAssertions)
        VM._assert(stat.EQ(TL_STAT_FAT));
      // lock is fat. contend on it.
      if (Lock.getLock(getLockIndex(old)).lockHeavy(o)) {
        return;
      }
    }
    if (tryToInflate) {
      if (STATS)
        slowLocks++;
      // attempt to inflate and acquire; inflation also does the Right Thing
      // if the lock is biased to someone else
      if (inflateAndLock(o, lockOffset)) {
        return;
      }
    } else {
      // CAS failed; yield and retry from a fresh read of the lock word
      Magic.combinedLoadBarrier();
      RVMThread.yieldNoHandshake();
    }
  }
}
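lock() dispatches on the status bits of the lock word: BIASABLE (unowned or biased to some thread), THIN (CAS-held, with owner id and count inlined in the object header), or FAT (the header holds an index into the heavyweight Lock table). A minimal sketch of that dispatch, reusing the TL_* masks from the snippet (the helper itself is hypothetical, and the bit layout is build-dependent):

// Classify a lock word the way lock() does before choosing a path.
static String classify(Word lockWord) {
  Word stat = lockWord.and(TL_STAT_MASK);
  if (stat.EQ(TL_STAT_BIASABLE)) return "biasable";  // unowned, or biased to some thread
  if (stat.EQ(TL_STAT_THIN))     return "thin";      // owner id + count live in the header
  return "fat";                                      // header holds a heavyweight Lock index
}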
Use of org.vmmagic.pragma.Unpreemptible in project JikesRVM by JikesRVM.
The class ThinLock, method unlock.
@NoInline
@NoNullCheck
@Unpreemptible
public static void unlock(Object o, Offset lockOffset) {
  Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());
  for (int cnt = 0; ; cnt++) {
    Word old = Magic.getWordAtOffset(o, lockOffset);
    Word stat = old.and(TL_STAT_MASK);
    if (stat.EQ(TL_STAT_BIASABLE)) {
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.EQ(threadId)) {
        if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
          RVMThread.raiseIllegalMonitorStateException("biased unlocking: we own this object but the count is already zero", o);
        }
        // biased unlock: just decrement the count; no CAS needed since the lock is biased to us
        setDedicatedU16(o, lockOffset, old.minus(TL_LOCK_COUNT_UNIT));
        Magic.fence();
        return;
      } else {
        RVMThread.raiseIllegalMonitorStateException("biased unlocking: we don't own this object", o);
      }
    } else if (stat.EQ(TL_STAT_THIN)) {
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.EQ(threadId)) {
        Word changed;
        if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
          // last unlock: clear the owner, leaving an unheld thin lock
          changed = old.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);
        } else {
          // recursive unlock: just decrement the count
          changed = old.minus(TL_LOCK_COUNT_UNIT);
        }
        Magic.combinedLoadBarrier();
        if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
          if (!VM.MagicAttemptImpliesStoreLoadBarrier)
            Magic.fence();
          return;
        }
        // CAS failed (e.g. the lock was inflated concurrently); retry
      } else {
        if (false) {
          // debug tracing, permanently disabled
          VM.sysWriteln("threadId = ", threadId);
          VM.sysWriteln("id = ", id);
        }
        RVMThread.raiseIllegalMonitorStateException("thin unlocking: we don't own this object", o);
      }
    } else {
      if (VM.VerifyAssertions)
        VM._assert(stat.EQ(TL_STAT_FAT));
      // fat unlock: delegate to the heavyweight lock
      Lock.getLock(getLockIndex(old)).unlockHeavy(o);
      return;
    }
  }
}
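Both lock() and unlock() follow the same optimistic skeleton: read the lock word, compute a replacement, CAS it in, and retry from a fresh read on failure. A minimal sketch of that skeleton (updateWord is a hypothetical stand-in for the per-state transition logic above):

// The read/compute/CAS retry skeleton shared by lock() and unlock().
static void casLoop(Object o, Offset lockOffset) {
  for (;;) {
    Word old = Magic.getWordAtOffset(o, lockOffset);  // snapshot the header word
    Word changed = updateWord(old);                   // hypothetical per-state transition
    if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
      return;                                         // no other thread interfered
    }
    // CAS failed: another thread changed the word; reread and retry
  }
}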