Use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM.
Class BaselineCompilerImpl, method genCondBranch.
/**
* Emits a conditional branch on the given condition and bytecode target.
* The caller has just emitted the instruction sequence to set the condition codes.
*
* @param cond condition byte
* @param bTarget target bytecode index
*/
private void genCondBranch(byte cond, int bTarget) {
int mTarget = bytecodeMap[bTarget];
if (!VM.runningTool && ((BaselineCompiledMethod) compiledMethod).hasCounterArray()) {
// Allocate two counters: taken and not taken
int entry = edgeCounterIdx;
edgeCounterIdx += 2;
// Flip conditions so we can jump over the increment of the taken counter.
ForwardReference notTaken = asm.forwardJcc(asm.flipCode(cond));
// Increment taken counter & jump to target
incEdgeCounter(T1, null, entry + EdgeCounts.TAKEN);
asm.emitJMP_ImmOrLabel(mTarget, bTarget);
// Increment not taken counter
notTaken.resolve(asm);
incEdgeCounter(T1, null, entry + EdgeCounts.NOT_TAKEN);
} else {
asm.emitJCC_Cond_ImmOrLabel(cond, mTarget, bTarget);
}
}
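The snippet above shows the core ForwardReference pattern: emit a conditional jump whose target is not yet known, keep a handle to it, emit the fall-through code, and patch the jump by calling resolve(asm) once the target's machine-code index is known. The following toy model is only a sketch of that mechanism, not the JikesRVM Assembler API; the Asm and Ref classes and the integer "instructions" are invented for illustration.
// Toy model of the mechanism (NOT the JikesRVM Assembler API): a forward jump is
// emitted with a placeholder target, a reference to the placeholder is kept, and
// resolve() patches it once the target's machine-code index is known.
import java.util.ArrayList;
import java.util.List;

final class ForwardRefSketch {
  static final class Asm {
    final List<Integer> code = new ArrayList<>();  // one "word" per instruction slot
    int emit(int word) { code.add(word); return code.size() - 1; }
    int here() { return code.size(); }             // index of the next instruction
  }

  static final class Ref {
    final int placeholderIdx;
    Ref(int idx) { placeholderIdx = idx; }
    void resolve(Asm asm) { asm.code.set(placeholderIdx, asm.here()); } // patch the jump target
  }

  public static void main(String[] args) {
    Asm asm = new Asm();
    Ref notTaken = new Ref(asm.emit(-1)); // forward conditional jump, target still unknown (-1)
    asm.emit(100);                        // taken path: e.g. increment the taken counter
    asm.emit(200);                        // e.g. jump to the bytecode target
    notTaken.resolve(asm);                // not-taken path starts here; patch the jump
    asm.emit(300);                        // e.g. increment the not-taken counter
    System.out.println(asm.code);         // [3, 100, 200, 300]
  }
}
Running it prints [3, 100, 200, 300]: the placeholder at index 0 has been patched to the index of the not-taken path, mirroring what notTaken.resolve(asm) does in genCondBranch.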
Use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM.
Class BaselineCompilerImpl, method genThreadSwitchTest.
/**
* @param whereFrom is this thread switch from a PROLOGUE, BACKEDGE, or EPILOGUE?
*/
private void genThreadSwitchTest(int whereFrom) {
if (!isInterruptible) {
return;
}
// thread switch requested ??
asm.emitCMP_RegDisp_Imm(THREAD_REGISTER, Entrypoints.takeYieldpointField.getOffset(), 0);
ForwardReference fr1;
Offset yieldOffset;
if (whereFrom == RVMThread.PROLOGUE) {
// Take yieldpoint if yieldpoint flag is non-zero (either 1 or -1)
fr1 = asm.forwardJcc(EQ);
yieldOffset = Entrypoints.yieldpointFromPrologueMethod.getOffset();
} else if (whereFrom == RVMThread.BACKEDGE) {
// Take yieldpoint if yieldpoint flag is >0
fr1 = asm.forwardJcc(LE);
yieldOffset = Entrypoints.yieldpointFromBackedgeMethod.getOffset();
} else {
// EPILOGUE
// Take yieldpoint if yieldpoint flag is non-zero (either 1 or -1)
fr1 = asm.forwardJcc(EQ);
yieldOffset = Entrypoints.yieldpointFromEpilogueMethod.getOffset();
}
asm.generateJTOCcall(yieldOffset);
fr1.resolve(asm);
if (VM.BuildForAdaptiveSystem && options.INVOCATION_COUNTERS) {
int id = compiledMethod.getId();
InvocationCounts.allocateCounter(id);
asm.generateJTOCloadWord(ECX, AosEntrypoints.invocationCountsField.getOffset());
if (VM.BuildFor32Addr) {
asm.emitSUB_RegDisp_Imm(ECX, Offset.fromIntZeroExtend(compiledMethod.getId() << LOG_BYTES_IN_INT), 1);
} else {
asm.emitSUB_RegDisp_Imm_Quad(ECX, Offset.fromIntZeroExtend(compiledMethod.getId() << LOG_BYTES_IN_INT), 1);
}
ForwardReference notTaken = asm.forwardJcc(GT);
asm.emitPUSH_Imm(id);
genParameterRegisterLoad(asm, 1);
asm.generateJTOCcall(AosEntrypoints.invocationCounterTrippedMethod.getOffset());
notTaken.resolve(asm);
}
}
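The only difference between the three yieldpoint sites is the condition used to branch around the call: prologue and epilogue take the yieldpoint for any non-zero flag, while a backedge takes it only for a positive flag. A hedged sketch of that selection follows; YieldSite and skipCondition are illustrative names, not part of JikesRVM.
// Sketch of the condition selection only. The generated Jcc uses the condition that
// *skips* the yieldpoint call, which is why it is the negation of "take yieldpoint".
final class YieldpointConditionSketch {
  enum YieldSite { PROLOGUE, BACKEDGE, EPILOGUE }

  static String skipCondition(YieldSite site) {
    // takeYieldpoint: 0 = none requested; >0 = all sites; <0 = prologue/epilogue only
    // (as described by the comments in genThreadSwitchTest above)
    return site == YieldSite.BACKEDGE
        ? "LE"   // backedge: skip the call unless takeYieldpoint > 0
        : "EQ";  // prologue/epilogue: skip the call only when takeYieldpoint == 0
  }
}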
Use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM.
Class JNICompiler, method generateEpilogForJNIMethod.
/**
* Handles the C to Java transition: JNI methods in JNIFunctions.java.
* Creates an epilogue for the baseline compiler.
*
* @param asm the assembler to use
* @param method the method that's being compiled
*/
public static void generateEpilogForJNIMethod(Assembler asm, RVMMethod method) {
if (VM.BuildFor32Addr) {
// if returning long, switch the order of the hi/lo word in T0 and T1
if (method.getReturnType().isLongType()) {
asm.emitPUSH_Reg(T1);
asm.emitMOV_Reg_Reg(T1, T0);
asm.emitPOP_Reg(T0);
} else {
if (SSE2_FULL && VM.BuildFor32Addr) {
// Marshal from XMM0 -> FP0
if (method.getReturnType().isDoubleType()) {
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor32Addr);
asm.emitMOVSD_RegDisp_Reg(THREAD_REGISTER, Entrypoints.scratchStorageField.getOffset(), XMM0);
asm.emitFLD_Reg_RegDisp_Quad(FP0, THREAD_REGISTER, Entrypoints.scratchStorageField.getOffset());
} else if (method.getReturnType().isFloatType()) {
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor32Addr);
asm.emitMOVSS_RegDisp_Reg(THREAD_REGISTER, Entrypoints.scratchStorageField.getOffset(), XMM0);
asm.emitFLD_Reg_RegDisp(FP0, THREAD_REGISTER, Entrypoints.scratchStorageField.getOffset());
}
}
}
}
// S0 <- JNIEnvironment
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(S0, THREAD_REGISTER, Entrypoints.jniEnvField.getOffset());
} else {
asm.emitMOV_Reg_RegDisp_Quad(S0, THREAD_REGISTER, Entrypoints.jniEnvField.getOffset());
}
// set jniEnv TopJavaFP using value saved in frame in prolog
if (VM.BuildFor32Addr) {
// EDI<-saved TopJavaFP (offset)
asm.emitMOV_Reg_RegDisp(EDI, EBP, SAVED_JAVA_FP_OFFSET);
// change offset from FP into address
asm.emitADD_Reg_Reg(EDI, EBP);
// jniEnv.TopJavaFP <- EDI
asm.emitMOV_RegDisp_Reg(S0, Entrypoints.JNITopJavaFPField.getOffset(), EDI);
} else {
// EDI<-saved TopJavaFP (offset)
asm.emitMOV_Reg_RegDisp_Quad(EDI, EBP, SAVED_JAVA_FP_OFFSET);
// change offset from FP into address
asm.emitADD_Reg_Reg_Quad(EDI, EBP);
// jniEnv.TopJavaFP <- EDI
asm.emitMOV_RegDisp_Reg_Quad(S0, Entrypoints.JNITopJavaFPField.getOffset(), EDI);
}
// NOTE: we could save the TR in the JNI env, but no need, that would have
// already been done.
// what's going on here:
// - SP and EBP have important stuff in them, but that's fine, since
// a call will restore SP and EBP is non-volatile for RVM code
// - TR still refers to the thread
// save return values
asm.emitPUSH_Reg(T0);
asm.emitPUSH_Reg(T1);
// attempt to change the thread state to IN_JNI
asm.emitMOV_Reg_Imm(T0, RVMThread.IN_JAVA);
asm.emitMOV_Reg_Imm(T1, RVMThread.IN_JNI);
asm.emitLockNextInstruction();
asm.emitCMPXCHG_RegDisp_Reg(THREAD_REGISTER, Entrypoints.execStatusField.getOffset(), T1);
// if success, skip the slow path call
ForwardReference doneEnterJNIRef = asm.forwardJcc(EQ);
// fast path failed, make the call
asm.generateJTOCcall(Entrypoints.enterJNIBlockedFromJNIFunctionCallMethod.getOffset());
// OK - we reach here when we have set the state to IN_JNI
doneEnterJNIRef.resolve(asm);
// restore return values
asm.emitPOP_Reg(T1);
asm.emitPOP_Reg(T0);
// reload native/C nonvolatile regs - saved in prolog
for (FloatingPointMachineRegister r : NATIVE_NONVOLATILE_FPRS) {
// TODO: we assume non-volatile will hold at most a double
if (r instanceof XMM) {
asm.emitMOVSD_Reg_RegInd((XMM) r, SP);
} else {
// NB this will fail for anything other than FPR0
asm.emitFLD_Reg_RegInd_Quad((FPR) r, SP);
}
// adjust space for double
asm.emitPOP_Reg(T0);
asm.emitPOP_Reg(T0);
}
// restore the native nonvolatile GPRs that were pushed at the start of the prologue (pop in reverse order)
for (int i = NATIVE_NONVOLATILE_GPRS.length - 1; i >= 0; i--) {
GPR r = NATIVE_NONVOLATILE_GPRS[i];
asm.emitPOP_Reg(r);
}
// Discard the JNIEnv, CMID and outermost native frame pointer
if (VM.BuildFor32Addr) {
// discard current stack frame
asm.emitADD_Reg_Imm(SP, 3 * WORDSIZE);
} else {
// discard current stack frame
asm.emitADD_Reg_Imm_Quad(SP, 3 * WORDSIZE);
}
// return to caller
asm.emitRET();
}
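The enter-JNI sequence above is a classic compare-and-swap fast path: try to flip execStatus from IN_JAVA to IN_JNI with LOCK CMPXCHG, use a ForwardReference to skip the slow call when the exchange succeeds, and otherwise call the blocked-entry helper. A rough Java analogue is sketched below; it is purely illustrative, with stand-in constants and an enterJNIBlocked() placeholder rather than the RVMThread API.
// Rough analogue of the fast path implemented above with LOCK CMPXCHG + forwardJcc(EQ).
import java.util.concurrent.atomic.AtomicInteger;

final class ExecStatusSketch {
  static final int IN_JAVA = 1, IN_JNI = 2;          // illustrative values, not RVMThread's
  final AtomicInteger execStatus = new AtomicInteger(IN_JAVA);

  void enterJNI() {
    // Fast path: the compare-and-set succeeds, so the slow call is skipped
    // (the role played by doneEnterJNIRef in the generated code).
    if (!execStatus.compareAndSet(IN_JAVA, IN_JNI)) {
      enterJNIBlocked();                             // slow path: handshake, then complete the transition
    }
  }

  void enterJNIBlocked() { /* block for GC/handshake, then finish moving to IN_JNI */ }
}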
Use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM.
Class JNICompiler, method generateGlueCodeForJNIMethod.
/**
* Handles the C to Java transition: JNI methods in JNIFunctions.java.
* Creates a prologue for the baseline compiler.
* <pre>
* NOTE:
* - We need THREAD_REGISTER to access the Java environment; we can get it from
*   the JNIEnv* (which is an interior pointer to the JNIEnvironment).
* - Unlike the PowerPC scheme, which has a special prolog preceding the normal
*   Java prolog, the Intel scheme replaces the Java prolog completely with the
*   special prolog.
*
*            Stack on entry                Stack at end of prolog after call
*            high memory                   high memory
*           |            |                 |             |
*   EBP ->  |saved FP    |                 |saved FP     |
*           |   ...      |                 |   ...       |
*           |            |                 |             |
*           |arg n-1     |                 |arg n-1      |
*  native   |   ...      |                 |   ...       |
*  caller   |arg 0       | JNIEnv*         |arg 0        | JNIEnvironment
*   ESP ->  |return addr |                 |return addr  |
*           |            |       EBP ->    |saved FP     | outermost native frame pointer
*           |            |                 |methodID     | normal MethodID for JNI function
*           |            |                 |saved JavaFP | offset to preceding Java frame
*           |            |                 |saved nonvol | to be used for nonvolatile storage,
*           |            |                 |   ...       |   including ebp on entry
*           |            |                 |arg 0        | copied in reverse order (JNIEnvironment)
*           |            |                 |   ...       |
*           |            |       ESP ->    |arg n-1      |
*           |            |                 |             | normally compiled Java code continues
*           |            |                 |             |
*           |            |                 |             |
*           |            |                 |             |
*            low memory                    low memory
* </pre>
*
* @param asm the assembler to use
* @param method the method that's being compiled (i.e. the method which is a bridge
* from native).
* @param methodID the id of the compiled method
*/
public static void generateGlueCodeForJNIMethod(Assembler asm, NormalMethod method, int methodID) {
// Variable tracking the depth of the stack as we generate the prologue
int stackDepth = 0;
// 2nd word of header = space for frame pointer
if (VM.VerifyAssertions)
VM._assert(STACKFRAME_FRAME_POINTER_OFFSET.toInt() == stackDepth << LG_WORDSIZE);
asm.emitPUSH_Reg(EBP);
stackDepth--;
// start new frame: set FP to point to the new frame
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_Reg(EBP, SP);
} else {
asm.emitMOV_Reg_Reg_Quad(EBP, SP);
}
// set 3rd word of header: method ID
if (VM.VerifyAssertions)
VM._assert(STACKFRAME_METHOD_ID_OFFSET.toInt() == stackDepth << LG_WORDSIZE);
asm.emitPUSH_Imm(methodID);
stackDepth--;
// buy space for the SAVED_JAVA_FP
if (VM.VerifyAssertions)
VM._assert(STACKFRAME_BODY_OFFSET.toInt() == stackDepth << LG_WORDSIZE);
asm.emitPUSH_Reg(T0);
stackDepth--;
// store non-volatiles
for (GPR r : NATIVE_NONVOLATILE_GPRS) {
if (r != EBP) {
asm.emitPUSH_Reg(r);
} else {
// save original EBP value
asm.emitPUSH_RegInd(EBP);
}
stackDepth--;
}
for (FloatingPointMachineRegister r : NATIVE_NONVOLATILE_FPRS) {
// TODO: we assume non-volatile will hold at most a double
// adjust space for double
asm.emitPUSH_Reg(T0);
asm.emitPUSH_Reg(T0);
stackDepth -= 2;
if (r instanceof XMM) {
asm.emitMOVSD_RegInd_Reg(SP, (XMM) r);
} else {
// NB this will fail for anything other than FPR0
asm.emitFST_RegInd_Reg_Quad(SP, (FPR) r);
}
}
if (VM.VerifyAssertions) {
boolean b = stackDepth << LG_WORDSIZE == STACKFRAME_BODY_OFFSET.toInt() - (SAVED_GPRS_FOR_JNI << LG_WORDSIZE);
if (!b) {
String msg = "of2fp=" + stackDepth + " sg4j=" + SAVED_GPRS_FOR_JNI;
VM._assert(VM.NOT_REACHED, msg);
}
}
// Adjust first param from JNIEnv* to JNIEnvironment.
final Offset firstStackArgOffset = Offset.fromIntSignExtend(2 * WORDSIZE);
if (jniExternalFunctionsFieldOffset != 0) {
if (NATIVE_PARAMETER_GPRS.length > 0) {
if (VM.BuildFor32Addr) {
asm.emitSUB_Reg_Imm(NATIVE_PARAMETER_GPRS[0], jniExternalFunctionsFieldOffset);
} else {
asm.emitSUB_Reg_Imm_Quad(NATIVE_PARAMETER_GPRS[0], jniExternalFunctionsFieldOffset);
}
} else {
if (VM.BuildFor32Addr) {
asm.emitSUB_RegDisp_Imm(EBP, firstStackArgOffset, jniExternalFunctionsFieldOffset);
} else {
asm.emitSUB_RegDisp_Imm_Quad(EBP, firstStackArgOffset, jniExternalFunctionsFieldOffset);
}
}
}
// copy the arguments in reverse order
// does NOT include implicit this or class ptr
final TypeReference[] argTypes = method.getParameterTypes();
Offset stackArgOffset = firstStackArgOffset;
// negative value relative to EBP
final int startOfStackedArgs = stackDepth + 1;
int argGPR = 0;
int argFPR = 0;
for (TypeReference argType : argTypes) {
if (argType.isFloatType()) {
if (argFPR < NATIVE_PARAMETER_FPRS.length) {
// adjust stack
asm.emitPUSH_Reg(T0);
if (VM.BuildForSSE2) {
asm.emitMOVSS_RegInd_Reg(SP, (XMM) NATIVE_PARAMETER_FPRS[argFPR]);
} else {
asm.emitFSTP_RegInd_Reg(SP, FP0);
}
argFPR++;
} else {
asm.emitPUSH_RegDisp(EBP, stackArgOffset);
stackArgOffset = stackArgOffset.plus(WORDSIZE);
}
stackDepth--;
} else if (argType.isDoubleType()) {
if (argFPR < NATIVE_PARAMETER_FPRS.length) {
// adjust stack
asm.emitPUSH_Reg(T0);
asm.emitPUSH_Reg(T0);
if (VM.BuildForSSE2) {
asm.emitMOVSD_RegInd_Reg(SP, (XMM) NATIVE_PARAMETER_FPRS[argFPR]);
} else {
asm.emitFSTP_RegInd_Reg_Quad(SP, FP0);
}
argFPR++;
} else {
if (VM.BuildFor32Addr) {
asm.emitPUSH_RegDisp(EBP, stackArgOffset.plus(WORDSIZE));
asm.emitPUSH_RegDisp(EBP, stackArgOffset);
stackArgOffset = stackArgOffset.plus(2 * WORDSIZE);
} else {
// adjust stack
asm.emitPUSH_Reg(T0);
asm.emitPUSH_RegDisp(EBP, stackArgOffset);
stackArgOffset = stackArgOffset.plus(WORDSIZE);
}
}
stackDepth -= 2;
} else if (argType.isLongType()) {
if (VM.BuildFor32Addr) {
if (argGPR + 1 < NATIVE_PARAMETER_GPRS.length) {
asm.emitPUSH_Reg(NATIVE_PARAMETER_GPRS[argGPR]);
asm.emitPUSH_Reg(NATIVE_PARAMETER_GPRS[argGPR + 1]);
argGPR += 2;
} else if (argGPR < NATIVE_PARAMETER_GPRS.length) {
asm.emitPUSH_RegDisp(EBP, stackArgOffset);
asm.emitPUSH_Reg(NATIVE_PARAMETER_GPRS[argGPR]);
argGPR++;
stackArgOffset = stackArgOffset.plus(WORDSIZE);
} else {
asm.emitPUSH_RegDisp(EBP, stackArgOffset.plus(WORDSIZE));
asm.emitPUSH_RegDisp(EBP, stackArgOffset);
stackArgOffset = stackArgOffset.plus(WORDSIZE * 2);
}
stackDepth -= 2;
} else {
// adjust stack
asm.emitPUSH_Reg(T0);
if (argGPR < NATIVE_PARAMETER_GPRS.length) {
asm.emitPUSH_Reg(NATIVE_PARAMETER_GPRS[argGPR]);
argGPR++;
} else {
asm.emitPUSH_RegDisp(EBP, stackArgOffset);
stackArgOffset = stackArgOffset.plus(WORDSIZE);
}
stackDepth -= 2;
}
} else {
// expect integer arguments
if (argGPR < NATIVE_PARAMETER_GPRS.length) {
asm.emitPUSH_Reg(NATIVE_PARAMETER_GPRS[argGPR]);
argGPR++;
} else {
asm.emitPUSH_RegDisp(EBP, stackArgOffset);
stackArgOffset = stackArgOffset.plus(WORDSIZE);
}
stackDepth--;
}
}
// Restore JTOC register
if (JTOC_REGISTER != null) {
asm.emitMOV_Reg_Imm_Quad(JTOC_REGISTER, BootRecord.the_boot_record.tocRegister.toLong());
}
// START of code sequence to atomically change thread status from
// IN_JNI to IN_JAVA, looping in a call to
// RVMThread.leaveJNIBlockedFromJNIFunctionCallMethod if
// BLOCKED_IN_NATIVE
// backward branch label
int retryLabel = asm.getMachineCodeIndex();
// Restore THREAD_REGISTER from JNIEnvironment
if (VM.BuildFor32Addr) {
// pick up arg 0 (from our frame)
asm.emitMOV_Reg_RegDisp(EBX, EBP, Offset.fromIntSignExtend((startOfStackedArgs - 1) * WORDSIZE));
asm.emitMOV_Reg_RegDisp(THREAD_REGISTER, EBX, Entrypoints.JNIEnvSavedTRField.getOffset());
} else {
// pick up arg 0 (from our frame)
asm.emitMOV_Reg_RegDisp_Quad(EBX, EBP, Offset.fromIntSignExtend((startOfStackedArgs - 1) * WORDSIZE));
asm.emitMOV_Reg_RegDisp_Quad(THREAD_REGISTER, EBX, Entrypoints.JNIEnvSavedTRField.getOffset());
}
// what we need to keep in mind at this point:
// - EBX has JNI env (but it's nonvolatile)
// - EBP has the FP (but it's nonvolatile)
// - stack has the args but not the locals
// - TR has been restored
// attempt to change the thread state to IN_JAVA
asm.emitMOV_Reg_Imm(T0, RVMThread.IN_JNI);
asm.emitMOV_Reg_Imm(T1, RVMThread.IN_JAVA);
asm.emitLockNextInstruction();
asm.emitCMPXCHG_RegDisp_Reg(THREAD_REGISTER, Entrypoints.execStatusField.getOffset(), T1);
// if we succeeded, move on, else go into slow path
ForwardReference doneLeaveJNIRef = asm.forwardJcc(EQ);
// make the slow call
asm.generateJTOCcall(Entrypoints.leaveJNIBlockedFromJNIFunctionCallMethod.getOffset());
// arrive here when we've switched to IN_JAVA
doneLeaveJNIRef.resolve(asm);
// END of code sequence to change state from IN_JNI to IN_JAVA
// status is now IN_JAVA. GC can not occur while we execute on a processor
// in this state, so it is safe to access fields of objects.
// RVM TR register has been restored and EBX contains a pointer to
// the thread's JNIEnvironment.
// done saving, bump SP to reserve room for the local variables
// SP should now be at the point normally marked as emptyStackOffset
int numLocalVariables = method.getLocalWords() - method.getParameterWords();
// TODO: optimize this space adjustment
if (VM.BuildFor32Addr) {
asm.emitSUB_Reg_Imm(SP, (numLocalVariables << LG_WORDSIZE));
} else {
asm.emitSUB_Reg_Imm_Quad(SP, (numLocalVariables << LG_WORDSIZE));
}
// frame of JNIFunction
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(S0, EBX, Entrypoints.JNITopJavaFPField.getOffset());
} else {
asm.emitMOV_Reg_RegDisp_Quad(S0, EBX, Entrypoints.JNITopJavaFPField.getOffset());
}
// get offset from current FP and save in hdr of current frame
if (VM.BuildFor32Addr) {
asm.emitSUB_Reg_Reg(S0, EBP);
asm.emitMOV_RegDisp_Reg(EBP, SAVED_JAVA_FP_OFFSET, S0);
} else {
asm.emitSUB_Reg_Reg_Quad(S0, EBP);
asm.emitMOV_RegDisp_Reg_Quad(EBP, SAVED_JAVA_FP_OFFSET, S0);
}
// clobber the saved frame pointer with that from the JNIEnvironment (work around for omit-frame-pointer)
if (VM.BuildFor32Addr) {
asm.emitMOV_Reg_RegDisp(S0, EBX, Entrypoints.JNIEnvBasePointerOnEntryToNative.getOffset());
asm.emitMOV_RegInd_Reg(EBP, S0);
} else {
asm.emitMOV_Reg_RegDisp_Quad(S0, EBX, Entrypoints.JNIEnvBasePointerOnEntryToNative.getOffset());
asm.emitMOV_RegInd_Reg_Quad(EBP, S0);
}
// put framePointer in Thread following Jikes RVM conventions.
if (VM.BuildFor32Addr) {
asm.emitMOV_RegDisp_Reg(THREAD_REGISTER, ArchEntrypoints.framePointerField.getOffset(), EBP);
} else {
asm.emitMOV_RegDisp_Reg_Quad(THREAD_REGISTER, ArchEntrypoints.framePointerField.getOffset(), EBP);
}
// at this point: TR has been restored &
// processor status = IN_JAVA,
// arguments for the call have been setup, space on the stack for locals
// has been acquired.
// finally proceed with the normal Java compiled code
// skip the thread switch test for now, see BaselineCompilerImpl.genThreadSwitchTest(true)
// asm.emitNOP(1); // end of prologue marker
}
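Since the prologue starts with push EBP; mov EBP, SP, the caller's saved EBP is at [EBP], the return address at [EBP + WORDSIZE], and the first stacked native argument at EBP + 2*WORDSIZE, which is exactly the firstStackArgOffset used above. The hypothetical helper below just makes that layout explicit; stackedArgOffset is an illustrative name and assumes each argument occupies a single word.
// Hypothetical helper spelling out the EBP-relative offsets assumed by the prologue above.
import org.vmmagic.unboxed.Offset;

final class NativeArgOffsets {
  static Offset stackedArgOffset(int argIndex, int wordsize) {
    // [EBP] = caller's saved EBP, [EBP + wordsize] = return address,
    // so stacked arg 0 starts at EBP + 2*wordsize and later args follow word by word.
    return Offset.fromIntSignExtend((2 + argIndex) * wordsize);
  }
}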
Use of org.jikesrvm.compilers.common.assembler.ForwardReference in project JikesRVM.
Class JNICompiler, method gen64BitPowerPC_ELF_ParameterPassingCode.
/**
* Generates instructions to copy parameters from RVM convention to OS convention.
* @param asm The Assembler object
* @param types The parameter types
* @param nextVMArgReg The first parameter GPR in RVM convention;
* the last parameter GPR is defined as LAST_VOLATILE_GPR.
* @param nextVMArgFloatReg The first parameter FPR in RVM convention;
* the last parameter FPR is defined as LAST_VOLATILE_FPR.
* @param spillOffsetVM The spill offset (relative to FP) in RVM convention
* @param nextOSArgReg The first parameter GPR in OS convention;
* the last parameter GPR is defined as LAST_OS_PARAMETER_GPR.
* @param nextOSArgFloatReg The first parameter FPR in OS convention;
* the last parameter FPR is defined as LAST_OS_PARAMETER_FPR.
* @param spillOffsetOS The spill offset (relative to FP) in OS convention
*/
private static void gen64BitPowerPC_ELF_ParameterPassingCode(Assembler asm, TypeReference[] types, int nextVMArgReg, int nextVMArgFloatReg, int spillOffsetVM, int nextOSArgReg, int nextOSArgFloatReg, int spillOffsetOS) {
if (VM.BuildForPower64ELF_ABI) {
// Create one Assembler object for each argument.
// This is needed for the following reasons:
// - two new arguments are added in front for native methods, so the normal arguments
//   need to be shifted down in addition to being moved
// - to avoid overwriting each other, the arguments must be copied in reverse order
// - the analysis for the mapping, however, must be done in forward order
// - the moving/mapping of each argument may involve a sequence of 1-3 instructions,
//   which must be kept in their normal order
// To solve this, the instructions for each argument are generated in their own
// Assembler during the forward pass; then, in the reverse pass, each per-argument
// Assembler's instruction sequence is copied into the main Assembler.
int numArguments = types.length;
Assembler[] asmForArgs = new Assembler[numArguments];
for (int arg = 0; arg < numArguments; arg++) {
boolean mustSaveFloatToSpill;
asmForArgs[arg] = new Assembler(0);
Assembler asmArg = asmForArgs[arg];
//
if (types[arg].isFloatType()) {
// (1a) reserve one GPR for each float if it is available
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
nextOSArgReg++;
mustSaveFloatToSpill = false;
} else {
// (1b) if GPR has spilled, store the float argument in the callee spill area
// regardless of whether the FPR has spilled or not
mustSaveFloatToSpill = true;
}
spillOffsetOS += BYTES_IN_STACKSLOT;
// (2a) leave those in FPR[1:13] as is unless the GPR has spilled
if (nextVMArgFloatReg <= LAST_OS_PARAMETER_FPR.value()) {
if (mustSaveFloatToSpill) {
asmArg.emitSTFS(FPR.lookup(nextVMArgFloatReg), spillOffsetOS - BYTES_IN_FLOAT, FP);
}
nextOSArgFloatReg++;
nextVMArgFloatReg++;
} else if (nextVMArgFloatReg <= LAST_VOLATILE_FPR.value()) {
// (2b) run out of FPR in OS, but still have 2 more FPR in VM,
// so FPR[14:15] goes to the callee spill area
asmArg.emitSTFS(FPR.lookup(nextVMArgFloatReg), spillOffsetOS - BYTES_IN_FLOAT, FP);
nextVMArgFloatReg++;
} else {
// (2c) run out of FPR in VM, now get the remaining args from the caller spill area
// and move them into the callee spill area
// Kris Venstermans: Attention, different calling convention !!
spillOffsetVM += BYTES_IN_STACKSLOT;
asmArg.emitLFS(FIRST_SCRATCH_FPR, spillOffsetVM - BYTES_IN_FLOAT, FP);
asmArg.emitSTFS(FIRST_SCRATCH_FPR, spillOffsetOS - BYTES_IN_FLOAT, FP);
}
} else if (types[arg].isDoubleType()) {
// For 64-bit float arguments
if (VM.BuildFor64Addr) {
// (1a) reserve one GPR for double
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
nextOSArgReg++;
mustSaveFloatToSpill = false;
} else {
// (1b) if GPR has spilled, store the float argument in the callee spill area
// regardless of whether the FPR has spilled or not
mustSaveFloatToSpill = true;
}
} else {
// (1a) reserve two GPR's for double
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value() - 1) {
nextOSArgReg += 2;
mustSaveFloatToSpill = false;
} else {
// if only one GPR is left, reserve it anyway although it won't be used
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
nextOSArgReg++;
}
mustSaveFloatToSpill = true;
}
}
// Kris Venstermans: equals 2 slots on 32-bit platforms and 1 slot on 64-bit platforms
spillOffsetOS += BYTES_IN_DOUBLE;
// (2a) leave those in FPR[1:13] as is unless the GPR has spilled
if (nextVMArgFloatReg <= LAST_OS_PARAMETER_FPR.value()) {
if (mustSaveFloatToSpill) {
asmArg.emitSTFD(FPR.lookup(nextVMArgFloatReg), spillOffsetOS - BYTES_IN_DOUBLE, FP);
}
nextOSArgFloatReg++;
nextVMArgFloatReg++;
} else if (nextVMArgFloatReg <= LAST_VOLATILE_FPR.value()) {
// (2b) run out of FPR in OS, but still have 2 more FPR in VM,
// so FPR[14:15] goes to the callee spill area
asmArg.emitSTFD(FPR.lookup(nextVMArgFloatReg), spillOffsetOS - BYTES_IN_DOUBLE, FP);
nextVMArgFloatReg++;
} else {
// (2c) run out of FPR in VM, now get the remaining args from the caller spill area
// and move them into the callee spill area
spillOffsetVM += BYTES_IN_DOUBLE;
asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM - BYTES_IN_DOUBLE, FP);
asmArg.emitSTFD(FIRST_SCRATCH_FPR, spillOffsetOS - BYTES_IN_DOUBLE, FP);
}
} else if (VM.BuildFor32Addr && types[arg].isLongType()) {
// For 64-bit int arguments on 32-bit platforms
//
spillOffsetOS += BYTES_IN_LONG;
// (1a) fit in OS register, move the pair
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value() - 1) {
// move the lo-word first so it isn't overwritten
asmArg.emitMR(GPR.lookup(nextOSArgReg + 1), GPR.lookup(nextVMArgReg + 1));
asmArg.emitMR(GPR.lookup(nextOSArgReg), GPR.lookup(nextVMArgReg));
nextOSArgReg += 2;
nextVMArgReg += 2;
} else if (nextOSArgReg == LAST_OS_PARAMETER_GPR.value() && nextVMArgReg <= LAST_VOLATILE_GPR.value() - 1) {
// (1b) fit in VM register but straddle across OS register/spill
// move the lo-word first so it isn't overwritten
asmArg.emitSTW(GPR.lookup(nextVMArgReg + 1), spillOffsetOS - BYTES_IN_STACKSLOT, FP);
asmArg.emitMR(GPR.lookup(nextOSArgReg), GPR.lookup(nextVMArgReg));
nextOSArgReg += 2;
nextVMArgReg += 2;
} else if (nextOSArgReg > LAST_OS_PARAMETER_GPR.value() && nextVMArgReg <= LAST_VOLATILE_GPR.value() - 1) {
// (1c) fit in VM register, spill in OS without straddling register/spill
asmArg.emitSTW(GPR.lookup(nextVMArgReg++), spillOffsetOS - 2 * BYTES_IN_STACKSLOT, FP);
asmArg.emitSTW(GPR.lookup(nextVMArgReg++), spillOffsetOS - BYTES_IN_STACKSLOT, FP);
} else if (nextVMArgReg == LAST_VOLATILE_GPR.value()) {
// (1d) split across VM/spill, spill in OS
spillOffsetVM += BYTES_IN_STACKSLOT;
asmArg.emitSTW(GPR.lookup(nextVMArgReg++), spillOffsetOS - 2 * BYTES_IN_STACKSLOT, FP);
asmArg.emitLWZ(REGISTER_ZERO, spillOffsetVM - BYTES_IN_STACKSLOT, FP);
asmArg.emitSTW(REGISTER_ZERO, spillOffsetOS - BYTES_IN_STACKSLOT, FP);
} else {
// (1e) spill both in VM and OS
spillOffsetVM += BYTES_IN_LONG;
asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM - BYTES_IN_LONG, FP);
asmArg.emitSTFD(FIRST_SCRATCH_FPR, spillOffsetOS - BYTES_IN_LONG, FP);
}
} else if (VM.BuildFor64Addr && types[arg].isLongType()) {
// For 64-bit int arguments on 64-bit platforms
//
spillOffsetOS += BYTES_IN_LONG;
// (1a) fit in OS register, move the register
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
asmArg.emitMR(GPR.lookup(nextOSArgReg++), GPR.lookup(nextVMArgReg++));
// (1b) spill OS register, but still fit in VM register
} else if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
asmArg.emitSTAddr(GPR.lookup(nextVMArgReg++), spillOffsetOS - BYTES_IN_LONG, FP);
} else {
// (1c) spill VM register
spillOffsetVM += BYTES_IN_LONG;
// retrieve arg from VM spill area
asmArg.emitLAddr(REGISTER_ZERO, spillOffsetVM - BYTES_IN_LONG, FP);
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS - BYTES_IN_LONG, FP);
}
} else if (types[arg].isReferenceType()) {
// For reference type, replace with handles before passing to OS
//
spillOffsetOS += BYTES_IN_ADDRESS;
// (1a) fit in OS register, move the register
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
// Are we passing NULL?
asmArg.emitCMPI(GPR.lookup(nextVMArgReg), 0);
ForwardReference isNull = asmArg.emitForwardBC(EQ);
// NO: put it in the JNIRefs array and pass offset
// append ref to end of JNIRefs array
asmArg.emitSTAddrU(GPR.lookup(nextVMArgReg), BYTES_IN_ADDRESS, KLUDGE_TI_REG);
// pass offset in bytes of jref
asmArg.emitSUBFC(GPR.lookup(nextOSArgReg), THREAD_REGISTER, KLUDGE_TI_REG);
ForwardReference done = asmArg.emitForwardB();
// YES: pass NULL (0)
isNull.resolve(asmArg);
asmArg.emitMR(GPR.lookup(nextOSArgReg), GPR.lookup(nextVMArgReg));
// JOIN PATHS
done.resolve(asmArg);
nextVMArgReg++;
nextOSArgReg++;
} else if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
// (1b) spill OS register, but still fit in VM register
// Are we passing NULL?
asmArg.emitCMPI(GPR.lookup(nextVMArgReg), 0);
ForwardReference isNull = asmArg.emitForwardBC(EQ);
// NO: put it in the JNIRefs array and pass offset
// append ref to end of JNIRefs array
asmArg.emitSTAddrU(GPR.lookup(nextVMArgReg), BYTES_IN_ADDRESS, KLUDGE_TI_REG);
// compute offset in bytes for jref
asmArg.emitSUBFC(REGISTER_ZERO, THREAD_REGISTER, KLUDGE_TI_REG);
ForwardReference done = asmArg.emitForwardB();
// YES: pass NULL (0)
isNull.resolve(asmArg);
asmArg.emitLVAL(REGISTER_ZERO, 0);
// JOIN PATHS
done.resolve(asmArg);
// spill into OS frame
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS - BYTES_IN_ADDRESS, FP);
nextVMArgReg++;
} else {
// (1c) spill VM register
spillOffsetVM += BYTES_IN_STACKSLOT;
// retrieve arg from VM spill area
asmArg.emitLAddr(REGISTER_ZERO, spillOffsetVM - BYTES_IN_ADDRESS, FP);
// Are we passing NULL?
asmArg.emitCMPI(REGISTER_ZERO, 0);
ForwardReference isNull = asmArg.emitForwardBC(EQ);
// NO: put it in the JNIRefs array and pass offset
// append ref to end of JNIRefs array
asmArg.emitSTAddrU(REGISTER_ZERO, BYTES_IN_ADDRESS, KLUDGE_TI_REG);
// compute offset in bytes for jref
asmArg.emitSUBFC(REGISTER_ZERO, THREAD_REGISTER, KLUDGE_TI_REG);
ForwardReference done = asmArg.emitForwardB();
// YES: pass NULL (0)
isNull.resolve(asmArg);
asmArg.emitLVAL(REGISTER_ZERO, 0);
// JOIN PATHS
done.resolve(asmArg);
// spill into OS frame
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS - BYTES_IN_ADDRESS, FP);
}
} else {
// For all other types: int, short, char, byte, boolean
spillOffsetOS += BYTES_IN_STACKSLOT;
// (1a) fit in OS register, move the register
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
asmArg.emitMR(GPR.lookup(nextOSArgReg++), GPR.lookup(nextVMArgReg++));
} else if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
// (1b) spill OS register, but still fit in VM register
asmArg.emitSTAddr(GPR.lookup(nextVMArgReg++), spillOffsetOS - BYTES_IN_ADDRESS, FP);
} else {
// (1c) spill VM register
spillOffsetVM += BYTES_IN_STACKSLOT;
// retrieve arg from VM spill area
asmArg.emitLInt(REGISTER_ZERO, spillOffsetVM - BYTES_IN_INT, FP);
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS - BYTES_IN_ADDRESS, FP);
}
}
}
// append the per-argument code in reverse order so that the moves do not overwrite parameters that have not yet been copied
for (int arg = numArguments - 1; arg >= 0; arg--) {
asm.appendInstructions(asmForArgs[arg].getMachineCodes());
}
}
}
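The comment at the top of this method describes a two-pass scheme: plan each argument's instruction sequence in forward order, then append the per-argument sequences to the main Assembler in reverse. The minimal model below captures just that reverse emission, using plain strings instead of machine code; it is illustrative only, not the JikesRVM Assembler API.
// Minimal model of the two-pass scheme: a later argument's moves are emitted before an
// earlier argument's, so they cannot clobber a register still holding an uncopied argument,
// while the instructions within each argument keep their forward order.
import java.util.ArrayList;
import java.util.List;

final class ReverseEmitSketch {
  static List<String> emitAll(List<List<String>> perArgSequences) {
    List<String> mainAsm = new ArrayList<>();
    for (int arg = perArgSequences.size() - 1; arg >= 0; arg--) {
      mainAsm.addAll(perArgSequences.get(arg)); // arguments in reverse, instructions within each in order
    }
    return mainAsm;
  }
}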