Use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.
Class OutOfLineMachineCode, method generateReflectiveMethodInvokerInstructions.
/**
* Machine code for reflective method invocation.
* See also: "Compiler.generateMethodInvocation".
*
* <pre>
* Registers taken at runtime:
* T0 == address of method entrypoint to be called
* T1 == address of gpr registers to be loaded
* T2 == address of fpr registers to be loaded
* T4 == address of spill area in calling frame
*
* Registers returned at runtime:
* standard return value conventions used
*
* Side effects at runtime:
* artificial stackframe created and destroyed
* R0, volatile, and scratch registers destroyed
* </pre>
*/
private static CodeArray generateReflectiveMethodInvokerInstructions() {
Assembler asm = new Assembler(0);
//
// free registers: 0, S0
//
// save...
asm.emitMFLR(GPR.R0);
// ...return address
asm.emitSTAddr(GPR.R0, STACKFRAME_RETURN_ADDRESS_OFFSET.toInt(), FP);
// CTR := start of method code
asm.emitMTCTR(T0);
//
// free registers: 0, S0, T0
//
// create new frame
//
// S0 := old frame pointer
asm.emitMR(S0, FP);
// T0 := number of spill words
asm.emitLIntOffset(T0, T4, ObjectModel.getArrayLengthOffset());
// T4 -= BYTES_IN_ADDRESS (predecrement, i.e., T4 + BYTES_IN_ADDRESS is &spill[0])
asm.emitADDI(T4, -BYTES_IN_ADDRESS, T4);
int spillLoopLabel = asm.getMachineCodeIndex();
// T0 -= 1 (and set CR)
asm.emitADDICr(T0, T0, -1);
// if T0 < 0 then break
ForwardReference fr1 = asm.emitForwardBC(LT);
// R0 := *(T4 += BYTES_IN_ADDRESS)
asm.emitLAddrU(GPR.R0, BYTES_IN_ADDRESS, T4);
// store one word of the spill area into the new frame
asm.emitSTAddrU(GPR.R0, -BYTES_IN_ADDRESS, FP);
// goto spillLoop:
asm.emitB(spillLoopLabel);
fr1.resolve(asm);
// allocate frame header and save old fp
asm.emitSTAddrU(S0, -STACKFRAME_HEADER_SIZE, FP);
asm.emitLVAL(T0, INVISIBLE_METHOD_ID);
// set method id
asm.emitSTWoffset(T0, FP, STACKFRAME_METHOD_ID_OFFSET);
//
// free registers: 0, S0, T0, T4
//
// load up fprs
//
ForwardReference setupFPRLoader = asm.emitForwardBL();
for (int i = LAST_VOLATILE_FPR.value(); i >= FIRST_VOLATILE_FPR.value(); --i) {
// FPRi := fprs[i]
asm.emitLFDU(FPR.lookup(i), BYTES_IN_DOUBLE, T2);
}
//
// free registers: 0, S0, T0, T2, T4
//
// load up gprs
//
ForwardReference setupGPRLoader = asm.emitForwardBL();
for (int i = LAST_VOLATILE_GPR.value(); i >= FIRST_VOLATILE_GPR.value(); --i) {
// GPRi := gprs[i]
asm.emitLAddrU(GPR.lookup(i), BYTES_IN_ADDRESS, S0);
}
//
// free registers: 0, S0
//
// invoke method
//
// branch and link to method code
asm.emitBCCTRL();
// emit method epilog
//
// restore caller's frame
asm.emitLAddr(FP, 0, FP);
// pick up return address
asm.emitLAddr(S0, STACKFRAME_RETURN_ADDRESS_OFFSET.toInt(), FP);
//
asm.emitMTLR(S0);
// return to caller
asm.emitBCLR();
setupFPRLoader.resolve(asm);
// T4 := address of first fpr load instruction
asm.emitMFLR(T4);
// T0 := number of fprs to be loaded
asm.emitLIntOffset(T0, T2, ObjectModel.getArrayLengthOffset());
// T4 := address of first instruction following fpr loads
asm.emitADDI(T4, VOLATILE_FPRS << LG_INSTRUCTION_WIDTH, T4);
// T0 := number of bytes of fpr load instructions
asm.emitSLWI(T0, T0, LG_INSTRUCTION_WIDTH);
// T4 := address of instruction for highest numbered fpr to be loaded
asm.emitSUBFC(T4, T0, T4);
// LR := address of instruction for highest numbered fpr to be loaded
asm.emitMTLR(T4);
// predecrement fpr index (to prepare for update instruction)
asm.emitADDI(T2, -BYTES_IN_DOUBLE, T2);
// branch to fpr loading instructions
asm.emitBCLR();
setupGPRLoader.resolve(asm);
// T4 := address of first gpr load instruction
asm.emitMFLR(T4);
// T0 := number of gprs to be loaded
asm.emitLIntOffset(T0, T1, ObjectModel.getArrayLengthOffset());
// T4 := address of first instruction following gpr loads
asm.emitADDI(T4, VOLATILE_GPRS << LG_INSTRUCTION_WIDTH, T4);
// T0 := number of bytes of gpr load instructions
asm.emitSLWI(T0, T0, LG_INSTRUCTION_WIDTH);
// T4 := address of instruction for highest numbered gpr to be loaded
asm.emitSUBFC(T4, T0, T4);
// LR := address of instruction for highest numbered gpr to be loaded
asm.emitMTLR(T4);
// predecrement gpr index (to prepare for update instruction)
asm.emitADDI(S0, -BYTES_IN_ADDRESS, T1);
// branch to gpr loading instructions
asm.emitBCLR();
return asm.getMachineCodes();
}
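The FPR/GPR loader stubs above branch into the middle of an unrolled load sequence so that only as many registers as the runtime arrays actually contain are loaded. A minimal, self-contained sketch of that branch-target arithmetic (hypothetical names, not JikesRVM code):

public final class LoaderBranchTargetSketch {
    // PPC instructions are 4 bytes wide
    static final int LG_INSTRUCTION_WIDTH = 2;

    // firstLoadAddr: address of the first load instruction (obtained via BL/MFLR above)
    // numVolatileRegs: number of volatile registers covered by the unrolled load sequence
    // numRegsToLoad: length of the runtime register array (held in T0 above)
    static long branchTarget(long firstLoadAddr, int numVolatileRegs, int numRegsToLoad) {
        // address of the first instruction following the load sequence
        long endOfLoads = firstLoadAddr + ((long) numVolatileRegs << LG_INSTRUCTION_WIDTH);
        // address of the load instruction for the highest-numbered register to be loaded
        return endOfLoads - ((long) numRegsToLoad << LG_INSTRUCTION_WIDTH);
    }

    public static void main(String[] args) {
        // loads start at 0x1000, 13 volatile FPRs unrolled, 3 FPRs supplied at runtime -> 0x1028
        System.out.println(Long.toHexString(branchTarget(0x1000, 13, 3)));
    }
}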
Use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.
Class OutOfLineMachineCode, method generateRestoreHardwareExceptionStateInstructions.
/**
* Machine code to implement "Magic.restoreHardwareExceptionState()".
* <pre>
* Registers taken at runtime:
* T0 == address of Registers object
*
* Registers returned at runtime:
* none
*
* Side effects at runtime:
* all registers are restored except the condition registers, count register,
* JTOC_POINTER, and THREAD_REGISTER; execution resumes at "registers.ip"
* </pre>
*/
private static CodeArray generateRestoreHardwareExceptionStateInstructions() {
Assembler asm = new Assembler(0);
// restore LR
//
asm.emitLAddrOffset(REGISTER_ZERO, T0, ArchEntrypoints.registersLRField.getOffset());
asm.emitMTLR(REGISTER_ZERO);
// restore IP (hold it in the count register for a moment)
//
asm.emitLAddrOffset(REGISTER_ZERO, T0, ArchEntrypoints.registersIPField.getOffset());
asm.emitMTCTR(REGISTER_ZERO);
// restore fprs
//
// T1 := registers.fprs[]
asm.emitLAddrOffset(T1, T0, ArchEntrypoints.registersFPRsField.getOffset());
for (int i = 0; i < NUM_FPRS; ++i) {
asm.emitLFD(FPR.lookup(i), i << LOG_BYTES_IN_DOUBLE, T1);
}
// restore gprs
//
// T1 := registers.gprs[]
asm.emitLAddrOffset(T1, T0, ArchEntrypoints.registersGPRsField.getOffset());
for (int i = FIRST_NONVOLATILE_GPR.value(); i <= LAST_NONVOLATILE_GPR.value(); ++i) {
asm.emitLAddr(GPR.lookup(i), i << LOG_BYTES_IN_ADDRESS, T1);
}
for (int i = FIRST_SCRATCH_GPR.value(); i <= LAST_SCRATCH_GPR.value(); ++i) {
asm.emitLAddr(GPR.lookup(i), i << LOG_BYTES_IN_ADDRESS, T1);
}
for (int i = FIRST_VOLATILE_GPR.value(); i <= LAST_VOLATILE_GPR.value(); ++i) {
if (i != T1.value())
asm.emitLAddr(GPR.lookup(i), i << LOG_BYTES_IN_ADDRESS, T1);
}
// restore specials
//
asm.emitLAddr(REGISTER_ZERO, REGISTER_ZERO.value() << LOG_BYTES_IN_ADDRESS, T1);
asm.emitLAddr(FP, FP.value() << LOG_BYTES_IN_ADDRESS, T1);
// restore T1 last, since it has been the base register for the loads above
//
asm.emitLAddr(T1, T1.value() << LOG_BYTES_IN_ADDRESS, T1);
// resume execution at IP
//
asm.emitBCCTR();
return asm.getMachineCodes();
}
Use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.
Class OutOfLineMachineCode, method generateThreadSwitchInstructions.
/**
* Machine code to implement "Magic.threadSwitch()".
*
* <pre>
* Currently not functional on PNT. Left as a template for possible reintroduction.
*
* Parameters taken at runtime:
* T0 == address of Thread object for the current thread
* T1 == address of Registers object for the new thread
*
* Registers returned at runtime:
* none
*
* Side effects at runtime:
* sets current Thread's beingDispatched field to false
* saves current Thread's nonvolatile hardware state in its Registers object
* restores the new thread's nonvolatile hardware state from its Registers object
* execution resumes at the address specified by the restored thread's Registers ip field
* </pre>
*/
private static CodeArray generateThreadSwitchInstructions() {
Assembler asm = new Assembler(0);
Offset ipOffset = ArchEntrypoints.registersIPField.getOffset();
Offset fprsOffset = ArchEntrypoints.registersFPRsField.getOffset();
Offset gprsOffset = ArchEntrypoints.registersGPRsField.getOffset();
// (1) Save nonvolatile hardware state of current thread.
// T3 gets return address
asm.emitMFLR(T3);
// T2 := T0.contextRegisters
asm.emitLAddrOffset(T2, T0, Entrypoints.threadContextRegistersField.getOffset());
// T0.contextRegisters.ip = return address
asm.emitSTAddrOffset(T3, T2, ipOffset);
// save non-volatile fprs
// T3 := T0.contextRegisters.fprs[]
asm.emitLAddrOffset(T3, T2, fprsOffset);
for (int i = FIRST_NONVOLATILE_FPR.value(); i <= LAST_NONVOLATILE_FPR.value(); ++i) {
asm.emitSTFD(FPR.lookup(i), i << LOG_BYTES_IN_DOUBLE, T3);
}
// save non-volatile gprs
// T3 := registers.gprs[]
asm.emitLAddrOffset(T3, T2, gprsOffset);
for (int i = FIRST_NONVOLATILE_GPR.value(); i <= LAST_NONVOLATILE_GPR.value(); ++i) {
asm.emitSTAddr(GPR.lookup(i), i << LOG_BYTES_IN_ADDRESS, T3);
}
// save fp
asm.emitSTAddr(FP, FP.value() << LOG_BYTES_IN_ADDRESS, T3);
// (2) Restore nonvolatile hardware state of new thread.
// restore non-volatile fprs
// T0 := T1.fprs[]
asm.emitLAddrOffset(T0, T1, fprsOffset);
for (int i = FIRST_NONVOLATILE_FPR.value(); i <= LAST_NONVOLATILE_FPR.value(); ++i) {
asm.emitLFD(FPR.lookup(i), i << LOG_BYTES_IN_DOUBLE, T0);
}
// restore non-volatile gprs
// T0 := T1.gprs[]
asm.emitLAddrOffset(T0, T1, gprsOffset);
for (int i = FIRST_NONVOLATILE_GPR.value(); i <= LAST_NONVOLATILE_GPR.value(); ++i) {
asm.emitLAddr(GPR.lookup(i), i << LOG_BYTES_IN_ADDRESS, T0);
}
// restore fp
asm.emitLAddr(FP, FP.value() << LOG_BYTES_IN_ADDRESS, T0);
// resume execution at the new thread's saved ip
asm.emitLAddrOffset(T0, T1, ipOffset);
asm.emitMTLR(T0);
asm.emitBCLR();
return asm.getMachineCodes();
}
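A conceptual sketch of the save/restore symmetry in threadSwitch (hypothetical names and sizes, not JikesRVM code; the real stub also saves FP and works through the contextRegisters field): the current thread's resume point and nonvolatile state are stored into its Registers object, the new thread's state is loaded back, and execution resumes at the new thread's saved ip.

final class ContextSwitchSketch {
    static final class Registers {
        long ip;                                   // saved resume address
        long[] nonvolatileGprs = new long[18];     // sizes are illustrative only
        double[] nonvolatileFprs = new double[18];
    }

    // stand-ins for the hardware register file
    static long[] hwGprs = new long[18];
    static double[] hwFprs = new double[18];

    /** Returns the address at which execution should resume. */
    static long threadSwitch(Registers current, Registers next, long returnAddress) {
        current.ip = returnAddress;                                          // (1) save resume point
        System.arraycopy(hwGprs, 0, current.nonvolatileGprs, 0, hwGprs.length);
        System.arraycopy(hwFprs, 0, current.nonvolatileFprs, 0, hwFprs.length);
        System.arraycopy(next.nonvolatileGprs, 0, hwGprs, 0, hwGprs.length); // (2) restore new state
        System.arraycopy(next.nonvolatileFprs, 0, hwFprs, 0, hwFprs.length);
        return next.ip;                                                      // (3) resume here
    }

    public static void main(String[] args) {
        Registers a = new Registers(), b = new Registers();
        b.ip = 0x2000;
        long resumeAt = threadSwitch(a, b, 0x1000);  // a.ip is now 0x1000
        System.out.println(Long.toHexString(resumeAt));
    }
}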
Use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.
Class OutOfLineMachineCode, method generateSaveThreadStateInstructions.
/**
* Machine code to implement "Magic.saveThreadState()".
*
* <pre>
* Registers taken at runtime:
* T0 == address of Registers object
*
* Registers returned at runtime:
* none
*
* Side effects at runtime:
* T1 destroyed
* </pre>
*/
private static CodeArray generateSaveThreadStateInstructions() {
Assembler asm = new Assembler(0);
// save return address
//
// T1 = LR (return address)
asm.emitMFLR(T1);
// registers.ip = return address
asm.emitSTAddrOffset(T1, T0, ArchEntrypoints.registersIPField.getOffset());
// save non-volatile fprs
//
// T1 := registers.fprs[]
asm.emitLAddrOffset(T1, T0, ArchEntrypoints.registersFPRsField.getOffset());
for (int i = FIRST_NONVOLATILE_FPR.value(); i <= LAST_NONVOLATILE_FPR.value(); ++i) {
asm.emitSTFD(FPR.lookup(i), i << LOG_BYTES_IN_DOUBLE, T1);
}
// save non-volatile gprs
//
// T1 := registers.gprs[]
asm.emitLAddrOffset(T1, T0, ArchEntrypoints.registersGPRsField.getOffset());
for (int i = FIRST_NONVOLATILE_GPR.value(); i <= LAST_NONVOLATILE_GPR.value(); ++i) {
asm.emitSTAddr(GPR.lookup(i), i << LOG_BYTES_IN_ADDRESS, T1);
}
// save fp
//
asm.emitSTAddr(FP, FP.value() << LOG_BYTES_IN_ADDRESS, T1);
// return to caller
//
asm.emitBCLR();
return asm.getMachineCodes();
}
Use of org.jikesrvm.compilers.common.assembler.ppc.Assembler in project JikesRVM by JikesRVM.
Class JNICompiler, method gen64BitPowerPC_ELF_ParameterPassingCode.
/**
* Generates instructions to copy parameters from RVM convention to OS convention.
* @param asm The Assembler object
* @param types The parameter types
* @param nextVMArgReg The first parameter GPR in RVM convention,
* the last parameter GPR is defined as LAST_VOLATILE_GPR.
* @param nextVMArgFloatReg The first parameter FPR in RVM convention,
* the last parameter FPR is defined as LAST_VOLATILE_FPR.
* @param spillOffsetVM The spill offset (relative to FP) in RVM convention
* @param nextOSArgReg The first parameter GPR in OS convention,
* the last parameter GPR is defined as LAST_OS_PARAMETER_GPR.
* @param nextOSArgFloatReg The first parameter FPR in OS convention,
* the last parameter FPR is defined as LAST_OS_PARAMETER_FPR.
* @param spillOffsetOS The spill offset (relative to FP) in OS convention
*/
private static void gen64BitPowerPC_ELF_ParameterPassingCode(Assembler asm, TypeReference[] types, int nextVMArgReg, int nextVMArgFloatReg, int spillOffsetVM, int nextOSArgReg, int nextOSArgFloatReg, int spillOffsetOS) {
if (VM.BuildForPower64ELF_ABI) {
// Create one Assembler object for each argument.
// This is needed for the following reasons:
// - two new arguments are added in front for native methods, so the normal arguments
//   need to be shifted down in addition to being moved
// - to avoid overwriting each other, the arguments must be copied in reverse order
// - the analysis for the mapping, however, must be done in forward order
// - the moving/mapping of each argument may involve a sequence of 1-3 instructions,
//   which must be kept in their normal order
// To solve this problem, the instructions for each argument are generated in its
// own Assembler in the forward pass; then, in the reverse pass, each Assembler
// emits its instruction sequence, which is copied into the main Assembler
// (see the sketch after this method).
int numArguments = types.length;
Assembler[] asmForArgs = new Assembler[numArguments];
for (int arg = 0; arg < numArguments; arg++) {
boolean mustSaveFloatToSpill;
asmForArgs[arg] = new Assembler(0);
Assembler asmArg = asmForArgs[arg];
//
if (types[arg].isFloatType()) {
// (1a) reserve one GPR for each float if it is available
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
nextOSArgReg++;
mustSaveFloatToSpill = false;
} else {
// (1b) if GPR has spilled, store the float argument in the callee spill area
// regardless of whether the FPR has spilled or not
mustSaveFloatToSpill = true;
}
spillOffsetOS += BYTES_IN_STACKSLOT;
// (2a) leave those in FPR[1:13] as is unless the GPR has spilled
if (nextVMArgFloatReg <= LAST_OS_PARAMETER_FPR.value()) {
if (mustSaveFloatToSpill) {
asmArg.emitSTFS(FPR.lookup(nextVMArgFloatReg), spillOffsetOS - BYTES_IN_FLOAT, FP);
}
nextOSArgFloatReg++;
nextVMArgFloatReg++;
} else if (nextVMArgFloatReg <= LAST_VOLATILE_FPR.value()) {
// (2b) we have run out of OS FPRs, but there are still 2 more FPRs in the VM convention,
// so FPR[14:15] go to the callee spill area
asmArg.emitSTFS(FPR.lookup(nextVMArgFloatReg), spillOffsetOS - BYTES_IN_FLOAT, FP);
nextVMArgFloatReg++;
} else {
// (2c) run out of FPR in VM, now get the remaining args from the caller spill area
// and move them into the callee spill area
// Kris Venstermans: Attention, different calling convention !!
spillOffsetVM += BYTES_IN_STACKSLOT;
asmArg.emitLFS(FIRST_SCRATCH_FPR, spillOffsetVM - BYTES_IN_FLOAT, FP);
asmArg.emitSTFS(FIRST_SCRATCH_FPR, spillOffsetOS - BYTES_IN_FLOAT, FP);
}
} else if (types[arg].isDoubleType()) {
// For 64-bit float arguments
if (VM.BuildFor64Addr) {
// (1a) reserve one GPR for double
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
nextOSArgReg++;
mustSaveFloatToSpill = false;
} else {
// (1b) if GPR has spilled, store the float argument in the callee spill area
// regardless of whether the FPR has spilled or not
mustSaveFloatToSpill = true;
}
} else {
// (1a) reserve two GPRs for the double
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value() - 1) {
nextOSArgReg += 2;
mustSaveFloatToSpill = false;
} else {
// if only one GPR is left, reserve it anyway although it won't be used
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
nextOSArgReg++;
}
mustSaveFloatToSpill = true;
}
}
// Kris Venstermans: equals 2 slots on 32-bit platforms and 1 slot on 64-bit platform
spillOffsetOS += BYTES_IN_DOUBLE;
// (2a) leave those in FPR[1:13] as is unless the GPR has spilled
if (nextVMArgFloatReg <= LAST_OS_PARAMETER_FPR.value()) {
if (mustSaveFloatToSpill) {
asmArg.emitSTFD(FPR.lookup(nextVMArgFloatReg), spillOffsetOS - BYTES_IN_DOUBLE, FP);
}
nextOSArgFloatReg++;
nextVMArgFloatReg++;
} else if (nextVMArgFloatReg <= LAST_VOLATILE_FPR.value()) {
// (2b) we have run out of OS FPRs, but there are still 2 more FPRs in the VM convention,
// so FPR[14:15] go to the callee spill area
asmArg.emitSTFD(FPR.lookup(nextVMArgFloatReg), spillOffsetOS - BYTES_IN_DOUBLE, FP);
nextVMArgFloatReg++;
} else {
// (2c) run out of FPR in VM, now get the remaining args from the caller spill area
// and move them into the callee spill area
spillOffsetVM += BYTES_IN_DOUBLE;
asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM - BYTES_IN_DOUBLE, FP);
asmArg.emitSTFD(FIRST_SCRATCH_FPR, spillOffsetOS - BYTES_IN_DOUBLE, FP);
}
} else if (VM.BuildFor32Addr && types[arg].isLongType()) {
// For 64-bit int arguments on 32-bit platforms
//
spillOffsetOS += BYTES_IN_LONG;
// (1a) fit in OS register, move the pair
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value() - 1) {
// move lo-word first so it isn't overwritten
asmArg.emitMR(GPR.lookup(nextOSArgReg + 1), GPR.lookup(nextVMArgReg + 1));
asmArg.emitMR(GPR.lookup(nextOSArgReg), GPR.lookup(nextVMArgReg));
nextOSArgReg += 2;
nextVMArgReg += 2;
} else if (nextOSArgReg == LAST_OS_PARAMETER_GPR.value() && nextVMArgReg <= LAST_VOLATILE_GPR.value() - 1) {
// (1b) fit in VM register but straddle across OS register/spill
// move lo-word first so it isn't overwritten
asmArg.emitSTW(GPR.lookup(nextVMArgReg + 1), spillOffsetOS - BYTES_IN_STACKSLOT, FP);
asmArg.emitMR(GPR.lookup(nextOSArgReg), GPR.lookup(nextVMArgReg));
nextOSArgReg += 2;
nextVMArgReg += 2;
} else if (nextOSArgReg > LAST_OS_PARAMETER_GPR.value() && nextVMArgReg <= LAST_VOLATILE_GPR.value() - 1) {
// (1c) fit in VM register, spill in OS without straddling register/spill
asmArg.emitSTW(GPR.lookup(nextVMArgReg++), spillOffsetOS - 2 * BYTES_IN_STACKSLOT, FP);
asmArg.emitSTW(GPR.lookup(nextVMArgReg++), spillOffsetOS - BYTES_IN_STACKSLOT, FP);
} else if (nextVMArgReg == LAST_VOLATILE_GPR.value()) {
// (1d) split across VM/spill, spill in OS
spillOffsetVM += BYTES_IN_STACKSLOT;
asmArg.emitSTW(GPR.lookup(nextVMArgReg++), spillOffsetOS - 2 * BYTES_IN_STACKSLOT, FP);
asmArg.emitLWZ(REGISTER_ZERO, spillOffsetVM - BYTES_IN_STACKSLOT, FP);
asmArg.emitSTW(REGISTER_ZERO, spillOffsetOS - BYTES_IN_STACKSLOT, FP);
} else {
// (1e) spill both in VM and OS
spillOffsetVM += BYTES_IN_LONG;
asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM - BYTES_IN_LONG, FP);
asmArg.emitSTFD(FIRST_SCRATCH_FPR, spillOffsetOS - BYTES_IN_LONG, FP);
}
} else if (VM.BuildFor64Addr && types[arg].isLongType()) {
// For 64-bit int arguments on 64-bit platforms
//
spillOffsetOS += BYTES_IN_LONG;
// (1a) fit in OS register, move the register
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
asmArg.emitMR(GPR.lookup(nextOSArgReg++), GPR.lookup(nextVMArgReg++));
// (1b) spill OS register, but still fit in VM register
} else if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
asmArg.emitSTAddr(GPR.lookup(nextVMArgReg++), spillOffsetOS - BYTES_IN_LONG, FP);
} else {
// (1c) spill VM register
spillOffsetVM += BYTES_IN_LONG;
// retrieve arg from VM spill area
asmArg.emitLAddr(REGISTER_ZERO, spillOffsetVM - BYTES_IN_LONG, FP);
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS - BYTES_IN_LONG, FP);
}
} else if (types[arg].isReferenceType()) {
// For reference type, replace with handles before passing to OS
//
spillOffsetOS += BYTES_IN_ADDRESS;
// (1a) fit in OS register, move the register
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
// Are we passing NULL?
asmArg.emitCMPI(GPR.lookup(nextVMArgReg), 0);
ForwardReference isNull = asmArg.emitForwardBC(EQ);
// NO: put it in the JNIRefs array and pass offset
// append ref to end of JNIRefs array
asmArg.emitSTAddrU(GPR.lookup(nextVMArgReg), BYTES_IN_ADDRESS, KLUDGE_TI_REG);
// pass offset in bytes of jref
asmArg.emitSUBFC(GPR.lookup(nextOSArgReg), THREAD_REGISTER, KLUDGE_TI_REG);
ForwardReference done = asmArg.emitForwardB();
// YES: pass NULL (0)
isNull.resolve(asmArg);
asmArg.emitMR(GPR.lookup(nextOSArgReg), GPR.lookup(nextVMArgReg));
// JOIN PATHS
done.resolve(asmArg);
nextVMArgReg++;
nextOSArgReg++;
} else if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
// (1b) spill OS register, but still fit in VM register
// Are we passing NULL?
asmArg.emitCMPI(GPR.lookup(nextVMArgReg), 0);
ForwardReference isNull = asmArg.emitForwardBC(EQ);
// NO: put it in the JNIRefs array and pass offset
// append ref to end of JNIRefs array
asmArg.emitSTAddrU(GPR.lookup(nextVMArgReg), BYTES_IN_ADDRESS, KLUDGE_TI_REG);
// compute offset in bytes for jref
asmArg.emitSUBFC(REGISTER_ZERO, THREAD_REGISTER, KLUDGE_TI_REG);
ForwardReference done = asmArg.emitForwardB();
// YES: pass NULL (0)
isNull.resolve(asmArg);
asmArg.emitLVAL(REGISTER_ZERO, 0);
// JOIN PATHS
done.resolve(asmArg);
// spill into OS frame
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS - BYTES_IN_ADDRESS, FP);
nextVMArgReg++;
} else {
// (1c) spill VM register
spillOffsetVM += BYTES_IN_STACKSLOT;
// retrieve arg from VM spill area
asmArg.emitLAddr(REGISTER_ZERO, spillOffsetVM - BYTES_IN_ADDRESS, FP);
// Are we passing NULL?
asmArg.emitCMPI(REGISTER_ZERO, 0);
ForwardReference isNull = asmArg.emitForwardBC(EQ);
// NO: put it in the JNIRefs array and pass offset
// append ref to end of JNIRefs array
asmArg.emitSTAddrU(REGISTER_ZERO, BYTES_IN_ADDRESS, KLUDGE_TI_REG);
// compute offset in bytes for jref
asmArg.emitSUBFC(REGISTER_ZERO, THREAD_REGISTER, KLUDGE_TI_REG);
ForwardReference done = asmArg.emitForwardB();
// YES: pass NULL (0)
isNull.resolve(asmArg);
asmArg.emitLVAL(REGISTER_ZERO, 0);
// JOIN PATHS
done.resolve(asmArg);
// spill into OS frame
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS - BYTES_IN_ADDRESS, FP);
}
} else {
// For all other types: int, short, char, byte, boolean
spillOffsetOS += BYTES_IN_STACKSLOT;
// (1a) fit in OS register, move the register
if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
asmArg.emitMR(GPR.lookup(nextOSArgReg++), GPR.lookup(nextVMArgReg++));
} else if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
// (1b) spill OS register, but still fit in VM register
asmArg.emitSTAddr(GPR.lookup(nextVMArgReg++), spillOffsetOS - BYTES_IN_ADDRESS, FP);
} else {
// (1c) spill VM register
spillOffsetVM += BYTES_IN_STACKSLOT;
// retrieve arg from VM spill area
asmArg.emitLInt(REGISTER_ZERO, spillOffsetVM - BYTES_IN_INT, FP);
asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS - BYTES_IN_ADDRESS, FP);
}
}
}
// append the per-argument code sequences in reverse order so that the moves do not overwrite parameters that have not yet been copied
for (int arg = numArguments - 1; arg >= 0; arg--) {
asm.appendInstructions(asmForArgs[arg].getMachineCodes());
}
}
}
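A minimal sketch (hypothetical, not JikesRVM code) of the forward-analysis / reverse-emission pattern described in the comment at the top of this method: each argument's move sequence is generated into its own buffer while walking the arguments in forward order, and the buffers are then appended to the main stream in reverse order, so that moves for later (higher-numbered) arguments run first and cannot clobber arguments that have not yet been copied.

import java.util.ArrayList;
import java.util.List;

final class ReverseEmitSketch {
    public static void main(String[] args) {
        String[] argumentTypes = {"int", "double", "Object"};

        // forward pass: analyze each argument and generate its move sequence into its own buffer
        List<List<String>> perArg = new ArrayList<>();
        for (String t : argumentTypes) {
            List<String> seq = new ArrayList<>();
            seq.add("copy " + t + " argument to its OS-convention location");
            perArg.add(seq);
        }

        // reverse pass: append the per-argument buffers, last argument first
        List<String> mainStream = new ArrayList<>();
        for (int i = perArg.size() - 1; i >= 0; i--) {
            mainStream.addAll(perArg.get(i));
        }
        mainStream.forEach(System.out::println);
    }
}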