Example use of org.jikesrvm.ppc.RegisterConstants.LAST_VOLATILE_GPR in the JikesRVM project:
the method genSVR4ParameterPassingCode of the class JNICompiler.
/**
 * Generates instructions to copy parameters from RVM calling convention to the
 * OS (SVR4) calling convention. The caller supplies the starting register
 * numbers and spill offsets for both conventions; this method walks the
 * parameter types in order, deciding for each argument whether its source is a
 * register or the VM spill area and whether its destination is a register or
 * the OS spill area, and emits the corresponding move/load/store instructions.
 *
 * @param asm The {@link Assembler} object into which the final code is emitted
 * @param types The parameter types
 * @param nextVMArgReg The first parameter GPR in RVM convention,
 * the last parameter GPR is defined as LAST_VOLATILE_GPR.
 * @param nextVMArgFloatReg The first parameter FPR in RVM convention,
 * the last parameter FPR is defined as LAST_VOLATILE_FPR.
 * @param spillOffsetVM The spill offset (relative to FP) in RVM convention
 * @param nextOSArgReg the first parameter GPR in OS convention,
 * the last parameter GPR is defined as LAST_OS_PARAMETER_GPR.
 * @param nextOSArgFloatReg The first parameter FPR in OS convention,
 * the last parameter FPR is defined as LAST_OS_PARAMETER_FPR.
 * @param spillOffsetOS The spill offset (relative to FP) in OS convention
 */
private static void genSVR4ParameterPassingCode(Assembler asm, TypeReference[] types, int nextVMArgReg, int nextVMArgFloatReg, int spillOffsetVM, int nextOSArgReg, int nextOSArgFloatReg, int spillOffsetOS) {
  if (VM.BuildForSVR4ABI) {
    // create one Assembler object for each argument
    // This is needed for the following reason:
    //   -2 new arguments are added in front for native methods, so the normal arguments
    //    need to be shifted down in addition to being moved
    //   -to avoid overwriting each other, the arguments must be copied in reverse order
    //   -the analysis for mapping however must be done in forward order
    //   -the moving/mapping for each argument may involve a sequence of 1-3 instructions
    //    which must be kept in the normal order
    // To solve this problem, the instructions for each argument is generated in its
    // own Assembler in the forward pass, then in the reverse pass, each Assembler
    // emits the instruction sequence and copies it into the main Assembler
    int numArguments = types.length;
    Assembler[] asmForArgs = new Assembler[numArguments];
    for (int arg = 0; arg < numArguments; arg++) {
      asmForArgs[arg] = new Assembler(0);
      Assembler asmArg = asmForArgs[arg];
      //
      if (types[arg].isFloatingPointType()) {
        // Float/double argument: handled entirely through the FPR file.
        boolean is32bits = types[arg].isFloatType();
        // 1. check the source, the value will be in srcVMArg
        // scratch fpr
        FPR srcVMArg;
        if (nextVMArgFloatReg <= LAST_VOLATILE_FPR.value()) {
          // Value already sits in a VM volatile FPR; use it in place.
          srcVMArg = FPR.lookup(nextVMArgFloatReg);
          nextVMArgFloatReg++;
        } else {
          srcVMArg = FIRST_SCRATCH_FPR;
          // VM float reg is in spill area
          if (is32bits) {
            // A float occupies one stack slot; the value sits in the
            // high-address BYTES_IN_FLOAT of the slot (hence the offset math).
            spillOffsetVM += BYTES_IN_STACKSLOT;
            asmArg.emitLFS(srcVMArg, spillOffsetVM - BYTES_IN_FLOAT, FP);
          } else {
            asmArg.emitLFD(srcVMArg, spillOffsetVM, FP);
            spillOffsetVM += BYTES_IN_DOUBLE;
          }
        }
        // 2. check the destination,
        if (nextOSArgFloatReg <= LAST_OS_PARAMETER_FPR.value()) {
          // leave it there
          // NOTE(review): no move is emitted here, so this path appears to
          // assume the VM and OS parameter FPRs coincide for this argument
          // position -- confirm against the register-constant definitions.
          nextOSArgFloatReg++;
        } else {
          // OS FPRs exhausted: spill the value to the OS argument area.
          if (VM.BuildForSVR4ABI) {
            if (is32bits) {
              asmArg.emitSTFS(srcVMArg, spillOffsetOS, FP);
              spillOffsetOS += BYTES_IN_ADDRESS;
            } else {
              // spill it, round the spill address to 8
              // assuming FP is aligned to 8
              spillOffsetOS = (spillOffsetOS + 7) & -8;
              asmArg.emitSTFD(srcVMArg, spillOffsetOS, FP);
              spillOffsetOS += BYTES_IN_DOUBLE;
            }
          }
        }
        // for 64-bit long arguments
      } else if (types[arg].isLongType() && VM.BuildFor32Addr) {
        // On a 32-bit build a long occupies two GPRs (or an 8-byte spill slot).
        // handle OS first
        boolean dstSpilling;
        // it is register number or spilling offset
        int regOrSpilling = -1;
        // 1. check if Linux register > 9
        // A long needs a full register pair, so spill unless at least two
        // OS parameter GPRs remain (hence the "- 1" in the comparison).
        if (nextOSArgReg > (LAST_OS_PARAMETER_GPR.value() - 1)) {
          // goes to spilling area
          dstSpilling = true;
          if (VM.BuildForSVR4ABI) {
            /* NOTE: following adjustment is not stated in SVR4 ABI, but
             * was implemented in GCC.
             * -- Feng
             */
            // Once a long spills, no later argument uses GPRs either.
            nextOSArgReg = LAST_OS_PARAMETER_GPR.value() + 1;
            // do alignment and compute spilling offset
            spillOffsetOS = (spillOffsetOS + 7) & -8;
            regOrSpilling = spillOffsetOS;
            spillOffsetOS += BYTES_IN_LONG;
          }
        } else {
          // use registers
          dstSpilling = false;
          if (VM.BuildForSVR4ABI) {
            // rounds to odd
            // if gpr is even, gpr += 1
            // (SVR4 passes 32-bit-build longs in an odd/even register pair)
            nextOSArgReg += (nextOSArgReg + 1) & 0x01;
            regOrSpilling = nextOSArgReg;
            nextOSArgReg += 2;
          }
        }
        // handle RVM source
        if (nextVMArgReg < LAST_VOLATILE_GPR.value()) {
          // both parts in registers
          if (dstSpilling) {
            // Store high word at regOrSpilling, low word at +4 (big-endian).
            asmArg.emitSTW(GPR.lookup(nextVMArgReg + 1), regOrSpilling + 4, FP);
            if (VM.BuildForSVR4ABI) {
              asmArg.emitSTW(GPR.lookup(nextVMArgReg), regOrSpilling, FP);
            }
          } else {
            // Copy low word first so the pair is not clobbered when the
            // source and destination register ranges overlap.
            asmArg.emitMR(GPR.lookup(regOrSpilling + 1), GPR.lookup(nextVMArgReg + 1));
            asmArg.emitMR(GPR.lookup(regOrSpilling), GPR.lookup(nextVMArgReg));
          }
          // advance register counting, Linux register number
          // already advanced
          nextVMArgReg += 2;
        } else if (nextVMArgReg == LAST_VOLATILE_GPR.value()) {
          // VM striding
          // The long straddles the register/spill boundary: high word is in
          // the last volatile GPR, low word is in the VM spill area.
          if (dstSpilling) {
            // R0 serves as a scratch register for the memory-to-memory copy.
            asmArg.emitLWZ(REGISTER_ZERO, spillOffsetVM, FP);
            asmArg.emitSTW(REGISTER_ZERO, regOrSpilling + 4, FP);
            asmArg.emitSTW(GPR.lookup(nextVMArgReg), regOrSpilling, FP);
          } else {
            asmArg.emitLWZ(GPR.lookup(regOrSpilling + 1), spillOffsetVM, FP);
            asmArg.emitMR(GPR.lookup(regOrSpilling), GPR.lookup(nextVMArgReg));
          }
          // advance spillOffsetVM and nextVMArgReg
          nextVMArgReg++;
          spillOffsetVM += BYTES_IN_STACKSLOT;
        } else if (nextVMArgReg > LAST_VOLATILE_GPR.value()) {
          // Entire long is in the VM spill area.
          if (dstSpilling) {
            // Use a scratch FPR to move all 8 bytes in one load/store pair.
            asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM, FP);
            asmArg.emitSTFD(FIRST_SCRATCH_FPR, regOrSpilling, FP);
          } else {
            // this should not happen, VM spills, OS has registers
            asmArg.emitLWZ(GPR.lookup(regOrSpilling + 1), spillOffsetVM + 4, FP);
            asmArg.emitLWZ(GPR.lookup(regOrSpilling), spillOffsetVM, FP);
          }
          spillOffsetVM += BYTES_IN_LONG;
        }
      } else if (types[arg].isLongType() && VM.BuildFor64Addr) {
        // On a 64-bit build a long fits in a single GPR.
        // handle OS first
        boolean dstSpilling;
        // it is register number or spilling offset
        int regOrSpilling = -1;
        // 1. check if Linux register > 9
        if (nextOSArgReg > LAST_OS_PARAMETER_GPR.value()) {
          // goes to spilling area
          dstSpilling = true;
          /* NOTE: following adjustment is not stated in SVR4 ABI, but
           * was implemented in GCC.
           * -- Feng
           */
          nextOSArgReg = LAST_OS_PARAMETER_GPR.value() + 1;
          // do alignment and compute spilling offset
          spillOffsetOS = (spillOffsetOS + 7) & -8;
          regOrSpilling = spillOffsetOS;
          spillOffsetOS += BYTES_IN_LONG;
        } else {
          // use registers
          dstSpilling = false;
          // rounds to odd
          // NOTE(review): no odd-rounding is actually performed here (unlike
          // the 32-bit case above) -- a single GPR suffices on 64-bit.
          regOrSpilling = nextOSArgReg;
          nextOSArgReg += 1;
        }
        // handle RVM source
        if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
          // both parts in registers
          if (dstSpilling) {
            asmArg.emitSTD(GPR.lookup(nextVMArgReg), regOrSpilling, FP);
          } else {
            asmArg.emitMR(GPR.lookup(regOrSpilling), GPR.lookup(nextVMArgReg));
          }
          // advance register counting, Linux register number
          // already advanced
          nextVMArgReg += 1;
        } else if (nextVMArgReg > LAST_VOLATILE_GPR.value()) {
          // Source is in the VM spill area.
          if (dstSpilling) {
            // Memory-to-memory copy via a scratch FPR (8 bytes at once).
            asmArg.emitLFD(FIRST_SCRATCH_FPR, spillOffsetVM, FP);
            asmArg.emitSTFD(FIRST_SCRATCH_FPR, regOrSpilling, FP);
          } else {
            // this should not happen, VM spills, OS has registers;
            asmArg.emitLD(GPR.lookup(regOrSpilling), spillOffsetVM, FP);
          }
          spillOffsetVM += BYTES_IN_LONG;
        }
      } else if (types[arg].isReferenceType()) {
        // For reference type, replace with handles before passing to native
        // (JNI passes object references as indirect handles, not raw pointers).
        GPR srcreg;
        if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
          srcreg = GPR.lookup(nextVMArgReg++);
        } else {
          // Reference is in the VM spill area; load it into scratch R0.
          srcreg = REGISTER_ZERO;
          asmArg.emitLAddr(srcreg, spillOffsetVM, FP);
          spillOffsetVM += BYTES_IN_ADDRESS;
        }
        // Are we passing NULL?
        asmArg.emitCMPI(srcreg, 0);
        ForwardReference isNull = asmArg.emitForwardBC(EQ);
        // NO: put it in the JNIRefs array and pass offset
        // Store-with-update pushes the reference and bumps the JNIRefs
        // top pointer held in KLUDGE_TI_REG in one instruction.
        asmArg.emitSTAddrU(srcreg, BYTES_IN_ADDRESS, KLUDGE_TI_REG);
        if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
          // Pass the entry's offset (KLUDGE_TI_REG - THREAD_REGISTER)
          // as the handle value.
          asmArg.emitSUBFC(GPR.lookup(nextOSArgReg), THREAD_REGISTER, KLUDGE_TI_REG);
        } else {
          asmArg.emitSUBFC(REGISTER_ZERO, THREAD_REGISTER, KLUDGE_TI_REG);
          asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS, FP);
        }
        ForwardReference done = asmArg.emitForwardB();
        // YES: pass NULL (0)
        isNull.resolve(asmArg);
        if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
          asmArg.emitLVAL(GPR.lookup(nextOSArgReg), 0);
        } else {
          // srcreg holds 0 on this path, so storing it passes NULL.
          asmArg.emitSTAddr(srcreg, spillOffsetOS, FP);
        }
        // JOIN PATHS
        done.resolve(asmArg);
        // Advance the OS argument position used by both branches above.
        if (VM.BuildForSVR4ABI) {
          if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
            nextOSArgReg++;
          } else {
            spillOffsetOS += BYTES_IN_ADDRESS;
          }
        }
      } else {
        // Remaining case: word-sized primitives (int, boolean, char, ...).
        // (1a) fit in OS register, move the register
        if (nextOSArgReg <= LAST_OS_PARAMETER_GPR.value()) {
          if (VM.BuildForSVR4ABI) {
            asmArg.emitMR(GPR.lookup(nextOSArgReg++), GPR.lookup(nextVMArgReg++));
          } else {
            asmArg.emitMR(GPR.lookup(nextOSArgReg), GPR.lookup(nextVMArgReg++));
          }
        } else if (nextVMArgReg <= LAST_VOLATILE_GPR.value()) {
          // (1b) spill OS register, but still fit in VM register
          asmArg.emitSTAddr(GPR.lookup(nextVMArgReg++), spillOffsetOS, FP);
          if (VM.BuildForSVR4ABI) {
            spillOffsetOS += BYTES_IN_ADDRESS;
          }
        } else {
          // (1c) spill VM register
          spillOffsetVM += BYTES_IN_STACKSLOT;
          // retrieve arg from VM spill area
          // (value sits in the high-address BYTES_IN_INT of the slot; R0
          // is used as a scratch register for the copy)
          asmArg.emitLInt(REGISTER_ZERO, spillOffsetVM - BYTES_IN_INT, FP);
          asmArg.emitSTAddr(REGISTER_ZERO, spillOffsetOS, FP);
          if (VM.BuildForSVR4ABI) {
            spillOffsetOS += BYTES_IN_ADDRESS;
          }
        }
      }
    }
    // Reverse pass: emit per-argument sequences last-argument-first
    // so that the move does not overwrite the parameters
    for (int arg = asmForArgs.length - 1; arg >= 0; arg--) {
      asm.appendInstructions(asmForArgs[arg].getMachineCodes());
    }
  }
}
Aggregations