Usage of org.jikesrvm.compilers.opt.ir.ppc.PhysicalRegisterSet in the JikesRVM project:
class CallingConvention, method callExpand.
/**
 * Expands a call instruction to conform to the PowerPC calling convention:
 * the leading parameters are copied into the architected volatile argument
 * registers, the overflow parameters are spilled to the caller's outgoing
 * parameter area on the stack, and any result is copied out of the return
 * register(s) after the call.
 *
 * @param s the call instruction
 * @param ir the ir
 */
private static void callExpand(Instruction s, IR ir) {
    int NumberParams = MIR_Call.getNumberOfParams(s);
    // index of the next free integer volatile parameter register
    int int_index = 0;
    // index of the next free f.p. volatile parameter register
    int double_index = 0;
    // next free offset in the outgoing parameter area; starts just past the stack frame header
    int callSpillLoc = STACKFRAME_HEADER_SIZE;
    PhysicalRegisterSet phys = (PhysicalRegisterSet) ir.regpool.getPhysicalRegisterSet();
    // instruction preceding the call: spill stores are inserted right after it,
    // i.e. at the head of the call-expansion sequence
    Instruction prev = s.prevInstructionInCodeOrder();
    Register FP = phys.getFP();
    boolean isSysCall = ir.stackManager.isSysCall(s);
    // true while the previous int-parameter slot held the first half of a long
    // (used only for the SVR4 syscall GPR-alignment tweak below)
    boolean firstLongHalf = false;
    // (1) Expand parameters
    for (int opNum = 0; opNum < NumberParams; opNum++) {
        Operand param = MIR_Call.getClearParam(s, opNum);
        RegisterOperand Reg = (RegisterOperand) param;
        // as part of getting into MIR, we make sure all params are in registers.
        Register reg = Reg.getRegister();
        if (Reg.getType().isFloatType()) {
            if (double_index < NUMBER_DOUBLE_PARAM) {
                // register copy into the next volatile FPR
                Register real = phys.get(FIRST_DOUBLE_PARAM + (double_index++));
                s.insertBefore(MIR_Move.create(PPC_FMR, F(real), Reg));
                Reg = F(real);
                // Record that the call now has a use of the real reg
                // This is to ensure liveness is correct
                MIR_Call.setParam(s, opNum, Reg);
            } else {
                // spill to memory; the slot is address-sized even for a 4-byte float
                Instruction p = prev.nextInstructionInCodeOrder();
                callSpillLoc += BYTES_IN_ADDRESS;
                // the float is stored in the high-addressed BYTES_IN_FLOAT bytes of
                // the slot (offset = slot end - BYTES_IN_FLOAT); presumably this is
                // where the callee's prologue expects it -- confirm against callee side
                p.insertBefore(MIR_Store.create(PPC_STFS, F(reg), A(FP), IC(callSpillLoc - BYTES_IN_FLOAT)));
                // We don't have uses of the heap at MIR, so null it out
                MIR_Call.setParam(s, opNum, null);
            }
        } else if (Reg.getType().isDoubleType()) {
            if (double_index < NUMBER_DOUBLE_PARAM) {
                // register copy into the next volatile FPR
                Register real = phys.get(FIRST_DOUBLE_PARAM + (double_index++));
                s.insertBefore(MIR_Move.create(PPC_FMR, D(real), Reg));
                Reg = D(real);
                // Record that the call now has a use of the real reg
                // This is to ensure liveness is correct
                MIR_Call.setParam(s, opNum, Reg);
            } else {
                // spill to memory (full 8-byte slot)
                Instruction p = prev.nextInstructionInCodeOrder();
                p.insertBefore(MIR_Store.create(PPC_STFD, D(reg), A(FP), IC(callSpillLoc)));
                callSpillLoc += BYTES_IN_DOUBLE;
                // We don't have uses of the heap at MIR, so null it out
                MIR_Call.setParam(s, opNum, null);
            }
        } else {
            // IntType (or half of long) or reference
            if (VM.BuildForSVR4ABI) {
                /* NOTE: following adjustment is not stated in SVR4 ABI, but
                 * was implemented in GCC: the first half of a long passed to a
                 * syscall must start in an even-numbered GPR, so skip one
                 * register if necessary.
                 */
                if (isSysCall && Reg.getType().isLongType()) {
                    if (firstLongHalf) {
                        // this is the second half; no alignment needed
                        firstLongHalf = false;
                    } else {
                        int true_index = FIRST_INT_PARAM + int_index;
                        // if gpr is even, gpr += 1
                        int_index += (true_index + 1) & 0x01;
                        firstLongHalf = true;
                    }
                }
            }
            if (int_index < NUMBER_INT_PARAM) {
                // register copy into the next volatile GPR
                Register real = phys.get(FIRST_INT_PARAM + (int_index++));
                RegisterOperand Real = new RegisterOperand(real, Reg.getType());
                s.insertBefore(MIR_Move.create(PPC_MOVE, Real, Reg));
                // NOTE(review): a second operand for the same physical register is
                // created for the call -- presumably because an Operand instance may
                // belong to only one instruction; confirm against Operand's contract
                Reg = new RegisterOperand(real, Reg.getType());
                // Record that the call now has a use of the real reg
                // This is to ensure liveness is correct
                MIR_Call.setParam(s, opNum, Reg);
            } else {
                // spill to memory; the slot is address-sized
                Instruction p = prev.nextInstructionInCodeOrder();
                callSpillLoc += BYTES_IN_ADDRESS;
                if (VM.BuildFor64Addr && (Reg.getType().isIntType() || Reg.getType().isShortType() || Reg.getType().isByteType() || Reg.getType().isCharType() || Reg.getType().isBooleanType())) {
                    // 64-bit build, sub-word value: store 4 bytes at the high end of the
                    // 8-byte slot (offset = slot end - BYTES_IN_INT)
                    p.insertBefore(MIR_Store.create(PPC_STW, new RegisterOperand(reg, Reg.getType()), A(FP), IC(callSpillLoc - BYTES_IN_INT)));
                } else {
                    // same size as addr (ie, either we're in 32 bit mode or we're in 64 bit mode and it's a reference or long)
                    p.insertBefore(MIR_Store.create(PPC_STAddr, new RegisterOperand(reg, Reg.getType()), A(FP), IC(callSpillLoc - BYTES_IN_ADDRESS)));
                }
                // We don't have uses of the heap at MIR, so null it out
                MIR_Call.setParam(s, opNum, null);
            }
        }
    }
    // then make sure we have a big enough stack: anything beyond the header
    // means at least one parameter was spilled
    if (callSpillLoc != STACKFRAME_HEADER_SIZE) {
        ir.stackManager.allocateParameterSpace(callSpillLoc);
    }
    // (2) expand result: copy return registers into the call's result operands.
    // lastCallSeqInstr tracks the tail of the expansion so result moves stay in order.
    Instruction lastCallSeqInstr = s;
    if (MIR_Call.hasResult2(s)) {
        // a second result register is only used for the high half of a long on 32-bit builds
        if (VM.VerifyAssertions)
            VM._assert(VM.BuildFor32Addr);
        RegisterOperand result2 = MIR_Call.getClearResult2(s);
        RegisterOperand physical = new RegisterOperand(phys.get(FIRST_INT_RETURN + 1), result2.getType());
        Instruction tmp = MIR_Move.create(PPC_MOVE, result2, physical);
        lastCallSeqInstr.insertAfter(tmp);
        lastCallSeqInstr = tmp;
        MIR_Call.setResult2(s, null);
    }
    if (MIR_Call.hasResult(s)) {
        RegisterOperand result1 = MIR_Call.getClearResult(s);
        if (result1.getType().isFloatingPointType()) {
            // f.p. results come back in the first f.p. return register
            RegisterOperand physical = new RegisterOperand(phys.get(FIRST_DOUBLE_RETURN), result1.getType());
            Instruction tmp = MIR_Move.create(PPC_FMR, result1, physical);
            lastCallSeqInstr.insertAfter(tmp);
            lastCallSeqInstr = tmp;
            MIR_Call.setResult(s, null);
        } else {
            // integer/reference results come back in the first integer return register
            RegisterOperand physical = new RegisterOperand(phys.get(FIRST_INT_RETURN), result1.getType());
            Instruction tmp = MIR_Move.create(PPC_MOVE, result1, physical);
            lastCallSeqInstr.insertAfter(tmp);
            lastCallSeqInstr = tmp;
            MIR_Call.setResult(s, null);
        }
    }
}
Usage of org.jikesrvm.compilers.opt.ir.ppc.PhysicalRegisterSet in the JikesRVM project:
class StackManager, method initForArch.
/**
 * Performs PPC-specific initialization for this object: marks the
 * JTOC register and the first condition register as reserved so the
 * register allocator will not hand them out.
 *
 * @param ir the governing ir
 */
@Override
public void initForArch(IR ir) {
    // Fetch the PPC view of the physical register set for this IR.
    final PhysicalRegisterSet ppcRegs = ir.regpool.getPhysicalRegisterSet().asPPC();
    // Keep the JTOC and CR0 out of the allocatable pool.
    ppcRegs.getJTOC().reserveRegister();
    ppcRegs.getFirstConditionRegister().reserveRegister();
}
Usage of org.jikesrvm.compilers.opt.ir.ppc.PhysicalRegisterSet in the JikesRVM project:
class FinalMIRExpansion, method expand.
/**
 * Performs final MIR expansion over the whole IR: lowers pseudo-instructions
 * (table switches, two-way conditional branches, yieldpoints, resolves) into
 * real PPC instructions and counts the instructions emitted.
 *
 * @param ir the IR to expand
 * @return upperbound on number of machine code instructions
 * that will be generated for this IR
 */
public static int expand(IR ir) {
    int instructionCount = 0;
    // counted separately because each conditional branch may need an extra
    // instruction if displacements grow too large (see sizing logic at the end)
    int conditionalBranchCount = 0;
    int machinecodeLength = 0;
    PhysicalRegisterSet phys = ir.regpool.getPhysicalRegisterSet().asPPC();
    MachineCodeOffsets mcOffsets = ir.MIRInfo.mcOffsets;
    for (Instruction p = ir.firstInstructionInCodeOrder(); p != null; p = p.nextInstructionInCodeOrder()) {
        // reset the machine code offset; it is filled in during assembly
        mcOffsets.setMachineCodeOffset(p, -1);
        switch(p.getOpcode()) {
            case MIR_LOWTABLESWITCH_opcode:
                {
                    // Lower a table switch: emit a BL over an inline table of branch
                    // targets, then use the link register (which points at the table)
                    // plus the scaled index to compute the target and branch via CTR.
                    BasicBlock tableBlock = p.getBasicBlock();
                    BasicBlock nextBlock = tableBlock.splitNodeWithLinksAt(p.prevInstructionInCodeOrder(), ir);
                    mcOffsets.setMachineCodeOffset(nextBlock.firstInstruction(), -1);
                    Register regI = MIR_LowTableSwitch.getIndex(p).getRegister();
                    int NumTargets = MIR_LowTableSwitch.getNumberOfTargets(p);
                    // BL to the next block: its side effect is loading LR with the
                    // address of the data table that follows it
                    tableBlock.appendInstruction(MIR_Call.create0(PPC_BL, null, null, nextBlock.makeJumpTarget()));
                    for (int i = 0; i < NumTargets; i++) {
                        tableBlock.appendInstruction(MIR_DataLabel.create(PPC_DATA_LABEL, MIR_LowTableSwitch.getClearTarget(p, i)));
                    }
                    Register temp = phys.getGPR(0);
                    p.insertBefore(MIR_Move.create(PPC_MFSPR, A(temp), A(phys.getLR())));
                    // index *= 4 (table entries are 4 bytes)
                    p.insertBefore(MIR_Binary.create(PPC_SLWI, I(regI), I(regI), IC(2)));
                    // load the table entry and compute the absolute branch target
                    p.insertBefore(MIR_LoadUpdate.create(PPC_LIntUX, I(temp), I(regI), A(temp)));
                    p.insertBefore(MIR_Binary.create(PPC_ADD, A(regI), A(regI), I(temp)));
                    p.insertBefore(MIR_Move.create(PPC_MTSPR, A(phys.getCTR()), A(regI)));
                    MIR_Branch.mutate(p, PPC_BCTR);
                    instructionCount += NumTargets + 7;
                }
                break;
            case PPC_BCOND2_opcode:
                {
                    // Split a two-way conditional branch into two PPC_BCOND instructions
                    // testing the same condition register value.
                    RegisterOperand cond = MIR_CondBranch2.getClearValue(p);
                    p.insertAfter(MIR_CondBranch.create(PPC_BCOND, cond.copyU2U(), MIR_CondBranch2.getClearCond2(p), MIR_CondBranch2.getClearTarget2(p), MIR_CondBranch2.getClearBranchProfile2(p)));
                    MIR_CondBranch.mutate(p, PPC_BCOND, cond, MIR_CondBranch2.getClearCond1(p), MIR_CondBranch2.getClearTarget1(p), MIR_CondBranch2.getClearBranchProfile1(p));
                    conditionalBranchCount++;
                }
                break;
            case PPC_BLRL_opcode:
            case PPC_BCTRL_opcode:
                {
                    // Indirect call. For interface invocations via IMT, hide the
                    // interface method signature id in a scratch GPR before the call.
                    // See also ConvertToLowlevelIR.java
                    if (VM.BuildForIMTInterfaceInvocation) {
                        if (MIR_Call.hasMethod(p)) {
                            MethodOperand mo = MIR_Call.getMethod(p);
                            if (mo.isInterface()) {
                                InterfaceMethodSignature sig = InterfaceMethodSignature.findOrCreate(mo.getMemberRef());
                                int signatureId = sig.getId();
                                Instruction s;
                                if (fits(signatureId, 16)) {
                                    // id fits in a 16-bit immediate: single load-immediate
                                    s = MIR_Unary.create(PPC_LDI, I(phys.getGPR(LAST_SCRATCH_GPR)), IC(signatureId));
                                    p.insertBefore(s);
                                    instructionCount++;
                                } else {
                                    // build the 32-bit id from upper and lower 16-bit halves
                                    s = MIR_Unary.create(PPC_LDIS, I(phys.getGPR(LAST_SCRATCH_GPR)), IC(PPCMaskUpper16(signatureId)));
                                    p.insertBefore(s);
                                    s = MIR_Binary.create(PPC_ADDI, I(phys.getGPR(LAST_SCRATCH_GPR)), I(phys.getGPR(LAST_SCRATCH_GPR)), IC(PPCMaskLower16(signatureId)));
                                    p.insertBefore(s);
                                    instructionCount += 2;
                                }
                            }
                        }
                    }
                    instructionCount++;
                }
                break;
            case LABEL_opcode:
            case BBEND_opcode:
            case UNINT_BEGIN_opcode:
            case UNINT_END_opcode:
                // These generate no code, so don't count them.
                break;
            case RESOLVE_opcode:
                {
                    // Lower a dynamic-linking resolve into a call through CTR to the
                    // opt resolve method, loaded from the JTOC.
                    Register zero = phys.getGPR(0);
                    Register JTOC = phys.getJTOC();
                    Register CTR = phys.getCTR();
                    if (VM.VerifyAssertions)
                        VM._assert(p.getBytecodeIndex() >= 0 && p.position() != null);
                    Offset offset = Entrypoints.optResolveMethod.getOffset();
                    if (fits(offset, 16)) {
                        p.insertBefore(MIR_Load.create(PPC_LAddr, A(zero), A(JTOC), IC(PPCMaskLower16(offset))));
                    } else {
                        // offsets beyond 32 bits are not implemented
                        if (VM.VerifyAssertions)
                            VM._assert(fits(offset, 32));
                        // add the upper half to JTOC, then load with the lower half
                        p.insertBefore(MIR_Binary.create(PPC_ADDIS, A(zero), A(JTOC), IC(PPCMaskUpper16(offset))));
                        p.insertBefore(MIR_Load.create(PPC_LAddr, A(zero), A(zero), IC(PPCMaskLower16(offset))));
                        instructionCount += 1;
                    }
                    p.insertBefore(MIR_Move.create(PPC_MTSPR, A(CTR), A(zero)));
                    instructionCount += 3;
                    // Because the GC Map code holds a reference to the original
                    // instruction, it is important that we mutate the last instruction
                    // because this will be the GC point.
                    MIR_Call.mutate0(p, PPC_BCTRL, null, null);
                    break;
                }
            case YIELDPOINT_PROLOGUE_opcode:
                {
                    Register TSR = phys.getTSR();
                    BasicBlock yieldpoint = findOrCreateYieldpointBlock(ir, RVMThread.PROLOGUE);
                    // Because the GC Map code holds a reference to the original
                    // instruction, it is important that we mutate the last instruction
                    // because this will be the GC point.
                    MIR_CondCall.mutate0(p, PPC_BCL, null, null, I(TSR), PowerPCConditionOperand.NOT_EQUAL(), yieldpoint.makeJumpTarget());
                    p.getBasicBlock().insertOut(yieldpoint);
                    conditionalBranchCount++;
                }
                break;
            case YIELDPOINT_BACKEDGE_opcode:
                {
                    // Load takeYieldpoint from the thread and conditionally call the
                    // yieldpoint block when it is > 0.
                    BasicBlock yieldpoint = findOrCreateYieldpointBlock(ir, RVMThread.BACKEDGE);
                    Register zero = phys.getGPR(0);
                    Register TSR = phys.getTSR();
                    Register TR = phys.getTR();
                    Offset offset = Entrypoints.takeYieldpointField.getOffset();
                    if (VM.VerifyAssertions)
                        VM._assert(fits(offset, 16));
                    p.insertBefore(MIR_Load.create(PPC_LInt, I(zero), A(TR), IC(PPCMaskLower16(offset))));
                    p.insertBefore(MIR_Binary.create(PPC_CMPI, I(TSR), I(zero), IC(0)));
                    instructionCount += 2;
                    // Because the GC Map code holds a reference to the original
                    // instruction, it is important that we mutate the last instruction
                    // because this will be the GC point.
                    MIR_CondCall.mutate0(p, PPC_BCL, null, null, I(TSR), PowerPCConditionOperand.GREATER(), yieldpoint.makeJumpTarget());
                    p.getBasicBlock().insertOut(yieldpoint);
                    conditionalBranchCount++;
                }
                break;
            case YIELDPOINT_EPILOGUE_opcode:
                {
                    // Like the backedge case, but taken whenever takeYieldpoint != 0.
                    BasicBlock yieldpoint = findOrCreateYieldpointBlock(ir, RVMThread.EPILOGUE);
                    Register zero = phys.getGPR(0);
                    Register TSR = phys.getTSR();
                    Register TR = phys.getTR();
                    Offset offset = Entrypoints.takeYieldpointField.getOffset();
                    if (VM.VerifyAssertions)
                        VM._assert(fits(offset, 16));
                    p.insertBefore(MIR_Load.create(PPC_LInt, I(zero), A(TR), IC(PPCMaskLower16(offset))));
                    p.insertBefore(MIR_Binary.create(PPC_CMPI, I(TSR), I(zero), IC(0)));
                    instructionCount += 2;
                    // Because the GC Map code holds a reference to the original
                    // instruction, it is important that we mutate the last instruction
                    // because this will be the GC point.
                    MIR_CondCall.mutate0(p, PPC_BCL, null, null, I(TSR), PowerPCConditionOperand.NOT_EQUAL(), yieldpoint.makeJumpTarget());
                    p.getBasicBlock().insertOut(yieldpoint);
                    conditionalBranchCount++;
                }
                break;
            case YIELDPOINT_OSR_opcode:
                {
                    // unconditionally branch to yield point.
                    BasicBlock yieldpoint = findOrCreateYieldpointBlock(ir, RVMThread.OSROPT);
                    // Because the GC Map code holds a reference to the original
                    // instruction, it is important that we mutate the last instruction
                    // because this will be the GC point.
                    MIR_Call.mutate0(p, PPC_BL, null, null, yieldpoint.makeJumpTarget());
                    p.getBasicBlock().insertOut(yieldpoint);
                }
                instructionCount++;
                break;
            default:
                if (p.operator().isConditionalBranch()) {
                    conditionalBranchCount++;
                } else {
                    instructionCount++;
                }
                break;
        }
    }
    // reasonably sized methods: if conditional displacements might not fit,
    // budget two instructions per conditional branch instead of one
    if ((instructionCount + conditionalBranchCount) > AssemblerOpt.MAX_COND_DISPL) {
        machinecodeLength = instructionCount + 2 * conditionalBranchCount;
    } else {
        machinecodeLength = instructionCount + conditionalBranchCount;
    }
    // a branch displacement is limited to 24 bits; give up on larger methods
    if ((machinecodeLength & ~AssemblerOpt.MAX_24_BITS) != 0) {
        throw new OptimizingCompilerException("CodeGen", "method too large to compile:", AssemblerOpt.MAX_24_BITS);
    }
    return machinecodeLength;
}
Aggregations