Use of org.jikesrvm.compilers.opt.OptimizingCompilerException in project JikesRVM by JikesRVM.
The class FinalMIRExpansion, method expand.
/**
 * @param ir the IR to expand
 * @return upper bound on the number of machine code instructions
 *         that will be generated for this IR
 */
public static int expand(IR ir) {
  int instructionCount = 0;
  int conditionalBranchCount = 0;
  int machinecodeLength = 0;
  PhysicalRegisterSet phys = ir.regpool.getPhysicalRegisterSet().asPPC();
  MachineCodeOffsets mcOffsets = ir.MIRInfo.mcOffsets;
  for (Instruction p = ir.firstInstructionInCodeOrder(); p != null; p = p.nextInstructionInCodeOrder()) {
    mcOffsets.setMachineCodeOffset(p, -1);
    switch (p.getOpcode()) {
      case MIR_LOWTABLESWITCH_opcode:
        {
          BasicBlock tableBlock = p.getBasicBlock();
          BasicBlock nextBlock = tableBlock.splitNodeWithLinksAt(p.prevInstructionInCodeOrder(), ir);
          mcOffsets.setMachineCodeOffset(nextBlock.firstInstruction(), -1);
          Register regI = MIR_LowTableSwitch.getIndex(p).getRegister();
          int NumTargets = MIR_LowTableSwitch.getNumberOfTargets(p);
          // A BL over the jump table (the data labels emitted below) leaves the
          // table's address in the link register.
          tableBlock.appendInstruction(MIR_Call.create0(PPC_BL, null, null, nextBlock.makeJumpTarget()));
          for (int i = 0; i < NumTargets; i++) {
            tableBlock.appendInstruction(MIR_DataLabel.create(PPC_DATA_LABEL, MIR_LowTableSwitch.getClearTarget(p, i)));
          }
          // Read the table address from LR, fetch the entry for this index,
          // form the target address, and dispatch through the CTR.
          Register temp = phys.getGPR(0);
          p.insertBefore(MIR_Move.create(PPC_MFSPR, A(temp), A(phys.getLR())));
          p.insertBefore(MIR_Binary.create(PPC_SLWI, I(regI), I(regI), IC(2)));
          p.insertBefore(MIR_LoadUpdate.create(PPC_LIntUX, I(temp), I(regI), A(temp)));
          p.insertBefore(MIR_Binary.create(PPC_ADD, A(regI), A(regI), I(temp)));
          p.insertBefore(MIR_Move.create(PPC_MTSPR, A(phys.getCTR()), A(regI)));
          MIR_Branch.mutate(p, PPC_BCTR);
          instructionCount += NumTargets + 7;
        }
        break;
      case PPC_BCOND2_opcode:
        {
          RegisterOperand cond = MIR_CondBranch2.getClearValue(p);
          // Split the two-way conditional branch into two one-way conditional
          // branches; the second one is inserted after p, so the loop will
          // visit and count it separately.
          p.insertAfter(MIR_CondBranch.create(PPC_BCOND, cond.copyU2U(), MIR_CondBranch2.getClearCond2(p), MIR_CondBranch2.getClearTarget2(p), MIR_CondBranch2.getClearBranchProfile2(p)));
          MIR_CondBranch.mutate(p, PPC_BCOND, cond, MIR_CondBranch2.getClearCond1(p), MIR_CondBranch2.getClearTarget1(p), MIR_CondBranch2.getClearBranchProfile1(p));
          conditionalBranchCount++;
        }
        break;
      case PPC_BLRL_opcode:
      case PPC_BCTRL_opcode:
        {
          // See also ConvertToLowlevelIR.java
          if (VM.BuildForIMTInterfaceInvocation) {
            if (MIR_Call.hasMethod(p)) {
              MethodOperand mo = MIR_Call.getMethod(p);
              if (mo.isInterface()) {
                // Load the interface method signature id into the last scratch GPR
                // (the hidden parameter used for IMT dispatch).
                InterfaceMethodSignature sig = InterfaceMethodSignature.findOrCreate(mo.getMemberRef());
                int signatureId = sig.getId();
                Instruction s;
                if (fits(signatureId, 16)) {
                  s = MIR_Unary.create(PPC_LDI, I(phys.getGPR(LAST_SCRATCH_GPR)), IC(signatureId));
                  p.insertBefore(s);
                  instructionCount++;
                } else {
                  // The id does not fit in a signed 16-bit immediate: build it from
                  // its upper and lower 16-bit halves.
                  s = MIR_Unary.create(PPC_LDIS, I(phys.getGPR(LAST_SCRATCH_GPR)), IC(PPCMaskUpper16(signatureId)));
                  p.insertBefore(s);
                  s = MIR_Binary.create(PPC_ADDI, I(phys.getGPR(LAST_SCRATCH_GPR)), I(phys.getGPR(LAST_SCRATCH_GPR)), IC(PPCMaskLower16(signatureId)));
                  p.insertBefore(s);
                  instructionCount += 2;
                }
              }
            }
          }
          instructionCount++;
        }
        break;
      case LABEL_opcode:
      case BBEND_opcode:
      case UNINT_BEGIN_opcode:
      case UNINT_END_opcode:
        // These generate no code, so don't count them.
        break;
      case RESOLVE_opcode:
        {
          Register zero = phys.getGPR(0);
          Register JTOC = phys.getJTOC();
          Register CTR = phys.getCTR();
          if (VM.VerifyAssertions)
            VM._assert(p.getBytecodeIndex() >= 0 && p.position() != null);
          // Load the address of the opt resolve method from the JTOC and call it through the CTR.
          Offset offset = Entrypoints.optResolveMethod.getOffset();
          if (fits(offset, 16)) {
            p.insertBefore(MIR_Load.create(PPC_LAddr, A(zero), A(JTOC), IC(PPCMaskLower16(offset))));
          } else {
            // Offsets that do not fit in 32 bits are not implemented.
            if (VM.VerifyAssertions)
              VM._assert(fits(offset, 32));
            p.insertBefore(MIR_Binary.create(PPC_ADDIS, A(zero), A(JTOC), IC(PPCMaskUpper16(offset))));
            p.insertBefore(MIR_Load.create(PPC_LAddr, A(zero), A(zero), IC(PPCMaskLower16(offset))));
            instructionCount += 1;
          }
          p.insertBefore(MIR_Move.create(PPC_MTSPR, A(CTR), A(zero)));
          instructionCount += 3;
          // Because the GC Map code holds a reference to the original
          // instruction, it is important that we mutate the last instruction
          // because this will be the GC point.
          MIR_Call.mutate0(p, PPC_BCTRL, null, null);
          break;
        }
      case YIELDPOINT_PROLOGUE_opcode:
        {
          Register TSR = phys.getTSR();
          BasicBlock yieldpoint = findOrCreateYieldpointBlock(ir, RVMThread.PROLOGUE);
          // Because the GC Map code holds a reference to the original
          // instruction, it is important that we mutate the last instruction
          // because this will be the GC point.
          MIR_CondCall.mutate0(p, PPC_BCL, null, null, I(TSR), PowerPCConditionOperand.NOT_EQUAL(), yieldpoint.makeJumpTarget());
          p.getBasicBlock().insertOut(yieldpoint);
          conditionalBranchCount++;
        }
        break;
      case YIELDPOINT_BACKEDGE_opcode:
        {
          BasicBlock yieldpoint = findOrCreateYieldpointBlock(ir, RVMThread.BACKEDGE);
          Register zero = phys.getGPR(0);
          Register TSR = phys.getTSR();
          Register TR = phys.getTR();
          Offset offset = Entrypoints.takeYieldpointField.getOffset();
          if (VM.VerifyAssertions)
            VM._assert(fits(offset, 16));
          p.insertBefore(MIR_Load.create(PPC_LInt, I(zero), A(TR), IC(PPCMaskLower16(offset))));
          p.insertBefore(MIR_Binary.create(PPC_CMPI, I(TSR), I(zero), IC(0)));
          instructionCount += 2;
          // Because the GC Map code holds a reference to the original
          // instruction, it is important that we mutate the last instruction
          // because this will be the GC point.
          MIR_CondCall.mutate0(p, PPC_BCL, null, null, I(TSR), PowerPCConditionOperand.GREATER(), yieldpoint.makeJumpTarget());
          p.getBasicBlock().insertOut(yieldpoint);
          conditionalBranchCount++;
        }
        break;
      case YIELDPOINT_EPILOGUE_opcode:
        {
          BasicBlock yieldpoint = findOrCreateYieldpointBlock(ir, RVMThread.EPILOGUE);
          Register zero = phys.getGPR(0);
          Register TSR = phys.getTSR();
          Register TR = phys.getTR();
          Offset offset = Entrypoints.takeYieldpointField.getOffset();
          if (VM.VerifyAssertions)
            VM._assert(fits(offset, 16));
          p.insertBefore(MIR_Load.create(PPC_LInt, I(zero), A(TR), IC(PPCMaskLower16(offset))));
          p.insertBefore(MIR_Binary.create(PPC_CMPI, I(TSR), I(zero), IC(0)));
          instructionCount += 2;
          // Because the GC Map code holds a reference to the original
          // instruction, it is important that we mutate the last instruction
          // because this will be the GC point.
          MIR_CondCall.mutate0(p, PPC_BCL, null, null, I(TSR), PowerPCConditionOperand.NOT_EQUAL(), yieldpoint.makeJumpTarget());
          p.getBasicBlock().insertOut(yieldpoint);
          conditionalBranchCount++;
        }
        break;
      case YIELDPOINT_OSR_opcode:
        {
          // unconditionally branch to yield point.
          BasicBlock yieldpoint = findOrCreateYieldpointBlock(ir, RVMThread.OSROPT);
          // Because the GC Map code holds a reference to the original
          // instruction, it is important that we mutate the last instruction
          // because this will be the GC point.
          MIR_Call.mutate0(p, PPC_BL, null, null, yieldpoint.makeJumpTarget());
          p.getBasicBlock().insertOut(yieldpoint);
        }
        instructionCount++;
        break;
      default:
        if (p.operator().isConditionalBranch()) {
          conditionalBranchCount++;
        } else {
          instructionCount++;
        }
        break;
    }
  }
  // For reasonably sized methods each conditional branch is one instruction; for
  // large methods, conservatively assume each conditional branch may expand to two
  // instructions (a conditional branch around an unconditional far branch).
  if ((instructionCount + conditionalBranchCount) > AssemblerOpt.MAX_COND_DISPL) {
    machinecodeLength = instructionCount + 2 * conditionalBranchCount;
  } else {
    machinecodeLength = instructionCount + conditionalBranchCount;
  }
  if ((machinecodeLength & ~AssemblerOpt.MAX_24_BITS) != 0) {
    throw new OptimizingCompilerException("CodeGen", "method too large to compile:", AssemblerOpt.MAX_24_BITS);
  }
  return machinecodeLength;
}
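The tail of expand above is why conditional branches are counted separately: on PowerPC a conditional branch has a short displacement, so in a very large method each one may have to be assembled as a conditional branch around an unconditional far branch. A minimal standalone sketch of that sizing rule follows; the class name and constants are placeholders for illustration, not the real AssemblerOpt values.

// Sketch only: placeholder constants stand in for AssemblerOpt.MAX_COND_DISPL
// and AssemblerOpt.MAX_24_BITS; the real values live in the opt assembler.
final class MachineCodeSizeBoundSketch {
  private static final int MAX_COND_DISPL = 1 << 13;     // hypothetical
  private static final int MAX_24_BITS = (1 << 24) - 1;  // hypothetical

  static int upperBound(int plainInstructions, int conditionalBranches) {
    int bound;
    if (plainInstructions + conditionalBranches > MAX_COND_DISPL) {
      // Pessimistic case: every conditional branch may become a
      // branch-around pair (conditional skip + unconditional far branch).
      bound = plainInstructions + 2 * conditionalBranches;
    } else {
      bound = plainInstructions + conditionalBranches;
    }
    if ((bound & ~MAX_24_BITS) != 0) {
      throw new IllegalStateException("method too large to compile: " + bound);
    }
    return bound;
  }
}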
Use of org.jikesrvm.compilers.opt.OptimizingCompilerException in project JikesRVM by JikesRVM.
The class BURS_MemOp_Helpers, method augmentAddress.
protected final void augmentAddress(Operand op) {
  if (VM.VerifyAssertions)
    VM._assert(AddrStack != null, "No address to augment");
  if (op.isRegister()) {
    // A register operand becomes the base if none is set yet, otherwise the index.
    RegisterOperand rop = op.asRegister();
    if (AddrStack.base == null) {
      AddrStack.base = rop;
    } else if (AddrStack.index == null) {
      if (VM.VerifyAssertions)
        VM._assert(AddrStack.scale == (byte) 0);
      AddrStack.index = rop;
    } else {
      throw new OptimizingCompilerException("three base registers in address");
    }
  } else {
    // A constant operand is folded into the displacement.
    if (VM.fullyBooted) {
      if (VM.BuildFor64Addr && op instanceof IntConstantOperand)
        throw new OptimizingCompilerException("augmenting int to address in 64bit code");
      if (VM.BuildFor32Addr && op instanceof LongConstantOperand)
        throw new OptimizingCompilerException("augmenting long to address in 32bit code");
    }
    long dispTemp = op instanceof LongConstantOperand ? ((LongConstantOperand) op).value : ((IntConstantOperand) op).value;
    if (VM.VerifyAssertions && VM.BuildFor32Addr)
      opt_assert(fits(dispTemp, 32));
    Offset disp = Offset.fromLong(dispTemp);
    AddrStack.displacement = AddrStack.displacement.plus(disp);
  }
}
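augmentAddress folds one operand at a time into the address being built on top of the BURS address stack: the first register seen becomes the base, the second becomes the index, and constants accumulate into the displacement. The following self-contained sketch mimics that bookkeeping with a hypothetical AddressSketch class; it is not the real AddrStack type or its operand classes.

// Hypothetical stand-in for the address record that augmentAddress mutates.
final class AddressSketch {
  String base;         // first register folded in
  String index;        // second register folded in
  byte scale;          // must still be 0 when the index is assigned
  long displacement;   // running sum of constant operands

  void augment(Object op) {
    if (op instanceof String) {
      // Register operand: fill the base first, then the index, never a third.
      String reg = (String) op;
      if (base == null) {
        base = reg;
      } else if (index == null) {
        if (scale != 0) throw new IllegalStateException("scale already set");
        index = reg;
      } else {
        throw new IllegalStateException("three base registers in address");
      }
    } else if (op instanceof Number) {
      // Constant operand: accumulate into the displacement.
      displacement += ((Number) op).longValue();
    } else {
      throw new IllegalArgumentException("unexpected operand: " + op);
    }
  }
}

Feeding it a register, a constant, and another register leaves the two registers in base and index and the constant in the displacement, which is the shape the memory-operand helpers expect.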
Use of org.jikesrvm.compilers.opt.OptimizingCompilerException in project JikesRVM by JikesRVM.
The class OptTestHarness, method compileMethodsInVector.
private void compileMethodsInVector() {
  // Compile all baseline methods first
  int size = baselineMethodVector.size();
  output.sysOutPrintln("Compiling " + size + " methods baseline");
  // Compile all methods in baseline vector
  for (int i = 0; i < size; i++) {
    NormalMethod method = (NormalMethod) baselineMethodVector.get(i);
    CompiledMethod cm = BaselineCompiler.compile(method);
    method.replaceCompiledMethod(cm);
    if (printCodeAddress) {
      output.sysOutPrintln(compiledMethodMessage(method));
    }
  }
  // Now compile all methods in opt vector
  size = optMethodVector.size();
  output.sysOutPrintln("Compiling " + size + " methods opt");
  for (int i = 0; i < size; i++) {
    NormalMethod method = (NormalMethod) optMethodVector.get(i);
    OptOptions opts = optOptionsVector.get(i);
    try {
      CompilationPlan cp = new CompilationPlan(method, OptimizationPlanner.createOptimizationPlan(opts), null, opts);
      CompiledMethod cm = OptimizingCompiler.compile(cp);
      method.replaceCompiledMethod(cm);
      if (printCodeAddress) {
        output.sysOutPrintln(compiledMethodMessage(method));
      }
    } catch (OptimizingCompilerException e) {
      if (e.isFatal && VM.ErrorsFatal) {
        e.printStackTrace();
        VM.sysFail("Internal vm error: " + e);
      } else {
        output.sysErrPrintln("SKIPPING opt-compilation of " + method + ":\n " + e.getMessage());
        if (opts.PRINT_METHOD) {
          e.printStackTrace();
        }
      }
    }
  }
}
Use of org.jikesrvm.compilers.opt.OptimizingCompilerException in project JikesRVM by JikesRVM.
The class BranchOptimizations, method booleanCompareHelper.
/**
 * Replace a conditional branch with an equivalent boolean operation:
 *
 * <pre>
 * 1) IF br != 0 THEN x=1 ELSE x=0   replaced by   INT_MOVE x=br
 *    IF br == 0 THEN x=0 ELSE x=1
 * 2) IF br == 0 THEN x=1 ELSE x=0   replaced by   BOOLEAN_NOT x=br
 *    IF br != 0 THEN x=0 ELSE x=1
 * 3) IF v1 ~ v2  THEN x=1 ELSE x=0  replaced by   BOOLEAN_CMP x=v1,v2,~
 * </pre>
 *
 * @param cb the conditional branch instruction
 * @param res the result operand
 * @param val1 the value being compared
 * @param val2 the value being compared with
 * @param cond the comparison condition
 */
private void booleanCompareHelper(Instruction cb, RegisterOperand res, Operand val1, Operand val2, ConditionOperand cond) {
  if ((val1 instanceof RegisterOperand) && ((RegisterOperand) val1).getType().isBooleanType() && (val2 instanceof IntConstantOperand)) {
    int value = ((IntConstantOperand) val2).value;
    if (VM.VerifyAssertions && (value != 0) && (value != 1)) {
      throw new OptimizingCompilerException("Invalid boolean value");
    }
    int c = cond.evaluate(value, 0);
    if (c == ConditionOperand.TRUE) {
      Unary.mutate(cb, BOOLEAN_NOT, res, val1);
      return;
    } else if (c == ConditionOperand.FALSE) {
      Move.mutate(cb, INT_MOVE, res, val1);
      return;
    }
  }
  BooleanCmp.mutate(cb, (cb.operator() == REF_IFCMP) ? BOOLEAN_CMP_ADDR : BOOLEAN_CMP_INT, res, val1, val2, cond, new BranchProfileOperand());
}
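The constant cases above hinge on cond.evaluate(value, 0): when the comparison of a boolean-typed operand against 0 or 1 is decided at compile time, the whole branch collapses to either a move or a boolean-not of the operand. The sketch below replays that decision for the == and != conditions only; the enum and helper method are illustrative and not part of the JikesRVM API.

// Illustrative folding behind cases 1 and 2 of the comment above.
final class BooleanCompareFoldSketch {
  enum Fold { INT_MOVE, BOOLEAN_NOT, GENERAL_BOOLEAN_CMP }

  // cond is "==" or "!="; constant is the value the boolean br is compared with.
  static Fold fold(String cond, int constant) {
    if (constant != 0 && constant != 1) {
      throw new IllegalArgumentException("Invalid boolean value");
    }
    if (cond.equals("==")) {
      // br == 0 is the negation of br; br == 1 is br itself.
      return (constant == 0) ? Fold.BOOLEAN_NOT : Fold.INT_MOVE;
    }
    if (cond.equals("!=")) {
      // br != 0 is br itself; br != 1 is the negation of br.
      return (constant == 0) ? Fold.INT_MOVE : Fold.BOOLEAN_NOT;
    }
    // Anything else stays a general BOOLEAN_CMP (case 3).
    return Fold.GENERAL_BOOLEAN_CMP;
  }
}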
Use of org.jikesrvm.compilers.opt.OptimizingCompilerException in project JikesRVM by JikesRVM.
The class OptimizingCompiler, method fail.
/**
 * Abort a compilation with an error.
 * @param e The exception thrown by a compiler phase
 * @param method The method being compiled
 */
private static void fail(Throwable e, NormalMethod method) {
  OptimizingCompilerException optExn = new OptimizingCompilerException("Compiler", "failure during compilation of", method.toString());
  if (e instanceof OutOfMemoryError) {
    VM.sysWriteln("Compiler ran out of memory during compilation of ", method.toString());
    // Running out of memory is reported but not treated as a fatal compiler error.
    optExn.isFatal = false;
  } else {
    VM.sysWriteln("Compiler failure during compilation of ", method.toString());
    e.printStackTrace();
  }
  throw optExn;
}
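fail is the producer side of the isFatal flag that OptTestHarness.compileMethodsInVector consumes above: out-of-memory failures are marked non-fatal so callers can skip the method, while genuine compiler failures stay fatal. A condensed sketch of that caller-side pattern, assuming the same VM and OptimizingCompilerException fields used in the examples above and a compilation plan and method already in scope:

// Condensed from the OptTestHarness pattern shown earlier; the log text is illustrative.
try {
  CompiledMethod cm = OptimizingCompiler.compile(plan);
  method.replaceCompiledMethod(cm);
} catch (OptimizingCompilerException e) {
  if (e.isFatal && VM.ErrorsFatal) {
    e.printStackTrace();
    VM.sysFail("Internal vm error: " + e);
  } else {
    // Non-fatal (e.g. the compiler ran out of memory): keep the method's
    // existing compiled code and move on.
    VM.sysWriteln("Skipping opt-compilation of " + method);
  }
}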