Use of org.jikesrvm.compilers.opt.ir.Instruction in project JikesRVM by JikesRVM.
The class InstrumentationSamplingFramework, method prependStore.
/**
* Prepend a store of the global counter to the given basic block.
*
* WARNING: Tested in LIR only!
*
* @param bb The block to prepend the store to
* @param ir The IR
*/
private void prependStore(BasicBlock bb, IR ir) {
if (DEBUG)
VM.sysWriteln("Adding store to " + bb);
Instruction store = null;
if (ir.options.ADAPTIVE_PROCESSOR_SPECIFIC_COUNTER) {
store = Store.create(INT_STORE, cbsReg.copyRO(), ir.regpool.makeTROp(), IRTools.AC(AosEntrypoints.threadCBSField.getOffset()), new LocationOperand(AosEntrypoints.threadCBSField));
bb.prependInstruction(store);
} else {
if (ir.isHIR()) {
store = PutStatic.create(PUTSTATIC, cbsReg.copyRO(), new AddressConstantOperand(AosEntrypoints.globalCBSField.getOffset()), new LocationOperand(AosEntrypoints.globalCBSField));
bb.prependInstruction(store);
} else {
Instruction dummy = Load.create(INT_LOAD, null, null, null, null);
bb.prependInstruction(dummy);
store = Store.create(INT_STORE, cbsReg.copyRO(), ir.regpool.makeJTOCOp(), IRTools.AC(AosEntrypoints.globalCBSField.getOffset()), new LocationOperand(AosEntrypoints.globalCBSField));
dummy.insertBefore(store);
dummy.remove();
}
}
}
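The LIR branch above prepends the store indirectly: a throwaway load is prepended first, the real store is spliced in front of it, and the placeholder is then removed. A minimal sketch of that placeholder idiom follows, using the same IR calls but with illustrative names (insertAtBlockHead and realStore are not JikesRVM API):

void insertAtBlockHead(BasicBlock block, Instruction realStore) {
  Instruction placeholder = Load.create(INT_LOAD, null, null, null, null); // throwaway anchor, never emitted
  block.prependInstruction(placeholder);  // the anchor now sits at the head of the block
  placeholder.insertBefore(realStore);    // the real store ends up ahead of the anchor
  placeholder.remove();                   // discard the anchor, leaving only the store
}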
Use of org.jikesrvm.compilers.opt.ir.Instruction in project JikesRVM by JikesRVM.
The class LowerInstrumentation, method lowerInstrumentation.
/**
* Actually perform the lowering
*
* @param ir the governing IR
*/
static void lowerInstrumentation(IR ir) {
/*
for (Enumeration<BasicBlock> bbe = ir.getBasicBlocks();
bbe.hasMoreElements(); ) {
BasicBlock bb = bbe.nextElement();
bb.printExtended();
}
*/
ArrayList<Instruction> instrumentedInstructions = new ArrayList<Instruction>();
// Go through all instructions and remember the instrumented ones in a list,
// so that mutating them later does not disturb the enumeration.
for (Enumeration<BasicBlock> bbe = ir.getBasicBlocks(); bbe.hasMoreElements(); ) {
BasicBlock bb = bbe.nextElement();
Instruction i = bb.firstInstruction();
while (i != null && i != bb.lastInstruction()) {
if (i.operator() == INSTRUMENTED_EVENT_COUNTER) {
instrumentedInstructions.add(i);
}
i = i.nextInstructionInCodeOrder();
}
}
// Now go through the instrumented instructions and have
// the counter manager convert them into real instructions.
for (final Instruction i : instrumentedInstructions) {
// Have the counter manager for this data convert this into the
// actual counting code. For now, we hard-code the counter
// manager. Ideally it should be stored in the instruction
// (to allow multiple counter managers; it would also make this
// code independent of the adaptive system).
InstrumentedEventCounterManager counterManager = Instrumentation.eventCounterManager;
counterManager.mutateOptEventCounterInstruction(i, ir);
}
/*
for (Enumeration<BasicBlock> bbe = ir.getBasicBlocks();
bbe.hasMoreElements(); ) {
BasicBlock bb = bbe.nextElement();
bb.printExtended();
}
*/
}
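The comment above hints at why there are two passes: the instrumented instructions are first collected into a list and only mutated afterwards, so the basic-block and instruction enumerations are never walked while the code underneath them is being rewritten. A hedged sketch of that collect-then-mutate pattern; rewrite(...) is a placeholder for the call to mutateOptEventCounterInstruction, not a real API:

ArrayList<Instruction> worklist = new ArrayList<Instruction>();
for (Enumeration<BasicBlock> e = ir.getBasicBlocks(); e.hasMoreElements(); ) {
  BasicBlock bb = e.nextElement();
  for (Instruction i = bb.firstInstruction(); i != null && i != bb.lastInstruction(); i = i.nextInstructionInCodeOrder()) {
    if (i.operator() == INSTRUMENTED_EVENT_COUNTER) {
      worklist.add(i);   // remember it; do not touch the IR yet
    }
  }
}
for (Instruction i : worklist) {
  rewrite(i, ir);        // second pass: now it is safe to mutate each instruction
}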
Use of org.jikesrvm.compilers.opt.ir.Instruction in project JikesRVM by JikesRVM.
The class FinalMIRExpansion, method expand.
/**
* @param ir the IR to expand
* @return return value is garbage for IA32
*/
public static int expand(IR ir) {
PhysicalRegisterSet phys = ir.regpool.getPhysicalRegisterSet().asIA32();
MachineCodeOffsets mcOffsets = ir.MIRInfo.mcOffsets;
for (Instruction next, p = ir.firstInstructionInCodeOrder(); p != null; p = next) {
next = p.nextInstructionInCodeOrder();
mcOffsets.setMachineCodeOffset(p, -1);
switch(p.getOpcode()) {
case IA32_MOVAPS_opcode:
// a reg-reg move turned into a memory move where we can't guarantee alignment
if (MIR_Move.getResult(p).isMemory() || MIR_Move.getValue(p).isMemory()) {
MIR_Move.mutate(p, IA32_MOVSS, MIR_Move.getClearResult(p), MIR_Move.getClearValue(p));
}
break;
case IA32_MOVAPD_opcode:
// a reg-reg move turned into a memory move where we can't guarantee alignment
if (MIR_Move.getResult(p).isMemory() || MIR_Move.getValue(p).isMemory()) {
MIR_Move.mutate(p, IA32_MOVSD, MIR_Move.getClearResult(p), MIR_Move.getClearValue(p));
}
break;
case IA32_TEST_opcode:
// don't bother telling the rest of the compiler that the memory operand
// must be first; we can just commute it here.
if (MIR_Test.getVal2(p).isMemory()) {
Operand tmp = MIR_Test.getClearVal1(p);
MIR_Test.setVal1(p, MIR_Test.getClearVal2(p));
MIR_Test.setVal2(p, tmp);
}
break;
case NULL_CHECK_opcode:
{
// mutate this into a TRAPIF, and then fall through to the
// TRAP_IF case.
Operand ref = NullCheck.getRef(p);
MIR_TrapIf.mutate(p, IA32_TRAPIF, null, ref.copy(), IC(0), IA32ConditionOperand.EQ(), TrapCodeOperand.NullPtr());
}
// There is no break statement here on purpose!
case IA32_TRAPIF_opcode:
{
// split the basic block right before the IA32_TRAPIF
BasicBlock thisBlock = p.getBasicBlock();
BasicBlock trap = thisBlock.createSubBlock(p.getBytecodeIndex(), ir, 0f);
thisBlock.insertOut(trap);
BasicBlock nextBlock = thisBlock.splitNodeWithLinksAt(p, ir);
thisBlock.insertOut(trap);
TrapCodeOperand tc = MIR_TrapIf.getClearTrapCode(p);
p.remove();
mcOffsets.setMachineCodeOffset(nextBlock.firstInstruction(), -1);
// add code to thisBlock to conditionally jump to trap
Instruction cmp = MIR_Compare.create(IA32_CMP, MIR_TrapIf.getVal1(p).copy(), MIR_TrapIf.getVal2(p).copy());
if (p.isMarkedAsPEI()) {
// The trap if was explicitly marked, which means that it has
// a memory operand into which we've folded a null check.
// Actually need a GC map for both the compare and the INT.
cmp.markAsPEI();
cmp.copyPosition(p);
ir.MIRInfo.gcIRMap.insertTwin(p, cmp);
}
thisBlock.appendInstruction(cmp);
thisBlock.appendInstruction(MIR_CondBranch.create(IA32_JCC, (IA32ConditionOperand) MIR_TrapIf.getCond(p).copy(), trap.makeJumpTarget(), null));
// add block at end to hold trap instruction, and
// insert trap sequence
ir.cfg.addLastInCodeOrder(trap);
if (tc.isArrayBounds()) {
// attempt to store index expression in processor object for
// C trap handler
Operand index = MIR_TrapIf.getVal2(p);
if (!(index instanceof RegisterOperand || index instanceof IntConstantOperand)) {
// index was spilled, and we can't get it back here.
index = IC(0xdeadbeef);
}
MemoryOperand mo = MemoryOperand.BD(ir.regpool.makeTROp(), ArchEntrypoints.arrayIndexTrapParamField.getOffset(), (byte) 4, null, null);
trap.appendInstruction(MIR_Move.create(IA32_MOV, mo, index.copy()));
}
// NOTE: must make p the trap instruction: it is the GC point!
// IMPORTANT: must also inform the GCMap that the instruction has
// been moved!!!
trap.appendInstruction(MIR_Trap.mutate(p, IA32_INT, null, tc));
ir.MIRInfo.gcIRMap.moveToEnd(p);
if (tc.isStackOverflow()) {
// only stackoverflow traps resume at next instruction.
trap.appendInstruction(MIR_Branch.create(IA32_JMP, nextBlock.makeJumpTarget()));
}
}
break;
case IA32_FMOV_ENDING_LIVE_RANGE_opcode:
{
Operand result = MIR_Move.getResult(p);
Operand value = MIR_Move.getValue(p);
if (result.isRegister() && value.isRegister()) {
if (result.similar(value)) {
// eliminate useless move
p.remove();
} else {
int i = PhysicalRegisterSet.getFPRIndex(result.asRegister().getRegister());
int j = PhysicalRegisterSet.getFPRIndex(value.asRegister().getRegister());
if (i == 0) {
MIR_XChng.mutate(p, IA32_FXCH, result, value);
} else if (j == 0) {
MIR_XChng.mutate(p, IA32_FXCH, value, result);
} else {
expandFmov(p, phys);
}
}
} else {
expandFmov(p, phys);
}
break;
}
case DUMMY_DEF_opcode:
case DUMMY_USE_opcode:
case REQUIRE_ESP_opcode:
case ADVISE_ESP_opcode:
p.remove();
break;
case IA32_FMOV_opcode:
expandFmov(p, phys);
break;
case IA32_MOV_opcode:
// Convert 0L to 0 to allow optimization into XOR.
if (MIR_Move.getResult(p).isRegister() && MIR_Move.getValue(p).isLongConstant() && MIR_Move.getValue(p).asLongConstant().value == 0L) {
MIR_Move.setValue(p, IC(0));
}
// Replace result = IA32_MOV 0 with result = IA32_XOR result, result
if (MIR_Move.getResult(p).isRegister() && MIR_Move.getValue(p).isIntConstant() && MIR_Move.getValue(p).asIntConstant().value == 0) {
// Calculate what flags are defined in coming instructions before a use of a flag or BBend
Instruction x = next;
int futureDefs = 0;
while (!BBend.conforms(x) && !PhysicalDefUse.usesEFLAGS(x.operator())) {
futureDefs |= x.operator().implicitDefs;
x = x.nextInstructionInCodeOrder();
}
// If the flags will be destroyed prior to use or we reached the end of the basic block
if (BBend.conforms(x) || (futureDefs & PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF) == PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF) {
Operand result = MIR_Move.getClearResult(p);
MIR_BinaryAcc.mutate(p, IA32_XOR, result, result.copy());
}
}
break;
case IA32_SET__B_opcode:
// Replace <cmp>, set__b, movzx__b with xor, <cmp>, set__b
if (MIR_Set.getResult(p).isRegister() && MIR_Unary.conforms(next) && (next.operator() == IA32_MOVZX__B) && MIR_Unary.getResult(next).isRegister() && MIR_Unary.getVal(next).similar(MIR_Unary.getResult(next)) && MIR_Unary.getVal(next).similar(MIR_Set.getResult(p))) {
// Find instruction in this basic block that defines flags
Instruction x = p.prevInstructionInCodeOrder();
Operand result = MIR_Unary.getResult(next);
boolean foundCmp = false;
outer: while (!Label.conforms(x)) {
Enumeration<Operand> e = x.getUses();
while (e.hasMoreElements()) {
// We can't use an xor to clear the register if that register is
// used by the <cmp> or an intervening instruction
if (e.nextElement().similar(result)) {
break outer;
}
}
if (PhysicalDefUse.definesEFLAGS(x.operator()) && !PhysicalDefUse.usesEFLAGS(x.operator())) {
// we found a <cmp> that doesn't use the result or the flags
// that would be clobbered by the xor
foundCmp = true;
break outer;
}
x = x.prevInstructionInCodeOrder();
}
if (foundCmp) {
// We found the <cmp>, mutate the movzx__b into an xor and insert it before the <cmp>
next.remove();
MIR_BinaryAcc.mutate(next, IA32_XOR, result, MIR_Unary.getVal(next));
x.insertBefore(next);
// get ready for the next instruction
next = p.nextInstructionInCodeOrder();
}
}
break;
case IA32_LEA_opcode:
{
// Sometimes we're overeager in BURS in using LEAs, and after register
// allocation we can simplify to the accumulate form
// replace reg1 = LEA [reg1 + reg2] with reg1 = reg1 + reg2
// replace reg1 = LEA [reg1 + c1] with reg1 = reg1 + c1
// replace reg1 = LEA [reg1 << c1] with reg1 = reg1 << c1
MemoryOperand value = MIR_Lea.getValue(p);
RegisterOperand result = MIR_Lea.getResult(p);
if ((value.base != null && value.base.getRegister() == result.getRegister()) || (value.index != null && value.index.getRegister() == result.getRegister())) {
// Calculate what flags are defined in coming instructions before a use of a flag or BBend
Instruction x = next;
int futureDefs = 0;
while (!BBend.conforms(x) && !PhysicalDefUse.usesEFLAGS(x.operator())) {
futureDefs |= x.operator().implicitDefs;
x = x.nextInstructionInCodeOrder();
}
// If the flags will be destroyed prior to use or we reached the end of the basic block
if (BBend.conforms(x) || (futureDefs & PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF) == PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF) {
if (value.base != null && value.index != null && value.index.getRegister() == result.getRegister() && value.disp.isZero() && value.scale == 0) {
// reg1 = lea [base + reg1] -> add reg1, base
MIR_BinaryAcc.mutate(p, IA32_ADD, result, value.base);
} else if (value.base != null && value.base.getRegister() == result.getRegister() && value.index != null && value.disp.isZero() && value.scale == 0) {
// reg1 = lea [reg1 + index] -> add reg1, index
MIR_BinaryAcc.mutate(p, IA32_ADD, result, value.index);
} else if (value.base != null && value.base.getRegister() == result.getRegister() && value.index == null) {
if (VM.VerifyAssertions)
VM._assert(fits(value.disp, 32));
// reg1 = lea [reg1 + disp] -> add reg1, disp
MIR_BinaryAcc.mutate(p, IA32_ADD, result, IC(value.disp.toInt()));
} else if (value.base == null && value.index != null && value.index.getRegister() == result.getRegister() && value.scale == 0) {
if (VM.VerifyAssertions)
VM._assert(fits(value.disp, 32));
// reg1 = lea [reg1 + disp] -> add reg1, disp
MIR_BinaryAcc.mutate(p, IA32_ADD, result, IC(value.disp.toInt()));
} else if (value.base == null && value.index != null && value.index.getRegister() == result.getRegister() && value.disp.isZero()) {
// reg1 = lea [reg1 << scale] -> shl reg1, scale
if (value.scale == 0) {
p.remove();
} else if (value.scale == 1) {
MIR_BinaryAcc.mutate(p, IA32_ADD, result, value.index);
} else {
MIR_BinaryAcc.mutate(p, IA32_SHL, result, IC(value.scale));
}
}
}
}
}
break;
case IA32_FCLEAR_opcode:
expandFClear(p, ir);
break;
case IA32_JCC2_opcode:
p.insertBefore(MIR_CondBranch.create(IA32_JCC, MIR_CondBranch2.getClearCond1(p), MIR_CondBranch2.getClearTarget1(p), MIR_CondBranch2.getClearBranchProfile1(p)));
MIR_CondBranch.mutate(p, IA32_JCC, MIR_CondBranch2.getClearCond2(p), MIR_CondBranch2.getClearTarget2(p), MIR_CondBranch2.getClearBranchProfile2(p));
break;
case CALL_SAVE_VOLATILE_opcode:
p.changeOperatorTo(IA32_CALL);
break;
case IA32_LOCK_CMPXCHG_opcode:
p.insertBefore(MIR_Empty.create(IA32_LOCK));
p.changeOperatorTo(IA32_CMPXCHG);
break;
case IA32_LOCK_CMPXCHG8B_opcode:
p.insertBefore(MIR_Empty.create(IA32_LOCK));
p.changeOperatorTo(IA32_CMPXCHG8B);
break;
case YIELDPOINT_PROLOGUE_opcode:
expandYieldpoint(p, ir, Entrypoints.optThreadSwitchFromPrologueMethod, IA32ConditionOperand.NE());
break;
case YIELDPOINT_EPILOGUE_opcode:
expandYieldpoint(p, ir, Entrypoints.optThreadSwitchFromEpilogueMethod, IA32ConditionOperand.NE());
break;
case YIELDPOINT_BACKEDGE_opcode:
expandYieldpoint(p, ir, Entrypoints.optThreadSwitchFromBackedgeMethod, IA32ConditionOperand.GT());
break;
case YIELDPOINT_OSR_opcode:
// must yield, does not check threadSwitch request
expandUnconditionalYieldpoint(p, ir, Entrypoints.optThreadSwitchFromOsrOptMethod);
break;
}
}
return 0;
}
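Both the MOV-0-to-XOR rewrite and the LEA strength reduction above repeat the same scan: walk forward from the instruction and prove that EFLAGS is dead, i.e. that every condition flag is redefined before anything reads the flags, or the basic block ends first. A hedged sketch of that check factored into a helper; the name eflagsDeadAfter is illustrative, not JikesRVM API:

static boolean eflagsDeadAfter(Instruction start) {
  Instruction x = start;
  int futureDefs = 0;
  while (!BBend.conforms(x) && !PhysicalDefUse.usesEFLAGS(x.operator())) {
    futureDefs |= x.operator().implicitDefs;  // accumulate flags written before the first reader
    x = x.nextInstructionInCodeOrder();
  }
  // Safe to clobber the flags if the block ends first, or if every flag bit
  // is overwritten again before anything reads EFLAGS.
  return BBend.conforms(x) ||
      (futureDefs & PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF) == PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF;
}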
Use of org.jikesrvm.compilers.opt.ir.Instruction in project JikesRVM by JikesRVM.
The class AssemblerOpt, method backpatchForwardBranches.
/**
* Back-patches any forward branches to the given instruction.
* <p>
* Note: The updated index into the machine code array would normally need
* to be returned but this method currently does not modify the index.
*
* @param branchTarget the LABEL instruction to process
* @param machinecodes machine code array
* @param machineCodeIndex current index into the machine code array
* @param mcOffsets machine code offsets
*/
private void backpatchForwardBranches(Instruction branchTarget, CodeArray machinecodes, int machineCodeIndex, MachineCodeOffsets mcOffsets) {
Iterator<Instruction> branchSources = branchBackPatching.getBranchSources(branchTarget);
while (branchSources.hasNext()) {
Instruction branchStmt = branchSources.next();
int bo = mcOffsets.getMachineCodeOffset(branchStmt) - (1 << LG_INSTRUCTION_WIDTH);
int bi = bo >> LG_INSTRUCTION_WIDTH;
int targetOffset = (machineCodeIndex - bi) << LG_INSTRUCTION_WIDTH;
boolean setLink = false;
if (targetOffset > MAX_DISPL << LG_INSTRUCTION_WIDTH) {
throw new OptimizingCompilerException("CodeGen", "Branch positive offset too large: ", targetOffset);
}
switch(branchStmt.getOpcode()) {
case PPC_B_opcode:
case PPC_BL_opcode:
machinecodes.set(bi, machinecodes.get(bi) | targetOffset & LI_MASK);
break;
case PPC_DATA_LABEL_opcode:
machinecodes.set(bi, targetOffset);
break;
// nothing needs to be back-patched for IG_PATCH_POINT; code patching checks the range
// of the target offset, and will fail if it is out of range
case IG_PATCH_POINT_opcode:
// do nothing
break;
case PPC_BCL_opcode:
setLink = true;
// fall through!
default:
// conditional branches
if (targetOffset <= MAX_COND_DISPL << 2) {
// one word is enough
machinecodes.set(bi, machinecodes.get(bi) | targetOffset & BD_MASK);
if (DEBUG) {
VM.sysWriteln("**** Forward Short Cond. Branch ****");
VM.sysWriteln(disasm(machinecodes.get(bi), 0));
}
} else {
// one word is not enough
// we're moving the "real" branch ahead 1 instruction
// if it's a GC point (eg BCL for yieldpoint) then we must
// make sure the GCMap is generated at the correct mc offset.
int oldOffset = mcOffsets.getMachineCodeOffset(branchStmt);
mcOffsets.setMachineCodeOffset(branchStmt, oldOffset + (1 << LG_INSTRUCTION_WIDTH));
// flip the condition and skip the next branch instruction
machinecodes.set(bi, flipCondition(machinecodes.get(bi)));
machinecodes.set(bi, machinecodes.get(bi) | (2 << LG_INSTRUCTION_WIDTH));
// turn off link bit.
machinecodes.set(bi, machinecodes.get(bi) & 0xfffffffe);
// make a long branch
machinecodes.set(bi + 1, Btemplate | ((targetOffset - 4) & LI_MASK));
if (setLink) {
// turn on link bit.
machinecodes.set(bi + 1, machinecodes.get(bi + 1) | 1);
}
if (DEBUG) {
VM.sysWriteln("**** Forward Long Cond. Branch ****");
VM.sysWriteln(disasm(machinecodes.get(bi), 0));
VM.sysWriteln(disasm(machinecodes.get(bi + 1), 0));
}
}
break;
}
unresolvedBranches--;
}
mcOffsets.setMachineCodeOffset(branchTarget, machineCodeIndex << LG_INSTRUCTION_WIDTH);
}
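The index arithmetic above converts between machine-code array indices and byte offsets via LG_INSTRUCTION_WIDTH (2 on PPC, since every instruction is four bytes wide); note that the emission loop in genCode below records each instruction's offset as the offset just past its emitted word. A worked sketch of the displacement computation and the short-versus-long decision, restating the logic above with the values treated as illustrative:

// the offset recorded for the branch points just past its word, so back up one instruction
int branchByteOffset = mcOffsets.getMachineCodeOffset(branchStmt) - (1 << LG_INSTRUCTION_WIDTH);
int bi = branchByteOffset >> LG_INSTRUCTION_WIDTH;                    // array index of the branch word
int targetOffset = (machineCodeIndex - bi) << LG_INSTRUCTION_WIDTH;   // byte displacement to the label
if (targetOffset <= MAX_COND_DISPL << 2) {
  // fits in the BD displacement field: OR it into the existing conditional branch word
} else {
  // too far: flip the condition, make the conditional branch skip one word,
  // and emit an unconditional B whose LI displacement field is much wider
}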
Use of org.jikesrvm.compilers.opt.ir.Instruction in project JikesRVM by JikesRVM.
The class AssemblerOpt, method genCode.
protected final int genCode(IR ir, boolean shouldPrint) {
int mi = 0;
CodeArray machinecodes = ir.MIRInfo.machinecode;
PhysicalRegisterSet phys = ir.regpool.getPhysicalRegisterSet().asPPC();
int labelCountEstimate = ir.cfg.numberOfNodes();
branchBackPatching = new BranchInformationForBackPatching(labelCountEstimate);
boolean unsafeCondDispl = machinecodes.length() > MAX_COND_DISPL;
// boolean unsafeDispl = machinecodes.length() > MAX_DISPL;
MachineCodeOffsets mcOffsets = ir.MIRInfo.mcOffsets;
for (Instruction p = ir.firstInstructionInCodeOrder(); p != null; p = p.nextInstructionInCodeOrder()) {
int inst = p.operator().instTemplate();
switch(p.getOpcode()) {
case LABEL_opcode:
backpatchForwardBranches(p, machinecodes, mi, mcOffsets);
break;
case BBEND_opcode:
case UNINT_BEGIN_opcode:
case UNINT_END_opcode:
case GUARD_MOVE_opcode:
case GUARD_COMBINE_opcode:
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
break;
case PPC_DATA_INT_opcode:
{
int value = MIR_DataInt.getValue(p).value;
machinecodes.set(mi++, value);
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_DATA_LABEL_opcode:
{
Instruction target = MIR_DataLabel.getTarget(p).target;
int targetOffset = resolveBranch(p, target, mi, mcOffsets);
machinecodes.set(mi++, targetOffset);
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_CRAND_opcode:
case PPC_CRANDC_opcode:
case PPC_CROR_opcode:
case PPC_CRORC_opcode:
{
int op0 = MIR_Condition.getResultBit(p).value & REG_MASK;
int op1 = MIR_Condition.getValue1Bit(p).value & REG_MASK;
int op2 = MIR_Condition.getValue2Bit(p).value & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_ADD_opcode:
case PPC_ADDr_opcode:
case PPC_ADDC_opcode:
case PPC_ADDE_opcode:
case PPC_SUBF_opcode:
case PPC_SUBFr_opcode:
case PPC_SUBFC_opcode:
case PPC_SUBFCr_opcode:
case PPC_SUBFE_opcode:
case PPC_FADD_opcode:
case PPC_FADDS_opcode:
case PPC_FDIV_opcode:
case PPC_FDIVS_opcode:
case PPC_DIVW_opcode:
case PPC_DIVWU_opcode:
case PPC_MULLW_opcode:
case PPC_MULHW_opcode:
case PPC_MULHWU_opcode:
case PPC_FSUB_opcode:
case PPC_FSUBS_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_MULLD_opcode:
case PPC64_DIVD_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_LWZX_opcode:
case PPC_LWARX_opcode:
case PPC_LBZX_opcode:
case PPC_LHAX_opcode:
case PPC_LHZX_opcode:
case PPC_LFDX_opcode:
case PPC_LFSX_opcode:
case PPC_LIntX_opcode:
case PPC_LAddrARX_opcode:
case PPC_LAddrX_opcode:
{
int op0 = MIR_Load.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Load.getAddress(p).getRegister().number & REG_MASK;
int op2 = MIR_Load.getOffset(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_LDX_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Load.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Load.getAddress(p).getRegister().number & REG_MASK;
int op2 = MIR_Load.getOffset(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_STWX_opcode:
case PPC_STWCXr_opcode:
case PPC_STBX_opcode:
case PPC_STHX_opcode:
case PPC_STFDX_opcode:
case PPC_STFSX_opcode:
case PPC_STAddrCXr_opcode:
case PPC_STAddrX_opcode:
case PPC_STAddrUX_opcode:
{
int op0 = MIR_Store.getValue(p).getRegister().number & REG_MASK;
int op1 = MIR_Store.getAddress(p).getRegister().number & REG_MASK;
int op2 = MIR_Store.getOffset(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_LWZUX_opcode:
case PPC_LBZUX_opcode:
case PPC_LIntUX_opcode:
case PPC_LAddrUX_opcode:
{
int op0 = MIR_LoadUpdate.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_LoadUpdate.getAddress(p).getRegister().number & REG_MASK;
int op2 = MIR_LoadUpdate.getOffset(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_LWZU_opcode:
{
int op0 = MIR_LoadUpdate.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_LoadUpdate.getAddress(p).getRegister().number & REG_MASK;
int op2 = MIR_LoadUpdate.getOffset(p).asIntConstant().value & SHORT_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_TW_opcode:
case PPC_TAddr_opcode:
{
int op0 = MIR_Trap.getCond(p).value;
int op1 = MIR_Trap.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Trap.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_TD_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Trap.getCond(p).value;
int op1 = MIR_Trap.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Trap.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_TWI_opcode:
{
int op0 = MIR_Trap.getCond(p).value;
int op1 = MIR_Trap.getValue1(p).getRegister().number & REG_MASK;
int op2;
if (VM.BuildFor64Addr && MIR_Trap.getValue2(p).isLongConstant()) {
op2 = ((int) MIR_Trap.getValue2(p).asLongConstant().value) & SHORT_MASK;
} else {
op2 = MIR_Trap.getValue2(p).asIntConstant().value & SHORT_MASK;
}
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_TDI_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Trap.getCond(p).value;
int op1 = MIR_Trap.getValue1(p).getRegister().number & REG_MASK;
int op2;
if (MIR_Trap.getValue2(p).isLongConstant()) {
op2 = ((int) MIR_Trap.getValue2(p).asLongConstant().value) & SHORT_MASK;
} else {
op2 = MIR_Trap.getValue2(p).asIntConstant().value & SHORT_MASK;
}
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case NULL_CHECK_opcode:
/* Just a nicer name for a twi <ref> less than 1 */
{
int op0 = PowerPCTrapOperand.LOWER;
int op1 = ((RegisterOperand) NullCheck.getRef(p)).getRegister().number & REG_MASK;
int op2 = 1;
inst = VM.BuildFor64Addr ? PPC64_TDI.instTemplate() : PPC_TWI.instTemplate();
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_LDI_opcode:
case PPC_LDIS_opcode:
// D_Form. pseudo instructions derived from PPC_ADDI and PPC_ADDIS
{
int op0 = MIR_Unary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Unary.getValue(p).asIntConstant().value & SHORT_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | op1));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_ADDIC_opcode:
case PPC_ADDICr_opcode:
case PPC_SUBFIC_opcode:
case PPC_MULLI_opcode:
case PPC_ADDI_opcode:
case PPC_ADDIS_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asIntConstant().value & SHORT_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_CNTLZW_opcode:
case PPC_CNTLZAddr_opcode:
case PPC_EXTSB_opcode:
case PPC_EXTSBr_opcode:
case PPC_EXTSH_opcode:
case PPC_EXTSHr_opcode:
{
int op0 = MIR_Unary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Unary.getValue(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_EXTSW_opcode:
case PPC64_EXTSWr_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Unary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Unary.getValue(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_EXTZW_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Unary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Unary.getValue(p).asRegister().getRegister().number & REG_MASK;
// op3low = 0, so op3 == 32
int op3high = 1;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op3high << 5)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_ADDZE_opcode:
case PPC_SUBFZE_opcode:
case PPC_NEG_opcode:
case PPC_NEGr_opcode:
case PPC_ADDME_opcode:
{
int op0 = MIR_Unary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Unary.getValue(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
// Bit positions of op1 and op2 are reversed.
case PPC_XORI_opcode:
case PPC_XORIS_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asIntConstant().value & SHORT_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
// Bit positions of op1 and op2 are reversed.
case PPC_AND_opcode:
case PPC_ANDr_opcode:
case PPC_NAND_opcode:
case PPC_NANDr_opcode:
case PPC_ANDC_opcode:
case PPC_ANDCr_opcode:
case PPC_OR_opcode:
case PPC_ORr_opcode:
case PPC_NOR_opcode:
case PPC_NORr_opcode:
case PPC_ORC_opcode:
case PPC_ORCr_opcode:
case PPC_XOR_opcode:
case PPC_XORr_opcode:
case PPC_EQV_opcode:
case PPC_EQVr_opcode:
case PPC_SLW_opcode:
case PPC_SLWr_opcode:
case PPC_SRW_opcode:
case PPC_SRWr_opcode:
case PPC_SRAW_opcode:
case PPC_SRAWr_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_SLD_opcode:
case PPC64_SLDr_opcode:
case PPC64_SRD_opcode:
case PPC64_SRDr_opcode:
case PPC64_SRAD_opcode:
case PPC64_SRADr_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_MOVE_opcode:
/* pseudo opcode, equal to PPC_ORI with 0 */
{
int op0 = MIR_Move.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Move.getValue(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_SRWI_opcode:
/* pseudo opcode, equal to rlwinm Rx,Ry,32-n,n,31 */
case PPC_SRWIr_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int shift = MIR_Binary.getValue2(p).asIntConstant().value & REG_MASK;
int op2 = (32 - shift);
int op3 = shift;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11) | (op3 << 6)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
// Bit positions of op1 and op2 are reversed.
case PPC_SLWI_opcode:
case PPC_SLWIr_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int shift = MIR_Binary.getValue2(p).asIntConstant().value & REG_MASK;
int op2 = shift;
int op3 = (31 - shift);
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11) | (op3 << 1)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_SRAWI_opcode:
case PPC_SRAWIr_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asIntConstant().value & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_SRAddrI_opcode:
{
if (VM.BuildFor32Addr) {
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int shift = MIR_Binary.getValue2(p).asIntConstant().value & REG_MASK;
int op2 = (32 - shift);
int op3 = shift;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11) | (op3 << 6)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
} else {
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op3 = MIR_Binary.getValue2(p).asIntConstant().value & SIXBIT_MASK;
int op2 = 64 - op3;
int op2low = op2 & 0x1F;
int op2high = (op2 & 0x20) >>> 5;
int op3low = op3 & 0x1F;
int op3high = (op3 & 0x20) >>> 5;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2low << 11) | (op2high << 1) | (op3low << 6) | (op3high << 5)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
}
break;
case PPC_SRAAddrI_opcode:
{
if (VM.BuildFor32Addr) {
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asIntConstant().value & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
} else {
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asIntConstant().value & SIXBIT_MASK;
int op2low = op2 & 0x1F;
int op2high = (op2 & 0x20) >>> 5;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2low << 11) | (op2high << 1)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
}
break;
case PPC64_SRADI_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asIntConstant().value & SIXBIT_MASK;
int op2low = op2 & 0x1F;
int op2high = (op2 & 0x20) >>> 5;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2low << 11) | (op2high << 1)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_SRDI_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op3 = MIR_Binary.getValue2(p).asIntConstant().value & SIXBIT_MASK;
int op2 = 64 - op3;
int op2low = op2 & 0x1F;
int op2high = (op2 & 0x20) >>> 5;
int op3low = op3 & 0x1F;
int op3high = (op3 & 0x20) >>> 5;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2low << 11) | (op2high << 1) | (op3low << 6) | (op3high << 5)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_SLDI_opcode: // shorthand via RLDICR
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int shift = MIR_Binary.getValue2(p).asIntConstant().value & SIXBIT_MASK;
int op2 = shift;
int op2low = op2 & 0x1F;
int op2high = (op2 & 0x20) >>> 5;
int op3 = 63 - shift;
int op3low = op3 & 0x1F;
int op3high = (op3 & 0x20) >>> 5;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2low << 11) | (op2high << 1) | (op3low << 6) | (op3high << 5)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_RLDICR_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_RotateAndMask.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_RotateAndMask.getValue(p).getRegister().number & REG_MASK;
// shift
int op2 = MIR_RotateAndMask.getShift(p).asIntConstant().value & SIXBIT_MASK;
int op2low = op2 & 0x1F;
int op2high = (op2 & 0x20) >>> 5;
// mask
int op3 = MIR_RotateAndMask.getMaskEnd(p).value & SIXBIT_MASK;
int op3low = op3 & 0x1F;
int op3high = (op3 & 0x20) >>> 5;
if (VM.VerifyAssertions) {
int op4 = MIR_RotateAndMask.getMaskBegin(p).value & SIXBIT_MASK;
VM._assert(op4 == 0);
}
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2low << 11) | (op2high << 1) | (op3low << 6) | (op3high << 5)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_RLDICL_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_RotateAndMask.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_RotateAndMask.getValue(p).getRegister().number & REG_MASK;
// shift
int op2 = MIR_RotateAndMask.getShift(p).asIntConstant().value & SIXBIT_MASK;
int op2low = op2 & 0x1F;
int op2high = (op2 & 0x20) >>> 5;
// mask
int op3 = MIR_RotateAndMask.getMaskBegin(p).value & SIXBIT_MASK;
int op3low = op3 & 0x1F;
int op3high = (op3 & 0x20) >>> 5;
if (VM.VerifyAssertions) {
int op4 = MIR_RotateAndMask.getMaskEnd(p).value & SIXBIT_MASK;
VM._assert(op4 == 63);
}
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2low << 11) | (op2high << 1) | (op3low << 6) | (op3high << 5)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
// Bit positions of op1 and op2 are reversed.
case PPC_ANDIr_opcode:
case PPC_ANDISr_opcode:
case PPC_ORI_opcode:
case PPC_ORIS_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asIntConstant().value & SHORT_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_RLWINM_opcode:
case PPC_RLWINMr_opcode:
{
int op0 = MIR_RotateAndMask.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_RotateAndMask.getValue(p).getRegister().number & REG_MASK;
int op2 = MIR_RotateAndMask.getShift(p).asIntConstant().value & REG_MASK;
int op3 = MIR_RotateAndMask.getMaskBegin(p).value & REG_MASK;
int op4 = MIR_RotateAndMask.getMaskEnd(p).value & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11) | (op3 << 6) | (op4 << 1)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_RLWIMI_opcode:
case PPC_RLWIMIr_opcode:
{
int op0 = MIR_RotateAndMask.getResult(p).getRegister().number & REG_MASK;
int op0f = MIR_RotateAndMask.getSource(p).getRegister().number & REG_MASK;
if (op0 != op0f) {
throw new OptimizingCompilerException("CodeGen", "format for RLWIMI is incorrect");
}
int op1 = MIR_RotateAndMask.getValue(p).getRegister().number & REG_MASK;
int op2 = MIR_RotateAndMask.getShift(p).asIntConstant().value & REG_MASK;
int op3 = MIR_RotateAndMask.getMaskBegin(p).value & REG_MASK;
int op4 = MIR_RotateAndMask.getMaskEnd(p).value & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11) | (op3 << 6) | (op4 << 1)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_RLWNM_opcode:
case PPC_RLWNMr_opcode:
{
int op0 = MIR_RotateAndMask.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_RotateAndMask.getValue(p).getRegister().number & REG_MASK;
int op2 = MIR_RotateAndMask.getShift(p).asRegister().getRegister().number & REG_MASK;
int op3 = MIR_RotateAndMask.getMaskBegin(p).value & REG_MASK;
int op4 = MIR_RotateAndMask.getMaskEnd(p).value & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21) | (op2 << 11) | (op3 << 6) | (op4 << 1)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_B_opcode:
{
BranchOperand o = MIR_Branch.getTarget(p);
int targetOffset = resolveBranch(p, o.target, mi, mcOffsets);
machinecodes.set(mi++, inst | (targetOffset & LI_MASK));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_BLR_opcode:
case PPC_BCTR_opcode:
/* p , == bcctr 0x14,BI */
{
// INDIRECT BRANCH (Target == null)
machinecodes.set(mi++, inst);
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_BC_opcode:
case PPC_BCOND_opcode:
/* p 38, BO == 001zy or 011zy */
case PPC_BCC_opcode:
/* p 38, BO == 0000y, 0001y, 0100y or 0101y */
{
// COND BRANCH
int op0 = MIR_CondBranch.getValue(p).getRegister().number & REG_MASK;
int op1 = MIR_CondBranch.getCond(p).value;
// Add (CR field)<<2 to make BI represent the correct
// condition bit (0..3) in the correct condition field (0..7).
// 1 <= op <= 7
int bo_bi = op0 << 2 | op1;
BranchOperand o = MIR_CondBranch.getTarget(p);
int targetOffset = resolveBranch(p, o.target, mi, mcOffsets);
if (targetOffset == 0) {
// unresolved branch
if (DEBUG)
VM.sysWriteln("**** Forward Cond. Branch ****");
machinecodes.set(mi++, inst | (bo_bi << 16));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
if (DEBUG)
VM.sysWriteln(disasm(machinecodes.get(mi - 1), 0));
if (unsafeCondDispl) {
// assume we might need two words
// for now fill with NOP
machinecodes.set(mi++, NOPtemplate);
if (DEBUG)
VM.sysWriteln(disasm(machinecodes.get(mi - 1), 0));
}
} else if (targetOffset < MIN_COND_DISPL << 2) {
// one word is not enough
if (DEBUG)
VM.sysWriteln("**** Backward Long Cond. Branch ****");
// flip the condition and skip the following branch instruction
if (DEBUG)
VM.sysWriteln(disasm(machinecodes.get(mi - 1), 0));
machinecodes.set(mi++, inst | flipCondition(bo_bi << 16) | (2 << 2));
if (DEBUG)
VM.sysWriteln(disasm(machinecodes.get(mi - 1), 0));
// make a long branch to the target
machinecodes.set(mi++, Btemplate | ((targetOffset - 4) & LI_MASK));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
if (DEBUG)
VM.sysWriteln(disasm(machinecodes.get(mi - 1), 0));
} else {
// one word is enough
if (DEBUG)
VM.sysWriteln("**** Backward Short Cond. Branch ****");
machinecodes.set(mi++, inst | (bo_bi << 16) | (targetOffset & BD_MASK));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
if (DEBUG)
VM.sysWriteln(disasm(machinecodes.get(mi - 1), 0));
}
}
break;
case PPC_BCLR_opcode:
case PPC_BCCTR_opcode:
/* p , BO == 0z10y or 0z11y */
{
// INDIRECT COND BRANCH
int op0 = MIR_CondBranch.getValue(p).getRegister().number & REG_MASK;
int op1 = MIR_CondBranch.getCond(p).value;
// Add (CR field)<<2 to make BI represent the correct
// condition bit (0..3) in the correct condition field (0..7).
// 1 <= op <= 7
int bo_bi = op0 << 2 | op1;
machinecodes.set(mi++, inst | (bo_bi << 16));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
if (DEBUG)
VM.sysWrite(disasm(machinecodes.get(mi - 1), 0));
}
break;
case PPC_BL_opcode:
case PPC_BL_SYS_opcode:
{
// CALL
BranchOperand o = MIR_Call.getTarget(p);
int targetOffset = resolveBranch(p, o.target, mi, mcOffsets);
machinecodes.set(mi++, inst | (targetOffset & LI_MASK));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_BLRL_opcode:
/* p 39, == bclrl 0x14,BI */
case PPC_BCTRL_opcode:
/* p , == bcctrl 0x14,BI */
case PPC_BCTRL_SYS_opcode:
/* p , == bcctrl 0x14,BI */
{
// INDIRECT CALL (Target == null)
machinecodes.set(mi++, inst);
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_BCL_opcode:
{
// COND CALL
int op0 = MIR_CondCall.getValue(p).getRegister().number & REG_MASK;
int op1 = MIR_CondCall.getCond(p).value;
// Add (CR field)<<2 to make BI represent the correct
// condition bit (0..3) in the correct condition field (0..7).
// 1 <= op <= 7
int bo_bi = op0 << 2 | op1;
BranchOperand o = MIR_CondCall.getTarget(p);
int targetOffset = resolveBranch(p, o.target, mi, mcOffsets);
if (targetOffset == 0) {
// unresolved branch
if (DEBUG)
VM.sysWriteln("**** Forward Cond. Branch ****");
machinecodes.set(mi++, inst | (bo_bi << 16));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
if (DEBUG)
VM.sysWriteln(disasm(machinecodes.get(mi - 1), 0));
if (unsafeCondDispl) {
// assume we need two words
// for now fill with NOP
machinecodes.set(mi++, NOPtemplate);
if (DEBUG)
VM.sysWriteln(disasm(machinecodes.get(mi - 1), 0));
}
} else if (targetOffset < MIN_COND_DISPL << 2) {
// one instruction is not enough
// --dave
throw new OperationNotImplementedException("Support for long backwards conditional branch and link is incorrect.");
/*
-- we have to branch (and not link) around an
unconditional branch and link.
-- the code below generates a conditional branch and
link around an unconditional branch.
if (DEBUG) VM.sysWriteln("**** Backward Long Cond. Branch ****");
// flip the condition and skip the following branch instruction
machinecodes.set(mi++, inst | flipCondition(bo_bi<<16) | (2<<2));
if (DEBUG) printInstruction(mi-1, inst,
flipCondition(bo_bi<<16), 2<<2);
// make a long branch to the target
machinecodes.set(mi++, Btemplate | ((targetOffset-4) & LI_MASK));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
if (DEBUG) printInstruction(mi-1, Btemplate, targetOffset-4);
*/
} else {
// one instruction is enough
if (DEBUG)
VM.sysWriteln("**** Backward Short Cond. Branch ****");
machinecodes.set(mi++, inst | (bo_bi << 16) | (targetOffset & BD_MASK));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
if (DEBUG)
VM.sysWrite(disasm(machinecodes.get(mi - 1), 0));
}
}
break;
case PPC_BCLRL_opcode:
{
// INDIRECT COND CALL
int op0 = MIR_CondCall.getValue(p).getRegister().number & REG_MASK;
int op1 = MIR_CondCall.getCond(p).value;
// Add (CR field)<<2 to make BI represent the correct
// condition bit (0..3) in the correct condition field (0..7).
// 1 <= op <= 7
int bo_bi = op0 << 2 | op1;
machinecodes.set(mi++, inst | (bo_bi << 16));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
if (DEBUG)
VM.sysWrite(disasm(machinecodes.get(mi - 1), 0));
}
break;
case PPC_CMP_opcode:
case PPC_CMPL_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 23) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_CMP_opcode:
case PPC64_CMPL_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 23) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_CMPI_opcode:
case PPC_CMPLI_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asIntConstant().value & SHORT_MASK;
machinecodes.set(mi++, (inst | (op0 << 23) | (op1 << 16) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_CMPI_opcode:
case PPC64_CMPLI_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asIntConstant().value & SHORT_MASK;
machinecodes.set(mi++, (inst | (op0 << 23) | (op1 << 16) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_FMR_opcode:
{
int op0 = MIR_Move.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Move.getValue(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_FABS_opcode:
case PPC_FNEG_opcode:
case PPC_FSQRT_opcode:
case PPC_FSQRTS_opcode:
case PPC_FRSP_opcode:
case PPC_FCTIW_opcode:
case PPC_FCTIWZ_opcode:
{
int op0 = MIR_Unary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Unary.getValue(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_FCFID_opcode:
case PPC64_FCTIDZ_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Unary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Unary.getValue(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_FCMPO_opcode:
case PPC_FCMPU_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 23) | (op1 << 16) | (op2 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_FMUL_opcode:
case PPC_FMULS_opcode:
{
int op0 = MIR_Binary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Binary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Binary.getValue2(p).asRegister().getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 6)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_FMADD_opcode:
case PPC_FMADDS_opcode:
case PPC_FMSUB_opcode:
case PPC_FMSUBS_opcode:
case PPC_FNMADD_opcode:
case PPC_FNMADDS_opcode:
case PPC_FNMSUB_opcode:
case PPC_FNMSUBS_opcode:
case PPC_FSEL_opcode:
{
int op0 = MIR_Ternary.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Ternary.getValue1(p).getRegister().number & REG_MASK;
int op2 = MIR_Ternary.getValue2(p).getRegister().number & REG_MASK;
int op3 = MIR_Ternary.getValue3(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | (op2 << 6) | (op3 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_LWZ_opcode:
case PPC_LBZ_opcode:
case PPC_LHA_opcode:
case PPC_LHZ_opcode:
case PPC_LFD_opcode:
case PPC_LFS_opcode:
case PPC_LMW_opcode:
{
int op0 = MIR_Load.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Load.getOffset(p).asIntConstant().value & SHORT_MASK;
int op2 = MIR_Load.getAddress(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | op1 | (op2 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_LD_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Load.getResult(p).getRegister().number & REG_MASK;
int op1 = (MIR_Load.getOffset(p).asIntConstant().value >> 2) & SHORT14_MASK;
int op2 = MIR_Load.getAddress(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 2) | (op2 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_LAddr_opcode:
case PPC_LInt_opcode:
{
if (VM.BuildFor32Addr) {
int op0 = MIR_Load.getResult(p).getRegister().number & REG_MASK;
int op1 = MIR_Load.getOffset(p).asIntConstant().value & SHORT_MASK;
int op2 = MIR_Load.getAddress(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | op1 | (op2 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
} else {
int op0 = MIR_Load.getResult(p).getRegister().number & REG_MASK;
int op1 = (MIR_Load.getOffset(p).asIntConstant().value >> 2) & SHORT14_MASK;
int op2 = MIR_Load.getAddress(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 2) | (op2 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
}
break;
case PPC_STW_opcode:
case PPC_STB_opcode:
case PPC_STH_opcode:
case PPC_STFD_opcode:
case PPC_STFS_opcode:
case PPC_STMW_opcode:
{
int op0 = MIR_Store.getValue(p).getRegister().number & REG_MASK;
int op1 = MIR_Store.getOffset(p).asIntConstant().value & SHORT_MASK;
int op2 = MIR_Store.getAddress(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | op1 | (op2 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_STWU_opcode:
case PPC_STFDU_opcode:
case PPC_STFSU_opcode:
{
int op0 = MIR_StoreUpdate.getValue(p).getRegister().number & REG_MASK;
int op1 = MIR_StoreUpdate.getAddress(p).getRegister().number & REG_MASK;
int op2 = MIR_StoreUpdate.getOffset(p).asIntConstant().value & SHORT_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC64_STD_opcode:
{
if (VM.VerifyAssertions)
VM._assert(VM.BuildFor64Addr);
int op0 = MIR_Store.getValue(p).getRegister().number & REG_MASK;
int op1 = (MIR_Store.getOffset(p).asIntConstant().value >> 2) & SHORT14_MASK;
int op2 = MIR_Store.getAddress(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 2) | (op2 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_STAddr_opcode:
{
if (VM.BuildFor32Addr) {
int op0 = MIR_Store.getValue(p).getRegister().number & REG_MASK;
int op1 = MIR_Store.getOffset(p).asIntConstant().value & SHORT_MASK;
int op2 = MIR_Store.getAddress(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | op1 | (op2 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
} else {
int op0 = MIR_Store.getValue(p).getRegister().number & REG_MASK;
int op1 = (MIR_Store.getOffset(p).asIntConstant().value >> 2) & SHORT14_MASK;
int op2 = MIR_Store.getAddress(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 2) | (op2 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
}
break;
case PPC_STAddrU_opcode:
{
if (VM.BuildFor32Addr) {
int op0 = MIR_StoreUpdate.getValue(p).getRegister().number & REG_MASK;
int op1 = MIR_StoreUpdate.getAddress(p).getRegister().number & REG_MASK;
int op2 = MIR_StoreUpdate.getOffset(p).asIntConstant().value & SHORT_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16) | op2));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
} else {
int op0 = MIR_StoreUpdate.getValue(p).getRegister().number & REG_MASK;
int op1 = (MIR_StoreUpdate.getOffset(p).asIntConstant().value >> 2) & SHORT14_MASK;
int op2 = MIR_StoreUpdate.getAddress(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 2) | (op2 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
}
break;
case PPC_MFSPR_opcode:
{
int op0 = MIR_Move.getResult(p).getRegister().number & REG_MASK;
int op1 = phys.getSPR(MIR_Move.getValue(p).getRegister());
machinecodes.set(mi++, (inst | (op0 << 21) | (op1 << 16)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_MTSPR_opcode:
{
int op0 = phys.getSPR(MIR_Move.getResult(p).getRegister());
int op1 = MIR_Move.getValue(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 21)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_MFTB_opcode:
case PPC_MFTBU_opcode:
{
int op0 = MIR_Move.getResult(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 21)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_HWSYNC_opcode:
case PPC_SYNC_opcode:
case PPC_ISYNC_opcode:
{
machinecodes.set(mi++, inst);
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_DCBST_opcode:
case PPC_DCBT_opcode:
case PPC_DCBTST_opcode:
case PPC_DCBZ_opcode:
case PPC_DCBZL_opcode:
case PPC_DCBF_opcode:
case PPC_ICBI_opcode:
{
int op0 = MIR_CacheOp.getAddress(p).getRegister().number & REG_MASK;
int op1 = MIR_CacheOp.getOffset(p).getRegister().number & REG_MASK;
machinecodes.set(mi++, (inst | (op0 << 16) | (op1 << 11)));
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case PPC_ILLEGAL_INSTRUCTION_opcode:
{
machinecodes.set(mi++, inst);
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
}
break;
case IG_PATCH_POINT_opcode:
{
BranchOperand bop = InlineGuard.getTarget(p);
Instruction target = bop.target;
if (VM.VerifyAssertions) {
VM._assert(target.getOpcode() == LABEL_opcode);
}
// resolve the target instruction, in LABEL_opcode,
// add one case for IG_PATCH_POINT
/* int targetOffset = */
resolveBranch(p, target, mi, mcOffsets);
machinecodes.set(mi++, NOPtemplate);
mcOffsets.setMachineCodeOffset(p, mi << LG_INSTRUCTION_WIDTH);
if (DEBUG_CODE_PATCH) {
VM.sysWrite("to be patched at ", mi - 1);
VM.sysWrite(" inst ");
VM.sysWriteHex(machinecodes.get(mi - 1));
VM.sysWriteln();
}
}
break;
default:
throw new OptimizingCompilerException("CodeGen", "OPCODE not implemented:", p);
}
}
if (unresolvedBranches != 0) {
throw new OptimizingCompilerException("CodeGen", " !!! Unresolved Branch Targets Exist!!! \n");
}
if (shouldPrint) {
OptimizingCompiler.header("Final machine code", ir.method);
Lister lister = new Lister(null);
lister.addLinesForCode(machinecodes);
lister.endAndPrintListing();
}
return mi;
}
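Nearly every arithmetic case above packs register numbers into the instruction word at the same bit positions (21, 16 and 11 from the least-significant end) on top of the operator's instruction template, while the D-form cases put a 16-bit immediate in the low half-word. A hedged sketch of that packing as standalone helpers; encodeXForm and encodeDForm are illustrative names, not part of the JikesRVM assembler:

static int encodeXForm(int template, int rt, int ra, int rb) {
  // e.g. add rt,ra,rb: RT at bit 21, RA at bit 16, RB at bit 11
  return template | ((rt & REG_MASK) << 21) | ((ra & REG_MASK) << 16) | ((rb & REG_MASK) << 11);
}

static int encodeDForm(int template, int rt, int ra, int simm16) {
  // e.g. addi rt,ra,simm16: the immediate occupies the low 16 bits
  return template | ((rt & REG_MASK) << 21) | ((ra & REG_MASK) << 16) | (simm16 & SHORT_MASK);
}

The logical and shift cases (AND, OR, XOR, SLW, SRW, and friends) swap the first two register fields, which is what the repeated "Bit positions of op1 and op2 are reversed" comments above refer to.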