Use of org.graalvm.compiler.lir.LIRInstruction in project graal by oracle.
The class SSALinearScanEliminateSpillMovePhase, method isPhiResolutionMove.
@SuppressWarnings("try")
private boolean isPhiResolutionMove(AbstractBlockBase<?> block, MoveOp move, Interval toInterval) {
    if (!toInterval.isSplitParent()) {
        return false;
    }
    if ((toInterval.from() & 1) == 1) {
        // phi intervals start at even positions.
        return false;
    }
    if (block.getSuccessorCount() != 1) {
        return false;
    }
    LIRInstruction op = allocator.instructionForId(toInterval.from());
    if (!(op instanceof LabelOp)) {
        return false;
    }
    AbstractBlockBase<?> intStartBlock = allocator.blockForId(toInterval.from());
    assert allocator.getLIR().getLIRforBlock(intStartBlock).get(0).equals(op);
    if (!block.getSuccessors()[0].equals(intStartBlock)) {
        return false;
    }
    DebugContext debug = allocator.getDebug();
    try (Indent indent = debug.indent()) {
        debug.log("Is a move (%s) to phi interval %s", move, toInterval);
    }
    return true;
}
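The LIRInstruction-specific part of this check is the pair of lookups allocator.instructionForId and allocator.blockForId, which map an interval's start position back to the defining instruction and its block. Below is a minimal sketch of that id-to-instruction check in isolation; it reuses only the calls visible above, and the helper name startsAtBlockLabel is hypothetical:

// Sketch only: checks whether an interval starts at a block label, mirroring
// the lookups in isPhiResolutionMove. `allocator` is assumed to be in scope.
private boolean startsAtBlockLabel(Interval toInterval) {
    int defPos = toInterval.from();
    LIRInstruction op = allocator.instructionForId(defPos);        // instruction defining that position
    AbstractBlockBase<?> defBlock = allocator.blockForId(defPos);  // block containing that position
    // phi results are defined by the LabelOp that opens their block
    return op instanceof LabelOp && allocator.getLIR().getLIRforBlock(defBlock).get(0).equals(op);
}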
Use of org.graalvm.compiler.lir.LIRInstruction in project graal by oracle.
The class TraceGlobalMoveResolver, method breakCycle.
@SuppressWarnings("try")
private void breakCycle(int spillCandidate) {
    // no move could be processed because there is a cycle in the move list
    // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory
    assert spillCandidate != -1 : "no interval in register for spilling found";
    // create a new spill interval and assign a stack slot to it
    Value from = mappingFrom.get(spillCandidate);
    try (Indent indent = debug.logAndIndent("BreakCycle: %s", from)) {
        AllocatableValue spillSlot = null;
        if (TraceRegisterAllocationPhase.Options.TraceRAreuseStackSlotsForMoveResolutionCycleBreaking.getValue(options) && !isStackSlotValue(from)) {
            // don't use the stack slot if from is already the stack slot
            Value fromStack = mappingFromStack.get(spillCandidate);
            if (fromStack != null) {
                spillSlot = (AllocatableValue) fromStack;
                cycleBreakingSlotsReused.increment(debug);
                debug.log("reuse slot for spilling: %s", spillSlot);
            }
        }
        if (spillSlot == null) {
            spillSlot = frameMapBuilder.allocateSpillSlot(from.getValueKind());
            cycleBreakingSlotsAllocated.increment(debug);
            debug.log("created new slot for spilling: %s", spillSlot);
        }
        // insert a move from register to stack and update the mapping
        // (this must happen whether the slot was reused or newly allocated)
        LIRInstruction move = insertMove(from, spillSlot);
        move.setComment(res, "TraceGlobalMoveResolver: breakCycle");
        block(spillSlot);
        mappingFrom.set(spillCandidate, spillSlot);
        unblock(from);
    }
}
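The core of the cycle-breaking pattern sits in the last few statements: the chosen source is first copied to the stack slot, the slot is blocked so no other move overwrites it, and the pending move is redirected to read from the slot instead of the register. A condensed sketch of just those steps, reusing only the calls that appear above (selection of `from`, `spillSlot` and `spillCandidate` elided and assumed to be set up as in the method):

// Sketch: the steps that break a move cycle such as r1 -> r2, r2 -> r1.
LIRInstruction spill = insertMove(from, spillSlot);  // save the register value to memory first
spill.setComment(res, "TraceGlobalMoveResolver: breakCycle");
block(spillSlot);                                     // the slot now carries a live value
mappingFrom.set(spillCandidate, spillSlot);           // the pending move reads from the slot instead
unblock(from);                                        // the original register may be overwritten again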
Use of org.graalvm.compiler.lir.LIRInstruction in project graal by oracle.
The class StackMoveOptimizationPhase, method run.
@Override
protected void run(TargetDescription target, LIRGenerationResult lirGenRes, PostAllocationOptimizationContext context) {
    LIR lir = lirGenRes.getLIR();
    DebugContext debug = lir.getDebug();
    for (AbstractBlockBase<?> block : lir.getControlFlowGraph().getBlocks()) {
        ArrayList<LIRInstruction> instructions = lir.getLIRforBlock(block);
        new Closure().process(debug, instructions);
    }
}
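This run method shows the usual shape of a post-allocation LIR phase: take the LIR from the generation result, iterate the blocks of its control-flow graph, and rewrite each block's LIRInstruction list in place. A minimal sketch of that traversal, using only the accessors shown above; `visit` is a hypothetical per-instruction callback standing in for the Closure used here:

// Sketch: generic per-block walk over LIRInstructions, as in run() above.
// `lir` comes from lirGenRes.getLIR(); `visit` is a hypothetical callback.
for (AbstractBlockBase<?> block : lir.getControlFlowGraph().getBlocks()) {
    ArrayList<LIRInstruction> instructions = lir.getLIRforBlock(block);
    for (LIRInstruction op : instructions) {
        visit(op); // e.g. match stack-to-stack move sequences, as the Closure does here
    }
}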
Use of org.graalvm.compiler.lir.LIRInstruction in project graal by oracle.
The class DynamicCounterNode, method generate.
@Override
public void generate(NodeLIRBuilderTool generator) {
    LIRGeneratorTool lirGen = generator.getLIRGeneratorTool();
    String nameWithContext;
    if (isWithContext()) {
        nameWithContext = getName() + " @ ";
        if (graph().method() != null) {
            StackTraceElement stackTraceElement = graph().method().asStackTraceElement(0);
            if (stackTraceElement != null) {
                nameWithContext += " " + stackTraceElement.toString();
            } else {
                nameWithContext += graph().method().format("%h.%n");
            }
        }
        if (graph().name != null) {
            nameWithContext += " (" + graph().name + ")";
        }
    } else {
        nameWithContext = getName();
    }
    LIRInstruction counterOp = lirGen.createBenchmarkCounter(nameWithContext, getGroup(), generator.operand(increment));
    if (counterOp != null) {
        lirGen.append(counterOp);
    } else {
        throw GraalError.unimplemented("Benchmark counters not enabled or not implemented by the back end.");
    }
}
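From the LIR perspective only the tail of the method matters: the node asks the LIRGeneratorTool for a back-end specific LIRInstruction and appends it to the current block, failing if the back end does not support it. A reduced sketch of that emit-and-append pattern, keeping only the calls shown above; `name` is assumed to hold the already-built counter label:

// Sketch: emitting a LIRInstruction from a node's generate() method, as above.
LIRGeneratorTool lirGen = generator.getLIRGeneratorTool();
LIRInstruction counterOp = lirGen.createBenchmarkCounter(name, getGroup(), generator.operand(increment));
if (counterOp != null) {
    lirGen.append(counterOp); // the back end supports counters: add the instruction to the block
} else {
    throw GraalError.unimplemented("Benchmark counters not enabled or not implemented by the back end.");
}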
Use of org.graalvm.compiler.lir.LIRInstruction in project graal by oracle.
The class TraceLinearScanEliminateSpillMovePhase, method eliminateSpillMoves.
// called once before assignment of register numbers
@SuppressWarnings("try")
private static void eliminateSpillMoves(TraceLinearScan allocator, boolean shouldEliminateSpillMoves, TraceBuilderResult traceBuilderResult, LIRGenerationResult res) {
    DebugContext debug = allocator.getDebug();
    try (Indent indent = debug.logAndIndent("Eliminating unnecessary spill moves: Trace%d", traceBuilderResult.getTraceForBlock(allocator.blockAt(0)).getId())) {
        allocator.sortIntervalsBySpillPos();
        /*
         * collect all intervals that must be stored after their definition. The list is sorted
         * by Interval.spillDefinitionPos.
         */
        TraceInterval interval = allocator.createUnhandledListBySpillPos(spilledIntervals);
        if (Assertions.detailedAssertionsEnabled(allocator.getOptions())) {
            checkIntervals(debug, interval);
        }
        if (debug.isLogEnabled()) {
            try (Indent indent2 = debug.logAndIndent("Sorted intervals")) {
                for (TraceInterval i = interval; i != null; i = i.next) {
                    debug.log("%5d: %s", i.spillDefinitionPos(), i);
                }
            }
        }
        LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer();
        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
            try (Indent indent1 = debug.logAndIndent("Handle %s", block)) {
                ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
                int numInst = instructions.size();
                int lastOpId = -1;
                // iterate all instructions of the block.
                for (int j = 0; j < numInst; j++) {
                    LIRInstruction op = instructions.get(j);
                    int opId = op.id();
                    try (Indent indent2 = debug.logAndIndent("%5d %s", opId, op)) {
                        if (opId == -1) {
                            MoveOp move = MoveOp.asMoveOp(op);
                            /*
                             * Remove move from register to stack if the stack slot is
                             * guaranteed to be correct. Only moves that have been inserted by
                             * LinearScan can be removed.
                             */
                            if (shouldEliminateSpillMoves && canEliminateSpillMove(allocator, block, move, lastOpId)) {
                                /*
                                 * Move target is a stack slot that is always correct, so
                                 * eliminate instruction.
                                 */
                                if (debug.isLogEnabled()) {
                                    if (ValueMoveOp.isValueMoveOp(op)) {
                                        ValueMoveOp vmove = ValueMoveOp.asValueMoveOp(op);
                                        debug.log("eliminating move from interval %s to %s in block %s", vmove.getInput(), vmove.getResult(), block);
                                    } else {
                                        LoadConstantOp load = LoadConstantOp.asLoadConstantOp(op);
                                        debug.log("eliminating constant load from %s to %s in block %s", load.getConstant(), load.getResult(), block);
                                    }
                                }
                                // null-instructions are deleted by assignRegNum
                                instructions.set(j, null);
                            }
                        } else {
                            lastOpId = opId;
                            // interval.spillDefinitionPos() >= opId : "invalid order";
                            assert interval == TraceInterval.EndMarker || (interval.isSplitParent() && SpillState.IN_MEMORY.contains(interval.spillState())) : "invalid interval";
                            while (interval != TraceInterval.EndMarker && interval.spillDefinitionPos() == opId) {
                                debug.log("handle %s", interval);
                                if (!interval.canMaterialize() && interval.spillState() != SpillState.StartInMemory) {
                                    AllocatableValue fromLocation = interval.getSplitChildAtOpId(opId, OperandMode.DEF).location();
                                    AllocatableValue toLocation = allocator.canonicalSpillOpr(interval);
                                    if (!fromLocation.equals(toLocation)) {
                                        if (!insertionBuffer.initialized()) {
                                            /*
                                             * prepare insertion buffer (appended when all
                                             * instructions in the block are processed)
                                             */
                                            insertionBuffer.init(instructions);
                                        }
                                        assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState();
                                        assert isStackSlotValue(toLocation) : "to operand must be a stack slot";
                                        LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
                                        insertionBuffer.append(j + 1, move);
                                        move.setComment(res, "TraceLSRAEliminateSpillMove: spill def pos");
                                        if (debug.isLogEnabled()) {
                                            debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId);
                                        }
                                    }
                                }
                                interval = interval.next;
                            }
                        }
                    }
                }
                if (insertionBuffer.initialized()) {
                    insertionBuffer.finish();
                }
            }
        }
        assert interval == TraceInterval.EndMarker : "missed an interval";
    }
}
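Two LIRInstruction idioms from this method are worth isolating: instructions scheduled for removal are overwritten with null in the block's list (assignRegNum later drops the nulls), and new instructions are staged in a LIRInsertionBuffer so that list indices stay valid while iterating. A minimal sketch of the insertion-buffer protocol, using only the calls that appear above; `instructions`, `j` and `move` are assumed to be set up as in the loop:

// Sketch: staging new LIRInstructions with a LIRInsertionBuffer, as above.
LIRInsertionBuffer buffer = new LIRInsertionBuffer();
// ... while iterating instruction index j of the block ...
if (!buffer.initialized()) {
    buffer.init(instructions);   // bind the buffer to the block's instruction list once
}
buffer.append(j + 1, move);      // schedule `move` for insertion right after index j
// ... after the loop over the block ...
if (buffer.initialized()) {
    buffer.finish();             // splice all pending instructions into the list
}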