use of org.graalvm.compiler.lir.StandardOp.LoadConstantOp in project graal by oracle.
the class LinearScanLifetimeAnalysisPhase, method getMaterializedValue.
/**
* Returns a value for an interval definition, which can be used for re-materialization.
*
* @param op An instruction which defines a value
* @param operand The destination operand of the instruction
* @param interval The interval for this defined value.
* @return The value moved by the instruction, which can be reused at all
* reload locations in case the interval of this instruction is spilled. Currently this
* can only be a {@link JavaConstant}.
*/
protected Constant getMaterializedValue(LIRInstruction op, Value operand, Interval interval) {
    if (LoadConstantOp.isLoadConstantOp(op)) {
        LoadConstantOp move = LoadConstantOp.asLoadConstantOp(op);
        if (!allocator.neverSpillConstants()) {
            /*
             * Check if the interval has any uses which would accept a stack location
             * (priority == ShouldHaveRegister). Rematerialization of such intervals can
             * result in a degradation, because rematerialization always inserts a constant
             * load, even if the value is not needed in a register.
             */
            Interval.UsePosList usePosList = interval.usePosList();
            int numUsePos = usePosList.size();
            for (int useIdx = 0; useIdx < numUsePos; useIdx++) {
                Interval.RegisterPriority priority = usePosList.registerPriority(useIdx);
                if (priority == Interval.RegisterPriority.ShouldHaveRegister) {
                    return null;
                }
            }
        }
        return move.getConstant();
    }
    return null;
}
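The early return above is the whole heuristic: a constant is only worth rematerializing if no use position would be satisfied with a stack location, because rematerialization turns every reload into a fresh constant load. Below is a self-contained toy model of that decision in plain Java; UsePriority, decideRematerialize, and the Integer stand-in for JavaConstant are illustrative names, not Graal API.

import java.util.List;

// Toy model of the use-position check in getMaterializedValue; not Graal API.
enum UsePriority {
    NONE,
    SHOULD_HAVE_REGISTER,
    MUST_HAVE_REGISTER
}

final class RematerializeCheck {

    /**
     * Returns the constant to rematerialize, or null if some use would be happy
     * with a stack location (SHOULD_HAVE_REGISTER), mirroring the early return
     * in getMaterializedValue above. Integer stands in for JavaConstant.
     */
    static Integer decideRematerialize(Integer loadedConstant, List<UsePriority> usePriorities, boolean neverSpillConstants) {
        if (loadedConstant == null) {
            return null; // the defining op is not a constant load
        }
        if (!neverSpillConstants) {
            for (UsePriority priority : usePriorities) {
                if (priority == UsePriority.SHOULD_HAVE_REGISTER) {
                    // Rematerializing would insert a constant load even though this
                    // use would accept the value on the stack: possible degradation.
                    return null;
                }
            }
        }
        return loadedConstant;
    }

    public static void main(String[] args) {
        System.out.println(decideRematerialize(42, List.of(UsePriority.MUST_HAVE_REGISTER), false)); // 42
        System.out.println(decideRematerialize(42, List.of(UsePriority.SHOULD_HAVE_REGISTER), false)); // null
    }
}

An interval whose only use must have a register still profits from rematerialization, while a single ShouldHaveRegister use vetoes it.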
use of org.graalvm.compiler.lir.StandardOp.LoadConstantOp in project graal by oracle.
the class LinearScanEliminateSpillMovePhase, method eliminateSpillMoves.
// called once before assignment of register numbers
@SuppressWarnings("try")
void eliminateSpillMoves(LIRGenerationResult res) {
    DebugContext debug = allocator.getDebug();
    try (Indent indent = debug.logAndIndent("Eliminating unnecessary spill moves")) {
        /*
         * collect all intervals that must be stored after their definition. The list is sorted
         * by Interval.spillDefinitionPos.
         */
        Interval interval;
        interval = allocator.createUnhandledLists(mustStoreAtDefinition, null).getLeft();
        if (Assertions.detailedAssertionsEnabled(allocator.getOptions())) {
            checkIntervals(debug, interval);
        }
        LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer();
        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
            try (Indent indent1 = debug.logAndIndent("Handle %s", block)) {
                ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
                int numInst = instructions.size();
                // iterate all instructions of the block.
                for (int j = firstInstructionOfInterest(); j < numInst; j++) {
                    LIRInstruction op = instructions.get(j);
                    int opId = op.id();
                    if (opId == -1) {
                        MoveOp move = MoveOp.asMoveOp(op);
                        /*
                         * Remove move from register to stack if the stack slot is guaranteed to
                         * be correct. Only moves that have been inserted by LinearScan can be
                         * removed.
                         */
                        if (Options.LIROptLSRAEliminateSpillMoves.getValue(allocator.getOptions()) && canEliminateSpillMove(block, move)) {
                            /*
                             * Move target is a stack slot that is always correct, so eliminate
                             * instruction.
                             */
                            if (debug.isLogEnabled()) {
                                if (ValueMoveOp.isValueMoveOp(op)) {
                                    ValueMoveOp vmove = ValueMoveOp.asValueMoveOp(op);
                                    debug.log("eliminating move from interval %d (%s) to %d (%s) in block %s", allocator.operandNumber(vmove.getInput()), vmove.getInput(), allocator.operandNumber(vmove.getResult()), vmove.getResult(), block);
                                } else {
                                    LoadConstantOp load = LoadConstantOp.asLoadConstantOp(op);
                                    debug.log("eliminating constant load from %s to %d (%s) in block %s", load.getConstant(), allocator.operandNumber(load.getResult()), load.getResult(), block);
                                }
                            }
                            // null-instructions are deleted by assignRegNum
                            instructions.set(j, null);
                        }
                    } else {
                        /*
                         * Insert move from register to stack just after the beginning of the
                         * interval.
                         */
                        assert interval.isEndMarker() || interval.spillDefinitionPos() >= opId : "invalid order";
                        assert interval.isEndMarker() || (interval.isSplitParent() && interval.spillState() == SpillState.StoreAtDefinition) : "invalid interval";
                        while (!interval.isEndMarker() && interval.spillDefinitionPos() == opId) {
                            if (!interval.canMaterialize()) {
                                if (!insertionBuffer.initialized()) {
                                    /*
                                     * prepare insertion buffer (appended when all instructions
                                     * in the block are processed)
                                     */
                                    insertionBuffer.init(instructions);
                                }
                                AllocatableValue fromLocation = interval.location();
                                AllocatableValue toLocation = LinearScan.canonicalSpillOpr(interval);
                                if (!fromLocation.equals(toLocation)) {
                                    assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState();
                                    assert isStackSlotValue(toLocation) : "to operand must be a stack slot";
                                    LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
                                    insertionBuffer.append(j + 1, move);
                                    move.setComment(res, "LSRAEliminateSpillMove: store at definition");
                                    if (debug.isLogEnabled()) {
                                        debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId);
                                    }
                                }
                            }
                            interval = interval.next;
                        }
                    }
                }
                if (insertionBuffer.initialized()) {
                    insertionBuffer.finish();
                }
            }
        }
        assert interval.isEndMarker() : "missed an interval";
    }
}
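Note the mechanical pattern shared by both eliminateSpillMoves variants in this listing: new spill stores are never inserted into the instruction list while the index j is live; they are recorded in an insertion buffer and spliced in once the block has been processed. A minimal stand-in for that pattern, assuming plain strings as instructions (Graal's LIRInsertionBuffer has a richer interface; the append order and back-to-front splicing are the point here):

import java.util.ArrayList;
import java.util.List;

// Toy insertion buffer: records (index, instruction) pairs during a forward scan
// and splices them in afterwards, so loop indices stay valid while iterating.
// Illustrative only; not Graal's LIRInsertionBuffer.
final class ToyInsertionBuffer {
    private final List<Integer> indices = new ArrayList<>();
    private final List<String> ops = new ArrayList<>();

    void append(int index, String op) {
        indices.add(index);
        ops.add(op);
    }

    boolean initialized() {
        return !indices.isEmpty();
    }

    // Appends were made in ascending index order during the scan, so inserting
    // back-to-front keeps every earlier index valid.
    void finish(List<String> instructions) {
        for (int i = indices.size() - 1; i >= 0; i--) {
            instructions.add(indices.get(i), ops.get(i));
        }
        indices.clear();
        ops.clear();
    }

    public static void main(String[] args) {
        List<String> block = new ArrayList<>(List.of("def v1", "use v1", "use v1"));
        ToyInsertionBuffer buffer = new ToyInsertionBuffer();
        for (int j = 0; j < block.size(); j++) {
            if (block.get(j).startsWith("def")) {
                buffer.append(j + 1, "store v1 -> stack"); // spill store at definition
            }
        }
        if (buffer.initialized()) {
            buffer.finish(block);
        }
        System.out.println(block); // [def v1, store v1 -> stack, use v1, use v1]
    }
}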
use of org.graalvm.compiler.lir.StandardOp.LoadConstantOp in project graal by oracle.
the class TraceLinearScanEliminateSpillMovePhase, method eliminateSpillMoves.
// called once before assignment of register numbers
@SuppressWarnings("try")
private static void eliminateSpillMoves(TraceLinearScan allocator, boolean shouldEliminateSpillMoves, TraceBuilderResult traceBuilderResult, LIRGenerationResult res) {
    DebugContext debug = allocator.getDebug();
    try (Indent indent = debug.logAndIndent("Eliminating unnecessary spill moves: Trace%d", traceBuilderResult.getTraceForBlock(allocator.blockAt(0)).getId())) {
        allocator.sortIntervalsBySpillPos();
        /*
         * collect all intervals that must be stored after their definition. The list is sorted
         * by Interval.spillDefinitionPos.
         */
        TraceInterval interval = allocator.createUnhandledListBySpillPos(spilledIntervals);
        if (Assertions.detailedAssertionsEnabled(allocator.getOptions())) {
            checkIntervals(debug, interval);
        }
        if (debug.isLogEnabled()) {
            try (Indent indent2 = debug.logAndIndent("Sorted intervals")) {
                for (TraceInterval i = interval; i != null; i = i.next) {
                    debug.log("%5d: %s", i.spillDefinitionPos(), i);
                }
            }
        }
        LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer();
        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
            try (Indent indent1 = debug.logAndIndent("Handle %s", block)) {
                ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
                int numInst = instructions.size();
                int lastOpId = -1;
                // iterate all instructions of the block.
                for (int j = 0; j < numInst; j++) {
                    LIRInstruction op = instructions.get(j);
                    int opId = op.id();
                    try (Indent indent2 = debug.logAndIndent("%5d %s", opId, op)) {
                        if (opId == -1) {
                            MoveOp move = MoveOp.asMoveOp(op);
                            /*
                             * Remove move from register to stack if the stack slot is
                             * guaranteed to be correct. Only moves that have been inserted by
                             * LinearScan can be removed.
                             */
                            if (shouldEliminateSpillMoves && canEliminateSpillMove(allocator, block, move, lastOpId)) {
                                /*
                                 * Move target is a stack slot that is always correct, so
                                 * eliminate instruction.
                                 */
                                if (debug.isLogEnabled()) {
                                    if (ValueMoveOp.isValueMoveOp(op)) {
                                        ValueMoveOp vmove = ValueMoveOp.asValueMoveOp(op);
                                        debug.log("eliminating move from interval %s to %s in block %s", vmove.getInput(), vmove.getResult(), block);
                                    } else {
                                        LoadConstantOp load = LoadConstantOp.asLoadConstantOp(op);
                                        debug.log("eliminating constant load from %s to %s in block %s", load.getConstant(), load.getResult(), block);
                                    }
                                }
                                // null-instructions are deleted by assignRegNum
                                instructions.set(j, null);
                            }
                        } else {
                            lastOpId = opId;
                            // assert interval == TraceInterval.EndMarker || interval.spillDefinitionPos() >= opId : "invalid order";
                            assert interval == TraceInterval.EndMarker || (interval.isSplitParent() && SpillState.IN_MEMORY.contains(interval.spillState())) : "invalid interval";
                            while (interval != TraceInterval.EndMarker && interval.spillDefinitionPos() == opId) {
                                debug.log("handle %s", interval);
                                if (!interval.canMaterialize() && interval.spillState() != SpillState.StartInMemory) {
                                    AllocatableValue fromLocation = interval.getSplitChildAtOpId(opId, OperandMode.DEF).location();
                                    AllocatableValue toLocation = allocator.canonicalSpillOpr(interval);
                                    if (!fromLocation.equals(toLocation)) {
                                        if (!insertionBuffer.initialized()) {
                                            /*
                                             * prepare insertion buffer (appended when all
                                             * instructions in the block are processed)
                                             */
                                            insertionBuffer.init(instructions);
                                        }
                                        assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState();
                                        assert isStackSlotValue(toLocation) : "to operand must be a stack slot";
                                        LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
                                        insertionBuffer.append(j + 1, move);
                                        move.setComment(res, "TraceLSRAEliminateSpillMove: spill def pos");
                                        if (debug.isLogEnabled()) {
                                            debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId);
                                        }
                                    }
                                }
                                interval = interval.next;
                            }
                        }
                    }
                }
                if (insertionBuffer.initialized()) {
                    insertionBuffer.finish();
                }
            }
        }
        assert interval == TraceInterval.EndMarker : "missed an interval";
    }
}
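Compared to the non-trace variant, the loop above adds two guards before storing at a definition: materializable constants never need a store, and intervals whose spill state is StartInMemory already live in their stack slot. A toy decision table for those guards, under simplified assumptions (ToySpillState and needsStore are local illustrative names, not Graal's types):

// Toy decision table mirroring the guards in the loop above; not Graal API.
enum ToySpillState { START_IN_MEMORY, STORE_AT_DEFINITION }

final class StoreAtDefinition {
    static boolean needsStore(boolean canMaterialize, ToySpillState spillState, String fromLocation, String toLocation) {
        if (canMaterialize) {
            return false; // a constant can be rematerialized at each reload instead
        }
        if (spillState == ToySpillState.START_IN_MEMORY) {
            return false; // the value already lives in its stack slot at definition
        }
        return !fromLocation.equals(toLocation); // skip no-op moves
    }

    public static void main(String[] args) {
        System.out.println(needsStore(false, ToySpillState.STORE_AT_DEFINITION, "rax", "stack:8")); // true
        System.out.println(needsStore(true, ToySpillState.STORE_AT_DEFINITION, "rax", "stack:8"));  // false
        System.out.println(needsStore(false, ToySpillState.START_IN_MEMORY, "rax", "stack:8"));     // false
    }
}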