Use of jdk.vm.ci.meta.AllocatableValue in project graal by oracle.
The class HotSpotForeignCallLinkageImpl, method finalizeAddress.
@Override
public void finalizeAddress(Backend backend) {
    if (address == 0) {
        assert checkStubCondition();
        InstalledCode code = stub.getCode(backend);
        EconomicSet<Register> destroyedRegisters = stub.getDestroyedCallerRegisters();
        if (!destroyedRegisters.isEmpty()) {
            AllocatableValue[] temporaryLocations = new AllocatableValue[destroyedRegisters.size()];
            int i = 0;
            for (Register reg : destroyedRegisters) {
                temporaryLocations[i++] = reg.asValue();
            }
            temporaries = temporaryLocations;
        }
        address = code.getStart();
    }
}
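The loop above is the whole trick: each destroyed caller register is turned into an AllocatableValue via Register.asValue() and recorded as a temporary of the call. Below is a minimal standalone sketch of that conversion, assuming the Graal SDK collections (org.graalvm.collections) are on the class path and JVMCI is made accessible to the example (e.g. via --add-modules jdk.internal.vm.ci plus the matching --add-exports flags); the register definitions are invented for illustration.

import org.graalvm.collections.EconomicSet;

import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.Register.RegisterCategory;
import jdk.vm.ci.meta.AllocatableValue;

public class DestroyedRegistersExample {

    // Invented register definitions, standing in for the registers a stub reports as destroyed.
    private static final RegisterCategory CPU = new RegisterCategory("CPU");
    private static final Register R1 = new Register(1, 1, "r1", CPU);
    private static final Register R2 = new Register(2, 2, "r2", CPU);

    // Mirrors the loop in finalizeAddress: every destroyed register becomes an AllocatableValue temporary.
    static AllocatableValue[] toTemporaries(EconomicSet<Register> destroyedRegisters) {
        AllocatableValue[] temporaries = new AllocatableValue[destroyedRegisters.size()];
        int i = 0;
        for (Register reg : destroyedRegisters) {
            temporaries[i++] = reg.asValue(); // a RegisterValue, i.e. an AllocatableValue
        }
        return temporaries;
    }

    public static void main(String[] args) {
        EconomicSet<Register> destroyed = EconomicSet.create();
        destroyed.add(R1);
        destroyed.add(R2);
        for (AllocatableValue temporary : toTemporaries(destroyed)) {
            System.out.println(temporary);
        }
    }
}

Recording the destroyed registers as temporaries lets the register allocator treat the foreign call like any other instruction that clobbers those registers.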
Use of jdk.vm.ci.meta.AllocatableValue in project graal by oracle.
The class TraceLinearScanEliminateSpillMovePhase, method eliminateSpillMoves.
// called once before assignment of register numbers
@SuppressWarnings("try")
private static void eliminateSpillMoves(TraceLinearScan allocator, boolean shouldEliminateSpillMoves, TraceBuilderResult traceBuilderResult, LIRGenerationResult res) {
    DebugContext debug = allocator.getDebug();
    try (Indent indent = debug.logAndIndent("Eliminating unnecessary spill moves: Trace%d", traceBuilderResult.getTraceForBlock(allocator.blockAt(0)).getId())) {
        allocator.sortIntervalsBySpillPos();
        /*
         * collect all intervals that must be stored after their definition. The list is sorted
         * by Interval.spillDefinitionPos.
         */
        TraceInterval interval = allocator.createUnhandledListBySpillPos(spilledIntervals);
        if (Assertions.detailedAssertionsEnabled(allocator.getOptions())) {
            checkIntervals(debug, interval);
        }
        if (debug.isLogEnabled()) {
            try (Indent indent2 = debug.logAndIndent("Sorted intervals")) {
                for (TraceInterval i = interval; i != null; i = i.next) {
                    debug.log("%5d: %s", i.spillDefinitionPos(), i);
                }
            }
        }
        LIRInsertionBuffer insertionBuffer = new LIRInsertionBuffer();
        for (AbstractBlockBase<?> block : allocator.sortedBlocks()) {
            try (Indent indent1 = debug.logAndIndent("Handle %s", block)) {
                ArrayList<LIRInstruction> instructions = allocator.getLIR().getLIRforBlock(block);
                int numInst = instructions.size();
                int lastOpId = -1;
                // iterate all instructions of the block.
                for (int j = 0; j < numInst; j++) {
                    LIRInstruction op = instructions.get(j);
                    int opId = op.id();
                    try (Indent indent2 = debug.logAndIndent("%5d %s", opId, op)) {
                        if (opId == -1) {
                            MoveOp move = MoveOp.asMoveOp(op);
                            /*
                             * Remove move from register to stack if the stack slot is
                             * guaranteed to be correct. Only moves that have been inserted by
                             * LinearScan can be removed.
                             */
                            if (shouldEliminateSpillMoves && canEliminateSpillMove(allocator, block, move, lastOpId)) {
                                /*
                                 * Move target is a stack slot that is always correct, so
                                 * eliminate instruction.
                                 */
                                if (debug.isLogEnabled()) {
                                    if (ValueMoveOp.isValueMoveOp(op)) {
                                        ValueMoveOp vmove = ValueMoveOp.asValueMoveOp(op);
                                        debug.log("eliminating move from interval %s to %s in block %s", vmove.getInput(), vmove.getResult(), block);
                                    } else {
                                        LoadConstantOp load = LoadConstantOp.asLoadConstantOp(op);
                                        debug.log("eliminating constant load from %s to %s in block %s", load.getConstant(), load.getResult(), block);
                                    }
                                }
                                // null-instructions are deleted by assignRegNum
                                instructions.set(j, null);
                            }
                        } else {
                            lastOpId = opId;
                            // interval.spillDefinitionPos() >= opId : "invalid order";
                            assert interval == TraceInterval.EndMarker || (interval.isSplitParent() && SpillState.IN_MEMORY.contains(interval.spillState())) : "invalid interval";
                            while (interval != TraceInterval.EndMarker && interval.spillDefinitionPos() == opId) {
                                debug.log("handle %s", interval);
                                if (!interval.canMaterialize() && interval.spillState() != SpillState.StartInMemory) {
                                    AllocatableValue fromLocation = interval.getSplitChildAtOpId(opId, OperandMode.DEF).location();
                                    AllocatableValue toLocation = allocator.canonicalSpillOpr(interval);
                                    if (!fromLocation.equals(toLocation)) {
                                        if (!insertionBuffer.initialized()) {
                                            /*
                                             * prepare insertion buffer (appended when all
                                             * instructions in the block are processed)
                                             */
                                            insertionBuffer.init(instructions);
                                        }
                                        assert isRegister(fromLocation) : "from operand must be a register but is: " + fromLocation + " toLocation=" + toLocation + " spillState=" + interval.spillState();
                                        assert isStackSlotValue(toLocation) : "to operand must be a stack slot";
                                        LIRInstruction move = allocator.getSpillMoveFactory().createMove(toLocation, fromLocation);
                                        insertionBuffer.append(j + 1, move);
                                        move.setComment(res, "TraceLSRAEliminateSpillMove: spill def pos");
                                        if (debug.isLogEnabled()) {
                                            debug.log("inserting move after definition of interval %d to stack slot %s at opId %d", interval.operandNumber, interval.spillSlot(), opId);
                                        }
                                    }
                                }
                                interval = interval.next;
                            }
                        }
                    }
                }
                if (insertionBuffer.initialized()) {
                    insertionBuffer.finish();
                }
            }
        }
        assert interval == TraceInterval.EndMarker : "missed an interval";
    }
}
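The interesting mechanism here is LIRInsertionBuffer: new spill moves are only recorded at an index (insertionBuffer.append(j + 1, move)) while the block is being scanned, and they are spliced into the instruction list afterwards (insertionBuffer.finish()), so the indices and opIds seen by the loop never shift. The following is a simplified, generic analogue of that pattern in plain Java; it is not Graal's LIRInsertionBuffer API, just a sketch of the same deferred-insertion idea.

import java.util.ArrayList;
import java.util.List;

// Sketch of the deferred-insertion pattern: record (index, element) pairs during a scan,
// then apply them back to front so earlier indices are not shifted by later insertions.
final class DeferredInsertions<T> {
    private final List<Integer> indices = new ArrayList<>();
    private final List<T> elements = new ArrayList<>();

    // Record that element should be inserted before the element currently at index.
    // Indices must be appended in non-decreasing order, as in the scan above.
    void append(int index, T element) {
        assert indices.isEmpty() || index >= indices.get(indices.size() - 1) : "indices must be non-decreasing";
        indices.add(index);
        elements.add(element);
    }

    // Apply all recorded insertions to the target list and reset the buffer.
    void finish(List<T> target) {
        for (int k = indices.size() - 1; k >= 0; k--) {
            target.add(indices.get(k), elements.get(k));
        }
        indices.clear();
        elements.clear();
    }
}

Appending a move at j + 1 while scanning and calling finish once per block corresponds to the append/finish pair above; eliminated moves go the other way, by nulling out list slots that a later pass (assignRegNum) removes.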
Use of jdk.vm.ci.meta.AllocatableValue in project graal by oracle.
The class TraceLocalMoveResolver, method insertMove.
private void insertMove(Constant fromOpr, TraceInterval toInterval) {
    assert insertIdx != -1 : "must setup insert position first";
    AllocatableValue toOpr = allocator.getOperand(toInterval);
    LIRInstruction move = getAllocator().getSpillMoveFactory().createLoad(toOpr, fromOpr);
    insertionBuffer.append(insertIdx, move);
    if (debug.isLogEnabled()) {
        debug.log("insert move from value %s to %s at %d", fromOpr, toInterval, insertIdx);
    }
}
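This overload loads a Constant directly into the interval's assigned operand via the backend's spill-move factory and queues the resulting instruction at insertIdx. The factory and insertion buffer require a full backend, so the self-contained sketch below only illustrates the pairing of a JVMCI constant with an AllocatableValue destination and an insertion index; the PendingLoad record and the register definition are invented, while JavaConstant.forInt and Register.asValue are real JVMCI calls.

import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.Register.RegisterCategory;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;

public class ConstantLoadSketch {

    // Invented helper pairing a constant source with its destination and insertion index.
    record PendingLoad(Constant from, AllocatableValue to, int insertIdx) {
    }

    public static void main(String[] args) {
        // Made-up register, standing in for the operand the allocator assigned to the interval.
        Register r1 = new Register(1, 1, "r1", new RegisterCategory("CPU"));
        AllocatableValue toOpr = r1.asValue();

        // JavaConstant is JVMCI's concrete Constant for primitive and object constants.
        Constant fromOpr = JavaConstant.forInt(42);

        PendingLoad load = new PendingLoad(fromOpr, toOpr, 3);
        System.out.printf("insert load of %s into %s at %d%n", load.from(), load.to(), load.insertIdx());
    }
}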
Use of jdk.vm.ci.meta.AllocatableValue in project graal by oracle.
The class TraceLocalMoveResolver, method breakCycle.
protected void breakCycle(int spillCandidate) {
    if (spillCandidate != -1) {
        // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory
        assert spillCandidate != -1 : "no interval in register for spilling found";
        // create a new spill interval and assign a stack slot to it
        TraceInterval fromInterval1 = mappingFrom.get(spillCandidate);
        // do not allocate a new spill slot for the temporary interval, but
        // use the spill slot assigned to fromInterval. Otherwise moves from
        // one stack slot to another can happen (not allowed by the LIR assembler).
        AllocatableValue spillSlot1 = fromInterval1.spillSlot();
        if (spillSlot1 == null) {
            spillSlot1 = getAllocator().getFrameMapBuilder().allocateSpillSlot(allocator.getKind(fromInterval1));
            fromInterval1.setSpillSlot(spillSlot1);
            cycleBreakingSlotsAllocated.increment(debug);
        }
        spillInterval(spillCandidate, fromInterval1, spillSlot1);
        return;
    }
    assert mappingFromSize() > 1;
    // Arbitrarily select the first entry for spilling.
    int stackSpillCandidate = 0;
    TraceInterval fromInterval = getMappingFrom(stackSpillCandidate);
    // allocate a new stack slot
    VirtualStackSlot spillSlot = getAllocator().getFrameMapBuilder().allocateSpillSlot(allocator.getKind(fromInterval));
    spillInterval(stackSpillCandidate, fromInterval, spillSlot);
}
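A cycle in the pending moves (for example r1 -> r2 together with r2 -> r1) cannot be serialized directly, because whichever move is emitted first clobbers the other move's source. breakCycle therefore spills one source interval to a stack slot, preferring the interval's existing spill slot so the backend never has to move from one stack slot to another. The toy example below illustrates the recipe with plain strings standing in for locations; the Move type and location names are invented, only the order of operations mirrors the code above.

import java.util.ArrayList;
import java.util.List;

public class CycleBreakingSketch {

    // Invented helper: a pending move from one named location to another.
    record Move(String from, String to) {
    }

    public static void main(String[] args) {
        // Cyclic mapping: r1 -> r2 and r2 -> r1. Neither move can be emitted first
        // without destroying the other move's source value.
        List<Move> mapping = new ArrayList<>(List.of(new Move("r1", "r2"), new Move("r2", "r1")));

        // Pick a spill candidate (here simply the first entry) and route its value
        // through a stack slot, which is what spillInterval(...) arranges above.
        Move candidate = mapping.remove(0);
        String spillSlot = "stack:0"; // freshly allocated, or the interval's existing spill slot

        List<Move> resolved = new ArrayList<>();
        resolved.add(new Move(candidate.from(), spillSlot)); // spill r1's value to the stack
        resolved.add(mapping.remove(0));                     // r2 -> r1 is now safe to emit
        resolved.add(new Move(spillSlot, candidate.to()));   // reload the spilled value into r2

        resolved.forEach(m -> System.out.println(m.from() + " -> " + m.to()));
    }
}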
Use of jdk.vm.ci.meta.AllocatableValue in project graal by oracle.
The class ArithmeticLIRGenerator, method emitAddOrSub.
private Variable emitAddOrSub(Value aVal, Value bVal, boolean setFlags, boolean isAdd) {
    LIRKind resultKind;
    Value a = aVal;
    Value b = bVal;
    if (isNumericInteger(a.getPlatformKind())) {
        LIRKind aKind = a.getValueKind(LIRKind.class);
        LIRKind bKind = b.getValueKind(LIRKind.class);
        assert a.getPlatformKind() == b.getPlatformKind() : a.getPlatformKind() + " vs. " + b.getPlatformKind();
        if (aKind.isUnknownReference()) {
            resultKind = aKind;
        } else if (bKind.isUnknownReference()) {
            resultKind = bKind;
        } else if (aKind.isValue() && bKind.isValue()) {
            resultKind = aKind;
        } else if (aKind.isValue()) {
            if (bKind.isDerivedReference()) {
                resultKind = bKind;
            } else {
                AllocatableValue allocatable = getLIRGen().asAllocatable(b);
                resultKind = bKind.makeDerivedReference(allocatable);
                b = allocatable;
            }
        } else if (bKind.isValue()) {
            if (aKind.isDerivedReference()) {
                resultKind = aKind;
            } else {
                AllocatableValue allocatable = getLIRGen().asAllocatable(a);
                resultKind = aKind.makeDerivedReference(allocatable);
                a = allocatable;
            }
        } else {
            resultKind = aKind.makeUnknownReference();
        }
    } else {
        resultKind = LIRKind.combine(a, b);
    }
    return isAdd ? emitAdd(resultKind, a, b, setFlags) : emitSub(resultKind, a, b, setFlags);
}
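For numeric-integer operands, the cascade above decides how the result interacts with the garbage collector's reference tracking: an unknown reference on either side poisons the result, two plain values stay a value, a value added to a (possibly derived) reference yields a derived reference whose base is the reference operand (makeDerivedReference also pins that operand as an allocatable base), and adding two references makes the result untrackable. Below is a simplified standalone model of that decision, with a small Kind enum replacing org.graalvm.compiler.core.common.LIRKind; the real class additionally carries the platform kind and the derived-reference base, which this sketch cannot express.

public class ResultKindSketch {

    // Simplified stand-in for LIRKind's reference states.
    enum Kind {
        VALUE,              // plain integer, no reference tracking needed
        REFERENCE,          // tracked reference with a known base
        DERIVED_REFERENCE,  // pointer derived from a tracked base
        UNKNOWN_REFERENCE   // reference the GC can no longer track precisely
    }

    // Mirrors the branch structure of emitAddOrSub for numeric-integer operands.
    static Kind resultKind(Kind a, Kind b) {
        if (a == Kind.UNKNOWN_REFERENCE) {
            return a;
        } else if (b == Kind.UNKNOWN_REFERENCE) {
            return b;
        } else if (a == Kind.VALUE && b == Kind.VALUE) {
            return a; // value + value stays a plain value
        } else if (a == Kind.VALUE) {
            // value + reference: the result points into b's object, so it is derived from b
            // (the real code keeps bKind if it is already derived, else calls makeDerivedReference(b))
            return Kind.DERIVED_REFERENCE;
        } else if (b == Kind.VALUE) {
            // reference + value: symmetric case, derived from a
            return Kind.DERIVED_REFERENCE;
        } else {
            // reference + reference: no single base remains identifiable
            return Kind.UNKNOWN_REFERENCE;
        }
    }

    public static void main(String[] args) {
        System.out.println(resultKind(Kind.VALUE, Kind.REFERENCE));             // DERIVED_REFERENCE
        System.out.println(resultKind(Kind.REFERENCE, Kind.DERIVED_REFERENCE)); // UNKNOWN_REFERENCE
    }
}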