Use of org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize in the graal project by Oracle.
Class AMD64ArithmeticLIRGenerator, method emitStoreConst.
protected void emitStoreConst(AMD64Kind kind, AMD64AddressValue address, ConstantValue value, LIRFrameState state) {
    Constant c = value.getConstant();
    if (JavaConstant.isNull(c)) {
        assert kind == AMD64Kind.DWORD || kind == AMD64Kind.QWORD;
        OperandSize size = kind == AMD64Kind.DWORD ? DWORD : QWORD;
        getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.MOV, size, address, 0, state));
        return;
    } else if (c instanceof VMConstant) {
        // only 32-bit constants can be patched
        if (kind == AMD64Kind.DWORD) {
            if (getLIRGen().target().inlineObjects || !(c instanceof JavaConstant)) {
                // if c is a JavaConstant, it's an oop, otherwise it's a metaspace constant
                assert !(c instanceof JavaConstant) || ((JavaConstant) c).getJavaKind() == JavaKind.Object;
                getLIRGen().append(new AMD64BinaryConsumer.MemoryVMConstOp(AMD64MIOp.MOV, address, (VMConstant) c, state));
                return;
            }
        }
    } else {
        JavaConstant jc = (JavaConstant) c;
        assert jc.getJavaKind().isPrimitive();

        AMD64MIOp op = AMD64MIOp.MOV;
        OperandSize size;
        long imm;

        switch (kind) {
            case BYTE:
                op = AMD64MIOp.MOVB;
                size = BYTE;
                imm = jc.asInt();
                break;
            case WORD:
                size = WORD;
                imm = jc.asInt();
                break;
            case DWORD:
                size = DWORD;
                imm = jc.asInt();
                break;
            case QWORD:
                size = QWORD;
                imm = jc.asLong();
                break;
            case SINGLE:
                size = DWORD;
                imm = Float.floatToRawIntBits(jc.asFloat());
                break;
            case DOUBLE:
                size = QWORD;
                imm = Double.doubleToRawLongBits(jc.asDouble());
                break;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind " + kind);
        }

        if (NumUtil.isInt(imm)) {
            getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(op, size, address, (int) imm, state));
            return;
        }
    }

    // fallback: load, then store
    emitStore(kind, address, getLIRGen().asAllocatable(value), state);
}
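Why the method can hand a float or double constant to an integer MOV: the x86 MOV-to-memory forms encode at most a 32-bit immediate, so floating-point constants are converted to their raw bit patterns and anything that does not fit a sign-extended imm32 falls through to the load-then-store path. Below is a minimal standalone sketch of that decision in plain Java, with no Graal types; fitsInInt is a hypothetical stand-in for NumUtil.isInt.

public class StoreConstSketch {
    /** Stand-in for NumUtil.isInt: true if the value is representable as a sign-extended imm32. */
    static boolean fitsInInt(long value) {
        return value == (int) value;
    }

    /** Returns the immediate that could be emitted, or null if a load-then-store fallback is needed. */
    static Long immediateFor(Object constant) {
        long imm;
        if (constant instanceof Float) {
            imm = Float.floatToRawIntBits((Float) constant);      // stored through a DWORD MOV
        } else if (constant instanceof Double) {
            imm = Double.doubleToRawLongBits((Double) constant);  // stored through a QWORD MOV
        } else if (constant instanceof Long) {
            imm = (Long) constant;
        } else if (constant instanceof Integer) {
            imm = (Integer) constant;
        } else {
            return null;                                          // not covered by this sketch
        }
        return fitsInInt(imm) ? Long.valueOf(imm) : null;         // only imm32 can be encoded
    }

    public static void main(String[] args) {
        System.out.println(immediateFor(1.0f));             // raw float bits fit in 32 bits
        System.out.println(immediateFor(Double.MAX_VALUE)); // null: needs the register fallback
    }
}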
Use of org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize in the graal project by Oracle.
Class AMD64ArithmeticLIRGenerator, method emitCompareOp.
@Override
public void emitCompareOp(AMD64Kind cmpKind, Variable left, Value right) {
    OperandSize size;
    switch (cmpKind) {
        case BYTE:
            size = BYTE;
            break;
        case WORD:
            size = WORD;
            break;
        case DWORD:
            size = DWORD;
            break;
        case QWORD:
            size = QWORD;
            break;
        case SINGLE:
            getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PS, left, getLIRGen().asAllocatable(right)));
            return;
        case DOUBLE:
            getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PD, left, getLIRGen().asAllocatable(right)));
            return;
        default:
            throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
    }

    if (isConstantValue(right)) {
        Constant c = LIRValueUtil.asConstant(right);
        if (JavaConstant.isNull(c)) {
            getLIRGen().append(new AMD64BinaryConsumer.Op(TEST, size, left, left));
            return;
        } else if (c instanceof VMConstant) {
            VMConstant vc = (VMConstant) c;
            if (size == DWORD && !GeneratePIC.getValue(getOptions())) {
                getLIRGen().append(new AMD64BinaryConsumer.VMConstOp(CMP.getMIOpcode(DWORD, false), left, vc));
            } else {
                getLIRGen().append(new AMD64BinaryConsumer.DataOp(CMP.getRMOpcode(size), size, left, vc));
            }
            return;
        } else if (c instanceof JavaConstant) {
            JavaConstant jc = (JavaConstant) c;
            if (jc.isDefaultForKind()) {
                AMD64RMOp op = size == BYTE ? TESTB : TEST;
                getLIRGen().append(new AMD64BinaryConsumer.Op(op, size, left, left));
                return;
            } else if (NumUtil.is32bit(jc.asLong())) {
                getLIRGen().append(new AMD64BinaryConsumer.ConstOp(CMP, size, left, (int) jc.asLong()));
                return;
            }
        }
    }

    // fallback: load, then compare
    getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, getLIRGen().asAllocatable(right)));
}
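The constant handling above reduces to three cases: a comparison against zero or null can be emitted as TEST reg,reg (same flag effects as CMP reg,0 with a shorter encoding), any other constant that fits a sign-extended imm32 can use CMP reg,imm32, and everything else falls back to loading the constant into a register. A standalone sketch of that classification follows; the enum and method names are illustrative, not Graal API.

enum CompareStrategy { TEST_SELF, CMP_IMM32, CMP_REGISTER }

class CompareOpSketch {
    /** Classifies how a comparison of a register against a long constant could be emitted. */
    static CompareStrategy classify(long constant) {
        if (constant == 0) {
            return CompareStrategy.TEST_SELF;     // TEST reg, reg sets ZF/SF like CMP reg, 0
        } else if (constant == (int) constant) {
            return CompareStrategy.CMP_IMM32;     // CMP reg, imm32 (sign-extended for QWORD)
        } else {
            return CompareStrategy.CMP_REGISTER;  // constant must first be loaded into a register
        }
    }
}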
Use of org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize in the graal project by Oracle.
Class AMD64NodeMatchRules, method emitIntegerTestBranchMemory.
private ComplexMatchResult emitIntegerTestBranchMemory(IfNode x, ValueNode value, LIRLowerableAccess access) {
    LabelRef trueLabel = getLIRBlock(x.trueSuccessor());
    LabelRef falseLabel = getLIRBlock(x.falseSuccessor());
    double trueLabelProbability = x.probability(x.trueSuccessor());
    AMD64Kind kind = getMemoryKind(access);
    OperandSize size = kind == AMD64Kind.QWORD ? QWORD : DWORD;
    if (value.isConstant()) {
        JavaConstant constant = value.asJavaConstant();
        if (constant != null && kind == AMD64Kind.QWORD && !NumUtil.isInt(constant.asLong())) {
            // Only imm32 as long
            return null;
        }
        return builder -> {
            AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
            gen.append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.TEST, size, address, (int) constant.asLong(), getState(access)));
            gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
            return null;
        };
    } else {
        return builder -> {
            AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
            gen.append(new AMD64BinaryConsumer.MemoryRMOp(AMD64RMOp.TEST, size, gen.asAllocatable(operand(value)), address, getState(access)));
            gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
            return null;
        };
    }
}
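The node pattern behind this helper is an if whose condition is an integer test of a value read from memory, roughly (x.field & mask) == 0; the read is folded into the TEST's memory operand and the branch is emitted on Condition.EQ, unless the mask needs more than a signed 32-bit immediate, in which case the rule bails out and returns null. A hedged sketch of the Java-level shape such a rule is meant to cover (class, field, and constant names are made up for illustration):

class IntegerTestBranchExample {
    long flags; // read from memory by a Read/FloatingRead node

    // Fits in a signed 32-bit immediate, so the folded TEST-on-memory form is applicable;
    // a mask wider than imm32 would make the rule above return null.
    static final long MASK = 0x10;

    int branchOnTest() {
        if ((flags & MASK) == 0) {
            return 1;   // trueSuccessor
        } else {
            return 0;   // falseSuccessor
        }
    }
}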
Use of org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize in the graal project by Oracle.
Class AMD64NodeMatchRules, method subMemory.
@MatchRule("(Sub value Read=access)")
@MatchRule("(Sub value FloatingRead=access)")
public ComplexMatchResult subMemory(ValueNode value, LIRLowerableAccess access) {
OperandSize size = getMemorySize(access);
if (size.isXmmType()) {
TargetDescription target = getLIRGeneratorTool().target();
boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
if (isAvx) {
return binaryRead(AVXOp.SUB, size, value, access);
} else {
return binaryRead(SSEOp.SUB, size, value, access);
}
} else {
return binaryRead(SUB.getRMOpcode(size), size, value, access);
}
}
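Both @MatchRule patterns describe a subtraction whose second input is a memory read, letting the read become the SUB instruction's memory operand; the XMM path only picks the AVX encoding when the target advertises CPUFeature.AVX. A rough illustration of source shapes that such a rule could fold (all names are invented for the example):

class SubMemoryExample {
    double balance;
    int counter;

    // Candidate for the (Sub value FloatingRead=access) pattern:
    // the field load could become the memory operand of an SSE/AVX subtract.
    double withdraw(double amount) {
        return amount - balance;
    }

    // Integer variant, candidate for (Sub value Read=access):
    // the load could fold into SUB reg, [obj + offset].
    int remaining(int limit) {
        return limit - counter;
    }
}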
Use of org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize in the graal project by Oracle.
Class AMD64NodeMatchRules, method emitSignExtendMemory.
private ComplexMatchResult emitSignExtendMemory(Access access, int fromBits, int toBits, ValueKind<?> addressKind) {
    assert fromBits <= toBits && toBits <= 64;
    AMD64Kind kind = null;
    AMD64RMOp op;
    OperandSize size;
    if (fromBits == toBits) {
        return null;
    } else if (toBits > 32) {
        kind = AMD64Kind.QWORD;
        size = OperandSize.QWORD;
        // sign extend to 64 bits
        switch (fromBits) {
            case 8:
                op = MOVSXB;
                break;
            case 16:
                op = MOVSX;
                break;
            case 32:
                op = MOVSXD;
                break;
            default:
                throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
        }
    } else {
        kind = AMD64Kind.DWORD;
        size = OperandSize.DWORD;
        // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
        switch (fromBits) {
            case 8:
                op = MOVSXB;
                break;
            case 16:
                op = MOVSX;
                break;
            case 32:
                return null;
            default:
                throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
        }
    }
    if (kind != null && op != null) {
        return emitConvertMemoryOp(kind, op, size, access, addressKind);
    }
    return null;
}
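In summary, 8-bit sources use MOVSXB, 16-bit sources use MOVSX, and a 32-to-64-bit extension uses MOVSXD, with the result treated as DWORD unless more than 32 bits are required. Below is a standalone sketch of that selection that returns plain mnemonic strings instead of Graal opcode objects (the helper name is made up):

class SignExtendSketch {
    /** Returns the sign-extending load mnemonic, or null when no extension is needed or possible. */
    static String movsxFor(int fromBits, int toBits) {
        if (fromBits == toBits) {
            return null;                             // nothing to extend
        }
        boolean to64 = toBits > 32;                  // QWORD result, otherwise DWORD
        switch (fromBits) {
            case 8:  return "movsxb";                // byte  -> 32/64 bit
            case 16: return "movsx";                 // short -> 32/64 bit
            case 32: return to64 ? "movsxd" : null;  // int   -> 64 bit only
            default: throw new IllegalArgumentException(fromBits + " -> " + toBits);
        }
    }
}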