Use of org.graalvm.compiler.nodes.calc.LeftShiftNode in project graal by Oracle.
The class ArrayCopyCallNode, method computeBase.
private ValueNode computeBase(ValueNode base, ValueNode pos) {
    FixedWithNextNode basePtr = graph().add(new GetObjectAddressNode(base));
    graph().addBeforeFixed(this, basePtr);
    Stamp wordStamp = StampFactory.forKind(runtime.getTarget().wordJavaKind);
    ValueNode wordPos = IntegerConvertNode.convert(pos, wordStamp, graph(), NodeView.DEFAULT);
    int shift = CodeUtil.log2(getArrayIndexScale(elementKind));
    ValueNode scaledIndex = graph().unique(new LeftShiftNode(wordPos, ConstantNode.forInt(shift, graph())));
    ValueNode offset = graph().unique(new AddNode(scaledIndex, ConstantNode.forIntegerStamp(wordStamp, getArrayBaseOffset(elementKind), graph())));
    return graph().unique(new OffsetAddressNode(basePtr, offset));
}
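The node graph built here encodes the usual array-element address arithmetic. As a plain-Java sketch of the same computation (the helper name and signature are illustrative only, not part of the Graal API, assuming the index scale is a power of two):

static long elementAddress(long baseAddress, long pos, int arrayBaseOffset, int arrayIndexScale) {
    // log2 of the element scale, analogous to CodeUtil.log2 for a power of two
    int shift = Integer.numberOfTrailingZeros(arrayIndexScale);
    // object address + (index << shift) + header offset, as the nodes above compute it
    return baseAddress + ((pos << shift) + arrayBaseOffset);
}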
Use of org.graalvm.compiler.nodes.calc.LeftShiftNode in project graal by Oracle.
The class AMD64AddressLoweringTest, method convertBaseAndShiftedIndexToDisplacement.
@Test
public void convertBaseAndShiftedIndexToDisplacement() {
    ValueNode base = graph.addOrUniqueWithInputs(const64(1000));
    ValueNode index = graph.addOrUniqueWithInputs(new LeftShiftNode(const64(10), const32(1)));
    AddressNode result = lowering.lower(base, index);
    assertAddress(result, null, null, Scale.Times2, 1020);
}
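Both inputs are constants, so the lowering folds the whole address into a displacement: the shift amount 1 maps to Scale.Times2, and the displacement is 1000 + (10 << 1) = 1000 + 20 = 1020, which is exactly what the assertion checks.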
Use of org.graalvm.compiler.nodes.calc.LeftShiftNode in project graal by Oracle.
The class AMD64AddressLoweringTest, method convertBaseAndNegatedShiftedIndexToDisplacement.
@Test
public void convertBaseAndNegatedShiftedIndexToDisplacement() {
    ValueNode base = graph.addOrUniqueWithInputs(const64(1000));
    ValueNode index = graph.addOrUniqueWithInputs(new NegateNode(new LeftShiftNode(const64(10), const32(2))));
    AddressNode result = lowering.lower(base, index);
    assertAddress(result, null, null, Scale.Times4, 960);
}
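With the shifted index wrapped in a NegateNode, the scaled index is subtracted rather than added: the shift amount 2 maps to Scale.Times4, and the displacement is 1000 - (10 << 2) = 1000 - 40 = 960.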
Use of org.graalvm.compiler.nodes.calc.LeftShiftNode in project graal by Oracle.
The class AMD64AddressLowering, method improve.
/**
 * Tries to optimize addresses so that they match the AMD64-specific addressing mode better
 * (base + index * scale + displacement).
 *
 * @param graph the current graph
 * @param debug the current debug context
 * @param ret the address that should be optimized
 * @param isBaseNegated determines if the address base is negated. If so, all values that are
 *            extracted from the base will be negated as well
 * @param isIndexNegated determines if the index is negated. If so, all values that are
 *            extracted from the index will be negated as well
 * @return true if the address was modified
 */
protected boolean improve(StructuredGraph graph, DebugContext debug, AMD64AddressNode ret, boolean isBaseNegated, boolean isIndexNegated) {
    ValueNode newBase = improveInput(ret, ret.getBase(), 0, isBaseNegated);
    if (newBase != ret.getBase()) {
        ret.setBase(newBase);
        return true;
    }
    ValueNode newIdx = improveInput(ret, ret.getIndex(), ret.getScale().log2, isIndexNegated);
    if (newIdx != ret.getIndex()) {
        ret.setIndex(newIdx);
        return true;
    }
    if (ret.getIndex() instanceof LeftShiftNode) {
        LeftShiftNode shift = (LeftShiftNode) ret.getIndex();
        if (shift.getY().isConstant()) {
            int amount = ret.getScale().log2 + shift.getY().asJavaConstant().asInt();
            Scale scale = Scale.fromShift(amount);
            if (scale != null) {
                ret.setIndex(shift.getX());
                ret.setScale(scale);
                return true;
            }
        }
    }
    if (ret.getScale() == Scale.Times1) {
        if (ret.getIndex() == null && ret.getBase() instanceof AddNode) {
            AddNode add = (AddNode) ret.getBase();
            ret.setBase(add.getX());
            ret.setIndex(considerNegation(graph, add.getY(), isBaseNegated));
            return true;
        }
        if (ret.getBase() == null && ret.getIndex() instanceof AddNode) {
            AddNode add = (AddNode) ret.getIndex();
            ret.setBase(considerNegation(graph, add.getX(), isIndexNegated));
            ret.setIndex(add.getY());
            return true;
        }
        if (ret.getBase() instanceof LeftShiftNode && !(ret.getIndex() instanceof LeftShiftNode)) {
            ValueNode tmp = ret.getBase();
            ret.setBase(considerNegation(graph, ret.getIndex(), isIndexNegated != isBaseNegated));
            ret.setIndex(considerNegation(graph, tmp, isIndexNegated != isBaseNegated));
            return true;
        }
    }
    return improveNegation(graph, debug, ret, isBaseNegated, isIndexNegated);
}
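The shift-into-scale branch above only fires when the combined shift amount corresponds to one of the addressing-mode scales AMD64 supports (1, 2, 4, or 8, i.e., a log2 of 0 to 3). A minimal standalone sketch of that check, with a hypothetical helper name that is not part of the Graal API:

static Integer foldShiftIntoScale(int currentScaleLog2, int shiftAmount) {
    int combined = currentScaleLog2 + shiftAmount;
    // Return the resulting scale factor, or null if the combined shift cannot be expressed
    // as an AMD64 scale (mirrors Scale.fromShift returning null in the code above).
    return (combined >= 0 && combined <= 3) ? Integer.valueOf(1 << combined) : null;
}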
Use of org.graalvm.compiler.nodes.calc.LeftShiftNode in project graal by Oracle.
The class CInterfaceInvocationPlugin, method replaceBitfieldAccessor.
private boolean replaceBitfieldAccessor(GraphBuilderContext b, ResolvedJavaMethod method, ValueNode[] args, StructBitfieldInfo bitfieldInfo, AccessorInfo accessorInfo) {
    int byteOffset = bitfieldInfo.getByteOffsetInfo().getProperty();
    int startBit = bitfieldInfo.getStartBitInfo().getProperty();
    int endBit = bitfieldInfo.getEndBitInfo().getProperty();
    boolean isUnsigned = bitfieldInfo.isUnsigned();
    assert byteOffset >= 0 && byteOffset < ((SizableInfo) bitfieldInfo.getParent()).getSizeInfo().getProperty();
    assert startBit >= 0 && startBit < 8;
    assert endBit >= startBit && endBit < 64;

    /*
     * The startBit is always in the first byte. Therefore, the endBit tells us how many bytes
     * we actually have to read and write.
     */
    JavaKind memoryKind;
    if (endBit < 8) {
        memoryKind = JavaKind.Byte;
    } else if (endBit < 16) {
        memoryKind = JavaKind.Short;
    } else if (endBit < 32) {
        memoryKind = JavaKind.Int;
    } else {
        memoryKind = JavaKind.Long;
    }
    int numBytes = memoryKind.getByteCount();

    /*
     * Try to align the byteOffset to be a multiple of numBytes. That should always be possible,
     * but we don't trust the C compiler and memory layout enough to make it an assertion.
     */
    int alignmentCorrection = byteOffset % numBytes;
    if (alignmentCorrection > 0 && endBit + alignmentCorrection * 8 < numBytes * 8) {
        byteOffset -= alignmentCorrection;
        startBit += alignmentCorrection * 8;
        endBit += alignmentCorrection * 8;
    }
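    /*
     * Worked example with hypothetical numbers (not from the source): byteOffset = 5,
     * startBit = 3, endBit = 20. The field is then read as an Int (numBytes = 4), and
     * alignmentCorrection = 5 % 4 = 1. Since endBit + 8 = 28 < 32, the access is rebased to
     * byteOffset = 4 with startBit = 11 and endBit = 28, i.e., the same bits accessed through
     * a naturally aligned Int.
     */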
    assert byteOffset >= 0 && byteOffset < ((SizableInfo) bitfieldInfo.getParent()).getSizeInfo().getProperty();
    assert startBit >= 0 && startBit < numBytes * 8;
    assert endBit >= startBit && endBit < numBytes * 8;

    int numBits = endBit - startBit + 1;
    assert numBits > 0 && numBits <= numBytes * 8;

    /*
     * The bit operations on the value are performed on either Int or Long. We do not perform
     * 8- or 16-bit arithmetic operations.
     */
    JavaKind computeKind = memoryKind.getStackKind();
    Stamp computeStamp = StampFactory.forKind(computeKind);
    int computeBits = computeKind.getBitCount();
    assert startBit >= 0 && startBit < computeBits;
    assert endBit >= startBit && endBit < computeBits;
    assert computeBits >= numBits;

    assert args.length == accessorInfo.parameterCount(true);
    ValueNode base = args[accessorInfo.baseParameterNumber(true)];
    StructuredGraph graph = b.getGraph();

    /*
     * Read the memory location. This is also necessary for writes, since we need to keep the
     * bits around the written bitfield unchanged.
     */
    ValueNode address = makeAddress(graph, args, accessorInfo, base, byteOffset, -1);
    LocationIdentity locationIdentity = makeLocationIdentity(b, method, args, accessorInfo);
    Stamp stamp = StampFactory.forInteger(memoryKind.getBitCount());
    ValueNode cur = readOp(b, address, locationIdentity, stamp, accessorInfo);
    cur = adaptPrimitiveType(graph, cur, memoryKind, computeKind, true);

    switch (accessorInfo.getAccessorKind()) {
        case GETTER: {
            if (isUnsigned) {
                /*
                 * Unsigned reads: shift the bitfield to the right and mask out the unnecessary
                 * high-order bits.
                 */
                cur = graph.unique(new RightShiftNode(cur, ConstantNode.forInt(startBit, graph)));
                cur = graph.unique(new AndNode(cur, ConstantNode.forIntegerStamp(computeStamp, (1L << numBits) - 1, graph)));
            } else {
                /*
                 * Signed reads: shift the bitfield to the left end so that its sign bit becomes
                 * the sign bit of the compute word, then do a signed right shift to get a
                 * proper sign extension.
                 */
                cur = graph.unique(new LeftShiftNode(cur, ConstantNode.forInt(computeBits - endBit - 1, graph)));
                cur = graph.unique(new RightShiftNode(cur, ConstantNode.forInt(computeBits - numBits, graph)));
            }
            JavaKind resultKind = wordTypes.asKind(b.getInvokeReturnType());
            b.push(pushKind(method), adaptPrimitiveType(graph, cur, computeKind, resultKind == JavaKind.Boolean ? resultKind : resultKind.getStackKind(), isUnsigned));
            return true;
        }
        case SETTER: {
            /* Zero out the bits of our bitfield, i.e., the bits we are going to change. */
            long mask = ~(((1L << numBits) - 1) << startBit);
            cur = graph.unique(new AndNode(cur, ConstantNode.forIntegerStamp(computeStamp, mask, graph)));

            /*
             * Mask the unnecessary high-order bits of the value to be written, and shift it to
             * its place.
             */
            ValueNode value = args[accessorInfo.valueParameterNumber(true)];
            value = adaptPrimitiveType(graph, value, value.getStackKind(), computeKind, isUnsigned);
            value = graph.unique(new AndNode(value, ConstantNode.forIntegerStamp(computeStamp, (1L << numBits) - 1, graph)));
            value = graph.unique(new LeftShiftNode(value, ConstantNode.forInt(startBit, graph)));

            /* Combine the leftover bits of the original memory word with the new value. */
            cur = graph.unique(new OrNode(cur, value));

            /* Narrow the value to the number of bits we need to write. */
            cur = adaptPrimitiveType(graph, cur, computeKind, memoryKind, true);

            /* Perform the write (the bit count is taken from the stamp of the written value). */
            writeOp(b, address, locationIdentity, cur, accessorInfo);
            return true;
        }
        default:
            throw shouldNotReachHere();
    }
}
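For reference, the bit arithmetic that the plugin emits as graph nodes corresponds to the following plain-Java sketch. The helper names are hypothetical and assume a 64-bit compute kind; they are not part of the Graal or Substrate VM API.

/* Unsigned read: shift the field down to bit 0, then mask off everything above it. */
static long readUnsignedBitfield(long word, int startBit, int numBits) {
    return (word >> startBit) & ((1L << numBits) - 1);
}

/* Signed read: move the field's sign bit to bit 63, then sign-extend with an arithmetic shift. */
static long readSignedBitfield(long word, int startBit, int numBits) {
    int endBit = startBit + numBits - 1;
    return (word << (63 - endBit)) >> (64 - numBits);
}

/* Write: clear the field's bits in the original word, then OR in the masked, shifted value. */
static long writeBitfield(long word, long value, int startBit, int numBits) {
    long fieldMask = (1L << numBits) - 1;
    return (word & ~(fieldMask << startBit)) | ((value & fieldMask) << startBit);
}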