Example usage of org.apache.phoenix.schema.SortOrder in the apache/phoenix project:
class ExplainTable, method appendPKColumnValue.
/**
 * Appends a human-readable rendering of one primary-key slot value to the
 * explain-plan output buffer.
 *
 * @param buf               output buffer being built for the explain plan
 * @param range             raw key bytes for this PK slot (may be re-pointed locally)
 * @param isNull            TRUE renders "null", FALSE renders "not null", null means a concrete value
 * @param slotIndex         position of the column in the row-key schema / PK column list
 * @param changeViewIndexId when true, decode the bytes as a view-index id (Short) and shift it
 *                          back by Short.MAX_VALUE before printing
 */
private void appendPKColumnValue(StringBuilder buf, byte[] range, Boolean isNull, int slotIndex, boolean changeViewIndexId) {
    // Tri-state null marker: a non-null Boolean short-circuits to a literal label.
    if (isNull != null) {
        buf.append(isNull ? "null" : "not null");
        return;
    }
    // Zero-length bytes represent an unbounded ("anything") slot.
    if (range.length == 0) {
        buf.append('*');
        return;
    }
    ScanRanges ranges = context.getScanRanges();
    PDataType type = ranges.getSchema().getField(slotIndex).getDataType();
    SortOrder sortOrder = tableRef.getTable().getPKColumns().get(slotIndex).getSortOrder();
    if (SortOrder.DESC == sortOrder) {
        // DESC columns are stored bit-inverted; flip them back to ASC encoding
        // so the literal prints correctly, and prefix '~' to show the inversion.
        buf.append('~');
        ImmutableBytesWritable inverted = new ImmutableBytesWritable(range);
        type.coerceBytes(inverted, type, sortOrder, SortOrder.getDefault());
        range = inverted.get();
    }
    if (!changeViewIndexId) {
        Format formatter = context.getConnection().getFormatter(type);
        buf.append(type.toStringLiteral(range, formatter));
    } else {
        // View index ids are stored offset by Short.MAX_VALUE; undo the offset for display.
        Short viewIndexId = (short) (((Short) type.toObject(range)) - Short.MAX_VALUE);
        buf.append(viewIndexId.toString());
    }
}
Example usage of org.apache.phoenix.schema.SortOrder in the apache/phoenix project:
class PTimestamp, method getKeyRange.
/**
* With timestamp, because our last 4 bytes store a value from [0 - 1000000), we need
* to detect when the boundary is crossed if we increment to the nextKey.
*/
@Override
public KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive, byte[] upperRange, boolean upperInclusive) {
/*
* Force lower bound to be inclusive for fixed width keys because it makes comparisons less expensive when you
* can count on one bound or the other being inclusive. Comparing two fixed width exclusive bounds against each
* other is inherently more expensive, because you need to take into account if the bigger key is equal to the
* next key after the smaller key. For example: (A-B] compared against [A-B) An exclusive lower bound A is
* bigger than an exclusive upper bound B. Forcing a fixed width exclusive lower bound key to be inclusive
* prevents us from having to do this extra logic in the compare function.
*
*/
// Only an exclusive, fixed-width lower bound needs the nanos-boundary treatment;
// every other case is handled by the superclass at the bottom.
if (lowerRange != KeyRange.UNBOUND && !lowerInclusive && isFixedWidth()) {
if (lowerRange.length != MAX_TIMESTAMP_BYTES) {
throw new IllegalDataException("Unexpected size of " + lowerRange.length + " for " + this);
}
// Infer sortOrder based on most significant byte
// NOTE(review): this assumes a negative leading byte of the nanos sub-field only
// occurs under DESC (bit-inverted) encoding — confirm against PTimestamp's encoder.
SortOrder sortOrder = lowerRange[Bytes.SIZEOF_LONG] < 0 ? SortOrder.DESC : SortOrder.ASC;
// Timestamp layout: 8 bytes of millis followed by 4 bytes of nanos in [0, 1000000).
int nanos = PUnsignedInt.INSTANCE.getCodec().decodeInt(lowerRange, Bytes.SIZEOF_LONG, sortOrder);
// The nanos part is at its extreme (max for ASC, min for DESC), so "next key"
// cannot be formed by bumping nanos alone — the millis part must roll over.
if ((sortOrder == SortOrder.DESC && nanos == 0) || (sortOrder == SortOrder.ASC && nanos == MAX_NANOS_VALUE_EXCLUSIVE - 1)) {
// With timestamp, because our last 4 bytes store a value from [0 - 1000000), we need
// to detect when the boundary is crossed with our nextKey
byte[] newLowerRange = new byte[MAX_TIMESTAMP_BYTES];
if (sortOrder == SortOrder.DESC) {
// Set nanos part as inverted 999999 as it needs to be the max nano value
// The millisecond part is moving to the previous value below
System.arraycopy(lowerRange, 0, newLowerRange, 0, Bytes.SIZEOF_LONG);
PUnsignedInt.INSTANCE.getCodec().encodeInt(MAX_NANOS_VALUE_EXCLUSIVE - 1, newLowerRange, Bytes.SIZEOF_LONG);
// In-place inversion of the 4 nanos bytes to match the DESC encoding of the key.
SortOrder.invert(newLowerRange, Bytes.SIZEOF_LONG, newLowerRange, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT);
} else {
// Leave nanos part as zero as the millisecond part is rolling over to the next value
System.arraycopy(lowerRange, 0, newLowerRange, 0, Bytes.SIZEOF_LONG);
}
// Increment millisecond part, but leave nanos alone
// nextKey returns false when the 8-byte millis prefix overflows, in which case
// there is no representable next key and the lower bound becomes unbounded.
if (ByteUtil.nextKey(newLowerRange, Bytes.SIZEOF_LONG)) {
lowerRange = newLowerRange;
} else {
lowerRange = KeyRange.UNBOUND;
}
// Bound was advanced to the true successor, so it is now inclusive.
return KeyRange.getKeyRange(lowerRange, true, upperRange, upperInclusive);
}
}
return super.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive);
}
Example usage of org.apache.phoenix.schema.SortOrder in the apache/phoenix project:
class SaltedScanRangesTest, method foreach.
/**
 * Builds salted {@link ScanRanges} over a synthetic row-key schema and delegates to the
 * {@code foreach(ScanRanges, ...)} overload to produce test-case parameters.
 *
 * @param ranges         per-slot key ranges for the (unsalted) PK columns
 * @param widths         column widths; width &gt; 0 means fixed-width CHAR(width),
 *                       otherwise variable-width VARCHAR
 * @param keyRange       the probe range the test intersects against
 * @param useSkipScan    whether the skip-scan filter should be used
 * @param expectedResult expected intersection outcome, forwarded to the overload
 */
private static Collection<?> foreach(KeyRange[][] ranges, int[] widths, KeyRange keyRange, boolean useSkipScan, boolean expectedResult) {
    // Lists.transform is a lazy view; copy it so the salt slot can be prepended.
    List<List<KeyRange>> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST);
    slots = new ArrayList<>(slots);
    // Slot 0 is the single-byte salt bucket.
    slots.add(0, Collections.singletonList(KeyRange.getKeyRange(new byte[] { 0 })));
    RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10);
    builder.addField(SaltingUtil.SALTING_COLUMN, false, SortOrder.getDefault());
    for (final int width : widths) {
        // Previously two duplicated anonymous PDatum classes differing only in the
        // data type; factored into a single helper.
        PDataType type = width > 0 ? PChar.INSTANCE : PVarchar.INSTANCE;
        builder.addField(pkField(type, width), false, SortOrder.getDefault());
    }
    ScanRanges scanRanges = ScanRanges.createSingleSpan(builder.build(), slots, nBuckets, useSkipScan);
    return foreach(scanRanges, widths, keyRange, expectedResult);
}

/**
 * Creates a non-nullable, default-sort-order PK field descriptor for the test schema.
 * Note: getMaxLength() deliberately returns {@code width} even when it is &lt;= 0,
 * matching the original inline implementations.
 */
private static PDatum pkField(final PDataType type, final int width) {
    return new PDatum() {
        @Override
        public boolean isNullable() {
            return false;
        }
        @Override
        public PDataType getDataType() {
            return type;
        }
        @Override
        public Integer getMaxLength() {
            return width;
        }
        @Override
        public Integer getScale() {
            return null;
        }
        @Override
        public SortOrder getSortOrder() {
            return SortOrder.getDefault();
        }
    };
}
Example usage of org.apache.phoenix.schema.SortOrder in the apache/phoenix project:
class ArrayConcatFunction, method evaluate.
/**
 * Evaluates the concatenation of the LHS and RHS array expressions into {@code ptr}.
 *
 * The same {@code ptr} is reused for both operand evaluations, so the LHS bytes,
 * offset and length are captured before the RHS overwrites it.
 *
 * @param tuple input row
 * @param ptr   on success, points at the concatenated array bytes
 * @return false when the LHS is missing/empty; true otherwise (an absent RHS
 *         yields the LHS array unchanged)
 */
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    if (!getLHSExpr().evaluate(tuple, ptr) || ptr.getLength() == 0) {
        return false;
    }
    boolean isLHSRowKeyOrderOptimized = PArrayDataType.isRowKeyOrderOptimized(getLHSExpr().getDataType(), getLHSExpr().getSortOrder(), ptr);
    SortOrder sortOrder = getRHSExpr().getSortOrder();
    int actualLengthOfArray1 = Math.abs(PArrayDataType.getArrayLength(ptr, getLHSBaseType(), getLHSExpr().getMaxLength()));
    // Snapshot the LHS bytes before ptr is reused for the RHS evaluation below.
    int lengthArray1 = ptr.getLength();
    int offsetArray1 = ptr.getOffset();
    byte[] array1Bytes = ptr.get();
    if (!getRHSExpr().evaluate(tuple, ptr) || ptr.getLength() == 0) {
        // RHS absent or empty: the result is just the LHS array.
        // (Removed a dead store of sortOrder here — it was never read on this path.)
        ptr.set(array1Bytes, offsetArray1, lengthArray1);
        return true;
    }
    checkSizeCompatibility(ptr, sortOrder, getLHSExpr(), getLHSExpr().getDataType(), getRHSExpr(), getRHSExpr().getDataType());
    // FIXME: calling version of coerceBytes that takes into account the separator used by LHS
    // If the RHS does not have the same separator, it'll be coerced to use it. It's unclear
    // if we should do the same for all classes derived from the base class.
    // Coerce RHS to LHS type
    getLHSExpr().getDataType().coerceBytes(ptr, null, getRHSExpr().getDataType(), getRHSExpr().getMaxLength(), getRHSExpr().getScale(), getRHSExpr().getSortOrder(), getLHSExpr().getMaxLength(), getLHSExpr().getScale(), getLHSExpr().getSortOrder(), isLHSRowKeyOrderOptimized);
    return modifierFunction(ptr, lengthArray1, offsetArray1, array1Bytes, getLHSBaseType(), actualLengthOfArray1, getMaxLength(), getLHSExpr());
}
Example usage of org.apache.phoenix.schema.SortOrder in the apache/phoenix project:
class SingleCellColumnExpression, method setKeyValueExpression.
/**
 * Materializes {@code keyValueColumnExpression} as a KeyValueColumnExpression that
 * mirrors this column's metadata.
 *
 * The getters are snapshotted into final locals so the anonymous PDatum captures
 * fixed values rather than re-reading this expression's state on every call.
 */
private void setKeyValueExpression() {
    final PDataType dataType = getDataType();
    final Integer maxLen = getMaxLength();
    final Integer colScale = getScale();
    final SortOrder order = getSortOrder();
    final boolean nullable = isNullable();
    this.keyValueColumnExpression = new KeyValueColumnExpression(new PDatum() {
        @Override
        public boolean isNullable() {
            return nullable;
        }
        @Override
        public SortOrder getSortOrder() {
            return order;
        }
        @Override
        public Integer getScale() {
            return colScale;
        }
        @Override
        public Integer getMaxLength() {
            return maxLen;
        }
        @Override
        public PDataType getDataType() {
            return dataType;
        }
    }, getColumnFamily(), getPositionInArray());
}
Aggregations