Use of org.apache.phoenix.schema.SortOrder in project phoenix by apache.
The class IndexMaintainer, method buildRowKey.
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
    boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
    int prefixKeyLength = prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) : 0;
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
    DataOutput output = new DataOutputStream(stream);
    try {
        // For local indexes, we must prepend the row key with the start region key
        if (prependRegionStartKey) {
            if (regionStartKey.length == 0) {
                output.write(new byte[prefixKeyLength]);
            } else {
                output.write(regionStartKey);
            }
        }
        if (isIndexSalted) {
            // will be set at end to index salt byte
            output.write(0);
        }
        // The dataRowKeySchema includes the salt byte field,
        // so we must adjust for that here.
        int dataPosOffset = isDataTableSalted ? 1 : 0;
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
        int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
        // Skip data table salt byte
        int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
        dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
        if (viewIndexId != null) {
            output.write(viewIndexId);
        }
        if (isMultiTenant) {
            dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            dataPosOffset++;
        }
        // Write index row key
        for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) {
            Boolean hasValue = dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
            // Ignore view constants from the data table, as they don't need to be
            // in the index (they're the same for all rows in this index)
            if (!viewConstantColumnBitSet.get(i)) {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                if (Boolean.TRUE.equals(hasValue)) {
                    dataRowKeyLocator[0][pos] = ptr.getOffset();
                    dataRowKeyLocator[1][pos] = ptr.getLength();
                } else {
                    dataRowKeyLocator[0][pos] = 0;
                    dataRowKeyLocator[1][pos] = 0;
                }
            }
        }
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        Iterator<Expression> expressionIterator = indexedExpressions.iterator();
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType dataColumnType;
            boolean isNullable;
            SortOrder dataSortOrder;
            if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
                Expression expression = expressionIterator.next();
                dataColumnType = expression.getDataType();
                dataSortOrder = expression.getSortOrder();
                isNullable = expression.isNullable();
                expression.evaluate(new ValueGetterTuple(valueGetter), ptr);
            } else {
                Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                dataColumnType = field.getDataType();
                ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                dataSortOrder = field.getSortOrder();
                isNullable = field.isNullable();
            }
            boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
            PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
            boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
            boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
            if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
                output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                if (!isBytesComparable) {
                    indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
                }
                if (isDataColumnInverted != isIndexColumnDesc) {
                    writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                } else {
                    output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                }
            }
            if (!indexColumnType.isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] indexRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        if (isIndexSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            indexRowKey[0] = saltByte;
        }
        return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
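The per-column branch above is where SortOrder does its work: when the data column's bytes are directly comparable with the index column's and both sides agree on direction, the stored bytes are copied verbatim; otherwise the value is coerced to the index type and, if the directions differ, written inverted. The writeInverted helper is not part of this snippet, so the sketch below only illustrates what such a byte-wise inversion conventionally looks like for Phoenix-style row keys; the class and method names are made up for illustration, not Phoenix's actual implementation.

import java.io.DataOutput;
import java.io.IOException;

// Illustrative sketch only: a DESC-ordered key column is conventionally produced by
// complementing each byte of the ASC encoding, which reverses lexicographic order.
final class InvertedWriteSketch {

    static void writeInverted(byte[] buf, int offset, int length, DataOutput output) throws IOException {
        for (int i = offset; i < offset + length; i++) {
            // buf[i] ^ 0xFF flips every bit, so larger ASC encodings now sort before smaller ones.
            output.write(buf[i] ^ 0xFF);
        }
    }
}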
Use of org.apache.phoenix.schema.SortOrder in project phoenix by apache.
The class IndexMaintainer, method buildDataRowKey.
/*
 * Build the data row key from the index row key
 */
public byte[] buildDataRowKey(ImmutableBytesWritable indexRowKeyPtr, byte[][] viewConstants) {
    RowKeySchema indexRowKeySchema = getIndexRowKeySchema();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes);
    DataOutput output = new DataOutputStream(stream);
    // Increment dataPosOffset until all have been written
    int dataPosOffset = 0;
    int viewConstantsIndex = 0;
    try {
        int indexPosOffset = !isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0;
        int maxRowKeyOffset = indexRowKeyPtr.getOffset() + indexRowKeyPtr.getLength();
        indexRowKeySchema.iterator(indexRowKeyPtr, ptr, indexPosOffset);
        if (isDataTableSalted) {
            dataPosOffset++;
            // will be set at end to salt byte
            output.write(0);
        }
        if (viewIndexId != null) {
            indexRowKeySchema.next(ptr, indexPosOffset++, maxRowKeyOffset);
        }
        if (isMultiTenant) {
            indexRowKeySchema.next(ptr, indexPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            indexPosOffset++;
            dataPosOffset++;
        }
        indexPosOffset = (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0) + (isMultiTenant ? 1 : 0) + (viewIndexId == null ? 0 : 1);
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) {
            // Write out the view constants, as they aren't stored in the index
            // (they're the same for all rows in this index)
            if (viewConstantColumnBitSet.get(i)) {
                output.write(viewConstants[viewConstantsIndex++]);
            } else {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                Boolean hasValue = indexRowKeySchema.iterator(indexRowKeyPtr, ptr, pos + indexPosOffset + 1);
                if (Boolean.TRUE.equals(hasValue)) {
                    // Write data row key value taking into account coercion and inversion
                    // if necessary
                    Field dataField = dataRowKeySchema.getField(i);
                    Field indexField = indexRowKeySchema.getField(pos + indexPosOffset);
                    PDataType indexColumnType = indexField.getDataType();
                    PDataType dataColumnType = dataField.getDataType();
                    SortOrder dataSortOrder = dataField.getSortOrder();
                    SortOrder indexSortOrder = indexField.getSortOrder();
                    boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
                    boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
                    if (isBytesComparable && isDataColumnInverted == descIndexColumnBitSet.get(pos)) {
                        output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                    } else {
                        if (!isBytesComparable) {
                            dataColumnType.coerceBytes(ptr, indexColumnType, indexSortOrder, SortOrder.getDefault());
                        }
                        if (descIndexColumnBitSet.get(pos) != isDataColumnInverted) {
                            writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                        } else {
                            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                        }
                    }
                }
            }
            // Write separator byte if variable length unless it's the last field in the schema
            // (but we still need to write it if it's DESC to ensure sort order is correct).
            byte sepByte = SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(i));
            if (!dataRowKeySchema.getField(i).getDataType().isFixedWidth() && (((i + 1) != dataRowKeySchema.getFieldCount()) || sepByte == QueryConstants.DESC_SEPARATOR_BYTE)) {
                output.writeByte(sepByte);
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] dataRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && dataRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        // Note that nIndexSaltBuckets (rather than the data table's own salt bucket count) is used
        // below; it is left there to maintain compatibility between an old client and a new server.
        if (isDataTableSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(dataRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            dataRowKey[0] = saltByte;
        }
        return dataRowKey.length == length ? dataRowKey : Arrays.copyOf(dataRowKey, length);
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
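buildDataRowKey mirrors buildRowKey: the same copy/coerce/invert decision is applied in the opposite direction, from index column back to data column. The sketch below pulls that shared decision into one hypothetical helper to make the symmetry explicit; it is not a method that exists in IndexMaintainer, the inline loop stands in for the writeInverted call used above, and the import paths assume a current Phoenix 4.x package layout.

import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PDataType;

// Hypothetical consolidation of the copy/coerce/invert logic shared by buildRowKey and buildDataRowKey.
final class SortOrderCopySketch {

    static void writeCoerced(ImmutableBytesWritable ptr, PDataType sourceType, SortOrder sourceOrder,
            PDataType targetType, boolean targetDesc, DataOutput output) throws IOException {
        boolean sourceInverted = sourceOrder != SortOrder.ASC;
        boolean bytesComparable = sourceType.isBytesComparableWith(targetType);
        if (bytesComparable && sourceInverted == targetDesc) {
            // Same encoding and same direction: the stored bytes can be copied verbatim.
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            return;
        }
        if (!bytesComparable) {
            // Re-encode the value as the target type, normalized to the default (ASC) order.
            targetType.coerceBytes(ptr, sourceType, sourceOrder, SortOrder.getDefault());
        }
        if (sourceInverted != targetDesc) {
            // Directions disagree: write the byte-wise complement, as writeInverted does above.
            for (int i = ptr.getOffset(); i < ptr.getOffset() + ptr.getLength(); i++) {
                output.write(ptr.get()[i] ^ 0xFF);
            }
        } else {
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
        }
    }
}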
Use of org.apache.phoenix.schema.SortOrder in project phoenix by apache.
The class ScanRangesTest, method foreach.
private static Collection<?> foreach(KeyRange[][] ranges, int[] widths, KeyRange keyRange, boolean expectedResult) {
    List<List<KeyRange>> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST);
    RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10);
    for (final int width : widths) {
        if (width > 0) {
            builder.addField(new PDatum() {
                @Override
                public boolean isNullable() {
                    return false;
                }
                @Override
                public PDataType getDataType() {
                    return PChar.INSTANCE;
                }
                @Override
                public Integer getMaxLength() {
                    return width;
                }
                @Override
                public Integer getScale() {
                    return null;
                }
                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }
            }, false, SortOrder.getDefault());
        } else {
            builder.addField(new PDatum() {
                @Override
                public boolean isNullable() {
                    return false;
                }
                @Override
                public PDataType getDataType() {
                    return PVarchar.INSTANCE;
                }
                @Override
                public Integer getMaxLength() {
                    return width;
                }
                @Override
                public Integer getScale() {
                    return null;
                }
                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }
            }, false, SortOrder.getDefault());
        }
    }
    ScanRanges scanRanges = ScanRanges.createSingleSpan(builder.build(), slots);
    return foreach(scanRanges, widths, keyRange, expectedResult);
}
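The two anonymous PDatum implementations above differ only in the data type they return (PChar for fixed-width fields, PVarchar otherwise), so the construction could be factored out. A small refactoring sketch, assuming only the PDatum interface already shown; the helper name is made up for illustration:

// Hypothetical helper: builds a non-nullable PDatum with the default sort order.
private static PDatum datumOf(final PDataType type, final Integer maxLength) {
    return new PDatum() {
        @Override
        public boolean isNullable() {
            return false;
        }
        @Override
        public PDataType getDataType() {
            return type;
        }
        @Override
        public Integer getMaxLength() {
            return maxLength;
        }
        @Override
        public Integer getScale() {
            return null;
        }
        @Override
        public SortOrder getSortOrder() {
            return SortOrder.getDefault();
        }
    };
}

With it, the loop body collapses to a single call such as builder.addField(datumOf(width > 0 ? PChar.INSTANCE : PVarchar.INSTANCE, width), false, SortOrder.getDefault()).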
Use of org.apache.phoenix.schema.SortOrder in project phoenix by apache.
The class SortOrderExpressionTest, method evaluateAndAssertResult.
private void evaluateAndAssertResult(Expression expression, Object expectedResult, String context) {
    context = context == null ? "" : context;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    assertTrue(expression.evaluate(null, ptr));
    PDataType dataType = expression.getDataType();
    SortOrder sortOrder = expression.getSortOrder();
    Object result = dataType.toObject(ptr.get(), ptr.getOffset(), ptr.getLength(), dataType, sortOrder);
    assertEquals(context, expectedResult, result);
}
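The helper passes the expression's own SortOrder to toObject, so a DESC-encoded value round-trips back to the original object before the assertion. A hedged usage sketch follows; the LiteralExpression.newConstant overload taking a SortOrder is an assumption about the Phoenix API rather than something shown in this snippet.

// Assumes LiteralExpression.newConstant(Object, PDataType, SortOrder) exists in the Phoenix
// version under test; newConstant declares a checked SQLException, so call it from a test
// method that declares throws SQLException.
Expression descLiteral = LiteralExpression.newConstant("blah", PVarchar.INSTANCE, SortOrder.DESC);
// The literal stores its bytes inverted, but toObject applies the sort order,
// so the decoded result should equal the original string.
evaluateAndAssertResult(descLiteral, "blah", "DESC varchar literal");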
Use of org.apache.phoenix.schema.SortOrder in project phoenix by apache.
The class SortOrderIT, method testSkipScanCompare.
@Test
public void testSkipScanCompare() throws Exception {
    List<Integer> expectedResults = Lists.newArrayList(2, 4);
    List<Integer> rExpectedResults = new ArrayList<>(expectedResults);
    Collections.reverse(rExpectedResults);
    Integer[] saltBuckets = new Integer[] { null, 3 };
    PDataType[] dataTypes = new PDataType[] { PDecimal.INSTANCE, PDouble.INSTANCE, PFloat.INSTANCE };
    for (Integer saltBucket : saltBuckets) {
        for (PDataType dataType : dataTypes) {
            for (SortOrder sortOrder : SortOrder.values()) {
                testCompareCompositeKey(saltBucket, dataType, sortOrder, "k1 in (2,4)", expectedResults, "");
                testCompareCompositeKey(saltBucket, dataType, sortOrder, "k1 in (2,4)", rExpectedResults, "ORDER BY k1 DESC");
            }
        }
    }
}
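The test iterates over every SortOrder value for each salt/type combination, exercising skip-scan comparisons over a composite primary key. For the DESC case, the table it runs against would be created with DDL roughly like the statement built below; the table and column names are assumptions for illustration, not taken from testCompareCompositeKey itself.

// Hypothetical illustration of a salted, DESC-ordered composite primary key in Phoenix SQL.
String ddl = "CREATE TABLE IF NOT EXISTS skip_scan_cmp (" +
        "k1 DECIMAL NOT NULL, k2 DECIMAL NOT NULL, " +
        "CONSTRAINT pk PRIMARY KEY (k1 DESC, k2 DESC)) SALT_BUCKETS=3";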