Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class IndexMaintainer, method buildRowKey.
public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
    boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
    int prefixKeyLength = prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) : 0;
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
    DataOutput output = new DataOutputStream(stream);
    try {
        // For local indexes, we must prepend the row key with the start region key
        if (prependRegionStartKey) {
            if (regionStartKey.length == 0) {
                output.write(new byte[prefixKeyLength]);
            } else {
                output.write(regionStartKey);
            }
        }
        if (isIndexSalted) {
            // Will be overwritten at the end with the index salt byte
            output.write(0);
        }
        // The dataRowKeySchema includes the salt byte field,
        // so we must adjust for that here.
        int dataPosOffset = isDataTableSalted ? 1 : 0;
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
        int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
        // Skip data table salt byte
        int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
        dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
        if (viewIndexId != null) {
            output.write(viewIndexId);
        }
        if (isMultiTenant) {
            dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            dataPosOffset++;
        }
        // Write index row key
        for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) {
            Boolean hasValue = dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
            // Ignore view constants from the data table, as these don't need
            // to be in the index (they're the same for all rows in this index)
            if (!viewConstantColumnBitSet.get(i)) {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                if (Boolean.TRUE.equals(hasValue)) {
                    dataRowKeyLocator[0][pos] = ptr.getOffset();
                    dataRowKeyLocator[1][pos] = ptr.getLength();
                } else {
                    dataRowKeyLocator[0][pos] = 0;
                    dataRowKeyLocator[1][pos] = 0;
                }
            }
        }
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        Iterator<Expression> expressionIterator = indexedExpressions.iterator();
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType dataColumnType;
            boolean isNullable;
            SortOrder dataSortOrder;
            if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
                Expression expression = expressionIterator.next();
                dataColumnType = expression.getDataType();
                dataSortOrder = expression.getSortOrder();
                isNullable = expression.isNullable();
                expression.evaluate(new ValueGetterTuple(valueGetter), ptr);
            } else {
                Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                dataColumnType = field.getDataType();
                ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                dataSortOrder = field.getSortOrder();
                isNullable = field.isNullable();
            }
            boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
            PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
            boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
            boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
            if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
                output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                if (!isBytesComparable) {
                    indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
                }
                if (isDataColumnInverted != isIndexColumnDesc) {
                    writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                } else {
                    output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                }
            }
            if (!indexColumnType.isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] indexRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        if (isIndexSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            indexRowKey[0] = saltByte;
        }
        return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
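The method above treats ImmutableBytesWritable as a reusable (buffer, offset, length) pointer: dataRowKeySchema.iterator and next reposition the single ptr over successive slices of the data row key without copying any bytes. A minimal standalone sketch of that pointer pattern, using only the HBase API (the class name and sample key below are ours, not from Phoenix):

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class PointerSliceDemo {
    public static void main(String[] args) {
        byte[] rowKey = Bytes.toBytes("tenant1\0col1value");
        // One reusable pointer over the backing array, never a copy
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        // Position the pointer over the first 7 bytes ("tenant1")
        ptr.set(rowKey, 0, 7);
        System.out.println(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));
        // Reposition past the \0 separator without allocating anything
        ptr.set(rowKey, 8, rowKey.length - 8);
        System.out.println(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));
    }
}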
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class ConnectionQueryServicesImpl, method createPropertiesMap.
private static Map<String, Object> createPropertiesMap(Map<ImmutableBytesWritable, ImmutableBytesWritable> htableProps) {
    Map<String, Object> props = Maps.newHashMapWithExpectedSize(htableProps.size());
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : htableProps.entrySet()) {
        ImmutableBytesWritable key = entry.getKey();
        ImmutableBytesWritable value = entry.getValue();
        props.put(Bytes.toString(key.get(), key.getOffset(), key.getLength()), Bytes.toString(value.get(), value.getOffset(), value.getLength()));
    }
    return props;
}
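Note that the keys and values are decoded with the three-argument Bytes.toString(bytes, offset, length), since a pointer may cover only part of its backing array; ImmutableBytesWritable also implements hashCode and equals over the referenced bytes, which is what lets it serve as a map key here. A small self-contained sketch of the same conversion (the property name and value are made up for illustration; plain HashMap stands in for Guava's Maps helper):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class PropertiesMapDemo {
    public static void main(String[] args) {
        Map<ImmutableBytesWritable, ImmutableBytesWritable> htableProps = new HashMap<>();
        htableProps.put(new ImmutableBytesWritable(Bytes.toBytes("MAX_FILESIZE")),
                        new ImmutableBytesWritable(Bytes.toBytes("1073741824")));
        Map<String, Object> props = new HashMap<>();
        for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : htableProps.entrySet()) {
            ImmutableBytesWritable key = entry.getKey();
            ImmutableBytesWritable value = entry.getValue();
            // Decode only the slice each pointer covers; never assume offset == 0
            props.put(Bytes.toString(key.get(), key.getOffset(), key.getLength()),
                      Bytes.toString(value.get(), value.getOffset(), value.getLength()));
        }
        System.out.println(props); // {MAX_FILESIZE=1073741824}
    }
}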
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class ConnectionlessQueryServicesImpl, method updateIndexState.
@Override
public MetaDataMutationResult updateIndexState(List<Mutation> tableMetadata, String parentTableName) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetadata.get(0).getRow(), rowKeyMetadata);
    Mutation m = MetaDataUtil.getTableHeaderRow(tableMetadata);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (!MetaDataUtil.getMutationValue(m, INDEX_STATE_BYTES, kvBuilder, ptr)) {
        throw new IllegalStateException();
    }
    PIndexState newState = PIndexState.fromSerializedValue(ptr.get()[ptr.getOffset()]);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    String schemaName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]);
    String indexName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
    String indexTableName = SchemaUtil.getTableName(schemaName, indexName);
    PName tenantId = tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes);
    PTable index = metaData.getTableRef(new PTableKey(tenantId, indexTableName)).getTable();
    index = PTableImpl.makePTable(index, newState == PIndexState.USABLE ? PIndexState.ACTIVE : newState == PIndexState.UNUSABLE ? PIndexState.INACTIVE : newState);
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, index);
}
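Worth noting is the idiom ptr.get()[ptr.getOffset()]: the index state is serialized as a single byte, and it must be read exactly where the pointer starts rather than at index 0 of the backing array. A hedged sketch of that single-byte decode, with a hypothetical State enum standing in for PIndexState:

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

public class SerializedStateDemo {
    // Hypothetical stand-in for PIndexState's single-byte serialization
    enum State {
        ACTIVE((byte) 'a'), INACTIVE((byte) 'i');
        final byte serialized;
        State(byte serialized) { this.serialized = serialized; }
        static State fromSerializedValue(byte b) {
            for (State s : values()) if (s.serialized == b) return s;
            throw new IllegalArgumentException("Unknown state byte: " + b);
        }
    }

    public static void main(String[] args) {
        byte[] cellValue = { 'x', 'a', 'x' };
        // Pointer set over the middle byte, as a cell-value lookup might do
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(cellValue, 1, 1);
        // Read the serialized byte exactly where the pointer starts
        State state = State.fromSerializedValue(ptr.get()[ptr.getOffset()]);
        System.out.println(state); // ACTIVE
    }
}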
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class ScanUtil, method getTenantIdBytes.
public static byte[] getTenantIdBytes(RowKeySchema schema, boolean isSalted, PName tenantId, boolean isSharedIndex) throws SQLException {
    int pkPos = (isSalted ? 1 : 0) + (isSharedIndex ? 1 : 0);
    Field field = schema.getField(pkPos);
    PDataType dataType = field.getDataType();
    byte[] convertedValue;
    try {
        Object value = dataType.toObject(tenantId.getString());
        convertedValue = dataType.toBytes(value);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(convertedValue);
        dataType.pad(ptr, field.getMaxLength(), field.getSortOrder());
        convertedValue = ByteUtil.copyKeyBytesIfNecessary(ptr);
    } catch (IllegalDataException ex) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.TENANTID_IS_OF_WRONG_TYPE).build().buildException();
    }
    return convertedValue;
}
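The final ByteUtil.copyKeyBytesIfNecessary call materializes the pointer back into a standalone byte[] only when it has to, since padding may have repositioned ptr over a larger buffer. A plain-Java sketch of the contract we'd expect from such a helper (the code below is ours, not Phoenix's actual implementation):

import java.util.Arrays;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

public class CopyIfNecessaryDemo {
    // Return the backing array as-is when the pointer already covers all
    // of it; otherwise copy out just the covered slice.
    static byte[] copyIfNecessary(ImmutableBytesWritable ptr) {
        if (ptr.getOffset() == 0 && ptr.getLength() == ptr.get().length) {
            return ptr.get(); // zero-copy fast path
        }
        return Arrays.copyOfRange(ptr.get(), ptr.getOffset(), ptr.getOffset() + ptr.getLength());
    }

    public static void main(String[] args) {
        byte[] padded = { 't', '1', ' ', ' ' };
        System.out.println(copyIfNecessary(new ImmutableBytesWritable(padded)).length);       // 4, no copy
        System.out.println(copyIfNecessary(new ImmutableBytesWritable(padded, 0, 2)).length); // 2, copied
    }
}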
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
The class RoundFloorCeilExpressionsTest, method testRoundNegativePrecisionDecimalExpression.
@Test
public void testRoundNegativePrecisionDecimalExpression() throws Exception {
    LiteralExpression decimalLiteral = LiteralExpression.newConstant(444.44, PDecimal.INSTANCE);
    Expression roundDecimalExpression = RoundDecimalExpression.create(decimalLiteral, -2);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    roundDecimalExpression.evaluate(null, ptr);
    Object result = roundDecimalExpression.getDataType().toObject(ptr);
    assertTrue(result instanceof BigDecimal);
    BigDecimal resultDecimal = (BigDecimal) result;
    assertEquals(0, BigDecimal.valueOf(400).compareTo(resultDecimal));
}
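The assertion uses compareTo rather than equals because BigDecimal.equals also requires the scale to match, and rounding to a negative precision can legitimately produce a value such as 4E+2. A quick standalone illustration (class name is ours):

import java.math.BigDecimal;

public class BigDecimalCompareDemo {
    public static void main(String[] args) {
        BigDecimal expected = BigDecimal.valueOf(400); // unscaled 400, scale 0
        BigDecimal rounded = new BigDecimal("4E+2");   // unscaled 4, scale -2
        System.out.println(expected.equals(rounded));         // false: equals() compares scale too
        System.out.println(expected.compareTo(rounded) == 0); // true: numerically identical
    }
}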