Example 56 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class IndexMaintainer, method buildRowKey.

public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
    boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
    int prefixKeyLength = prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) : 0;
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
    DataOutput output = new DataOutputStream(stream);
    try {
        // For local indexes, we must prepend the row key with the start region key
        if (prependRegionStartKey) {
            if (regionStartKey.length == 0) {
                output.write(new byte[prefixKeyLength]);
            } else {
                output.write(regionStartKey);
            }
        }
        if (isIndexSalted) {
            // Placeholder; overwritten with the index salt byte at the end
            output.write(0);
        }
        // The dataRowKeySchema includes the salt byte field,
        // so we must adjust for that here.
        int dataPosOffset = isDataTableSalted ? 1 : 0;
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
        int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
        // Skip the data table salt byte: iteration starts at dataPosOffset
        int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
        dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
        if (viewIndexId != null) {
            output.write(viewIndexId);
        }
        if (isMultiTenant) {
            dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            dataPosOffset++;
        }
        // Write index row key
        for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) {
            Boolean hasValue = dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
            // Ignore view constants from the data table, as these don't need
            // to be in the index (they're the same for all rows in this index)
            if (!viewConstantColumnBitSet.get(i)) {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                if (Boolean.TRUE.equals(hasValue)) {
                    dataRowKeyLocator[0][pos] = ptr.getOffset();
                    dataRowKeyLocator[1][pos] = ptr.getLength();
                } else {
                    dataRowKeyLocator[0][pos] = 0;
                    dataRowKeyLocator[1][pos] = 0;
                }
            }
        }
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        Iterator<Expression> expressionIterator = indexedExpressions.iterator();
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType dataColumnType;
            boolean isNullable;
            SortOrder dataSortOrder;
            if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
                Expression expression = expressionIterator.next();
                dataColumnType = expression.getDataType();
                dataSortOrder = expression.getSortOrder();
                isNullable = expression.isNullable();
                expression.evaluate(new ValueGetterTuple(valueGetter), ptr);
            } else {
                Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                dataColumnType = field.getDataType();
                ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                dataSortOrder = field.getSortOrder();
                isNullable = field.isNullable();
            }
            boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
            PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
            boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
            boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
            if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
                output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                if (!isBytesComparable) {
                    indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
                }
                if (isDataColumnInverted != isIndexColumnDesc) {
                    writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                } else {
                    output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                }
            }
            if (!indexColumnType.isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] indexRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        if (isIndexSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            indexRowKey[0] = saltByte;
        }
        return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
Also used : DataOutput(java.io.DataOutput) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataOutputStream(java.io.DataOutputStream) BitSet(org.apache.phoenix.util.BitSet) SortOrder(org.apache.phoenix.schema.SortOrder) TrustedByteArrayOutputStream(org.apache.phoenix.util.TrustedByteArrayOutputStream) IOException(java.io.IOException) Field(org.apache.phoenix.schema.ValueSchema.Field) PDataType(org.apache.phoenix.schema.types.PDataType) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ValueGetterTuple(org.apache.phoenix.schema.tuple.ValueGetterTuple)
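
The pattern to note in buildRowKey is that a single ImmutableBytesWritable acts as a reusable pointer into the data row key: set() repositions it over a byte range without copying, and each write consumes only the (array, offset, length) triple it references. Below is a minimal, self-contained sketch of that pointer idiom; the row key bytes and class name are invented for illustration:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

public class PointerIdiomSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical composite row key: a 2-byte prefix followed by a 3-byte value.
        byte[] rowKey = { 0x01, 0x02, 'f', 'o', 'o' };
        // Reposition the pointer over the value; no bytes are copied.
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        ptr.set(rowKey, 2, 3);
        // Write only the referenced slice, as buildRowKey does for each column.
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        DataOutputStream output = new DataOutputStream(stream);
        output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
        System.out.println(stream.toString("UTF-8")); // prints "foo"
    }
}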

Example 57 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class ConnectionQueryServicesImpl, method createPropertiesMap.

private static Map<String, Object> createPropertiesMap(Map<ImmutableBytesWritable, ImmutableBytesWritable> htableProps) {
    Map<String, Object> props = Maps.newHashMapWithExpectedSize(htableProps.size());
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : htableProps.entrySet()) {
        ImmutableBytesWritable key = entry.getKey();
        ImmutableBytesWritable value = entry.getValue();
        props.put(Bytes.toString(key.get(), key.getOffset(), key.getLength()), Bytes.toString(value.get(), value.getOffset(), value.getLength()));
    }
    return props;
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) Map(java.util.Map) TreeMap(java.util.TreeMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
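
A possible round trip for this helper, showing why the offset/length pair must accompany get(): the writable may wrap a buffer larger than the value it points at. The property name and value below are invented for illustration:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class PropertiesMapSketch {
    public static void main(String[] args) {
        // ImmutableBytesWritable implements equals()/hashCode(), so it works as a map key.
        Map<ImmutableBytesWritable, ImmutableBytesWritable> htableProps = new HashMap<>();
        htableProps.put(new ImmutableBytesWritable(Bytes.toBytes("MAX_FILESIZE")),
                new ImmutableBytesWritable(Bytes.toBytes("1073741824")));

        Map<String, Object> props = new HashMap<>();
        for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> entry : htableProps.entrySet()) {
            ImmutableBytesWritable key = entry.getKey();
            ImmutableBytesWritable value = entry.getValue();
            // Decode only the referenced range, never the whole backing array.
            props.put(Bytes.toString(key.get(), key.getOffset(), key.getLength()),
                    Bytes.toString(value.get(), value.getOffset(), value.getLength()));
        }
        System.out.println(props); // {MAX_FILESIZE=1073741824}
    }
}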

Example 58 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class ConnectionlessQueryServicesImpl, method updateIndexState.

@Override
public MetaDataMutationResult updateIndexState(List<Mutation> tableMetadata, String parentTableName) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetadata.get(0).getRow(), rowKeyMetadata);
    Mutation m = MetaDataUtil.getTableHeaderRow(tableMetadata);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (!MetaDataUtil.getMutationValue(m, INDEX_STATE_BYTES, kvBuilder, ptr)) {
        throw new IllegalStateException();
    }
    PIndexState newState = PIndexState.fromSerializedValue(ptr.get()[ptr.getOffset()]);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    String schemaName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]);
    String indexName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
    String indexTableName = SchemaUtil.getTableName(schemaName, indexName);
    PName tenantId = tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes);
    PTable index = metaData.getTableRef(new PTableKey(tenantId, indexTableName)).getTable();
    index = PTableImpl.makePTable(index, newState == PIndexState.USABLE ? PIndexState.ACTIVE : newState == PIndexState.UNUSABLE ? PIndexState.INACTIVE : newState);
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, index);
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PIndexState(org.apache.phoenix.schema.PIndexState) PName(org.apache.phoenix.schema.PName) Mutation(org.apache.hadoop.hbase.client.Mutation) PTableKey(org.apache.phoenix.schema.PTableKey) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) PTable(org.apache.phoenix.schema.PTable)
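
The line worth calling out is ptr.get()[ptr.getOffset()]: getMutationValue points ptr at the serialized state cell, so the state byte must be read at the pointer's offset, not at index 0. A stripped-down sketch of that access pattern, with an invented buffer layout and state byte:

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

public class FirstByteSketch {
    public static void main(String[] args) {
        // Pretend a cell value landed mid-buffer; the state is a single byte.
        byte[] buffer = { 0x00, 0x00, 'a', 0x00 };
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(buffer, 2, 1);
        // Read the byte the pointer references; buffer[0] would be wrong here.
        byte serializedState = ptr.get()[ptr.getOffset()];
        System.out.println((char) serializedState); // prints 'a'
    }
}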

Example 59 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class ScanUtil, method getTenantIdBytes.

public static byte[] getTenantIdBytes(RowKeySchema schema, boolean isSalted, PName tenantId, boolean isSharedIndex) throws SQLException {
    int pkPos = (isSalted ? 1 : 0) + (isSharedIndex ? 1 : 0);
    Field field = schema.getField(pkPos);
    PDataType dataType = field.getDataType();
    byte[] convertedValue;
    try {
        Object value = dataType.toObject(tenantId.getString());
        convertedValue = dataType.toBytes(value);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(convertedValue);
        dataType.pad(ptr, field.getMaxLength(), field.getSortOrder());
        convertedValue = ByteUtil.copyKeyBytesIfNecessary(ptr);
    } catch (IllegalDataException ex) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.TENANTID_IS_OF_WRONG_TYPE).build().buildException();
    }
    return convertedValue;
}
Also used : Field(org.apache.phoenix.schema.ValueSchema.Field) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PDataType(org.apache.phoenix.schema.types.PDataType) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) IllegalDataException(org.apache.phoenix.schema.IllegalDataException)
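
Since pad() may leave ptr spanning only part of a padded buffer, ByteUtil.copyKeyBytesIfNecessary copies the bytes out only when the pointer does not already cover an exact array. A minimal re-implementation of that idea; copyIfNecessary is an illustrative stand-in, not Phoenix's own helper:

import java.util.Arrays;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

public class CopyIfNecessarySketch {
    // Return the backing array as-is when the pointer covers all of it;
    // otherwise materialize just the referenced slice.
    static byte[] copyIfNecessary(ImmutableBytesWritable ptr) {
        if (ptr.getOffset() == 0 && ptr.getLength() == ptr.get().length) {
            return ptr.get();
        }
        return Arrays.copyOfRange(ptr.get(), ptr.getOffset(), ptr.getOffset() + ptr.getLength());
    }

    public static void main(String[] args) {
        byte[] exact = { 'a', 'b', 'c' };
        // Exact span: the original array comes back, no allocation.
        System.out.println(copyIfNecessary(new ImmutableBytesWritable(exact)) == exact); // true
        byte[] padded = { 0x00, 'a', 'b', 'c', 0x00 };
        // Partial span: a fresh 3-byte copy is made.
        System.out.println(copyIfNecessary(new ImmutableBytesWritable(padded, 1, 3)).length); // 3
    }
}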

Example 60 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class RoundFloorCeilExpressionsTest, method testRoundNegativePrecisionDecimalExpression.

@Test
public void testRoundNegativePrecisionDecimalExpression() throws Exception {
    LiteralExpression decimalLiteral = LiteralExpression.newConstant(444.44, PDecimal.INSTANCE);
    Expression roundDecimalExpression = RoundDecimalExpression.create(decimalLiteral, -2);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    roundDecimalExpression.evaluate(null, ptr);
    Object result = roundDecimalExpression.getDataType().toObject(ptr);
    assertTrue(result instanceof BigDecimal);
    BigDecimal resultDecimal = (BigDecimal) result;
    assertEquals(0, BigDecimal.valueOf(400).compareTo(resultDecimal));
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) RoundDateExpression(org.apache.phoenix.expression.function.RoundDateExpression) RoundDecimalExpression(org.apache.phoenix.expression.function.RoundDecimalExpression) FloorDateExpression(org.apache.phoenix.expression.function.FloorDateExpression) CeilDateExpression(org.apache.phoenix.expression.function.CeilDateExpression) CeilDecimalExpression(org.apache.phoenix.expression.function.CeilDecimalExpression) FloorDecimalExpression(org.apache.phoenix.expression.function.FloorDecimalExpression) BigDecimal(java.math.BigDecimal) Test(org.junit.Test)
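
Beyond the rounding assertion, the test demonstrates the general evaluate-into-pointer contract: evaluate() fills a caller-supplied ImmutableBytesWritable with the result bytes, and toObject() decodes whatever range the pointer then references. A condensed sketch of just that contract, reusing the same Phoenix calls outside of JUnit:

import java.math.BigDecimal;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.schema.types.PDecimal;

public class EvaluateIntoPtrSketch {
    public static void main(String[] args) throws Exception {
        LiteralExpression literal = LiteralExpression.newConstant(444.44, PDecimal.INSTANCE);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        // evaluate() returns true once it has written the value's bytes into ptr.
        if (literal.evaluate(null, ptr)) {
            BigDecimal value = (BigDecimal) literal.getDataType().toObject(ptr);
            System.out.println(value); // prints the decoded decimal, 444.44
        }
    }
}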

Aggregations

ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 296
Test (org.junit.Test): 86
Expression (org.apache.phoenix.expression.Expression): 36
IOException (java.io.IOException): 33
PhoenixArray (org.apache.phoenix.schema.types.PhoenixArray): 30
ArrayList (java.util.ArrayList): 28
Configuration (org.apache.hadoop.conf.Configuration): 28
Result (org.apache.hadoop.hbase.client.Result): 28
Cell (org.apache.hadoop.hbase.Cell): 27
KeyValue (org.apache.hadoop.hbase.KeyValue): 27
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 27
PTable (org.apache.phoenix.schema.PTable): 27
PDataType (org.apache.phoenix.schema.types.PDataType): 26
PSmallint (org.apache.phoenix.schema.types.PSmallint): 25
PTinyint (org.apache.phoenix.schema.types.PTinyint): 23
Put (org.apache.hadoop.hbase.client.Put): 20
PUnsignedSmallint (org.apache.phoenix.schema.types.PUnsignedSmallint): 20
PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint): 20
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 19
List (java.util.List): 18