
Example 91 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in the Apache Phoenix project.

From the class IndexMaintainer, method buildDataRowKey.

/*
 * Build the data row key from the index row key
 */
public byte[] buildDataRowKey(ImmutableBytesWritable indexRowKeyPtr, byte[][] viewConstants) {
    RowKeySchema indexRowKeySchema = getIndexRowKeySchema();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes);
    DataOutput output = new DataOutputStream(stream);
    // Increment dataPosOffset until all have been written
    int dataPosOffset = 0;
    int viewConstantsIndex = 0;
    try {
        int indexPosOffset = !isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0;
        int maxRowKeyOffset = indexRowKeyPtr.getOffset() + indexRowKeyPtr.getLength();
        indexRowKeySchema.iterator(indexRowKeyPtr, ptr, indexPosOffset);
        if (isDataTableSalted) {
            dataPosOffset++;
            // will be set at end to salt byte
            output.write(0);
        }
        if (viewIndexId != null) {
            indexRowKeySchema.next(ptr, indexPosOffset++, maxRowKeyOffset);
        }
        if (isMultiTenant) {
            indexRowKeySchema.next(ptr, indexPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            indexPosOffset++;
            dataPosOffset++;
        }
        indexPosOffset = (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0) + (isMultiTenant ? 1 : 0) + (viewIndexId == null ? 0 : 1);
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) {
            // Write view constants from the data table, as they won't appear in
            // the index (their value is the same for all rows in this index)
            if (viewConstantColumnBitSet.get(i)) {
                output.write(viewConstants[viewConstantsIndex++]);
            } else {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                Boolean hasValue = indexRowKeySchema.iterator(indexRowKeyPtr, ptr, pos + indexPosOffset + 1);
                if (Boolean.TRUE.equals(hasValue)) {
                    // Write data row key value taking into account coercion and inversion
                    // if necessary
                    Field dataField = dataRowKeySchema.getField(i);
                    Field indexField = indexRowKeySchema.getField(pos + indexPosOffset);
                    PDataType indexColumnType = indexField.getDataType();
                    PDataType dataColumnType = dataField.getDataType();
                    SortOrder dataSortOrder = dataField.getSortOrder();
                    SortOrder indexSortOrder = indexField.getSortOrder();
                    boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
                    boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
                    if (isBytesComparable && isDataColumnInverted == descIndexColumnBitSet.get(pos)) {
                        output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                    } else {
                        if (!isBytesComparable) {
                            dataColumnType.coerceBytes(ptr, indexColumnType, indexSortOrder, SortOrder.getDefault());
                        }
                        if (descIndexColumnBitSet.get(pos) != isDataColumnInverted) {
                            writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                        } else {
                            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                        }
                    }
                }
            }
            // Write separator byte if variable length unless it's the last field in the schema
            // (but we still need to write it if it's DESC to ensure sort order is correct).
            byte sepByte = SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(i));
            if (!dataRowKeySchema.getField(i).getDataType().isFixedWidth() && (((i + 1) != dataRowKeySchema.getFieldCount()) || sepByte == QueryConstants.DESC_SEPARATOR_BYTE)) {
                output.writeByte(sepByte);
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] dataRowKey = stream.getBuffer();
        // Remove trailing nulls
        int index = dataRowKeySchema.getFieldCount() - 1;
        while (index >= 0 && !dataRowKeySchema.getField(index).getDataType().isFixedWidth() && length > minLength && dataRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
            index--;
        }
        // TODO: capture nDataSaltBuckets instead of reusing nIndexSaltBuckets below; when that
        // field is serialized it must be optional, so that an absent value is tolerated and
        // compatibility between an old client and a new server is maintained.
        if (isDataTableSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(dataRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            dataRowKey[0] = saltByte;
        }
        return dataRowKey.length == length ? dataRowKey : Arrays.copyOf(dataRowKey, length);
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
Also used: DataOutput (java.io.DataOutput), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), DataOutputStream (java.io.DataOutputStream), BitSet (org.apache.phoenix.util.BitSet), RowKeySchema (org.apache.phoenix.schema.RowKeySchema), SortOrder (org.apache.phoenix.schema.SortOrder), TrustedByteArrayOutputStream (org.apache.phoenix.util.TrustedByteArrayOutputStream), IOException (java.io.IOException), Field (org.apache.phoenix.schema.ValueSchema.Field), PDataType (org.apache.phoenix.schema.types.PDataType)
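The writeInverted helper called in buildDataRowKey is not shown in this snippet. A minimal sketch of the idea, assuming the usual Phoenix convention that DESC key bytes are stored ones-complemented; the class name and the main driver here are mine for illustration, not Phoenix code:

import java.io.ByteArrayOutputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class InvertedWriteSketch {

    // Writes buf[offset..offset+length) with every byte flipped (ones
    // complement), so values stored for a DESC column compare in reverse
    // unsigned byte order.
    static void writeInverted(byte[] buf, int offset, int length, DataOutput output)
            throws IOException {
        for (int i = offset; i < offset + length; i++) {
            output.write(buf[i] ^ 0xFF);
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeInverted(new byte[] { 0x01, 0x02 }, 0, 2, new DataOutputStream(bytes));
        for (byte b : bytes.toByteArray()) {
            System.out.printf("%02x ", b); // prints "fe fd "
        }
    }
}

Flipping every byte reverses the unsigned byte-wise sort order, which is what lets a DESC index column reuse HBase's ordinary binary comparator.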

Example 92 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in the Apache Phoenix project.

From the class IndexMaintainer, method buildRowKey.

public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey, long ts) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
    boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
    int prefixKeyLength = prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) : 0;
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
    DataOutput output = new DataOutputStream(stream);
    try {
        // For local indexes, we must prepend the row key with the start region key
        if (prependRegionStartKey) {
            if (regionStartKey.length == 0) {
                output.write(new byte[prefixKeyLength]);
            } else {
                output.write(regionStartKey);
            }
        }
        if (isIndexSalted) {
            // will be set at end to index salt byte
            output.write(0);
        }
        // The dataRowKeySchema includes the salt byte field,
        // so we must adjust for that here.
        int dataPosOffset = isDataTableSalted ? 1 : 0;
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
        int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
        // Skip data table salt byte
        int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
        dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
        if (viewIndexId != null) {
            output.write(viewIndexId);
        }
        if (isMultiTenant) {
            dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            dataPosOffset++;
        }
        // Write index row key
        for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) {
            Boolean hasValue = dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
            // Ignore view constants from the data table, as they don't need to be
            // in the index (their value is the same for all rows in this index)
            if (!viewConstantColumnBitSet.get(i)) {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                if (Boolean.TRUE.equals(hasValue)) {
                    dataRowKeyLocator[0][pos] = ptr.getOffset();
                    dataRowKeyLocator[1][pos] = ptr.getLength();
                } else {
                    dataRowKeyLocator[0][pos] = 0;
                    dataRowKeyLocator[1][pos] = 0;
                }
            }
        }
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        Iterator<Expression> expressionIterator = indexedExpressions.iterator();
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType dataColumnType;
            boolean isNullable;
            SortOrder dataSortOrder;
            if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
                Expression expression = expressionIterator.next();
                dataColumnType = expression.getDataType();
                dataSortOrder = expression.getSortOrder();
                isNullable = expression.isNullable();
                expression.evaluate(new ValueGetterTuple(valueGetter, ts), ptr);
            } else {
                Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                dataColumnType = field.getDataType();
                ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                dataSortOrder = field.getSortOrder();
                isNullable = field.isNullable();
            }
            boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
            PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
            boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
            boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
            if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
                output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                if (!isBytesComparable) {
                    indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
                }
                if (isDataColumnInverted != isIndexColumnDesc) {
                    writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                } else {
                    output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                }
            }
            if (!indexColumnType.isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] indexRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        if (isIndexSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            indexRowKey[0] = saltByte;
        }
        return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
Also used: DataOutput (java.io.DataOutput), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), DataOutputStream (java.io.DataOutputStream), BitSet (org.apache.phoenix.util.BitSet), SortOrder (org.apache.phoenix.schema.SortOrder), TrustedByteArrayOutputStream (org.apache.phoenix.util.TrustedByteArrayOutputStream), IOException (java.io.IOException), Field (org.apache.phoenix.schema.ValueSchema.Field), PDataType (org.apache.phoenix.schema.types.PDataType), Expression (org.apache.phoenix.expression.Expression), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression), SingleCellConstructorExpression (org.apache.phoenix.expression.SingleCellConstructorExpression), SingleCellColumnExpression (org.apache.phoenix.expression.SingleCellColumnExpression), CoerceExpression (org.apache.phoenix.expression.CoerceExpression), ValueGetterTuple (org.apache.phoenix.schema.tuple.ValueGetterTuple)
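Both builders write a zero placeholder byte first and then overwrite position 0 with a salt derived from the rest of the key. A simplified stand-in for SaltingUtil.getSaltingByte to illustrate the bucket-prefix idea; the actual hash Phoenix uses differs, and the class and method here are hypothetical:

public class SaltSketch {

    // Hash the key bytes after the salt position and reduce modulo the number
    // of buckets, so rows spread evenly across nSaltBuckets region prefixes.
    static byte getSaltingByte(byte[] key, int offset, int length, int nSaltBuckets) {
        int hash = 1;
        for (int i = offset; i < offset + length; i++) {
            hash = 31 * hash + key[i];
        }
        return (byte) Math.abs(hash % nSaltBuckets);
    }

    public static void main(String[] args) {
        byte[] rowKey = { 0, 'a', 'b', 'c' }; // index 0 is the salt placeholder
        rowKey[0] = getSaltingByte(rowKey, 1, rowKey.length - 1, 8);
        System.out.println("salt bucket = " + rowKey[0]);
    }
}

Because the salt depends only on the bytes after it, it can be computed once the full key (minus trailing nulls) is known, which is why both methods patch it in as the final step.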

Example 93 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in the Apache Phoenix project.

From the class IndexMaintainer, method hasIndexedColumnChanged.

private boolean hasIndexedColumnChanged(ValueGetter oldState, Collection<? extends Cell> pendingUpdates, long ts) throws IOException {
    if (pendingUpdates.isEmpty()) {
        return false;
    }
    Map<ColumnReference, Cell> newState = Maps.newHashMapWithExpectedSize(pendingUpdates.size());
    for (Cell kv : pendingUpdates) {
        newState.put(new ColumnReference(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv)), kv);
    }
    for (ColumnReference ref : indexedColumns) {
        Cell newValue = newState.get(ref);
        if (newValue != null) {
            // Indexed column has potentially changed
            ImmutableBytesWritable oldValue = oldState.getLatestValue(ref, ts);
            boolean newValueSetAsNull = (newValue.getTypeByte() == Type.DeleteColumn.getCode() || newValue.getTypeByte() == Type.Delete.getCode() || CellUtil.matchingValue(newValue, HConstants.EMPTY_BYTE_ARRAY));
            boolean oldValueSetAsNull = oldValue == null || oldValue.getLength() == 0;
            // If the new value is being set to null and the old value was already null,
            // then just skip to the next indexed column.
            if (newValueSetAsNull && oldValueSetAsNull) {
                continue;
            }
            if (oldValueSetAsNull || newValueSetAsNull) {
                return true;
            }
            // If the old value is different than the new value, the index row needs to be deleted
            if (Bytes.compareTo(oldValue.get(), oldValue.getOffset(), oldValue.getLength(), newValue.getValueArray(), newValue.getValueOffset(), newValue.getValueLength()) != 0) {
                return true;
            }
        }
    }
    return false;
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), Cell (org.apache.hadoop.hbase.Cell), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
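The null handling above reduces to a small decision table: both sides null (or empty) means no change, exactly one side null means a change, and otherwise the bytes are compared. A minimal, self-contained sketch of that logic in isolation; hasChanged and the class name are hypothetical, not Phoenix API:

import java.util.Arrays;

public class IndexedValueChangeSketch {

    // Mirrors the comparison in hasIndexedColumnChanged: null/empty on both
    // sides is "no change", null/empty on one side is "changed", and two
    // non-empty values are compared byte-for-byte.
    static boolean hasChanged(byte[] oldValue, byte[] newValue) {
        boolean oldNull = oldValue == null || oldValue.length == 0;
        boolean newNull = newValue == null || newValue.length == 0;
        if (oldNull && newNull) {
            return false; // both absent: skip this column
        }
        if (oldNull || newNull) {
            return true;  // value appeared or disappeared
        }
        return !Arrays.equals(oldValue, newValue);
    }

    public static void main(String[] args) {
        System.out.println(hasChanged(null, new byte[0]));                 // false
        System.out.println(hasChanged(null, new byte[] { 1 }));            // true
        System.out.println(hasChanged(new byte[] { 1 }, new byte[] { 1 })); // false
    }
}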

Example 94 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in the Apache Phoenix project.

From the class PhoenixIndexCodec, method getIndexUpserts.

@Override
public Iterable<IndexUpdate> getIndexUpserts(TableState state, IndexMetaData context) throws IOException {
    PhoenixIndexMetaData metaData = (PhoenixIndexMetaData) context;
    List<IndexMaintainer> indexMaintainers = metaData.getIndexMaintainers();
    if (indexMaintainers.get(0).isRowDeleted(state.getPendingUpdate())) {
        return Collections.emptyList();
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ptr.set(state.getCurrentRowKey());
    List<IndexUpdate> indexUpdates = Lists.newArrayList();
    for (IndexMaintainer maintainer : indexMaintainers) {
        Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(maintainer.getAllColumns(), metaData.getReplayWrite() != null, false, context);
        ValueGetter valueGetter = statePair.getFirst();
        IndexUpdate indexUpdate = statePair.getSecond();
        indexUpdate.setTable(maintainer.isLocalIndex() ? tableName : maintainer.getIndexTableName());
        Put put = maintainer.buildUpdateMutation(KV_BUILDER, valueGetter, ptr, state.getCurrentTimestamp(), regionStartKey, regionEndKey);
        indexUpdate.setUpdate(put);
        indexUpdates.add(indexUpdate);
    }
    return indexUpdates;
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), IndexUpdate (org.apache.phoenix.hbase.index.covered.IndexUpdate), Put (org.apache.hadoop.hbase.client.Put), ValueGetter (org.apache.phoenix.hbase.index.ValueGetter)
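Note how a single ImmutableBytesWritable is reused as a pointer into the current row key for every maintainer: set() only records the array reference plus offset and length, so no bytes are copied. A small self-contained illustration of that pointer behavior:

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

public class PtrSketch {
    public static void main(String[] args) {
        byte[] rowKey = { 't', '1', 0, 'k', 'e', 'y' };
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        // set() stores a reference plus offset/length; nothing is copied,
        // which is why the same ptr can be cheaply repointed per maintainer.
        ptr.set(rowKey);
        System.out.println(ptr.getLength());  // 6
        ptr.set(rowKey, 3, 3);                // a view of just "key"
        System.out.println(new String(ptr.get(), ptr.getOffset(), ptr.getLength()));
    }
}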

Example 95 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in the Apache Phoenix project.

From the class PhoenixIndexCodec, method getIndexDeletes.

@Override
public Iterable<IndexUpdate> getIndexDeletes(TableState state, IndexMetaData context) throws IOException {
    PhoenixIndexMetaData metaData = (PhoenixIndexMetaData) context;
    List<IndexMaintainer> indexMaintainers = metaData.getIndexMaintainers();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ptr.set(state.getCurrentRowKey());
    List<IndexUpdate> indexUpdates = Lists.newArrayList();
    for (IndexMaintainer maintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> cols = Sets.newHashSet(maintainer.getAllColumns());
        cols.add(new ColumnReference(indexMaintainers.get(0).getDataEmptyKeyValueCF(), indexMaintainers.get(0).getEmptyKeyValueQualifier()));
        Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(cols, metaData.getReplayWrite() != null, true, context);
        ValueGetter valueGetter = statePair.getFirst();
        if (valueGetter != null) {
            IndexUpdate indexUpdate = statePair.getSecond();
            indexUpdate.setTable(maintainer.isLocalIndex() ? tableName : maintainer.getIndexTableName());
            Delete delete = maintainer.buildDeleteMutation(KV_BUILDER, valueGetter, ptr, state.getPendingUpdate(), state.getCurrentTimestamp(), regionStartKey, regionEndKey);
            indexUpdate.setUpdate(delete);
            indexUpdates.add(indexUpdate);
        }
    }
    return indexUpdates;
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), IndexUpdate (org.apache.phoenix.hbase.index.covered.IndexUpdate), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference), ValueGetter (org.apache.phoenix.hbase.index.ValueGetter)

Aggregations

ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 296
Test (org.junit.Test): 86
Expression (org.apache.phoenix.expression.Expression): 36
IOException (java.io.IOException): 33
PhoenixArray (org.apache.phoenix.schema.types.PhoenixArray): 30
ArrayList (java.util.ArrayList): 28
Configuration (org.apache.hadoop.conf.Configuration): 28
Result (org.apache.hadoop.hbase.client.Result): 28
Cell (org.apache.hadoop.hbase.Cell): 27
KeyValue (org.apache.hadoop.hbase.KeyValue): 27
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 27
PTable (org.apache.phoenix.schema.PTable): 27
PDataType (org.apache.phoenix.schema.types.PDataType): 26
PSmallint (org.apache.phoenix.schema.types.PSmallint): 25
PTinyint (org.apache.phoenix.schema.types.PTinyint): 23
Put (org.apache.hadoop.hbase.client.Put): 20
PUnsignedSmallint (org.apache.phoenix.schema.types.PUnsignedSmallint): 20
PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint): 20
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 19
List (java.util.List): 18