Example 11 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

Class IndexMaintainer, method createGetterFromKeyValues:

public ValueGetter createGetterFromKeyValues(final byte[] rowKey, Collection<? extends Cell> pendingUpdates) {
    final Map<ColumnReference, ImmutableBytesPtr> valueMap = Maps.newHashMapWithExpectedSize(pendingUpdates.size());
    for (Cell kv : pendingUpdates) {
        // create new pointers to each part of the kv
        ImmutableBytesPtr value = new ImmutableBytesPtr(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
        valueMap.put(new ColumnReference(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength()), value);
    }
    return new ValueGetter() {

        @Override
        public ImmutableBytesWritable getLatestValue(ColumnReference ref) {
            if (ref.equals(dataEmptyKeyValueRef))
                return null;
            return valueMap.get(ref);
        }

        @Override
        public byte[] getRowKey() {
            return rowKey;
        }
    };
}
Also used: ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Cell (org.apache.hadoop.hbase.Cell), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference), ValueGetter (org.apache.phoenix.hbase.index.ValueGetter)
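
The pattern above hinges on ImmutableBytesPtr implementing equals and hashCode over an (array, offset, length) range, so cell values can be mapped without copying them out of the underlying cells. Below is a minimal standalone sketch of that zero-copy pointer idea; BytesPtr and the surrounding class are illustrative stand-ins, not Phoenix APIs.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

// Illustrative stand-in for ImmutableBytesPtr: wraps an (array, offset, length)
// range without copying, with equals/hashCode computed over just that range so
// it can serve as a HashMap key or value.
final class BytesPtr {
    final byte[] bytes; final int offset; final int length;
    BytesPtr(byte[] bytes, int offset, int length) {
        this.bytes = bytes; this.offset = offset; this.length = length;
    }
    @Override public int hashCode() {
        int h = 1;
        for (int i = offset; i < offset + length; i++) h = 31 * h + bytes[i];
        return h;
    }
    @Override public boolean equals(Object o) {
        if (!(o instanceof BytesPtr)) return false;
        BytesPtr other = (BytesPtr) o;
        if (length != other.length) return false;
        for (int i = 0; i < length; i++)
            if (bytes[offset + i] != other.bytes[other.offset + i]) return false;
        return true;
    }
}

public class ValueGetterSketch {
    public static void main(String[] args) {
        // Backing bytes of a cell: "cf:q=hello"; the value "hello" starts at offset 5.
        byte[] cellBacking = "cf:q=hello".getBytes();
        Map<String, BytesPtr> valueMap = new HashMap<>();
        valueMap.put("cf:q", new BytesPtr(cellBacking, 5, 5));
        BytesPtr v = valueMap.get("cf:q");
        // Copy only at the point of use, as copyBytesIfNecessary does in Phoenix.
        System.out.println(new String(Arrays.copyOfRange(v.bytes, v.offset, v.offset + v.length)));
        // prints: hello
    }
}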

Example 12 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

Class IndexMaintainer, method readFields:

// Only called by code older than our 4.10 release
@Deprecated
@Override
public void readFields(DataInput input) throws IOException {
    // Sign encodes isMultiTenant; magnitude minus one is the salt bucket count
    int encodedIndexSaltBucketsAndMultiTenant = WritableUtils.readVInt(input);
    isMultiTenant = encodedIndexSaltBucketsAndMultiTenant < 0;
    nIndexSaltBuckets = Math.abs(encodedIndexSaltBucketsAndMultiTenant) - 1;
    // Sign encodes whether a view index id follows; magnitude minus one is the indexed column count
    int encodedIndexedColumnsAndViewId = WritableUtils.readVInt(input);
    boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0;
    if (hasViewIndexId) {
        // Fixed length
        viewIndexId = new byte[MetaDataUtil.getViewIndexIdDataType().getByteSize()];
        input.readFully(viewIndexId);
    }
    int nIndexedColumns = Math.abs(encodedIndexedColumnsAndViewId) - 1;
    indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        byte[] cf = Bytes.readByteArray(input);
        byte[] cq = Bytes.readByteArray(input);
        indexedColumns.add(new ColumnReference(cf, cq));
    }
    indexedColumnTypes = Lists.newArrayListWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        PDataType type = PDataType.values()[WritableUtils.readVInt(input)];
        indexedColumnTypes.add(type);
    }
    int encodedCoveredColumnsAndLocalIndex = WritableUtils.readVInt(input);
    isLocalIndex = encodedCoveredColumnsAndLocalIndex < 0;
    int nCoveredColumns = Math.abs(encodedCoveredColumnsAndLocalIndex) - 1;
    coveredColumnsMap = Maps.newHashMapWithExpectedSize(nCoveredColumns);
    for (int i = 0; i < nCoveredColumns; i++) {
        byte[] dataTableCf = Bytes.readByteArray(input);
        byte[] dataTableCq = Bytes.readByteArray(input);
        ColumnReference dataTableRef = new ColumnReference(dataTableCf, dataTableCq);
        byte[] indexTableCf = isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(dataTableCf) : dataTableCf;
        byte[] indexTableCq = IndexUtil.getIndexColumnName(dataTableCf, dataTableCq);
        ColumnReference indexTableRef = new ColumnReference(indexTableCf, indexTableCq);
        coveredColumnsMap.put(dataTableRef, indexTableRef);
    }
    // Hack to serialize whether the index row key is optimizable
    int len = WritableUtils.readVInt(input);
    if (len < 0) {
        rowKeyOrderOptimizable = false;
        len *= -1;
    } else {
        rowKeyOrderOptimizable = true;
    }
    indexTableName = new byte[len];
    input.readFully(indexTableName, 0, len);
    dataEmptyKeyValueCF = Bytes.readByteArray(input);
    len = WritableUtils.readVInt(input);
    //TODO remove this in the next major release
    boolean isNewClient = false;
    if (len < 0) {
        isNewClient = true;
        len = Math.abs(len);
    }
    byte[] emptyKeyValueCF = new byte[len];
    input.readFully(emptyKeyValueCF, 0, len);
    emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueCF);
    if (isNewClient) {
        int numIndexedExpressions = WritableUtils.readVInt(input);
        indexedExpressions = Lists.newArrayListWithExpectedSize(numIndexedExpressions);
        for (int i = 0; i < numIndexedExpressions; i++) {
            Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
            expression.readFields(input);
            indexedExpressions.add(expression);
        }
    } else {
        indexedExpressions = Lists.newArrayListWithExpectedSize(indexedColumns.size());
        Iterator<ColumnReference> colReferenceIter = indexedColumns.iterator();
        Iterator<PDataType> dataTypeIter = indexedColumnTypes.iterator();
        while (colReferenceIter.hasNext()) {
            ColumnReference colRef = colReferenceIter.next();
            final PDataType dataType = dataTypeIter.next();
            indexedExpressions.add(new KeyValueColumnExpression(new PDatum() {

                @Override
                public boolean isNullable() {
                    return true;
                }

                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }

                @Override
                public Integer getScale() {
                    return null;
                }

                @Override
                public Integer getMaxLength() {
                    return null;
                }

                @Override
                public PDataType getDataType() {
                    return dataType;
                }
            }, colRef.getFamily(), colRef.getQualifier()));
        }
    }
    rowKeyMetaData = newRowKeyMetaData();
    rowKeyMetaData.readFields(input);
    int nDataCFs = WritableUtils.readVInt(input);
    // Encode indexWALDisabled in nDataCFs
    indexWALDisabled = nDataCFs < 0;
    this.nDataCFs = Math.abs(nDataCFs) - 1;
    int encodedEstimatedIndexRowKeyBytesAndImmutableRows = WritableUtils.readVInt(input);
    this.immutableRows = encodedEstimatedIndexRowKeyBytesAndImmutableRows < 0;
    this.estimatedIndexRowKeyBytes = Math.abs(encodedEstimatedIndexRowKeyBytesAndImmutableRows);
    // Needed for backward compatibility. Clients older than 4.10 will have non-encoded tables.
    this.immutableStorageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
    this.encodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
    initCachedState();
}
Also used: PDatum (org.apache.phoenix.schema.PDatum), PDataType (org.apache.phoenix.schema.types.PDataType), KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression), SingleCellConstructorExpression (org.apache.phoenix.expression.SingleCellConstructorExpression), Expression (org.apache.phoenix.expression.Expression), SingleCellColumnExpression (org.apache.phoenix.expression.SingleCellColumnExpression), CoerceExpression (org.apache.phoenix.expression.CoerceExpression), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
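
readFields repeatedly unpacks two values from a single vint: a boolean carried by the sign, and a count carried by the magnitude minus one (see encodedIndexSaltBucketsAndMultiTenant above). A minimal standalone sketch of that convention, with illustrative names:

// A combined value encodes a boolean in the sign and a count as abs(v) - 1,
// mirroring fields like encodedIndexSaltBucketsAndMultiTenant above.
public class SignFlagEncoding {
    static int encode(boolean flag, int count) {
        int v = count + 1;    // shift by one so a count of 0 is still representable
        return flag ? -v : v; // the sign carries the boolean
    }
    static boolean decodeFlag(int encoded) { return encoded < 0; }
    static int decodeCount(int encoded) { return Math.abs(encoded) - 1; }

    public static void main(String[] args) {
        int e = encode(true, 4);            // e == -5
        System.out.println(decodeFlag(e));  // true  (e.g. isMultiTenant)
        System.out.println(decodeCount(e)); // 4     (e.g. nIndexSaltBuckets)
    }
}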

Example 13 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

Class PhoenixTransactionalIndexer, method getIndexUpdates:

private Collection<Pair<Mutation, byte[]>> getIndexUpdates(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData, Iterator<Mutation> mutationIterator, byte[] txRollbackAttribute) throws IOException {
    Transaction tx = indexMetaData.getTransaction();
    if (tx == null) {
        throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
    }
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    ResultScanner currentScanner = null;
    TransactionAwareHTable txTable = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (mutationIterator.hasNext()) {
        Mutation m = mutationIterator.next();
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        if (mutations != findPriorValueMutations && isDeleteMutation(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
    }
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
    try {
        // If any mutations need the prior row state, run a skip scan to fetch it
        if (!findPriorValueMutations.isEmpty()) {
            List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
            for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
                keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
            }
            Scan scan = new Scan();
            // Project all mutable columns
            for (ColumnReference ref : mutableColumns) {
                scan.addColumn(ref.getFamily(), ref.getQualifier());
            }
            /*
                 * Indexes inherit the storage scheme of the data table which means all the indexes have the same
                 * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
                 * supporting new indexes over existing data tables to have a different storage scheme than the data
                 * table.
                 */
            byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
            // Project empty key value column
            scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
            ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
            scanRanges.initializeScan(scan);
            TableName tableName = env.getRegion().getRegionInfo().getTable();
            HTableInterface htable = env.getTable(tableName);
            txTable = new TransactionAwareHTable(htable);
            txTable.startTx(tx);
            // For rollback, we need to see all versions, including
            // the last committed version as there may be multiple
            // checkpointed versions.
            SkipScanFilter filter = scanRanges.getSkipScanFilter();
            if (isRollback) {
                filter = new SkipScanFilter(filter, true);
                tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
            }
            scan.setFilter(filter);
            currentScanner = txTable.getScanner(scan);
        }
        if (isRollback) {
            processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations);
        } else {
            processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
        }
    } finally {
        if (txTable != null)
            txTable.close();
    }
    return indexUpdates;
}
Also used: MultiMutation (org.apache.phoenix.hbase.index.MultiMutation), HashMap (java.util.HashMap), KeyRange (org.apache.phoenix.query.KeyRange), ArrayList (java.util.ArrayList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter), Pair (org.apache.hadoop.hbase.util.Pair), TransactionAwareHTable (org.apache.tephra.hbase.TransactionAwareHTable), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ScanRanges (org.apache.phoenix.compile.ScanRanges), TableName (org.apache.hadoop.hbase.TableName), Transaction (org.apache.tephra.Transaction), Scan (org.apache.hadoop.hbase.client.Scan), Mutation (org.apache.hadoop.hbase.client.Mutation), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
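
The two maps above implement a small optimization: for immutable rows outside a rollback, only delete mutations need the prior row state, so those are collected into a separate, smaller map; otherwise both references point at the same map. A standalone sketch of that grouping logic, with strings standing in for the ImmutableBytesPtr row keys and Mutation objects (illustrative names, not Phoenix APIs):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MutationGroupingSketch {
    public static void main(String[] args) {
        // All mutations in the batch, grouped by row key.
        Map<String, List<String>> mutations = new HashMap<>();
        // For immutable rows outside a rollback, only deletes need prior state,
        // so they get their own smaller map; otherwise reuse the same map.
        boolean isImmutable = true, isRollback = false;
        Map<String, List<String>> findPriorValueMutations =
                (isImmutable && !isRollback) ? new HashMap<>() : mutations;
        String[][] batch = { {"row1", "PUT"}, {"row1", "DELETE"}, {"row2", "PUT"} };
        for (String[] m : batch) {
            if (mutations != findPriorValueMutations && m[1].equals("DELETE")) {
                findPriorValueMutations
                        .computeIfAbsent(m[0], k -> new ArrayList<>()).add(m[1]);
            }
            mutations.computeIfAbsent(m[0], k -> new ArrayList<>()).add(m[1]);
        }
        System.out.println(mutations);               // e.g. {row1=[PUT, DELETE], row2=[PUT]}
        System.out.println(findPriorValueMutations); // e.g. {row1=[DELETE]}
    }
}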

Example 14 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

Class KeyValueUtil, method getEstimatedRowSize:

/**
 * Estimates the storage size of a row.
 * @param mutations map from table to row to RowMutationState
 * @return estimated row size
 */
public static long getEstimatedRowSize(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> mutations) {
    long size = 0;
    // iterate over tables
    for (Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> tableEntry : mutations.entrySet()) {
        PTable table = tableEntry.getKey().getTable();
        // iterate over rows
        for (Entry<ImmutableBytesPtr, RowMutationState> rowEntry : tableEntry.getValue().entrySet()) {
            int rowLength = rowEntry.getKey().getLength();
            Map<PColumn, byte[]> colValueMap = rowEntry.getValue().getColumnValues();
            switch(table.getImmutableStorageScheme()) {
                case ONE_CELL_PER_COLUMN:
                    // iterate over columns
                    for (Entry<PColumn, byte[]> colValueEntry : colValueMap.entrySet()) {
                        PColumn pColumn = colValueEntry.getKey();
                        size += KeyValue.getKeyValueDataStructureSize(rowLength, pColumn.getFamilyName().getBytes().length, pColumn.getColumnQualifierBytes().length, colValueEntry.getValue().length);
                    }
                    break;
                case SINGLE_CELL_ARRAY_WITH_OFFSETS:
                    // we store all the column values in a single key value that contains all the
                    // column values followed by an offset array
                    size += PArrayDataTypeEncoder.getEstimatedByteSize(table, rowLength, colValueMap);
                    break;
            }
            // count the empty key value
            Pair<byte[], byte[]> emptyKeyValueInfo = EncodedColumnsUtil.getEmptyKeyValueInfo(table);
            size += KeyValue.getKeyValueDataStructureSize(rowLength, SchemaUtil.getEmptyColumnFamilyPtr(table).getLength(), emptyKeyValueInfo.getFirst().length, emptyKeyValueInfo.getSecond().length);
        }
    }
    return size;
}
Also used: ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PTable (org.apache.phoenix.schema.PTable), PColumn (org.apache.phoenix.schema.PColumn), Map (java.util.Map), TableRef (org.apache.phoenix.schema.TableRef), RowMutationState (org.apache.phoenix.execute.MutationState.RowMutationState)
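
The size estimate leans on KeyValue.getKeyValueDataStructureSize, which sums the fixed KeyValue layout overhead with the variable row, family, qualifier, and value lengths. A back-of-envelope sketch of that arithmetic, assuming the standard HBase KeyValue wire layout (illustrative helper, not the HBase API itself):

public class KeyValueSizeSketch {
    // Mirrors what KeyValue.getKeyValueDataStructureSize computes: fixed layout
    // overhead plus the variable row, family, qualifier, and value lengths.
    static long keyValueSize(int rowLen, int famLen, int qualLen, int valLen) {
        long keyLen = 2 + rowLen   // 2-byte row length prefix + row
                    + 1 + famLen   // 1-byte family length prefix + family
                    + qualLen      // qualifier
                    + 8            // timestamp
                    + 1;           // type byte (Put, DeleteColumn, ...)
        return 4 + 4 + keyLen + valLen; // 4-byte key length + 4-byte value length headers
    }
    public static void main(String[] args) {
        // e.g. row "r1", family "0", qualifier "NAME", 5-byte value
        System.out.println(keyValueSize(2, 1, 4, 5)); // 32
    }
}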

Example 15 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

Class RoundFloorCeilExpressionsTest, method verifyKeyRange:

/**
 * Checks that a given KeyRange's boundaries match the given rounding expression type,
 * rounding scale, relational operator, and right-hand-side decimal.
 * Does so by checking the decimal values immediately on either side of the KeyRange border
 * and verifying that they either match or do not match the "where clause" formed by the
 * rounding type, scale, relation, and rhs decimal. If a relation should produce an unbounded
 * upper or lower range, verifies that that end of the range is unbounded. Finally, if the
 * range is empty, verifies that the rhs decimal required more precision than could be
 * produced by the rounding expression.
 * @param exprType the rounding expression type used to create this KeyRange
 * @param scale the rounding scale used to create this KeyRange
 * @param relation the relational operator used to create this KeyRange
 * @param rhs the right-hand-side decimal used to create this KeyRange
 * @param range the KeyRange to test
 */
private void verifyKeyRange(RoundingType exprType, int scale, Relation relation, BigDecimal rhs, KeyRange range) throws SQLException {
    // dump of values for debugging
    final String dump = getMessage(exprType, scale, relation, rhs, range);
    ImmutableBytesPtr rhsPtr = new ImmutableBytesPtr();
    LiteralExpression.newConstant(rhs, PDecimal.INSTANCE).evaluate(null, rhsPtr);
    ImmutableBytesPtr lhsPtr = new ImmutableBytesPtr();
    // we should only get an empty range if we can verify that precision makes a match impossible
    if (range == KeyRange.EMPTY_RANGE) {
        assertTrue("should only get empty key range for unmatchable rhs precision (" + dump + ")", rhs.scale() > scale);
        assertEquals("should only get empty key range for equals checks (" + dump + ")", Relation.EQUAL, relation);
        return;
    }
    // if it should have an upper bound
    if (relation != Relation.GREATER && relation != Relation.GREATER_OR_EQUAL) {
        // figure out what the upper bound is
        byte[] highestHighIncluded;
        byte[] lowestHighExcluded;
        if (range.isUpperInclusive()) {
            highestHighIncluded = range.getUpperRange();
            lowestHighExcluded = nextDecimalKey(range.getUpperRange());
        } else {
            highestHighIncluded = prevDecimalKey(range.getUpperRange());
            lowestHighExcluded = range.getUpperRange();
        }
        // check on either side of the boundary to validate that it is in fact the boundary
        exprType.getExpression(highestHighIncluded, scale).evaluate(null, lhsPtr);
        assertTrue("incorrectly excluding " + PDecimal.INSTANCE.toObject(highestHighIncluded) + " in upper bound for " + dump, relation.compare(lhsPtr, rhsPtr));
        exprType.getExpression(lowestHighExcluded, scale).evaluate(null, lhsPtr);
        assertFalse("incorrectly including " + PDecimal.INSTANCE.toObject(lowestHighExcluded) + " in upper bound for " + dump, relation.compare(lhsPtr, rhsPtr));
    } else {
        // otherwise verify that it does not have an upper bound
        assertTrue("should not have a upper bound for " + dump, range.upperUnbound());
    }
    // if it should have a lower bound
    if (relation != Relation.LESS && relation != Relation.LESS_OR_EQUAL) {
        // figure out what the lower bound is
        byte[] lowestLowIncluded;
        byte[] highestLowExcluded;
        if (range.isLowerInclusive()) {
            lowestLowIncluded = range.getLowerRange();
            highestLowExcluded = prevDecimalKey(range.getLowerRange());
        } else {
            lowestLowIncluded = nextDecimalKey(range.getLowerRange());
            highestLowExcluded = range.getLowerRange();
        }
        // check on either side of the boundary to validate that it is in fact the boundary
        exprType.getExpression(lowestLowIncluded, scale).evaluate(null, lhsPtr);
        assertTrue("incorrectly excluding " + PDecimal.INSTANCE.toObject(lowestLowIncluded) + " in lower bound for " + dump, relation.compare(lhsPtr, rhsPtr));
        exprType.getExpression(highestLowExcluded, scale).evaluate(null, lhsPtr);
        assertFalse("incorrectly including " + PDecimal.INSTANCE.toObject(highestLowExcluded) + " in lower bound for " + dump, relation.compare(lhsPtr, rhsPtr));
    } else {
        // otherwise verify that it does not have a lower bound
        assertTrue("should not have a lower bound for " + dump, range.lowerUnbound());
    }
}
Also used: ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)
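
The test's core trick is to probe the value immediately on each side of a KeyRange boundary: the included side must satisfy the predicate and the excluded side must not. A minimal standalone sketch of that boundary-probing pattern, reduced to integers (run with assertions enabled via java -ea; names are illustrative):

import java.util.function.IntPredicate;

public class BoundaryProbeSketch {
    // For an inclusive upper bound: the bound itself must match the predicate
    // and its successor must not, mirroring the highestHighIncluded /
    // lowestHighExcluded checks above.
    static void verifyInclusiveUpperBound(int upper, IntPredicate inRange) {
        assert inRange.test(upper) : "incorrectly excluding " + upper;
        assert !inRange.test(upper + 1) : "incorrectly including " + (upper + 1);
    }
    public static void main(String[] args) {
        // WHERE FLOOR(x / 10) <= 4 has the inclusive upper bound 49 on x
        verifyInclusiveUpperBound(49, x -> Math.floorDiv(x, 10) <= 4);
        System.out.println("boundary verified");
    }
}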

Aggregations

Classes most often used together with ImmutableBytesPtr, with usage counts:

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 120
Mutation (org.apache.hadoop.hbase.client.Mutation): 31
PTable (org.apache.phoenix.schema.PTable): 28
ArrayList (java.util.ArrayList): 27
Region (org.apache.hadoop.hbase.regionserver.Region): 22
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 22
Test (org.junit.Test): 21
Cell (org.apache.hadoop.hbase.Cell): 20
Put (org.apache.hadoop.hbase.client.Put): 18
List (java.util.List): 15
Scan (org.apache.hadoop.hbase.client.Scan): 15
Pair (org.apache.hadoop.hbase.util.Pair): 15
IOException (java.io.IOException): 14
Expression (org.apache.phoenix.expression.Expression): 14
PColumn (org.apache.phoenix.schema.PColumn): 14
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 13
PSmallint (org.apache.phoenix.schema.types.PSmallint): 12
HashMap (java.util.HashMap): 11
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 11
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 11