Example 6 with KeyRange

Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

The class PhoenixTransactionalIndexer, method getIndexUpdates.

private Collection<Pair<Mutation, byte[]>> getIndexUpdates(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData, Iterator<Mutation> mutationIterator, byte[] txRollbackAttribute) throws IOException {
    Transaction tx = indexMetaData.getTransaction();
    if (tx == null) {
        throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
    }
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    ResultScanner currentScanner = null;
    TransactionAwareHTable txTable = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (mutationIterator.hasNext()) {
        Mutation m = mutationIterator.next();
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        if (mutations != findPriorValueMutations && isDeleteMutation(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
    }
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
    try {
        // Scan for the prior row state of the rows that need it (all rows when the
        // table is mutable or this is a rollback; delete rows only otherwise).
        if (!findPriorValueMutations.isEmpty()) {
            List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
            for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
                keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
            }
            Scan scan = new Scan();
            // Project all mutable columns
            for (ColumnReference ref : mutableColumns) {
                scan.addColumn(ref.getFamily(), ref.getQualifier());
            }
            /*
             * Indexes inherit the storage scheme of the data table which means all the indexes have the same
             * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
             * supporting new indexes over existing data tables to have a different storage scheme than the data
             * table.
             */
            byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
            // Project empty key value column
            scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
            ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
            scanRanges.initializeScan(scan);
            TableName tableName = env.getRegion().getRegionInfo().getTable();
            HTableInterface htable = env.getTable(tableName);
            txTable = new TransactionAwareHTable(htable);
            txTable.startTx(tx);
            // For rollback, we need to see all versions, including
            // the last committed version as there may be multiple
            // checkpointed versions.
            SkipScanFilter filter = scanRanges.getSkipScanFilter();
            if (isRollback) {
                filter = new SkipScanFilter(filter, true);
                tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
            }
            scan.setFilter(filter);
            currentScanner = txTable.getScanner(scan);
        }
        if (isRollback) {
            processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations);
        } else {
            processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
        }
    } finally {
        if (txTable != null)
            txTable.close();
    }
    return indexUpdates;
}
Also used: MultiMutation (org.apache.phoenix.hbase.index.MultiMutation), HashMap (java.util.HashMap), KeyRange (org.apache.phoenix.query.KeyRange), ArrayList (java.util.ArrayList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter), Pair (org.apache.hadoop.hbase.util.Pair), TransactionAwareHTable (org.apache.tephra.hbase.TransactionAwareHTable), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ScanRanges (org.apache.phoenix.compile.ScanRanges), TableName (org.apache.hadoop.hbase.TableName), Transaction (org.apache.tephra.Transaction), Scan (org.apache.hadoop.hbase.client.Scan), Mutation (org.apache.hadoop.hbase.client.Mutation), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
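
The core pattern in this method, building point KeyRanges from a batch's row keys and driving a skip scan over exactly those rows, can be isolated as a small helper. This is a minimal sketch rather than Phoenix source: newSkipScan is a hypothetical name and the row keys come from the caller, while the Phoenix calls mirror the method body above.

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.util.ScanUtil;
import org.apache.phoenix.util.SchemaUtil;
import com.google.common.collect.Lists;

// Hypothetical helper: build a skip scan over an explicit set of row keys.
public static Scan newSkipScan(List<byte[]> rowKeys) {
    List<KeyRange> keys = Lists.newArrayListWithExpectedSize(rowKeys.size());
    for (byte[] rowKey : rowKeys) {
        // One point range per row key, as in the loop over findPriorValueMutations
        keys.add(PVarbinary.INSTANCE.getKeyRange(rowKey));
    }
    Scan scan = new Scan();
    ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA,
            Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN,
            KeyRange.EVERYTHING_RANGE, null, true, -1);
    // Set the scan's start/stop rows from the ranges, then skip the gaps between keys
    scanRanges.initializeScan(scan);
    scan.setFilter(scanRanges.getSkipScanFilter());
    return scan;
}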

Example 7 with KeyRange

Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

The class ScanUtil, method newScanRanges.

public static ScanRanges newScanRanges(List<? extends Mutation> mutations) throws SQLException {
    List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
    for (Mutation m : mutations) {
        keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow()));
    }
    ScanRanges keyRanges = ScanRanges.createPointLookup(keys);
    return keyRanges;
}
Also used: KeyRange (org.apache.phoenix.query.KeyRange), Mutation (org.apache.hadoop.hbase.client.Mutation), ScanRanges (org.apache.phoenix.compile.ScanRanges)
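
A caller could use newScanRanges to restrict a scan to exactly the rows a batch of mutations touches. A minimal sketch with made-up row keys; buildBatchScan is a hypothetical name:

import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.util.ScanUtil;

// Hypothetical helper: a point-lookup scan covering only the batch's row keys.
public static Scan buildBatchScan() throws SQLException {
    List<? extends Mutation> mutations = Arrays.asList(
            new Put(Bytes.toBytes("row1")),
            new Put(Bytes.toBytes("row2")));
    ScanRanges ranges = ScanUtil.newScanRanges(mutations);
    Scan scan = new Scan();
    // Restrict start/stop rows to the two point keys and skip the gap between them
    ranges.initializeScan(scan);
    scan.setFilter(ranges.getSkipScanFilter());
    return scan;
}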

Example 8 with KeyRange

Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

The class SchemaUtil, method getMaxKeyLength.

public static int getMaxKeyLength(RowKeySchema schema, List<List<KeyRange>> slots) {
    int maxKeyLength = getTerminatorCount(schema) * 2;
    for (List<KeyRange> slot : slots) {
        int maxSlotLength = 0;
        for (KeyRange range : slot) {
            int maxRangeLength = Math.max(range.getLowerRange().length, range.getUpperRange().length);
            if (maxSlotLength < maxRangeLength) {
                maxSlotLength = maxRangeLength;
            }
        }
        maxKeyLength += maxSlotLength;
    }
    return maxKeyLength;
}
Also used: KeyRange (org.apache.phoenix.query.KeyRange)
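
In other words, the maximum key length is the sum of each slot's widest boundary plus two bytes per terminator. The inner computation can be checked in isolation; the sketch below uses hypothetical ranges and assumes the KeyRange.getKeyRange(lower, lowerInclusive, upper, upperInclusive) factory:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.KeyRange;

// Hypothetical check of the inner loop: the widest boundary in one slot.
public static int maxSlotLength() {
    List<KeyRange> slot = Arrays.asList(
            KeyRange.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("abcd"), false),
            KeyRange.getKeyRange(Bytes.toBytes("aaaaaaaa"), true, Bytes.toBytes("zzzzzzzz"), true));
    int maxSlotLength = 0;
    for (KeyRange range : slot) {
        // Widest of the lower and upper boundary byte arrays
        maxSlotLength = Math.max(maxSlotLength,
                Math.max(range.getLowerRange().length, range.getUpperRange().length));
    }
    // Returns 8 here; getMaxKeyLength sums these slot maxima and adds
    // getTerminatorCount(schema) * 2 for the separator bytes.
    return maxSlotLength;
}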

Example 9 with KeyRange

Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

The class ScanUtil, method setKey.

public static int setKey(RowKeySchema schema, List<List<KeyRange>> slots, int[] slotSpan, int[] position, Bound bound, byte[] key, int byteOffset, int slotStartIndex, int slotEndIndex, int schemaStartIndex) {
    int offset = byteOffset;
    boolean lastInclusiveUpperSingleKey = false;
    boolean anyInclusiveUpperRangeKey = false;
    boolean lastUnboundUpper = false;
    // The index used for slots should be incremented by 1,
    // but the index of the field it represents in the schema
    // should be incremented by 1 + the value in the current slotSpan index,
    // since slotSpan stores the number of columns beyond one that the range spans.
    Field field = null;
    int i = slotStartIndex, fieldIndex = ScanUtil.getRowKeyPosition(slotSpan, slotStartIndex);
    for (i = slotStartIndex; i < slotEndIndex; i++) {
        // Build up the key by appending the bound of each key range
        // from the current position of each slot. 
        KeyRange range = slots.get(i).get(position[i]);
        // Use last slot in a multi-span column to determine if fixed width
        field = schema.getField(fieldIndex + slotSpan[i]);
        boolean isFixedWidth = field.getDataType().isFixedWidth();
        /*
         * If the current slot is unbound then stop if:
         * 1) setting the upper bound. There's no value in
         *    continuing because nothing will be filtered.
         * 2) setting the lower bound when the type is fixed length
         *    for the same reason. However, if the type is variable width
         *    continue building the key because null values will be filtered
         *    since our separator byte will be appended and incremented.
         * 3) if the range includes everything as we cannot add any more useful
         *    information to the key after that.
         */
        lastUnboundUpper = false;
        if (range.isUnbound(bound) && (bound == Bound.UPPER || isFixedWidth || range == KeyRange.EVERYTHING_RANGE)) {
            lastUnboundUpper = (bound == Bound.UPPER);
            break;
        }
        byte[] bytes = range.getRange(bound);
        System.arraycopy(bytes, 0, key, offset, bytes.length);
        offset += bytes.length;
        /*
         * We must add a terminator to a variable length key even for the last PK column if
         * the lower key is non inclusive or the upper key is inclusive. Otherwise, we'd be
         * incrementing the key value itself, and thus bumping it up too much.
         */
        boolean inclusiveUpper = range.isUpperInclusive() && bound == Bound.UPPER;
        boolean exclusiveLower = !range.isLowerInclusive() && bound == Bound.LOWER && range != KeyRange.EVERYTHING_RANGE;
        boolean exclusiveUpper = !range.isUpperInclusive() && bound == Bound.UPPER;
        // If we are setting the upper bound using an inclusive single key, we remember
        // to increment the key if we exit the loop after this iteration.
        //
        // We remember to increment the last slot if we are setting the upper bound with an
        // inclusive range key.
        //
        // We cannot combine the two flags into one because of the case of a single-key
        // inclusive bound followed by an exclusive range bound: there the key must not be
        // incremented at the end, but a combined flag would already have been set to true
        // by the single inclusive key in the middle of the key slots.
        lastInclusiveUpperSingleKey = range.isSingleKey() && inclusiveUpper;
        anyInclusiveUpperRangeKey |= !range.isSingleKey() && inclusiveUpper;
        // A null or empty byte array is always represented as a zero byte
        byte sepByte = SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), bytes.length == 0, field);
        if (!isFixedWidth && (sepByte == QueryConstants.DESC_SEPARATOR_BYTE || (!exclusiveUpper && (fieldIndex < schema.getMaxFields() || inclusiveUpper || exclusiveLower)))) {
            key[offset++] = sepByte;
            // Set lastInclusiveUpperSingleKey back to false if this is the last pk column
            // as we don't want to increment the null byte in this case
            lastInclusiveUpperSingleKey &= i < schema.getMaxFields() - 1;
        }
        if (exclusiveUpper) {
            // Stop building the key: appending further parts after an exclusive upper
            // bound would make the bound too permissive. For example, a key built for
            // k1 < 2 with a k2 = 3 part appended would match k1 = 2, k2 = 3, which is wrong.
            break;
        }
        // For an exclusive lower bound, increment the key so that the bound value
        // itself is excluded, but only after the last key part has been appended.
        if (exclusiveLower) {
            if (!ByteUtil.nextKey(key, offset)) {
                // Special case for not being able to increment: return a negative
                // byteOffset to remove this part from the key being formed. Since
                // the key has overflowed, the caller should not
                // have an end key specified.
                return -byteOffset;
            }
            // We're filtering on values being non-null here, but we still need the
            // 0xFF terminator, since DESC keys ignore the last byte as it's expected
            // to be the terminator. Without this, we'd lose the separator byte
            // just added and incremented.
            if (!isFixedWidth && bytes.length == 0 && SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, field) == QueryConstants.DESC_SEPARATOR_BYTE) {
                key[offset++] = QueryConstants.DESC_SEPARATOR_BYTE;
            }
        }
        fieldIndex += slotSpan[i] + 1;
    }
    if (lastInclusiveUpperSingleKey || anyInclusiveUpperRangeKey || lastUnboundUpper) {
        if (!ByteUtil.nextKey(key, offset)) {
            // The key has overflowed while incrementing, so the scan should not
            // have an end key specified.
            return -byteOffset;
        }
    }
    // For the lower bound, strip trailing zero separator bytes from variable-width
    // ASC columns, since row keys do not end with a separator
    // byte.
    if (bound == Bound.LOWER) {
        while (--i >= schemaStartIndex && offset > byteOffset && !(field = schema.getField(--fieldIndex)).getDataType().isFixedWidth() && field.getSortOrder() == SortOrder.ASC && key[offset - 1] == QueryConstants.SEPARATOR_BYTE) {
            offset--;
            fieldIndex -= slotSpan[i];
        }
    }
    return offset - byteOffset;
}
Also used: Field (org.apache.phoenix.schema.ValueSchema.Field), KeyRange (org.apache.phoenix.query.KeyRange)
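
The increment applied for exclusive bounds is ByteUtil.nextKey, which bumps the key bytes in place and reports overflow. A minimal sketch of that primitive with made-up key bytes; nextKeyDemo is a hypothetical name:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.util.ByteUtil;

// Hypothetical demo of the exclusive-bound increment used by setKey.
public static void nextKeyDemo() {
    byte[] key = Bytes.toBytes("ab");
    // Bumps the trailing byte in place: "ab" becomes "ac", so an exclusive
    // bound of "ab" no longer includes the bound value itself.
    boolean incremented = ByteUtil.nextKey(key, key.length); // true
    byte[] maxed = new byte[] { (byte) 0xFF, (byte) 0xFF };
    // All 0xFF bytes cannot be incremented; this is the overflow case in which
    // setKey returns -byteOffset to signal that no usable bound exists.
    boolean overflowed = ByteUtil.nextKey(maxed, maxed.length); // false
}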

Example 10 with KeyRange

Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.

The class ScanRangesIntersectTest, method assertIntersect.

private static void assertIntersect(ScanRanges ranges, String lowerRange, String upperRange, String... expectedPoints) {
    List<KeyRange> expectedKeys = points(expectedPoints);
    Collections.sort(expectedKeys, KeyRange.COMPARATOR);
    Scan scan = new Scan();
    scan.setFilter(ranges.getSkipScanFilter());
    byte[] startKey = lowerRange == null ? KeyRange.UNBOUND : PVarchar.INSTANCE.toBytes(lowerRange);
    byte[] stopKey = upperRange == null ? KeyRange.UNBOUND : PVarchar.INSTANCE.toBytes(upperRange);
    Scan newScan = ranges.intersectScan(scan, startKey, stopKey, 0, true);
    if (expectedPoints.length == 0) {
        assertNull(newScan);
    } else {
        assertNotNull(newScan);
        SkipScanFilter filter = (SkipScanFilter) newScan.getFilter();
        assertEquals(expectedKeys, filter.getSlots().get(0));
    }
}
Also used: KeyRange (org.apache.phoenix.query.KeyRange), Scan (org.apache.hadoop.hbase.client.Scan), SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter)
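
The points(...) helper is defined elsewhere in ScanRangesIntersectTest. A plausible shape for it is sketched below; this is an assumption, not the actual test code:

import java.util.List;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.schema.types.PVarchar;
import com.google.common.collect.Lists;

// Assumed shape of the test's points(...) helper: one point KeyRange per value.
private static List<KeyRange> points(String... points) {
    List<KeyRange> keys = Lists.newArrayListWithExpectedSize(points.length);
    for (String point : points) {
        keys.add(PVarchar.INSTANCE.getKeyRange(PVarchar.INSTANCE.toBytes(point)));
    }
    return keys;
}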

Aggregations

KeyRange (org.apache.phoenix.query.KeyRange): 51
Test (org.junit.Test): 23
Connection (java.sql.Connection): 16
ResultSet (java.sql.ResultSet): 14
PreparedStatement (java.sql.PreparedStatement): 9
ArrayList (java.util.ArrayList): 9
List (java.util.List): 8
Properties (java.util.Properties): 7
Scan (org.apache.hadoop.hbase.client.Scan): 7
ScanRanges (org.apache.phoenix.compile.ScanRanges): 6
BigDecimal (java.math.BigDecimal): 5
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 5
KeyPart (org.apache.phoenix.compile.KeyPart): 4
QueryPlan (org.apache.phoenix.compile.QueryPlan): 4
PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement): 4
Field (org.apache.phoenix.schema.ValueSchema.Field): 4
KeyValue (org.apache.hadoop.hbase.KeyValue): 3
SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter): 3
IOException (java.io.IOException): 2
Statement (java.sql.Statement): 2