Example 1 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in the apache/phoenix project.

From the class PhoenixTransactionalIndexer, the method getIndexUpdates:

private Collection<Pair<Mutation, byte[]>> getIndexUpdates(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData, Iterator<Mutation> mutationIterator, byte[] txRollbackAttribute) throws IOException {
    Transaction tx = indexMetaData.getTransaction();
    if (tx == null) {
        throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
    }
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    ResultScanner currentScanner = null;
    TransactionAwareHTable txTable = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (mutationIterator.hasNext()) {
        Mutation m = mutationIterator.next();
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        if (mutations != findPriorValueMutations && isDeleteMutation(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
    }
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
    try {
        // Track if we have row keys with Delete mutations (or Puts that are
        // Tephra's Delete marker). If there are none, we don't need to do the scan for
        // prior versions; if there are, we do. Since rollbacks always have delete mutations,
        // this logic will work there too.
        if (!findPriorValueMutations.isEmpty()) {
            List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
            for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
                keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
            }
            Scan scan = new Scan();
            // Project all mutable columns
            for (ColumnReference ref : mutableColumns) {
                scan.addColumn(ref.getFamily(), ref.getQualifier());
            }
            /*
                 * Indexes inherit the storage scheme of the data table which means all the indexes have the same
                 * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
                 * supporting new indexes over existing data tables to have a different storage scheme than the data
                 * table.
                 */
            byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
            // Project empty key value column
            scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
            ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
            scanRanges.initializeScan(scan);
            TableName tableName = env.getRegion().getRegionInfo().getTable();
            HTableInterface htable = env.getTable(tableName);
            txTable = new TransactionAwareHTable(htable);
            txTable.startTx(tx);
            // For rollback, we need to see all versions, including
            // the last committed version as there may be multiple
            // checkpointed versions.
            SkipScanFilter filter = scanRanges.getSkipScanFilter();
            if (isRollback) {
                filter = new SkipScanFilter(filter, true);
                tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
            }
            scan.setFilter(filter);
            currentScanner = txTable.getScanner(scan);
        }
        if (isRollback) {
            processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations);
        } else {
            processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
        }
    } finally {
        if (txTable != null)
            txTable.close();
    }
    return indexUpdates;
}
Also used: MultiMutation (org.apache.phoenix.hbase.index.MultiMutation), HashMap (java.util.HashMap), KeyRange (org.apache.phoenix.query.KeyRange), ArrayList (java.util.ArrayList), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter), Pair (org.apache.hadoop.hbase.util.Pair), TransactionAwareHTable (org.apache.tephra.hbase.TransactionAwareHTable), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ScanRanges (org.apache.phoenix.compile.ScanRanges), TableName (org.apache.hadoop.hbase.TableName), Transaction (org.apache.tephra.Transaction), Scan (org.apache.hadoop.hbase.client.Scan), Mutation (org.apache.hadoop.hbase.client.Mutation), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
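
The skip-scan setup in this method is a reusable pattern: wrap each row key in a point KeyRange, build a ScanRanges over a single VARBINARY slot, and push both the start/stop row and the skip-scan filter into the Scan. A minimal sketch, using only the Phoenix APIs already visible above (the helper name buildSkipScan and its standalone shape are invented for illustration):

public static Scan buildSkipScan(List<byte[]> rowKeys) {
    // One point KeyRange per row key.
    List<KeyRange> keys = Lists.newArrayListWithExpectedSize(rowKeys.size());
    for (byte[] rowKey : rowKeys) {
        keys.add(PVarbinary.INSTANCE.getKeyRange(rowKey));
    }
    // Same ScanRanges.create(...) argument list as in getIndexUpdates above:
    // a single VARBINARY slot with skip scanning enabled.
    ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
    Scan scan = new Scan();
    // Narrow the scan to the overall [min, max) key range...
    scanRanges.initializeScan(scan);
    // ...and let the skip-scan filter jump between the individual point keys.
    scan.setFilter(scanRanges.getSkipScanFilter());
    return scan;
}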

Example 2 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in the apache/phoenix project.

From the class ScanUtil, the method newScanRanges:

public static ScanRanges newScanRanges(List<? extends Mutation> mutations) throws SQLException {
    List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
    for (Mutation m : mutations) {
        keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow()));
    }
    ScanRanges keyRanges = ScanRanges.createPointLookup(keys);
    return keyRanges;
}
Also used: KeyRange (org.apache.phoenix.query.KeyRange), Mutation (org.apache.hadoop.hbase.client.Mutation), ScanRanges (org.apache.phoenix.compile.ScanRanges)
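
To consume the returned ranges, a caller applies them to a Scan. A minimal sketch, assuming only the methods shown in Examples 1 and 5 (SQLException handling elided):

Scan scan = new Scan();
ScanRanges ranges = ScanUtil.newScanRanges(mutations);
// Narrow the scan's start/stop row to the point lookups...
ranges.initializeScan(scan);
// ...and skip directly between the individual row keys.
scan.setFilter(ranges.getSkipScanFilter());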

Example 3 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in the apache/phoenix project.

From the class ExplainTable, the method appendKeyRanges:

private void appendKeyRanges(StringBuilder buf) {
    ScanRanges scanRanges = context.getScanRanges();
    if (scanRanges.isDegenerate() || scanRanges.isEverything()) {
        return;
    }
    buf.append(" [");
    StringBuilder buf1 = new StringBuilder();
    appendScanRow(buf1, Bound.LOWER);
    buf.append(buf1);
    buf.setCharAt(buf.length() - 1, ']');
    StringBuilder buf2 = new StringBuilder();
    appendScanRow(buf2, Bound.UPPER);
    if (!StringUtil.equals(buf1, buf2)) {
        buf.append(" - [");
        buf.append(buf2);
    }
    buf.setCharAt(buf.length() - 1, ']');
}
Also used: ScanRanges (org.apache.phoenix.compile.ScanRanges)
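
The bracketed fragment this method appends is the key-range suffix of an EXPLAIN plan line. As a purely illustrative example (hypothetical table and keys), a scan bounded below by 'a' and above by 'b' would render along the lines of

CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['a'] - ['b']

whereas identical lower and upper rows collapse to a single bracketed range, because the second bracket is only appended when the two bounds differ.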

Example 4 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in the apache/phoenix project.

From the class ParallelIteratorsSplitTest, the method foreach:

private static Collection<?> foreach(KeyRange[][] ranges, int[] widths, KeyRange[] expectedSplits) {
    RowKeySchema schema = buildSchema(widths);
    List<List<KeyRange>> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST);
    SkipScanFilter filter = new SkipScanFilter(slots, schema);
    // Always set start and stop key to max to verify we are using the information in skipscan
    // filter over the scan's KMIN and KMAX.
    Scan scan = new Scan().setFilter(filter);
    ScanRanges scanRanges = ScanRanges.createSingleSpan(schema, slots);
    List<Object> ret = Lists.newArrayList();
    ret.add(new Object[] { scan, scanRanges, Arrays.<KeyRange>asList(expectedSplits) });
    return ret;
}
Also used: RowKeySchema (org.apache.phoenix.schema.RowKeySchema), List (java.util.List), Scan (org.apache.hadoop.hbase.client.Scan), SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter), ScanRanges (org.apache.phoenix.compile.ScanRanges)
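
The Object[] tuples returned here have the shape JUnit's Parameterized runner expects. A minimal sketch of how such tuples are typically consumed (assumed for illustration; the class and test names below are invented, and the real assertions live in ParallelIteratorsSplitTest):

@RunWith(Parameterized.class)
public class ParallelIteratorsSplitSketch {
    private final Scan scan;
    private final ScanRanges scanRanges;
    private final List<KeyRange> expectedSplits;

    // JUnit injects one { scan, scanRanges, expectedSplits } tuple per run.
    public ParallelIteratorsSplitSketch(Scan scan, ScanRanges scanRanges, List<KeyRange> expectedSplits) {
        this.scan = scan;
        this.scanRanges = scanRanges;
        this.expectedSplits = expectedSplits;
    }

    @Parameterized.Parameters
    public static Collection<Object[]> data() {
        // In the real test, the tuples built by foreach(...) are aggregated here.
        return Lists.newArrayList();
    }

    @Test
    public void testSplitsMatchExpected() {
        // The real test derives region splits from scan + scanRanges
        // and compares them against expectedSplits.
    }
}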

Example 5 with ScanRanges

Use of org.apache.phoenix.compile.ScanRanges in the apache/phoenix project.

From the class MetaDataEndpointImpl, the method buildFunctions:

private List<PFunction> buildFunctions(List<byte[]> keys, Region region, long clientTimeStamp, boolean isReplace, List<Mutation> deleteMutationsForReplace) throws IOException, SQLException {
    List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
    for (byte[] key : keys) {
        byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY);
        ByteUtil.nextKey(stopKey, stopKey.length);
        keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false));
    }
    Scan scan = new Scan();
    scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
    ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
    scanRanges.initializeScan(scan);
    scan.setFilter(scanRanges.getSkipScanFilter());
    RegionScanner scanner = region.getScanner(scan);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    List<PFunction> functions = new ArrayList<PFunction>();
    PFunction function = null;
    try {
        for (int i = 0; i < keys.size(); i++) {
            function = getFunction(scanner, isReplace, clientTimeStamp, deleteMutationsForReplace);
            if (function == null) {
                return null;
            }
            byte[] functionKey = SchemaUtil.getFunctionKey(function.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : function.getTenantId().getBytes(), Bytes.toBytes(function.getFunctionName()));
            metaDataCache.put(new FunctionBytesPtr(functionKey), function);
            functions.add(function);
        }
        return functions;
    } finally {
        scanner.close();
    }
}
Also used: PFunction (org.apache.phoenix.parse.PFunction), KeyRange (org.apache.phoenix.query.KeyRange), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), ScanRanges (org.apache.phoenix.compile.ScanRanges), PTinyint (org.apache.phoenix.schema.types.PTinyint), PSmallint (org.apache.phoenix.schema.types.PSmallint), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), Scan (org.apache.hadoop.hbase.client.Scan), FunctionBytesPtr (org.apache.phoenix.cache.GlobalCache.FunctionBytesPtr)
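
The stop-key construction above is easiest to follow with concrete bytes. A worked example with illustrative values (in Phoenix row keys the separator byte is 0x00):

// key                                        = {0x66, 0x6F, 0x6F}        ("foo")
// ByteUtil.concat(key, SEPARATOR_BYTE_ARRAY) = {0x66, 0x6F, 0x6F, 0x00}
// after ByteUtil.nextKey(stopKey, 4)         = {0x66, 0x6F, 0x6F, 0x01}  (incremented in place)
// getKeyRange(key, true, stopKey, false) therefore covers the half-open range
// ["foo", "foo\x01"): the function's own row plus every row whose key is
// "foo" followed by the separator byte, such as its argument rows.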

Aggregations

ScanRanges (org.apache.phoenix.compile.ScanRanges): 11
Scan (org.apache.hadoop.hbase.client.Scan): 6
KeyRange (org.apache.phoenix.query.KeyRange): 6
ArrayList (java.util.ArrayList): 4
List (java.util.List): 3
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 3
Hint (org.apache.phoenix.parse.HintNode.Hint): 3
Mutation (org.apache.hadoop.hbase.client.Mutation): 2
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 2
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 2
SkipScanFilter (org.apache.phoenix.filter.SkipScanFilter): 2
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 2
RowKeySchema (org.apache.phoenix.schema.RowKeySchema): 2
PSmallint (org.apache.phoenix.schema.types.PSmallint): 2
PTinyint (org.apache.phoenix.schema.types.PTinyint): 2
ImmutableList (com.google.common.collect.ImmutableList): 1
ByteArrayInputStream (java.io.ByteArrayInputStream): 1
DataInput (java.io.DataInput): 1
DataInputStream (java.io.DataInputStream): 1
EOFException (java.io.EOFException): 1