
Example 1 with IndexMaintainer

use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.

From the class DataTableLocalIndexRegionScanner, the method getLocalIndexCellsFromDataTable:

private void getLocalIndexCellsFromDataTable(List<Cell> dataTableResults, List<Cell> localIndexResults) throws IOException {
    if (!dataTableResults.isEmpty()) {
        result.setKeyValues(dataTableResults);
        for (IndexMaintainer maintainer : indexMaintainers) {
            // Extract the data row key of the current row
            result.getKey(ptr);
            // The ValueGetter resolves indexed/covered column values from the scanned data cells
            ValueGetter valueGetter = maintainer.createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), dataTableResults);
            // Build the index update for this row and keep only the cells that belong
            // to the local index column family
            List<Cell> list = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, dataTableResults.get(0).getTimestamp(), startKey, endKey).getFamilyCellMap().get(localIndexFamily);
            if (list != null) {
                localIndexResults.addAll(list);
            }
        }
    }
}
Also used : IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) Cell(org.apache.hadoop.hbase.Cell) ValueGetter(org.apache.phoenix.hbase.index.ValueGetter)
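
The snippet above leans on instance fields of the region scanner (result, ptr, kvBuilder, startKey, endKey, localIndexFamily). A minimal sketch of the same translation as a standalone helper, with those collaborators passed in explicitly; the helper name and parameter list are hypothetical, and KeyValueBuilder is assumed to be org.apache.phoenix.hbase.index.util.KeyValueBuilder:

private static List<Cell> toLocalIndexCells(IndexMaintainer maintainer, KeyValueBuilder kvBuilder,
        ImmutableBytesWritable rowKeyPtr, List<Cell> dataCells, byte[] regionStartKey,
        byte[] regionEndKey, byte[] localIndexFamily) throws IOException {
    // The ValueGetter resolves indexed/covered column values from the scanned data cells
    ValueGetter valueGetter = maintainer.createGetterFromKeyValues(
            ImmutableBytesPtr.copyBytesIfNecessary(rowKeyPtr), dataCells);
    // buildUpdateMutation produces the index Put; the region boundary keys matter
    // because local index row keys are prefixed with the region start key
    List<Cell> cells = maintainer.buildUpdateMutation(kvBuilder, valueGetter, rowKeyPtr,
            dataCells.get(0).getTimestamp(), regionStartKey, regionEndKey)
            .getFamilyCellMap().get(localIndexFamily);
    return cells == null ? Collections.<Cell> emptyList() : cells;
}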

Example 2 with IndexMaintainer

use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.

From the class DeleteCompiler, the method deleteRows:

/**
 * Handles client side deletion of rows for a DELETE statement. We determine the "best" plan to drive the query using
 * our standard optimizer. The plan may be based on using an index, in which case we need to translate the index row
 * key to get the data row key used to form the delete mutation. We always collect up the data table mutations, but we
 * only collect and send the index mutations for global, immutable indexes. Local indexes and mutable indexes are always
 * maintained on the server side.
 * @param context StatementContext for the scan being executed
 * @param iterator ResultIterator for the scan being executed
 * @param bestPlan QueryPlan used to produce the iterator
 * @param projectedTableRef TableRef containing all indexed and covered columns across all indexes on the data table
 * @param otherTableRefs other TableRefs needed to be maintained apart from the one over which the scan is executing.
 *  Might be other index tables (if we're driving off of the data table), the data table (if we're driving off of
 *  an index table), or a mix of the data table and additional index tables.
 * @return MutationState representing the uncommitted data across the data table and indexes. Will be joined with the
 *  MutationState on the connection over which the delete is occurring.
 * @throws SQLException
 */
private static MutationState deleteRows(StatementContext context, ResultIterator iterator, QueryPlan bestPlan, TableRef projectedTableRef, List<TableRef> otherTableRefs) throws SQLException {
    RowProjector projector = bestPlan.getProjector();
    TableRef tableRef = bestPlan.getTableRef();
    PTable table = tableRef.getTable();
    PhoenixStatement statement = context.getStatement();
    PhoenixConnection connection = statement.getConnection();
    PName tenantId = connection.getTenantId();
    byte[] tenantIdBytes = null;
    if (tenantId != null) {
        tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, table.getViewIndexId() != null);
    }
    final boolean isAutoCommit = connection.getAutoCommit();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    MultiRowMutationState mutations = new MultiRowMutationState(batchSize);
    List<MultiRowMutationState> otherMutations = null;
    // If other tables need to be maintained, allocate per-table mutation state
    // (we can always get the data table row key from an index row key).
    if (!otherTableRefs.isEmpty()) {
        otherMutations = Lists.newArrayListWithExpectedSize(otherTableRefs.size());
        for (int i = 0; i < otherTableRefs.size(); i++) {
            otherMutations.add(new MultiRowMutationState(batchSize));
        }
    }
    List<PColumn> pkColumns = table.getPKColumns();
    boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int offset = (table.getBucketNum() == null ? 0 : 1);
    byte[][] values = new byte[pkColumns.size()][];
    if (isSharedViewIndex) {
        values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isMultiTenant) {
        values[offset++] = tenantIdBytes;
    }
    try (final PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, context)) {
        ValueGetter getter = null;
        if (!otherTableRefs.isEmpty()) {
            getter = new ValueGetter() {

                final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable();

                final ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable();

                @Override
                public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException {
                    Cell cell = rs.getCurrentRow().getValue(ref.getFamily(), ref.getQualifier());
                    if (cell == null) {
                        return null;
                    }
                    valuePtr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                    return valuePtr;
                }

                @Override
                public byte[] getRowKey() {
                    rs.getCurrentRow().getKey(rowKeyPtr);
                    return ByteUtil.copyKeyBytesIfNecessary(rowKeyPtr);
                }
            };
        }
        IndexMaintainer scannedIndexMaintainer = null;
        IndexMaintainer[] maintainers = null;
        PTable dataTable = table;
        if (table.getType() == PTableType.INDEX) {
            if (!otherTableRefs.isEmpty()) {
                // The data table is always the last one in the list if it's
                // not chosen as the best of the possible plans.
                dataTable = otherTableRefs.get(otherTableRefs.size() - 1).getTable();
                scannedIndexMaintainer = IndexMaintainer.create(dataTable, table, connection);
            }
            maintainers = new IndexMaintainer[otherTableRefs.size()];
            for (int i = 0; i < otherTableRefs.size(); i++) {
                // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that client-side
                // expressions are used instead of server-side ones.
                PTable otherTable = otherTableRefs.get(i).getTable();
                if (otherTable.getType() == PTableType.INDEX) {
                    // In this case, we'll convert from index row -> data row -> other index row
                    maintainers[i] = IndexMaintainer.create(dataTable, otherTable, connection);
                } else {
                    maintainers[i] = scannedIndexMaintainer;
                }
            }
        } else if (!otherTableRefs.isEmpty()) {
            dataTable = table;
            maintainers = new IndexMaintainer[otherTableRefs.size()];
            for (int i = 0; i < otherTableRefs.size(); i++) {
                // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that client-side
                // expressions are used instead of server-side ones.
                maintainers[i] = IndexMaintainer.create(projectedTableRef.getTable(), otherTableRefs.get(i).getTable(), connection);
            }
        }
        byte[][] viewConstants = IndexUtil.getViewConstants(dataTable);
        int rowCount = 0;
        while (rs.next()) {
            // allocate new as this is a key in a Map
            ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr();
            rs.getCurrentRow().getKey(rowKeyPtr);
            // The check for otherTableRefs being empty is required when deleting directly from the index
            if (otherTableRefs.isEmpty() || isMaintainedOnClient(table)) {
                mutations.put(rowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, 0, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            }
            for (int i = 0; i < otherTableRefs.size(); i++) {
                PTable otherTable = otherTableRefs.get(i).getTable();
                // allocate new as this is a key in a Map
                ImmutableBytesPtr otherRowKeyPtr = new ImmutableBytesPtr();
                // Translate the data table row to the index table row
                if (table.getType() == PTableType.INDEX) {
                    otherRowKeyPtr.set(scannedIndexMaintainer.buildDataRowKey(rowKeyPtr, viewConstants));
                    if (otherTable.getType() == PTableType.INDEX) {
                        otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, otherRowKeyPtr, null, null, HConstants.LATEST_TIMESTAMP));
                    }
                } else {
                    otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, rowKeyPtr, null, null, HConstants.LATEST_TIMESTAMP));
                }
                otherMutations.get(i).put(otherRowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, 0, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            }
            if (mutations.size() > maxSize) {
                throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
            }
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutations, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                for (int i = 0; i < otherTableRefs.size(); i++) {
                    MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0, maxSize, maxSizeBytes, connection);
                    connection.getMutationState().join(indexState);
                }
                connection.getMutationState().send();
                mutations.clear();
                if (otherMutations != null) {
                    for (MultiRowMutationState multiRowMutationState : otherMutations) {
                        multiRowMutationState.clear();
                    }
                }
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        int nCommittedRows = isAutoCommit ? (rowCount / batchSize * batchSize) : 0;
        MutationState state = new MutationState(tableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
        for (int i = 0; i < otherTableRefs.size(); i++) {
            MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0, maxSize, maxSizeBytes, connection);
            state.join(indexState);
        }
        return state;
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) ValueGetter(org.apache.phoenix.hbase.index.ValueGetter) PColumn(org.apache.phoenix.schema.PColumn) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) Cell(org.apache.hadoop.hbase.Cell) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) IOException(java.io.IOException) Hint(org.apache.phoenix.parse.HintNode.Hint) MultiRowMutationState(org.apache.phoenix.execute.MutationState.MultiRowMutationState) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) MultiRowMutationState(org.apache.phoenix.execute.MutationState.MultiRowMutationState) PName(org.apache.phoenix.schema.PName) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) TableRef(org.apache.phoenix.schema.TableRef) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) MultiRowMutationState(org.apache.phoenix.execute.MutationState.MultiRowMutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
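
The three-way key translation inside the row loop is the heart of this method. A minimal sketch of one iteration as a standalone helper (the helper name is hypothetical; the calls are the same ones used above):

private static byte[] translateToOtherIndexRowKey(IndexMaintainer scannedIndexMaintainer,
        IndexMaintainer otherIndexMaintainer, ValueGetter getter,
        ImmutableBytesPtr indexRowKeyPtr, byte[][] viewConstants) {
    // Step 1: recover the data table row key from the scanned index row key
    ImmutableBytesPtr dataRowKeyPtr = new ImmutableBytesPtr(
            scannedIndexMaintainer.buildDataRowKey(indexRowKeyPtr, viewConstants));
    // Step 2: build the other index's row key from the data row key; the ValueGetter
    // supplies any indexed column values that are not encoded in the row key itself
    return otherIndexMaintainer.buildRowKey(getter, dataRowKeyPtr, null, null,
            HConstants.LATEST_TIMESTAMP);
}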

Example 3 with IndexMaintainer

use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.

From the class UpsertCompiler, the method upsertSelect:

public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, RowProjector projector, ResultIterator iterator, int[] columnIndexes, int[] pkSlotIndexes, boolean useServerTimestamp, boolean prefixSysColValues) throws SQLException {
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    boolean isAutoCommit = connection.getAutoCommit();
    int numSplColumns = (tableRef.getTable().isMultiTenant() ? 1 : 0) + (tableRef.getTable().getViewIndexId() != null ? 1 : 0);
    byte[][] values = new byte[columnIndexes.length + numSplColumns][];
    if (prefixSysColValues) {
        int i = 0;
        if (tableRef.getTable().isMultiTenant()) {
            values[i++] = connection.getTenantId().getBytes();
        }
        if (tableRef.getTable().getViewIndexId() != null) {
            values[i++] = PSmallint.INSTANCE.toBytes(tableRef.getTable().getViewIndexId());
        }
    }
    int rowCount = 0;
    MultiRowMutationState mutation = new MultiRowMutationState(batchSize);
    PTable table = tableRef.getTable();
    IndexMaintainer indexMaintainer = null;
    byte[][] viewConstants = null;
    if (table.getIndexType() == IndexType.LOCAL) {
        PTable parentTable = statement.getConnection().getMetaDataCache().getTableRef(new PTableKey(statement.getConnection().getTenantId(), table.getParentName().getString())).getTable();
        indexMaintainer = table.getIndexMaintainer(parentTable, connection);
        viewConstants = IndexUtil.getViewConstants(parentTable);
    }
    try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i + 1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i + 1);
                int rsPrecision = rs.getMetaData().getPrecision(i + 1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i + 1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // We are guaranteed that the two columns will have compatible types,
                // as we checked that before.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), SortOrder.getDefault(), precision, scale, column.getMaxLength(), column.getScale())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(column.getName().getString()).setMessage("value=" + column.getDataType().toStringLiteral(ptr, null)).build().buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(), precision, scale, SortOrder.getDefault(), column.getMaxLength(), column.getScale(), column.getSortOrder(), table.rowKeyOrderOptimizable());
                values[j] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, useServerTimestamp, indexMaintainer, viewConstants, null, numSplColumns);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                connection.getMutationState().send();
                mutation.clear();
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, maxSizeBytes, connection);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) Hint(org.apache.phoenix.parse.HintNode.Hint) PSmallint(org.apache.phoenix.schema.types.PSmallint) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) MultiRowMutationState(org.apache.phoenix.execute.MutationState.MultiRowMutationState) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) MultiRowMutationState(org.apache.phoenix.execute.MutationState.MultiRowMutationState) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) ResultSet(java.sql.ResultSet) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices)
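
The local-index setup at the top of upsertSelect (resolving the parent table, then deriving the maintainer and view constants) is a self-contained step. A minimal sketch as a hypothetical helper, using the same calls:

private static Pair<IndexMaintainer, byte[][]> getLocalIndexContext(PhoenixConnection connection,
        PTable index) throws SQLException {
    // Resolve the parent data table from the client-side metadata cache
    PTable parentTable = connection.getMetaDataCache().getTableRef(
            new PTableKey(connection.getTenantId(), index.getParentName().getString())).getTable();
    // Derive the maintainer and the view constants that the mutation path needs
    IndexMaintainer maintainer = index.getIndexMaintainer(parentTable, connection);
    byte[][] viewConstants = IndexUtil.getViewConstants(parentTable);
    return new Pair<IndexMaintainer, byte[][]>(maintainer, viewConstants);
}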

Example 4 with IndexMaintainer

use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.

From the class MutationState, the method newTxIndexMutationGenerator:

private PhoenixTxIndexMutationGenerator newTxIndexMutationGenerator(PTable table, List<PTable> indexes, Map<String, byte[]> attributes) {
    final List<IndexMaintainer> indexMaintainers = Lists.newArrayListWithExpectedSize(indexes.size());
    for (PTable index : indexes) {
        IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
        indexMaintainers.add(maintainer);
    }
    IndexMetaDataCache indexMetaDataCache = new IndexMetaDataCache() {

        @Override
        public void close() throws IOException {
        }

        @Override
        public List<IndexMaintainer> getIndexMaintainers() {
            return indexMaintainers;
        }

        @Override
        public PhoenixTransactionContext getTransactionContext() {
            return phoenixTransactionContext;
        }

        @Override
        public int getClientVersion() {
            return MetaDataProtocol.PHOENIX_VERSION;
        }
    };
    try {
        PhoenixIndexMetaData indexMetaData = new PhoenixIndexMetaData(indexMetaDataCache, attributes);
        return new PhoenixTxIndexMutationGenerator(connection.getQueryServices().getConfiguration(), indexMetaData, table.getPhysicalName().getBytes());
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    }
}
Also used : IndexMetaDataCache(org.apache.phoenix.cache.IndexMetaDataCache) PhoenixIndexMetaData(org.apache.phoenix.index.PhoenixIndexMetaData) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) IOException(java.io.IOException) PTable(org.apache.phoenix.schema.PTable)
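
A minimal usage sketch, hypothetical, assuming it runs inside MutationState where this private factory is visible, with an open HTableInterface (htable) and a list of row mutations (rowMutations) in scope; it pairs the generator with getIndexUpdates from Example 5 below (exception handling elided):

PhoenixTxIndexMutationGenerator generator =
        newTxIndexMutationGenerator(table, indexes, attributes);
Collection<Pair<Mutation, byte[]>> indexUpdates =
        generator.getIndexUpdates(htable, rowMutations.iterator());
// Each Pair carries an index mutation (getFirst) and, by assumption here,
// the bytes identifying the index table it targets (getSecond).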

Example 5 with IndexMaintainer

use of org.apache.phoenix.index.IndexMaintainer in project phoenix by apache.

From the class PhoenixTxIndexMutationGenerator, the method getIndexUpdates:

public Collection<Pair<Mutation, byte[]>> getIndexUpdates(HTableInterface htable, Iterator<Mutation> mutationIterator) throws IOException, SQLException {
    if (!mutationIterator.hasNext()) {
        return Collections.emptyList();
    }
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    ResultScanner currentScanner = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Mutation m = mutationIterator.next();
    Map<String, byte[]> updateAttributes = m.getAttributesMap();
    byte[] txRollbackAttribute = updateAttributes.get(PhoenixTransactionContext.TX_ROLLBACK_ATTRIBUTE_KEY);
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (true) {
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
        // if we have no non PK columns, no need to find the prior values
        if (mutations != findPriorValueMutations && indexMetaData.requiresPriorRowState(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
        if (!mutationIterator.hasNext()) {
            break;
        }
        m = mutationIterator.next();
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
    // Scan for the prior row state only when some mutation needs it; rollback
    // reuses the same scanner, so this logic will work there too.
    if (!findPriorValueMutations.isEmpty()) {
        List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
        for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
            keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
        }
        Scan scan = new Scan();
        // Project all mutable columns
        for (ColumnReference ref : mutableColumns) {
            scan.addColumn(ref.getFamily(), ref.getQualifier());
        }
        /*
         * Indexes inherit the storage scheme of the data table which means all the indexes have the same
         * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
         * supporting new indexes over existing data tables to have a different storage scheme than the data
         * table.
         */
        byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
        // Project empty key value column
        scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
        ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
        scanRanges.initializeScan(scan);
        PhoenixTransactionalTable txTable = TransactionFactory.getTransactionProvider().getTransactionalTable(indexMetaData.getTransactionContext(), htable);
        // For rollback, we need to see all versions, including
        // the last committed version as there may be multiple
        // checkpointed versions.
        SkipScanFilter filter = scanRanges.getSkipScanFilter();
        if (isRollback) {
            filter = new SkipScanFilter(filter, true);
            indexMetaData.getTransactionContext().setVisibilityLevel(PhoenixVisibilityLevel.SNAPSHOT_ALL);
        }
        scan.setFilter(filter);
        currentScanner = txTable.getScanner(scan);
    }
    if (isRollback) {
        processRollback(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, indexUpdates, mutations);
    } else {
        processMutation(indexMetaData, txRollbackAttribute, currentScanner, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
    }
    return indexUpdates;
}
Also used : MultiMutation(org.apache.phoenix.hbase.index.MultiMutation) HashMap(java.util.HashMap) KeyRange(org.apache.phoenix.query.KeyRange) ArrayList(java.util.ArrayList) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) PhoenixTransactionalTable(org.apache.phoenix.transaction.PhoenixTransactionalTable) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) Pair(org.apache.hadoop.hbase.util.Pair) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ScanRanges(org.apache.phoenix.compile.ScanRanges) Scan(org.apache.hadoop.hbase.client.Scan) Mutation(org.apache.hadoop.hbase.client.Mutation) MultiMutation(org.apache.phoenix.hbase.index.MultiMutation) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
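
The point-lookup scan construction above (one KeyRange per row key, combined into a skip-scan) is reusable on its own. A minimal sketch as a hypothetical helper, echoing the non-rollback path of the calls used above:

private static Scan buildPointLookupScan(Collection<ImmutableBytesPtr> rowKeys) {
    // Each row key becomes a single-point key range over the VAR_BINARY row key schema
    List<KeyRange> keys = Lists.newArrayListWithExpectedSize(rowKeys.size());
    for (ImmutableBytesPtr ptr : rowKeys) {
        keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
    }
    Scan scan = new Scan();
    ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA,
            Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN,
            KeyRange.EVERYTHING_RANGE, null, true, -1);
    // initializeScan sets the scan's start/stop rows; the skip-scan filter then
    // restricts the scan to exactly the requested keys
    scanRanges.initializeScan(scan);
    scan.setFilter(scanRanges.getSkipScanFilter());
    return scan;
}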

Aggregations

IndexMaintainer (org.apache.phoenix.index.IndexMaintainer): 25 usages
ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference): 13 usages
PTable (org.apache.phoenix.schema.PTable): 12 usages
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 10 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 9 usages
IOException (java.io.IOException): 8 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 8 usages
Cell (org.apache.hadoop.hbase.Cell): 7 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 7 usages
SQLException (java.sql.SQLException): 6 usages
ArrayList (java.util.ArrayList): 6 usages
ValueGetter (org.apache.phoenix.hbase.index.ValueGetter): 6 usages
List (java.util.List): 5 usages
Delete (org.apache.hadoop.hbase.client.Delete): 5 usages
Put (org.apache.hadoop.hbase.client.Put): 5 usages
Scan (org.apache.hadoop.hbase.client.Scan): 5 usages
Pair (org.apache.hadoop.hbase.util.Pair): 5 usages
PColumn (org.apache.phoenix.schema.PColumn): 4 usages
HashMap (java.util.HashMap): 3 usages
Properties (java.util.Properties): 3 usages