
Example 66 with ImmutableBytesPtr

use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

the class IndexUtil method generateIndexData.

public static List<Mutation> generateIndexData(final PTable table, PTable index, final Map<ImmutableBytesPtr, RowMutationState> valuesMap, List<Mutation> dataMutations, final KeyValueBuilder kvBuilder, PhoenixConnection connection) throws SQLException {
    try {
        final ImmutableBytesPtr ptr = new ImmutableBytesPtr();
        IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
        List<Mutation> indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size());
        for (final Mutation dataMutation : dataMutations) {
            long ts = MetaDataUtil.getClientTimeStamp(dataMutation);
            ptr.set(dataMutation.getRow());
            /*
             * We only need to generate the additional mutations for a Put for immutable indexes.
             * Deletes of rows are handled by running a re-written query against the index table,
             * and Deletes of column values should never be necessary, as you should never be
             * updating an existing row.
             */
            if (dataMutation instanceof Put) {
                ValueGetter valueGetter = new ValueGetter() {

                    @Override
                    public byte[] getRowKey() {
                        return dataMutation.getRow();
                    }

                    @Override
                    public ImmutableBytesWritable getLatestValue(ColumnReference ref) {
                        // Always return null for our empty key value, as this will cause the index
                        // maintainer to always treat this Put as a new row.
                        if (isEmptyKeyValue(table, ref)) {
                            return null;
                        }
                        byte[] family = ref.getFamily();
                        byte[] qualifier = ref.getQualifier();
                        Map<byte[], List<Cell>> familyMap = dataMutation.getFamilyCellMap();
                        List<Cell> kvs = familyMap.get(family);
                        if (kvs == null) {
                            return null;
                        }
                        for (Cell kv : kvs) {
                            if (Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), family, 0, family.length) == 0 && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
                                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
                                kvBuilder.getValueAsPtr(kv, ptr);
                                return ptr;
                            }
                        }
                        return null;
                    }
                };
                byte[] regionStartKey = null;
                byte[] regionEndkey = null;
                if (maintainer.isLocalIndex()) {
                    HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
                    regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
                    regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
                }
                indexMutations.add(maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts, regionStartKey, regionEndkey));
            }
        }
        return indexMutations;
    } catch (IOException e) {
        throw new SQLException(e);
    }
}
Also used : SQLException(java.sql.SQLException) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) IOException(java.io.IOException) Put(org.apache.hadoop.hbase.client.Put) ValueGetter(org.apache.phoenix.hbase.index.ValueGetter) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) List(java.util.List) ArrayList(java.util.ArrayList) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
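
The ValueGetter above allocates a fresh ImmutableBytesPtr per matched cell, while the enclosing method reuses a single ptr for row keys via ptr.set(). A minimal sketch of that reuse pattern (not from the Phoenix source; the class name is hypothetical), using only the no-arg constructor, set(), and copyBytes() seen or implied above:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class PtrReuseSketch {
    public static void main(String[] args) {
        // One pointer, re-targeted for each row key; set() just re-points, it does not copy.
        ImmutableBytesPtr ptr = new ImmutableBytesPtr();
        byte[][] rowKeys = { Bytes.toBytes("row1"), Bytes.toBytes("row2") };
        for (byte[] rowKey : rowKeys) {
            ptr.set(rowKey);
            // Copy only when the bytes must outlive the loop iteration.
            byte[] stable = ptr.copyBytes();
            System.out.println(Bytes.toString(stable));
        }
    }
}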

Example 67 with ImmutableBytesPtr

use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

the class QueryUtil method getRemainingOffset.

public static Integer getRemainingOffset(Tuple offsetTuple) {
    if (offsetTuple != null) {
        ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr();
        offsetTuple.getKey(rowKeyPtr);
        if (QueryConstants.OFFSET_ROW_KEY_PTR.compareTo(rowKeyPtr) == 0) {
            Cell cell = offsetTuple.getValue(QueryConstants.OFFSET_FAMILY, QueryConstants.OFFSET_COLUMN);
            return (Integer) PInteger.INSTANCE.toObject(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), PInteger.INSTANCE, SortOrder.ASC, null, null);
        }
    }
    return null;
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Cell(org.apache.hadoop.hbase.Cell)
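
For context, the cell checked above carries the remaining offset as a PInteger-encoded value under the reserved offset row key. A round-trip sketch of that encoding (not Phoenix code; the class name is hypothetical, and the write side is an assumption that mirrors the decode path), using the same PInteger overload as getRemainingOffset():

import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PInteger;

public class OffsetValueSketch {
    public static void main(String[] args) {
        // Encode a remaining offset of 42 (assumed to match how the server serializes it).
        byte[] encoded = PInteger.INSTANCE.toBytes(42);
        // Decode it with the overload used in getRemainingOffset().
        Integer remaining = (Integer) PInteger.INSTANCE.toObject(
                encoded, 0, encoded.length, PInteger.INSTANCE, SortOrder.ASC, null, null);
        System.out.println(remaining); // prints 42
    }
}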

Example 68 with ImmutableBytesPtr

use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

the class TestNonTxIndexBuilder method testGetMutableIndexUpdate.

/**
 * Tests that updating an indexed column results in a DeleteFamily (prior index cell) and a Put
 * (new index cell).
 */
@Test
public void testGetMutableIndexUpdate() throws IOException {
    setCurrentRowState(FAM, INDEXED_QUALIFIER, 1, VALUE_1);
    // update ts and value
    Put put = new Put(ROW);
    put.addImmutable(FAM, INDEXED_QUALIFIER, 2, VALUE_2);
    MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW));
    mutation.addAll(put);
    Collection<Pair<Mutation, byte[]>> indexUpdates = indexBuilder.getIndexUpdate(mutation, mockIndexMetaData);
    assertEquals(2, indexUpdates.size());
    assertContains(indexUpdates, 2, ROW, KeyValue.Type.DeleteFamily, FAM, new byte[0], /* qual not needed */ 2);
    assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, KeyValue.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, 2);
}
Also used : MultiMutation(org.apache.phoenix.hbase.index.MultiMutation) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Put(org.apache.hadoop.hbase.client.Put) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test) BaseConnectionlessQueryTest(org.apache.phoenix.query.BaseConnectionlessQueryTest)
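
The MultiMutation here is keyed by an ImmutableBytesPtr wrapping the row key; this works because equality and hashing are content based (with the hash cached), so any two pointers over the same row bytes group to the same entry. A minimal sketch of that property (not Phoenix code; the class name is hypothetical):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class RowKeyGroupingSketch {
    public static void main(String[] args) {
        Map<ImmutableBytesPtr, Integer> perRowCounts = new HashMap<>();
        // Two distinct pointer instances over identical bytes hit the same map entry.
        perRowCounts.merge(new ImmutableBytesPtr(Bytes.toBytes("row1")), 1, Integer::sum);
        perRowCounts.merge(new ImmutableBytesPtr(Bytes.toBytes("row1")), 1, Integer::sum);
        System.out.println(perRowCounts.size()); // prints 1
    }
}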

Example 69 with ImmutableBytesPtr

use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

the class DeleteCompiler method compile.

public MutationPlan compile(DeleteStatement delete) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final boolean isAutoCommit = connection.getAutoCommit();
    final boolean hasLimit = delete.getLimit() != null;
    final ConnectionQueryServices services = connection.getQueryServices();
    List<QueryPlan> queryPlans;
    NamedTableNode tableNode = delete.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    boolean retryOnce = !isAutoCommit;
    TableRef tableRefToBe;
    boolean noQueryReqd = false;
    boolean runOnServer = false;
    SelectStatement select = null;
    ColumnResolver resolverToBe = null;
    Map<PTableKey, PTable> immutableIndex = Collections.emptyMap();
    DeletingParallelIteratorFactory parallelIteratorFactory;
    QueryPlan dataPlanToBe = null;
    while (true) {
        try {
            resolverToBe = FromCompiler.getResolverForMutation(delete, connection);
            tableRefToBe = resolverToBe.getTables().get(0);
            PTable table = tableRefToBe.getTable();
            // TODO: SchemaUtil.isReadOnly(PTable, connection)?
            if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
                throw new ReadOnlyTableException(schemaName, tableName);
            } else if (table.isTransactional() && connection.getSCN() != null) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            boolean mayHaveImmutableIndexes = !immutableIndex.isEmpty();
            noQueryReqd = !hasLimit;
            // Can't run on same server for transactional data, as we need the row keys for the data
            // that is being upserted for conflict detection purposes.
            runOnServer = isAutoCommit && noQueryReqd && !table.isTransactional();
            HintNode hint = delete.getHint();
            if (runOnServer && !delete.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
                hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
            }
            List<AliasedNode> aliasedNodes = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
            boolean isSalted = table.getBucketNum() != null;
            boolean isMultiTenant = connection.getTenantId() != null && table.isMultiTenant();
            boolean isSharedViewIndex = table.getViewIndexId() != null;
            for (int i = (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0); i < table.getPKColumns().size(); i++) {
                PColumn column = table.getPKColumns().get(i);
                aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
            }
            select = FACTORY.select(delete.getTable(), hint, false, aliasedNodes, delete.getWhere(), Collections.<ParseNode>emptyList(), null, delete.getOrderBy(), delete.getLimit(), null, delete.getBindCount(), false, false, Collections.<SelectStatement>emptyList(), delete.getUdfParseNodes());
            select = StatementNormalizer.normalize(select, resolverToBe);
            SelectStatement transformedSelect = SubqueryRewriter.transform(select, resolverToBe, connection);
            if (transformedSelect != select) {
                resolverToBe = FromCompiler.getResolverForQuery(transformedSelect, connection, false, delete.getTable().getName());
                select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
            }
            parallelIteratorFactory = hasLimit ? null : new DeletingParallelIteratorFactory(connection);
            QueryOptimizer optimizer = new QueryOptimizer(services);
            QueryCompiler compiler = new QueryCompiler(statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory, new SequenceManager(statement));
            dataPlanToBe = compiler.compile();
            queryPlans = Lists.newArrayList(mayHaveImmutableIndexes ? optimizer.getApplicablePlans(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory) : optimizer.getBestPlan(dataPlanToBe, statement, select, resolverToBe, Collections.<PColumn>emptyList(), parallelIteratorFactory));
            if (mayHaveImmutableIndexes) {
                // FIXME: this is ugly
                // Lookup the table being deleted from in the cache, as it's possible that the
                // optimizer updated the cache if it found indexes that were out of date.
                // If the index was marked as disabled, it should not be in the list
                // of immutable indexes.
                table = connection.getTable(new PTableKey(table.getTenantId(), table.getName().getString()));
                tableRefToBe.setTable(table);
                immutableIndex = getNonDisabledImmutableIndexes(tableRefToBe);
            }
        } catch (MetaDataEntityNotFoundException e) {
            // Our cached metadata may be out of date, so update the cache and retry once.
            // Otherwise throw, as we'll just get the same error next time.
            if (retryOnce) {
                retryOnce = false;
                MetaDataMutationResult result = new MetaDataClient(connection).updateCache(schemaName, tableName);
                if (result.wasUpdated()) {
                    continue;
                }
            }
            throw e;
        }
        break;
    }
    boolean isBuildingImmutable = false;
    final boolean hasImmutableIndexes = !immutableIndex.isEmpty();
    if (hasImmutableIndexes) {
        for (PTable index : immutableIndex.values()) {
            if (index.getIndexState() == PIndexState.BUILDING) {
                isBuildingImmutable = true;
                break;
            }
        }
    }
    final QueryPlan dataPlan = dataPlanToBe;
    // tableRefs is parallel with queryPlans
    TableRef[] tableRefs = new TableRef[hasImmutableIndexes ? immutableIndex.size() : 1];
    if (hasImmutableIndexes) {
        int i = 0;
        Iterator<QueryPlan> plans = queryPlans.iterator();
        while (plans.hasNext()) {
            QueryPlan plan = plans.next();
            PTable table = plan.getTableRef().getTable();
            if (table.getType() == PTableType.INDEX) {
                // index plans
                tableRefs[i++] = plan.getTableRef();
                immutableIndex.remove(table.getKey());
            } else if (!isBuildingImmutable) {
                // data plan
                /*
                 * If we have immutable indexes that we need to maintain, don't execute the data plan
                 * as we can save a query by piggy-backing on any of the other index queries, since the
                 * PK columns that we need are always in each index row.
                 */
                plans.remove();
            }
        }
        /*
         * If we have any immutable indexes remaining, then that means that the plan for that index got filtered out
         * because it could not be executed. This would occur if a column in the where clause is not found in the
         * immutable index.
         */
        if (!immutableIndex.isEmpty()) {
            Collection<PTable> immutableIndexes = immutableIndex.values();
            if (!isBuildingImmutable || hasNonPKIndexedColumns(immutableIndexes)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_FILTER_ON_IMMUTABLE_ROWS).setSchemaName(tableRefToBe.getTable().getSchemaName().getString()).setTableName(tableRefToBe.getTable().getTableName().getString()).build().buildException();
            }
            runOnServer = false;
        }
    }
    List<TableRef> buildingImmutableIndexes = Lists.newArrayListWithExpectedSize(immutableIndex.values().size());
    for (PTable index : immutableIndex.values()) {
        buildingImmutableIndexes.add(new TableRef(index, dataPlan.getTableRef().getTimeStamp(), dataPlan.getTableRef().getLowerBoundTimeStamp()));
    }
    // Make sure the first plan is targeting deletion from the data table
    // In the case of an immutable index, we'll also delete from the index.
    final TableRef dataTableRef = tableRefs[0] = tableRefToBe;
    /*
     * Create a mutationPlan for each queryPlan. One plan will be for the deletion of the rows
     * from the data table, while the others will be for deleting rows from immutable indexes.
     */
    List<MutationPlan> mutationPlans = Lists.newArrayListWithExpectedSize(tableRefs.length);
    for (int i = 0; i < tableRefs.length; i++) {
        final TableRef tableRef = tableRefs[i];
        final QueryPlan plan = queryPlans.get(i);
        if (!plan.getTableRef().equals(tableRef) || !(plan instanceof BaseQueryPlan)) {
            runOnServer = false;
            // FIXME: why set this to false in this case?
            noQueryReqd = false;
        }
        final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
        final StatementContext context = plan.getContext();
        // We can't simply check for the absence of a WHERE clause, since the filter
        // may have been optimized out. Instead, we check that there's a single SkipScanFilter
        // (or no filter at all) and that the scan ranges form a point lookup.
        if (noQueryReqd && (!context.getScan().hasFilter() || context.getScan().getFilter() instanceof SkipScanFilter) && context.getScanRanges().isPointLookup()) {
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public MutationState execute() throws SQLException {
                    // We have a point lookup, so we know we have a simple set of fully qualified
                    // keys for our ranges
                    ScanRanges ranges = context.getScanRanges();
                    Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator();
                    Map<ImmutableBytesPtr, RowMutationState> mutation = Maps.newHashMapWithExpectedSize(ranges.getPointLookupCount());
                    while (iterator.hasNext()) {
                        mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()), new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
                    }
                    return new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    return new ExplainPlan(Collections.singletonList("DELETE SINGLE ROW"));
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    // Don't include the target
                    return Collections.emptySet();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return 0l;
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return 0l;
                }
            });
        } else if (runOnServer) {
            // TODO: better abstraction
            Scan scan = context.getScan();
            // Propagate IGNORE_NEWER_MUTATIONS when replaying mutations since there will be
            // future dated data row mutations that will get in the way of generating the
            // correct index rows on replay.
            scan.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
            scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
            // Build an ungrouped aggregate query: select COUNT(*) from <table> where <where>
            // The coprocessor will delete each row returned from the scan
            // Ignoring ORDER BY, since with auto commit on and no limit makes no difference
            SelectStatement aggSelect = SelectStatement.create(SelectStatement.COUNT_ONE, delete.getHint());
            RowProjector projectorToBe = ProjectionCompiler.compile(context, aggSelect, GroupBy.EMPTY_GROUP_BY);
            context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
            if (plan.getProjector().projectEveryRow()) {
                projectorToBe = new RowProjector(projectorToBe, true);
            }
            final RowProjector projector = projectorToBe;
            final QueryPlan aggPlan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null);
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    // TODO: share this block of code with UPSERT SELECT
                    ImmutableBytesWritable ptr = context.getTempPtr();
                    PTable table = tableRef.getTable();
                    table.getIndexMaintainers(ptr, context.getConnection());
                    byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
                    ServerCache cache = null;
                    try {
                        if (ptr.getLength() > 0) {
                            byte[] uuidValue = ServerCacheClient.generateId();
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
                            context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
                        }
                        ResultIterator iterator = aggPlan.iterator();
                        try {
                            Tuple row = iterator.next();
                            final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            return new MutationState(maxSize, maxSizeBytes, connection) {

                                @Override
                                public long getUpdateCount() {
                                    return mutationCount;
                                }
                            };
                        } finally {
                            iterator.close();
                        }
                    } finally {
                        if (cache != null) {
                            cache.close();
                        }
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return aggPlan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return aggPlan.getEstimatedBytesToScan();
                }
            });
        } else {
            List<TableRef> immutableIndexRefsToBe = Lists.newArrayListWithExpectedSize(dataPlan.getTableRef().getTable().getIndexes().size());
            if (!buildingImmutableIndexes.isEmpty()) {
                immutableIndexRefsToBe = buildingImmutableIndexes;
            } else if (hasImmutableIndexes && !plan.getTableRef().equals(tableRef)) {
                immutableIndexRefsToBe = Collections.singletonList(plan.getTableRef());
            }
            final List<TableRef> immutableIndexRefs = immutableIndexRefsToBe;
            final DeletingParallelIteratorFactory parallelIteratorFactory2 = parallelIteratorFactory;
            mutationPlans.add(new MutationPlan() {

                @Override
                public ParameterMetaData getParameterMetaData() {
                    return context.getBindManager().getParameterMetaData();
                }

                @Override
                public StatementContext getContext() {
                    return context;
                }

                @Override
                public TableRef getTargetRef() {
                    return dataTableRef;
                }

                @Override
                public Set<TableRef> getSourceRefs() {
                    return dataPlan.getSourceRefs();
                }

                @Override
                public Operation getOperation() {
                    return operation;
                }

                @Override
                public MutationState execute() throws SQLException {
                    ResultIterator iterator = plan.iterator();
                    try {
                        if (!hasLimit) {
                            Tuple tuple;
                            long totalRowCount = 0;
                            if (parallelIteratorFactory2 != null) {
                                parallelIteratorFactory2.setRowProjector(plan.getProjector());
                                parallelIteratorFactory2.setTargetTableRef(tableRef);
                                parallelIteratorFactory2.setSourceTableRef(plan.getTableRef());
                                parallelIteratorFactory2.setIndexTargetTableRefs(immutableIndexRefs);
                            }
                            while ((tuple = iterator.next()) != null) {
                                // Runs query
                                Cell kv = tuple.getValue(0);
                                totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
                            }
                            // Return the total number of rows that have been deleted. When auto commit is off,
                            // the mutations will all be in the mutation state of the current connection.
                            MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount);
                            // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
                            state.setReadMetricQueue(plan.getContext().getReadMetricsQueue());
                            return state;
                        } else {
                            return deleteRows(plan.getContext(), tableRef, immutableIndexRefs, iterator, plan.getProjector(), plan.getTableRef());
                        }
                    } finally {
                        iterator.close();
                    }
                }

                @Override
                public ExplainPlan getExplainPlan() throws SQLException {
                    List<String> queryPlanSteps = plan.getExplainPlan().getPlanSteps();
                    List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
                    planSteps.add("DELETE ROWS");
                    planSteps.addAll(queryPlanSteps);
                    return new ExplainPlan(planSteps);
                }

                @Override
                public Long getEstimatedRowsToScan() throws SQLException {
                    return plan.getEstimatedRowsToScan();
                }

                @Override
                public Long getEstimatedBytesToScan() throws SQLException {
                    return plan.getEstimatedBytesToScan();
                }
            });
        }
    }
    return mutationPlans.size() == 1 ? mutationPlans.get(0) : new MultiDeleteMutationPlan(mutationPlans);
}
Also used : PTable(org.apache.phoenix.schema.PTable) ParseNode(org.apache.phoenix.parse.ParseNode) List(java.util.List) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Cell(org.apache.hadoop.hbase.Cell) MetaDataClient(org.apache.phoenix.schema.MetaDataClient) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ResultIterator(org.apache.phoenix.iterate.ResultIterator) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) AliasedNode(org.apache.phoenix.parse.AliasedNode) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) PLong(org.apache.phoenix.schema.types.PLong) Scan(org.apache.hadoop.hbase.client.Scan) Map(java.util.Map) HashMap(java.util.HashMap) ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Set(java.util.Set) PhoenixResultSet(org.apache.phoenix.jdbc.PhoenixResultSet) SQLException(java.sql.SQLException) Operation(org.apache.phoenix.jdbc.PhoenixStatement.Operation) BaseQueryPlan(org.apache.phoenix.execute.BaseQueryPlan) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) MetaDataEntityNotFoundException(org.apache.phoenix.schema.MetaDataEntityNotFoundException) ResultIterator(org.apache.phoenix.iterate.ResultIterator) Iterator(java.util.Iterator) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) SkipScanFilter(org.apache.phoenix.filter.SkipScanFilter) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) Hint(org.apache.phoenix.parse.HintNode.Hint) MutationState(org.apache.phoenix.execute.MutationState) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) PTableKey(org.apache.phoenix.schema.PTableKey) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef) ParameterMetaData(java.sql.ParameterMetaData) Tuple(org.apache.phoenix.schema.tuple.Tuple) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
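
In the point-lookup branch of compile(), each fully qualified row key from the scan ranges becomes one entry in a Map<ImmutableBytesPtr, RowMutationState> marking that row for deletion. A simplified sketch of that shape (not Phoenix code; the class name is hypothetical, and an Integer stands in for RowMutationState, which needs connection-level state to construct):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class PointLookupDeleteSketch {
    public static void main(String[] args) {
        // Stand-in for ranges.getPointLookupKeyIterator(): already-resolved row keys.
        List<byte[]> pointLookupKeys = Arrays.asList(Bytes.toBytes("k1"), Bytes.toBytes("k2"));
        Map<ImmutableBytesPtr, Integer> deletes = new HashMap<>(pointLookupKeys.size());
        for (byte[] rowKey : pointLookupKeys) {
            // Wrap each key without copying; the pointer's content-based hash keys the map.
            deletes.put(new ImmutableBytesPtr(rowKey), -1 /* delete marker stand-in */);
        }
        System.out.println(deletes.size()); // prints 2
    }
}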

Example 70 with ImmutableBytesPtr

use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

the class SpillManager method getAggregators.

// Instantiate Aggregators from a serialized byte array
private Aggregator[] getAggregators(byte[] data) throws IOException {
    DataInputStream input = null;
    try {
        input = new DataInputStream(new ByteArrayInputStream(data));
        // key length
        int keyLength = WritableUtils.readVInt(input);
        int vIntKeyLength = WritableUtils.getVIntSize(keyLength);
        ImmutableBytesPtr ptr = new ImmutableBytesPtr(data, vIntKeyLength, keyLength);
        // value length
        input.skip(keyLength);
        int valueLength = WritableUtils.readVInt(input);
        // size in bytes of the vint that encodes the value length
        int vIntValLength = WritableUtils.getVIntSize(valueLength);
        KeyValue keyValue = KeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(), QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN, QueryConstants.AGG_TIMESTAMP, data, vIntKeyLength + keyLength + vIntValLength, valueLength);
        Tuple result = new SingleKeyValueTuple(keyValue);
        TupleUtil.getAggregateValue(result, ptr);
        KeyValueSchema schema = aggregators.getValueSchema();
        ValueBitSet tempValueSet = ValueBitSet.newInstance(schema);
        tempValueSet.clear();
        tempValueSet.or(ptr);
        int i = 0, maxOffset = ptr.getOffset() + ptr.getLength();
        SingleAggregateFunction[] funcArray = aggregators.getFunctions();
        Aggregator[] sAggs = new Aggregator[funcArray.length];
        Boolean hasValue;
        schema.iterator(ptr);
        while ((hasValue = schema.next(ptr, i, maxOffset, tempValueSet)) != null) {
            SingleAggregateFunction func = funcArray[i];
            sAggs[i++] = hasValue ? func.newServerAggregator(conf, ptr) : func.newServerAggregator(conf);
        }
        return sAggs;
    } finally {
        Closeables.closeQuietly(input);
    }
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) ValueBitSet(org.apache.phoenix.schema.ValueBitSet) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) SingleKeyValueTuple(org.apache.phoenix.schema.tuple.SingleKeyValueTuple) Aggregator(org.apache.phoenix.expression.aggregator.Aggregator) DataInputStream(java.io.DataInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) SingleAggregateFunction(org.apache.phoenix.expression.function.SingleAggregateFunction) KeyValueSchema(org.apache.phoenix.schema.KeyValueSchema) Tuple(org.apache.phoenix.schema.tuple.Tuple) SingleKeyValueTuple(org.apache.phoenix.schema.tuple.SingleKeyValueTuple)
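
The spill record read above is laid out as [vint keyLength][key bytes][vint valueLength][value bytes], which is why the key pointer starts at getVIntSize(keyLength) and the value starts at vIntKeyLength + keyLength + vIntValLength. A small round-trip sketch of that layout (not Phoenix code; the class name is hypothetical), using only WritableUtils and the ImmutableBytesPtr constructor from the method:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class SpillRecordLayoutSketch {
    public static void main(String[] args) throws IOException {
        byte[] key = Bytes.toBytes("group-key");
        byte[] value = Bytes.toBytes("agg-bytes");

        // Write: [vint keyLength][key][vint valueLength][value]
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);
        WritableUtils.writeVInt(out, key.length);
        out.write(key);
        WritableUtils.writeVInt(out, value.length);
        out.write(value);
        byte[] data = bos.toByteArray();

        // Read back with the same offset arithmetic as getAggregators().
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
        int keyLength = WritableUtils.readVInt(in);
        int vIntKeyLength = WritableUtils.getVIntSize(keyLength);
        ImmutableBytesPtr keyPtr = new ImmutableBytesPtr(data, vIntKeyLength, keyLength);
        in.skip(keyLength);
        int valueLength = WritableUtils.readVInt(in);
        int valueOffset = vIntKeyLength + keyLength + WritableUtils.getVIntSize(valueLength);

        System.out.println(Bytes.toString(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength()));
        System.out.println(Bytes.toString(data, valueOffset, valueLength));
    }
}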

Aggregations

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) 120
Mutation (org.apache.hadoop.hbase.client.Mutation) 31
PTable (org.apache.phoenix.schema.PTable) 28
ArrayList (java.util.ArrayList) 27
Region (org.apache.hadoop.hbase.regionserver.Region) 22
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity) 22
Test (org.junit.Test) 21
Cell (org.apache.hadoop.hbase.Cell) 20
Put (org.apache.hadoop.hbase.client.Put) 18
List (java.util.List) 15
Scan (org.apache.hadoop.hbase.client.Scan) 15
Pair (org.apache.hadoop.hbase.util.Pair) 15
IOException (java.io.IOException) 14
Expression (org.apache.phoenix.expression.Expression) 14
PColumn (org.apache.phoenix.schema.PColumn) 14
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock) 13
PSmallint (org.apache.phoenix.schema.types.PSmallint) 12
HashMap (java.util.HashMap) 11
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable) 11
LiteralExpression (org.apache.phoenix.expression.LiteralExpression) 11