Example 1 with FilterableStatement

Use of org.apache.phoenix.parse.FilterableStatement in project phoenix by apache.

The initializeScan method of the class BaseResultIterators:

private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException {
    StatementContext context = plan.getContext();
    TableRef tableRef = plan.getTableRef();
    PTable table = tableRef.getTable();
    Map<byte[], NavigableSet<byte[]>> familyMap = scan.getFamilyMap();
    // Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys
    if (context.getConnection().isDescVarLengthRowKeyUpgrade()) {
        // We project *all* KeyValues across all column families as we make a pass over
        // a physical table and we want to make sure we catch all KeyValues that may be
        // dynamic or part of an updatable view.
        familyMap.clear();
        scan.setMaxVersions();
        // Remove any filter
        scan.setFilter(null);
        // Traverse (and subsequently clone) all KeyValues
        scan.setRaw(true);
        // Pass over PTable so we can re-write rows according to the row key schema
        scan.setAttribute(BaseScannerRegionObserver.UPGRADE_DESC_ROW_KEY, UngroupedAggregateRegionObserver.serialize(table));
    } else {
        FilterableStatement statement = plan.getStatement();
        RowProjector projector = plan.getProjector();
        boolean optimizeProjection = false;
        boolean keyOnlyFilter = familyMap.isEmpty() && context.getWhereConditionColumns().isEmpty();
        if (!projector.projectEverything()) {
            // We only enter this branch when there is exactly one column family,
            // because otherwise local indexes can break: the column families in
            // the PTable may not match the actual column families of the
            // underlying table (which is bad).
            if (keyOnlyFilter && table.getColumnFamilies().size() == 1) {
                // Project the one column family. We must project a column family since it's possible
                // that there are other non declared column families that we need to ignore.
                scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
            } else {
                optimizeProjection = true;
                if (projector.projectEveryRow()) {
                    if (table.getViewType() == ViewType.MAPPED) {
                        // Since we don't have the empty key value in MAPPED tables, 
                        // we must project all CFs in HRS. However, only the
                        // selected column values are returned back to client.
                        context.getWhereConditionColumns().clear();
                        for (PColumnFamily family : table.getColumnFamilies()) {
                            context.addWhereConditionColumn(family.getName().getBytes(), null);
                        }
                    } else {
                        byte[] ecf = SchemaUtil.getEmptyColumnFamily(table);
                        // Project the empty key value unless the column family
                        // containing it has been projected in its entirety.
                        if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) {
                            scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst());
                        }
                    }
                }
            }
        }
        // Add FirstKeyOnlyFilter if there are no references to key value columns
        if (keyOnlyFilter) {
            ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
        }
        if (perScanLimit != null) {
            ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
        }
        if (offset != null) {
            ScanUtil.addOffsetAttribute(scan, offset);
        }
        int cols = plan.getGroupBy().getOrderPreservingColumnCount();
        // Push a DistinctPrefixFilter when the grouping columns form a strict
        // prefix of the row key, so the server can seek past each distinct
        // prefix instead of scanning every row.
        if (cols > 0 && keyOnlyFilter
                && !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN)
                && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount()
                && plan.getGroupBy().isOrderPreserving()
                && (context.getAggregationManager().isEmpty() || plan.getGroupBy().isUngroupedAggregate())) {
            ScanUtil.andFilterAtEnd(scan, new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(), cols));
            if (plan.getLimit() != null) {
                // We can push the limit to the server
                ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
            }
        }
        scan.setAttribute(BaseScannerRegionObserver.QUALIFIER_ENCODING_SCHEME, new byte[] { table.getEncodingScheme().getSerializedMetadataValue() });
        scan.setAttribute(BaseScannerRegionObserver.IMMUTABLE_STORAGE_ENCODING_SCHEME, new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() });
        // We use this flag on the server side to determine which value column
        // qualifier to use in the key values returned from the server.
        scan.setAttribute(BaseScannerRegionObserver.USE_NEW_VALUE_COLUMN_QUALIFIER, Bytes.toBytes(true));
        // When analyzing the table, no key value lookups are performed,
        // so there is no point setting the qualifier ranges.
        if (!ScanUtil.isAnalyzeTable(scan)) {
            setQualifierRanges(keyOnlyFilter, table, scan, context);
        }
        if (optimizeProjection) {
            optimizeProjection(context, scan, table, statement);
        }
    }
}
Also used : NavigableSet(java.util.NavigableSet) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) DistinctPrefixFilter(org.apache.phoenix.filter.DistinctPrefixFilter) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) StatementContext(org.apache.phoenix.compile.StatementContext) RowProjector(org.apache.phoenix.compile.RowProjector) FilterableStatement(org.apache.phoenix.parse.FilterableStatement) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) TableRef(org.apache.phoenix.schema.TableRef)
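
For readers unfamiliar with ScanUtil.andFilterAtBeginning / andFilterAtEnd used above: both AND a new filter with whatever filter is already installed on the Scan. Below is a minimal sketch of that idea using a plain HBase FilterList; the helper is illustrative only (the class name and ordering are assumptions, not Phoenix's exact implementation).

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PageFilter;

public class AndFilterSketch {

    // Illustrative stand-in for ScanUtil.andFilterAtEnd: AND the new filter
    // with whatever filter is already installed on the scan.
    static void andFilterAtEnd(Scan scan, Filter filter) {
        Filter existing = scan.getFilter();
        if (existing == null) {
            scan.setFilter(filter);
        } else {
            // MUST_PASS_ALL makes the list behave as a logical AND.
            FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL);
            list.addFilter(existing);
            list.addFilter(filter);
            scan.setFilter(list);
        }
    }

    public static void main(String[] args) {
        Scan scan = new Scan();
        andFilterAtEnd(scan, new FirstKeyOnlyFilter()); // key-only scan, no values needed
        andFilterAtEnd(scan, new PageFilter(100));      // caps rows returned per region
        System.out.println(scan.getFilter());
    }
}

Note that PageFilter, as in the Phoenix code above, is applied per region, so the client may still receive more than perScanLimit rows in total; the filter only bounds server-side work per scan.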

Example 2 with FilterableStatement

Use of org.apache.phoenix.parse.FilterableStatement in project phoenix by apache.

The getSplits method of the class ParallelIteratorsSplitTest:

private static List<KeyRange> getSplits(final TableRef tableRef, final Scan scan, final List<HRegionLocation> regions, final ScanRanges scanRanges) throws SQLException {
    final List<TableRef> tableRefs = Collections.singletonList(tableRef);
    ColumnResolver resolver = new ColumnResolver() {

        @Override
        public List<PFunction> getFunctions() {
            return Collections.emptyList();
        }

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            return null;
        }

        @Override
        public List<PSchema> getSchemas() {
            return null;
        }
    };
    PhoenixConnection connection = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    final PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    context.setScanRanges(scanRanges);
    ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() {

        private final Set<TableRef> tableRefs = ImmutableSet.of(tableRef);

        @Override
        public StatementContext getContext() {
            return context;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
            return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return ExplainPlan.EMPTY_PLAN;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator() throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public long getEstimatedSize() {
            return 0;
        }

        @Override
        public Set<TableRef> getSourceRefs() {
            return tableRefs;
        }

        @Override
        public TableRef getTableRef() {
            return tableRef;
        }

        @Override
        public RowProjector getProjector() {
            return RowProjector.EMPTY_PROJECTOR;
        }

        @Override
        public Integer getLimit() {
            return null;
        }

        @Override
        public Integer getOffset() {
            return null;
        }

        @Override
        public OrderBy getOrderBy() {
            return OrderBy.EMPTY_ORDER_BY;
        }

        @Override
        public GroupBy getGroupBy() {
            return GroupBy.EMPTY_GROUP_BY;
        }

        @Override
        public List<KeyRange> getSplits() {
            return null;
        }

        @Override
        public FilterableStatement getStatement() {
            return SelectStatement.SELECT_ONE;
        }

        @Override
        public boolean isDegenerate() {
            return false;
        }

        @Override
        public boolean isRowKeyOrdered() {
            return true;
        }

        @Override
        public List<List<Scan>> getScans() {
            return null;
        }

        @Override
        public Operation getOperation() {
            return Operation.QUERY;
        }

        @Override
        public boolean useRoundRobinIterator() {
            return false;
        }

        @Override
        public Long getEstimatedRowsToScan() {
            return null;
        }

        @Override
        public Long getEstimatedBytesToScan() {
            return null;
        }
    }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory(context.getConnection().getQueryServices()), context.getScan(), false);
    List<KeyRange> keyRanges = parallelIterators.getSplits();
    return keyRanges;
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) PFunction(org.apache.phoenix.parse.PFunction) SQLException(java.sql.SQLException) Operation(org.apache.phoenix.jdbc.PhoenixStatement.Operation) QueryPlan(org.apache.phoenix.compile.QueryPlan) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) SequenceManager(org.apache.phoenix.compile.SequenceManager) StatementContext(org.apache.phoenix.compile.StatementContext) FilterableStatement(org.apache.phoenix.parse.FilterableStatement) List(java.util.List) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) OrderBy(org.apache.phoenix.compile.OrderByCompiler.OrderBy) ParallelIterators(org.apache.phoenix.iterate.ParallelIterators) GroupBy(org.apache.phoenix.compile.GroupByCompiler.GroupBy) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) ResultIterator(org.apache.phoenix.iterate.ResultIterator) PSchema(org.apache.phoenix.parse.PSchema) ParallelScanGrouper(org.apache.phoenix.iterate.ParallelScanGrouper) RowProjector(org.apache.phoenix.compile.RowProjector) Scan(org.apache.hadoop.hbase.client.Scan) SpoolingResultIterator(org.apache.phoenix.iterate.SpoolingResultIterator) TableRef(org.apache.phoenix.schema.TableRef) ParameterMetaData(java.sql.ParameterMetaData) PhoenixParameterMetaData(org.apache.phoenix.jdbc.PhoenixParameterMetaData) ExplainPlan(org.apache.phoenix.compile.ExplainPlan)
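
As a usage note, the List<KeyRange> returned by getSplits can be inspected to verify that the computed parallel scan boundaries line up with the supplied region boundaries. A small sketch follows; SplitDebug and dumpSplits are hypothetical names, and the sketch assumes only KeyRange's getLowerRange()/getUpperRange() accessors from org.apache.phoenix.query.KeyRange.

import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.KeyRange;

final class SplitDebug {

    // Print each split's byte boundaries; an empty byte[] means the range is
    // unbounded on that side, per the usual HBase/Phoenix convention.
    static void dumpSplits(List<KeyRange> splits) {
        for (KeyRange range : splits) {
            System.out.println(Bytes.toStringBinary(range.getLowerRange())
                    + " .. " + Bytes.toStringBinary(range.getUpperRange()));
        }
    }
}

In the test above, one might call SplitDebug.dumpSplits(getSplits(tableRef, scan, regions, scanRanges)) to eyeball how the ScanRanges were intersected with the region boundaries.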

Aggregations

RowProjector (org.apache.phoenix.compile.RowProjector) 2
StatementContext (org.apache.phoenix.compile.StatementContext) 2
FilterableStatement (org.apache.phoenix.parse.FilterableStatement) 2
TableRef (org.apache.phoenix.schema.TableRef) 2
ImmutableSet (com.google.common.collect.ImmutableSet) 1
ParameterMetaData (java.sql.ParameterMetaData) 1
SQLException (java.sql.SQLException) 1
List (java.util.List) 1
NavigableSet (java.util.NavigableSet) 1
Set (java.util.Set) 1
Scan (org.apache.hadoop.hbase.client.Scan) 1
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) 1
PageFilter (org.apache.hadoop.hbase.filter.PageFilter) 1
ColumnResolver (org.apache.phoenix.compile.ColumnResolver) 1
ExplainPlan (org.apache.phoenix.compile.ExplainPlan) 1
GroupBy (org.apache.phoenix.compile.GroupByCompiler.GroupBy) 1
OrderBy (org.apache.phoenix.compile.OrderByCompiler.OrderBy) 1
QueryPlan (org.apache.phoenix.compile.QueryPlan) 1
SequenceManager (org.apache.phoenix.compile.SequenceManager) 1
DistinctPrefixFilter (org.apache.phoenix.filter.DistinctPrefixFilter) 1