
Example 6 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project.

From the class TraceQueryPlan, the iterator method:

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
    final PhoenixConnection conn = stmt.getConnection();
    if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    return new ResultIterator() {

        // Tracks whether the single trace-id row has already been returned.
        private boolean first = true;
        @Override
        public void close() throws SQLException {
        }

        @Override
        public Tuple next() throws SQLException {
            if (!first)
                return null;
            TraceScope traceScope = conn.getTraceScope();
            if (traceStatement.isTraceOn()) {
                conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
                if (conn.getSampler() == Sampler.NEVER) {
                    closeTraceScope(conn);
                }
                if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
                    traceScope = Tracing.startNewSpan(conn, "Enabling trace");
                    if (traceScope.getSpan() != null) {
                        conn.setTraceScope(traceScope);
                    } else {
                        closeTraceScope(conn);
                    }
                }
            } else {
                closeTraceScope(conn);
                conn.setSampler(Sampler.NEVER);
            }
            if (traceScope == null || traceScope.getSpan() == null)
                return null;
            first = false;
            ImmutableBytesWritable ptr = new ImmutableBytesWritable();
            ParseNodeFactory factory = new ParseNodeFactory();
            LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId());
            LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS);
            expression.evaluate(null, ptr);
            byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
            Cell cell = CellUtil.createCell(rowKey, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, EnvironmentEdgeManager.currentTimeMillis(), Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
            List<Cell> cells = new ArrayList<Cell>(1);
            cells.add(cell);
            return new ResultTuple(Result.create(cells));
        }

        private void closeTraceScope(final PhoenixConnection conn) {
            if (conn.getTraceScope() != null) {
                conn.getTraceScope().close();
                conn.setTraceScope(null);
            }
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    };
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple), ResultIterator (org.apache.phoenix.iterate.ResultIterator), TraceScope (org.apache.htrace.TraceScope), ArrayList (java.util.ArrayList), LiteralParseNode (org.apache.phoenix.parse.LiteralParseNode), List (java.util.List), Cell (org.apache.hadoop.hbase.Cell), ParseNodeFactory (org.apache.phoenix.parse.ParseNodeFactory)
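A minimal consumption sketch for the plan above, assuming TRACE ON is issued as a query through the Phoenix JDBC driver and that the single returned row (when tracing is newly enabled) carries the trace id in its first column; the connection URL and column index are placeholders, not taken from the example.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class TraceOnSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection URL; adjust to your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement();
             // TRACE ON yields at most one row (see the iterator above): it is empty
             // when tracing was already on or the configured sampler is NEVER.
             ResultSet rs = stmt.executeQuery("TRACE ON")) {
            if (rs.next()) {
                long traceId = rs.getLong(1); // trace id encoded as a PLong row key above
                System.out.println("tracing enabled, trace id = " + traceId);
            } else {
                System.out.println("tracing not started (already on, or sampler is NEVER)");
            }
        }
    }
}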

Example 7 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project.

From the class UnionPlan, the getExplainPlan method:

@Override
public ExplainPlan getExplainPlan() throws SQLException {
    List<String> steps = new ArrayList<String>();
    steps.add("UNION ALL OVER " + this.plans.size() + " QUERIES");
    ResultIterator iterator = iterator();
    iterator.explain(steps);
    // Indent plan steps nested under the union, except the last client-side merge/concat step (if there is one)
    int offset = !orderBy.getOrderByExpressions().isEmpty() && limit != null ? 2 : limit != null ? 1 : 0;
    for (int i = 1; i < steps.size() - offset; i++) {
        steps.set(i, "    " + steps.get(i));
    }
    return new ExplainPlan(steps);
}
Also used: ArrayList (java.util.ArrayList), LimitingResultIterator (org.apache.phoenix.iterate.LimitingResultIterator), OffsetResultIterator (org.apache.phoenix.iterate.OffsetResultIterator), ConcatResultIterator (org.apache.phoenix.iterate.ConcatResultIterator), ResultIterator (org.apache.phoenix.iterate.ResultIterator), MergeSortTopNResultIterator (org.apache.phoenix.iterate.MergeSortTopNResultIterator), ExplainPlan (org.apache.phoenix.compile.ExplainPlan)
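To make the indentation offset concrete, here is a small standalone sketch that applies the same rule to a made-up list of plan steps: the UNION header and any trailing client-side merge sort and limit steps stay unindented, while the per-query sub-plans in between get indented.

import java.util.ArrayList;
import java.util.List;

public class UnionExplainIndentSketch {
    public static void main(String[] args) {
        // Hypothetical explain steps for a UNION ALL of two queries with ORDER BY ... LIMIT.
        List<String> steps = new ArrayList<>();
        steps.add("UNION ALL OVER 2 QUERIES");
        steps.add("CLIENT FULL SCAN OVER T1");
        steps.add("CLIENT FULL SCAN OVER T2");
        steps.add("CLIENT MERGE SORT");
        steps.add("CLIENT LIMIT 10");

        boolean hasOrderBy = true;  // stands in for !orderBy.getOrderByExpressions().isEmpty()
        boolean hasLimit = true;    // stands in for limit != null
        int offset = hasOrderBy && hasLimit ? 2 : hasLimit ? 1 : 0;

        // Indent the per-query sub-plans, skipping the trailing merge/limit steps.
        for (int i = 1; i < steps.size() - offset; i++) {
            steps.set(i, "    " + steps.get(i));
        }
        steps.forEach(System.out::println);
    }
}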

Example 8 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project.

From the class PostDDLCompiler, the compile method:

public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList, final long timestamp) throws SQLException {
    PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, new ColumnResolver() {

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public List<PFunction> getFunctions() {
            return Collections.<PFunction>emptyList();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new FunctionNotFoundException(functionName);
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            throw new SchemaNotFoundException(schemaName);
        }

        @Override
        public List<PSchema> getSchemas() {
            throw new UnsupportedOperationException();
        }
    }, scan, new SequenceManager(statement));
    return new BaseMutationPlan(context, Operation.UPSERT) {

        /* FIXME */
        @Override
        public MutationState execute() throws SQLException {
            if (tableRefs.isEmpty()) {
                return new MutationState(0, 1000, connection);
            }
            boolean wasAutoCommit = connection.getAutoCommit();
            try {
                connection.setAutoCommit(true);
                SQLException sqlE = null;
                /*
                     * Handles:
                     * 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
                     * 2) deletion of all column values for a ALTER TABLE DROP COLUMN
                     * 3) updating the necessary rows to have an empty KV
                     * 4) updating table stats
                     */
                long totalMutationCount = 0;
                for (final TableRef tableRef : tableRefs) {
                    Scan scan = ScanUtil.newScan(context.getScan());
                    SelectStatement select = SelectStatement.COUNT_ONE;
                    // We need to use this tableRef
                    ColumnResolver resolver = new ColumnResolver() {

                        @Override
                        public List<TableRef> getTables() {
                            return Collections.singletonList(tableRef);
                        }

                        @Override
                        public java.util.List<PFunction> getFunctions() {
                            return Collections.emptyList();
                        }

                        @Override
                        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
                            PColumn column = tableName != null ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) : tableRef.getTable().getColumnForColumnName(colName);
                            return new ColumnRef(tableRef, column.getPosition());
                        }

                        @Override
                        public PFunction resolveFunction(String functionName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public boolean hasUDFs() {
                            return false;
                        }

                        @Override
                        public List<PSchema> getSchemas() {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public PSchema resolveSchema(String schemaName) throws SQLException {
                            throw new SchemaNotFoundException(schemaName);
                        }
                    };
                    PhoenixStatement statement = new PhoenixStatement(connection);
                    StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
                    long ts = timestamp;
                    // Transactional tables keep cell timestamps in nanoseconds, so convert before setting the scan's time range.
                    if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
                        ts = TransactionUtil.convertToNanoseconds(ts);
                    }
                    ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
                    if (emptyCF != null) {
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
                    }
                    ServerCache cache = null;
                    try {
                        if (deleteList != null) {
                            if (deleteList.isEmpty()) {
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
                            // In the case of a row deletion, add index metadata so mutable secondary indexing works
                            /* TODO: we currently manually run a scan to delete the index data here
                                    ImmutableBytesWritable ptr = context.getTempPtr();
                                    tableRef.getTable().getIndexMaintainers(ptr);
                                    if (ptr.getLength() > 0) {
                                        IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                                        cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                        byte[] uuidValue = cache.getId();
                                        scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                    }
                                    */
                            } else {
                                // In the case of the empty key value column family changing, do not send the index
                                // metadata, as we're currently managing this from the client. It's possible for the
                                // data empty column family to stay the same, while the index empty column family
                                // changes.
                                PColumn column = deleteList.get(0);
                                byte[] cq = column.getColumnQualifierBytes();
                                if (emptyCF == null) {
                                    scan.addColumn(column.getFamilyName().getBytes(), cq);
                                }
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
                            }
                        }
                        List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
                        if (projectCFs == null) {
                            for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
                                columnFamilies.add(family.getName().getBytes());
                            }
                        } else {
                            for (byte[] projectCF : projectCFs) {
                                columnFamilies.add(projectCF);
                            }
                        }
                        // Need to project all column families into the scan, since we haven't yet created our empty key value
                        RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
                        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
                        // Project the requested column families explicitly, since at this point we haven't added the empty key value everywhere.
                        if (columnFamilies != null) {
                            scan.getFamilyMap().clear();
                            for (byte[] family : columnFamilies) {
                                scan.addFamily(family);
                            }
                            projector = new RowProjector(projector, false);
                        }
                        // Resolve the WHERE clause; if the columns are already gone, skip this table and continue with any other post-DDL operations.
                        try {
                            // Since dropping a VIEW does not affect the underlying data, we do
                            // not need to pass through the view statement here.
                            // Push where clause into scan
                            WhereCompiler.compile(context, select);
                        } catch (ColumnFamilyNotFoundException e) {
                            continue;
                        } catch (ColumnNotFoundException e) {
                            continue;
                        } catch (AmbiguousColumnException e) {
                            continue;
                        }
                        QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, null);
                        try {
                            ResultIterator iterator = plan.iterator();
                            try {
                                Tuple row = iterator.next();
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            } catch (SQLException e) {
                                sqlE = e;
                            } finally {
                                try {
                                    iterator.close();
                                } catch (SQLException e) {
                                    if (sqlE == null) {
                                        sqlE = e;
                                    } else {
                                        sqlE.setNextException(e);
                                    }
                                } finally {
                                    if (sqlE != null) {
                                        throw sqlE;
                                    }
                                }
                            }
                        } catch (TableNotFoundException e) {
                        // Ignore and continue, as HBase throws when table hasn't been written to
                        // FIXME: Remove if this is fixed in 0.96
                        }
                    } finally {
                        if (cache != null) {
                            // Remove server cache if there is one
                            cache.close();
                        }
                    }
                }
                final long count = totalMutationCount;
                return new MutationState(1, 1000, connection) {

                    @Override
                    public long getUpdateCount() {
                        return count;
                    }
                };
            } finally {
                if (!wasAutoCommit)
                    connection.setAutoCommit(wasAutoCommit);
            }
        }
    };
}
Also used: ServerCache (org.apache.phoenix.cache.ServerCacheClient.ServerCache), PFunction (org.apache.phoenix.parse.PFunction), SQLException (java.sql.SQLException), PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement), PColumn (org.apache.phoenix.schema.PColumn), SelectStatement (org.apache.phoenix.parse.SelectStatement), TableNotFoundException (org.apache.phoenix.schema.TableNotFoundException), List (java.util.List), AmbiguousColumnException (org.apache.phoenix.schema.AmbiguousColumnException), AggregatePlan (org.apache.phoenix.execute.AggregatePlan), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), ResultIterator (org.apache.phoenix.iterate.ResultIterator), PSchema (org.apache.phoenix.parse.PSchema), PColumnFamily (org.apache.phoenix.schema.PColumnFamily), ColumnFamilyNotFoundException (org.apache.phoenix.schema.ColumnFamilyNotFoundException), FunctionNotFoundException (org.apache.phoenix.schema.FunctionNotFoundException), ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException), MutationState (org.apache.phoenix.execute.MutationState), Scan (org.apache.hadoop.hbase.client.Scan), ColumnRef (org.apache.phoenix.schema.ColumnRef), SchemaNotFoundException (org.apache.phoenix.schema.SchemaNotFoundException), TableRef (org.apache.phoenix.schema.TableRef), Tuple (org.apache.phoenix.schema.tuple.Tuple)
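The nested try/finally around the aggregate scan follows a common JDBC cleanup pattern: remember the first SQLException, chain any failure from close() onto it with setNextException, and rethrow a single exception. A self-contained sketch of just that pattern, using a hypothetical resource interface rather than the Phoenix classes:

import java.sql.SQLException;

public class ExceptionChainingSketch {

    // Stand-in for ResultIterator: both use() and close() may throw SQLException.
    interface SqlResource {
        void use() throws SQLException;
        void close() throws SQLException;
    }

    static void consume(SqlResource resource) throws SQLException {
        SQLException sqlE = null;
        try {
            resource.use();
        } catch (SQLException e) {
            sqlE = e;                         // remember the primary failure
        } finally {
            try {
                resource.close();
            } catch (SQLException e) {
                if (sqlE == null) {
                    sqlE = e;                 // close failed but use() succeeded
                } else {
                    sqlE.setNextException(e); // keep the primary, chain the close failure
                }
            } finally {
                if (sqlE != null) {
                    throw sqlE;               // rethrow exactly one exception
                }
            }
        }
    }

    public static void main(String[] args) {
        try {
            consume(new SqlResource() {
                public void use() throws SQLException { throw new SQLException("primary failure"); }
                public void close() throws SQLException { throw new SQLException("close failure"); }
            });
        } catch (SQLException e) {
            System.out.println(e.getMessage());                    // primary failure
            System.out.println(e.getNextException().getMessage()); // close failure
        }
    }
}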

Example 9 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project.

From the class ClientScanPlan, the iterator method:

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    ResultIterator iterator = delegate.iterator(scanGrouper, scan);
    if (where != null) {
        iterator = new FilterResultIterator(iterator, where);
    }
    if (!orderBy.getOrderByExpressions().isEmpty()) {
        // TopN
        int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
        iterator = new OrderedResultIterator(iterator, orderBy.getOrderByExpressions(), thresholdBytes, limit, offset, projector.getEstimatedRowByteSize());
    } else {
        if (offset != null) {
            iterator = new OffsetResultIterator(iterator, offset);
        }
        if (limit != null) {
            iterator = new LimitingResultIterator(iterator, limit);
        }
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        iterator = new SequenceResultIterator(iterator, context.getSequenceManager());
    }
    return iterator;
}
Also used: OffsetResultIterator (org.apache.phoenix.iterate.OffsetResultIterator), OrderedResultIterator (org.apache.phoenix.iterate.OrderedResultIterator), SequenceResultIterator (org.apache.phoenix.iterate.SequenceResultIterator), LimitingResultIterator (org.apache.phoenix.iterate.LimitingResultIterator), FilterResultIterator (org.apache.phoenix.iterate.FilterResultIterator), ResultIterator (org.apache.phoenix.iterate.ResultIterator)
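The method above composes decorators: each wrapper adds one post-processing step (filtering, ordering, offset, limit, sequence evaluation) around the delegate iterator. A simplified sketch of the same shape, using hypothetical interfaces instead of the real Phoenix iterator classes:

import java.util.Iterator;
import java.util.List;
import java.util.function.Predicate;

public class IteratorDecoratorSketch {

    // Hypothetical row iterator; the real code uses org.apache.phoenix.iterate.ResultIterator.
    interface RowIterator {
        String next();   // null when exhausted
    }

    static RowIterator fromList(List<String> rows) {
        Iterator<String> it = rows.iterator();
        return () -> it.hasNext() ? it.next() : null;
    }

    // Filter decorator: skip rows that do not match, mirroring FilterResultIterator.
    static RowIterator filtered(RowIterator delegate, Predicate<String> where) {
        return () -> {
            String row;
            while ((row = delegate.next()) != null) {
                if (where.test(row)) {
                    return row;
                }
            }
            return null;
        };
    }

    // Limit decorator: stop after `limit` rows, mirroring LimitingResultIterator.
    static RowIterator limited(RowIterator delegate, int limit) {
        int[] count = {0};
        return () -> count[0]++ < limit ? delegate.next() : null;
    }

    public static void main(String[] args) {
        RowIterator it = fromList(List.of("a", "bb", "ccc", "dddd"));
        it = filtered(it, s -> s.length() > 1);  // WHERE length > 1
        it = limited(it, 2);                     // LIMIT 2
        for (String row; (row = it.next()) != null; ) {
            System.out.println(row);             // prints bb, ccc
        }
    }
}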

Example 10 with ResultIterator

Use of org.apache.phoenix.iterate.ResultIterator in the Apache Phoenix project.

From the class LiteralResultIterationPlan, the newIterator method:

@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan, final Map<ImmutableBytesPtr, ServerCache> caches) throws SQLException {
    ResultIterator scanner = new ResultIterator() {

        private final Iterator<Tuple> tupleIterator = tuples.iterator();

        private boolean closed = false;

        private int count = 0;

        private int offsetCount = 0;

        @Override
        public void close() throws SQLException {
            SQLCloseables.closeAll(caches.values());
            this.closed = true;
        }

        @Override
        public Tuple next() throws SQLException {
            while (!this.closed && (offset != null && offsetCount < offset) && tupleIterator.hasNext()) {
                offsetCount++;
                tupleIterator.next();
            }
            if (!this.closed && (limit == null || count++ < limit) && tupleIterator.hasNext()) {
                return tupleIterator.next();
            }
            return null;
        }

        @Override
        public void explain(List<String> planSteps) {
        }
    };
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
Also used: SequenceResultIterator (org.apache.phoenix.iterate.SequenceResultIterator), ResultIterator (org.apache.phoenix.iterate.ResultIterator), Iterator (java.util.Iterator), List (java.util.List)
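The offset and limit handling in next() can be illustrated on its own: skip offset tuples the first time through, then serve at most limit tuples. A small standalone sketch with plain Java iterators and made-up data (not the Phoenix classes):

import java.util.Iterator;
import java.util.List;

public class OffsetLimitSketch {

    // Mirrors the next() logic above: skip `offset` items once, then serve up to `limit` items.
    static <T> T next(Iterator<T> it, Integer offset, Integer limit, int[] offsetCount, int[] count) {
        while (offset != null && offsetCount[0] < offset && it.hasNext()) {
            offsetCount[0]++;
            it.next();
        }
        if ((limit == null || count[0]++ < limit) && it.hasNext()) {
            return it.next();
        }
        return null;
    }

    public static void main(String[] args) {
        Iterator<String> it = List.of("t1", "t2", "t3", "t4", "t5").iterator();
        int[] offsetCount = {0};
        int[] count = {0};
        for (String t; (t = next(it, 2, 2, offsetCount, count)) != null; ) {
            System.out.println(t);  // prints t3, t4 (offset 2, limit 2)
        }
    }
}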

Aggregations

ResultIterator (org.apache.phoenix.iterate.ResultIterator)26 SequenceResultIterator (org.apache.phoenix.iterate.SequenceResultIterator)9 List (java.util.List)8 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)8 LimitingResultIterator (org.apache.phoenix.iterate.LimitingResultIterator)8 OffsetResultIterator (org.apache.phoenix.iterate.OffsetResultIterator)8 SQLException (java.sql.SQLException)7 ConcatResultIterator (org.apache.phoenix.iterate.ConcatResultIterator)7 PColumn (org.apache.phoenix.schema.PColumn)7 TableRef (org.apache.phoenix.schema.TableRef)7 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)6 LiteralExpression (org.apache.phoenix.expression.LiteralExpression)5 DelegateResultIterator (org.apache.phoenix.iterate.DelegateResultIterator)5 ParallelIterators (org.apache.phoenix.iterate.ParallelIterators)5 SpoolingResultIterator (org.apache.phoenix.iterate.SpoolingResultIterator)5 PTable (org.apache.phoenix.schema.PTable)5 Tuple (org.apache.phoenix.schema.tuple.Tuple)5 ArrayList (java.util.ArrayList)4 QueryPlan (org.apache.phoenix.compile.QueryPlan)4 StatementContext (org.apache.phoenix.compile.StatementContext)4