Use of org.apache.phoenix.compile.ExplainPlan in project phoenix by apache.
The class UnionPlan, method getExplainPlan:
@Override
public ExplainPlan getExplainPlan() throws SQLException {
    List<String> steps = new ArrayList<String>();
    steps.add("UNION ALL OVER " + this.plans.size() + " QUERIES");
    ResultIterator iterator = iterator();
    iterator.explain(steps);
    // Indent plan steps nested under the union, except the last client-side merge/concat step (if there is one)
    int offset = !orderBy.getOrderByExpressions().isEmpty() && limit != null ? 2 : limit != null ? 1 : 0;
    for (int i = 1; i < steps.size() - offset; i++) {
        steps.set(i, "    " + steps.get(i));
    }
    return new ExplainPlan(steps);
}
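This plan text typically reaches a client through SQL: Phoenix's EXPLAIN statement returns one plan step per row, so a UNION ALL query shows the "UNION ALL OVER n QUERIES" line followed by each sub-query's steps indented beneath it. Below is a minimal JDBC sketch; the connection URL and the tables T1 and T2 are hypothetical.

// Minimal sketch: print the explain plan for a UNION ALL query over JDBC.
// The connection URL and tables T1/T2 are placeholders, not taken from the Phoenix code above.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ExplainUnionExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                     "EXPLAIN SELECT id FROM T1 UNION ALL SELECT id FROM T2")) {
            while (rs.next()) {
                // Each row of the EXPLAIN result carries one plan step
                System.out.println(rs.getString(1));
            }
        }
    }
}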
Use of org.apache.phoenix.compile.ExplainPlan in project phoenix by apache.
The class BaseQueryPlan, method getExplainPlan:
@Override
public ExplainPlan getExplainPlan() throws SQLException {
    if (context.getScanRanges() == ScanRanges.NOTHING) {
        return new ExplainPlan(Collections.singletonList("DEGENERATE SCAN OVER " + getTableRef().getTable().getName().getString()));
    }
    ResultIterator iterator = iterator();
    ExplainPlan explainPlan = new ExplainPlan(getPlanSteps(iterator));
    iterator.close();
    return explainPlan;
}
Use of org.apache.phoenix.compile.ExplainPlan in project phoenix by apache.
The class ParallelIteratorsSplitTest, method getSplits:
private static List<KeyRange> getSplits(final TableRef tableRef, final Scan scan, final List<HRegionLocation> regions, final ScanRanges scanRanges) throws SQLException {
    final List<TableRef> tableRefs = Collections.singletonList(tableRef);
    // Stub resolver that exposes only the single table under test; everything else is unsupported
    ColumnResolver resolver = new ColumnResolver() {
        @Override
        public List<PFunction> getFunctions() {
            return Collections.emptyList();
        }
        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }
        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }
        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }
        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new UnsupportedOperationException();
        }
        @Override
        public boolean hasUDFs() {
            return false;
        }
        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            return null;
        }
        @Override
        public List<PSchema> getSchemas() {
            return null;
        }
    };
    PhoenixConnection connection = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    final PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    context.setScanRanges(scanRanges);
    // Minimal QueryPlan stub: only the statement context (and its scan ranges) matters for computing splits
    ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() {
        private final Set<TableRef> tableRefs = ImmutableSet.of(tableRef);
        @Override
        public StatementContext getContext() {
            return context;
        }
        @Override
        public ParameterMetaData getParameterMetaData() {
            return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
        }
        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return ExplainPlan.EMPTY_PLAN;
        }
        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }
        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }
        @Override
        public ResultIterator iterator() throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }
        @Override
        public long getEstimatedSize() {
            return 0;
        }
        @Override
        public Set<TableRef> getSourceRefs() {
            return tableRefs;
        }
        @Override
        public TableRef getTableRef() {
            return tableRef;
        }
        @Override
        public RowProjector getProjector() {
            return RowProjector.EMPTY_PROJECTOR;
        }
        @Override
        public Integer getLimit() {
            return null;
        }
        @Override
        public Integer getOffset() {
            return null;
        }
        @Override
        public OrderBy getOrderBy() {
            return OrderBy.EMPTY_ORDER_BY;
        }
        @Override
        public GroupBy getGroupBy() {
            return GroupBy.EMPTY_GROUP_BY;
        }
        @Override
        public List<KeyRange> getSplits() {
            return null;
        }
        @Override
        public FilterableStatement getStatement() {
            return SelectStatement.SELECT_ONE;
        }
        @Override
        public boolean isDegenerate() {
            return false;
        }
        @Override
        public boolean isRowKeyOrdered() {
            return true;
        }
        @Override
        public List<List<Scan>> getScans() {
            return null;
        }
        @Override
        public Operation getOperation() {
            return Operation.QUERY;
        }
        @Override
        public boolean useRoundRobinIterator() {
            return false;
        }
        @Override
        public <T> T accept(QueryPlanVisitor<T> visitor) {
            return visitor.defaultReturn(this);
        }
        @Override
        public Long getEstimatedRowsToScan() {
            return null;
        }
        @Override
        public Long getEstimatedBytesToScan() {
            return null;
        }
        @Override
        public Long getEstimateInfoTimestamp() throws SQLException {
            return null;
        }
        @Override
        public Cost getCost() {
            return Cost.ZERO;
        }
    }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory(context.getConnection().getQueryServices()), context.getScan(), false, null, null);
    List<KeyRange> keyRanges = parallelIterators.getSplits();
    return keyRanges;
}
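The anonymous QueryPlan above is a stub: its iterators, projector, and explain plan are all empty, so only the split computation driven by the statement context and its ScanRanges is exercised. Below is a small sketch of how a caller might render the returned ranges for an assertion; it assumes KeyRange exposes getLowerRange()/getUpperRange() and that HBase's Bytes utility is on the classpath.

// Minimal sketch: dump the splits returned by the helper above for use in a test assertion.
// Assumes org.apache.phoenix.query.KeyRange exposes getLowerRange()/getUpperRange() and that
// org.apache.hadoop.hbase.util.Bytes is available.
import java.util.List;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.KeyRange;

public final class SplitDump {
    static void dump(List<KeyRange> splits) {
        for (KeyRange range : splits) {
            System.out.println(Bytes.toStringBinary(range.getLowerRange())
                    + " - " + Bytes.toStringBinary(range.getUpperRange()));
        }
    }
}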