Use of io.crate.execution.dsl.projection.builder.ProjectionBuilder in project crate by crate.
The class Get, method build:
@Override
public ExecutionPlan build(PlannerContext plannerContext,
                           Set<PlanHint> hints,
                           ProjectionBuilder projectionBuilder,
                           int limitHint,
                           int offsetHint,
                           @Nullable OrderBy order,
                           @Nullable Integer pageSizeHint,
                           Row params,
                           SubQueryResults subQueryResults) {
    HashMap<String, Map<ShardId, List<PKAndVersion>>> idsByShardByNode = new HashMap<>();
    DocTableInfo docTableInfo = tableRelation.tableInfo();
    for (DocKeys.DocKey docKey : docKeys) {
        String id = docKey.getId(plannerContext.transactionContext(), plannerContext.nodeContext(), params, subQueryResults);
        if (id == null) {
            continue;
        }
        List<String> partitionValues = docKey.getPartitionValues(plannerContext.transactionContext(), plannerContext.nodeContext(), params, subQueryResults);
        String indexName = indexName(docTableInfo, partitionValues);
        String routing = docKey.getRouting(plannerContext.transactionContext(), plannerContext.nodeContext(), params, subQueryResults);
        ShardRouting shardRouting;
        try {
            shardRouting = plannerContext.resolveShard(indexName, id, routing);
        } catch (IndexNotFoundException e) {
            if (docTableInfo.isPartitioned()) {
                continue;
            }
            throw e;
        }
        String currentNodeId = shardRouting.currentNodeId();
        if (currentNodeId == null) {
            // If relocating is fast enough this will work, otherwise it will result in a shard failure which
            // will cause a statement retry
            currentNodeId = shardRouting.relocatingNodeId();
            if (currentNodeId == null) {
                throw new ShardNotFoundException(shardRouting.shardId());
            }
        }
        Map<ShardId, List<PKAndVersion>> idsByShard = idsByShardByNode.get(currentNodeId);
        if (idsByShard == null) {
            idsByShard = new HashMap<>();
            idsByShardByNode.put(currentNodeId, idsByShard);
        }
        List<PKAndVersion> pkAndVersions = idsByShard.get(shardRouting.shardId());
        if (pkAndVersions == null) {
            pkAndVersions = new ArrayList<>();
            idsByShard.put(shardRouting.shardId(), pkAndVersions);
        }
        long version = docKey.version(plannerContext.transactionContext(), plannerContext.nodeContext(), params, subQueryResults)
            .orElse(Versions.MATCH_ANY);
        long sequenceNumber = docKey.sequenceNo(plannerContext.transactionContext(), plannerContext.nodeContext(), params, subQueryResults)
            .orElse(SequenceNumbers.UNASSIGNED_SEQ_NO);
        long primaryTerm = docKey.primaryTerm(plannerContext.transactionContext(), plannerContext.nodeContext(), params, subQueryResults)
            .orElse(SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
        pkAndVersions.add(new PKAndVersion(id, version, sequenceNumber, primaryTerm));
    }
    var docKeyColumns = new ArrayList<>(docTableInfo.primaryKey());
    docKeyColumns.addAll(docTableInfo.partitionedBy());
    docKeyColumns.add(docTableInfo.clusteredBy());
    docKeyColumns.add(DocSysColumns.VERSION);
    docKeyColumns.add(DocSysColumns.SEQ_NO);
    docKeyColumns.add(DocSysColumns.PRIMARY_TERM);
    var binder = new SubQueryAndParamBinder(params, subQueryResults);
    List<Symbol> boundOutputs = Lists2.map(outputs, binder);
    var boundQuery = binder.apply(query);
    // Collect all columns which are used inside the query.
    // If the query contains only DocKeys, no filter is needed as all DocKeys are handled by the PKLookupOperation.
    AtomicBoolean requiresAdditionalFilteringOnNonDocKeyColumns = new AtomicBoolean(false);
    var toCollectSet = new LinkedHashSet<>(boundOutputs);
    Consumer<Reference> addRefIfMatch = ref -> {
        toCollectSet.add(ref);
        if (docKeyColumns.contains(ref.column()) == false) {
            requiresAdditionalFilteringOnNonDocKeyColumns.set(true);
        }
    };
    RefVisitor.visitRefs(boundQuery, addRefIfMatch);
    var toCollect = boundOutputs;
    ArrayList<Projection> projections = new ArrayList<>();
    if (requiresAdditionalFilteringOnNonDocKeyColumns.get()) {
        toCollect = List.copyOf(toCollectSet);
        var filterProjection = ProjectionBuilder.filterProjection(toCollect, boundQuery);
        filterProjection.requiredGranularity(RowGranularity.SHARD);
        projections.add(filterProjection);
        // reduce outputs which have been added for the filter projection
        var evalProjection = new EvalProjection(InputColumn.mapToInputColumns(boundOutputs), RowGranularity.SHARD);
        projections.add(evalProjection);
    }
    var collect = new Collect(
        new PKLookupPhase(plannerContext.jobId(), plannerContext.nextExecutionPhaseId(), docTableInfo.partitionedBy(), toCollect, idsByShardByNode),
        TopN.NO_LIMIT,
        0,
        toCollect.size(),
        docKeys.size(),
        null);
    for (var projection : projections) {
        collect.addProjection(projection);
    }
    return collect;
}
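The loop above groups primary-key lookups on two levels: first by the node that currently holds the shard, then by the shard itself, so each node later receives exactly the lookups it can serve locally. A minimal, self-contained sketch of that bookkeeping, using plain Strings as hypothetical stand-ins for the node id, ShardId and PKAndVersion:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PkGroupingSketch {

    public static void main(String[] args) {
        // node id -> shard id -> pk entries, mirroring idsByShardByNode above
        Map<String, Map<String, List<String>>> idsByShardByNode = new HashMap<>();

        record Lookup(String nodeId, String shardId, String pk) {}
        List<Lookup> resolved = List.of(
            new Lookup("node-1", "users[0]", "id=1"),
            new Lookup("node-1", "users[0]", "id=7"),
            new Lookup("node-2", "users[1]", "id=3"));

        for (Lookup lookup : resolved) {
            // computeIfAbsent expresses the same "create bucket on first use" logic
            // that the get / null-check / put sequence above spells out explicitly
            idsByShardByNode
                .computeIfAbsent(lookup.nodeId(), node -> new HashMap<>())
                .computeIfAbsent(lookup.shardId(), shard -> new ArrayList<>())
                .add(lookup.pk());
        }
        System.out.println(idsByShardByNode);
        // e.g. {node-1={users[0]=[id=1, id=7]}, node-2={users[1]=[id=3]}} (HashMap order is unspecified)
    }
}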
Use of io.crate.execution.dsl.projection.builder.ProjectionBuilder in project crate by crate.
The class SQLExecutor, method planInternal:
private <T> T planInternal(AnalyzedStatement analyzedStatement, UUID jobId, int fetchSize, Row params) {
    RoutingProvider routingProvider = new RoutingProvider(random.nextInt(), emptyList());
    PlannerContext plannerContext = new PlannerContext(planner.currentClusterState(), routingProvider, jobId, coordinatorTxnCtx, nodeCtx, fetchSize, null);
    Plan plan = planner.plan(analyzedStatement, plannerContext);
    if (plan instanceof LogicalPlan) {
        return (T) ((LogicalPlan) plan).build(
            plannerContext,
            Set.of(),
            new ProjectionBuilder(nodeCtx),
            TopN.NO_LIMIT,
            0,
            null,
            null,
            params,
            new SubQueryResults(emptyMap()) {

                @Override
                public Object getSafe(SelectSymbol key) {
                    return Literal.of(key.valueType(), null);
                }
            });
    }
    return (T) plan;
}
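The anonymous SubQueryResults subclass above lets the test executor build a plan without executing any sub-queries: every uncorrelated sub-select is answered with a typed NULL literal. A small sketch of the same override-with-lenient-fallback pattern, where Resolver is a hypothetical stand-in rather than a crate class:

import java.util.Map;

public class LenientResolverSketch {

    // Hypothetical stand-in for SubQueryResults: strict by default.
    static class Resolver {
        private final Map<String, Object> results;

        Resolver(Map<String, Object> results) {
            this.results = results;
        }

        Object getSafe(String key) {
            Object value = results.get(key);
            if (value == null) {
                throw new IllegalStateException("missing sub-query result for: " + key);
            }
            return value;
        }
    }

    public static void main(String[] args) {
        // Test variant: never fail, hand back a placeholder instead,
        // mirroring the Literal.of(key.valueType(), null) override above.
        Resolver lenient = new Resolver(Map.of()) {
            @Override
            Object getSafe(String key) {
                return "NULL"; // placeholder result
            }
        };
        System.out.println(lenient.getSafe("subquery-1")); // prints NULL instead of throwing
    }
}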
Use of io.crate.execution.dsl.projection.builder.ProjectionBuilder in project crate by crate.
The class LimitTest, method testLimitOnLimitOperator:
@Test
public void testLimitOnLimitOperator() throws Exception {
    SQLExecutor e = SQLExecutor.builder(clusterService, 2, RandomizedTest.getRandom(), List.of())
        .addTable(TableDefinitions.USER_TABLE_DEFINITION)
        .build();
    QueriedSelectRelation queriedDocTable = e.analyze("select name from users");
    LogicalPlan plan = Limit.create(
        Limit.create(
            Collect.create(
                ((AbstractTableRelation<?>) queriedDocTable.from().get(0)),
                queriedDocTable.outputs(),
                new WhereClause(queriedDocTable.where()),
                new TableStats(),
                null),
            Literal.of(10L),
            Literal.of(5L)),
        Literal.of(20L),
        Literal.of(7L));
    assertThat(plan, isPlan(
        "Limit[20::bigint;7::bigint]\n" +
        " └ Limit[10::bigint;5::bigint]\n" +
        " └ Collect[doc.users | [name] | true]"));
    PlannerContext ctx = e.getPlannerContext(clusterService.state());
    Merge merge = (Merge) plan.build(ctx, Set.of(), new ProjectionBuilder(e.nodeCtx), TopN.NO_LIMIT, 0, null, null, Row.EMPTY, SubQueryResults.EMPTY);
    io.crate.planner.node.dql.Collect collect = (io.crate.planner.node.dql.Collect) merge.subPlan();
    assertThat(collect.collectPhase().projections(), contains(ProjectionMatchers.isTopN(15, 0)));
    // noinspection unchecked
    assertThat(merge.mergePhase().projections(), contains(ProjectionMatchers.isTopN(10, 5), ProjectionMatchers.isTopN(20, 7)));
}
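The TopN(15, 0) asserted on the collect phase is the inner limit plus its offset (10 + 5): each of the two nodes only has to supply enough rows for the merge node to apply the stacked limits afterwards, first Limit[10;5], then Limit[20;7]. A tiny illustration of that arithmetic and the two-step trimming, using plain Java lists in place of the planner's projections:

import java.util.List;
import java.util.stream.IntStream;

public class LimitPushdownSketch {

    // Apply offset then limit to an already ordered list, mirroring a TopN projection.
    static List<Integer> topN(List<Integer> rows, int limit, int offset) {
        return rows.stream().skip(offset).limit(limit).toList();
    }

    public static void main(String[] args) {
        // Per-node pushdown: inner limit 10, offset 5 -> at most 15 rows are needed per node.
        int pushedDownLimit = 10 + 5;
        List<Integer> perNode = IntStream.range(0, pushedDownLimit).boxed().toList();

        // On the merge node: first the inner Limit[10;5], then the outer Limit[20;7].
        List<Integer> afterInner = topN(perNode, 10, 5);
        List<Integer> afterOuter = topN(afterInner, 20, 7);
        System.out.println(afterOuter); // [12, 13, 14]
    }
}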
Use of io.crate.execution.dsl.projection.builder.ProjectionBuilder in project crate by crate.
The class CollectTest, method test:
@Test
public void test() throws Exception {
    var e = SQLExecutor.builder(clusterService)
        .addTable("CREATE TABLE t (x int)")
        .build();
    TableStats tableStats = new TableStats();
    PlannerContext plannerCtx = e.getPlannerContext(clusterService.state());
    ProjectionBuilder projectionBuilder = new ProjectionBuilder(e.nodeCtx);
    QueriedSelectRelation analyzedRelation = e.analyze("SELECT 123 AS alias, 456 AS alias2 FROM t ORDER BY alias, 2");
    LogicalPlanner logicalPlanner = new LogicalPlanner(e.nodeCtx, tableStats, () -> clusterService.state().nodes().getMinNodeVersion());
    LogicalPlan operator = logicalPlanner.plan(analyzedRelation, plannerCtx);
    ExecutionPlan build = operator.build(plannerCtx, Set.of(), projectionBuilder, -1, 0, null, null, Row.EMPTY, SubQueryResults.EMPTY);
    assertThat(
        ((RoutedCollectPhase) ((io.crate.planner.node.dql.Collect) build).collectPhase()).orderBy(),
        is(nullValue()));
}
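Both ORDER BY expressions resolve to constants (the alias 123 and output 2, i.e. 456), so ordering cannot change the row order and the planner does not have to push an orderBy into the RoutedCollectPhase, which is what the assertion checks. A small sketch of why a constant sort key is a no-op, in plain JDK code and purely illustrative:

import java.util.Comparator;
import java.util.List;

public class ConstantOrderBySketch {

    public static void main(String[] args) {
        List<String> rows = List.of("c", "a", "b");
        // Sorting by a constant key compares every row as equal; since Stream.sorted
        // is stable for ordered streams, the original order is preserved -- the same
        // reason an ORDER BY on literal outputs can be dropped entirely.
        List<String> ordered = rows.stream()
            .sorted(Comparator.comparingInt((String row) -> 123))
            .toList();
        System.out.println(ordered); // [c, a, b]
    }
}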
Use of io.crate.execution.dsl.projection.builder.ProjectionBuilder in project crate by crate.
The class CollectQueryCastRulesTest, method assertCollectQuery:
private void assertCollectQuery(String query, String expected) {
    var collect = new Collect(tr1, Collections.emptyList(), new WhereClause(e.asSymbol(query)), 100, 10);
    var plan = (io.crate.planner.node.dql.Collect) collect.build(
        plannerContext,
        Set.of(),
        new ProjectionBuilder(e.nodeCtx),
        TopN.NO_LIMIT,
        0,
        null,
        null,
        Row.EMPTY,
        new SubQueryResults(emptyMap()) {

            @Override
            public Object getSafe(SelectSymbol key) {
                return Literal.of(key.valueType(), null);
            }
        });
    assertThat(((RoutedCollectPhase) plan.collectPhase()).where().toString(), is(expected));
}