Use of io.crate.analyze.EvaluatingNormalizer in project crate by crate.
The class WhereClauseAnalyzer, method resolvePartitions:
private static WhereClause resolvePartitions(WhereClause whereClause,
                                             DocTableInfo tableInfo,
                                             Functions functions,
                                             TransactionContext transactionContext) {
    assert tableInfo.isPartitioned() : "table must be partitioned in order to resolve partitions";
    assert whereClause.partitions().isEmpty() : "partitions must not be analyzed twice";
    if (tableInfo.partitions().isEmpty()) {
        // table is partitioned but has no data / no partitions
        return WhereClause.NO_MATCH;
    }
    PartitionReferenceResolver partitionReferenceResolver =
        preparePartitionResolver(tableInfo.partitionedByColumns());
    EvaluatingNormalizer normalizer = new EvaluatingNormalizer(
        functions, RowGranularity.PARTITION, ReplaceMode.COPY, partitionReferenceResolver, null);

    Symbol normalized;
    Map<Symbol, List<Literal>> queryPartitionMap = new HashMap<>();
    for (PartitionName partitionName : tableInfo.partitions()) {
        for (PartitionExpression partitionExpression : partitionReferenceResolver.expressions()) {
            partitionExpression.setNextRow(partitionName);
        }
        normalized = normalizer.normalize(whereClause.query(), transactionContext);
        assert normalized != null : "normalizing a query must not return null";
        if (normalized.equals(whereClause.query())) {
            // no partition columns inside the where clause
            return whereClause;
        }
        boolean canMatch = WhereClause.canMatch(normalized);
        if (canMatch) {
            List<Literal> partitions = queryPartitionMap.get(normalized);
            if (partitions == null) {
                partitions = new ArrayList<>();
                queryPartitionMap.put(normalized, partitions);
            }
            partitions.add(Literal.of(partitionName.asIndexName()));
        }
    }
    if (queryPartitionMap.size() == 1) {
        Map.Entry<Symbol, List<Literal>> entry = Iterables.getOnlyElement(queryPartitionMap.entrySet());
        whereClause = new WhereClause(
            entry.getKey(),
            whereClause.docKeys().orElse(null),
            new ArrayList<String>(entry.getValue().size()));
        whereClause.partitions(entry.getValue());
        return whereClause;
    } else if (queryPartitionMap.size() > 0) {
        return tieBreakPartitionQueries(normalizer, queryPartitionMap, whereClause, transactionContext);
    } else {
        return WhereClause.NO_MATCH;
    }
}
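The loop binds each partition's values into the resolver's expressions, folds the query, and buckets partitions by the residual query that remains: one bucket means the query can be rewritten once for all matching partitions, several buckets are tie-broken, and an empty map means no partition can match. A minimal sketch of that bucketing step, using hypothetical stand-ins (plain strings instead of Crate's Symbol tree, and a faked normalize):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch, not Crate API: queries are plain strings and normalize()
// fakes what EvaluatingNormalizer does once a partition's values are bound.
public class PartitionPruningSketch {

    static final String NO_MATCH = "false";

    // Stand-in for normalizer.normalize(query, txnCtx): with the partition
    // column bound, "date = '<queried>'" folds to true or false, leaving the
    // rest of the conjunction as the residual query.
    static String normalize(String residual, String queriedDate, String partitionDate) {
        return queriedDate.equals(partitionDate) ? residual : NO_MATCH;
    }

    public static void main(String[] args) {
        String residual = "x > 1";   // query part without partition columns
        String queriedDate = "2024"; // from "... AND date = '2024'"

        Map<String, List<String>> queryPartitionMap = new HashMap<>();
        for (String partition : Arrays.asList("2023", "2024")) {
            String normalized = normalize(residual, queriedDate, partition);
            if (!normalized.equals(NO_MATCH)) { // WhereClause.canMatch
                List<String> partitions = queryPartitionMap.get(normalized);
                if (partitions == null) {
                    partitions = new ArrayList<>();
                    queryPartitionMap.put(normalized, partitions);
                }
                partitions.add(partition);
            }
        }
        // Prints {x > 1=[2024]}: exactly one residual query, so the where
        // clause would be rewritten to "x > 1" limited to partition 2024;
        // an empty map would mean WhereClause.NO_MATCH.
        System.out.println(queryPartitionMap);
    }
}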
Use of io.crate.analyze.EvaluatingNormalizer in project crate by crate.
The class SelectPlannerTest, method testNoSoftLimitOnUnlimitedChildRelation:
@Test
public void testNoSoftLimitOnUnlimitedChildRelation() throws Exception {
    int softLimit = 10_000;
    EvaluatingNormalizer normalizer =
        EvaluatingNormalizer.functionOnlyNormalizer(e.functions(), ReplaceMode.COPY);
    Planner.Context plannerContext = new Planner.Context(
        e.planner, clusterService, UUID.randomUUID(), null, normalizer,
        new TransactionContext(SessionContext.SYSTEM_SESSION), softLimit, 0);
    Limits limits = plannerContext.getLimits(new QuerySpec());
    assertThat(limits.finalLimit(), is(TopN.NO_LIMIT));
}
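As the name suggests, functionOnlyNormalizer builds a normalizer without a reference resolver, so references pass through untouched and only functions whose arguments are all literals get folded. A rough illustration of that constant-folding behavior, with hypothetical classes that are not Crate's Symbol hierarchy:

// Hypothetical illustration (not Crate's Symbol classes) of what a
// function-only normalizer does: fold functions over literal arguments,
// leave everything else alone because no reference resolver is present.
public class FunctionOnlyNormalizerSketch {

    interface Symbol {}

    static class Literal implements Symbol {
        final int value;
        Literal(int value) { this.value = value; }
    }

    static class Add implements Symbol {
        final Symbol left, right;
        Add(Symbol left, Symbol right) { this.left = left; this.right = right; }
    }

    static Symbol normalize(Symbol symbol) {
        if (symbol instanceof Add) {
            Symbol left = normalize(((Add) symbol).left);
            Symbol right = normalize(((Add) symbol).right);
            if (left instanceof Literal && right instanceof Literal) {
                return new Literal(((Literal) left).value + ((Literal) right).value);
            }
            return new Add(left, right); // ReplaceMode.COPY: return a new tree
        }
        return symbol; // literals and references pass through unchanged
    }

    public static void main(String[] args) {
        Symbol folded = normalize(new Add(new Literal(1), new Literal(2)));
        System.out.println(((Literal) folded).value); // 3
    }
}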
Use of io.crate.analyze.EvaluatingNormalizer in project crate by crate.
The class NodeStatsCollectSource, method nodeIds:
@Nullable
static Collection<DiscoveryNode> nodeIds(WhereClause whereClause,
                                         Collection<DiscoveryNode> nodes,
                                         Functions functions) {
    if (!whereClause.hasQuery()) {
        return nodes;
    }
    LocalSysColReferenceResolver localSysColReferenceResolver = new LocalSysColReferenceResolver(
        ImmutableList.of(SysNodesTableInfo.Columns.NAME, SysNodesTableInfo.Columns.ID));
    EvaluatingNormalizer normalizer = new EvaluatingNormalizer(
        functions, RowGranularity.DOC, ReplaceMode.COPY, localSysColReferenceResolver, null);
    List<DiscoveryNode> newNodes = new ArrayList<>();
    for (DiscoveryNode node : nodes) {
        String nodeId = node.getId();
        for (RowCollectExpression<NodeStatsContext, ?> expression : localSysColReferenceResolver.expressions()) {
            expression.setNextRow(new NodeStatsContext(nodeId, node.name()));
        }
        Symbol normalized = normalizer.normalize(whereClause.query(), null);
        if (normalized.equals(whereClause.query())) {
            // no locally available sys nodes columns in the where clause
            return nodes;
        }
        if (WhereClause.canMatch(normalized)) {
            newNodes.add(node);
        }
    }
    return newNodes;
}
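The shape mirrors resolvePartitions: bind one node's id and name into the resolver's expressions, fold the query, and keep the node only if the folded query can still match; if the first normalization leaves the query unchanged, no locally available sys-node columns are referenced and all nodes are returned. A toy version of just the keep/drop decision, with hypothetical types and the early-return shortcut elided:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.BiPredicate;

// Hypothetical sketch of the per-node pre-filtering above: the predicate plays
// the role of the query after the node's id and name were bound and folded.
public class NodeFilterSketch {

    static class Node {
        final String id, name;
        Node(String id, String name) { this.id = id; this.name = name; }
    }

    static List<Node> filterNodes(List<Node> nodes, BiPredicate<String, String> canMatch) {
        List<Node> matched = new ArrayList<>();
        for (Node node : nodes) {
            // stand-in for setNextRow + normalize + WhereClause.canMatch
            if (canMatch.test(node.id, node.name)) {
                matched.add(node);
            }
        }
        return matched;
    }

    public static void main(String[] args) {
        List<Node> nodes = Arrays.asList(new Node("n1", "data-1"), new Node("n2", "data-2"));
        // WHERE name = 'data-2': only n2 survives
        System.out.println(filterNodes(nodes, (id, name) -> name.equals("data-2")).size()); // 1
    }
}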
Use of io.crate.analyze.EvaluatingNormalizer in project crate by crate.
The class SystemCollectSource, method getCollector:
@Override
public CrateCollector getCollector(CollectPhase phase, BatchConsumer consumer, JobCollectContext jobCollectContext) {
    RoutedCollectPhase collectPhase = (RoutedCollectPhase) phase;
    // sys.operations can contain a _node column - these refs need to be normalized into literals
    EvaluatingNormalizer normalizer = new EvaluatingNormalizer(
        functions, RowGranularity.DOC, ReplaceMode.COPY, new NodeSysReferenceResolver(nodeSysExpression), null);
    final RoutedCollectPhase routedCollectPhase = collectPhase.normalize(normalizer, null);
    Map<String, Map<String, List<Integer>>> locations = collectPhase.routing().locations();
    String table = Iterables.getOnlyElement(locations.get(clusterService.localNode().getId()).keySet());
    Supplier<CompletableFuture<? extends Iterable<?>>> iterableGetter = iterableGetters.get(table);
    assert iterableGetter != null : "iterableGetter for " + table + " must exist";
    boolean requiresScroll = consumer.requiresScroll();
    return BatchIteratorCollectorBridge.newInstance(
        () -> iterableGetter.get().thenApply(
            dataIterable -> RowsBatchIterator.newInstance(
                dataIterableToRowsIterable(routedCollectPhase, requiresScroll, dataIterable),
                collectPhase.toCollect().size())),
        consumer);
}
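Concretely, "normalized into literals" means the NodeSysReferenceResolver supplies the local node's values, so a reference such as _node['name'] in the phase is replaced by a constant before collection starts. A hypothetical stand-alone sketch of that substitution (none of these types are Crate's):

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: a reference resolver maps a sys column to the local
// node's value, and the normalizer swaps the reference for a literal (a copy,
// as with ReplaceMode.COPY, rather than mutating the phase in place).
public class RefToLiteralSketch {

    interface Symbol {}

    static class Ref implements Symbol {
        final String column;
        Ref(String column) { this.column = column; }
    }

    static class Lit implements Symbol {
        final Object value;
        Lit(Object value) { this.value = value; }
        public String toString() { return "Literal(" + value + ")"; }
    }

    static Symbol normalize(Symbol symbol, Map<String, Object> localNodeValues) {
        if (symbol instanceof Ref && localNodeValues.containsKey(((Ref) symbol).column)) {
            return new Lit(localNodeValues.get(((Ref) symbol).column));
        }
        return symbol;
    }

    public static void main(String[] args) {
        Map<String, Object> localNode = new HashMap<>();
        localNode.put("_node['name']", "node-1");
        System.out.println(normalize(new Ref("_node['name']"), localNode)); // Literal(node-1)
    }
}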
Use of io.crate.analyze.EvaluatingNormalizer in project crate by crate.
The class NestedLoopConsumerTest, method initPlanner:
@Before
public void initPlanner() throws Exception {
    ClusterService clusterService = new NoopClusterService();
    TableStats tableStats = getTableStats();
    e = SQLExecutor.builder(clusterService)
        .enableDefaultTables()
        .setTableStats(tableStats)
        .addDocTable(emptyRoutingTable)
        .build();
    Functions functions = e.functions();
    EvaluatingNormalizer normalizer = EvaluatingNormalizer.functionOnlyNormalizer(functions, ReplaceMode.COPY);
    plannerContext = new Planner.Context(
        e.planner, clusterService, UUID.randomUUID(),
        new ConsumingPlanner(clusterService, functions, tableStats), normalizer,
        new TransactionContext(SessionContext.SYSTEM_SESSION), 0, 0);
    consumer = new NestedLoopConsumer(clusterService, functions, tableStats);
}