Use of io.crate.analyze.relations.DocTableRelation in project crate by crate.
The class InsertFromValuesAnalyzer, method analyze:
public AnalyzedStatement analyze(InsertFromValues node, Analysis analysis) {
    DocTableInfo tableInfo = schemas.getWritableTable(
        TableIdent.of(node.table(), analysis.sessionContext().defaultSchema()));
    Operation.blockedRaiseException(tableInfo, Operation.INSERT);
    DocTableRelation tableRelation = new DocTableRelation(tableInfo);
    FieldProvider fieldProvider = new NameFieldProvider(tableRelation);
    Function<ParameterExpression, Symbol> convertParamFunction = analysis.parameterContext();
    ExpressionAnalyzer expressionAnalyzer = new ExpressionAnalyzer(
        functions, analysis.sessionContext(), convertParamFunction, fieldProvider, null);
    ExpressionAnalysisContext expressionAnalysisContext = new ExpressionAnalysisContext();
    expressionAnalyzer.setResolveFieldsOperation(Operation.INSERT);
    ValuesResolver valuesResolver = new ValuesResolver(tableRelation);
    ExpressionAnalyzer valuesAwareExpressionAnalyzer = new ValuesAwareExpressionAnalyzer(
        functions, analysis.sessionContext(), convertParamFunction, fieldProvider, valuesResolver);
    InsertFromValuesAnalyzedStatement statement = new InsertFromValuesAnalyzedStatement(
        tableInfo, analysis.parameterContext().numBulkParams());
    handleInsertColumns(node, node.maxValuesLength(), statement);
    // Collect every column a generated column depends on, so inserted values
    // can be converted to literals before generated expressions are evaluated.
    Set<Reference> allReferencedReferences = new HashSet<>();
    for (GeneratedReference reference : tableInfo.generatedColumns()) {
        allReferencedReferences.addAll(reference.referencedReferences());
    }
    ReferenceToLiteralConverter.Context referenceToLiteralContext =
        new ReferenceToLiteralConverter.Context(statement.columns(), allReferencedReferences);
    ValueNormalizer valuesNormalizer = new ValueNormalizer(schemas);
    EvaluatingNormalizer normalizer = new EvaluatingNormalizer(
        functions, RowGranularity.CLUSTER, ReplaceMode.COPY, null, tableRelation);
    analyzeColumns(statement.tableInfo(), statement.columns());
    for (ValuesList valuesList : node.valuesLists()) {
        analyzeValues(
            tableRelation, valuesNormalizer, normalizer,
            expressionAnalyzer, expressionAnalysisContext, analysis.transactionContext(),
            valuesResolver, valuesAwareExpressionAnalyzer, valuesList,
            node.onDuplicateKeyAssignments(), statement, analysis.parameterContext(),
            referenceToLiteralContext);
    }
    return statement;
}
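For orientation, a minimal test-style sketch of how this analysis could be exercised end-to-end, assuming an SQLExecutor-style helper `e` like the one used in the test examples below; the SQL statement, the column count, and the `is` matcher are illustrative assumptions, not taken from the snippet above:

// Hypothetical usage sketch: analyzing an INSERT .. VALUES statement yields
// the InsertFromValuesAnalyzedStatement that the method above builds.
InsertFromValuesAnalyzedStatement stmt =
    e.analyze("insert into users (id, name) values (1, 'Arthur')");
assertThat(stmt.columns().size(), is(2));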
Use of io.crate.analyze.relations.DocTableRelation in project crate by crate.
The class WhereClauseAnalyzerTest, method testWhereSinglePKColumnEq:
@Test
public void testWhereSinglePKColumnEq() throws Exception {
    // Three bulk parameter rows produce three independent where clauses.
    DeleteAnalyzedStatement statement = e.analyze(
        "delete from users where id = ?",
        new Object[][]{
            new Object[]{1},
            new Object[]{2},
            new Object[]{3}
        });
    DocTableRelation tableRelation = statement.analyzedRelation();
    WhereClauseAnalyzer whereClauseAnalyzer = new WhereClauseAnalyzer(e.functions(), tableRelation);
    assertThat(whereClauseAnalyzer.analyze(statement.whereClauses().get(0), transactionContext)
        .docKeys().get(), contains(isDocKey("1")));
    assertThat(whereClauseAnalyzer.analyze(statement.whereClauses().get(1), transactionContext)
        .docKeys().get(), contains(isDocKey("2")));
    assertThat(whereClauseAnalyzer.analyze(statement.whereClauses().get(2), transactionContext)
        .docKeys().get(), contains(isDocKey("3")));
}
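The same path also covers the non-bulk case; a sketch under the same test fixtures (the literal `1` is illustrative):

DeleteAnalyzedStatement single = e.analyze("delete from users where id = 1");
WhereClauseAnalyzer analyzer = new WhereClauseAnalyzer(e.functions(), single.analyzedRelation());
// Equality on the single primary-key column is normalized into a docKeys
// lookup rather than a generic query.
assertThat(analyzer.analyze(single.whereClauses().get(0), transactionContext)
    .docKeys().get(), contains(isDocKey("1")));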
Use of io.crate.analyze.relations.DocTableRelation in project crate by crate.
The class AbstractScalarFunctionsTest, method prepareFunctions:
@Before
public void prepareFunctions() throws Exception {
    DocTableInfo tableInfo = TestingTableInfo.builder(new TableIdent(DocSchemaInfo.NAME, "users"), null)
        .add("id", DataTypes.INTEGER)
        .add("name", DataTypes.STRING)
        .add("tags", new ArrayType(DataTypes.STRING))
        .add("age", DataTypes.INTEGER)
        .add("a", DataTypes.INTEGER)
        .add("x", DataTypes.LONG)
        .add("shape", DataTypes.GEO_SHAPE)
        .add("timestamp", DataTypes.TIMESTAMP)
        .add("timezone", DataTypes.STRING)
        .add("interval", DataTypes.STRING)
        .add("time_format", DataTypes.STRING)
        .add("long_array", new ArrayType(DataTypes.LONG))
        .add("int_array", new ArrayType(DataTypes.INTEGER))
        .add("long_set", new SetType(DataTypes.LONG))
        .add("regex_pattern", DataTypes.STRING)
        .add("geoshape", DataTypes.GEO_SHAPE)
        .add("geopoint", DataTypes.GEO_POINT)
        .add("geostring", DataTypes.STRING)
        .add("is_awesome", DataTypes.BOOLEAN)
        .add("double_val", DataTypes.DOUBLE)
        .add("float_val", DataTypes.DOUBLE)
        .add("short_val", DataTypes.SHORT)
        .add("obj", DataTypes.OBJECT, ImmutableList.of())
        .build();
    DocTableRelation tableRelation = new DocTableRelation(tableInfo);
    tableSources = ImmutableMap.of(new QualifiedName("users"), tableRelation);
    sqlExpressions = new SqlExpressions(tableSources);
    functions = sqlExpressions.getInstance(Functions.class);
}
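With the fixture in place, a test can resolve SQL expressions against the `users` relation; a minimal sketch, where `asSymbol` is an assumed SqlExpressions entry point rather than an API confirmed by the snippet above:

// Hypothetical follow-up use of the fixture: parse an expression and
// resolve the `age` column against the registered `users` relation.
Symbol absAge = sqlExpressions.asSymbol("abs(age)");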
Use of io.crate.analyze.relations.DocTableRelation in project crate by crate.
The class WhereClauseAnalyzer, method resolvePartitions:
/**
 * Replace parameters and sub-queries with the related values, then analyze
 * the query to resolve the partitions it can match.
 */
public static WhereClause resolvePartitions(WhereClause where,
                                            AbstractTableRelation<?> tableRelation,
                                            CoordinatorTxnCtx coordinatorTxnCtx,
                                            NodeContext nodeCtx) {
    if (!where.hasQuery() || !(tableRelation instanceof DocTableRelation) || where.query().equals(Literal.BOOLEAN_TRUE)) {
        return where;
    }
    DocTableInfo table = ((DocTableRelation) tableRelation).tableInfo();
    if (!table.isPartitioned()) {
        return where;
    }
    if (table.partitions().isEmpty()) {
        // A partitioned table without any partitions cannot match anything.
        return WhereClause.NO_MATCH;
    }
    PartitionResult partitionResult =
        resolvePartitions(where.queryOrFallback(), table, coordinatorTxnCtx, nodeCtx);
    if (!where.partitions().isEmpty()
        && !partitionResult.partitions.isEmpty()
        && !partitionResult.partitions.equals(where.partitions())) {
        throw new IllegalArgumentException(
            "Given partition ident does not match partition evaluated from where clause");
    }
    return new WhereClause(partitionResult.query, partitionResult.partitions, where.clusteredBy());
}
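A sketch of a call site, using the signature above; `whereClause`, `docTableRelation`, `txnCtx`, and `nodeCtx` are assumed to be in scope:

// Illustrative only: narrow a where clause down to matching partitions
// before planning.
WhereClause resolved = WhereClauseAnalyzer.resolvePartitions(
    whereClause, docTableRelation, txnCtx, nodeCtx);
// `resolved.partitions()` now names the partitions the query can match;
// for a partitioned table without partitions it is WhereClause.NO_MATCH.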
Use of io.crate.analyze.relations.DocTableRelation in project crate by crate.
The class NestedLoopJoin, method build:
@Override
public ExecutionPlan build(PlannerContext plannerContext,
                           Set<PlanHint> hints,
                           ProjectionBuilder projectionBuilder,
                           int limit,
                           int offset,
                           @Nullable OrderBy order,
                           @Nullable Integer pageSizeHint,
                           Row params,
                           SubQueryResults subQueryResults) {
    /*
     * Benchmarks reveal that if rows are filtered out, distributed execution gives better
     * performance. Therefore, if `isFiltered` is true (there is a join condition or a filter
     * applied after the join operation), that is a good indication that distributed execution
     * will be faster.
     *
     * We may at some point add some kind of session setting to override this behaviour
     * or otherwise come up with a better heuristic.
     */
    Integer childPageSizeHint = !isFiltered && limit != TopN.NO_LIMIT
        ? limitAndOffset(limit, offset)
        : null;
    ExecutionPlan left = lhs.build(
        plannerContext, hints, projectionBuilder, NO_LIMIT, 0, null, childPageSizeHint, params, subQueryResults);
    ExecutionPlan right = rhs.build(
        plannerContext, hints, projectionBuilder, NO_LIMIT, 0, null, childPageSizeHint, params, subQueryResults);
    PositionalOrderBy orderByFromLeft = left.resultDescription().orderBy();
    boolean hasDocTables = baseTables.stream().anyMatch(r -> r instanceof DocTableRelation);
    boolean isDistributed = hasDocTables && isFiltered && !joinType.isOuter();
    LogicalPlan leftLogicalPlan = lhs;
    LogicalPlan rightLogicalPlan = rhs;
    isDistributed = isDistributed
        && (!left.resultDescription().nodeIds().isEmpty() && !right.resultDescription().nodeIds().isEmpty());
    boolean blockNlPossible = !isDistributed && isBlockNlPossible(left, right);
    JoinType joinType = this.joinType;
    if (!orderByWasPushedDown && joinType.supportsInversion() &&
        (isDistributed && lhs.numExpectedRows() < rhs.numExpectedRows() && orderByFromLeft == null) ||
        (blockNlPossible && lhs.numExpectedRows() > rhs.numExpectedRows())) {
        // 1) The right side is always broadcast, so for performance reasons we switch the tables
        //    so that the right table is the smaller one (by numExpectedRows). If the left relation
        //    has a pushed-down OrderBy that must be preserved, the switch is not possible.
        // 2) For block nested loop, the left side should always be the smaller one. Benchmarks have
        //    shown that performance degrades if the left side is much larger and no limit is applied.
        ExecutionPlan tmpExecutionPlan = left;
        left = right;
        right = tmpExecutionPlan;
        leftLogicalPlan = rhs;
        rightLogicalPlan = lhs;
        joinType = joinType.invert();
    }
    Tuple<Collection<String>, List<MergePhase>> joinExecutionNodesAndMergePhases =
        configureExecution(left, right, plannerContext, isDistributed);
    List<Symbol> joinOutputs = Lists2.concat(leftLogicalPlan.outputs(), rightLogicalPlan.outputs());
    SubQueryAndParamBinder paramBinder = new SubQueryAndParamBinder(params, subQueryResults);
    Symbol joinInput = null;
    if (joinCondition != null) {
        joinInput = InputColumns.create(paramBinder.apply(joinCondition), joinOutputs);
    }
    NestedLoopPhase nlPhase = new NestedLoopPhase(
        plannerContext.jobId(),
        plannerContext.nextExecutionPhaseId(),
        isDistributed ? "distributed-nested-loop" : "nested-loop",
        Collections.singletonList(JoinOperations.createJoinProjection(outputs, joinOutputs)),
        joinExecutionNodesAndMergePhases.v2().get(0),
        joinExecutionNodesAndMergePhases.v2().get(1),
        leftLogicalPlan.outputs().size(),
        rightLogicalPlan.outputs().size(),
        joinExecutionNodesAndMergePhases.v1(),
        joinType,
        joinInput,
        Symbols.typeView(leftLogicalPlan.outputs()),
        leftLogicalPlan.estimatedRowSize(),
        leftLogicalPlan.numExpectedRows(),
        blockNlPossible);
    return new Join(nlPhase, left, right, TopN.NO_LIMIT, 0, TopN.NO_LIMIT, outputs.size(), orderByFromLeft);
}
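The swap condition above is dense; a restatement that follows Java's operator precedence in that expression (`&&` binds tighter than `||`), with illustrative local names:

// Case 1: distributed execution broadcasts the right side, so prefer the
// smaller relation on the right, and only if no pushed-down OrderBy from
// the left must be preserved.
boolean swapForBroadcast = !orderByWasPushedDown
    && joinType.supportsInversion()
    && isDistributed
    && lhs.numExpectedRows() < rhs.numExpectedRows()
    && orderByFromLeft == null;
// Case 2: block nested loop wants the smaller relation on the left.
boolean swapForBlockNl = blockNlPossible
    && lhs.numExpectedRows() > rhs.numExpectedRows();
if (swapForBroadcast || swapForBlockNl) {
    // swap left/right and invert the join type, as in the method body above
}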