Use of org.apache.calcite.plan.Convention in project calcite by Apache.
Class CalcitePrepareImpl, method prepare2_.
<T> CalciteSignature<T> prepare2_(Context context, Query<T> query, Type elementType, long maxRowCount, CalciteCatalogReader catalogReader, RelOptPlanner planner) {
final JavaTypeFactory typeFactory = context.getTypeFactory();
final EnumerableRel.Prefer prefer;
if (elementType == Object[].class) {
prefer = EnumerableRel.Prefer.ARRAY;
} else {
prefer = EnumerableRel.Prefer.CUSTOM;
}
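// Pick the calling convention of the final plan: Bindable (interpreted) when enableBindable is set, otherwise Enumerable (generated Java code).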
final Convention resultConvention = enableBindable ? BindableConvention.INSTANCE : EnumerableConvention.INSTANCE;
final CalcitePreparingStmt preparingStmt = new CalcitePreparingStmt(this, context, catalogReader, typeFactory, context.getRootSchema(), prefer, planner, resultConvention, createConvertletTable());
final RelDataType x;
final Prepare.PreparedResult preparedResult;
final Meta.StatementType statementType;
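// The query arrives in one of three forms: SQL text, a Queryable, or an already-built RelNode tree; each branch below produces a row type, a PreparedResult and a statement type.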
if (query.sql != null) {
final CalciteConnectionConfig config = context.config();
final SqlParser.ConfigBuilder parserConfig = createParserConfig().setQuotedCasing(config.quotedCasing()).setUnquotedCasing(config.unquotedCasing()).setQuoting(config.quoting()).setConformance(config.conformance()).setCaseSensitive(config.caseSensitive());
final SqlParserImplFactory parserFactory = config.parserFactory(SqlParserImplFactory.class, null);
if (parserFactory != null) {
parserConfig.setParserFactory(parserFactory);
}
SqlParser parser = createParser(query.sql, parserConfig);
SqlNode sqlNode;
try {
sqlNode = parser.parseStmt();
statementType = getStatementType(sqlNode.getKind());
} catch (SqlParseException e) {
throw new RuntimeException("parse failed: " + e.getMessage(), e);
}
Hook.PARSE_TREE.run(new Object[] { query.sql, sqlNode });
if (sqlNode.getKind().belongsTo(SqlKind.DDL)) {
executeDdl(context, sqlNode);
return new CalciteSignature<>(query.sql, ImmutableList.<AvaticaParameter>of(), ImmutableMap.<String, Object>of(), null, ImmutableList.<ColumnMetaData>of(), Meta.CursorFactory.OBJECT, null, ImmutableList.<RelCollation>of(), -1, null, Meta.StatementType.OTHER_DDL);
}
final SqlValidator validator = createSqlValidator(context, catalogReader);
validator.setIdentifierExpansion(true);
validator.setDefaultNullCollation(config.defaultNullCollation());
preparedResult = preparingStmt.prepareSql(sqlNode, Object.class, validator, true);
switch(sqlNode.getKind()) {
case INSERT:
case DELETE:
case UPDATE:
case EXPLAIN:
// FIXME: getValidatedNodeType is wrong for DML
x = RelOptUtil.createDmlRowType(sqlNode.getKind(), typeFactory);
break;
default:
x = validator.getValidatedNodeType(sqlNode);
}
} else if (query.queryable != null) {
x = context.getTypeFactory().createType(elementType);
preparedResult = preparingStmt.prepareQueryable(query.queryable, x);
statementType = getStatementType(preparedResult);
} else {
assert query.rel != null;
x = query.rel.getRowType();
preparedResult = preparingStmt.prepareRel(query.rel);
statementType = getStatementType(preparedResult);
}
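// Expose each field of the validated parameter row type as an AvaticaParameter so JDBC clients see the dynamic-parameter metadata.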
final List<AvaticaParameter> parameters = new ArrayList<>();
final RelDataType parameterRowType = preparedResult.getParameterRowType();
for (RelDataTypeField field : parameterRowType.getFieldList()) {
RelDataType type = field.getType();
parameters.add(new AvaticaParameter(false, getPrecision(type), getScale(type), getTypeOrdinal(type), getTypeName(type), getClassName(type), field.getName()));
}
RelDataType jdbcType = makeStruct(typeFactory, x);
final List<List<String>> originList = preparedResult.getFieldOrigins();
final List<ColumnMetaData> columns = getColumnMetaDataList(typeFactory, x, jdbcType, originList);
Class resultClazz = null;
if (preparedResult instanceof Typed) {
resultClazz = (Class) ((Typed) preparedResult).getElementType();
}
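// Bindable plans return Object[] rows, so use the ARRAY cursor factory; otherwise deduce a cursor from the column metadata and the element type.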
final Meta.CursorFactory cursorFactory = preparingStmt.resultConvention == BindableConvention.INSTANCE ? Meta.CursorFactory.ARRAY : Meta.CursorFactory.deduce(columns, resultClazz);
// noinspection unchecked
final Bindable<T> bindable = preparedResult.getBindable(cursorFactory);
return new CalciteSignature<>(query.sql, parameters, preparingStmt.internalParameters, jdbcType, columns, cursorFactory, context.getRootSchema(), preparedResult instanceof Prepare.PreparedResultImpl ? ((Prepare.PreparedResultImpl) preparedResult).collations : ImmutableList.<RelCollation>of(), maxRowCount, bindable, statementType);
}
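A minimal usage sketch, not part of the Calcite source, of the path that reaches prepare2_: an ordinary JDBC query over the Calcite driver. With the defaults, enableBindable is false, so resultConvention is EnumerableConvention.INSTANCE and the plan is code-generated; the "metadata" schema queried below is the one Calcite adds to a bare jdbc:calcite: connection.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class PrepareExample {
  public static void main(String[] args) throws Exception {
    // Bare Calcite connection; prepare2_ runs when the query is prepared.
    Connection connection = DriverManager.getConnection("jdbc:calcite:");
    Statement statement = connection.createStatement();
    ResultSet resultSet =
        statement.executeQuery("select * from \"metadata\".TABLES");
    while (resultSet.next()) {
      System.out.println(resultSet.getString(1));
    }
    resultSet.close();
    statement.close();
    connection.close();
  }
}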
Use of org.apache.calcite.plan.Convention in project calcite by Apache.
Class CalcitePrepareImpl, method convert_.
private ParseResult convert_(Context context, String sql, boolean analyze, boolean fail, CalciteCatalogReader catalogReader, SqlValidator validator, SqlNode sqlNode1) {
final JavaTypeFactory typeFactory = context.getTypeFactory();
final Convention resultConvention = enableBindable ? BindableConvention.INSTANCE : EnumerableConvention.INSTANCE;
final HepPlanner planner = new HepPlanner(new HepProgramBuilder().build());
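// A HepPlanner with an empty program is enough here because no optimization is performed; registering ConventionTraitDef lets the converted rels carry a Convention trait.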
planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
final SqlToRelConverter.ConfigBuilder configBuilder = SqlToRelConverter.configBuilder().withTrimUnusedFields(true);
if (analyze) {
configBuilder.withConvertTableAccess(false);
}
final CalcitePreparingStmt preparingStmt = new CalcitePreparingStmt(this, context, catalogReader, typeFactory, context.getRootSchema(), null, planner, resultConvention, createConvertletTable());
final SqlToRelConverter converter = preparingStmt.getSqlToRelConverter(validator, catalogReader, configBuilder.build());
final RelRoot root = converter.convertQuery(sqlNode1, false, true);
if (analyze) {
return analyze_(validator, sql, sqlNode1, root, fail);
}
return new ConvertResult(this, validator, sql, sqlNode1, validator.getValidatedNodeType(sqlNode1), root);
}
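For comparison, a sketch of the same SQL-to-RelRoot conversion through the public Frameworks/Planner API, which wires up a parser, validator and SqlToRelConverter much as convert_ does internally. This is not from the Calcite source; the class and method names ConvertSketch and sqlToRel are hypothetical, while the Frameworks, Planner and RelRoot APIs are real.

import org.apache.calcite.rel.RelRoot;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.Planner;

public class ConvertSketch {
  /** Parses, validates and converts a SQL string into a RelRoot. */
  public static RelRoot sqlToRel(String sql) throws Exception {
    SchemaPlus rootSchema = Frameworks.createRootSchema(true);
    FrameworkConfig config = Frameworks.newConfigBuilder()
        .parserConfig(SqlParser.Config.DEFAULT)
        .defaultSchema(rootSchema)
        .build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parsed = planner.parse(sql);          // SQL text -> SqlNode
    SqlNode validated = planner.validate(parsed); // resolve names and types
    return planner.rel(validated);                // SqlNode -> RelRoot
  }
}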
Use of org.apache.calcite.plan.Convention in project calcite by Apache.
Class VolcanoPlanner, method registerImpl.
/**
* Registers a new expression <code>exp</code> and queues up rule matches.
* If <code>set</code> is not null, makes the expression part of that
* equivalence set. If an identical expression is already registered, we
* don't need to register this one and nor should we queue up rule matches.
*
* @param rel relational expression to register. Must be either a
* {@link RelSubset}, or an unregistered {@link RelNode}
* @param set set that rel belongs to, or <code>null</code>
* @return the equivalence-set
*/
private RelSubset registerImpl(RelNode rel, RelSet set) {
if (rel instanceof RelSubset) {
return registerSubset(set, (RelSubset) rel);
}
assert !isRegistered(rel) : "already been registered: " + rel;
if (rel.getCluster().getPlanner() != this) {
throw new AssertionError("Relational expression " + rel + " belongs to a different planner than is currently being used.");
}
// Now is a good time to ensure that the relational expression
// implements the interface required by its calling convention.
final RelTraitSet traits = rel.getTraitSet();
final Convention convention = traits.getTrait(ConventionTraitDef.INSTANCE);
assert convention != null;
if (!convention.getInterface().isInstance(rel) && !(rel instanceof Converter)) {
throw new AssertionError("Relational expression " + rel + " has calling-convention " + convention + " but does not implement the required interface '" + convention.getInterface() + "' of that convention");
}
if (traits.size() != traitDefs.size()) {
throw new AssertionError("Relational expression " + rel + " does not have the correct number of traits: " + traits.size() + " != " + traitDefs.size());
}
// Ensure that its sub-expressions are registered.
rel = rel.onRegister(this);
// Record its provenance. (Rule call may be null.)
if (ruleCallStack.isEmpty()) {
provenanceMap.put(rel, Provenance.EMPTY);
} else {
final VolcanoRuleCall ruleCall = ruleCallStack.peek();
provenanceMap.put(rel, new RuleProvenance(ruleCall.rule, ImmutableList.copyOf(ruleCall.rels), ruleCall.id));
}
// If it is equivalent to an existing expression, return the set that
// the equivalent expression belongs to.
Pair<String, RelDataType> key = key(rel);
RelNode equivExp = mapDigestToRel.get(key);
if (equivExp == null) {
// do nothing
} else if (equivExp == rel) {
return getSubset(rel);
} else {
assert RelOptUtil.equal("left", equivExp.getRowType(), "right", rel.getRowType(), Litmus.THROW);
RelSet equivSet = getSet(equivExp);
if (equivSet != null) {
LOGGER.trace("Register: rel#{} is equivalent to {}", rel.getId(), equivExp.getDescription());
return registerSubset(set, getSubset(equivExp));
}
}
// Converters are in the same set as their children.
if (rel instanceof Converter) {
final RelNode input = ((Converter) rel).getInput();
final RelSet childSet = getSet(input);
if ((set != null) && (set != childSet) && (set.equivalentSet == null)) {
LOGGER.trace("Register #{} {} (and merge sets, because it is a conversion)", rel.getId(), rel.getDigest());
merge(set, childSet);
registerCount++;
// During the mergers, the child set may have changed, and since we're not registered yet, we won't have been informed. So check whether we are now equivalent to an existing expression.
if (fixUpInputs(rel)) {
rel.recomputeDigest();
key = key(rel);
RelNode equivRel = mapDigestToRel.get(key);
if ((equivRel != rel) && (equivRel != null)) {
// make sure this bad rel didn't get into the
// set in any way (fixupInputs will do this but it
// doesn't know if it should so it does it anyway)
set.obliterateRelNode(rel);
// There is already an equivalent expression. Use that one, and forget about this one.
return getSubset(equivRel);
}
}
} else {
set = childSet;
}
}
// Place the expression in the appropriate equivalence set.
if (set == null) {
set = new RelSet(nextSetId++, Util.minus(RelOptUtil.getVariablesSet(rel), rel.getVariablesSet()), RelOptUtil.getVariablesUsed(rel));
this.allSets.add(set);
}
// Chain to find the 'live' equivalent set, just in case several sets are merging at the same time.
while (set.equivalentSet != null) {
set = set.equivalentSet;
}
// Allow each rel to register its own rules.
registerClass(rel);
registerCount++;
final int subsetBeforeCount = set.subsets.size();
RelSubset subset = addRelToSet(rel, set);
final RelNode xx = mapDigestToRel.put(key, rel);
assert xx == null || xx == rel : rel.getDigest();
LOGGER.trace("Register {} in {}", rel.getDescription(), subset.getDescription());
// This relational expression may have been registered while we recursively registered its children. If this is the case, we're done.
if (xx != null) {
return subset;
}
// Create back-links from its children, which makes the children more important.
if (rel == this.root) {
ruleQueue.subsetImportances.put(subset, 1.0); // todo: remove
}
for (RelNode input : rel.getInputs()) {
RelSubset childSubset = (RelSubset) input;
childSubset.set.parents.add(rel);
// Child subset is more important now a new parent uses it.
ruleQueue.recompute(childSubset);
}
if (rel == this.root) {
ruleQueue.subsetImportances.remove(subset);
}
// Remember abstract converters until they're satisfied
if (rel instanceof AbstractConverter) {
set.abstractConverters.add((AbstractConverter) rel);
}
// If this set has any unsatisfied converters, try to satisfy them.
checkForSatisfiedConverters(set, rel);
// Make sure this rel's subset importance is updated
ruleQueue.recompute(subset, true);
// Queue up all rules triggered by this relexp's creation.
fireRules(rel, true);
// It's a new subset.
if (set.subsets.size() > subsetBeforeCount) {
fireRules(subset, true);
}
return subset;
}
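A toy sketch of the invariant registerImpl enforces on conventions: every registered rel must implement the interface declared by its Convention unless it is a Converter. The names ConventionCheckSketch, MyRel and MY_CONVENTION are hypothetical; Convention.Impl and getInterface() are real Calcite API.

import org.apache.calcite.plan.Convention;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.convert.Converter;

public final class ConventionCheckSketch {
  /** Marker interface that rels of the hypothetical convention must implement. */
  public interface MyRel extends RelNode {
  }

  /** A convention whose getInterface() is MyRel.class. */
  public static final Convention MY_CONVENTION =
      new Convention.Impl("MYCONV", MyRel.class);

  /** Mirrors the interface check in VolcanoPlanner.registerImpl. */
  static void checkConvention(RelNode rel, Convention convention) {
    if (!convention.getInterface().isInstance(rel)
        && !(rel instanceof Converter)) {
      throw new AssertionError("Relational expression " + rel
          + " has calling-convention " + convention
          + " but does not implement " + convention.getInterface());
    }
  }
}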