Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project calcite by Apache.
The class CalcitePrepareImpl, method prepare2_.
<T> CalciteSignature<T> prepare2_(Context context, Query<T> query,
    Type elementType, long maxRowCount, CalciteCatalogReader catalogReader,
    RelOptPlanner planner) {
  final JavaTypeFactory typeFactory = context.getTypeFactory();
  final EnumerableRel.Prefer prefer;
  if (elementType == Object[].class) {
    prefer = EnumerableRel.Prefer.ARRAY;
  } else {
    prefer = EnumerableRel.Prefer.CUSTOM;
  }
  final Convention resultConvention =
      enableBindable ? BindableConvention.INSTANCE : EnumerableConvention.INSTANCE;
  final CalcitePreparingStmt preparingStmt =
      new CalcitePreparingStmt(this, context, catalogReader, typeFactory,
          context.getRootSchema(), prefer, planner, resultConvention,
          createConvertletTable());

  final RelDataType x;
  final Prepare.PreparedResult preparedResult;
  final Meta.StatementType statementType;
  if (query.sql != null) {
    // Copy the parser-related settings from the connection config into the
    // parser configuration.
    final CalciteConnectionConfig config = context.config();
    final SqlParser.ConfigBuilder parserConfig = createParserConfig()
        .setQuotedCasing(config.quotedCasing())
        .setUnquotedCasing(config.unquotedCasing())
        .setQuoting(config.quoting())
        .setConformance(config.conformance())
        .setCaseSensitive(config.caseSensitive());
    final SqlParserImplFactory parserFactory =
        config.parserFactory(SqlParserImplFactory.class, null);
    if (parserFactory != null) {
      parserConfig.setParserFactory(parserFactory);
    }
    SqlParser parser = createParser(query.sql, parserConfig);
    SqlNode sqlNode;
    try {
      sqlNode = parser.parseStmt();
      statementType = getStatementType(sqlNode.getKind());
    } catch (SqlParseException e) {
      throw new RuntimeException("parse failed: " + e.getMessage(), e);
    }
    Hook.PARSE_TREE.run(new Object[] {query.sql, sqlNode});
    if (sqlNode.getKind().belongsTo(SqlKind.DDL)) {
      // DDL is executed eagerly; the returned signature carries no columns.
      executeDdl(context, sqlNode);
      return new CalciteSignature<>(query.sql,
          ImmutableList.<AvaticaParameter>of(),
          ImmutableMap.<String, Object>of(), null,
          ImmutableList.<ColumnMetaData>of(), Meta.CursorFactory.OBJECT,
          null, ImmutableList.<RelCollation>of(), -1, null,
          Meta.StatementType.OTHER_DDL);
    }
    final SqlValidator validator = createSqlValidator(context, catalogReader);
    validator.setIdentifierExpansion(true);
    validator.setDefaultNullCollation(config.defaultNullCollation());
    preparedResult = preparingStmt.prepareSql(sqlNode, Object.class, validator, true);
    switch (sqlNode.getKind()) {
    case INSERT:
    case DELETE:
    case UPDATE:
    case EXPLAIN:
      // FIXME: getValidatedNodeType is wrong for DML
      x = RelOptUtil.createDmlRowType(sqlNode.getKind(), typeFactory);
      break;
    default:
      x = validator.getValidatedNodeType(sqlNode);
    }
  } else if (query.queryable != null) {
    x = context.getTypeFactory().createType(elementType);
    preparedResult = preparingStmt.prepareQueryable(query.queryable, x);
    statementType = getStatementType(preparedResult);
  } else {
    assert query.rel != null;
    x = query.rel.getRowType();
    preparedResult = preparingStmt.prepareRel(query.rel);
    statementType = getStatementType(preparedResult);
  }

  // Derive Avatica parameter metadata from the prepared statement's
  // parameter row type.
  final List<AvaticaParameter> parameters = new ArrayList<>();
  final RelDataType parameterRowType = preparedResult.getParameterRowType();
  for (RelDataTypeField field : parameterRowType.getFieldList()) {
    RelDataType type = field.getType();
    parameters.add(
        new AvaticaParameter(false, getPrecision(type), getScale(type),
            getTypeOrdinal(type), getTypeName(type), getClassName(type),
            field.getName()));
  }

  RelDataType jdbcType = makeStruct(typeFactory, x);
  final List<List<String>> originList = preparedResult.getFieldOrigins();
  final List<ColumnMetaData> columns =
      getColumnMetaDataList(typeFactory, x, jdbcType, originList);
  Class resultClazz = null;
  if (preparedResult instanceof Typed) {
    resultClazz = (Class) ((Typed) preparedResult).getElementType();
  }
  final Meta.CursorFactory cursorFactory =
      preparingStmt.resultConvention == BindableConvention.INSTANCE
          ? Meta.CursorFactory.ARRAY
          : Meta.CursorFactory.deduce(columns, resultClazz);
  //noinspection unchecked
  final Bindable<T> bindable = preparedResult.getBindable(cursorFactory);
  return new CalciteSignature<>(query.sql, parameters,
      preparingStmt.internalParameters, jdbcType, columns, cursorFactory,
      context.getRootSchema(),
      preparedResult instanceof Prepare.PreparedResultImpl
          ? ((Prepare.PreparedResultImpl) preparedResult).collations
          : ImmutableList.<RelCollation>of(),
      maxRowCount, bindable, statementType);
}
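For orientation, the parser settings that prepare2_ copies out of CalciteConnectionConfig can be exercised on their own. Below is a minimal sketch, assuming the non-vendored org.apache.calcite packages; the property values, class name, and SQL literal are illustrative choices, not taken from the snippet:

import java.util.Properties;

import org.apache.calcite.avatica.util.Quoting;
import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.parser.SqlParser;

public class ParserConfigSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative connection settings; any CalciteConnectionProperty works here.
    Properties props = new Properties();
    props.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(), "false");
    props.setProperty(CalciteConnectionProperty.QUOTING.camelName(),
        Quoting.BACK_TICK.name());
    CalciteConnectionConfig config = new CalciteConnectionConfigImpl(props);

    // Mirror prepare2_: copy the connection-level settings into the parser config.
    SqlParser.Config parserConfig = SqlParser.configBuilder()
        .setQuotedCasing(config.quotedCasing())
        .setUnquotedCasing(config.unquotedCasing())
        .setQuoting(config.quoting())
        .setConformance(config.conformance())
        .setCaseSensitive(config.caseSensitive())
        .build();

    SqlParser parser = SqlParser.create("select 1 as `x`", parserConfig);
    SqlNode sqlNode = parser.parseStmt();
    System.out.println(sqlNode.getKind()); // SELECT
  }
}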
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project calcite by Apache.
The class DruidQuery, method getJsonAggregation.
@Nullable
private static JsonAggregation getJsonAggregation(String name,
    AggregateCall aggCall, RexNode filterNode, String fieldName,
    String aggExpression, DruidQuery druidQuery) {
  final boolean fractional;
  final RelDataType type = aggCall.getType();
  final SqlTypeName sqlTypeName = type.getSqlTypeName();
  final JsonAggregation aggregation;
  final CalciteConnectionConfig config = druidQuery.getConnectionConfig();

  // Decide whether the Druid aggregator should use floating-point ("double*")
  // or integer ("long*") arithmetic.
  if (SqlTypeFamily.APPROXIMATE_NUMERIC.getTypeNames().contains(sqlTypeName)) {
    fractional = true;
  } else if (SqlTypeFamily.INTEGER.getTypeNames().contains(sqlTypeName)) {
    fractional = false;
  } else if (SqlTypeFamily.EXACT_NUMERIC.getTypeNames().contains(sqlTypeName)) {
    // Decimal: fractional only if it has a non-zero scale.
    assert sqlTypeName == SqlTypeName.DECIMAL;
    fractional = type.getScale() != 0;
  } else {
    // Cannot handle this aggregate function type.
    return null;
  }

  // Convert from a complex metric, if the field maps to one.
  ComplexMetric complexMetric =
      druidQuery.druidTable.resolveComplexMetric(fieldName, aggCall);
  switch (aggCall.getAggregation().getKind()) {
  case COUNT:
    if (aggCall.isDistinct()) {
      if (aggCall.isApproximate() || config.approximateDistinctCount()) {
        if (complexMetric == null) {
          aggregation = new JsonCardinalityAggregation("cardinality", name,
              ImmutableList.of(fieldName));
        } else {
          aggregation = new JsonAggregation(complexMetric.getMetricType(),
              name, complexMetric.getMetricName(), null);
        }
        break;
      } else {
        // Approximate results were not declared acceptable; cannot push down.
        return null;
      }
    }
    if (aggCall.getArgList().size() == 1 && !aggCall.isDistinct()) {
      // count(column): push it as count(*) filtered on "column is not null".
      final DruidJsonFilter matchNulls;
      if (fieldName == null) {
        matchNulls = new DruidJsonFilter.JsonExpressionFilter(
            aggExpression + " == null");
      } else {
        matchNulls = DruidJsonFilter.getSelectorFilter(fieldName, null, null);
      }
      aggregation = new JsonFilteredAggregation(
          DruidJsonFilter.toNotDruidFilter(matchNulls),
          new JsonAggregation("count", name, fieldName, aggExpression));
    } else if (!aggCall.isDistinct()) {
      aggregation = new JsonAggregation("count", name, fieldName, aggExpression);
    } else {
      aggregation = null;
    }
    break;
  case SUM:
  case SUM0:
    aggregation = new JsonAggregation(fractional ? "doubleSum" : "longSum",
        name, fieldName, aggExpression);
    break;
  case MIN:
    aggregation = new JsonAggregation(fractional ? "doubleMin" : "longMin",
        name, fieldName, aggExpression);
    break;
  case MAX:
    aggregation = new JsonAggregation(fractional ? "doubleMax" : "longMax",
        name, fieldName, aggExpression);
    break;
  default:
    return null;
  }
  if (aggregation == null) {
    return null;
  }
  // Translate the filter, if any, wrapping the aggregation in a filtered one.
  if (filterNode != null) {
    DruidJsonFilter druidFilter = DruidJsonFilter.toDruidFilters(
        filterNode, druidQuery.table.getRowType(), druidQuery);
    if (druidFilter == null) {
      // Cannot translate the filter.
      return null;
    }
    return new JsonFilteredAggregation(druidFilter, aggregation);
  }
  return aggregation;
}
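The branch on config.approximateDistinctCount() above is driven by a connection property. Below is a small sketch of setting and reading back that flag, assuming the non-vendored org.apache.calcite packages; the class name is illustrative:

import java.util.Properties;

import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;

public class ApproxDistinctFlagSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Allow COUNT(DISTINCT ...) to be answered approximately (e.g. by Druid's
    // cardinality aggregator), trading exactness for pushdown.
    props.setProperty(
        CalciteConnectionProperty.APPROXIMATE_DISTINCT_COUNT.camelName(), "true");
    CalciteConnectionConfig config = new CalciteConnectionConfigImpl(props);
    System.out.println(config.approximateDistinctCount()); // true
  }
}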
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project beam by Apache.
The class ZetaSQLQueryPlanner, method defaultConfig.
private static FrameworkConfig defaultConfig(JdbcConnection connection,
    Collection<RuleSet> ruleSets) {
  final CalciteConnectionConfig config = connection.config();
  final SqlParser.ConfigBuilder parserConfig = SqlParser.configBuilder()
      .setQuotedCasing(config.quotedCasing())
      .setUnquotedCasing(config.unquotedCasing())
      .setQuoting(config.quoting())
      .setConformance(config.conformance())
      .setCaseSensitive(config.caseSensitive());
  final SqlParserImplFactory parserFactory =
      config.parserFactory(SqlParserImplFactory.class, null);
  if (parserFactory != null) {
    parserConfig.setParserFactory(parserFactory);
  }
  final SchemaPlus schema = connection.getRootSchema();
  final SchemaPlus defaultSchema = connection.getCurrentSchemaPlus();
  final ImmutableList<RelTraitDef> traitDefs =
      ImmutableList.of(ConventionTraitDef.INSTANCE);
  final CalciteCatalogReader catalogReader =
      new CalciteCatalogReader(CalciteSchema.from(schema),
          ImmutableList.of(defaultSchema.getName()),
          connection.getTypeFactory(), connection.config());
  final SqlOperatorTable opTab0 =
      connection.config().fun(SqlOperatorTable.class, SqlStdOperatorTable.instance());
  return Frameworks.newConfigBuilder()
      .parserConfig(parserConfig.build())
      .defaultSchema(defaultSchema)
      .traitDefs(traitDefs)
      .ruleSets(ruleSets.toArray(new RuleSet[0]))
      .costFactory(BeamCostModel.FACTORY)
      .typeSystem(connection.getTypeFactory().getTypeSystem())
      .operatorTable(SqlOperatorTables.chain(opTab0, catalogReader))
      .build();
}
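The config.fun(...) call above resolves the operator table named by the connection's fun property, falling back to the supplied default when the property is unset. A minimal sketch of that lookup in isolation, assuming the non-vendored org.apache.calcite packages; the "standard,oracle" value and class name are illustrative:

import java.util.Properties;

import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;
import org.apache.calcite.sql.SqlOperatorTable;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

public class FunLookupSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    // "oracle" layers Oracle-flavored functions on top of the standard table.
    props.setProperty(CalciteConnectionProperty.FUN.camelName(), "standard,oracle");
    CalciteConnectionConfig config = new CalciteConnectionConfigImpl(props);
    // Same pattern as defaultConfig: default to the standard operator table.
    SqlOperatorTable opTab =
        config.fun(SqlOperatorTable.class, SqlStdOperatorTable.instance());
    System.out.println(opTab.getOperatorList().size());
  }
}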
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project hive by Apache.
The class CalcitePlanner, method createPlanner.
private static RelOptPlanner createPlanner(HiveConf conf,
    Set<RelNode> corrScalarRexSQWithAgg, Set<RelNode> scalarAggNoGbyNoWin) {
  final Double maxSplitSize = (double) HiveConf.getLongVar(
      conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
  final Double maxMemory = (double) HiveConf.getLongVar(
      conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
  HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
  HiveRulesRegistry registry = new HiveRulesRegistry();
  Properties calciteConfigProperties = new Properties();
  calciteConfigProperties.setProperty(
      CalciteConnectionProperty.TIME_ZONE.camelName(),
      conf.getLocalTimeZone().getId());
  calciteConfigProperties.setProperty(
      CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(),
      Boolean.FALSE.toString());
  CalciteConnectionConfig calciteConfig =
      new CalciteConnectionConfigImpl(calciteConfigProperties);
  boolean isCorrelatedColumns = HiveConf.getBoolVar(
      conf, HiveConf.ConfVars.HIVE_STATS_CORRELATED_MULTI_KEY_JOINS);
  HivePlannerContext confContext = new HivePlannerContext(algorithmsConf,
      registry, calciteConfig, corrScalarRexSQWithAgg, scalarAggNoGbyNoWin,
      new HiveConfPlannerContext(isCorrelatedColumns));
  return HiveVolcanoPlanner.createPlanner(confContext);
}
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project hive by Apache.
The class CalcitePlanner, method createPlanner (overload taking a StatsSource).
private static RelOptPlanner createPlanner(HiveConf conf,
    Set<RelNode> corrScalarRexSQWithAgg, StatsSource statsSource,
    boolean isExplainPlan) {
  final Double maxSplitSize = (double) HiveConf.getLongVar(
      conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
  final Double maxMemory = (double) HiveConf.getLongVar(
      conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
  HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
  HiveRulesRegistry registry = new HiveRulesRegistry();
  Properties calciteConfigProperties = new Properties();
  calciteConfigProperties.setProperty(
      CalciteConnectionProperty.TIME_ZONE.camelName(),
      conf.getLocalTimeZone().getId());
  calciteConfigProperties.setProperty(
      CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(),
      Boolean.FALSE.toString());
  CalciteConnectionConfig calciteConfig =
      new CalciteConnectionConfigImpl(calciteConfigProperties);
  boolean isCorrelatedColumns = HiveConf.getBoolVar(
      conf, HiveConf.ConfVars.HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS);
  boolean heuristicMaterializationStrategy = HiveConf.getVar(
      conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY)
      .equals("heuristic");
  HivePlannerContext confContext = new HivePlannerContext(algorithmsConf,
      registry, calciteConfig, corrScalarRexSQWithAgg,
      new HiveConfPlannerContext(isCorrelatedColumns,
          heuristicMaterializationStrategy, isExplainPlan),
      statsSource);
  RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
  planner.addListener(new RuleEventLogger());
  return planner;
}
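Both Hive overloads build their CalciteConnectionConfig from plain Properties rather than from a live JDBC connection. Below is a minimal sketch of that pattern and of reading the values back, assuming the non-vendored org.apache.calcite packages; the "UTC" literal and class name are illustrative stand-ins (Hive passes conf.getLocalTimeZone().getId()):

import java.util.Properties;

import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;

public class PlannerConfigSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Stand-in for conf.getLocalTimeZone().getId() in the Hive snippets.
    props.setProperty(CalciteConnectionProperty.TIME_ZONE.camelName(), "UTC");
    // Hive disables Calcite's own materialization rewriting here.
    props.setProperty(
        CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(),
        Boolean.FALSE.toString());
    CalciteConnectionConfig calciteConfig = new CalciteConnectionConfigImpl(props);
    System.out.println(calciteConfig.timeZone());                // UTC
    System.out.println(calciteConfig.materializationsEnabled()); // false
  }
}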