Search in sources :

Example 6 with CalciteConnectionConfig

use of org.apache.calcite.config.CalciteConnectionConfig in project calcite by apache.

the class CalcitePrepareImpl method prepare2_.

<T> CalciteSignature<T> prepare2_(Context context, Query<T> query, Type elementType,
        long maxRowCount, CalciteCatalogReader catalogReader, RelOptPlanner planner) {
    final JavaTypeFactory typeFactory = context.getTypeFactory();
    final EnumerableRel.Prefer prefer;
    if (elementType == Object[].class) {
        prefer = EnumerableRel.Prefer.ARRAY;
    } else {
        prefer = EnumerableRel.Prefer.CUSTOM;
    }
    final Convention resultConvention = enableBindable ? BindableConvention.INSTANCE : EnumerableConvention.INSTANCE;
    final CalcitePreparingStmt preparingStmt = new CalcitePreparingStmt(this, context,
            catalogReader, typeFactory, context.getRootSchema(), prefer, planner,
            resultConvention, createConvertletTable());
    final RelDataType x;
    final Prepare.PreparedResult preparedResult;
    final Meta.StatementType statementType;
    if (query.sql != null) {
        final CalciteConnectionConfig config = context.config();
        final SqlParser.ConfigBuilder parserConfig = createParserConfig()
                .setQuotedCasing(config.quotedCasing())
                .setUnquotedCasing(config.unquotedCasing())
                .setQuoting(config.quoting())
                .setConformance(config.conformance())
                .setCaseSensitive(config.caseSensitive());
        final SqlParserImplFactory parserFactory = config.parserFactory(SqlParserImplFactory.class, null);
        if (parserFactory != null) {
            parserConfig.setParserFactory(parserFactory);
        }
        SqlParser parser = createParser(query.sql, parserConfig);
        SqlNode sqlNode;
        try {
            sqlNode = parser.parseStmt();
            statementType = getStatementType(sqlNode.getKind());
        } catch (SqlParseException e) {
            throw new RuntimeException("parse failed: " + e.getMessage(), e);
        }
        Hook.PARSE_TREE.run(new Object[] { query.sql, sqlNode });
        if (sqlNode.getKind().belongsTo(SqlKind.DDL)) {
            executeDdl(context, sqlNode);
            return new CalciteSignature<>(query.sql,
                    ImmutableList.<AvaticaParameter>of(),
                    ImmutableMap.<String, Object>of(), null,
                    ImmutableList.<ColumnMetaData>of(),
                    Meta.CursorFactory.OBJECT, null,
                    ImmutableList.<RelCollation>of(), -1, null,
                    Meta.StatementType.OTHER_DDL);
        }
        final SqlValidator validator = createSqlValidator(context, catalogReader);
        validator.setIdentifierExpansion(true);
        validator.setDefaultNullCollation(config.defaultNullCollation());
        preparedResult = preparingStmt.prepareSql(sqlNode, Object.class, validator, true);
        switch(sqlNode.getKind()) {
            case INSERT:
            case DELETE:
            case UPDATE:
            case EXPLAIN:
                // FIXME: getValidatedNodeType is wrong for DML
                x = RelOptUtil.createDmlRowType(sqlNode.getKind(), typeFactory);
                break;
            default:
                x = validator.getValidatedNodeType(sqlNode);
        }
    } else if (query.queryable != null) {
        x = context.getTypeFactory().createType(elementType);
        preparedResult = preparingStmt.prepareQueryable(query.queryable, x);
        statementType = getStatementType(preparedResult);
    } else {
        assert query.rel != null;
        x = query.rel.getRowType();
        preparedResult = preparingStmt.prepareRel(query.rel);
        statementType = getStatementType(preparedResult);
    }
    final List<AvaticaParameter> parameters = new ArrayList<>();
    final RelDataType parameterRowType = preparedResult.getParameterRowType();
    for (RelDataTypeField field : parameterRowType.getFieldList()) {
        RelDataType type = field.getType();
        parameters.add(new AvaticaParameter(false, getPrecision(type), getScale(type),
                getTypeOrdinal(type), getTypeName(type), getClassName(type), field.getName()));
    }
    RelDataType jdbcType = makeStruct(typeFactory, x);
    final List<List<String>> originList = preparedResult.getFieldOrigins();
    final List<ColumnMetaData> columns = getColumnMetaDataList(typeFactory, x, jdbcType, originList);
    Class resultClazz = null;
    if (preparedResult instanceof Typed) {
        resultClazz = (Class) ((Typed) preparedResult).getElementType();
    }
    final Meta.CursorFactory cursorFactory =
            preparingStmt.resultConvention == BindableConvention.INSTANCE
                    ? Meta.CursorFactory.ARRAY
                    : Meta.CursorFactory.deduce(columns, resultClazz);
    // noinspection unchecked
    final Bindable<T> bindable = preparedResult.getBindable(cursorFactory);
    return new CalciteSignature<>(query.sql, parameters, preparingStmt.internalParameters,
            jdbcType, columns, cursorFactory, context.getRootSchema(),
            preparedResult instanceof Prepare.PreparedResultImpl
                    ? ((Prepare.PreparedResultImpl) preparedResult).collations
                    : ImmutableList.<RelCollation>of(),
            maxRowCount, bindable, statementType);
}
Also used : Meta(org.apache.calcite.avatica.Meta) ArrayList(java.util.ArrayList) RelDataType(org.apache.calcite.rel.type.RelDataType) AvaticaParameter(org.apache.calcite.avatica.AvaticaParameter) Typed(org.apache.calcite.runtime.Typed) SqlValidator(org.apache.calcite.sql.validate.SqlValidator) JavaTypeFactory(org.apache.calcite.adapter.java.JavaTypeFactory) CalcitePrepare(org.apache.calcite.jdbc.CalcitePrepare) SqlParserImplFactory(org.apache.calcite.sql.parser.SqlParserImplFactory) ArrayList(java.util.ArrayList) ImmutableIntList(org.apache.calcite.util.ImmutableIntList) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) ColumnMetaData(org.apache.calcite.avatica.ColumnMetaData) EnumerableRel(org.apache.calcite.adapter.enumerable.EnumerableRel) SqlNode(org.apache.calcite.sql.SqlNode) SqlParseException(org.apache.calcite.sql.parser.SqlParseException) CalciteConnectionConfig(org.apache.calcite.config.CalciteConnectionConfig) SqlParser(org.apache.calcite.sql.parser.SqlParser) BindableConvention(org.apache.calcite.interpreter.BindableConvention) Convention(org.apache.calcite.plan.Convention) EnumerableConvention(org.apache.calcite.adapter.enumerable.EnumerableConvention) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RelCollation(org.apache.calcite.rel.RelCollation)
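
The part of prepare2_ worth isolating is how the connection's CalciteConnectionConfig drives the SQL parser: quoting, identifier casing, conformance and case sensitivity are all copied from the config before parsing. Below is a minimal, self-contained sketch of that same pattern against the non-vendored Calcite packages, built from a plain Properties object instead of a live connection; the class name, the LEX value and the query string are purely illustrative.

import java.util.Properties;

import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;

public class ParserFromConnectionConfigSketch {

    public static void main(String[] args) throws SqlParseException {
        // Build a connection config from plain properties; LEX=MYSQL is just an example value.
        Properties props = new Properties();
        props.setProperty(CalciteConnectionProperty.LEX.camelName(), "MYSQL");
        CalciteConnectionConfig config = new CalciteConnectionConfigImpl(props);

        // Mirror prepare2_: copy the connection's lexical settings into the parser config.
        SqlParser.Config parserConfig = SqlParser.configBuilder()
                .setQuotedCasing(config.quotedCasing())
                .setUnquotedCasing(config.unquotedCasing())
                .setQuoting(config.quoting())
                .setConformance(config.conformance())
                .setCaseSensitive(config.caseSensitive())
                .build();

        // Parse a trivial statement with the derived configuration.
        SqlNode sqlNode = SqlParser.create("select 1 + 1 as two", parserConfig).parseStmt();
        System.out.println(sqlNode);
    }
}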

Example 7 with CalciteConnectionConfig

use of org.apache.calcite.config.CalciteConnectionConfig in project calcite by apache.

the class DruidQuery method getJsonAggregation.

@Nullable
private static JsonAggregation getJsonAggregation(String name, AggregateCall aggCall,
        RexNode filterNode, String fieldName, String aggExpression, DruidQuery druidQuery) {
    final boolean fractional;
    final RelDataType type = aggCall.getType();
    final SqlTypeName sqlTypeName = type.getSqlTypeName();
    final JsonAggregation aggregation;
    final CalciteConnectionConfig config = druidQuery.getConnectionConfig();
    if (SqlTypeFamily.APPROXIMATE_NUMERIC.getTypeNames().contains(sqlTypeName)) {
        fractional = true;
    } else if (SqlTypeFamily.INTEGER.getTypeNames().contains(sqlTypeName)) {
        fractional = false;
    } else if (SqlTypeFamily.EXACT_NUMERIC.getTypeNames().contains(sqlTypeName)) {
        // Decimal
        assert sqlTypeName == SqlTypeName.DECIMAL;
        if (type.getScale() == 0) {
            fractional = false;
        } else {
            fractional = true;
        }
    } else {
        // Cannot handle this aggregate function type
        return null;
    }
    // Convert from a complex metric
    ComplexMetric complexMetric = druidQuery.druidTable.resolveComplexMetric(fieldName, aggCall);
    switch(aggCall.getAggregation().getKind()) {
        case COUNT:
            if (aggCall.isDistinct()) {
                if (aggCall.isApproximate() || config.approximateDistinctCount()) {
                    if (complexMetric == null) {
                        aggregation = new JsonCardinalityAggregation("cardinality", name, ImmutableList.of(fieldName));
                    } else {
                        aggregation = new JsonAggregation(complexMetric.getMetricType(), name, complexMetric.getMetricName(), null);
                    }
                    break;
                } else {
                    // Approximate results were not declared acceptable, so COUNT(DISTINCT) cannot be pushed down.
                    return null;
                }
            }
            if (aggCall.getArgList().size() == 1 && !aggCall.isDistinct()) {
                // If we have COUNT(column), push it as COUNT(*) filtered by "column IS NOT NULL".
                final DruidJsonFilter matchNulls;
                if (fieldName == null) {
                    matchNulls = new DruidJsonFilter.JsonExpressionFilter(aggExpression + " == null");
                } else {
                    matchNulls = DruidJsonFilter.getSelectorFilter(fieldName, null, null);
                }
                aggregation = new JsonFilteredAggregation(DruidJsonFilter.toNotDruidFilter(matchNulls),
                        new JsonAggregation("count", name, fieldName, aggExpression));
            } else if (!aggCall.isDistinct()) {
                aggregation = new JsonAggregation("count", name, fieldName, aggExpression);
            } else {
                aggregation = null;
            }
            break;
        case SUM:
        case SUM0:
            aggregation = new JsonAggregation(fractional ? "doubleSum" : "longSum", name, fieldName, aggExpression);
            break;
        case MIN:
            aggregation = new JsonAggregation(fractional ? "doubleMin" : "longMin", name, fieldName, aggExpression);
            break;
        case MAX:
            aggregation = new JsonAggregation(fractional ? "doubleMax" : "longMax", name, fieldName, aggExpression);
            break;
        default:
            return null;
    }
    if (aggregation == null) {
        return null;
    }
    // translate filters
    if (filterNode != null) {
        DruidJsonFilter druidFilter = DruidJsonFilter.toDruidFilters(filterNode, druidQuery.table.getRowType(), druidQuery);
        if (druidFilter == null) {
            // Cannot translate the filter; give up on pushing this aggregation.
            return null;
        }
        return new JsonFilteredAggregation(druidFilter, aggregation);
    }
    return aggregation;
}
Also used : SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) CalciteConnectionConfig(org.apache.calcite.config.CalciteConnectionConfig) RelDataType(org.apache.calcite.rel.type.RelDataType) Nullable(javax.annotation.Nullable)
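
The branch to note here is COUNT(DISTINCT): getJsonAggregation only emits a Druid cardinality (or complex-metric) aggregation when the call itself is marked approximate or the connection's approximateDistinctCount() flag is set; otherwise it returns null and the aggregate stays in Calcite. Below is a small sketch of switching that flag on through connection properties, assuming the standard property exposed by CalciteConnectionProperty.APPROXIMATE_DISTINCT_COUNT; the class name is made up for illustration.

import java.util.Properties;

import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;

public class ApproximateDistinctCountSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Declare that approximate answers to COUNT(DISTINCT ...) are acceptable,
        // which lets the Druid adapter fall back to cardinality aggregators.
        props.setProperty(
                CalciteConnectionProperty.APPROXIMATE_DISTINCT_COUNT.camelName(),
                Boolean.TRUE.toString());

        CalciteConnectionConfig config = new CalciteConnectionConfigImpl(props);
        // getJsonAggregation consults this flag before choosing a cardinality aggregation.
        System.out.println("approximateDistinctCount = " + config.approximateDistinctCount());
    }
}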

Example 8 with CalciteConnectionConfig

use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project beam by apache.

the class ZetaSQLQueryPlanner method defaultConfig.

private static FrameworkConfig defaultConfig(JdbcConnection connection, Collection<RuleSet> ruleSets) {
    final CalciteConnectionConfig config = connection.config();
    final SqlParser.ConfigBuilder parserConfig = SqlParser.configBuilder()
            .setQuotedCasing(config.quotedCasing())
            .setUnquotedCasing(config.unquotedCasing())
            .setQuoting(config.quoting())
            .setConformance(config.conformance())
            .setCaseSensitive(config.caseSensitive());
    final SqlParserImplFactory parserFactory = config.parserFactory(SqlParserImplFactory.class, null);
    if (parserFactory != null) {
        parserConfig.setParserFactory(parserFactory);
    }
    final SchemaPlus schema = connection.getRootSchema();
    final SchemaPlus defaultSchema = connection.getCurrentSchemaPlus();
    final ImmutableList<RelTraitDef> traitDefs = ImmutableList.of(ConventionTraitDef.INSTANCE);
    final CalciteCatalogReader catalogReader = new CalciteCatalogReader(CalciteSchema.from(schema),
            ImmutableList.of(defaultSchema.getName()), connection.getTypeFactory(), connection.config());
    final SqlOperatorTable opTab0 = connection.config().fun(SqlOperatorTable.class, SqlStdOperatorTable.instance());
    return Frameworks.newConfigBuilder()
            .parserConfig(parserConfig.build())
            .defaultSchema(defaultSchema)
            .traitDefs(traitDefs)
            .ruleSets(ruleSets.toArray(new RuleSet[0]))
            .costFactory(BeamCostModel.FACTORY)
            .typeSystem(connection.getTypeFactory().getTypeSystem())
            .operatorTable(SqlOperatorTables.chain(opTab0, catalogReader))
            .build();
}
Also used : RuleSet(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.tools.RuleSet) CalciteCatalogReader(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.prepare.CalciteCatalogReader) CalciteConnectionConfig(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig) RelTraitDef(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.plan.RelTraitDef) SqlOperatorTable(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlOperatorTable) SqlParser(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.parser.SqlParser) SchemaPlus(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.schema.SchemaPlus) SqlParserImplFactory(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.parser.SqlParserImplFactory)
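
Everything defaultConfig collects ends up in a Frameworks builder. The sketch below reproduces the skeleton of that wiring against stock (non-vendored) Calcite, with an empty in-memory root schema standing in for the JdbcConnection's schemas and with the Beam-specific cost model, rule sets and operator tables left out; the class name is hypothetical.

import java.util.Properties;

import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;

public class FrameworkConfigSketch {

    public static FrameworkConfig build() {
        // An empty connection config; a real planner would take this from the JDBC connection.
        CalciteConnectionConfig config = new CalciteConnectionConfigImpl(new Properties());

        // Carry the connection's lexical settings into the parser, as defaultConfig does.
        SqlParser.Config parserConfig = SqlParser.configBuilder()
                .setQuoting(config.quoting())
                .setCaseSensitive(config.caseSensitive())
                .build();

        // An empty in-memory schema stands in for connection.getRootSchema()/getCurrentSchemaPlus().
        SchemaPlus rootSchema = Frameworks.createRootSchema(true);

        return Frameworks.newConfigBuilder()
                .parserConfig(parserConfig)
                .defaultSchema(rootSchema)
                .build();
    }

    public static void main(String[] args) {
        System.out.println(build().getDefaultSchema());
    }
}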

Example 9 with CalciteConnectionConfig

use of org.apache.calcite.config.CalciteConnectionConfig in project hive by apache.

the class CalcitePlanner method createPlanner.

private static RelOptPlanner createPlanner(HiveConf conf, Set<RelNode> corrScalarRexSQWithAgg,
        Set<RelNode> scalarAggNoGbyNoWin) {
    final Double maxSplitSize = (double) HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
    final Double maxMemory = (double) HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
    HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
    HiveRulesRegistry registry = new HiveRulesRegistry();
    Properties calciteConfigProperties = new Properties();
    calciteConfigProperties.setProperty(CalciteConnectionProperty.TIME_ZONE.camelName(), conf.getLocalTimeZone().getId());
    calciteConfigProperties.setProperty(CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(), Boolean.FALSE.toString());
    CalciteConnectionConfig calciteConfig = new CalciteConnectionConfigImpl(calciteConfigProperties);
    boolean isCorrelatedColumns = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_CORRELATED_MULTI_KEY_JOINS);
    HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry, calciteConfig,
            corrScalarRexSQWithAgg, scalarAggNoGbyNoWin, new HiveConfPlannerContext(isCorrelatedColumns));
    return HiveVolcanoPlanner.createPlanner(confContext);
}
Also used : CalciteConnectionConfigImpl(org.apache.calcite.config.CalciteConnectionConfigImpl) HivePlannerContext(org.apache.hadoop.hive.ql.optimizer.calcite.HivePlannerContext) HiveRulesRegistry(org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry) CalciteConnectionConfig(org.apache.calcite.config.CalciteConnectionConfig) HiveConfPlannerContext(org.apache.hadoop.hive.ql.optimizer.calcite.HiveConfPlannerContext) QueryProperties(org.apache.hadoop.hive.ql.QueryProperties) Properties(java.util.Properties) HiveAlgorithmsConf(org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf)
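
Unlike the previous examples, Hive does not pull the config off a JDBC connection; it assembles a CalciteConnectionConfigImpl directly from a Properties object, pinning the session time zone and switching off Calcite's built-in materialized-view machinery (Hive drives rewriting itself). Below is a standalone sketch of that construction; the class name is made up and the UTC time zone is only an example value.

import java.util.Properties;

import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;

public class PlannerConnectionConfigSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Example time zone; CalcitePlanner uses conf.getLocalTimeZone().getId() here.
        props.setProperty(CalciteConnectionProperty.TIME_ZONE.camelName(), "UTC");
        // Let the host engine, not Calcite, handle materialized-view rewriting.
        props.setProperty(CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(),
                Boolean.FALSE.toString());

        CalciteConnectionConfig config = new CalciteConnectionConfigImpl(props);
        System.out.println("timeZone = " + config.timeZone());
        System.out.println("materializationsEnabled = " + config.materializationsEnabled());
    }
}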

Example 10 with CalciteConnectionConfig

use of org.apache.calcite.config.CalciteConnectionConfig in project hive by apache.

the class CalcitePlanner method createPlanner.

private static RelOptPlanner createPlanner(HiveConf conf, Set<RelNode> corrScalarRexSQWithAgg,
        StatsSource statsSource, boolean isExplainPlan) {
    final Double maxSplitSize = (double) HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
    final Double maxMemory = (double) HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
    HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
    HiveRulesRegistry registry = new HiveRulesRegistry();
    Properties calciteConfigProperties = new Properties();
    calciteConfigProperties.setProperty(CalciteConnectionProperty.TIME_ZONE.camelName(), conf.getLocalTimeZone().getId());
    calciteConfigProperties.setProperty(CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(), Boolean.FALSE.toString());
    CalciteConnectionConfig calciteConfig = new CalciteConnectionConfigImpl(calciteConfigProperties);
    boolean isCorrelatedColumns = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS);
    boolean heuristicMaterializationStrategy = HiveConf.getVar(conf,
            HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY).equals("heuristic");
    HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry, calciteConfig,
            corrScalarRexSQWithAgg,
            new HiveConfPlannerContext(isCorrelatedColumns, heuristicMaterializationStrategy, isExplainPlan),
            statsSource);
    RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
    planner.addListener(new RuleEventLogger());
    return planner;
}
Also used : CalciteConnectionConfigImpl(org.apache.calcite.config.CalciteConnectionConfigImpl) HivePlannerContext(org.apache.hadoop.hive.ql.optimizer.calcite.HivePlannerContext) HiveRulesRegistry(org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry) CalciteConnectionConfig(org.apache.calcite.config.CalciteConnectionConfig) HiveConfPlannerContext(org.apache.hadoop.hive.ql.optimizer.calcite.HiveConfPlannerContext) QueryProperties(org.apache.hadoop.hive.ql.QueryProperties) Properties(java.util.Properties) RelOptPlanner(org.apache.calcite.plan.RelOptPlanner) HiveAlgorithmsConf(org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf) RuleEventLogger(org.apache.hadoop.hive.ql.optimizer.calcite.RuleEventLogger)

Aggregations

CalciteConnectionConfig (org.apache.calcite.config.CalciteConnectionConfig) 14
RelDataType (org.apache.calcite.rel.type.RelDataType) 5
SqlNode (org.apache.calcite.sql.SqlNode) 5
Properties (java.util.Properties) 4
DataContext (org.apache.calcite.DataContext) 4
CalciteConnectionConfigImpl (org.apache.calcite.config.CalciteConnectionConfigImpl) 4
RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory) 4
ScannableTable (org.apache.calcite.schema.ScannableTable) 4
Schema (org.apache.calcite.schema.Schema) 4
SqlCall (org.apache.calcite.sql.SqlCall) 4
ImmutableList (com.google.common.collect.ImmutableList) 2
ArrayList (java.util.ArrayList) 2
List (java.util.List) 2
CalciteConnectionConfig (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig) 2
RelTraitDef (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.plan.RelTraitDef) 2
CalciteCatalogReader (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.prepare.CalciteCatalogReader) 2
SchemaPlus (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.schema.SchemaPlus) 2
SqlOperatorTable (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlOperatorTable) 2
SqlParser (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.parser.SqlParser) 2
SqlParserImplFactory (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.parser.SqlParserImplFactory) 2