Use of org.apache.calcite.config.CalciteConnectionConfigImpl in project hive by apache.
In class CalcitePlanner, method createPlanner:
private static RelOptPlanner createPlanner(HiveConf conf, Set<RelNode> corrScalarRexSQWithAgg,
    StatsSource statsSource, boolean isExplainPlan) {
  final Double maxSplitSize = (double) HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
  final Double maxMemory = (double) HiveConf.getLongVar(conf,
      HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
  HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
  HiveRulesRegistry registry = new HiveRulesRegistry();
  Properties calciteConfigProperties = new Properties();
  calciteConfigProperties.setProperty(CalciteConnectionProperty.TIME_ZONE.camelName(),
      conf.getLocalTimeZone().getId());
  calciteConfigProperties.setProperty(CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(),
      Boolean.FALSE.toString());
  CalciteConnectionConfig calciteConfig = new CalciteConnectionConfigImpl(calciteConfigProperties);
  boolean isCorrelatedColumns = HiveConf.getBoolVar(conf,
      HiveConf.ConfVars.HIVE_CBO_STATS_CORRELATED_MULTI_KEY_JOINS);
  boolean heuristicMaterializationStrategy = HiveConf.getVar(conf,
      HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_SELECTION_STRATEGY).equals("heuristic");
  HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry, calciteConfig,
      corrScalarRexSQWithAgg,
      new HiveConfPlannerContext(isCorrelatedColumns, heuristicMaterializationStrategy, isExplainPlan),
      statsSource);
  RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
  planner.addListener(new RuleEventLogger());
  return planner;
}
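In the Hive snippet above, CalciteConnectionConfigImpl is just a thin wrapper over a Properties object keyed by CalciteConnectionProperty camel names. A minimal, self-contained sketch of that pattern follows; the time-zone value "UTC" is an arbitrary placeholder, not taken from Hive.

import java.util.Properties;
import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;

public class ConnectionConfigSketch {
  public static void main(String[] args) {
    // Each entry is keyed by a CalciteConnectionProperty camelName, as in
    // CalcitePlanner.createPlanner above ("UTC" is a placeholder value).
    Properties props = new Properties();
    props.setProperty(CalciteConnectionProperty.TIME_ZONE.camelName(), "UTC");
    props.setProperty(CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(),
        Boolean.FALSE.toString());
    CalciteConnectionConfig config = new CalciteConnectionConfigImpl(props);

    // The typed accessors read the same properties back; anything not set
    // explicitly falls back to Calcite's defaults.
    System.out.println(config.timeZone());                // UTC
    System.out.println(config.materializationsEnabled()); // false
  }
}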
Use of org.apache.calcite.config.CalciteConnectionConfigImpl in project beam by apache.
In class TableScanConverter, method convert:
@Override
public RelNode convert(ResolvedTableScan zetaNode, List<RelNode> inputs) {
  List<String> tablePath = getTablePath(zetaNode.getTable());
  SchemaPlus defaultSchemaPlus = getConfig().getDefaultSchema();
  if (defaultSchemaPlus == null) {
    throw new AssertionError("Default schema is null.");
  }
  // TODO: reject incorrect top-level schema
  Table calciteTable = TableResolution.resolveCalciteTable(defaultSchemaPlus, tablePath);
  // we already resolved the table before passing the query to Analyzer, so it should be there
  checkNotNull(calciteTable,
      "Unable to resolve the table path %s in schema %s", tablePath, defaultSchemaPlus.getName());
  String defaultSchemaName = defaultSchemaPlus.getName();
  final CalciteCatalogReader catalogReader = new CalciteCatalogReader(
      CalciteSchema.from(defaultSchemaPlus),
      ImmutableList.of(defaultSchemaName),
      getCluster().getTypeFactory(),
      new CalciteConnectionConfigImpl(new Properties()));
  RelOptTableImpl relOptTable = RelOptTableImpl.create(
      catalogReader,
      calciteTable.getRowType(getCluster().getTypeFactory()),
      calciteTable,
      ImmutableList.<String>builder().add(defaultSchemaName).addAll(tablePath).build());
  if (calciteTable instanceof TranslatableTable) {
    return ((TranslatableTable) calciteTable).toRel(createToRelContext(), relOptTable);
  } else {
    throw new UnsupportedOperationException("Does not support non TranslatableTable type table!");
  }
}
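The Beam converter passes a default CalciteConnectionConfigImpl(new Properties()) to the catalog reader, so all connection-level settings fall back to Calcite's defaults. A minimal sketch of building such a catalog reader outside Beam, over an empty Frameworks root schema (the schema and type factory here are stand-ins, not Beam's):

import java.util.Properties;
import com.google.common.collect.ImmutableList;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.jdbc.CalciteSchema;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.prepare.CalciteCatalogReader;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.tools.Frameworks;

public class CatalogReaderSketch {
  public static void main(String[] args) {
    SchemaPlus rootSchema = Frameworks.createRootSchema(true);
    CalciteCatalogReader reader = new CalciteCatalogReader(
        CalciteSchema.from(rootSchema),
        ImmutableList.of(),                                  // search path: root schema only
        new JavaTypeFactoryImpl(),
        new CalciteConnectionConfigImpl(new Properties()));  // all defaults
    // List whatever the reader can see under the root schema.
    System.out.println(reader.getAllSchemaObjectNames(ImmutableList.of()));
  }
}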
Use of org.apache.calcite.config.CalciteConnectionConfigImpl in project storm by apache.
In class StormSqlContext, method buildFrameWorkConfig:
public FrameworkConfig buildFrameWorkConfig() {
  if (hasUdf) {
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(
        CalciteSchema.from(schema),
        Collections.emptyList(),
        typeFactory,
        new CalciteConnectionConfigImpl(new Properties())));
    return Frameworks.newConfigBuilder()
        .defaultSchema(schema)
        .operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables))
        .build();
  } else {
    return Frameworks.newConfigBuilder().defaultSchema(schema).build();
  }
}
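Here the CalciteCatalogReader doubles as a SqlOperatorTable: chained after SqlStdOperatorTable, it resolves functions registered in the schema (Storm's UDFs) during SQL validation. A rough, self-contained sketch of that pattern with a hypothetical TO_UPPER UDF; the ToUpper class and function name are illustrative, not Storm's.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.jdbc.CalciteSchema;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.prepare.CalciteCatalogReader;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.impl.ScalarFunctionImpl;
import org.apache.calcite.sql.SqlOperatorTable;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.util.ChainedSqlOperatorTable;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;

public class UdfOperatorTableSketch {
  // Hypothetical UDF implementation class, registered below as TO_UPPER.
  public static class ToUpper {
    public String eval(String s) {
      return s == null ? null : s.toUpperCase();
    }
  }

  public static FrameworkConfig build() {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    // Register the UDF in the schema; the CalciteCatalogReader in the chain
    // is what makes it resolvable as a SQL operator.
    schema.add("TO_UPPER", ScalarFunctionImpl.create(ToUpper.class, "eval"));

    List<SqlOperatorTable> tables = new ArrayList<>();
    tables.add(SqlStdOperatorTable.instance());
    tables.add(new CalciteCatalogReader(
        CalciteSchema.from(schema),
        Collections.emptyList(),
        new JavaTypeFactoryImpl(),
        new CalciteConnectionConfigImpl(new Properties())));

    return Frameworks.newConfigBuilder()
        .defaultSchema(schema)
        .operatorTable(new ChainedSqlOperatorTable(tables))
        .build();
  }
}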
Use of org.apache.calcite.config.CalciteConnectionConfigImpl in project calcite by apache.
In class PlannerImpl, method createCatalogReader:
// CalciteCatalogReader is stateless; no need to store one
private CalciteCatalogReader createCatalogReader() {
  final SchemaPlus rootSchema = rootSchema(defaultSchema);
  final Context context = config.getContext();
  final CalciteConnectionConfig connectionConfig;
  if (context != null) {
    connectionConfig = context.unwrap(CalciteConnectionConfig.class);
  } else {
    Properties properties = new Properties();
    properties.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
        String.valueOf(parserConfig.caseSensitive()));
    connectionConfig = new CalciteConnectionConfigImpl(properties);
  }
  return new CalciteCatalogReader(CalciteSchema.from(rootSchema),
      CalciteSchema.from(defaultSchema).path(null),
      typeFactory, connectionConfig);
}
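createCatalogReader() prefers a CalciteConnectionConfig found in the planner's Context and only derives one from the parser's case sensitivity as a fallback. A short sketch of supplying the config through the context via the Frameworks builder; the caseSensitive=false setting is just an example value.

import java.util.Properties;
import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;
import org.apache.calcite.plan.Contexts;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.Planner;

public class PlannerContextSketch {
  public static void main(String[] args) {
    // Build an explicit connection config (example: case-insensitive identifiers).
    Properties props = new Properties();
    props.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(), "false");
    CalciteConnectionConfig connConfig = new CalciteConnectionConfigImpl(props);

    // Passing it via Contexts.of(...) lets createCatalogReader() take the
    // context.unwrap(...) branch instead of the Properties fallback.
    SchemaPlus rootSchema = Frameworks.createRootSchema(true);
    FrameworkConfig config = Frameworks.newConfigBuilder()
        .defaultSchema(rootSchema)
        .context(Contexts.of(connConfig))
        .build();
    Planner planner = Frameworks.getPlanner(config);
    System.out.println(planner);
  }
}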
Use of org.apache.calcite.config.CalciteConnectionConfigImpl in project calcite by apache.
In class RelOptRulesTest, method testExtractYearMonthToRange:
@Test
public void testExtractYearMonthToRange() {
  final String sql = "select *\n"
      + "from sales.emp_b as e\n"
      + "where extract(year from birthdate) = 2014"
      + " and extract(month from birthdate) = 4";
  HepProgram program = new HepProgramBuilder()
      .addRuleInstance(DateRangeRules.FILTER_INSTANCE)
      .build();
  final Context context = Contexts.of(new CalciteConnectionConfigImpl(new Properties()));
  sql(sql).with(program).withContext(context).check();
}
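The test wraps a default connection config in a Context via Contexts.of(...); that Context is how rules such as DateRangeRules can retrieve the connection config (for example its time zone) from the planner when they fire. The same pattern with an explicit, non-default time zone is sketched below; the zone ID is an arbitrary example.

import java.util.Properties;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;
import org.apache.calcite.plan.Context;
import org.apache.calcite.plan.Contexts;

public class DateRangeContextSketch {
  public static void main(String[] args) {
    // Same Contexts.of(...) pattern as the test, but with a non-default
    // time zone set on the connection config.
    Properties props = new Properties();
    props.setProperty(CalciteConnectionProperty.TIME_ZONE.camelName(), "America/Los_Angeles");
    Context context = Contexts.of(new CalciteConnectionConfigImpl(props));
    System.out.println(context);
  }
}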