Use of org.apache.calcite.prepare.CalciteCatalogReader in project beam by apache.
The class TableScanConverter, method convert.
@Override
public RelNode convert(ResolvedTableScan zetaNode, List<RelNode> inputs) {
  List<String> tablePath = getTablePath(zetaNode.getTable());

  SchemaPlus defaultSchemaPlus = getConfig().getDefaultSchema();
  if (defaultSchemaPlus == null) {
    throw new AssertionError("Default schema is null.");
  }
  // TODO: reject incorrect top-level schema
  Table calciteTable = TableResolution.resolveCalciteTable(defaultSchemaPlus, tablePath);

  // We already resolved the table before passing the query to the Analyzer, so it should be there.
  checkNotNull(
      calciteTable,
      "Unable to resolve the table path %s in schema %s",
      tablePath,
      defaultSchemaPlus.getName());

  String defaultSchemaName = defaultSchemaPlus.getName();

  final CalciteCatalogReader catalogReader =
      new CalciteCatalogReader(
          CalciteSchema.from(defaultSchemaPlus),
          ImmutableList.of(defaultSchemaName),
          getCluster().getTypeFactory(),
          new CalciteConnectionConfigImpl(new Properties()));

  RelOptTableImpl relOptTable =
      RelOptTableImpl.create(
          catalogReader,
          calciteTable.getRowType(getCluster().getTypeFactory()),
          calciteTable,
          ImmutableList.<String>builder().add(defaultSchemaName).addAll(tablePath).build());

  if (calciteTable instanceof TranslatableTable) {
    return ((TranslatableTable) calciteTable).toRel(createToRelContext(), relOptTable);
  } else {
    throw new UnsupportedOperationException("Non-TranslatableTable tables are not supported.");
  }
}
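Here CalciteCatalogReader acts as the bridge between the schema tree and the planner: given a root schema, a default schema path, a type factory, and a connection config, it resolves qualified names into tables. A minimal, self-contained sketch of the same construction pattern (the class name and the empty root schema are assumptions for illustration, not Beam code):

import java.util.Properties;
import com.google.common.collect.ImmutableList;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.jdbc.CalciteSchema;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.prepare.CalciteCatalogReader;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.tools.Frameworks;

public class CatalogReaderSketch {
  public static void main(String[] args) {
    // An empty root schema; in real use this would hold table definitions.
    SchemaPlus rootSchema = Frameworks.createRootSchema(true);
    RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();

    // Same constructor as the Beam converter above: root schema, default
    // schema path, type factory, and connection config.
    CalciteCatalogReader catalogReader =
        new CalciteCatalogReader(
            CalciteSchema.from(rootSchema),
            ImmutableList.of(), // empty path: resolve names from the root
            typeFactory,
            new CalciteConnectionConfigImpl(new Properties()));

    // getTable returns null when the path cannot be resolved, which is why
    // the Beam code above wraps the lookup in checkNotNull.
    System.out.println(catalogReader.getTable(ImmutableList.of("no_such_table")));
  }
}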
Use of org.apache.calcite.prepare.CalciteCatalogReader in project storm by apache.
The class StormSqlContext, method buildFrameWorkConfig.
public FrameworkConfig buildFrameWorkConfig() {
  if (hasUdf) {
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    // CalciteCatalogReader implements SqlOperatorTable, so UDFs registered
    // in the schema become visible to the validator through this chain.
    sqlOperatorTables.add(
        new CalciteCatalogReader(
            CalciteSchema.from(schema),
            Collections.emptyList(),
            typeFactory,
            new CalciteConnectionConfigImpl(new Properties())));
    return Frameworks.newConfigBuilder()
        .defaultSchema(schema)
        .operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables))
        .build();
  } else {
    return Frameworks.newConfigBuilder().defaultSchema(schema).build();
  }
}
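A standalone sketch of the same pattern, using the static factory ChainedSqlOperatorTable.of and assuming the same imports as the snippets above (MyUdf is a hypothetical UDF class, not Storm code):

// Register a hypothetical scalar UDF on the schema, then chain the standard
// operator table with a CalciteCatalogReader so "MY_UDF" resolves by name
// during validation.
SchemaPlus rootSchema = Frameworks.createRootSchema(true);
rootSchema.add("MY_UDF", ScalarFunctionImpl.create(MyUdf.class, "eval"));

RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
SqlOperatorTable opTab = ChainedSqlOperatorTable.of(
    SqlStdOperatorTable.instance(),
    new CalciteCatalogReader(
        CalciteSchema.from(rootSchema),
        Collections.emptyList(),
        typeFactory,
        new CalciteConnectionConfigImpl(new Properties())));

FrameworkConfig config = Frameworks.newConfigBuilder()
    .defaultSchema(rootSchema)
    .operatorTable(opTab)
    .build();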
Use of org.apache.calcite.prepare.CalciteCatalogReader in project druid by druid-io.
The class DruidPlanner, method getValidator.
/**
* Constructs an SQL validator, just like papa {@link #planner} uses.
*/
private SqlValidator getValidator() {
  // This is sort of lame: the planner won't cough up its validator, which is
  // nicely seeded after validating a query, but it is private and has no
  // accessors. So we make another one to get the parameter types... still, I
  // suppose it beats creating our own Prepare and Planner implementations.
  Preconditions.checkNotNull(planner.getTypeFactory());

  final CalciteConnectionConfig connectionConfig;
  if (frameworkConfig.getContext() != null) {
    connectionConfig = frameworkConfig.getContext().unwrap(CalciteConnectionConfig.class);
  } else {
    Properties properties = new Properties();
    properties.setProperty(
        CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
        String.valueOf(PlannerFactory.PARSER_CONFIG.caseSensitive()));
    connectionConfig = new CalciteConnectionConfigImpl(properties);
  }

  Prepare.CatalogReader catalogReader =
      new CalciteCatalogReader(
          CalciteSchema.from(frameworkConfig.getDefaultSchema().getParentSchema()),
          CalciteSchema.from(frameworkConfig.getDefaultSchema()).path(null),
          planner.getTypeFactory(),
          connectionConfig);

  return SqlValidatorUtil.newValidator(
      frameworkConfig.getOperatorTable(),
      catalogReader,
      planner.getTypeFactory(),
      DruidConformance.instance());
}
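The same recipe works outside Druid: derive a CalciteConnectionConfig, point a CalciteCatalogReader at the default schema's root and path, and hand both to SqlValidatorUtil.newValidator. A sketch under the assumption that frameworkConfig and typeFactory are in scope, with Druid's DruidConformance swapped for Calcite's stock SqlConformanceEnum.DEFAULT (imports mirror the snippets above):

// Case sensitivity is the one connection property the Druid code cares about.
Properties props = new Properties();
props.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(), "true");
CalciteConnectionConfig connectionConfig = new CalciteConnectionConfigImpl(props);

CalciteSchema defaultSchema = CalciteSchema.from(frameworkConfig.getDefaultSchema());
Prepare.CatalogReader catalogReader =
    new CalciteCatalogReader(
        defaultSchema.root(),     // root of the schema tree
        defaultSchema.path(null), // fully qualified path of the default schema
        typeFactory,
        connectionConfig);

// The four-argument overload used by the Druid method above.
SqlValidator validator = SqlValidatorUtil.newValidator(
    frameworkConfig.getOperatorTable(),
    catalogReader,
    typeFactory,
    SqlConformanceEnum.DEFAULT);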