use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.schema.Table in project storm by apache.
the class TestCompilerUtils method sqlOverSimpleEquiJoinTables.
public static CalciteState sqlOverSimpleEquiJoinTables(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("EMPID", SqlTypeName.INTEGER,
                    new ColumnConstraint.PrimaryKey(SqlMonotonicity.MONOTONIC, SqlParserPos.ZERO))
            .field("EMPNAME", SqlTypeName.VARCHAR)
            .field("DEPTID", SqlTypeName.INTEGER)
            .build();
    StreamableTable streamableTable2 = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("DEPTID", SqlTypeName.INTEGER,
                    new ColumnConstraint.PrimaryKey(SqlMonotonicity.MONOTONIC, SqlParserPos.ZERO))
            .field("DEPTNAME", SqlTypeName.VARCHAR)
            .build();
    Table table = streamableTable.stream();
    Table table2 = streamableTable2.stream();
    schema.add("EMP", table);
    schema.add("DEPT", table2);
    QueryPlanner queryPlanner = new QueryPlanner(schema);
    StreamsRel tree = queryPlanner.getPlan(sql);
    System.out.println(StormRelUtils.explain(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}
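A possible way to invoke this helper from a test, planning an equi-join between the EMP and DEPT streams registered above (the SQL text is illustrative, not taken from the source):

CalciteState state = TestCompilerUtils.sqlOverSimpleEquiJoinTables(
        "SELECT EMP.EMPNAME, DEPT.DEPTNAME "
                + "FROM EMP JOIN DEPT ON EMP.DEPTID = DEPT.DEPTID");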
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.schema.Table in project storm by apache.
the class TestCompilerUtils method sqlOverNestedTable.
public static CalciteState sqlOverNestedTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER,
                    new ColumnConstraint.PrimaryKey(SqlMonotonicity.MONOTONIC, SqlParserPos.ZERO))
            .field("MAPFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createMapType(
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
                            true))
            .field("NESTEDMAPFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createMapType(
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createMapType(
                                                    typeFactory.createTypeWithNullability(
                                                            typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                                    typeFactory.createTypeWithNullability(
                                                            typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
                                            true)),
                            true))
            .field("ARRAYFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createArrayType(
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.INTEGER), true),
                                    -1L),
                            true))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
    QueryPlanner queryPlanner = new QueryPlanner(schema);
    StreamsRel tree = queryPlanner.getPlan(sql);
    System.out.println(StormRelUtils.explain(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}
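A possible invocation exercising the nested map/array fields and the registered MYPLUS scalar function (the SQL text is illustrative, not taken from the source):

CalciteState state = TestCompilerUtils.sqlOverNestedTable(
        "SELECT ID, MAPFIELD['c'], NESTEDMAPFIELD['a']['b'], ARRAYFIELD[2], MYPLUS(ID, 1) FROM FOO");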
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.schema.Table in project flink by apache.
the class SqlValidatorImpl method validateSequenceValue.
public void validateSequenceValue(SqlValidatorScope scope, SqlIdentifier id) {
    // Resolve identifier as a table.
    final SqlValidatorScope.ResolvedImpl resolved = new SqlValidatorScope.ResolvedImpl();
    scope.resolveTable(id.names, catalogReader.nameMatcher(), SqlValidatorScope.Path.EMPTY, resolved);
    if (resolved.count() != 1) {
        throw newValidationError(id, RESOURCE.tableNameNotFound(id.toString()));
    }
    // We've found a table. But is it a sequence?
    final SqlValidatorNamespace ns = resolved.only().namespace;
    if (ns instanceof TableNamespace) {
        final Table table = ns.getTable().unwrap(Table.class);
        switch (table.getJdbcTableType()) {
            case SEQUENCE:
            case TEMPORARY_SEQUENCE:
                return;
        }
    }
    throw newValidationError(id, RESOURCE.notASequence(id.toString()));
}
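The check passes only when the resolved table reports a JDBC table type of SEQUENCE or TEMPORARY_SEQUENCE. Below is a minimal sketch of a table that would satisfy it, assuming Calcite's AbstractTable as the base class (the class and column names are hypothetical):

class MySequenceTable extends AbstractTable {

    @Override
    public RelDataType getRowType(RelDataTypeFactory typeFactory) {
        // A sequence exposes a single value column.
        return typeFactory.builder().add("SEQ_VALUE", SqlTypeName.BIGINT).build();
    }

    @Override
    public Schema.TableType getJdbcTableType() {
        // Reporting SEQUENCE (or TEMPORARY_SEQUENCE) lets validateSequenceValue return normally.
        return Schema.TableType.SEQUENCE;
    }
}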
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.schema.Table in project flink by apache.
the class SqlValidatorImpl method checkRollUpInUsing.
private void checkRollUpInUsing(SqlIdentifier identifier, SqlNode leftOrRight, SqlValidatorScope scope) {
    SqlValidatorNamespace namespace = getNamespace(leftOrRight, scope);
    if (namespace != null) {
        SqlValidatorTable sqlValidatorTable = namespace.getTable();
        if (sqlValidatorTable != null) {
            Table table = sqlValidatorTable.unwrap(Table.class);
            String column = Util.last(identifier.names);
            if (table.isRolledUp(column)) {
                throw newValidationError(identifier, RESOURCE.rolledUpNotAllowed(column, "USING"));
            }
        }
    }
}
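Whether the branch fires depends on Table.isRolledUp. Here is a sketch of a table that marks one column as rolled up, again assuming AbstractTable as the base class (the class and column names are hypothetical):

class RolledUpTable extends AbstractTable {

    @Override
    public RelDataType getRowType(RelDataTypeFactory typeFactory) {
        return typeFactory.builder()
                .add("ID", SqlTypeName.INTEGER)
                .add("CLICKS", SqlTypeName.BIGINT)
                .build();
    }

    @Override
    public boolean isRolledUp(String column) {
        // "CLICKS" is pre-aggregated; a bare reference to it in a USING clause
        // would trigger rolledUpNotAllowed in checkRollUpInUsing.
        return "CLICKS".equals(column);
    }

    @Override
    public boolean rolledUpColumnValidInsideAgg(
            String column, SqlCall call, SqlNode parent, CalciteConnectionConfig config) {
        // Allow the rolled-up column only inside SUM(...).
        return "SUM".equalsIgnoreCase(call.getOperator().getName());
    }
}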
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.schema.Table in project beam by apache.
the class RelConverter method createOneRow.
// This function creates a single dummy input row for queries that don't read from a table.
// For example: SELECT "hello"
// The code is copy-pasted from Calcite's LogicalValues.createOneRow() with a single line
// change: SqlTypeName.INTEGER replaced by SqlTypeName.BIGINT.
// Would like to call LogicalValues.createOneRow() directly, but it uses type SqlTypeName.INTEGER
// which corresponds to TypeKind.TYPE_INT32 in ZetaSQL, a type not supported in ZetaSQL
// PRODUCT_EXTERNAL mode. See
// https://github.com/google/zetasql/blob/c610a21ffdc110293c1c7bd255a2674ebc7ec7a8/java/com/google/zetasql/TypeFactory.java#L61
static LogicalValues createOneRow(RelOptCluster cluster) {
    final RelDataType rowType = cluster.getTypeFactory()
            .builder()
            .add("ZERO", SqlTypeName.BIGINT)
            .nullable(false)
            .build();
    final ImmutableList<ImmutableList<RexLiteral>> tuples =
            ImmutableList.of(
                    ImmutableList.of(
                            cluster.getRexBuilder()
                                    .makeExactLiteral(BigDecimal.ZERO, rowType.getFieldList().get(0).getType())));
    return LogicalValues.create(cluster, rowType, tuples);
}
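A sketch of how the dummy row might be consumed, assuming the surrounding converter places a projection of constant expressions on top of it (the helper name and output field name are hypothetical):

static RelNode projectHelloOverOneRow(RelOptCluster cluster) {
    LogicalValues oneRow = createOneRow(cluster);
    RexBuilder rexBuilder = cluster.getRexBuilder();
    // Roughly the shape of a plan for SELECT "hello": a constant projection
    // over the single-row, single-column BIGINT input.
    return LogicalProject.create(
            oneRow,
            ImmutableList.of(),                                // no hints
            ImmutableList.of(rexBuilder.makeLiteral("hello")), // projected expressions
            ImmutableList.of("literal"));                      // output field name
}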