
Example 31 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class SqlToOperationConverter, method convertViewQuery.

/**
 * Convert the query part of a VIEW statement.
 */
private CatalogView convertViewQuery(SqlNode query, List<SqlNode> fieldNames, Map<String, String> props, String comment) {
    // Unparse the SQL string (getQuotedSqlString()) before the node conversion
    // (toQueryOperation()), because before Calcite 1.22.0 the SqlWindow bounds
    // were mutated to defaults during sql-to-rel conversion when they were null
    // (not specified). This bug was fixed by CALCITE-3877 in Calcite 1.23.0.
    String originalQuery = getQuotedSqlString(query);
    SqlNode validateQuery = flinkPlanner.validate(query);
    // The LATERAL operator is eliminated during SQL validation, so the unparsed SQL
    // would no longer contain LATERAL, which is problematic. The issue was resolved
    // in CALCITE-4077 (always treat the table function as implicitly LATERAL).
    String expandedQuery = Expander.create(flinkPlanner).expanded(originalQuery).substitute(this::getQuotedSqlString);
    PlannerQueryOperation operation = toQueryOperation(flinkPlanner, validateQuery);
    ResolvedSchema schema = operation.getResolvedSchema();
    // Substitute the column names with the names from the view column list, if one was given.
    if (!fieldNames.isEmpty()) {
        // alias column names:
        List<String> inputFieldNames = schema.getColumnNames();
        List<String> aliasFieldNames = fieldNames.stream().map(SqlNode::toString).collect(Collectors.toList());
        if (inputFieldNames.size() != aliasFieldNames.size()) {
            throw new ValidationException(String.format("VIEW definition and input fields not match:\n\tDef fields: %s.\n\tInput fields: %s.", aliasFieldNames, inputFieldNames));
        }
        schema = ResolvedSchema.physical(aliasFieldNames, schema.getColumnDataTypes());
    }
    return CatalogView.of(Schema.newBuilder().fromResolvedSchema(schema).build(), comment, originalQuery, expandedQuery, props);
}
Also used : ValidationException(org.apache.flink.table.api.ValidationException) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) SqlNode(org.apache.calcite.sql.SqlNode)
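
For readers unfamiliar with the schema handling above, the alias substitution boils down to keeping the query's data types and swapping in the names from the view column list. A minimal, self-contained sketch of that step (class and column names here are illustrative, not taken from the Flink source):

import java.util.Arrays;
import java.util.List;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.catalog.ResolvedSchema;

public class ViewAliasSketch {
    public static void main(String[] args) {
        // Schema derived from the view's SELECT clause (names are illustrative).
        ResolvedSchema querySchema =
                ResolvedSchema.physical(
                        Arrays.asList("f0", "f1"),
                        Arrays.asList(DataTypes.BIGINT(), DataTypes.STRING()));

        // Column list as given in e.g. CREATE VIEW v (id, name) AS SELECT ...
        List<String> aliasFieldNames = Arrays.asList("id", "name");

        // Same arity check as in convertViewQuery above.
        if (querySchema.getColumnCount() != aliasFieldNames.size()) {
            throw new ValidationException("VIEW column list and query fields do not match.");
        }

        // Keep the query's data types, replace only the column names.
        ResolvedSchema aliased =
                ResolvedSchema.physical(aliasFieldNames, querySchema.getColumnDataTypes());
        System.out.println(aliased.getColumnNames()); // [id, name]
    }
}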

Example 32 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class DynamicTableSourceSpecSerdeTest, method testDynamicTableSinkSpecSerde.

public static Stream<DynamicTableSourceSpec> testDynamicTableSinkSpecSerde() {
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", FileSystemTableFactory.IDENTIFIER);
    options1.put("format", TestCsvFormatFactory.IDENTIFIER);
    options1.put("path", "/tmp");
    final ResolvedSchema resolvedSchema1 = new ResolvedSchema(Collections.singletonList(Column.physical("a", DataTypes.BIGINT())), Collections.emptyList(), null);
    final CatalogTable catalogTable1 = CatalogTable.of(Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(), null, Collections.emptyList(), options1);
    DynamicTableSourceSpec spec1 = new DynamicTableSourceSpec(ContextResolvedTable.temporary(ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"), new ResolvedCatalogTable(catalogTable1, resolvedSchema1)), null);
    Map<String, String> options2 = new HashMap<>();
    options2.put("connector", TestValuesTableFactory.IDENTIFIER);
    options2.put("disable-lookup", "true");
    options2.put("enable-watermark-push-down", "true");
    options2.put("filterable-fields", "b");
    options2.put("bounded", "false");
    options2.put("readable-metadata", "m1:INT, m2:STRING");
    final ResolvedSchema resolvedSchema2 = new ResolvedSchema(Arrays.asList(Column.physical("a", DataTypes.BIGINT()), Column.physical("b", DataTypes.INT()), Column.physical("c", DataTypes.STRING()), Column.physical("p", DataTypes.STRING()), Column.metadata("m1", DataTypes.INT(), null, false), Column.metadata("m2", DataTypes.STRING(), null, false), Column.physical("ts", DataTypes.TIMESTAMP(3))), Collections.emptyList(), null);
    final CatalogTable catalogTable2 = CatalogTable.of(Schema.newBuilder().fromResolvedSchema(resolvedSchema2).build(), null, Collections.emptyList(), options2);
    FlinkTypeFactory factory = FlinkTypeFactory.INSTANCE();
    RexBuilder rexBuilder = new RexBuilder(factory);
    DynamicTableSourceSpec spec2 = new DynamicTableSourceSpec(
            ContextResolvedTable.temporary(
                    ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                    new ResolvedCatalogTable(catalogTable2, resolvedSchema2)),
            Arrays.asList(
                    new ProjectPushDownSpec(
                            new int[][] { { 0 }, { 1 }, { 4 }, { 6 } },
                            RowType.of(
                                    new LogicalType[] { new BigIntType(), new IntType(), new IntType(), new TimestampType(3) },
                                    new String[] { "a", "b", "m1", "ts" })),
                    new ReadingMetadataSpec(
                            Arrays.asList("m1", "m2"),
                            RowType.of(
                                    new LogicalType[] { new BigIntType(), new IntType(), new IntType(), new TimestampType(3) },
                                    new String[] { "a", "b", "m1", "ts" })),
                    new FilterPushDownSpec(
                            Collections.singletonList(
                                    // b >= 10
                                    rexBuilder.makeCall(
                                            SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
                                            rexBuilder.makeInputRef(factory.createSqlType(SqlTypeName.INTEGER), 1),
                                            rexBuilder.makeExactLiteral(new BigDecimal(10))))),
                    new WatermarkPushDownSpec(
                            rexBuilder.makeCall(
                                    SqlStdOperatorTable.MINUS,
                                    rexBuilder.makeInputRef(factory.createSqlType(SqlTypeName.TIMESTAMP, 3), 3),
                                    rexBuilder.makeIntervalLiteral(
                                            BigDecimal.valueOf(1000),
                                            new SqlIntervalQualifier(TimeUnit.SECOND, 2, TimeUnit.SECOND, 6, SqlParserPos.ZERO))),
                            5000,
                            RowType.of(new BigIntType(), new IntType(), new IntType(), new TimestampType(false, TimestampKind.ROWTIME, 3))),
                    new SourceWatermarkSpec(
                            true,
                            RowType.of(new BigIntType(), new IntType(), new IntType(), new TimestampType(false, TimestampKind.ROWTIME, 3))),
                    new LimitPushDownSpec(100),
                    new PartitionPushDownSpec(
                            Arrays.asList(
                                    new HashMap<String, String>() { { put("p", "A"); } },
                                    new HashMap<String, String>() { { put("p", "B"); } }))));
    return Stream.of(spec1, spec2);
}
Also used : WatermarkPushDownSpec(org.apache.flink.table.planner.plan.abilities.source.WatermarkPushDownSpec) HashMap(java.util.HashMap) ProjectPushDownSpec(org.apache.flink.table.planner.plan.abilities.source.ProjectPushDownSpec) SqlIntervalQualifier(org.apache.calcite.sql.SqlIntervalQualifier) DynamicTableSourceSpec(org.apache.flink.table.planner.plan.nodes.exec.spec.DynamicTableSourceSpec) LogicalType(org.apache.flink.table.types.logical.LogicalType) BigIntType(org.apache.flink.table.types.logical.BigIntType) CatalogTable(org.apache.flink.table.catalog.CatalogTable) ResolvedCatalogTable(org.apache.flink.table.catalog.ResolvedCatalogTable) ReadingMetadataSpec(org.apache.flink.table.planner.plan.abilities.source.ReadingMetadataSpec) BigDecimal(java.math.BigDecimal) IntType(org.apache.flink.table.types.logical.IntType) SourceWatermarkSpec(org.apache.flink.table.planner.plan.abilities.source.SourceWatermarkSpec) LimitPushDownSpec(org.apache.flink.table.planner.plan.abilities.source.LimitPushDownSpec) PartitionPushDownSpec(org.apache.flink.table.planner.plan.abilities.source.PartitionPushDownSpec) FilterPushDownSpec(org.apache.flink.table.planner.plan.abilities.source.FilterPushDownSpec) FlinkTypeFactory(org.apache.flink.table.planner.calcite.FlinkTypeFactory) RexBuilder(org.apache.calcite.rex.RexBuilder) TimestampType(org.apache.flink.table.types.logical.TimestampType) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
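
For reference, the ResolvedSchema feeding the source spec above mixes physical and metadata columns. A minimal sketch of the same constructor call, with illustrative column names, no watermark specs and no primary key:

import java.util.Arrays;
import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class SourceSchemaSketch {
    public static void main(String[] args) {
        // Physical columns come from the record itself; metadata columns are read from
        // connector metadata (null key = use the column name, false = not virtual).
        ResolvedSchema schema =
                new ResolvedSchema(
                        Arrays.asList(
                                Column.physical("a", DataTypes.BIGINT()),
                                Column.physical("ts", DataTypes.TIMESTAMP(3)),
                                Column.metadata("m1", DataTypes.INT(), null, false)),
                        Collections.emptyList(), // no watermark specs
                        null); // no primary key
        System.out.println(schema.getColumnNames()); // [a, ts, m1]
    }
}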

Example 33 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class DynamicTableSinkSpecSerdeTest, method testDynamicTableSinkSpecSerde.

static Stream<DynamicTableSinkSpec> testDynamicTableSinkSpecSerde() {
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", FileSystemTableFactory.IDENTIFIER);
    options1.put("format", TestCsvFormatFactory.IDENTIFIER);
    options1.put("path", "/tmp");
    final ResolvedSchema resolvedSchema1 = new ResolvedSchema(Collections.singletonList(Column.physical("a", DataTypes.BIGINT())), Collections.emptyList(), null);
    final CatalogTable catalogTable1 = CatalogTable.of(Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(), null, Collections.emptyList(), options1);
    DynamicTableSinkSpec spec1 = new DynamicTableSinkSpec(ContextResolvedTable.temporary(ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"), new ResolvedCatalogTable(catalogTable1, resolvedSchema1)), null);
    Map<String, String> options2 = new HashMap<>();
    options2.put("connector", FileSystemTableFactory.IDENTIFIER);
    options2.put("format", TestCsvFormatFactory.IDENTIFIER);
    options2.put("path", "/tmp");
    final ResolvedSchema resolvedSchema2 = new ResolvedSchema(Arrays.asList(Column.physical("a", DataTypes.BIGINT()), Column.physical("b", DataTypes.INT()), Column.physical("p", DataTypes.STRING())), Collections.emptyList(), null);
    final CatalogTable catalogTable2 = CatalogTable.of(Schema.newBuilder().fromResolvedSchema(resolvedSchema2).build(), null, Collections.emptyList(), options2);
    DynamicTableSinkSpec spec2 = new DynamicTableSinkSpec(
            ContextResolvedTable.temporary(
                    ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                    new ResolvedCatalogTable(catalogTable2, resolvedSchema2)),
            Arrays.asList(
                    new OverwriteSpec(true),
                    new PartitioningSpec(new HashMap<String, String>() { { put("p", "A"); } })));
    Map<String, String> options3 = new HashMap<>();
    options3.put("connector", TestValuesTableFactory.IDENTIFIER);
    options3.put("writable-metadata", "m:STRING");
    final ResolvedSchema resolvedSchema3 = new ResolvedSchema(Arrays.asList(Column.physical("a", DataTypes.BIGINT()), Column.physical("b", DataTypes.INT()), Column.metadata("m", DataTypes.STRING(), null, false)), Collections.emptyList(), null);
    final CatalogTable catalogTable3 = CatalogTable.of(Schema.newBuilder().fromResolvedSchema(resolvedSchema3).build(), null, Collections.emptyList(), options3);
    DynamicTableSinkSpec spec3 = new DynamicTableSinkSpec(ContextResolvedTable.temporary(ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"), new ResolvedCatalogTable(catalogTable3, resolvedSchema3)), Collections.singletonList(new WritingMetadataSpec(Collections.singletonList("m"), RowType.of(new BigIntType(), new IntType()))));
    return Stream.of(spec1, spec2, spec3);
}
Also used : WritingMetadataSpec(org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec) HashMap(java.util.HashMap) BigIntType(org.apache.flink.table.types.logical.BigIntType) OverwriteSpec(org.apache.flink.table.planner.plan.abilities.sink.OverwriteSpec) CatalogTable(org.apache.flink.table.catalog.CatalogTable) ResolvedCatalogTable(org.apache.flink.table.catalog.ResolvedCatalogTable) DynamicTableSinkSpec(org.apache.flink.table.planner.plan.nodes.exec.spec.DynamicTableSinkSpec) PartitioningSpec(org.apache.flink.table.planner.plan.abilities.sink.PartitioningSpec) IntType(org.apache.flink.table.types.logical.IntType) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
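
Both serde tests pair an unresolved CatalogTable with its ResolvedSchema before wrapping the pair into a spec. A stripped-down sketch of that pairing (the class name and connector option are illustrative; the planner-internal ContextResolvedTable and spec classes are left out):

import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedCatalogTable;
import org.apache.flink.table.catalog.ResolvedSchema;

public class ResolvedCatalogTableSketch {
    public static void main(String[] args) {
        ResolvedSchema resolvedSchema = ResolvedSchema.of(Column.physical("a", DataTypes.BIGINT()));

        // The plain CatalogTable carries only the unresolved Schema plus options...
        CatalogTable catalogTable =
                CatalogTable.of(
                        Schema.newBuilder().fromResolvedSchema(resolvedSchema).build(),
                        null, // no comment
                        Collections.emptyList(), // no partition keys
                        Collections.singletonMap("connector", "filesystem"));

        // ...and is paired with its ResolvedSchema after validation, as in the specs above.
        ResolvedCatalogTable resolved = new ResolvedCatalogTable(catalogTable, resolvedSchema);
        System.out.println(resolved.getResolvedSchema().getColumnNames()); // [a]
    }
}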

Example 34 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class AliasOperationUtils, method createAliasList.

/**
 * Creates a list of valid alias expressions. The resulting expressions might still contain
 * {@link UnresolvedReferenceExpression}.
 *
 * @param aliases aliases to validate
 * @param child relational operation on top of which to apply the aliases
 * @return validated list of aliases
 */
static List<Expression> createAliasList(List<Expression> aliases, QueryOperation child) {
    ResolvedSchema childSchema = child.getResolvedSchema();
    if (aliases.size() > childSchema.getColumnCount()) {
        throw new ValidationException("Aliasing more fields than we actually have.");
    }
    List<ValueLiteralExpression> fieldAliases = aliases.stream().map(f -> f.accept(aliasLiteralValidator)).collect(Collectors.toList());
    List<String> childNames = childSchema.getColumnNames();
    return IntStream.range(0, childNames.size()).mapToObj(idx -> {
        UnresolvedReferenceExpression oldField = unresolvedRef(childNames.get(idx));
        if (idx < fieldAliases.size()) {
            ValueLiteralExpression alias = fieldAliases.get(idx);
            return unresolvedCall(BuiltInFunctionDefinitions.AS, oldField, alias);
        } else {
            return oldField;
        }
    }).collect(Collectors.toList());
}
Also used : IntStream(java.util.stream.IntStream) QueryOperation(org.apache.flink.table.operations.QueryOperation) ApiExpressionUtils.valueLiteral(org.apache.flink.table.expressions.ApiExpressionUtils.valueLiteral) UnresolvedReferenceExpression(org.apache.flink.table.expressions.UnresolvedReferenceExpression) ExpressionUtils(org.apache.flink.table.expressions.ExpressionUtils) ApiExpressionDefaultVisitor(org.apache.flink.table.expressions.utils.ApiExpressionDefaultVisitor) BuiltInFunctionDefinitions(org.apache.flink.table.functions.BuiltInFunctionDefinitions) ApiExpressionUtils.unresolvedCall(org.apache.flink.table.expressions.ApiExpressionUtils.unresolvedCall) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Expression(org.apache.flink.table.expressions.Expression) ValueLiteralExpression(org.apache.flink.table.expressions.ValueLiteralExpression) Collectors(java.util.stream.Collectors) ApiExpressionUtils.unresolvedRef(org.apache.flink.table.expressions.ApiExpressionUtils.unresolvedRef) List(java.util.List) ValidationException(org.apache.flink.table.api.ValidationException) Internal(org.apache.flink.annotation.Internal)
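
The user-facing counterpart of this utility is Table.as(...), which renames columns positionally over the child operation's ResolvedSchema. A minimal sketch, assuming a Flink planner is on the classpath and using illustrative field names and values:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.types.Row;

public class AliasSketch {
    public static void main(String[] args) {
        TableEnvironment env =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());

        // A two-column source with the default names f0 and f1.
        Table source =
                env.fromValues(
                        DataTypes.ROW(
                                DataTypes.FIELD("f0", DataTypes.BIGINT()),
                                DataTypes.FIELD("f1", DataTypes.STRING())),
                        Row.of(1L, "a"));

        // Renames the columns positionally; per createAliasList above, supplying more
        // names than columns fails with "Aliasing more fields than we actually have."
        Table aliased = source.as("id", "name");
        aliased.printSchema();
    }
}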

Example 35 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class QueryOperationTest, method testWindowAggregationSummaryString.

@Test
public void testWindowAggregationSummaryString() {
    ResolvedSchema schema = ResolvedSchema.physical(Collections.singletonList("a"), Collections.singletonList(DataTypes.INT()));
    FieldReferenceExpression field = new FieldReferenceExpression("a", DataTypes.INT(), 0, 0);
    WindowAggregateQueryOperation tableOperation = new WindowAggregateQueryOperation(Collections.singletonList(field), Collections.singletonList(CallExpression.permanent(BuiltInFunctionDefinitions.SUM, Collections.singletonList(field), DataTypes.INT())), Collections.emptyList(), WindowAggregateQueryOperation.ResolvedGroupWindow.sessionWindow("w", field, intervalOfMillis(10)), new SourceQueryOperation(ContextResolvedTable.temporary(ObjectIdentifier.of("cat1", "db1", "tab1"), new ResolvedCatalogTable(CatalogTable.of(Schema.newBuilder().build(), null, Collections.emptyList(), Collections.emptyMap()), schema))), schema);
    DistinctQueryOperation distinctQueryOperation = new DistinctQueryOperation(tableOperation);
    assertEquals("Distinct:\n" + "    WindowAggregate: (group: [a], agg: [sum(a)], windowProperties: []," + " window: [SessionWindow(field: [a], gap: [10])])\n" + "        CatalogTable: (identifier: [cat1.db1.tab1], fields: [a])", distinctQueryOperation.asSummaryString());
}
Also used : ResolvedCatalogTable(org.apache.flink.table.catalog.ResolvedCatalogTable) FieldReferenceExpression(org.apache.flink.table.expressions.FieldReferenceExpression) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)
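
As a small standalone illustration of the schema and field reference the test builds (the wrapper class and printed comment are illustrative assumptions, not part of the test):

import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.expressions.FieldReferenceExpression;

public class FieldRefSketch {
    public static void main(String[] args) {
        // Same factory the test uses: a purely physical, single-column schema.
        ResolvedSchema schema =
                ResolvedSchema.physical(
                        Collections.singletonList("a"),
                        Collections.singletonList(DataTypes.INT()));

        // Arguments: field name, data type, index of the owning input (0), and the
        // field's position within that input (0), matching the test above.
        FieldReferenceExpression field =
                new FieldReferenceExpression("a", schema.getColumnDataTypes().get(0), 0, 0);

        System.out.println(field.asSummaryString()); // prints the field name
    }
}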

Aggregations

ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema) 84
Test (org.junit.Test) 50
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink) 20
DataType (org.apache.flink.table.types.DataType) 20
RowData (org.apache.flink.table.data.RowData) 17
ValidationException (org.apache.flink.table.api.ValidationException) 14
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable) 14
List (java.util.List) 11
SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) 11
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource) 10
Column (org.apache.flink.table.catalog.Column) 9
LogicalType (org.apache.flink.table.types.logical.LogicalType) 9
RowType (org.apache.flink.table.types.logical.RowType) 9
HashMap (java.util.HashMap) 8
Collectors (java.util.stream.Collectors) 8
RelDataType (org.apache.calcite.rel.type.RelDataType) 8
Internal (org.apache.flink.annotation.Internal) 8
HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions) 6
FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory) 6
Row (org.apache.flink.types.Row) 6