Example 1 with Column

Use of org.apache.flink.table.catalog.Column in project flink by apache.

In class ElasticsearchDynamicSinkFactoryBase, the method getPrimaryKeyLogicalTypesWithIndex:

List<LogicalTypeWithIndex> getPrimaryKeyLogicalTypesWithIndex(Context context) {
    DataType physicalRowDataType = context.getPhysicalRowDataType();
    int[] primaryKeyIndexes = context.getPrimaryKeyIndexes();
    if (primaryKeyIndexes.length != 0) {
        // Project the physical row type down to the primary-key fields and validate them.
        DataType pkDataType = Projection.of(primaryKeyIndexes).project(physicalRowDataType);
        ElasticsearchValidationUtils.validatePrimaryKey(pkDataType);
    }
    ResolvedSchema resolvedSchema = context.getCatalogTable().getResolvedSchema();
    // Resolve each primary-key index to its column and pair the column's logical type with the index.
    return Arrays.stream(primaryKeyIndexes).mapToObj(index -> {
        Optional<Column> column = resolvedSchema.getColumn(index);
        if (!column.isPresent()) {
            throw new IllegalStateException(String.format("No primary key column found with index '%s'.", index));
        }
        LogicalType logicalType = column.get().getDataType().getLogicalType();
        return new LogicalTypeWithIndex(index, logicalType);
    }).collect(Collectors.toList());
}
Also used : DataType(org.apache.flink.table.types.DataType) EncodingFormat(org.apache.flink.table.connector.format.EncodingFormat) Arrays(java.util.Arrays) BULK_FLUSH_MAX_SIZE_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_MAX_SIZE_OPTION) SerializationFormatFactory(org.apache.flink.table.factories.SerializationFormatFactory) HOSTS_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.HOSTS_OPTION) INDEX_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.INDEX_OPTION) Column(org.apache.flink.table.catalog.Column) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) USERNAME_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.USERNAME_OPTION) Supplier(java.util.function.Supplier) TableConfigOptions(org.apache.flink.table.api.config.TableConfigOptions) ReadableConfig(org.apache.flink.configuration.ReadableConfig) BULK_FLUSH_MAX_ACTIONS_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_MAX_ACTIONS_OPTION) SOCKET_TIMEOUT(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.SOCKET_TIMEOUT) ConfigOption(org.apache.flink.configuration.ConfigOption) Preconditions.checkNotNull(org.apache.flink.util.Preconditions.checkNotNull) BULK_FLUSH_BACKOFF_TYPE_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_TYPE_OPTION) PASSWORD_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.PASSWORD_OPTION) Nullable(javax.annotation.Nullable) Projection(org.apache.flink.table.connector.Projection) RowData(org.apache.flink.table.data.RowData) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) CONNECTION_REQUEST_TIMEOUT(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_REQUEST_TIMEOUT) KEY_DELIMITER_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.KEY_DELIMITER_OPTION) Strings.capitalize(org.elasticsearch.common.Strings.capitalize) DynamicTableSinkFactory(org.apache.flink.table.factories.DynamicTableSinkFactory) BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION) Set(java.util.Set) SINK_PARALLELISM(org.apache.flink.table.factories.FactoryUtil.SINK_PARALLELISM) StringUtils(org.apache.flink.util.StringUtils) Collectors(java.util.stream.Collectors) ZoneId(java.time.ZoneId) CONNECTION_TIMEOUT(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_TIMEOUT) BULK_FLUSH_INTERVAL_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_INTERVAL_OPTION) List(java.util.List) Stream(java.util.stream.Stream) DELIVERY_GUARANTEE_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.DELIVERY_GUARANTEE_OPTION) FactoryUtil(org.apache.flink.table.factories.FactoryUtil) LogicalType(org.apache.flink.table.types.logical.LogicalType) FORMAT_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.FORMAT_OPTION) ValidationException(org.apache.flink.table.api.ValidationException) BULK_FLUSH_BACKOFF_DELAY_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.BULK_FLUSH_BACKOFF_DELAY_OPTION) Optional(java.util.Optional) Internal(org.apache.flink.annotation.Internal) 
SerializationSchema(org.apache.flink.api.common.serialization.SerializationSchema) CONNECTION_PATH_PREFIX_OPTION(org.apache.flink.connector.elasticsearch.table.ElasticsearchConnectorOptions.CONNECTION_PATH_PREFIX_OPTION) Optional(java.util.Optional) DataType(org.apache.flink.table.types.DataType) LogicalType(org.apache.flink.table.types.logical.LogicalType) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
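
To make the Column/ResolvedSchema interplay above concrete, here is a minimal, self-contained sketch. It is not taken from the Flink codebase; the class name PrimaryKeyLookupSketch and the sample columns are invented for illustration. It shows the same call chain the factory relies on: look a column up by index via ResolvedSchema#getColumn and read its LogicalType from the column's DataType.

import java.util.Optional;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.logical.LogicalType;

public class PrimaryKeyLookupSketch {

    public static void main(String[] args) {
        // A schema with two physical columns; index 0 plays the role of the primary key.
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("id", DataTypes.BIGINT().notNull()),
                        Column.physical("name", DataTypes.STRING()));

        int primaryKeyIndex = 0;
        Optional<Column> column = schema.getColumn(primaryKeyIndex);
        if (!column.isPresent()) {
            throw new IllegalStateException(
                    String.format("No column found at index '%s'.", primaryKeyIndex));
        }

        // Same chain as in the factory: Column -> DataType -> LogicalType.
        LogicalType logicalType = column.get().getDataType().getLogicalType();
        System.out.println(logicalType); // prints something like BIGINT NOT NULL
    }
}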

Example 2 with Column

Use of org.apache.flink.table.catalog.Column in project flink by apache.

In class DynamicSinkUtils, the method validateAndApplyMetadata:

private static void validateAndApplyMetadata(String tableDebugName, DynamicTableSink sink, ResolvedSchema schema, List<SinkAbilitySpec> sinkAbilitySpecs) {
    final List<Column> columns = schema.getColumns();
    final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
    if (metadataColumns.isEmpty()) {
        return;
    }
    if (!(sink instanceof SupportsWritingMetadata)) {
        throw new ValidationException(String.format("Table '%s' declares persistable metadata columns, but the underlying %s " + "doesn't implement the %s interface. If the column should not " + "be persisted, it can be declared with the VIRTUAL keyword.", tableDebugName, DynamicTableSink.class.getSimpleName(), SupportsWritingMetadata.class.getSimpleName()));
    }
    final Map<String, DataType> metadataMap = ((SupportsWritingMetadata) sink).listWritableMetadata();
    metadataColumns.forEach(pos -> {
        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
        final String metadataKey = metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
        final LogicalType metadataType = metadataColumn.getDataType().getLogicalType();
        final DataType expectedMetadataDataType = metadataMap.get(metadataKey);
        // check that metadata key is valid
        if (expectedMetadataDataType == null) {
            throw new ValidationException(String.format("Invalid metadata key '%s' in column '%s' of table '%s'. " + "The %s class '%s' supports the following metadata keys for writing:\n%s", metadataKey, metadataColumn.getName(), tableDebugName, DynamicTableSink.class.getSimpleName(), sink.getClass().getName(), String.join("\n", metadataMap.keySet())));
        }
        // check that types are compatible
        if (!supportsExplicitCast(metadataType, expectedMetadataDataType.getLogicalType())) {
            if (metadataKey.equals(metadataColumn.getName())) {
                throw new ValidationException(String.format("Invalid data type for metadata column '%s' of table '%s'. " + "The column cannot be declared as '%s' because the type must be " + "castable to metadata type '%s'.", metadataColumn.getName(), tableDebugName, metadataType, expectedMetadataDataType.getLogicalType()));
            } else {
                throw new ValidationException(String.format("Invalid data type for metadata column '%s' with metadata key '%s' of table '%s'. " + "The column cannot be declared as '%s' because the type must be " + "castable to metadata type '%s'.", metadataColumn.getName(), metadataKey, tableDebugName, metadataType, expectedMetadataDataType.getLogicalType()));
            }
        }
    });
    sinkAbilitySpecs.add(new WritingMetadataSpec(createRequiredMetadataKeys(schema, sink), createConsumedType(schema, sink)));
}
Also used : WritingMetadataSpec(org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec) MetadataColumn(org.apache.flink.table.catalog.Column.MetadataColumn) ValidationException(org.apache.flink.table.api.ValidationException) MetadataColumn(org.apache.flink.table.catalog.Column.MetadataColumn) Column(org.apache.flink.table.catalog.Column) SupportsWritingMetadata(org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata) DataType(org.apache.flink.table.types.DataType) RelDataType(org.apache.calcite.rel.type.RelDataType) LogicalType(org.apache.flink.table.types.logical.LogicalType)
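
A minimal sketch, assuming only the public Column factory methods, of the metadata-key fallback used above: a metadata column may carry an explicit key, and when it does not, its name doubles as the key. The class name MetadataKeySketch and the sample columns are invented for illustration.

import java.util.Arrays;
import java.util.List;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.Column.MetadataColumn;

public class MetadataKeySketch {

    public static void main(String[] args) {
        // A persisted metadata column with an explicit key, and one without a key.
        Column withKey = Column.metadata("ts", DataTypes.TIMESTAMP_LTZ(3), "timestamp", false);
        Column withoutKey = Column.metadata("offset", DataTypes.BIGINT(), null, false);

        List<Column> columns = Arrays.asList(withKey, withoutKey);
        for (Column c : columns) {
            MetadataColumn metadataColumn = (MetadataColumn) c;
            // Same fallback as in validateAndApplyMetadata: explicit key first, column name otherwise.
            String metadataKey = metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
            System.out.println(metadataKey); // prints "timestamp", then "offset"
        }
    }
}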

Example 3 with Column

Use of org.apache.flink.table.catalog.Column in project flink by apache.

In class CatalogSchemaTable, the method getRowType:

@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
    final FlinkTypeFactory flinkTypeFactory = (FlinkTypeFactory) typeFactory;
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
    // The following block is a workaround to support tables defined by TableEnvironment.connect()
    // whose actual table sources implement DefinedProctimeAttribute/DefinedRowtimeAttributes.
    // It should be removed once DefinedProctimeAttribute/DefinedRowtimeAttributes are removed.
    Optional<TableSource<?>> sourceOpt = findAndCreateTableSource();
    if (isStreamingMode && sourceOpt.isPresent() && schema.getColumns().stream().allMatch(Column::isPhysical) && schema.getWatermarkSpecs().isEmpty()) {
        TableSchema tableSchema = TableSchema.fromResolvedSchema(schema);
        TableSource<?> source = sourceOpt.get();
        if (TableSourceValidation.hasProctimeAttribute(source) || TableSourceValidation.hasRowtimeAttribute(source)) {
            // If the table is defined by TableEnvironment.connect() and uses the legacy
            // proctime and rowtime descriptors, the TableSchema should fall back to
            // ConnectorCatalogTable#calculateSourceSchema.
            tableSchema = ConnectorCatalogTable.calculateSourceSchema(source, false);
        }
        return TableSourceUtil.getSourceRowType(flinkTypeFactory, tableSchema, scala.Option.empty(), true);
    }
    final List<String> fieldNames = schema.getColumnNames();
    final List<LogicalType> fieldTypes = schema.getColumnDataTypes().stream().map(DataType::getLogicalType).map(PlannerTypeUtils::removeLegacyTypes).collect(Collectors.toList());
    return flinkTypeFactory.buildRelNodeRowType(fieldNames, fieldTypes);
}
Also used : TableSource(org.apache.flink.table.sources.TableSource) StreamTableSource(org.apache.flink.table.sources.StreamTableSource) FlinkTypeFactory(org.apache.flink.table.planner.calcite.FlinkTypeFactory) TableSchema(org.apache.flink.table.api.TableSchema) Column(org.apache.flink.table.catalog.Column) LogicalType(org.apache.flink.table.types.logical.LogicalType) DataType(org.apache.flink.table.types.DataType) RelDataType(org.apache.calcite.rel.type.RelDataType) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
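
A minimal sketch, not planner code, of the schema inspection used above: Column::isPhysical for the all-physical check and getColumnNames()/getColumnDataTypes() as the inputs for building the row type. The class name RowTypeInputsSketch and the sample columns are invented for illustration.

import java.util.List;
import java.util.stream.Collectors;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;

public class RowTypeInputsSketch {

    public static void main(String[] args) {
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("id", DataTypes.BIGINT()),
                        Column.physical("name", DataTypes.STRING()));

        // The workaround branch above only applies when every column is physical
        // and no watermark specs are defined.
        boolean allPhysical = schema.getColumns().stream().allMatch(Column::isPhysical);
        boolean noWatermarks = schema.getWatermarkSpecs().isEmpty();

        // Otherwise, field names and logical types are extracted directly from the schema.
        List<String> fieldNames = schema.getColumnNames();
        List<LogicalType> fieldTypes =
                schema.getColumnDataTypes().stream()
                        .map(DataType::getLogicalType)
                        .collect(Collectors.toList());

        System.out.println(allPhysical && noWatermarks); // true for this sample schema
        System.out.println(fieldNames);                  // [id, name]
        System.out.println(fieldTypes);
    }
}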

Example 4 with Column

Use of org.apache.flink.table.catalog.Column in project flink by apache.

In class DynamicSourceUtils, the method pushGeneratedProjection:

/**
 * Creates a projection that adds computed columns and finalizes the table schema.
 */
private static void pushGeneratedProjection(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
    final ExpressionConverter converter = new ExpressionConverter(relBuilder);
    final List<RexNode> projection = schema.getColumns().stream().map(c -> {
        if (c instanceof ComputedColumn) {
            final ComputedColumn computedColumn = (ComputedColumn) c;
            return computedColumn.getExpression().accept(converter);
        } else {
            return relBuilder.field(c.getName());
        }
    }).collect(Collectors.toList());
    relBuilder.projectNamed(projection, schema.getColumns().stream().map(Column::getName).collect(Collectors.toList()), true);
}
Also used : DataType(org.apache.flink.table.types.DataType) ScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider) ChangelogMode(org.apache.flink.table.connector.ChangelogMode) Column(org.apache.flink.table.catalog.Column) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) ShortcutUtils(org.apache.flink.table.planner.utils.ShortcutUtils) RowType(org.apache.flink.table.types.logical.RowType) ScanTableSource(org.apache.flink.table.connector.source.ScanTableSource) FlinkRelBuilder(org.apache.flink.table.planner.calcite.FlinkRelBuilder) MetadataColumn(org.apache.flink.table.catalog.Column.MetadataColumn) ReadableConfig(org.apache.flink.configuration.ReadableConfig) RexNode(org.apache.calcite.rex.RexNode) RowField(org.apache.flink.table.types.logical.RowType.RowField) RelHint(org.apache.calcite.rel.hint.RelHint) Map(java.util.Map) LogicalTypeCasts.supportsExplicitCast(org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsExplicitCast) ResolvedCatalogTable(org.apache.flink.table.catalog.ResolvedCatalogTable) ContextResolvedTable(org.apache.flink.table.catalog.ContextResolvedTable) RelDataType(org.apache.calcite.rel.type.RelDataType) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) TableConfig(org.apache.flink.table.api.TableConfig) WatermarkSpec(org.apache.flink.table.catalog.WatermarkSpec) RexBuilder(org.apache.calcite.rex.RexBuilder) TableException(org.apache.flink.table.api.TableException) Set(java.util.Set) ExpressionConverter(org.apache.flink.table.planner.expressions.converter.ExpressionConverter) RelNode(org.apache.calcite.rel.RelNode) Collectors(java.util.stream.Collectors) SourceAbilitySpec(org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec) TableSourceTable(org.apache.flink.table.planner.plan.schema.TableSourceTable) ComputedColumn(org.apache.flink.table.catalog.Column.ComputedColumn) DataStream(org.apache.flink.streaming.api.datastream.DataStream) List(java.util.List) Stream(java.util.stream.Stream) LogicalType(org.apache.flink.table.types.logical.LogicalType) FlinkStatistic(org.apache.flink.table.planner.plan.stats.FlinkStatistic) RowKind(org.apache.flink.types.RowKind) ValidationException(org.apache.flink.table.api.ValidationException) SupportsReadingMetadata(org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata) Internal(org.apache.flink.annotation.Internal) TypeConversions(org.apache.flink.table.types.utils.TypeConversions) ExecutionConfigOptions(org.apache.flink.table.api.config.ExecutionConfigOptions) Collections(java.util.Collections) ScanRuntimeProviderContext(org.apache.flink.table.runtime.connector.source.ScanRuntimeProviderContext) LogicalTableScan(org.apache.calcite.rel.logical.LogicalTableScan) Column(org.apache.flink.table.catalog.Column) MetadataColumn(org.apache.flink.table.catalog.Column.MetadataColumn) ComputedColumn(org.apache.flink.table.catalog.Column.ComputedColumn) ExpressionConverter(org.apache.flink.table.planner.expressions.converter.ExpressionConverter) ComputedColumn(org.apache.flink.table.catalog.Column.ComputedColumn) RexNode(org.apache.calcite.rex.RexNode)
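
A minimal sketch of the per-column branching used above, restricted to physical columns since constructing a ComputedColumn requires a resolved expression. The class name GeneratedProjectionSketch and the sample columns are invented for illustration; the planner's RexNode conversion is replaced with plain strings.

import java.util.List;
import java.util.stream.Collectors;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.Column.ComputedColumn;
import org.apache.flink.table.catalog.ResolvedSchema;

public class GeneratedProjectionSketch {

    public static void main(String[] args) {
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("f0", DataTypes.INT()),
                        Column.physical("f1", DataTypes.STRING()));

        // Computed columns would be converted from their expression; all other columns
        // are simply referenced by name, mirroring the branches in pushGeneratedProjection.
        List<String> projectionPlan =
                schema.getColumns().stream()
                        .map(c -> c instanceof ComputedColumn
                                ? "convert expression of " + c.getName()
                                : "reference field " + c.getName())
                        .collect(Collectors.toList());

        System.out.println(projectionPlan); // [reference field f0, reference field f1]
    }
}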

Example 5 with Column

Use of org.apache.flink.table.catalog.Column in project flink by apache.

In class DataStreamJavaITCase, the test method testFromAndToChangelogStreamEventTime:

@Test
public void testFromAndToChangelogStreamEventTime() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final DataStream<Tuple3<Long, Integer, String>> dataStream = getWatermarkedDataStream();
    final DataStream<Row> changelogStream = dataStream.map(t -> Row.ofKind(RowKind.INSERT, t.f1, t.f2)).returns(Types.ROW(Types.INT, Types.STRING));
    // derive physical columns and add a rowtime
    final Table table = tableEnv.fromChangelogStream(changelogStream, Schema.newBuilder().columnByMetadata("rowtime", TIMESTAMP_LTZ(3)).columnByExpression("computed", $("f1").upperCase()).watermark("rowtime", sourceWatermark()).build());
    tableEnv.createTemporaryView("t", table);
    // access and reorder columns
    final Table reordered = tableEnv.sqlQuery("SELECT computed, rowtime, f0 FROM t");
    // write out the rowtime column with fully declared schema
    final DataStream<Row> result = tableEnv.toChangelogStream(reordered, Schema.newBuilder().column("f1", STRING()).columnByMetadata("rowtime", TIMESTAMP_LTZ(3)).columnByExpression("ignored", $("f1").upperCase()).column("f0", INT()).build());
    // test event time window and field access
    testResult(result.keyBy(k -> k.getField("f1")).window(TumblingEventTimeWindows.of(Time.milliseconds(5))).<Row>apply((key, window, input, out) -> {
        int sum = 0;
        for (Row row : input) {
            sum += row.<Integer>getFieldAs("f0");
        }
        out.collect(Row.of(key, sum));
    }).returns(Types.ROW(Types.STRING, Types.INT)), Row.of("A", 47), Row.of("C", 1000), Row.of("C", 1000));
}
Also used : DataType(org.apache.flink.table.types.DataType) BIGINT(org.apache.flink.table.api.DataTypes.BIGINT) STRING(org.apache.flink.table.api.DataTypes.STRING) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Arrays(java.util.Arrays) Schema(org.apache.flink.table.api.Schema) Tuple3(org.apache.flink.api.java.tuple.Tuple3) TableDescriptor(org.apache.flink.table.api.TableDescriptor) Tuple2(org.apache.flink.api.java.tuple.Tuple2) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) TupleTypeInfo(org.apache.flink.api.java.typeutils.TupleTypeInfo) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) TIMESTAMP_LTZ(org.apache.flink.table.api.DataTypes.TIMESTAMP_LTZ) RawType(org.apache.flink.table.types.logical.RawType) ZoneOffset(java.time.ZoneOffset) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) FIELD(org.apache.flink.table.api.DataTypes.FIELD) Parameterized(org.junit.runners.Parameterized) AbstractTestBase(org.apache.flink.test.util.AbstractTestBase) DOUBLE(org.apache.flink.table.api.DataTypes.DOUBLE) TableConfig(org.apache.flink.table.api.TableConfig) Expressions.$(org.apache.flink.table.api.Expressions.$) TestValuesTableFactory(org.apache.flink.table.planner.factories.TestValuesTableFactory) WatermarkStrategy(org.apache.flink.api.common.eventtime.WatermarkStrategy) Table(org.apache.flink.table.api.Table) ResolvedExpressionMock(org.apache.flink.table.expressions.utils.ResolvedExpressionMock) ZoneId(java.time.ZoneId) Objects(java.util.Objects) Matchers.instanceOf(org.hamcrest.Matchers.instanceOf) CloseableIterator(org.apache.flink.util.CloseableIterator) List(java.util.List) ValueState(org.apache.flink.api.common.state.ValueState) TumblingEventTimeWindows(org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows) Matchers.containsInAnyOrder(org.hamcrest.Matchers.containsInAnyOrder) STRUCTURED(org.apache.flink.table.api.DataTypes.STRUCTURED) TableResult(org.apache.flink.table.api.TableResult) Row(org.apache.flink.types.Row) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) MAP(org.apache.flink.table.api.DataTypes.MAP) BOOLEAN(org.apache.flink.table.api.DataTypes.BOOLEAN) Either(org.apache.flink.types.Either) ChangelogMode(org.apache.flink.table.connector.ChangelogMode) ROW(org.apache.flink.table.api.DataTypes.ROW) Column(org.apache.flink.table.catalog.Column) RunWith(org.junit.runner.RunWith) Parameters(org.junit.runners.Parameterized.Parameters) LocalDateTime(java.time.LocalDateTime) Expressions.sourceWatermark(org.apache.flink.table.api.Expressions.sourceWatermark) DataStreamSource(org.apache.flink.streaming.api.datastream.DataStreamSource) KeyedProcessFunction(org.apache.flink.streaming.api.functions.KeyedProcessFunction) ArrayList(java.util.ArrayList) Collector(org.apache.flink.util.Collector) ProcessFunction(org.apache.flink.streaming.api.functions.ProcessFunction) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) INT(org.apache.flink.table.api.DataTypes.INT) Before(org.junit.Before) Types(org.apache.flink.api.common.typeinfo.Types) Time(org.apache.flink.streaming.api.windowing.time.Time) WatermarkSpec(org.apache.flink.table.catalog.WatermarkSpec) GenericTypeInfo(org.apache.flink.api.java.typeutils.GenericTypeInfo) Parameter(org.junit.runners.Parameterized.Parameter) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) Configuration(org.apache.flink.configuration.Configuration) 
SingleOutputStreamOperator(org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator) DataTypes(org.apache.flink.table.api.DataTypes) Test(org.junit.Test) IOException(java.io.IOException) CollectionUtil(org.apache.flink.util.CollectionUtil) DataStream(org.apache.flink.streaming.api.datastream.DataStream) RowKind(org.apache.flink.types.RowKind) DayOfWeek(java.time.DayOfWeek) TIMESTAMP(org.apache.flink.table.api.DataTypes.TIMESTAMP) EnumTypeInfo(org.apache.flink.api.java.typeutils.EnumTypeInfo) RuntimeExecutionMode(org.apache.flink.api.common.RuntimeExecutionMode) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) Table(org.apache.flink.table.api.Table) Tuple3(org.apache.flink.api.java.tuple.Tuple3) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Row(org.apache.flink.types.Row) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) Test(org.junit.Test)
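
A minimal sketch, schema definition only with no running job, of the Schema builder calls the test combines: a metadata column for the rowtime, a computed column, and a source watermark. The class name ChangelogSchemaSketch is invented; the column names mirror the ones used in the test.

import static org.apache.flink.table.api.DataTypes.INT;
import static org.apache.flink.table.api.DataTypes.STRING;
import static org.apache.flink.table.api.DataTypes.TIMESTAMP_LTZ;
import static org.apache.flink.table.api.Expressions.$;
import static org.apache.flink.table.api.Expressions.sourceWatermark;

import org.apache.flink.table.api.Schema;

public class ChangelogSchemaSketch {

    public static void main(String[] args) {
        // Physical columns plus a metadata-backed rowtime, a computed column,
        // and a watermark propagated from the underlying source.
        Schema schema =
                Schema.newBuilder()
                        .column("f0", INT())
                        .column("f1", STRING())
                        .columnByMetadata("rowtime", TIMESTAMP_LTZ(3))
                        .columnByExpression("computed", $("f1").upperCase())
                        .watermark("rowtime", sourceWatermark())
                        .build();

        System.out.println(schema);
    }
}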

Aggregations

Column (org.apache.flink.table.catalog.Column): 14
ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 10
List (java.util.List): 9
DataType (org.apache.flink.table.types.DataType): 8
LogicalType (org.apache.flink.table.types.logical.LogicalType): 8
Collections (java.util.Collections): 7
ValidationException (org.apache.flink.table.api.ValidationException): 7
ArrayList (java.util.ArrayList): 6
Collectors (java.util.stream.Collectors): 6
Stream (java.util.stream.Stream): 6
RelDataType (org.apache.calcite.rel.type.RelDataType): 6
Internal (org.apache.flink.annotation.Internal): 6
Arrays (java.util.Arrays): 4
Map (java.util.Map): 4
Set (java.util.Set): 4
RexNode (org.apache.calcite.rex.RexNode): 4
ReadableConfig (org.apache.flink.configuration.ReadableConfig): 4
DataStream (org.apache.flink.streaming.api.datastream.DataStream): 4
TableConfig (org.apache.flink.table.api.TableConfig): 4
TableException (org.apache.flink.table.api.TableException): 4