Example 41 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

From the class KafkaDynamicTableFactoryTest, method testPrimaryKeyValidation.

@Test
public void testPrimaryKeyValidation() {
    final ResolvedSchema pkSchema =
            new ResolvedSchema(
                    SCHEMA.getColumns(),
                    SCHEMA.getWatermarkSpecs(),
                    UniqueConstraint.primaryKey(NAME, Collections.singletonList(NAME)));
    Map<String, String> sinkOptions =
            getModifiedOptions(
                    getBasicSinkOptions(),
                    options -> options.put(
                            String.format("%s.%s", TestFormatFactory.IDENTIFIER, TestFormatFactory.CHANGELOG_MODE.key()),
                            "I;UA;UB;D"));
    // a primary key can be defined on a CDC table, so this should pass
    createTableSink(pkSchema, sinkOptions);
    assertThatExceptionOfType(ValidationException.class)
            .isThrownBy(() -> createTableSink(pkSchema, getBasicSinkOptions()))
            .havingRootCause()
            .withMessage(
                    "The Kafka table 'default.default.t1' with 'test-format' format doesn't support"
                            + " defining PRIMARY KEY constraint on the table, because it can't guarantee"
                            + " the semantic of primary key.");
    assertThatExceptionOfType(ValidationException.class)
            .isThrownBy(() -> createTableSink(pkSchema, getKeyValueOptions()))
            .havingRootCause()
            .withMessage(
                    "The Kafka table 'default.default.t1' with 'test-format' format doesn't support"
                            + " defining PRIMARY KEY constraint on the table, because it can't guarantee"
                            + " the semantic of primary key.");
    Map<String, String> sourceOptions =
            getModifiedOptions(
                    getBasicSourceOptions(),
                    options -> options.put(
                            String.format("%s.%s", TestFormatFactory.IDENTIFIER, TestFormatFactory.CHANGELOG_MODE.key()),
                            "I;UA;UB;D"));
    // a primary key can be defined on a CDC table, so this should pass
    createTableSource(pkSchema, sourceOptions);
    assertThatExceptionOfType(ValidationException.class)
            .isThrownBy(() -> createTableSource(pkSchema, getBasicSourceOptions()))
            .havingRootCause()
            .withMessage(
                    "The Kafka table 'default.default.t1' with 'test-format' format doesn't support"
                            + " defining PRIMARY KEY constraint on the table, because it can't guarantee"
                            + " the semantic of primary key.");
}
Also used: ValidationException (org.apache.flink.table.api.ValidationException), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), Test (org.junit.jupiter.api.Test), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
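
In this test, SCHEMA and NAME are fixtures defined elsewhere in KafkaDynamicTableFactoryTest, so the snippet is not runnable on its own. Below is a minimal, self-contained sketch of the same construction pattern; the column definitions and the constraint name PK_name are illustrative assumptions, not values from the test.

import java.util.Arrays;
import java.util.Collections;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;

public class PkSchemaSketch {
    public static void main(String[] args) {
        // a fully resolved schema takes columns, watermark specs, and an optional
        // primary key; the primary-key column is declared NOT NULL
        ResolvedSchema pkSchema =
                new ResolvedSchema(
                        Arrays.asList(
                                Column.physical("name", DataTypes.STRING().notNull()),   // hypothetical column
                                Column.physical("cnt", DataTypes.BIGINT())),             // hypothetical column
                        Collections.emptyList(), // no watermark specs
                        UniqueConstraint.primaryKey("PK_name", Collections.singletonList("name")));
        System.out.println(pkSchema);
    }
}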

Example 42 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

From the class UpsertKafkaDynamicTableFactoryTest, method testCreateSinkTableWithoutPK.

@Test
public void testCreateSinkTableWithoutPK() {
    thrown.expect(ValidationException.class);
    thrown.expect(
            containsCause(
                    new ValidationException(
                            "'upsert-kafka' tables require to define a PRIMARY KEY constraint. "
                                    + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. "
                                    + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys.")));
    ResolvedSchema illegalSchema =
            ResolvedSchema.of(
                    Column.physical("region", DataTypes.STRING()),
                    Column.physical("view_count", DataTypes.BIGINT()));
    createTableSink(illegalSchema, getFullSinkOptions());
}
Also used: ValidationException (org.apache.flink.table.api.ValidationException), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), Test (org.junit.Test)
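
The schema built with ResolvedSchema.of(...) above carries no primary key, which is exactly what the factory rejects. A small standalone sketch of how that absence shows up on the ResolvedSchema API (the class name is made up for illustration):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class NoPkSchemaSketch {
    public static void main(String[] args) {
        // ResolvedSchema.of(...) creates a schema with columns only:
        // no watermark specs and no primary key
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("region", DataTypes.STRING()),
                        Column.physical("view_count", DataTypes.BIGINT()));
        // getPrimaryKey() returns an Optional, which is empty here; the
        // 'upsert-kafka' factory raises a ValidationException in that case
        System.out.println(schema.getPrimaryKey().isPresent()); // false
    }
}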

Example 43 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

From the class UpsertKafkaDynamicTableFactoryTest, method testCreateSourceTableWithoutPK.

// --------------------------------------------------------------------------------------------
// Negative tests
// --------------------------------------------------------------------------------------------
@Test
public void testCreateSourceTableWithoutPK() {
    thrown.expect(ValidationException.class);
    thrown.expect(
            containsCause(
                    new ValidationException(
                            "'upsert-kafka' tables require to define a PRIMARY KEY constraint. "
                                    + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. "
                                    + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys.")));
    ResolvedSchema illegalSchema =
            ResolvedSchema.of(
                    Column.physical("window_start", DataTypes.STRING()),
                    Column.physical("region", DataTypes.STRING()),
                    Column.physical("view_count", DataTypes.BIGINT()));
    createTableSource(illegalSchema, getFullSourceOptions());
}
Also used: ValidationException (org.apache.flink.table.api.ValidationException), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), Test (org.junit.Test)
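
For contrast, a schema with the same columns plus a primary key would satisfy the 'upsert-kafka' requirement. A sketch under the assumption that "region" is the key column; the constraint name PK_region is invented for illustration:

import java.util.Arrays;
import java.util.Collections;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;

public class UpsertKeyedSchemaSketch {
    public static void main(String[] args) {
        // same columns as the failing test, but with "region" declared NOT NULL
        // and used as the primary key
        ResolvedSchema keyedSchema =
                new ResolvedSchema(
                        Arrays.asList(
                                Column.physical("window_start", DataTypes.STRING()),
                                Column.physical("region", DataTypes.STRING().notNull()),
                                Column.physical("view_count", DataTypes.BIGINT())),
                        Collections.emptyList(), // no watermark specs
                        UniqueConstraint.primaryKey("PK_region", Collections.singletonList("region")));
        System.out.println(keyedSchema.getPrimaryKey().isPresent()); // true
    }
}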

Example 44 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

From the class KinesisDynamicTableFactoryTest, method testGoodTableSourceWithMetadataFields.

@Test
public void testGoodTableSourceWithMetadataFields() {
    ResolvedSchema sourceSchema = defaultSourceSchema();
    Map<String, String> sourceOptions = defaultTableOptions().build();
    Metadata[] requestedMetadata = new Metadata[] {ShardId, Timestamp};
    List<String> metadataKeys = Arrays.asList(ShardId.getFieldName(), Timestamp.getFieldName());
    DataType producedDataType = getProducedType(sourceSchema, requestedMetadata);
    // construct the actual DynamicTableSource using FactoryUtil
    KinesisDynamicSource actualSource =
            (KinesisDynamicSource) createTableSource(sourceSchema, sourceOptions);
    actualSource.applyReadableMetadata(metadataKeys, producedDataType);
    // construct the expected DynamicTableSource directly
    KinesisDynamicSource expectedSource =
            new KinesisDynamicSource(
                    sourceSchema.toPhysicalRowDataType(),
                    STREAM_NAME,
                    defaultConsumerProperties(),
                    new TestFormatFactory.DecodingFormatMock(",", true),
                    producedDataType,
                    Arrays.asList(requestedMetadata));
    // verify that the constructed DynamicTableSource is as expected
    assertEquals(expectedSource, actualSource);
    // verify that a copy of the constructed DynamicTableSource is as expected
    assertEquals(expectedSource, actualSource.copy());
}
Also used: Metadata (org.apache.flink.streaming.connectors.kinesis.table.RowDataKinesisDeserializationSchema.Metadata), DataType (org.apache.flink.table.types.DataType), TestFormatFactory (org.apache.flink.table.factories.TestFormatFactory), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), Test (org.junit.Test)
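
defaultSourceSchema(), defaultTableOptions(), and getProducedType(...) are private helpers of KinesisDynamicTableFactoryTest. The general idea behind the produced data type, namely the physical row type widened with appended metadata fields, can be sketched as follows; the schema, field names, and metadata types here are assumptions for illustration, not taken from the connector:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;

public class ProducedTypeSketch {
    public static void main(String[] args) {
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("user_id", DataTypes.BIGINT()),
                        Column.physical("payload", DataTypes.STRING()));
        // the physical row type contains only the declared physical columns
        DataType physical = schema.toPhysicalRowDataType();
        // a produced type widens the physical type with metadata fields appended
        // at the end (field names and types here are illustrative)
        DataType produced =
                DataTypes.ROW(
                        DataTypes.FIELD("user_id", DataTypes.BIGINT()),
                        DataTypes.FIELD("payload", DataTypes.STRING()),
                        DataTypes.FIELD("shard-id", DataTypes.STRING()),
                        DataTypes.FIELD("timestamp", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3)));
        System.out.println(physical);
        System.out.println(produced);
    }
}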

Example 45 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

From the class OracleTableSinkITCase, method testFlushBufferWhenCheckpoint.

@Test
public void testFlushBufferWhenCheckpoint() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put("connector", "jdbc");
    options.put("url", containerUrl);
    options.put("table-name", OUTPUT_TABLE5);
    // disable interval-based flushing so that records are flushed only on checkpoint
    options.put("sink.buffer-flush.interval", "0");
    ResolvedSchema schema = ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT().notNull()));
    DynamicTableSink tableSink = createTableSink(schema, options);
    SinkRuntimeProviderContext context = new SinkRuntimeProviderContext(false);
    SinkFunctionProvider sinkProvider =
            (SinkFunctionProvider) tableSink.getSinkRuntimeProvider(context);
    GenericJdbcSinkFunction<RowData> sinkFunction =
            (GenericJdbcSinkFunction<RowData>) sinkProvider.createSinkFunction();
    sinkFunction.setRuntimeContext(new MockStreamingRuntimeContext(true, 1, 0));
    sinkFunction.open(new Configuration());
    sinkFunction.invoke(GenericRowData.of(1L), SinkContextUtil.forTimestamp(1));
    sinkFunction.invoke(GenericRowData.of(2L), SinkContextUtil.forTimestamp(1));
    // no checkpoint has happened yet, so the buffered rows are not visible in the database
    check(new Row[] {}, containerUrl, OUTPUT_TABLE5, new String[] {"id"});
    // snapshotState simulates a checkpoint and flushes the buffer
    sinkFunction.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));
    check(new Row[] {Row.of(1L), Row.of(2L)}, containerUrl, OUTPUT_TABLE5, new String[] {"id"});
    sinkFunction.close();
}
Also used: SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext), MockStreamingRuntimeContext (org.apache.flink.streaming.util.MockStreamingRuntimeContext), Configuration (org.apache.flink.configuration.Configuration), HashMap (java.util.HashMap), StateSnapshotContextSynchronousImpl (org.apache.flink.runtime.state.StateSnapshotContextSynchronousImpl), DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink), SinkFunctionProvider (org.apache.flink.table.connector.sink.SinkFunctionProvider), GenericRowData (org.apache.flink.table.data.GenericRowData), RowData (org.apache.flink.table.data.RowData), GenericJdbcSinkFunction (org.apache.flink.connector.jdbc.internal.GenericJdbcSinkFunction), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), Test (org.junit.Test)
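
The single-column schema above shows how a NOT NULL constraint travels with the resolved columns. A minimal standalone sketch (the class name is made up, and the expected output in the comments is illustrative) of what the JDBC factory sees:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class SinkSchemaSketch {
    public static void main(String[] args) {
        ResolvedSchema schema =
                ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT().notNull()));
        // the resolved schema exposes the column metadata that table factories
        // validate against when building a DynamicTableSink
        System.out.println(schema.getColumnNames());     // [id]
        System.out.println(schema.getColumnDataTypes()); // [BIGINT NOT NULL]
    }
}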

Aggregations

ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 84
Test (org.junit.Test): 50
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 20
DataType (org.apache.flink.table.types.DataType): 20
RowData (org.apache.flink.table.data.RowData): 17
ValidationException (org.apache.flink.table.api.ValidationException): 14
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable): 14
List (java.util.List): 11
SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext): 11
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource): 10
Column (org.apache.flink.table.catalog.Column): 9
LogicalType (org.apache.flink.table.types.logical.LogicalType): 9
RowType (org.apache.flink.table.types.logical.RowType): 9
HashMap (java.util.HashMap): 8
Collectors (java.util.stream.Collectors): 8
RelDataType (org.apache.calcite.rel.type.RelDataType): 8
Internal (org.apache.flink.annotation.Internal): 8
HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions): 6
FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory): 6
Row (org.apache.flink.types.Row): 6