Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class KafkaDynamicTableFactoryTest, method testPrimaryKeyValidation.
@Test
public void testPrimaryKeyValidation() {
    final ResolvedSchema pkSchema =
            new ResolvedSchema(
                    SCHEMA.getColumns(),
                    SCHEMA.getWatermarkSpecs(),
                    UniqueConstraint.primaryKey(NAME, Collections.singletonList(NAME)));

    Map<String, String> sinkOptions =
            getModifiedOptions(
                    getBasicSinkOptions(),
                    options ->
                            options.put(
                                    String.format(
                                            "%s.%s",
                                            TestFormatFactory.IDENTIFIER,
                                            TestFormatFactory.CHANGELOG_MODE.key()),
                                    "I;UA;UB;D"));
    // pk can be defined on cdc table, should pass
    createTableSink(pkSchema, sinkOptions);

    assertThatExceptionOfType(ValidationException.class)
            .isThrownBy(() -> createTableSink(pkSchema, getBasicSinkOptions()))
            .havingRootCause()
            .withMessage(
                    "The Kafka table 'default.default.t1' with 'test-format' format"
                            + " doesn't support defining PRIMARY KEY constraint on the table, because it can't"
                            + " guarantee the semantic of primary key.");

    assertThatExceptionOfType(ValidationException.class)
            .isThrownBy(() -> createTableSink(pkSchema, getKeyValueOptions()))
            .havingRootCause()
            .withMessage(
                    "The Kafka table 'default.default.t1' with 'test-format' format"
                            + " doesn't support defining PRIMARY KEY constraint on the table, because it can't"
                            + " guarantee the semantic of primary key.");

    Map<String, String> sourceOptions =
            getModifiedOptions(
                    getBasicSourceOptions(),
                    options ->
                            options.put(
                                    String.format(
                                            "%s.%s",
                                            TestFormatFactory.IDENTIFIER,
                                            TestFormatFactory.CHANGELOG_MODE.key()),
                                    "I;UA;UB;D"));
    // pk can be defined on cdc table, should pass
    createTableSource(pkSchema, sourceOptions);

    assertThatExceptionOfType(ValidationException.class)
            .isThrownBy(() -> createTableSource(pkSchema, getBasicSourceOptions()))
            .havingRootCause()
            .withMessage(
                    "The Kafka table 'default.default.t1' with 'test-format' format"
                            + " doesn't support defining PRIMARY KEY constraint on the table, because it can't"
                            + " guarantee the semantic of primary key.");
}
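The pkSchema above uses the three-argument ResolvedSchema constructor (columns, watermark specs, primary key), since the ResolvedSchema.of(...) shortcut does not accept a constraint. A minimal, self-contained sketch of the same pattern, assuming illustrative column names ("id", "name") and constraint name ("PK_id") rather than the test's fixtures:

import java.util.Arrays;
import java.util.Collections;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;

// Hypothetical helper; names and types are illustrative, not from the test.
static ResolvedSchema pkSchemaSketch() {
    return new ResolvedSchema(
            Arrays.asList(
                    Column.physical("id", DataTypes.BIGINT().notNull()), // PK columns must be NOT NULL
                    Column.physical("name", DataTypes.STRING())),
            Collections.emptyList(), // no watermark specs
            UniqueConstraint.primaryKey("PK_id", Collections.singletonList("id")));
}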
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class UpsertKafkaDynamicTableFactoryTest, method testCreateSinkTableWithoutPK.
@Test
public void testCreateSinkTableWithoutPK() {
    thrown.expect(ValidationException.class);
    thrown.expect(
            containsCause(
                    new ValidationException(
                            "'upsert-kafka' tables require to define a PRIMARY KEY constraint. "
                                    + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. "
                                    + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys.")));
    ResolvedSchema illegalSchema =
            ResolvedSchema.of(
                    Column.physical("region", DataTypes.STRING()),
                    Column.physical("view_count", DataTypes.BIGINT()));
    createTableSink(illegalSchema, getFullSinkOptions());
}
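For contrast, a schema that would satisfy the upsert-kafka validation needs a primary key on NOT NULL columns, which again requires the full constructor because ResolvedSchema.of(...) cannot carry a constraint. A hedged sketch, with the constraint name "PK_region" chosen for illustration:

ResolvedSchema legalSchema =
        new ResolvedSchema(
                Arrays.asList(
                        Column.physical("region", DataTypes.STRING().notNull()),
                        Column.physical("view_count", DataTypes.BIGINT())),
                Collections.emptyList(),
                UniqueConstraint.primaryKey("PK_region", Collections.singletonList("region")));
createTableSink(legalSchema, getFullSinkOptions()); // should validate successfully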
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class UpsertKafkaDynamicTableFactoryTest, method testCreateSourceTableWithoutPK.
// --------------------------------------------------------------------------------------------
// Negative tests
// --------------------------------------------------------------------------------------------
@Test
public void testCreateSourceTableWithoutPK() {
    thrown.expect(ValidationException.class);
    thrown.expect(
            containsCause(
                    new ValidationException(
                            "'upsert-kafka' tables require to define a PRIMARY KEY constraint. "
                                    + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. "
                                    + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys.")));
    ResolvedSchema illegalSchema =
            ResolvedSchema.of(
                    Column.physical("window_start", DataTypes.STRING()),
                    Column.physical("region", DataTypes.STRING()),
                    Column.physical("view_count", DataTypes.BIGINT()));
    createTableSource(illegalSchema, getFullSourceOptions());
}
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class KinesisDynamicTableFactoryTest, method testGoodTableSourceWithMetadataFields.
@Test
public void testGoodTableSourceWithMetadataFields() {
    ResolvedSchema sourceSchema = defaultSourceSchema();
    Map<String, String> sourceOptions = defaultTableOptions().build();

    Metadata[] requestedMetadata = new Metadata[] {ShardId, Timestamp};
    List<String> metadataKeys = Arrays.asList(ShardId.getFieldName(), Timestamp.getFieldName());
    DataType producedDataType = getProducedType(sourceSchema, requestedMetadata);

    // Construct the actual DynamicTableSource using FactoryUtil
    KinesisDynamicSource actualSource =
            (KinesisDynamicSource) createTableSource(sourceSchema, sourceOptions);
    actualSource.applyReadableMetadata(metadataKeys, producedDataType);

    // Construct the expected DynamicTableSource using the factory under test
    KinesisDynamicSource expectedSource =
            new KinesisDynamicSource(
                    sourceSchema.toPhysicalRowDataType(),
                    STREAM_NAME,
                    defaultConsumerProperties(),
                    new TestFormatFactory.DecodingFormatMock(",", true),
                    producedDataType,
                    Arrays.asList(requestedMetadata));

    // verify that the constructed DynamicTableSource is as expected
    assertEquals(expectedSource, actualSource);
    // verify that the copy of the constructed DynamicTableSource is as expected
    assertEquals(expectedSource, actualSource.copy());
}
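getProducedType(...) is a helper defined in the test class; presumably it appends the requested metadata columns after the physical columns of the schema. A hedged sketch of that idea (the real helper may differ, and the metadata types here are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;

// Hypothetical sketch: physical columns first, then one field per metadata key.
static DataType producedTypeSketch(ResolvedSchema schema, Map<String, DataType> metadata) {
    List<DataTypes.Field> fields = new ArrayList<>();
    for (Column column : schema.getColumns()) {
        if (column.isPhysical()) {
            fields.add(DataTypes.FIELD(column.getName(), column.getDataType()));
        }
    }
    metadata.forEach((name, type) -> fields.add(DataTypes.FIELD(name, type)));
    return DataTypes.ROW(fields.toArray(new DataTypes.Field[0])).notNull();
}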
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class OracleTableSinkITCase, method testFlushBufferWhenCheckpoint.
@Test
public void testFlushBufferWhenCheckpoint() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put("connector", "jdbc");
    options.put("url", containerUrl);
    options.put("table-name", OUTPUT_TABLE5);
    // Disable interval-based flushing so records are only flushed on checkpoint.
    options.put("sink.buffer-flush.interval", "0");

    ResolvedSchema schema = ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT().notNull()));
    DynamicTableSink tableSink = createTableSink(schema, options);

    SinkRuntimeProviderContext context = new SinkRuntimeProviderContext(false);
    SinkFunctionProvider sinkProvider =
            (SinkFunctionProvider) tableSink.getSinkRuntimeProvider(context);
    GenericJdbcSinkFunction<RowData> sinkFunction =
            (GenericJdbcSinkFunction<RowData>) sinkProvider.createSinkFunction();
    sinkFunction.setRuntimeContext(new MockStreamingRuntimeContext(true, 1, 0));
    sinkFunction.open(new Configuration());

    sinkFunction.invoke(GenericRowData.of(1L), SinkContextUtil.forTimestamp(1));
    sinkFunction.invoke(GenericRowData.of(2L), SinkContextUtil.forTimestamp(1));
    // Nothing has been flushed yet, so the table must still be empty.
    check(new Row[] {}, containerUrl, OUTPUT_TABLE5, new String[] {"id"});

    // Snapshotting state (the checkpoint hook) triggers the flush; both rows become visible.
    sinkFunction.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));
    check(new Row[] {Row.of(1L), Row.of(2L)}, containerUrl, OUTPUT_TABLE5, new String[] {"id"});
    sinkFunction.close();
}
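Setting 'sink.buffer-flush.interval' to 0 turns off timer-based flushing, which is what lets the test assert an empty table before snapshotState runs. For comparison, a sketch of the more common configuration that flushes by time and by row count as well; the URL and table name here are placeholders, while the option keys are the standard JDBC connector keys:

Map<String, String> options = new HashMap<>();
options.put("connector", "jdbc");
options.put("url", "jdbc:oracle:thin:@//localhost:1521/xe"); // placeholder URL
options.put("table-name", "my_output_table");                // placeholder table
options.put("sink.buffer-flush.interval", "1s");   // flush at least every second
options.put("sink.buffer-flush.max-rows", "100");  // ...or once 100 rows are buffered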