Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class UpsertKafkaDynamicTableFactoryTest, the method testTableSource:
@Test
public void testTableSource() {
    final DataType producedDataType = SOURCE_SCHEMA.toPhysicalRowDataType();
    // Construct table source using options and table source factory
    final DynamicTableSource actualSource =
            createTableSource(SOURCE_SCHEMA, getFullSourceOptions());
    final KafkaDynamicSource expectedSource =
            createExpectedScanSource(
                    producedDataType, keyDecodingFormat, valueDecodingFormat,
                    SOURCE_KEY_FIELDS, SOURCE_VALUE_FIELDS, null,
                    SOURCE_TOPIC, UPSERT_KAFKA_SOURCE_PROPERTIES);
    // JUnit's assertEquals takes the expected value first
    assertEquals(expectedSource, actualSource);

    final KafkaDynamicSource actualUpsertKafkaSource = (KafkaDynamicSource) actualSource;
    ScanTableSource.ScanRuntimeProvider provider =
            actualUpsertKafkaSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    assertKafkaSource(provider);
}
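The assertKafkaSource helper is not part of this excerpt. A minimal sketch of what such a check could look like, assuming the upsert-kafka factory exposes its runtime source through a SourceProvider wrapping the KafkaSource connector (both the provider type and the wrapped source class are assumptions here, not taken from the excerpt):

// Sketch only; assumed imports in the test class:
// import org.apache.flink.api.connector.source.Source;
// import org.apache.flink.connector.kafka.source.KafkaSource;
// import org.apache.flink.table.connector.source.SourceProvider;
// import static org.assertj.core.api.Assertions.assertThat;
private static void assertKafkaSource(ScanTableSource.ScanRuntimeProvider provider) {
    // Assumed wiring: ScanRuntimeProvider -> SourceProvider -> KafkaSource
    assertThat(provider).isInstanceOf(SourceProvider.class);
    final Source<?, ?, ?> source = ((SourceProvider) provider).createSource();
    assertThat(source).isInstanceOf(KafkaSource.class);
}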
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class KafkaDynamicTableFactoryTest, the method testTableSourceWithKeyValueAndMetadata:
@Test
public void testTableSourceWithKeyValueAndMetadata() {
    final Map<String, String> options = getKeyValueOptions();
    options.put("value.test-format.readable-metadata", "metadata_1:INT, metadata_2:STRING");

    final DynamicTableSource actualSource = createTableSource(SCHEMA_WITH_METADATA, options);
    final KafkaDynamicSource actualKafkaSource = (KafkaDynamicSource) actualSource;
    // initialize stateful testing formats
    actualKafkaSource.applyReadableMetadata(
            Arrays.asList("timestamp", "value.metadata_2"),
            SCHEMA_WITH_METADATA.toSourceRowDataType());
    actualKafkaSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);

    final DecodingFormatMock expectedKeyFormat =
            new DecodingFormatMock("#", false, ChangelogMode.insertOnly(), Collections.emptyMap());
    expectedKeyFormat.producedDataType =
            DataTypes.ROW(DataTypes.FIELD(NAME, DataTypes.STRING())).notNull();

    final Map<String, DataType> expectedReadableMetadata = new HashMap<>();
    expectedReadableMetadata.put("metadata_1", DataTypes.INT());
    expectedReadableMetadata.put("metadata_2", DataTypes.STRING());

    final DecodingFormatMock expectedValueFormat =
            new DecodingFormatMock("|", false, ChangelogMode.insertOnly(), expectedReadableMetadata);
    expectedValueFormat.producedDataType =
            DataTypes.ROW(
                    DataTypes.FIELD(COUNT, DataTypes.DECIMAL(38, 18)),
                    DataTypes.FIELD("metadata_2", DataTypes.STRING())).notNull();
    expectedValueFormat.metadataKeys = Collections.singletonList("metadata_2");

    final KafkaDynamicSource expectedKafkaSource =
            createExpectedScanSource(
                    SCHEMA_WITH_METADATA.toPhysicalRowDataType(),
                    expectedKeyFormat, expectedValueFormat,
                    new int[] {0}, new int[] {1}, null,
                    Collections.singletonList(TOPIC), null,
                    KAFKA_FINAL_SOURCE_PROPERTIES, StartupMode.GROUP_OFFSETS,
                    Collections.emptyMap(), 0);
    expectedKafkaSource.producedDataType = SCHEMA_WITH_METADATA.toSourceRowDataType();
    expectedKafkaSource.metadataKeys = Collections.singletonList("timestamp");

    assertThat(actualSource).isEqualTo(expectedKafkaSource);
}
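DecodingFormatMock is a stateful testing format whose definition is not shown above. A reduced sketch of the shape the assertions rely on, assuming the mock implements DecodingFormat's readable-metadata hooks and exposes its state through public fields; the constructor parameter meanings are inferred from the calls above and are assumptions:

// Sketch only; assumed imports: DataType, java.util collections, plus
// org.apache.flink.api.common.serialization.DeserializationSchema,
// org.apache.flink.table.connector.ChangelogMode,
// org.apache.flink.table.connector.format.DecodingFormat,
// org.apache.flink.table.connector.source.DynamicTableSource,
// org.apache.flink.table.data.RowData
public class DecodingFormatMock implements DecodingFormat<DeserializationSchema<RowData>> {

    // Public mutable state that the test sets and compares:
    public DataType producedDataType;
    public List<String> metadataKeys = Collections.emptyList();

    private final String delimiter;          // "#" / "|" in the test above
    private final boolean failOnMissing;     // meaning inferred, assumption
    private final ChangelogMode changelogMode;
    private final Map<String, DataType> readableMetadata;

    public DecodingFormatMock(
            String delimiter,
            boolean failOnMissing,
            ChangelogMode changelogMode,
            Map<String, DataType> readableMetadata) {
        this.delimiter = delimiter;
        this.failOnMissing = failOnMissing;
        this.changelogMode = changelogMode;
        this.readableMetadata = readableMetadata;
    }

    @Override
    public Map<String, DataType> listReadableMetadata() {
        return readableMetadata;
    }

    @Override
    public void applyReadableMetadata(List<String> metadataKeys) {
        this.metadataKeys = metadataKeys;
    }

    @Override
    public DeserializationSchema<RowData> createRuntimeDecoder(
            DynamicTableSource.Context context, DataType physicalDataType) {
        // The real testing format builds a decoder here; the sketch only
        // records the requested type.
        this.producedDataType = physicalDataType;
        return null; // placeholder, not exercised by the equality checks
    }

    @Override
    public ChangelogMode getChangelogMode() {
        return changelogMode;
    }

    // equals()/hashCode() over all fields are required for the
    // assertThat(actualSource).isEqualTo(expectedKafkaSource) comparison;
    // omitted here for brevity.
}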
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class KafkaConnectorOptionsUtilTest, the method testMissingKeyFormatProjection:
@Test
public void testMissingKeyFormatProjection() {
    final DataType dataType = ROW(FIELD("id", INT()));
    final Map<String, String> options = createTestOptions();
    final Configuration config = Configuration.fromMap(options);
    try {
        createKeyFormatProjection(config, dataType);
        fail();
    } catch (ValidationException e) {
        assertThat(
                e,
                hasMessage(
                        equalTo(
                                "A key format 'key.format' requires the declaration of one or more "
                                        + "of key fields using 'key.fields'.")));
    }
}
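createTestOptions is a shared fixture not included in this excerpt. A plausible minimal version, assuming it only needs to register a key and value format so that projection validation kicks in; the 'test-format' name matches the mock format used above, while the delimiter values are illustrative:

private static Map<String, String> createTestOptions() {
    final Map<String, String> options = new HashMap<>();
    // 'key.format' must be present for key projection validation to trigger;
    // deliberately no 'key.fields' entry, so each test can add its own.
    options.put("key.format", "test-format");
    options.put("key.test-format.delimiter", "#");
    options.put("value.format", "test-format");
    options.put("value.test-format.delimiter", "|");
    return options;
}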
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class KafkaConnectorOptionsUtilTest, the method testInvalidKeyFormatFieldProjection:
@Test
public void testInvalidKeyFormatFieldProjection() {
    final DataType dataType = ROW(FIELD("id", INT()), FIELD("name", STRING()));
    final Map<String, String> options = createTestOptions();
    options.put("key.fields", "non_existing");
    final Configuration config = Configuration.fromMap(options);
    try {
        createKeyFormatProjection(config, dataType);
        fail();
    } catch (ValidationException e) {
        assertThat(
                e,
                hasMessage(
                        equalTo(
                                "Could not find the field 'non_existing' in the table schema for "
                                        + "usage in the key format. A key field must be a regular, "
                                        + "physical column. The following columns can be selected "
                                        + "in the 'key.fields' option:\n"
                                        + "[id, name]")));
    }
}
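For contrast with the failure path, a hedged sketch of a passing call, assuming createKeyFormatProjection returns the physical-column indices of the declared key fields; the expected result is an inference, not taken from the excerpt:

final DataType dataType = ROW(FIELD("id", INT()), FIELD("name", STRING()));
final Map<String, String> options = createTestOptions();
options.put("key.fields", "name");
final Configuration config = Configuration.fromMap(options);
// 'name' is the second physical column, so the key projection should be [1].
final int[] projection = createKeyFormatProjection(config, dataType);
assertArrayEquals(new int[] {1}, projection);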
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class KafkaConnectorOptionsUtilTest, the method testInvalidValueFormatProjection:
@Test
public void testInvalidValueFormatProjection() {
    final DataType dataType = ROW(FIELD("k_id", INT()), FIELD("id", STRING()));
    final Map<String, String> options = createTestOptions();
    options.put("key.fields", "k_id");
    options.put("key.fields-prefix", "k_");
    final Configuration config = Configuration.fromMap(options);
    try {
        createValueFormatProjection(config, dataType);
        fail();
    } catch (ValidationException e) {
        assertThat(
                e,
                hasMessage(
                        equalTo(
                                "A key prefix is not allowed when option 'value.fields-include' "
                                        + "is set to 'ALL'. Set it to 'EXCEPT_KEY' instead to avoid field overlaps.")));
    }
}
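The error message itself points to the fix. A hedged sketch of the corresponding passing configuration, assuming createValueFormatProjection likewise returns physical-column indices and that 'EXCEPT_KEY' removes the prefixed key column from the value side (again an inference from the message above):

final DataType dataType = ROW(FIELD("k_id", INT()), FIELD("id", STRING()));
final Map<String, String> options = createTestOptions();
options.put("key.fields", "k_id");
options.put("key.fields-prefix", "k_");
options.put("value.fields-include", "EXCEPT_KEY");
final Configuration config = Configuration.fromMap(options);
// Only the non-key column 'id' remains for the value format, at index 1.
final int[] projection = createValueFormatProjection(config, dataType);
assertArrayEquals(new int[] {1}, projection);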