use of org.apache.flink.table.connector.source.DynamicTableSource in project flink by apache.
the class JdbcDynamicTableFactoryTest method testJdbcLookupProperties.
@Test
public void testJdbcLookupProperties() {
    Map<String, String> properties = getAllOptions();
    properties.put("lookup.cache.max-rows", "1000");
    properties.put("lookup.cache.ttl", "10s");
    properties.put("lookup.max-retries", "10");

    DynamicTableSource actual = createTableSource(SCHEMA, properties);
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb").setTableName("mytable").build();
    JdbcLookupOptions lookupOptions = JdbcLookupOptions.builder()
            .setCacheMaxSize(1000).setCacheExpireMs(10_000).setMaxRetryTimes(10).build();
    JdbcDynamicTableSource expected = new JdbcDynamicTableSource(
            options, JdbcReadOptions.builder().build(), lookupOptions,
            SCHEMA.toPhysicalRowDataType());
    assertEquals(expected, actual);
}
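The test extends a base option map from getAllOptions(). A plausible sketch of that fixture follows; the real helper in Flink's test class may carry more defaults, but at minimum it must supply the options needed to resolve the 'jdbc' connector, matching the URL and table name asserted above.

import java.util.HashMap;
import java.util.Map;

// Hypothetical reconstruction of the getAllOptions() fixture used above
// (the actual Flink test may differ): the minimal options required for
// factory discovery of the 'jdbc' connector.
static Map<String, String> getAllOptions() {
    Map<String, String> options = new HashMap<>();
    options.put("connector", "jdbc");
    options.put("url", "jdbc:derby:memory:mydb");
    options.put("table-name", "mytable");
    return options;
}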
use of org.apache.flink.table.connector.source.DynamicTableSource in project flink by apache.
the class JdbcDynamicTableFactoryTest method testJdbcCommonProperties.
@Test
public void testJdbcCommonProperties() {
    Map<String, String> properties = getAllOptions();
    properties.put("driver", "org.apache.derby.jdbc.EmbeddedDriver");
    properties.put("username", "user");
    properties.put("password", "pass");
    properties.put("connection.max-retry-timeout", "120s");

    // validation for source
    DynamicTableSource actualSource = createTableSource(SCHEMA, properties);
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb").setTableName("mytable")
            .setDriverName("org.apache.derby.jdbc.EmbeddedDriver")
            .setUsername("user").setPassword("pass")
            .setConnectionCheckTimeoutSeconds(120).build();
    JdbcLookupOptions lookupOptions = JdbcLookupOptions.builder()
            .setCacheMaxSize(-1).setCacheExpireMs(10_000).setMaxRetryTimes(3).build();
    JdbcDynamicTableSource expectedSource = new JdbcDynamicTableSource(
            options, JdbcReadOptions.builder().build(), lookupOptions,
            SCHEMA.toPhysicalRowDataType());
    assertEquals(expectedSource, actualSource);

    // validation for sink, using the default flush configuration
    DynamicTableSink actualSink = createTableSink(SCHEMA, properties);
    JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
            .withBatchSize(100).withBatchIntervalMs(1000).withMaxRetries(3).build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(options.getTableName()).withDialect(options.getDialect())
            .withFieldNames(SCHEMA.getColumnNames().toArray(new String[0]))
            .withKeyFields("bbb", "aaa").build();
    JdbcDynamicTableSink expectedSink = new JdbcDynamicTableSink(
            options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType());
    assertEquals(expectedSink, actualSink);
}
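Outside the factory test, the same common properties map directly onto SQL DDL. A minimal sketch follows; the column names are assumptions (chosen to mirror the "aaa"/"bbb" key fields in the test), while the connector option keys are the documented Flink JDBC ones exercised above.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

// Illustrative only: declaring a JDBC table with the same common options
// that the factory test validates.
TableEnvironment tEnv =
        TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
tEnv.executeSql(
        "CREATE TABLE mytable (\n"
                + "  aaa STRING,\n"
                + "  bbb STRING\n"
                + ") WITH (\n"
                + "  'connector' = 'jdbc',\n"
                + "  'url' = 'jdbc:derby:memory:mydb',\n"
                + "  'table-name' = 'mytable',\n"
                + "  'driver' = 'org.apache.derby.jdbc.EmbeddedDriver',\n"
                + "  'username' = 'user',\n"
                + "  'password' = 'pass',\n"
                + "  'connection.max-retry-timeout' = '120s'\n"
                + ")");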
use of org.apache.flink.table.connector.source.DynamicTableSource in project flink by apache.
the class UpsertKafkaDynamicTableFactoryTest method testTableSource.
@Test
public void testTableSource() {
    final DataType producedDataType = SOURCE_SCHEMA.toPhysicalRowDataType();

    // Construct the table source via the options and the table source factory.
    final DynamicTableSource actualSource =
            createTableSource(SOURCE_SCHEMA, getFullSourceOptions());
    final KafkaDynamicSource expectedSource = createExpectedScanSource(
            producedDataType, keyDecodingFormat, valueDecodingFormat,
            SOURCE_KEY_FIELDS, SOURCE_VALUE_FIELDS, null,
            SOURCE_TOPIC, UPSERT_KAFKA_SOURCE_PROPERTIES);
    assertEquals(expectedSource, actualSource);

    final KafkaDynamicSource actualUpsertKafkaSource = (KafkaDynamicSource) actualSource;
    ScanTableSource.ScanRuntimeProvider provider =
            actualUpsertKafkaSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    assertKafkaSource(provider);
}
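For orientation, getFullSourceOptions() roughly corresponds to an upsert-kafka DDL like the sketch below. The topic, bootstrap servers, formats, and schema are illustrative assumptions, but the PRIMARY KEY clause is genuinely required by the upsert-kafka connector, since it defines the Kafka message key that the key decoding format reads.

// given a TableEnvironment tEnv as in the earlier sketch
tEnv.executeSql(
        "CREATE TABLE upsert_source (\n"
                + "  user_id BIGINT,\n"
                + "  balance DOUBLE,\n"
                + "  PRIMARY KEY (user_id) NOT ENFORCED\n"
                + ") WITH (\n"
                + "  'connector' = 'upsert-kafka',\n"
                + "  'topic' = 'source-topic',\n"
                + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                + "  'key.format' = 'json',\n"
                + "  'value.format' = 'json'\n"
                + ")");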
use of org.apache.flink.table.connector.source.DynamicTableSource in project flink by apache.
the class KafkaDynamicTableFactoryTest method testSetOffsetResetForStartFromGroupOffsets.
private void testSetOffsetResetForStartFromGroupOffsets(String value) {
    final Map<String, String> modifiedOptions = getModifiedOptions(
            getBasicSourceOptions(),
            options -> {
                options.remove("scan.startup.mode");
                if (value == null) {
                    return;
                }
                options.put(PROPERTIES_PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, value);
            });
    final DynamicTableSource tableSource = createTableSource(SCHEMA, modifiedOptions);
    assertThat(tableSource).isInstanceOf(KafkaDynamicSource.class);
    ScanTableSource.ScanRuntimeProvider provider = ((KafkaDynamicSource) tableSource)
            .getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    assertThat(provider).isInstanceOf(DataStreamScanProvider.class);

    final KafkaSource<?> kafkaSource = assertKafkaSource(provider);
    final Configuration configuration =
            KafkaSourceTestUtils.getKafkaSourceConfiguration(kafkaSource);
    if (value == null) {
        // No explicit auto.offset.reset configured: expect the "none" fallback.
        assertThat(configuration.toMap().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG))
                .isEqualTo("none");
    } else {
        assertThat(configuration.toMap().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG))
                .isEqualTo(value);
    }
}
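Callers of this parameterized helper would look something like the following sketch; the method names are assumptions, and the real test class may organize the cases differently.

@Test
public void testSetOffsetResetForStartFromGroupOffsetsToEarliest() {
    // An explicit reset policy should be forwarded to the consumer config as-is.
    testSetOffsetResetForStartFromGroupOffsets("earliest");
}

@Test
public void testSetOffsetResetForStartFromGroupOffsetsWithoutPolicy() {
    // No policy configured: the helper expects the "none" fallback.
    testSetOffsetResetForStartFromGroupOffsets(null);
}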
use of org.apache.flink.table.connector.source.DynamicTableSource in project flink by apache.
the class KafkaDynamicTableFactoryTest method testTableSourceCommitOnCheckpointDisabled.
@Test
public void testTableSourceCommitOnCheckpointDisabled() {
    final Map<String, String> modifiedOptions = getModifiedOptions(
            getBasicSourceOptions(), options -> options.remove("properties.group.id"));
    final DynamicTableSource tableSource = createTableSource(SCHEMA, modifiedOptions);
    assertThat(tableSource).isInstanceOf(KafkaDynamicSource.class);
    ScanTableSource.ScanRuntimeProvider providerWithoutGroupId = ((KafkaDynamicSource) tableSource)
            .getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    assertThat(providerWithoutGroupId).isInstanceOf(DataStreamScanProvider.class);

    final KafkaSource<?> kafkaSource = assertKafkaSource(providerWithoutGroupId);
    final Configuration configuration =
            KafkaSourceTestUtils.getKafkaSourceConfiguration(kafkaSource);
    // Offset commit on checkpoint should be disabled when no consumer group is set.
    assertThat(configuration.get(KafkaSourceOptions.COMMIT_OFFSETS_ON_CHECKPOINT)).isFalse();
    assertThat(configuration.get(
                    ConfigOptions.key(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)
                            .booleanType().noDefaultValue()))
            .isFalse();
}
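The test above verifies that removing 'properties.group.id' disables offset commits on checkpoint. The converse case, a source declared with an explicit consumer group, can be sketched in DDL as below; the topic, group id, and schema are illustrative assumptions.

// given a TableEnvironment tEnv as in the earlier sketch
tEnv.executeSql(
        "CREATE TABLE kafka_source (\n"
                + "  msg STRING\n"
                + ") WITH (\n"
                + "  'connector' = 'kafka',\n"
                + "  'topic' = 'some-topic',\n"
                + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                + "  'properties.group.id' = 'my-group',\n"
                + "  'scan.startup.mode' = 'group-offsets',\n"
                + "  'format' = 'json'\n"
                + ")");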