Use of org.apache.flink.connector.jdbc.internal.options.JdbcConnectorOptions in the project flink-connector-jdbc-ext by lixz3321.
From the class JdbcTableSourceSinkFactoryTest, method testJdbcLookupProperties:
@Test
public void testJdbcLookupProperties() {
    Map<String, String> properties = getBasicProperties();
    properties.put("connector.lookup.cache.max-rows", "1000");
    properties.put("connector.lookup.cache.ttl", "10s");
    properties.put("connector.lookup.max-retries", "10");
    final StreamTableSource<?> actual =
            TableFactoryService.find(StreamTableSourceFactory.class, properties)
                    .createStreamTableSource(properties);
    final JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb").setTableName("mytable").build();
    // the "10s" cache TTL surfaces as 10_000 ms in the expected lookup options
    final JdbcLookupOptions lookupOptions = JdbcLookupOptions.builder()
            .setCacheMaxSize(1000).setCacheExpireMs(10_000).setMaxRetryTimes(10).build();
    final JdbcTableSource expected = JdbcTableSource.builder()
            .setOptions(options).setLookupOptions(lookupOptions).setSchema(schema).build();
    assertEquals(expected, actual);
}
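The getBasicProperties() helper is defined elsewhere in the test class. A minimal sketch of what it could look like, assuming the legacy "connector.*" descriptor keys and the Derby URL and table name that appear in the expected options; the key names and schema fields are assumptions, not the project's actual helper.

// Hypothetical sketch of getBasicProperties(); key names and schema fields are assumed.
private Map<String, String> getBasicProperties() {
    Map<String, String> properties = new HashMap<>();  // java.util.HashMap / java.util.Map
    // legacy descriptor-style connector properties
    properties.put("connector.type", "jdbc");
    properties.put("connector.url", "jdbc:derby:memory:mydb");
    properties.put("connector.table", "mytable");
    // a minimal schema so the factory can build the table source
    properties.put("schema.0.name", "aaa");
    properties.put("schema.0.data-type", "INT");
    return properties;
}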
From the class JdbcDynamicTableFactoryTest, method testJdbcSinkProperties:
@Test
public void testJdbcSinkProperties() {
    Map<String, String> properties = getAllOptions();
    properties.put("sink.buffer-flush.max-rows", "1000");
    properties.put("sink.buffer-flush.interval", "2min");
    properties.put("sink.max-retries", "5");
    DynamicTableSink actual = createTableSink(SCHEMA, properties);
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb").setTableName("mytable").build();
    // the "2min" flush interval surfaces as 120_000 ms in the expected execution options
    JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
            .withBatchSize(1000).withBatchIntervalMs(120_000).withMaxRetries(5).build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(options.getTableName())
            .withDialect(options.getDialect())
            .withFieldNames(SCHEMA.getColumnNames().toArray(new String[0]))
            .withKeyFields("bbb", "aaa")
            .build();
    JdbcDynamicTableSink expected = new JdbcDynamicTableSink(
            options, executionOptions, dmlOptions, TableSchema.fromResolvedSchema(SCHEMA));
    assertEquals(expected, actual);
}
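getAllOptions() is likewise defined elsewhere in JdbcDynamicTableFactoryTest. A minimal sketch, assuming only the base DDL-style options of the 'jdbc' connector with the Derby URL and table name from the expected options; the exact contents are an assumption. The individual tests then layer the sink.* or scan.* keys on top of this base map.

// Hypothetical sketch of getAllOptions(); the exact option set is assumed.
private Map<String, String> getAllOptions() {
    Map<String, String> options = new HashMap<>();  // java.util.HashMap / java.util.Map
    options.put("connector", "jdbc");
    options.put("url", "jdbc:derby:memory:mydb");
    options.put("table-name", "mytable");
    return options;
}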
From the class JdbcDynamicTableFactoryTest, method testJdbcReadProperties:
@Test
public void testJdbcReadProperties() {
    Map<String, String> properties = getAllOptions();
    properties.put("scan.partition.column", "aaa");
    properties.put("scan.partition.lower-bound", "-10");
    properties.put("scan.partition.upper-bound", "100");
    properties.put("scan.partition.num", "10");
    properties.put("scan.fetch-size", "20");
    properties.put("scan.auto-commit", "false");
    DynamicTableSource actual = createTableSource(SCHEMA, properties);
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb").setTableName("mytable").build();
    JdbcReadOptions readOptions = JdbcReadOptions.builder()
            .setPartitionColumnName("aaa")
            .setPartitionLowerBound(-10)
            .setPartitionUpperBound(100)
            .setNumPartitions(10)
            .setFetchSize(20)
            .setAutoCommit(false)
            .build();
    // no lookup.* keys were set above, so the expected lookup options stay at their defaults
    JdbcLookupOptions lookupOptions = JdbcLookupOptions.builder()
            .setCacheMaxSize(-1).setCacheExpireMs(10_000).setMaxRetryTimes(3).build();
    JdbcDynamicTableSource expected = new JdbcDynamicTableSource(
            options, readOptions, lookupOptions, TableSchema.fromResolvedSchema(SCHEMA));
    assertEquals(expected, actual);
}
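The scan.partition.* keys configure a parallel, range-partitioned scan over the column aaa. The following self-contained sketch only illustrates how an inclusive [-10, 100] range can be split into 10 sub-ranges; it mirrors the idea, not the connector's exact partitioning code.

// Illustration only: split an inclusive [lower, upper] range into numPartitions
// contiguous sub-ranges, similar in spirit to how a partitioned JDBC scan is planned.
public class PartitionRangeSketch {
    public static void main(String[] args) {
        long lower = -10, upper = 100;  // scan.partition.lower-bound / upper-bound
        int numPartitions = 10;         // scan.partition.num
        long count = upper - lower + 1; // 111 values in total
        long start = lower;
        for (int i = 0; i < numPartitions; i++) {
            // spread the remainder over the first (count % numPartitions) partitions
            long size = count / numPartitions + (i < count % numPartitions ? 1 : 0);
            long end = start + size - 1;
            System.out.printf("partition %d: aaa BETWEEN %d AND %d%n", i, start, end);
            start = end + 1;
        }
    }
}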
From the class JdbcDynamicTableFactoryTest, method testJDBCSinkWithParallelism:
@Test
public void testJDBCSinkWithParallelism() {
    Map<String, String> properties = getAllOptions();
    properties.put("sink.parallelism", "2");
    DynamicTableSink actual = createTableSink(SCHEMA, properties);
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb").setTableName("mytable").setParallelism(2).build();
    JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
            .withBatchSize(100).withBatchIntervalMs(1000).withMaxRetries(3).build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(options.getTableName())
            .withDialect(options.getDialect())
            .withFieldNames(SCHEMA.getColumnNames().toArray(new String[0]))
            .withKeyFields("bbb", "aaa")
            .build();
    JdbcDynamicTableSink expected = new JdbcDynamicTableSink(
            options, executionOptions, dmlOptions, TableSchema.fromResolvedSchema(SCHEMA));
    assertEquals(expected, actual);
}
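The JdbcExecutionOptions built for the expected sinks are not specific to the table factory; the same batching settings can also be passed directly to the DataStream JdbcSink. A minimal sketch, assuming a two-column target table and a Tuple2<Integer, String> element type (both are assumptions, not part of the tests above).

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.streaming.api.datastream.DataStream;

// Sketch: a DataStream sink with the same batching values as the test above
// (batch size 100, 1s flush interval, 3 retries). Table and column names are assumed.
public static void addJdbcSink(DataStream<Tuple2<Integer, String>> stream) {
    stream.addSink(
            JdbcSink.sink(
                    "INSERT INTO mytable (aaa, bbb) VALUES (?, ?)",
                    (statement, record) -> {
                        statement.setInt(1, record.f0);
                        statement.setString(2, record.f1);
                    },
                    JdbcExecutionOptions.builder()
                            .withBatchSize(100)
                            .withBatchIntervalMs(1000)
                            .withMaxRetries(3)
                            .build(),
                    new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                            .withUrl("jdbc:derby:memory:mydb")
                            .withDriverName("org.apache.derby.jdbc.EmbeddedDriver")
                            .build()));
}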
From the class JdbcDynamicTableFactory, method createDynamicTableSink:
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    final FactoryUtil.TableFactoryHelper helper =
            FactoryUtil.createTableFactoryHelper(this, context);
    final ReadableConfig config = helper.getOptions();
    // validate the supplied options against the factory's required/optional option sets
    helper.validate();
    validateConfigOptions(config);
    JdbcConnectorOptions jdbcOptions = getJdbcOptions(config);
    TableSchema physicalSchema =
            TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
    return new JdbcDynamicTableSink(
            jdbcOptions,
            getJdbcExecutionOptions(config),
            getJdbcDmlOptions(jdbcOptions, physicalSchema),
            physicalSchema);
}
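The getJdbcOptions, getJdbcExecutionOptions, and getJdbcDmlOptions helpers are not shown in this snippet. A minimal sketch of the first one, assuming locally declared ConfigOption constants that mirror the DDL keys used in the tests above; the constant names, and whether the project's factory builds the options exactly this way, are assumptions. In the snippets the dialect comes back from options.getDialect(), so the builder presumably derives it from the URL when it is not set explicitly.

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.ReadableConfig;

// Sketch only: map the validated table options onto the internal JdbcConnectorOptions.
// The ConfigOption constants are declared here for illustration; the factory's real
// constants, and the further options it maps (driver, username, password, parallelism),
// are not shown in this snippet.
private static final ConfigOption<String> URL =
        ConfigOptions.key("url").stringType().noDefaultValue();
private static final ConfigOption<String> TABLE_NAME =
        ConfigOptions.key("table-name").stringType().noDefaultValue();

private JdbcConnectorOptions getJdbcOptions(ReadableConfig config) {
    return JdbcConnectorOptions.builder()
            .setDBUrl(config.get(URL))
            .setTableName(config.get(TABLE_NAME))
            .build();
}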