Use of org.apache.flink.connector.jdbc.JdbcExecutionOptions in project flink by apache: the class JdbcDynamicTableFactoryTest, method testJdbcCommonProperties.
@Test
public void testJdbcCommonProperties() {
    Map<String, String> properties = getAllOptions();
    properties.put("driver", "org.apache.derby.jdbc.EmbeddedDriver");
    properties.put("username", "user");
    properties.put("password", "pass");
    properties.put("connection.max-retry-timeout", "120s");
    // validation for source
    DynamicTableSource actualSource = createTableSource(SCHEMA, properties);
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb")
            .setTableName("mytable")
            .setDriverName("org.apache.derby.jdbc.EmbeddedDriver")
            .setUsername("user")
            .setPassword("pass")
            .setConnectionCheckTimeoutSeconds(120)
            .build();
    JdbcLookupOptions lookupOptions = JdbcLookupOptions.builder()
            .setCacheMaxSize(-1)
            .setCacheExpireMs(10_000)
            .setMaxRetryTimes(3)
            .build();
    JdbcDynamicTableSource expectedSource = new JdbcDynamicTableSource(
            options, JdbcReadOptions.builder().build(), lookupOptions,
            SCHEMA.toPhysicalRowDataType());
    assertEquals(expectedSource, actualSource);
    // validation for sink
    DynamicTableSink actualSink = createTableSink(SCHEMA, properties);
    // default flush configurations
    JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
            .withBatchSize(100)
            .withBatchIntervalMs(1000)
            .withMaxRetries(3)
            .build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(options.getTableName())
            .withDialect(options.getDialect())
            .withFieldNames(SCHEMA.getColumnNames().toArray(new String[0]))
            .withKeyFields("bbb", "aaa")
            .build();
    JdbcDynamicTableSink expectedSink = new JdbcDynamicTableSink(
            options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType());
    assertEquals(expectedSink, actualSink);
}
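The getAllOptions() helper is not shown in this excerpt. A plausible sketch, reconstructed only from the URL and table name asserted above (the real helper in JdbcDynamicTableFactoryTest may carry more entries):

// Hypothetical base option map; entries inferred from the expected
// JdbcConnectorOptions above, not copied from the Flink source.
private Map<String, String> getAllOptions() {
    Map<String, String> options = new HashMap<>(); // java.util.HashMap
    options.put("connector", "jdbc");              // selects the JDBC table factory
    options.put("url", "jdbc:derby:memory:mydb");  // matches setDBUrl(...)
    options.put("table-name", "mytable");          // matches setTableName(...)
    return options;
}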
Use of org.apache.flink.connector.jdbc.JdbcExecutionOptions in project flink by apache: the class JdbcDynamicTableFactoryTest, method testJDBCSinkWithParallelism.
@Test
public void testJDBCSinkWithParallelism() {
    Map<String, String> properties = getAllOptions();
    properties.put("sink.parallelism", "2");
    DynamicTableSink actual = createTableSink(SCHEMA, properties);
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb")
            .setTableName("mytable")
            .setParallelism(2)
            .build();
    JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
            .withBatchSize(100)
            .withBatchIntervalMs(1000)
            .withMaxRetries(3)
            .build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(options.getTableName())
            .withDialect(options.getDialect())
            .withFieldNames(SCHEMA.getColumnNames().toArray(new String[0]))
            .withKeyFields("bbb", "aaa")
            .build();
    JdbcDynamicTableSink expected = new JdbcDynamicTableSink(
            options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType());
    assertEquals(expected, actual);
}
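Worth noting: the 100/1000/3 execution options expected here and in testJdbcCommonProperties are the SQL connector's DDL defaults (sink.buffer-flush.max-rows, sink.buffer-flush.interval, sink.max-retries), not the defaults of JdbcExecutionOptions itself. A small sketch of the distinction; the defaults() values are an assumption and can differ across Flink versions:

// defaults() reflects the DataStream-level builder defaults (larger batch,
// no interval-based flush in the versions this test targets; check your
// Flink release), while the table factory injects the DDL defaults:
JdbcExecutionOptions coreDefaults = JdbcExecutionOptions.defaults();
JdbcExecutionOptions tableDefaults = JdbcExecutionOptions.builder()
        .withBatchSize(100)        // sink.buffer-flush.max-rows default
        .withBatchIntervalMs(1000) // sink.buffer-flush.interval default (1s)
        .withMaxRetries(3)         // sink.max-retries default
        .build();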
Use of org.apache.flink.connector.jdbc.JdbcExecutionOptions in project flink by apache: the class JdbcOutputFormatTest, method testFlush.
@Test
public void testFlush() throws SQLException, IOException {
    JdbcConnectorOptions jdbcOptions = JdbcConnectorOptions.builder()
            .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass())
            .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl())
            .setTableName(OUTPUT_TABLE_2)
            .build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(jdbcOptions.getTableName())
            .withDialect(jdbcOptions.getDialect())
            .withFieldNames(fieldNames)
            .build();
    JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
            .withBatchSize(3)
            .build();
    outputFormat = new JdbcOutputFormatBuilder()
            .setJdbcOptions(jdbcOptions)
            .setFieldDataTypes(fieldDataTypes)
            .setJdbcDmlOptions(dmlOptions)
            .setJdbcExecutionOptions(executionOptions)
            .setRowDataTypeInfo(rowDataTypeInfo)
            .build();
    setRuntimeContext(outputFormat, true);
    try (Connection dbConn = DriverManager.getConnection(DERBY_EBOOKSHOP_DB.getUrl());
            PreparedStatement statement = dbConn.prepareStatement(SELECT_ALL_NEWBOOKS_2)) {
        outputFormat.open(0, 1);
        for (int i = 0; i < 2; ++i) {
            outputFormat.writeRecord(buildGenericData(
                    TEST_DATA[i].id, TEST_DATA[i].title, TEST_DATA[i].author,
                    TEST_DATA[i].price, TEST_DATA[i].qty));
        }
        // Batch size is 3, so nothing has been flushed to the table yet.
        try (ResultSet resultSet = statement.executeQuery()) {
            assertFalse(resultSet.next());
        }
        // The third record fills the batch and triggers a flush.
        outputFormat.writeRecord(buildGenericData(
                TEST_DATA[2].id, TEST_DATA[2].title, TEST_DATA[2].author,
                TEST_DATA[2].price, TEST_DATA[2].qty));
        try (ResultSet resultSet = statement.executeQuery()) {
            int recordCount = 0;
            while (resultSet.next()) {
                assertEquals(TEST_DATA[recordCount].id, resultSet.getObject("id"));
                assertEquals(TEST_DATA[recordCount].title, resultSet.getObject("title"));
                assertEquals(TEST_DATA[recordCount].author, resultSet.getObject("author"));
                assertEquals(TEST_DATA[recordCount].price, resultSet.getObject("price"));
                assertEquals(TEST_DATA[recordCount].qty, resultSet.getObject("qty"));
                recordCount++;
            }
            assertEquals(3, recordCount);
        }
    } finally {
        outputFormat.close();
    }
}
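The assertions turn on the size trigger: with withBatchSize(3), the first two writeRecord calls only buffer rows (the intermediate query sees an empty table) and the third fills the batch and flushes. A minimal, self-contained model of that rule, offered as a sketch of the mechanism rather than the actual JdbcOutputFormat internals:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Simplified stand-in for size-based batching; illustrative only.
final class BatchBuffer<T> {
    private final int batchSize;
    private final Consumer<List<T>> flusher;
    private final List<T> buffer = new ArrayList<>();

    BatchBuffer(int batchSize, Consumer<List<T>> flusher) {
        this.batchSize = batchSize;
        this.flusher = flusher;
    }

    void write(T record) {
        buffer.add(record);
        // A flush fires only once a positive batch size is reached.
        if (batchSize > 0 && buffer.size() >= batchSize) {
            flush();
        }
    }

    void flush() {
        if (!buffer.isEmpty()) {
            flusher.accept(new ArrayList<>(buffer));
            buffer.clear();
        }
    }
}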
Use of org.apache.flink.connector.jdbc.JdbcExecutionOptions in project flink by apache: the class JdbcOutputFormatTest, method testFlushWithBatchSizeEqualsZero.
@Test
public void testFlushWithBatchSizeEqualsZero() throws SQLException, IOException {
    JdbcConnectorOptions jdbcOptions = JdbcConnectorOptions.builder()
            .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass())
            .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl())
            .setTableName(OUTPUT_TABLE_2)
            .build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(jdbcOptions.getTableName())
            .withDialect(jdbcOptions.getDialect())
            .withFieldNames(fieldNames)
            .build();
    JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
            .withBatchSize(0)
            .build();
    outputFormat = new JdbcOutputFormatBuilder()
            .setJdbcOptions(jdbcOptions)
            .setFieldDataTypes(fieldDataTypes)
            .setJdbcDmlOptions(dmlOptions)
            .setJdbcExecutionOptions(executionOptions)
            .setRowDataTypeInfo(rowDataTypeInfo)
            .build();
    setRuntimeContext(outputFormat, true);
    try (Connection dbConn = DriverManager.getConnection(DERBY_EBOOKSHOP_DB.getUrl());
            PreparedStatement statement = dbConn.prepareStatement(SELECT_ALL_NEWBOOKS_2)) {
        outputFormat.open(0, 1);
        for (int i = 0; i < 2; ++i) {
            outputFormat.writeRecord(buildGenericData(
                    TEST_DATA[i].id, TEST_DATA[i].title, TEST_DATA[i].author,
                    TEST_DATA[i].price, TEST_DATA[i].qty));
        }
        // With batch size 0, no size-based flush ever fires.
        try (ResultSet resultSet = statement.executeQuery()) {
            assertFalse(resultSet.next());
        }
    } finally {
        outputFormat.close();
    }
}
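This is the complement of testFlush: with withBatchSize(0) the size trigger never fires (compare the batchSize > 0 guard in the sketch after testFlush), so both writes stay buffered and the query still sees an empty table. Rows would reach the database only through an explicit flush or on close; for example (hedged, the exact flush hook on the output format may vary by Flink version):

outputFormat.flush(); // assumed explicit-flush method; forces buffered rows out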
Use of org.apache.flink.connector.jdbc.JdbcExecutionOptions in project flink by apache: the class JdbcDynamicTableFactoryTest, method testJdbcSinkProperties.
@Test
public void testJdbcSinkProperties() {
    Map<String, String> properties = getAllOptions();
    properties.put("sink.buffer-flush.max-rows", "1000");
    properties.put("sink.buffer-flush.interval", "2min");
    properties.put("sink.max-retries", "5");
    DynamicTableSink actual = createTableSink(SCHEMA, properties);
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb")
            .setTableName("mytable")
            .build();
    JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
            .withBatchSize(1000)
            .withBatchIntervalMs(120_000)
            .withMaxRetries(5)
            .build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(options.getTableName())
            .withDialect(options.getDialect())
            .withFieldNames(SCHEMA.getColumnNames().toArray(new String[0]))
            .withKeyFields("bbb", "aaa")
            .build();
    JdbcDynamicTableSink expected = new JdbcDynamicTableSink(
            options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType());
    assertEquals(expected, actual);
}
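This test pins down the factory's unit handling: "1000" maps straight to withBatchSize(1000), "5" to withMaxRetries(5), and the duration string "2min" is converted to milliseconds for withBatchIntervalMs. A quick sanity check of that conversion in plain Java:

import java.time.Duration;

// "2min" in the DDL corresponds to 120,000 ms, matching
// withBatchIntervalMs(120_000) in the expected options.
long intervalMs = Duration.ofMinutes(2).toMillis();
assert intervalMs == 120_000L;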