Use of org.apache.flink.connector.jdbc.internal.connection.SimpleJdbcConnectionProvider in project flink by apache.
From the class JdbcTableOutputFormatTest, method testJdbcOutputFormat:
@Test
public void testJdbcOutputFormat() throws Exception {
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl(getDbMetadata().getUrl())
            .setTableName(OUTPUT_TABLE)
            .build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(options.getTableName())
            .withDialect(options.getDialect())
            .withFieldNames(fieldNames)
            .withKeyFields(keyFields)
            .build();
    format = new TableJdbcUpsertOutputFormat(
            new SimpleJdbcConnectionProvider(options), dmlOptions, JdbcExecutionOptions.defaults());
    RuntimeContext context = Mockito.mock(RuntimeContext.class);
    ExecutionConfig config = Mockito.mock(ExecutionConfig.class);
    doReturn(config).when(context).getExecutionConfig();
    doReturn(true).when(config).isObjectReuseEnabled();
    format.setRuntimeContext(context);
    format.open(0, 1);
    for (TestEntry entry : TEST_DATA) {
        format.writeRecord(Tuple2.of(true, toRow(entry)));
    }
    format.flush();
    check(Arrays.stream(TEST_DATA).map(JdbcDataTestBase::toRow).toArray(Row[]::new));
    // override: upsert the same rows again
    for (TestEntry entry : TEST_DATA) {
        format.writeRecord(Tuple2.of(true, toRow(entry)));
    }
    format.flush();
    check(Arrays.stream(TEST_DATA).map(JdbcDataTestBase::toRow).toArray(Row[]::new));
    // delete the first half of the rows
    for (int i = 0; i < TEST_DATA.length / 2; i++) {
        format.writeRecord(Tuple2.of(false, toRow(TEST_DATA[i])));
    }
    Row[] expected = new Row[TEST_DATA.length - TEST_DATA.length / 2];
    for (int i = TEST_DATA.length / 2; i < TEST_DATA.length; i++) {
        expected[i - TEST_DATA.length / 2] = toRow(TEST_DATA[i]);
    }
    format.flush();
    check(expected);
}
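The boolean in each Tuple2 is the change flag that TableJdbcUpsertOutputFormat dispatches on: the test shows that true routes a row to the upsert executor and false to the delete executor, which matches rows on the configured keyFields. A minimal sketch of that convention, reusing the format built above:

    // Change-flag convention, grounded in the test above: true = upsert, false = delete.
    Row row = toRow(TEST_DATA[0]);             // any row whose layout matches fieldNames
    format.writeRecord(Tuple2.of(true, row));  // buffered for the upsert executor
    format.writeRecord(Tuple2.of(false, row)); // buffered for the delete executor
    format.flush();                            // executes both pending batches on the database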
Use of org.apache.flink.connector.jdbc.internal.connection.SimpleJdbcConnectionProvider in project flink by apache.
From the class JdbcTableOutputFormatTest, method testUpsertFormatCloseBeforeOpen:
@Test
public void testUpsertFormatCloseBeforeOpen() throws Exception {
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl(getDbMetadata().getUrl()).setTableName(OUTPUT_TABLE).build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(options.getTableName()).withDialect(options.getDialect())
            .withFieldNames(fieldNames).withKeyFields(keyFields).build();
    format = new TableJdbcUpsertOutputFormat(
            new SimpleJdbcConnectionProvider(options), dmlOptions, JdbcExecutionOptions.defaults());
    // FLINK-17544: There should be no NPE thrown from this method
    format.close();
}
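The FLINK-17544 regression that this test pins down was a NullPointerException from closing a format whose open() had never run, so state created only in open() was still null. The usual defense is null-guarded teardown; a minimal sketch of that pattern (assumed shape, not the actual Flink fix):

    // Sketch only: teardown that tolerates close() arriving before open().
    // `statement` stands in for any resource that only open(...) would have created.
    class GuardedOutput implements AutoCloseable {
        private java.sql.PreparedStatement statement; // still null if open() never ran

        @Override
        public void close() throws java.sql.SQLException {
            if (statement != null) { // guard against close-before-open
                statement.close();
                statement = null;
            }
        }
    }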
Use of org.apache.flink.connector.jdbc.internal.connection.SimpleJdbcConnectionProvider in project flink by apache.
From the class JdbcTableOutputFormatTest, method testDeleteExecutorUpdatedOnReconnect:
/**
* Test that the delete executor in {@link TableJdbcUpsertOutputFormat} is updated when {@link
* JdbcOutputFormat#attemptFlush()} fails.
*/
@Test
public void testDeleteExecutorUpdatedOnReconnect() throws Exception {
    // first fail flush from the main executor
    boolean[] exceptionThrown = {false};
    // then record whether the delete executor was updated
    // and check it on the next flush attempt
    boolean[] deleteExecutorPrepared = {false};
    boolean[] deleteExecuted = {false};
    format = new TableJdbcUpsertOutputFormat(
            new SimpleJdbcConnectionProvider(
                    JdbcConnectorOptions.builder()
                            .setDBUrl(getDbMetadata().getUrl())
                            .setTableName(OUTPUT_TABLE)
                            .build()) {
                @Override
                public boolean isConnectionValid() throws SQLException {
                    // trigger reconnect and re-prepare on flush failure
                    return false;
                }
            },
            JdbcExecutionOptions.builder()
                    .withMaxRetries(1)
                    .withBatchIntervalMs(Long.MAX_VALUE) // disable periodic flush
                    .build(),
            ctx -> new JdbcBatchStatementExecutor<Row>() {
                @Override
                public void executeBatch() throws SQLException {
                    if (!exceptionThrown[0]) {
                        exceptionThrown[0] = true;
                        throw new SQLException();
                    }
                }

                @Override
                public void prepareStatements(Connection connection) {}

                @Override
                public void addToBatch(Row record) {}

                @Override
                public void closeStatements() {}
            },
            ctx -> new JdbcBatchStatementExecutor<Row>() {
                @Override
                public void prepareStatements(Connection connection) {
                    if (exceptionThrown[0]) {
                        deleteExecutorPrepared[0] = true;
                    }
                }

                @Override
                public void addToBatch(Row record) {}

                @Override
                public void executeBatch() {
                    deleteExecuted[0] = true;
                }

                @Override
                public void closeStatements() {}
            });
    RuntimeContext context = Mockito.mock(RuntimeContext.class);
    ExecutionConfig config = Mockito.mock(ExecutionConfig.class);
    doReturn(config).when(context).getExecutionConfig();
    doReturn(true).when(config).isObjectReuseEnabled();
    format.setRuntimeContext(context);
    format.open(0, 1);
    format.writeRecord(Tuple2.of(false /* false = delete */, toRow(TEST_DATA[0])));
    format.flush();
    assertTrue("Delete should be executed", deleteExecuted[0]);
    assertTrue("Delete executor should be prepared; exceptionThrown=" + exceptionThrown[0],
            deleteExecutorPrepared[0]);
}
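The behavior under test hinges on JdbcOutputFormat's flush retry: when attemptFlush() throws and isConnectionValid() reports false, the format re-establishes the connection and must re-prepare every executor, including the delete executor, before retrying. A simplified sketch of that loop (assumed shape reconstructed from the test, not the exact Flink source):

    // Sketch of the retry/reconnect flow the test exercises; names follow the
    // snippet above, but the control flow here is an assumption.
    for (int attempt = 0; attempt <= maxRetries; attempt++) {
        try {
            attemptFlush(); // calls executeBatch() on the upsert and delete executors
            return;
        } catch (SQLException e) {
            if (attempt == maxRetries) {
                throw new IOException("Flush failed after " + maxRetries + " retries", e);
            }
            if (!connectionProvider.isConnectionValid()) {
                Connection fresh = connectionProvider.reestablishConnection();
                upsertExecutor.prepareStatements(fresh);
                deleteExecutor.prepareStatements(fresh); // what the test asserts: don't skip this one
            }
        }
    }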
Use of org.apache.flink.connector.jdbc.internal.connection.SimpleJdbcConnectionProvider in project flink by apache.
From the class JdbcFullTest, method runTest:
private void runTest(boolean exploitParallelism) throws Exception {
    ExecutionEnvironment environment = ExecutionEnvironment.getExecutionEnvironment();
    JdbcInputFormat.JdbcInputFormatBuilder inputBuilder = JdbcInputFormat.buildJdbcInputFormat()
            .setDrivername(getDbMetadata().getDriverClass())
            .setDBUrl(getDbMetadata().getUrl())
            .setQuery(SELECT_ALL_BOOKS)
            .setRowTypeInfo(ROW_TYPE_INFO);
    if (exploitParallelism) {
        final int fetchSize = 1;
        final long min = TEST_DATA[0].id;
        final long max = TEST_DATA[TEST_DATA.length - fetchSize].id;
        // use a "splittable" query to exploit parallelism
        inputBuilder = inputBuilder
                .setQuery(SELECT_ALL_BOOKS_SPLIT_BY_ID)
                .setParametersProvider(
                        new JdbcNumericBetweenParametersProvider(min, max).ofBatchSize(fetchSize));
    }
    DataSet<Row> source = environment.createInput(inputBuilder.finish());
    // NOTE: with the Derby driver, setSqlTypes could be skipped, but some databases
    // don't handle null values correctly when no column type is specified in
    // PreparedStatement.setObject (see its javadoc for more details)
    JdbcConnectionOptions connectionOptions =
            new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                    .withUrl(getDbMetadata().getUrl())
                    .withDriverName(getDbMetadata().getDriverClass())
                    .build();
    JdbcOutputFormat jdbcOutputFormat = new JdbcOutputFormat<>(
            new SimpleJdbcConnectionProvider(connectionOptions),
            JdbcExecutionOptions.defaults(),
            ctx -> createSimpleRowExecutor(
                    String.format(INSERT_TEMPLATE, OUTPUT_TABLE),
                    new int[] {
                        Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.DOUBLE, Types.INTEGER
                    },
                    ctx.getExecutionConfig().isObjectReuseEnabled()),
            JdbcOutputFormat.RecordExtractor.identity());
    source.output(jdbcOutputFormat);
    environment.execute();
    try (Connection dbConn = DriverManager.getConnection(getDbMetadata().getUrl());
            PreparedStatement statement = dbConn.prepareStatement(SELECT_ALL_NEWBOOKS);
            ResultSet resultSet = statement.executeQuery()) {
        int count = 0;
        while (resultSet.next()) {
            count++;
        }
        Assert.assertEquals(TEST_DATA.length, count);
    }
}
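The parallel branch works because JdbcNumericBetweenParametersProvider pre-computes one (lower, upper) pair per split, and each pair is bound into the placeholders of the splittable query, so every input split reads a disjoint id range. A small illustration of the splitting arithmetic; the id range and batch size here are hypothetical, and exact boundaries follow the provider's own rounding:

    // Hypothetical values: ids 1..5 with batch size 2 should yield the pairs
    // [1, 2], [3, 4], [5, 5], one parallel input split apiece.
    Serializable[][] params =
            new JdbcNumericBetweenParametersProvider(1, 5).ofBatchSize(2).getParameterValues();
    for (Serializable[] pair : params) {
        System.out.println(Arrays.toString(pair)); // lower and upper bound of one split
    }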
Use of org.apache.flink.connector.jdbc.internal.connection.SimpleJdbcConnectionProvider in project flink by apache.
From the class JdbcOutputFormatBuilder, method build:
public JdbcOutputFormat<RowData, ?, ?> build() {
    checkNotNull(jdbcOptions, "jdbc options can not be null");
    checkNotNull(dmlOptions, "jdbc dml options can not be null");
    checkNotNull(executionOptions, "jdbc execution options can not be null");
    final LogicalType[] logicalTypes = Arrays.stream(fieldDataTypes)
            .map(DataType::getLogicalType).toArray(LogicalType[]::new);
    if (dmlOptions.getKeyFields().isPresent() && dmlOptions.getKeyFields().get().length > 0) {
        // upsert query
        return new JdbcOutputFormat<>(
                new SimpleJdbcConnectionProvider(jdbcOptions), executionOptions,
                ctx -> createBufferReduceExecutor(dmlOptions, ctx, rowDataTypeInformation, logicalTypes),
                JdbcOutputFormat.RecordExtractor.identity());
    } else {
        // append only query
        final String sql = dmlOptions.getDialect()
                .getInsertIntoStatement(dmlOptions.getTableName(), dmlOptions.getFieldNames());
        return new JdbcOutputFormat<>(
                new SimpleJdbcConnectionProvider(jdbcOptions), executionOptions,
                ctx -> createSimpleBufferedExecutor(ctx, dmlOptions.getDialect(),
                        dmlOptions.getFieldNames(), logicalTypes, sql, rowDataTypeInformation),
                JdbcOutputFormat.RecordExtractor.identity());
    }
}
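Which branch runs is decided entirely by the JdbcDmlOptions passed to the builder: key fields present and non-empty select the upsert path, otherwise the dialect's plain INSERT statement is generated. A hedged usage sketch; the table and field names below are made up:

    // Hypothetical options: the withKeyFields(...) call is what steers build()
    // into the upsert branch; dropping it yields the append-only INSERT path.
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName("books")
            .withDialect(jdbcOptions.getDialect())
            .withFieldNames(new String[] {"id", "title"})
            .withKeyFields(new String[] {"id"}) // remove to get a plain INSERT
            .build();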