Use of org.apache.flink.connector.jdbc.internal.executor.JdbcBatchStatementExecutor in project flink by apache.
From the class JdbcOutputFormatBuilder, method createBufferReduceExecutor.
private static JdbcBatchStatementExecutor<RowData> createBufferReduceExecutor(
        JdbcDmlOptions opt,
        RuntimeContext ctx,
        TypeInformation<RowData> rowDataTypeInfo,
        LogicalType[] fieldTypes) {
    checkArgument(opt.getKeyFields().isPresent());
    JdbcDialect dialect = opt.getDialect();
    String tableName = opt.getTableName();
    String[] pkNames = opt.getKeyFields().get();
    // map primary-key names to their positions in the full field list
    int[] pkFields =
            Arrays.stream(pkNames)
                    .mapToInt(Arrays.asList(opt.getFieldNames())::indexOf)
                    .toArray();
    LogicalType[] pkTypes =
            Arrays.stream(pkFields).mapToObj(f -> fieldTypes[f]).toArray(LogicalType[]::new);
    final TypeSerializer<RowData> typeSerializer =
            rowDataTypeInfo.createSerializer(ctx.getExecutionConfig());
    // copy buffered rows when object reuse is enabled, otherwise pass them through
    final Function<RowData, RowData> valueTransform =
            ctx.getExecutionConfig().isObjectReuseEnabled()
                    ? typeSerializer::copy
                    : Function.identity();
    return new TableBufferReducedStatementExecutor(
            createUpsertRowExecutor(
                    dialect, tableName, opt.getFieldNames(), fieldTypes, pkFields, pkNames, pkTypes),
            createDeleteExecutor(dialect, tableName, pkNames, pkTypes),
            createRowKeyExtractor(fieldTypes, pkFields),
            valueTransform);
}
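The TableBufferReducedStatementExecutor returned here buffers changes keyed by primary key, keeping only the last change per key, and on flush routes upserts to the upsert executor and deletes to the delete executor. Below is a minimal sketch of that reduce-then-route pattern, using a hypothetical BufferReducedExecutor with simplified generic types rather than Flink's actual class:

import java.sql.Connection;
import java.sql.SQLException;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;

// Hypothetical, simplified stand-in for TableBufferReducedStatementExecutor.
// Only the latest change per key survives; on flush, upserts and deletes are
// routed to the two inner executors and executed as separate batches.
final class BufferReducedExecutor<K, V> {

    interface BatchExecutor<T> {
        void prepareStatements(Connection connection) throws SQLException;
        void addToBatch(T record) throws SQLException;
        void executeBatch() throws SQLException;
        void closeStatements() throws SQLException;
    }

    private final BatchExecutor<V> upsertExecutor;
    private final BatchExecutor<K> deleteExecutor;
    private final Function<V, K> keyExtractor;
    // value == null means "delete this key"; otherwise "upsert this value"
    private final Map<K, V> reduceBuffer = new LinkedHashMap<>();

    BufferReducedExecutor(
            BatchExecutor<V> upsertExecutor,
            BatchExecutor<K> deleteExecutor,
            Function<V, K> keyExtractor) {
        this.upsertExecutor = upsertExecutor;
        this.deleteExecutor = deleteExecutor;
        this.keyExtractor = keyExtractor;
    }

    void addToBatch(boolean isUpsert, V value) {
        // later changes for the same key overwrite earlier ones
        reduceBuffer.put(keyExtractor.apply(value), isUpsert ? value : null);
    }

    void executeBatch() throws SQLException {
        for (Map.Entry<K, V> entry : reduceBuffer.entrySet()) {
            if (entry.getValue() != null) {
                upsertExecutor.addToBatch(entry.getValue());
            } else {
                deleteExecutor.addToBatch(entry.getKey());
            }
        }
        upsertExecutor.executeBatch();
        deleteExecutor.executeBatch();
        reduceBuffer.clear();
    }
}

The valueTransform argument in the factory above (typeSerializer::copy when object reuse is enabled) exists because buffered rows would otherwise all alias the same reused RowData instance.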
Use of org.apache.flink.connector.jdbc.internal.executor.JdbcBatchStatementExecutor in project flink by apache.
From the class JdbcTableOutputFormatTest, method testDeleteExecutorUpdatedOnReconnect.
/**
* Test that the delete executor in {@link TableJdbcUpsertOutputFormat} is updated when {@link
* JdbcOutputFormat#attemptFlush()} fails.
*/
@Test
public void testDeleteExecutorUpdatedOnReconnect() throws Exception {
// first fail flush from the main executor
boolean[] exceptionThrown = { false };
// then record whether the delete executor was updated
// and check it on the next flush attempt
boolean[] deleteExecutorPrepared = { false };
boolean[] deleteExecuted = { false };
format =
        new TableJdbcUpsertOutputFormat(
                new SimpleJdbcConnectionProvider(
                        JdbcConnectorOptions.builder()
                                .setDBUrl(getDbMetadata().getUrl())
                                .setTableName(OUTPUT_TABLE)
                                .build()) {
                    @Override
                    public boolean isConnectionValid() throws SQLException {
                        // trigger reconnect and re-prepare on flush failure
                        return false;
                    }
                },
                JdbcExecutionOptions.builder()
                        .withMaxRetries(1)
                        // disable periodic flush
                        .withBatchIntervalMs(Long.MAX_VALUE)
                        .build(),
                ctx -> new JdbcBatchStatementExecutor<Row>() {
                    @Override
                    public void executeBatch() throws SQLException {
                        if (!exceptionThrown[0]) {
                            exceptionThrown[0] = true;
                            throw new SQLException();
                        }
                    }

                    @Override
                    public void prepareStatements(Connection connection) {}

                    @Override
                    public void addToBatch(Row record) {}

                    @Override
                    public void closeStatements() {}
                },
                ctx -> new JdbcBatchStatementExecutor<Row>() {
                    @Override
                    public void prepareStatements(Connection connection) {
                        if (exceptionThrown[0]) {
                            deleteExecutorPrepared[0] = true;
                        }
                    }

                    @Override
                    public void addToBatch(Row record) {}

                    @Override
                    public void executeBatch() {
                        deleteExecuted[0] = true;
                    }

                    @Override
                    public void closeStatements() {}
                });
RuntimeContext context = Mockito.mock(RuntimeContext.class);
ExecutionConfig config = Mockito.mock(ExecutionConfig.class);
doReturn(config).when(context).getExecutionConfig();
doReturn(true).when(config).isObjectReuseEnabled();
format.setRuntimeContext(context);
format.open(0, 1);
format.writeRecord(Tuple2.of(false /* false = delete */, toRow(TEST_DATA[0])));
format.flush();
assertTrue("Delete should be executed", deleteExecuted[0]);
assertTrue("Delete executor should be prepared" + exceptionThrown[0], deleteExecutorPrepared[0]);
}
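What the test exercises is the retry loop in JdbcOutputFormat.flush: the first attemptFlush throws, the provider reports the connection as invalid, so the format reconnects and re-prepares all statement executors, including the delete executor, before retrying. Below is a rough sketch of that retry-and-reconnect flow, with hypothetical names and a simplified structure rather than Flink's actual implementation:

import java.sql.Connection;
import java.sql.SQLException;

// Hypothetical sketch of the flush retry loop exercised by the test above;
// names and structure are simplified and are not Flink's actual code.
final class RetryingFlusher {

    interface ConnectionProvider {
        boolean isConnectionValid() throws SQLException;
        Connection reestablishConnection() throws SQLException;
    }

    interface Executor {
        void prepareStatements(Connection connection) throws SQLException;
        void executeBatch() throws SQLException;
        void closeStatements() throws SQLException;
    }

    private final ConnectionProvider connectionProvider;
    private final Executor[] executors; // e.g. the upsert and delete executors

    RetryingFlusher(ConnectionProvider provider, Executor... executors) {
        this.connectionProvider = provider;
        this.executors = executors;
    }

    void flush(int maxRetries) throws SQLException {
        for (int retry = 0; ; retry++) {
            try {
                for (Executor executor : executors) {
                    executor.executeBatch();
                }
                return;
            } catch (SQLException e) {
                if (retry >= maxRetries) {
                    throw e;
                }
                // On failure, a broken connection triggers reconnect and
                // re-prepare of every executor -- which is why the delete
                // executor's prepareStatements runs again in the test above.
                if (!connectionProvider.isConnectionValid()) {
                    Connection connection = connectionProvider.reestablishConnection();
                    for (Executor executor : executors) {
                        executor.closeStatements();
                        executor.prepareStatements(connection);
                    }
                }
            }
        }
    }
}

With withMaxRetries(1), the sequence is exactly one failed attempt, one reconnect, and one successful retry, which is what the deleteExecutorPrepared and deleteExecuted flags assert.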