Use of org.apache.flink.connector.testutils.source.reader.TestingReaderOutput in project flink by splunk.
In the class PulsarSourceReaderTestBase, the method assigningEmptySplits:
@TestTemplate
void assigningEmptySplits(
        PulsarSourceReaderBase<Integer> reader, Boundedness boundedness, String topicName)
        throws Exception {
    // An "empty" split: it starts from MessageId.latest, so there is nothing to consume.
    final PulsarPartitionSplit emptySplit =
            createPartitionSplit(topicName, 0, Boundedness.CONTINUOUS_UNBOUNDED, MessageId.latest);
    reader.addSplits(Collections.singletonList(emptySplit));

    TestingReaderOutput<Integer> output = new TestingReaderOutput<>();
    InputStatus status = reader.pollNext(output);

    assertThat(status).isEqualTo(InputStatus.NOTHING_AVAILABLE);
    reader.close();
}
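TestingReaderOutput is a ReaderOutput implementation that simply buffers whatever a reader emits, so a test can poll the reader and then assert on the buffered records. A minimal sketch of that pattern, calling collect() directly for illustration (in a real test the reader under test would invoke it from inside pollNext()):

// TestingReaderOutput buffers every record passed to collect() in an internal list.
TestingReaderOutput<Integer> output = new TestingReaderOutput<>();
output.collect(1);
output.collect(2);
output.collect(3);
// getEmittedRecords() exposes the buffered records for assertions.
assertThat(output.getEmittedRecords()).containsExactly(1, 2, 3);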
Use of org.apache.flink.connector.testutils.source.reader.TestingReaderOutput in project flink-cdc-connectors by ververica.
In the class MySqlSourceReaderTest, the method testNoDuplicateRecordsWhenKeepUpdating:
@Test
public void testNoDuplicateRecordsWhenKeepUpdating() throws Exception {
    inventoryDatabase.createAndInitialize();
    String tableName = inventoryDatabase.getDatabaseName() + ".products";
    // use the default split size, which is large enough to guarantee a single snapshot split
    final MySqlSourceConfig sourceConfig =
            new MySqlSourceConfigFactory()
                    .startupOptions(StartupOptions.initial())
                    .databaseList(inventoryDatabase.getDatabaseName())
                    .tableList(tableName)
                    .includeSchemaChanges(false)
                    .hostname(MYSQL_CONTAINER.getHost())
                    .port(MYSQL_CONTAINER.getDatabasePort())
                    .username(customerDatabase.getUsername())
                    .password(customerDatabase.getPassword())
                    .serverTimeZone(ZoneId.of("UTC").toString())
                    .createConfig(0);
    final MySqlSnapshotSplitAssigner assigner =
            new MySqlSnapshotSplitAssigner(
                    sourceConfig,
                    DEFAULT_PARALLELISM,
                    Collections.singletonList(TableId.parse(tableName)),
                    false);
    assigner.open();
    MySqlSnapshotSplit snapshotSplit = (MySqlSnapshotSplit) assigner.getNext().get();
    // should contain only one split
    assertFalse(assigner.getNext().isPresent());
    // and that split covers the full table range
    assertNull(snapshotSplit.getSplitStart());
    assertNull(snapshotSplit.getSplitEnd());

    final AtomicBoolean finishReading = new AtomicBoolean(false);
    final CountDownLatch updatingExecuted = new CountDownLatch(1);
    TestingReaderContext testingReaderContext = new TestingReaderContext();
    MySqlSourceReader<SourceRecord> reader = createReader(sourceConfig, testingReaderContext);
    reader.start();

    Thread updateWorker =
            new Thread(
                    () -> {
                        try (Connection connection = inventoryDatabase.getJdbcConnection();
                                Statement statement = connection.createStatement()) {
                            boolean flagSet = false;
                            while (!finishReading.get()) {
                                statement.execute(
                                        "UPDATE products SET description='"
                                                + UUID.randomUUID().toString()
                                                + "' WHERE id=101");
                                if (!flagSet) {
                                    updatingExecuted.countDown();
                                    flagSet = true;
                                }
                            }
                        } catch (Exception throwables) {
                            throwables.printStackTrace();
                        }
                    });
    // keep updating the products table while the snapshot split is being read
    updateWorker.start();
    // wait until at least one update has been executed
    updatingExecuted.await();

    // start reading the snapshot split of the products table
    reader.addSplits(Collections.singletonList(snapshotSplit));
    reader.notifyNoMoreSplits();
    TestingReaderOutput<SourceRecord> output = new TestingReaderOutput<>();
    while (true) {
        InputStatus status = reader.pollNext(output);
        if (status == InputStatus.END_OF_INPUT) {
            break;
        }
        if (status == InputStatus.NOTHING_AVAILABLE) {
            reader.isAvailable().get();
        }
    }

    // stop the updating worker
    finishReading.set(true);
    updateWorker.join();

    // check the result: every emitted record must have a unique key
    ArrayList<SourceRecord> emittedRecords = output.getEmittedRecords();
    Map<Object, SourceRecord> recordByKey = new HashMap<>();
    for (SourceRecord record : emittedRecords) {
        SourceRecord existed = recordByKey.get(record.key());
        if (existed != null) {
            fail(
                    String.format(
                            "The emitted records contain a duplicate on key\n%s\n%s\n",
                            existed, record));
        } else {
            recordByKey.put(record.key(), record);
        }
    }
}
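The poll loop above is the standard way to drive a SourceReader in tests: poll until END_OF_INPUT, and block on isAvailable() whenever nothing is buffered yet. A minimal sketch of that loop factored into a reusable helper, assuming Flink's SourceReader API (the helper name drainReader is ours, not part of either project):

// Hypothetical helper: drains a SourceReader into a TestingReaderOutput and
// returns everything the reader emitted before reaching END_OF_INPUT.
static <T> List<T> drainReader(SourceReader<T, ?> reader) throws Exception {
    TestingReaderOutput<T> output = new TestingReaderOutput<>();
    while (true) {
        InputStatus status = reader.pollNext(output);
        if (status == InputStatus.END_OF_INPUT) {
            return output.getEmittedRecords();
        }
        if (status == InputStatus.NOTHING_AVAILABLE) {
            // Block until the reader signals that more data may be available.
            reader.isAvailable().get();
        }
    }
}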