Use of io.debezium.util.Stopwatch in project debezium by debezium.
Example from the class RecordsStreamProducerIT, method shouldProcessLargerTx:
@Test
public void shouldProcessLargerTx() throws Exception {
    Testing.Print.disable();
    final int numberOfEvents = 1000;
    startConnector();
    waitForStreamingToStart();
    final String topicPrefix = "public.test_table";
    final String topicName = topicName(topicPrefix);
    final Stopwatch stopwatch = Stopwatch.reusable();
    consumer = testConsumer(numberOfEvents);
    // This is not accurate, as the measurement also includes loading the data, but
    // it is sufficient to confirm there is no large difference
    // in runtime between the two cases
    stopwatch.start();
    executeAndWait(IntStream.rangeClosed(2, numberOfEvents + 1).boxed()
            .map(x -> "INSERT INTO test_table (text) VALUES ('insert" + x + "')")
            .collect(Collectors.joining(";")));
    stopwatch.stop();
    final long firstRun = stopwatch.durations().statistics().getTotal().toMillis();
    logger.info("Single tx duration = {} ms", firstRun);
    for (int i = 0; i < numberOfEvents; i++) {
        SourceRecord record = consumer.remove();
        assertEquals(topicName, record.topic());
        VerifyRecord.isValidInsert(record, PK_FIELD, i + 2);
    }
    consumer.expects(numberOfEvents);
    IntStream.rangeClosed(2, numberOfEvents + 1)
            .forEach(x -> TestHelper.execute("INSERT INTO test_table (text) VALUES ('insert" + x + "')"));
    stopwatch.start();
    // There should be no significant difference between the many-TX runtime and a single large TX.
    // We still use generous limits, as the runtime is in seconds and we cannot provide
    // a stable scheduling environment
    consumer.await(3 * firstRun, TimeUnit.MILLISECONDS);
    stopwatch.stop();
    for (int i = 0; i < numberOfEvents; i++) {
        SourceRecord record = consumer.remove();
        assertEquals(topicName, record.topic());
        VerifyRecord.isValidInsert(record, PK_FIELD, i + 1002);
    }
    logger.info("Many tx duration = {} ms", stopwatch.durations().statistics().getTotal().toMillis());
}
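The pattern worth taking away from this test is: measure one run with a Stopwatch, read the elapsed time through durations().statistics().getTotal(), and then use that figure as a generous bound for a follow-up run. A minimal sketch of that pattern (not taken from the Debezium code base; doWork() and awaitFollowUp() are hypothetical placeholders):

void timeAndBound() throws Exception {
    final Stopwatch stopwatch = Stopwatch.reusable();
    stopwatch.start();
    doWork();                                                // hypothetical first workload, measured
    stopwatch.stop();
    final long firstRunMs = stopwatch.durations().statistics().getTotal().toMillis();
    stopwatch.start();
    // Allow the follow-up up to three times the first run, as the test above does,
    // because exact scheduling cannot be guaranteed in a test environment.
    awaitFollowUp(3 * firstRunMs, TimeUnit.MILLISECONDS);    // hypothetical follow-up workload
    stopwatch.stop();
}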
Use of io.debezium.util.Stopwatch in project debezium by debezium.
Example from the class KafkaClusterTest, method shouldStartClusterAndAllowProducersAndConsumersToUseIt:
@Test
@SkipLongRunning
public void shouldStartClusterAndAllowProducersAndConsumersToUseIt() throws Exception {
    Testing.Debug.enable();
    final String topicName = "topicA";
    final CountDownLatch completion = new CountDownLatch(2);
    final int numMessages = 100;
    final AtomicLong messagesRead = new AtomicLong(0);
    // Start a cluster and create a topic ...
    cluster.addBrokers(1).startup();
    cluster.createTopics(topicName);
    // Consume messages asynchronously ...
    Stopwatch sw = Stopwatch.reusable().start();
    cluster.useTo().consumeIntegers(topicName, numMessages, 10, TimeUnit.SECONDS, completion::countDown, (key, value) -> {
        messagesRead.incrementAndGet();
        return true;
    });
    // Produce some messages asynchronously ...
    cluster.useTo().produceIntegers(topicName, numMessages, 1, completion::countDown);
    // Wait for both to complete ...
    if (completion.await(10, TimeUnit.SECONDS)) {
        sw.stop();
        Testing.debug("Both consumer and producer completed normally in " + sw.durations());
    } else {
        Testing.debug("Consumer and/or producer did not complete normally");
    }
    assertThat(messagesRead.get()).isEqualTo(numMessages);
}
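Three different ways of reading a measurement appear across the snippets on this page, all reachable from durations(): the Durations object itself has a printable form (used here), while statistics() exposes the total either as a java.time.Duration or as a pre-formatted string (both used in the examples below). A short recap, assuming a stopwatch sw that has already been started and stopped:

Testing.debug("Completed in " + sw.durations());                       // printable summary of the recorded durations
long totalMs = sw.durations().statistics().getTotal().toMillis();      // total as a Duration, read here in milliseconds
String totalText = sw.durations().statistics().getTotalAsString();     // total pre-formatted as a string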
Use of io.debezium.util.Stopwatch in project debezium by debezium.
Example from the class AbstractMySqlConnectorOutputTest, method waitForGtidSetsToMatch:
/**
 * Wait a maximum amount of time until the replica catches up with the master.
 *
 * @param master the configuration with the {@link MySqlConnectorConfig#HOSTNAME} and {@link MySqlConnectorConfig#PORT}
 *            configuration properties for the MySQL master; may not be null
 * @param replica the configuration with the {@link MySqlConnectorConfig#HOSTNAME} and {@link MySqlConnectorConfig#PORT}
 *            configuration properties for the MySQL replica; may not be null
 * @param timeout the maximum amount of time to wait
 * @param unit the time unit for the timeout
 * @see #waitForGtidSetsToMatch(Configuration, Configuration)
 */
protected static void waitForGtidSetsToMatch(Configuration master, Configuration replica, long timeout, TimeUnit unit) {
    GtidSet masterGtidSet = readAvailableGtidSet(master);
    if (masterGtidSet == null) {
        // no GTIDs ...
        return;
    }
    Stopwatch sw = Stopwatch.reusable().start();
    CountDownLatch latch = new CountDownLatch(1);
    Runnable runner = () -> {
        try {
            GtidSet replicaGtidSet = null;
            while (true) {
                Testing.debug("Checking replica's GTIDs and comparing to primary's...");
                replicaGtidSet = readAvailableGtidSet(replica);
                // The replica will have extra sources, so check whether the replica has everything in the master ...
                if (masterGtidSet.isContainedWithin(replicaGtidSet)) {
                    Testing.debug("Replica's GTIDs are caught up to the primary's.");
                    sw.stop();
                    return;
                }
                Testing.debug("Waiting for replica's GTIDs to catch up to primary's...");
                Thread.sleep(100);
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            latch.countDown();
        }
    };
    // Start the thread to keep checking for the replica's GTID set ...
    Thread checker = new Thread(runner, "mysql-replica-watcher");
    checker.start();
    try {
        if (!latch.await(timeout, unit)) {
            // Timed out waiting for them to match ...
            checker.interrupt();
        }
        Testing.print("Waited a total of " + sw.durations().statistics().getTotalAsString() + " for the replica to catch up to the primary.");
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
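The watcher-thread pattern above generalizes to any condition that must be polled under a timeout while recording how long the wait took. A reduced sketch (not from the Debezium code base; the condition parameter is a hypothetical java.util.function.BooleanSupplier), which like the original only reports the elapsed time and leaves success tracking to the caller:

static void awaitCondition(BooleanSupplier condition, long timeout, TimeUnit unit) throws InterruptedException {
    Stopwatch sw = Stopwatch.reusable().start();
    CountDownLatch latch = new CountDownLatch(1);
    Thread watcher = new Thread(() -> {
        try {
            while (!condition.getAsBoolean()) {
                Thread.sleep(100);                   // poll, just as waitForGtidSetsToMatch does
            }
            sw.stop();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            latch.countDown();
        }
    }, "condition-watcher");
    watcher.start();
    if (!latch.await(timeout, unit)) {
        watcher.interrupt();                         // timed out waiting for the condition
    }
    Testing.print("Waited " + sw.durations().statistics().getTotalAsString() + " for the condition to hold.");
}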
Use of io.debezium.util.Stopwatch in project debezium by debezium.
Example from the class LogMinerStreamingChangeEventSource, method execute:
/**
 * This is the main loop that fetches changes from LogMiner.
 *
 * @param context
 *            the change event source context
 * @param partition
 *            the Oracle partition being streamed
 * @param offsetContext
 *            the Oracle offset context
 */
@Override
public void execute(ChangeEventSourceContext context, OraclePartition partition, OracleOffsetContext offsetContext) {
    if (!connectorConfig.getSnapshotMode().shouldStream()) {
        LOGGER.info("Streaming is not enabled in current configuration");
        return;
    }
    try {
        // We explicitly expect auto-commit to be disabled
        jdbcConnection.setAutoCommit(false);
        startScn = offsetContext.getScn();
        snapshotScn = offsetContext.getSnapshotScn();
        Scn firstScn = getFirstScnInLogs(jdbcConnection);
        if (startScn.compareTo(snapshotScn) == 0) {
            // This is the initial run of the streaming change event source.
            // We need to compute the correct start offset for mining. That is not the snapshot offset,
            // but the start offset of the oldest transaction that was still pending when the snapshot
            // was taken.
            computeStartScnForFirstMiningSession(offsetContext, firstScn);
        }
        try (LogWriterFlushStrategy flushStrategy = resolveFlushStrategy()) {
            if (!isContinuousMining && startScn.compareTo(firstScn.subtract(Scn.ONE)) < 0) {
                // startScn is the exclusive lower bound, so it must be >= (firstScn - 1)
                throw new DebeziumException("Online REDO LOG files or archive log files do not contain the offset scn " + startScn + ". Please perform a new snapshot.");
            }
            setNlsSessionParameters(jdbcConnection);
            checkDatabaseAndTableState(jdbcConnection, connectorConfig.getPdbName(), schema);
            try (LogMinerEventProcessor processor = createProcessor(context, partition, offsetContext)) {
                if (archiveLogOnlyMode && !waitForStartScnInArchiveLogs(context, startScn)) {
                    return;
                }
                initializeRedoLogsForMining(jdbcConnection, false, startScn);
                int retryAttempts = 1;
                Stopwatch sw = Stopwatch.accumulating().start();
                while (context.isRunning()) {
                    // Calculate time difference before each mining session to detect time zone offset changes (e.g. DST) on database server
                    streamingMetrics.calculateTimeDifference(getDatabaseSystemTime(jdbcConnection));
                    if (archiveLogOnlyMode && !waitForStartScnInArchiveLogs(context, startScn)) {
                        break;
                    }
                    Instant start = Instant.now();
                    endScn = calculateEndScn(jdbcConnection, startScn, endScn);
                    // In archive-log-only mode the mining range can be empty once mining has caught up; in that
                    // case we want to pause and restart the loop, waiting for a new archive log before proceeding.
                    if (archiveLogOnlyMode && startScn.equals(endScn)) {
                        pauseBetweenMiningSessions();
                        continue;
                    }
                    flushStrategy.flush(jdbcConnection.getCurrentScn());
                    boolean restartRequired = false;
                    if (connectorConfig.getLogMiningMaximumSession().isPresent()) {
                        final Duration totalDuration = sw.stop().durations().statistics().getTotal();
                        if (totalDuration.toMillis() >= connectorConfig.getLogMiningMaximumSession().get().toMillis()) {
                            LOGGER.info("LogMiner session has exceeded maximum session time of '{}', forcing restart.", connectorConfig.getLogMiningMaximumSession());
                            restartRequired = true;
                        } else {
                            // Resume the existing stop watch, we haven't met the criteria yet
                            sw.start();
                        }
                    }
                    if (restartRequired || hasLogSwitchOccurred()) {
                        // This is the way to mitigate PGA leaks.
                        // With one mining session, it grows and maybe there is another way to flush PGA.
                        // At this point we use a new mining session
                        endMiningSession(jdbcConnection, offsetContext);
                        initializeRedoLogsForMining(jdbcConnection, true, startScn);
                        // Log switch or restart required, so re-create a new stop watch
                        sw = Stopwatch.accumulating().start();
                    }
                    if (context.isRunning()) {
                        if (!startMiningSession(jdbcConnection, startScn, endScn, retryAttempts)) {
                            retryAttempts++;
                        } else {
                            retryAttempts = 1;
                            startScn = processor.process(startScn, endScn);
                            streamingMetrics.setCurrentBatchProcessingTime(Duration.between(start, Instant.now()));
                            captureSessionMemoryStatistics(jdbcConnection);
                        }
                        pauseBetweenMiningSessions();
                    }
                }
            }
        }
    } catch (Throwable t) {
        LOGGER.error("Mining session stopped due to error.", t);
        streamingMetrics.incrementErrorCount();
        errorHandler.setProducerThrowable(t);
    } finally {
        LOGGER.info("startScn={}, endScn={}", startScn, endScn);
        LOGGER.info("Streaming metrics dump: {}", streamingMetrics.toString());
        LOGGER.info("Offsets: {}", offsetContext);
    }
}
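The maximum-session check in this loop depends on the stopwatch accumulating time across iterations: each pass stops it, compares the running total with the configured limit, and then either resumes it or, after restarting the mining session, replaces it with a fresh accumulating instance. A stripped-down sketch of that budget pattern (running, maxSessionMs, mineOnce() and restartSession() are hypothetical placeholders):

Stopwatch session = Stopwatch.accumulating().start();
while (running) {
    mineOnce();                                                     // one unit of work per iteration
    final Duration total = session.stop().durations().statistics().getTotal();
    if (total.toMillis() >= maxSessionMs) {
        restartSession();                                           // stands in for endMiningSession(...) + initializeRedoLogsForMining(...)
        session = Stopwatch.accumulating().start();                 // the budget starts over with the new session
    } else {
        session.start();                                            // resume and keep accumulating toward the same budget
    }
}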
Use of io.debezium.util.Stopwatch in project debezium by debezium.
Example from the class KafkaClusterTest, method shouldStartClusterAndAllowAsynchronousProductionAndAutomaticConsumersToUseIt:
@Test
@SkipLongRunning
public void shouldStartClusterAndAllowAsynchronousProductionAndAutomaticConsumersToUseIt() throws Exception {
    Testing.Debug.enable();
    final String topicName = "topicA";
    final CountDownLatch completion = new CountDownLatch(2);
    final int numMessages = 3;
    final AtomicLong messagesRead = new AtomicLong(0);
    // Start a cluster and create a topic ...
    cluster.addBrokers(1).startup();
    cluster.createTopics(topicName);
    // Consume messages asynchronously ...
    Stopwatch sw = Stopwatch.reusable().start();
    cluster.useTo().consumeIntegers(topicName, numMessages, 10, TimeUnit.SECONDS, completion::countDown, (key, value) -> {
        messagesRead.incrementAndGet();
        return true;
    });
    // Produce some messages interactively ...
    cluster.useTo().produce("manual", new StringSerializer(), new IntegerSerializer(), producer -> {
        producer.write(topicName, "key1", 1);
        producer.write(topicName, "key2", 2);
        producer.write(topicName, "key3", 3);
        completion.countDown();
    });
    // Wait for the consumer to complete ...
    if (completion.await(10, TimeUnit.SECONDS)) {
        sw.stop();
        Testing.debug("The consumer completed normally in " + sw.durations());
    } else {
        Testing.debug("Consumer did not complete normally");
    }
    assertThat(messagesRead.get()).isEqualTo(numMessages);
}
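Taken together, these examples use two of the Stopwatch factory methods: the tests and the replica-watcher helper create Stopwatch.reusable(), while the LogMiner streaming source creates Stopwatch.accumulating() and deliberately resumes it with start() or replaces it with a fresh instance when the mining session restarts. That usage suggests the accumulating variant is the one intended to sum several start/stop cycles into a single running total, whereas reusable() is treated here as a per-measurement timer.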