Example 1 with Stopwatch

Use of io.debezium.util.Stopwatch in project debezium by debezium.

From class RecordsStreamProducerIT, method shouldProcessLargerTx.

@Test
public void shouldProcessLargerTx() throws Exception {
    Testing.Print.disable();
    final int numberOfEvents = 1000;
    startConnector();
    waitForStreamingToStart();
    final String topicPrefix = "public.test_table";
    final String topicName = topicName(topicPrefix);
    final Stopwatch stopwatch = Stopwatch.reusable();
    consumer = testConsumer(numberOfEvents);
    // This is not fully accurate, as the measurement also includes generating the data,
    // but it is sufficient to confirm that there is no large difference
    // in runtime between the two cases
    stopwatch.start();
    executeAndWait(IntStream.rangeClosed(2, numberOfEvents + 1).boxed().map(x -> "INSERT INTO test_table (text) VALUES ('insert" + x + "')").collect(Collectors.joining(";")));
    stopwatch.stop();
    final long firstRun = stopwatch.durations().statistics().getTotal().toMillis();
    logger.info("Single tx duration = {} ms", firstRun);
    for (int i = 0; i < numberOfEvents; i++) {
        SourceRecord record = consumer.remove();
        assertEquals(topicName, record.topic());
        VerifyRecord.isValidInsert(record, PK_FIELD, i + 2);
    }
    consumer.expects(numberOfEvents);
    IntStream.rangeClosed(2, numberOfEvents + 1).forEach(x -> TestHelper.execute("INSERT INTO test_table (text) VALUES ('insert" + x + "')"));
    stopwatch.start();
    // There should be no significant difference between the runtime of many small transactions
    // and a single large transaction. We still allow a generous limit, as the runtime is in
    // seconds and we cannot guarantee a stable scheduling environment
    consumer.await(3 * firstRun, TimeUnit.MILLISECONDS);
    stopwatch.stop();
    for (int i = 0; i < numberOfEvents; i++) {
        SourceRecord record = consumer.remove();
        assertEquals(topicName, record.topic());
        VerifyRecord.isValidInsert(record, PK_FIELD, i + 1002);
    }
    logger.info("Many tx duration = {} ms", stopwatch.durations().statistics().getTotal().toMillis());
}
Also used : Stopwatch(io.debezium.util.Stopwatch) SourceRecord(org.apache.kafka.connect.source.SourceRecord) Point(io.debezium.data.geometry.Point) Test(org.junit.Test)
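Distilled from the test above, the reusable-stopwatch pattern is simply start/stop around the measured operation, then read the total from durations().statistics(). A minimal, self-contained sketch, assuming only the Stopwatch calls that appear in the snippet (doWork() is a hypothetical placeholder for the measured workload):

import java.time.Duration;

import io.debezium.util.Stopwatch;

public class StopwatchSketch {

    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.reusable();

        stopwatch.start();
        doWork();
        stopwatch.stop();

        // Read the total measured duration of the completed interval
        Duration total = stopwatch.durations().statistics().getTotal();
        System.out.println("Duration = " + total.toMillis() + " ms");
    }

    // Hypothetical stand-in for the operation being timed
    private static void doWork() throws InterruptedException {
        Thread.sleep(50);
    }
}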

Example 2 with Stopwatch

Use of io.debezium.util.Stopwatch in project debezium by debezium.

From class KafkaClusterTest, method shouldStartClusterAndAllowProducersAndConsumersToUseIt.

@Test
@SkipLongRunning
public void shouldStartClusterAndAllowProducersAndConsumersToUseIt() throws Exception {
    Testing.Debug.enable();
    final String topicName = "topicA";
    final CountDownLatch completion = new CountDownLatch(2);
    final int numMessages = 100;
    final AtomicLong messagesRead = new AtomicLong(0);
    // Start a cluster and create a topic ...
    cluster.addBrokers(1).startup();
    cluster.createTopics(topicName);
    // Consume messages asynchronously ...
    Stopwatch sw = Stopwatch.reusable().start();
    cluster.useTo().consumeIntegers(topicName, numMessages, 10, TimeUnit.SECONDS, completion::countDown, (key, value) -> {
        messagesRead.incrementAndGet();
        return true;
    });
    // Produce some messages asynchronously ...
    cluster.useTo().produceIntegers(topicName, numMessages, 1, completion::countDown);
    // Wait for both to complete ...
    if (completion.await(10, TimeUnit.SECONDS)) {
        sw.stop();
        Testing.debug("Both consumer and producer completed normally in " + sw.durations());
    } else {
        Testing.debug("Consumer and/or producer did not completed normally");
    }
    assertThat(messagesRead.get()).isEqualTo(numMessages);
}
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) Stopwatch(io.debezium.util.Stopwatch) CountDownLatch(java.util.concurrent.CountDownLatch) Test(org.junit.Test) SkipLongRunning(io.debezium.junit.SkipLongRunning)
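The shape of this test generalizes to any pair of asynchronous tasks: a CountDownLatch gates their completion and the stopwatch brackets the await. A minimal test-body sketch under that assumption (startConsumer() and startProducer() are hypothetical stand-ins for the cluster.useTo() calls above):

    final CountDownLatch completion = new CountDownLatch(2);
    Stopwatch sw = Stopwatch.reusable().start();
    startConsumer(completion::countDown);   // hypothetical asynchronous consumer
    startProducer(completion::countDown);   // hypothetical asynchronous producer
    if (completion.await(10, TimeUnit.SECONDS)) {
        sw.stop();
        Testing.debug("Both tasks completed normally in " + sw.durations());
    } else {
        Testing.debug("Tasks did not complete normally");
    }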

Example 3 with Stopwatch

Use of io.debezium.util.Stopwatch in project debezium by debezium.

From class AbstractMySqlConnectorOutputTest, method waitForGtidSetsToMatch.

/**
 * Wait at most the given amount of time for the replica to catch up with the master.
 *
 * @param master the configuration with the {@link MySqlConnectorConfig#HOSTNAME} and {@link MySqlConnectorConfig#PORT}
 *            configuration properties for the MySQL master; may not be null
 * @param replica the configuration with the {@link MySqlConnectorConfig#HOSTNAME} and {@link MySqlConnectorConfig#PORT}
 *            configuration properties for the MySQL replica; may not be null
 * @param timeout the maximum amount of time to wait
 * @param unit the time unit for the timeout
 * @see #waitForGtidSetsToMatch(Configuration, Configuration)
 */
protected static void waitForGtidSetsToMatch(Configuration master, Configuration replica, long timeout, TimeUnit unit) {
    GtidSet masterGtidSet = readAvailableGtidSet(master);
    if (masterGtidSet == null) {
        // no GTIDs ...
        return;
    }
    Stopwatch sw = Stopwatch.reusable().start();
    CountDownLatch latch = new CountDownLatch(1);
    Runnable runner = () -> {
        try {
            GtidSet replicaGtidSet = null;
            while (true) {
                Testing.debug("Checking replica's GTIDs and comparing to primary's...");
                replicaGtidSet = readAvailableGtidSet(replica);
                // The replica will have extra sources, so check whether the replica has everything in the master ...
                if (masterGtidSet.isContainedWithin(replicaGtidSet)) {
                    Testing.debug("Replica's GTIDs are caught up to the primary's.");
                    sw.stop();
                    return;
                }
                Testing.debug("Waiting for replica's GTIDs to catch up to primary's...");
                Thread.sleep(100);
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            latch.countDown();
        }
    };
    // Start the thread to keep checking for the replica's GTID set ...
    Thread checker = new Thread(runner, "mysql-replica-watcher");
    checker.start();
    try {
        if (!latch.await(timeout, unit)) {
            // Timed out waiting for them to match ...
            checker.interrupt();
        }
        Testing.print("Waited a total of " + sw.durations().statistics().getTotalAsString() + " for the replica to catch up to the primary.");
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
Also used : Stopwatch(io.debezium.util.Stopwatch) CountDownLatch(java.util.concurrent.CountDownLatch)
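A hypothetical caller from a test in a subclass might look like the following; the hostnames, port, and timeout are illustrative only, and Configuration.create() is Debezium's standard configuration builder:

    Configuration master = Configuration.create()
            .with(MySqlConnectorConfig.HOSTNAME, "mysql-primary")
            .with(MySqlConnectorConfig.PORT, 3306)
            .build();
    Configuration replica = Configuration.create()
            .with(MySqlConnectorConfig.HOSTNAME, "mysql-replica")
            .with(MySqlConnectorConfig.PORT, 3306)
            .build();
    // Allow up to 30 seconds for the replica's GTID set to contain everything in the primary's
    waitForGtidSetsToMatch(master, replica, 30, TimeUnit.SECONDS);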

Example 4 with Stopwatch

Use of io.debezium.util.Stopwatch in project debezium by debezium.

From class LogMinerStreamingChangeEventSource, method execute.

/**
 * The main loop that polls changes from LogMiner.
 *
 * @param context
 *         the change event source context
 */
@Override
public void execute(ChangeEventSourceContext context, OraclePartition partition, OracleOffsetContext offsetContext) {
    if (!connectorConfig.getSnapshotMode().shouldStream()) {
        LOGGER.info("Streaming is not enabled in current configuration");
        return;
    }
    try {
        // We explicitly expect auto-commit to be disabled
        jdbcConnection.setAutoCommit(false);
        startScn = offsetContext.getScn();
        snapshotScn = offsetContext.getSnapshotScn();
        Scn firstScn = getFirstScnInLogs(jdbcConnection);
        if (startScn.compareTo(snapshotScn) == 0) {
            // This is the initial run of the streaming change event source.
            // We need to compute the correct start offset for mining. That is not the snapshot offset,
            // but the start offset of the oldest transaction that was still pending when the snapshot
            // was taken.
            computeStartScnForFirstMiningSession(offsetContext, firstScn);
        }
        try (LogWriterFlushStrategy flushStrategy = resolveFlushStrategy()) {
            if (!isContinuousMining && startScn.compareTo(firstScn.subtract(Scn.ONE)) < 0) {
                // startScn is the exclusive lower bound, so must be >= (firstScn - 1)
                throw new DebeziumException("Online REDO LOG files or archive log files do not contain the offset scn " + startScn + ".  Please perform a new snapshot.");
            }
            setNlsSessionParameters(jdbcConnection);
            checkDatabaseAndTableState(jdbcConnection, connectorConfig.getPdbName(), schema);
            try (LogMinerEventProcessor processor = createProcessor(context, partition, offsetContext)) {
                if (archiveLogOnlyMode && !waitForStartScnInArchiveLogs(context, startScn)) {
                    return;
                }
                initializeRedoLogsForMining(jdbcConnection, false, startScn);
                int retryAttempts = 1;
                Stopwatch sw = Stopwatch.accumulating().start();
                while (context.isRunning()) {
                    // Calculate time difference before each mining session to detect time zone offset changes (e.g. DST) on database server
                    streamingMetrics.calculateTimeDifference(getDatabaseSystemTime(jdbcConnection));
                    if (archiveLogOnlyMode && !waitForStartScnInArchiveLogs(context, startScn)) {
                        break;
                    }
                    Instant start = Instant.now();
                    endScn = calculateEndScn(jdbcConnection, startScn, endScn);
                    // In archive-log-only mode, mining may have fully caught up with the archive logs,
                    // leaving the start and end SCN identical. In that case we want to pause and
                    // restart the loop, waiting for a new archive log before proceeding.
                    if (archiveLogOnlyMode && startScn.equals(endScn)) {
                        pauseBetweenMiningSessions();
                        continue;
                    }
                    flushStrategy.flush(jdbcConnection.getCurrentScn());
                    boolean restartRequired = false;
                    if (connectorConfig.getLogMiningMaximumSession().isPresent()) {
                        final Duration totalDuration = sw.stop().durations().statistics().getTotal();
                        if (totalDuration.toMillis() >= connectorConfig.getLogMiningMaximumSession().get().toMillis()) {
                            LOGGER.info("LogMiner session has exceeded maximum session time of '{}', forcing restart.", connectorConfig.getLogMiningMaximumSession());
                            restartRequired = true;
                        } else {
                            // resume the existing stop watch, we haven't met the criteria yet
                            sw.start();
                        }
                    }
                    if (restartRequired || hasLogSwitchOccurred()) {
                        // Restarting the mining session is the way to mitigate PGA memory leaks:
                        // with a single long-lived mining session the PGA keeps growing.
                        // At this point we therefore use a new mining session
                        endMiningSession(jdbcConnection, offsetContext);
                        initializeRedoLogsForMining(jdbcConnection, true, startScn);
                        // log switch or restart required, re-create a new stop watch
                        sw = Stopwatch.accumulating().start();
                    }
                    if (context.isRunning()) {
                        if (!startMiningSession(jdbcConnection, startScn, endScn, retryAttempts)) {
                            retryAttempts++;
                        } else {
                            retryAttempts = 1;
                            startScn = processor.process(startScn, endScn);
                            streamingMetrics.setCurrentBatchProcessingTime(Duration.between(start, Instant.now()));
                            captureSessionMemoryStatistics(jdbcConnection);
                        }
                        pauseBetweenMiningSessions();
                    }
                }
            }
        }
    } catch (Throwable t) {
        LOGGER.error("Mining session stopped due to error.", t);
        streamingMetrics.incrementErrorCount();
        errorHandler.setProducerThrowable(t);
    } finally {
        LOGGER.info("startScn={}, endScn={}", startScn, endScn);
        LOGGER.info("Streaming metrics dump: {}", streamingMetrics.toString());
        LOGGER.info("Offsets: {}", offsetContext);
    }
}
Also used : DebeziumException(io.debezium.DebeziumException) LogMinerEventProcessor(io.debezium.connector.oracle.logminer.processor.LogMinerEventProcessor) Instant(java.time.Instant) Stopwatch(io.debezium.util.Stopwatch) LogWriterFlushStrategy(io.debezium.connector.oracle.logminer.logwriter.LogWriterFlushStrategy) CommitLogWriterFlushStrategy(io.debezium.connector.oracle.logminer.logwriter.CommitLogWriterFlushStrategy) RacCommitLogWriterFlushStrategy(io.debezium.connector.oracle.logminer.logwriter.RacCommitLogWriterFlushStrategy) Duration(java.time.Duration) Scn(io.debezium.connector.oracle.Scn)
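The stopwatch logic embedded in this loop reduces to a small pattern: an accumulating watch keeps a running total across iterations, and once the total crosses a configured ceiling the session is restarted with a fresh watch. A minimal sketch of just that pattern, with isRunning(), restartMiningSession(), and doOneMiningIteration() as hypothetical placeholders:

void runWithMaxSessionRestart(Duration maxSession) {
    Stopwatch sw = Stopwatch.accumulating().start();
    while (isRunning()) {
        final Duration total = sw.stop().durations().statistics().getTotal();
        if (total.compareTo(maxSession) >= 0) {
            // Ceiling exceeded: end the current session and begin a new one with a fresh watch
            restartMiningSession();
            sw = Stopwatch.accumulating().start();
        } else {
            // Criteria not met yet; resume accumulating on the same watch
            sw.start();
        }
        doOneMiningIteration();
    }
}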

Example 5 with Stopwatch

Use of io.debezium.util.Stopwatch in project debezium by debezium.

From class KafkaClusterTest, method shouldStartClusterAndAllowAsynchronousProductionAndAutomaticConsumersToUseIt.

@Test
@SkipLongRunning
public void shouldStartClusterAndAllowAsynchronousProductionAndAutomaticConsumersToUseIt() throws Exception {
    Testing.Debug.enable();
    final String topicName = "topicA";
    final CountDownLatch completion = new CountDownLatch(2);
    final int numMessages = 3;
    final AtomicLong messagesRead = new AtomicLong(0);
    // Start a cluster and create a topic ...
    cluster.addBrokers(1).startup();
    cluster.createTopics(topicName);
    // Consume messages asynchronously ...
    Stopwatch sw = Stopwatch.reusable().start();
    cluster.useTo().consumeIntegers(topicName, numMessages, 10, TimeUnit.SECONDS, completion::countDown, (key, value) -> {
        messagesRead.incrementAndGet();
        return true;
    });
    // Produce some messages interactively ...
    cluster.useTo().produce("manual", new StringSerializer(), new IntegerSerializer(), produer -> {
        produer.write(topicName, "key1", 1);
        produer.write(topicName, "key2", 2);
        produer.write(topicName, "key3", 3);
        completion.countDown();
    });
    // Wait for the consumer to complete ...
    if (completion.await(10, TimeUnit.SECONDS)) {
        sw.stop();
        Testing.debug("The consumer completed normally in " + sw.durations());
    } else {
        Testing.debug("Consumer did not completed normally");
    }
    assertThat(messagesRead.get()).isEqualTo(numMessages);
}
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) Stopwatch(io.debezium.util.Stopwatch) CountDownLatch(java.util.concurrent.CountDownLatch) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer) Test(org.junit.Test) SkipLongRunning(io.debezium.junit.SkipLongRunning)

Aggregations

Stopwatch (io.debezium.util.Stopwatch) 6
CountDownLatch (java.util.concurrent.CountDownLatch) 4
Test (org.junit.Test) 4
AtomicLong (java.util.concurrent.atomic.AtomicLong) 3
SkipLongRunning (io.debezium.junit.SkipLongRunning) 2
IntegerSerializer (org.apache.kafka.common.serialization.IntegerSerializer) 2
StringSerializer (org.apache.kafka.common.serialization.StringSerializer) 2
DebeziumException (io.debezium.DebeziumException) 1
Scn (io.debezium.connector.oracle.Scn) 1
CommitLogWriterFlushStrategy (io.debezium.connector.oracle.logminer.logwriter.CommitLogWriterFlushStrategy) 1
LogWriterFlushStrategy (io.debezium.connector.oracle.logminer.logwriter.LogWriterFlushStrategy) 1
RacCommitLogWriterFlushStrategy (io.debezium.connector.oracle.logminer.logwriter.RacCommitLogWriterFlushStrategy) 1
LogMinerEventProcessor (io.debezium.connector.oracle.logminer.processor.LogMinerEventProcessor) 1
Point (io.debezium.data.geometry.Point) 1
Duration (java.time.Duration) 1
Instant (java.time.Instant) 1
SourceRecord (org.apache.kafka.connect.source.SourceRecord) 1