Example 11 with OffsetCommitCallback

Use of org.apache.kafka.clients.consumer.OffsetCommitCallback in project kafka by apache.

The class WorkerSinkTaskTest, method testRequestCommit.

@Test
public void testRequestCommit() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    expectTaskGetTopic(true);
    expectPollInitialAssignment();
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    offsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);
    EasyMock.expect(consumer.assignment()).andReturn(INITIAL_ASSIGNMENT).times(2);
    final Capture<OffsetCommitCallback> callback = EasyMock.newCapture();
    consumer.commitAsync(EasyMock.eq(offsets), EasyMock.capture(callback));
    EasyMock.expectLastCall().andAnswer(() -> {
        callback.getValue().onComplete(offsets, null);
        return null;
    });
    expectConsumerPoll(0);
    sinkTask.put(Collections.emptyList());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    // Initial assignment
    time.sleep(30000L);
    workerTask.iteration();
    assertSinkMetricValue("partition-count", 2);
    // First record delivered
    workerTask.iteration();
    assertSinkMetricValue("partition-count", 2);
    assertSinkMetricValue("sink-record-read-total", 1.0);
    assertSinkMetricValue("sink-record-send-total", 1.0);
    assertSinkMetricValue("sink-record-active-count", 1.0);
    assertSinkMetricValue("sink-record-active-count-max", 1.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.333333);
    assertSinkMetricValue("offset-commit-seq-no", 0.0);
    assertSinkMetricValue("offset-commit-completion-total", 0.0);
    assertSinkMetricValue("offset-commit-skip-total", 0.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 1.0);
    assertTaskMetricValue("batch-size-avg", 0.5);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 0.0);
    // Grab the commit time prior to requesting a commit.
    // This time should advance slightly after committing.
    // KAFKA-8229
    final long previousCommitValue = workerTask.getNextCommit();
    sinkTaskContext.getValue().requestCommit();
    assertTrue(sinkTaskContext.getValue().isCommitRequested());
    assertNotEquals(offsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "lastCommittedOffsets"));
    time.sleep(10000L);
    // triggers the commit
    workerTask.iteration();
    time.sleep(10000L);
    // should have been cleared
    assertFalse(sinkTaskContext.getValue().isCommitRequested());
    assertEquals(offsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "lastCommittedOffsets"));
    assertEquals(0, workerTask.commitFailures());
    // Assert that the next commit time advances slightly: it should move forward
    // by the normal commit interval, less the two 10-second sleeps taken since
    // the interval started.
    // KAFKA-8229
    assertEquals("Should have only advanced by 40 seconds", previousCommitValue + (WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_DEFAULT - 10000L * 2), workerTask.getNextCommit());
    assertSinkMetricValue("partition-count", 2);
    assertSinkMetricValue("sink-record-read-total", 1.0);
    assertSinkMetricValue("sink-record-send-total", 1.0);
    assertSinkMetricValue("sink-record-active-count", 0.0);
    assertSinkMetricValue("sink-record-active-count-max", 1.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.2);
    assertSinkMetricValue("offset-commit-seq-no", 1.0);
    assertSinkMetricValue("offset-commit-completion-total", 1.0);
    assertSinkMetricValue("offset-commit-skip-total", 0.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 1.0);
    assertTaskMetricValue("batch-size-avg", 0.33333);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
    PowerMock.verifyAll();
}
Also used : HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) RetryWithToleranceOperatorTest(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
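
The pattern this test exercises is the standard asynchronous commit: pass the offsets map together with an OffsetCommitCallback to KafkaConsumer.commitAsync and react to the outcome in onComplete. A minimal sketch of that pattern outside the test harness; the class and method names are illustrative, and the consumer and offsets are assumed to be supplied by the caller:

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

class AsyncCommitSketch {
    // Sketch: commit asynchronously and inspect the result in the callback.
    static void commitWithCallback(KafkaConsumer<?, ?> consumer, Map<TopicPartition, OffsetAndMetadata> offsets) {
        // OffsetCommitCallback is a single-method interface, so a lambda works here.
        consumer.commitAsync(offsets, (committedOffsets, error) -> {
            if (error != null) {
                // A real task would log, record a metric, or schedule a retry.
                System.err.println("Async offset commit failed: " + error);
            } else {
                System.out.println("Committed offsets: " + committedOffsets);
            }
        });
    }
}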

Example 12 with OffsetCommitCallback

Use of org.apache.kafka.clients.consumer.OffsetCommitCallback in project wikidata-query-rdf by wikimedia.

The class KafkaStreamConsumerUnitTest, method test_commit_offsets.

@Test
public void test_commit_offsets() {
    TopicPartition topicPartition = new TopicPartition("topic", 0);
    MutationEventData firstEvent = genEvent("Q1", 0, uris("uri:1"), uris(), uris(), uris(), Instant.EPOCH).get(0);
    MutationEventData secondEvent = genEvent("Q1", 1, uris("uri:2"), uris(), uris(), uris(), Instant.EPOCH).get(0);
    MutationEventData thirdEvent = genEvent("Q1", 2, uris("uri:3"), uris(), uris(), uris(), Instant.EPOCH).get(0);
    Map<TopicPartition, OffsetAndMetadata> firstOffsets = Collections.singletonMap(topicPartition, new OffsetAndMetadata(1));
    Map<TopicPartition, OffsetAndMetadata> secondOffsets = Collections.singletonMap(topicPartition, new OffsetAndMetadata(2));
    Map<TopicPartition, OffsetAndMetadata> thirdOffsets = Collections.singletonMap(topicPartition, new OffsetAndMetadata(3));
    // we want real instances as we use AtomicReference
    when(consumer.poll(any())).thenReturn(
            new ConsumerRecords<>(singletonMap(topicPartition, singletonList(new ConsumerRecord<>(TESTED_STREAM, 0, 1, null, firstEvent)))),
            new ConsumerRecords<>(singletonMap(topicPartition, singletonList(new ConsumerRecord<>(TESTED_STREAM, 0, 2, null, secondEvent)))),
            new ConsumerRecords<>(singletonMap(topicPartition, singletonList(new ConsumerRecord<>(TESTED_STREAM, 0, 3, null, thirdEvent)))),
            new ConsumerRecords<>(emptyMap()));
    ArgumentCaptor<OffsetCommitCallback> callback = ArgumentCaptor.forClass(OffsetCommitCallback.class);
    KafkaStreamConsumer streamConsumer = new KafkaStreamConsumer(consumer, topicPartition, chunkDeser, 1, KafkaStreamConsumerMetricsListener.forRegistry(new MetricRegistry()), m -> true);
    StreamConsumer.Batch b = streamConsumer.poll(Duration.ofMillis(10));
    streamConsumer.acknowledge();
    verify(consumer, times(1)).commitAsync(eq(firstOffsets), callback.capture());
    streamConsumer.poll(Duration.ofMillis(10));
    // fail the first commit and verify that we retry
    callback.getValue().onComplete(firstOffsets, new Exception("simulated failure"));
    verify(consumer, times(2)).commitAsync(eq(firstOffsets), callback.capture());
    streamConsumer.acknowledge();
    // fail the first commit a second time after we are ready to commit the second batch
    // and verify that we do not retry
    callback.getValue().onComplete(firstOffsets, new Exception("simulated failure"));
    verify(consumer, times(2)).commitAsync(eq(firstOffsets), callback.capture());
    // also verify that we send commitAsync for the second batch
    verify(consumer, times(1)).commitAsync(eq(secondOffsets), callback.capture());
    // fail the second commit and verify that we retry
    callback.getValue().onComplete(secondOffsets, new Exception("Simulated failure"));
    verify(consumer, times(2)).commitAsync(eq(secondOffsets), callback.capture());
    // the retry succeeded
    callback.getValue().onComplete(secondOffsets, null);
    streamConsumer.poll(Duration.ofMillis(10));
    streamConsumer.acknowledge();
    verify(consumer, times(1)).commitAsync(eq(thirdOffsets), callback.capture());
    streamConsumer.close();
    // verify that we commit synchronously since we have not yet received the ack of our async commit
    verify(consumer, times(1)).commitSync(eq(thirdOffsets));
}
Also used : MetricRegistry(com.codahale.metrics.MetricRegistry) MutationEventData(org.wikidata.query.rdf.updater.MutationEventData) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) Test(org.junit.Test)
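
The behaviour verified here is a retry policy worth spelling out: a failed async commit is retried only while its offsets are still the newest acknowledged ones, a stale failure is dropped once a later batch has been handed to commitAsync, and close() falls back to commitSync because the async ack may not have arrived. A sketch of such a committer; the class structure and field names are assumptions for illustration, not the project's actual implementation:

import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;

class RetryingCommitterSketch {
    private final Consumer<?, ?> consumer;
    // Newest offsets acknowledged by the downstream processor.
    private final AtomicReference<Map<TopicPartition, OffsetAndMetadata>> latest = new AtomicReference<>();

    RetryingCommitterSketch(Consumer<?, ?> consumer) {
        this.consumer = consumer;
    }

    void commitLatest(Map<TopicPartition, OffsetAndMetadata> offsets) {
        latest.set(offsets);
        consumer.commitAsync(offsets, retrying(offsets));
    }

    private OffsetCommitCallback retrying(Map<TopicPartition, OffsetAndMetadata> offsets) {
        return (committed, error) -> {
            // Retry only while these offsets are still the newest ones we want
            // committed; a failure for an older commit is superseded and dropped.
            if (error != null && latest.get() == offsets) {
                consumer.commitAsync(offsets, retrying(offsets));
            }
        };
    }

    void close() {
        // The async ack may not have arrived yet, so commit synchronously.
        Map<TopicPartition, OffsetAndMetadata> offsets = latest.get();
        if (offsets != null) {
            consumer.commitSync(offsets);
        }
        consumer.close();
    }
}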

Example 13 with OffsetCommitCallback

Use of org.apache.kafka.clients.consumer.OffsetCommitCallback in project flink by apache.

The class Kafka09FetcherTest, method ensureOffsetsGetCommitted.

@Test
public void ensureOffsetsGetCommitted() throws Exception {
    // test data
    final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
    final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);
    final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
    testCommitData1.put(testPartition1, 11L);
    testCommitData1.put(testPartition2, 18L);
    final Map<KafkaTopicPartition, Long> testCommitData2 = new HashMap<>();
    testCommitData2.put(testPartition1, 19L);
    testCommitData2.put(testPartition2, 28L);
    final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();
    // ----- the mock consumer with poll(), wakeup(), and commit(A)sync calls -----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked") Map<TopicPartition, OffsetAndMetadata> offsets = (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
            OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];
            commitStore.add(offsets);
            callback.onComplete(offsets, null);
            return null;
        }
    }).when(mockConsumer).commitAsync(Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));
    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked") SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
            sourceContext,
            partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(),
            10, /* watermark interval */
            this.getClass().getClassLoader(),
            "task_name",
            new UnregisteredMetricsGroup(),
            schema,
            new Properties(),
            0L,
            false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // ----- trigger the first offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData1);
    Map<TopicPartition, OffsetAndMetadata> result1 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result1.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(12L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(17L, entry.getValue().offset());
        }
    }
    // ----- trigger the second offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData2);
    Map<TopicPartition, OffsetAndMetadata> result2 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result2.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(20L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(27L, entry.getValue().offset());
        }
    }
    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();
    // check that there were no errors in the fetcher
    final Throwable caughtError = error.get();
    if (caughtError != null && !(caughtError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", caughtError);
    }
}
Also used : UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) HashMap(java.util.HashMap) MultiShotLatch(org.apache.flink.core.testutils.MultiShotLatch) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Properties(java.util.Properties) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) KeyedDeserializationSchemaWrapper(org.apache.flink.streaming.util.serialization.KeyedDeserializationSchemaWrapper) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Handover(org.apache.flink.streaming.connectors.kafka.internal.Handover) AtomicReference(java.util.concurrent.atomic.AtomicReference) KafkaConsumerThread(org.apache.flink.streaming.connectors.kafka.internal.KafkaConsumerThread) Kafka09Fetcher(org.apache.flink.streaming.connectors.kafka.internal.Kafka09Fetcher) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) Mockito.anyLong(org.mockito.Mockito.anyLong) SimpleStringSchema(org.apache.flink.streaming.util.serialization.SimpleStringSchema) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) HashMap(java.util.HashMap) Map(java.util.Map) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
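
Note the off-by-one in the assertions for the tracked partition ("test", 42): the test hands the fetcher internal offset 11 but expects 12 to reach Kafka, matching Kafka's convention that a committed offset names the next record to consume, not the last one processed. A minimal sketch of that convention; the partition, class, and method names are illustrative:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

class NextOffsetCommitSketch {
    // Sketch: commit last processed offset + 1, i.e. the next record to read.
    static void commitAfterProcessing(Consumer<?, ?> consumer, long lastProcessedOffset) {
        Map<TopicPartition, OffsetAndMetadata> toCommit = Collections.singletonMap(
                new TopicPartition("test", 42),
                new OffsetAndMetadata(lastProcessedOffset + 1)); // 11 -> commits 12
        consumer.commitAsync(toCommit, (offsets, error) -> {
            // onComplete: a non-null 'error' signals a failed commit
        });
    }
}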

Example 14 with OffsetCommitCallback

Use of org.apache.kafka.clients.consumer.OffsetCommitCallback in project apache-kafka-on-k8s by banzaicloud.

The class WorkerSinkTask, method doCommitAsync.

private void doCommitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, final int seqno) {
    log.info("{} Committing offsets asynchronously using sequence number {}: {}", this, seqno, offsets);
    OffsetCommitCallback cb = new OffsetCommitCallback() {

        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception error) {
            onCommitCompleted(error, seqno, offsets);
        }
    };
    consumer.commitAsync(offsets, cb);
}
Also used : OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) HashMap(java.util.HashMap) Map(java.util.Map) KafkaException(org.apache.kafka.common.KafkaException) WakeupException(org.apache.kafka.common.errors.WakeupException) RetriableException(org.apache.kafka.connect.errors.RetriableException) ConnectException(org.apache.kafka.connect.errors.ConnectException)
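
Since OffsetCommitCallback has a single method, the anonymous class above can be collapsed to a lambda on Java 8+. A sketch of the equivalent form, reusing the names from the snippet above (log, consumer, onCommitCompleted), so it only compiles inside the same class:

private void doCommitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, final int seqno) {
    log.info("{} Committing offsets asynchronously using sequence number {}: {}", this, seqno, offsets);
    // Equivalent lambda form of the anonymous OffsetCommitCallback above.
    consumer.commitAsync(offsets, (committedOffsets, error) -> onCommitCompleted(error, seqno, committedOffsets));
}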

Example 15 with OffsetCommitCallback

Use of org.apache.kafka.clients.consumer.OffsetCommitCallback in project apache-kafka-on-k8s by banzaicloud.

The class WorkerSinkTaskTest, method testLongRunningCommitWithoutTimeout.

// Test that the commitTimeoutMs timestamp is correctly computed and checked in WorkerSinkTask.iteration()
// when there is a long running commit in process. See KAFKA-4942 for more information.
@Test
public void testLongRunningCommitWithoutTimeout() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    // iter 1
    expectPollInitialAssignment();
    // iter 2
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();
    final Map<TopicPartition, OffsetAndMetadata> workerStartingOffsets = new HashMap<>();
    workerStartingOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET));
    workerStartingOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    final Map<TopicPartition, OffsetAndMetadata> workerCurrentOffsets = new HashMap<>();
    workerCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    // iter 3 - note that we return the current offsets to indicate that they should be committed
    sinkTask.preCommit(workerCurrentOffsets);
    EasyMock.expectLastCall().andReturn(workerCurrentOffsets);
    // We need to delay the result of trying to commit offsets to Kafka via the
    // consumer.commitAsync method, so that we can verify that we do not erroneously
    // mark a commit as timed out while it is still running and within its timeout.
    // To fake this, the commit runs on a separate thread and waits on a latch
    // that we control from the main thread.
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final CountDownLatch latch = new CountDownLatch(1);
    consumer.commitAsync(EasyMock.eq(workerCurrentOffsets), EasyMock.<OffsetCommitCallback>anyObject());
    EasyMock.expectLastCall().andAnswer(new IAnswer<Void>() {

        @SuppressWarnings("unchecked")
        @Override
        public Void answer() throws Throwable {
            // Grab the arguments passed to the consumer.commitAsync method
            final Object[] args = EasyMock.getCurrentArguments();
            final Map<TopicPartition, OffsetAndMetadata> offsets = (Map<TopicPartition, OffsetAndMetadata>) args[0];
            final OffsetCommitCallback callback = (OffsetCommitCallback) args[1];
            executor.execute(new Runnable() {

                @Override
                public void run() {
                    try {
                        latch.await();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                    callback.onComplete(offsets, null);
                }
            });
            return null;
        }
    });
    // no actual consumer.commit() triggered
    expectConsumerPoll(0);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    // iter 1 -- initial assignment
    workerTask.iteration();
    assertEquals(workerStartingOffsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "currentOffsets"));
    assertEquals(workerStartingOffsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "lastCommittedOffsets"));
    time.sleep(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_DEFAULT);
    // iter 2 -- deliver 2 records
    workerTask.iteration();
    sinkTaskContext.getValue().requestCommit();
    // iter 3 -- commit in progress
    workerTask.iteration();
    // Make sure the "committing" flag didn't immediately get flipped back to false due to an incorrect timeout
    assertTrue("Expected worker to be in the process of committing offsets", workerTask.isCommitting());
    // Let the async commit finish and wait for it to end
    latch.countDown();
    executor.shutdown();
    executor.awaitTermination(30, TimeUnit.SECONDS);
    assertEquals(workerCurrentOffsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "currentOffsets"));
    assertEquals(workerCurrentOffsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "lastCommittedOffsets"));
    PowerMock.verifyAll();
}
Also used : HashMap(java.util.HashMap) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) CountDownLatch(java.util.concurrent.CountDownLatch) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) ExecutorService(java.util.concurrent.ExecutorService) Map(java.util.Map) HashMap(java.util.HashMap) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
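
Since EasyMock's IAnswer is also a single-method interface, the expectation above can be written with lambdas, which keeps the latch-gated deferral easier to follow. A sketch of the same expectation in lambda form, using the names from the test (consumer, executor, latch, workerCurrentOffsets):

consumer.commitAsync(EasyMock.eq(workerCurrentOffsets), EasyMock.<OffsetCommitCallback>anyObject());
EasyMock.expectLastCall().andAnswer(() -> {
    // Grab the arguments passed to consumer.commitAsync and defer the callback
    // until the main thread releases the latch.
    final Object[] args = EasyMock.getCurrentArguments();
    @SuppressWarnings("unchecked")
    final Map<TopicPartition, OffsetAndMetadata> offsets = (Map<TopicPartition, OffsetAndMetadata>) args[0];
    final OffsetCommitCallback callback = (OffsetCommitCallback) args[1];
    executor.execute(() -> {
        try {
            latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        callback.onComplete(offsets, null);
    });
    return null;
});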

Aggregations

OffsetCommitCallback (org.apache.kafka.clients.consumer.OffsetCommitCallback) 28
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata) 24
TopicPartition (org.apache.kafka.common.TopicPartition) 24
HashMap (java.util.HashMap) 18
Test (org.junit.Test) 15
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest) 12
Map (java.util.Map) 11
WakeupException (org.apache.kafka.common.errors.WakeupException) 11
KafkaException (org.apache.kafka.common.KafkaException) 10
CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException) 7
RetriableCommitFailedException (org.apache.kafka.clients.consumer.RetriableCommitFailedException) 7
GroupAuthorizationException (org.apache.kafka.common.errors.GroupAuthorizationException) 7
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords) 6
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) 5
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException) 5
RetryWithToleranceOperatorTest (org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) 5
SinkRecord (org.apache.kafka.connect.sink.SinkRecord) 5
AtomicReference (java.util.concurrent.atomic.AtomicReference) 4
ArrayList (java.util.ArrayList) 3
List (java.util.List) 3