
Example 61 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.

The class GlobalStateManagerImplTest, method shouldUsePollMsPlusRequestTimeoutInPollDuringRestoreAndTimeoutWhenNoProgressDuringRestore.

@Test
public void shouldUsePollMsPlusRequestTimeoutInPollDuringRestoreAndTimeoutWhenNoProgressDuringRestore() {
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public synchronized ConsumerRecords<byte[], byte[]> poll(final Duration timeout) {
            time.sleep(timeout.toMillis());
            return super.poll(timeout);
        }
    };
    final HashMap<TopicPartition, Long> startOffsets = new HashMap<>();
    startOffsets.put(t1, 1L);
    final HashMap<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(t1, 3L);
    consumer.updatePartitions(t1.topic(), Collections.singletonList(new PartitionInfo(t1.topic(), t1.partition(), null, null, null)));
    consumer.assign(Collections.singletonList(t1));
    consumer.updateBeginningOffsets(startOffsets);
    consumer.updateEndOffsets(endOffsets);
    streamsConfig = new StreamsConfig(mkMap(mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"), mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"), mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath())));
    stateManager = new GlobalStateManagerImpl(new LogContext("mock"), time, topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
    processorContext.setStateManger(stateManager);
    stateManager.setGlobalProcessorContext(processorContext);
    final long startTime = time.milliseconds();
    final TimeoutException exception = assertThrows(TimeoutException.class, () -> stateManager.initialize());
    assertThat(exception.getMessage(), equalTo("Global task did not make progress to restore state within 301000 ms. Adjust `task.timeout.ms` if needed."));
    assertThat(time.milliseconds() - startTime, equalTo(331_100L));
}
Also used : HashMap(java.util.HashMap) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionInfo(org.apache.kafka.common.PartitionInfo) StreamsConfig(org.apache.kafka.streams.StreamsConfig) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Test(org.junit.Test)
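
The pattern above, a MockConsumer subclass whose poll() advances a mocked clock by the full timeout, is useful outside the Streams test harness whenever timeout handling needs to be deterministic. The following is a minimal, self-contained sketch of the same idea; the topic name, offsets, and poll timeout are illustrative, and MockTime is the test utility from the kafka-clients test artifact.

import java.time.Duration;
import java.util.Collections;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.MockTime;

public class TimeAdvancingMockConsumerSketch {

    public static void main(String[] args) {
        final MockTime time = new MockTime();
        final TopicPartition tp = new TopicPartition("global-topic", 0);

        // MockConsumer whose poll() advances the mocked clock by the full timeout,
        // simulating a broker that never makes progress within the poll.
        final MockConsumer<byte[], byte[]> consumer =
                new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
                    @Override
                    public synchronized ConsumerRecords<byte[], byte[]> poll(final Duration timeout) {
                        time.sleep(timeout.toMillis());
                        return super.poll(timeout);
                    }
                };

        consumer.assign(Collections.singletonList(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        consumer.updateEndOffsets(Collections.singletonMap(tp, 10L));

        final long before = time.milliseconds();
        final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(100));
        System.out.println("records empty: " + records.isEmpty()
                + ", mocked time advanced by " + (time.milliseconds() - before) + " ms");
    }
}

Each poll() now consumes exactly the requested timeout from the mocked clock, so a caller's timeout accounting (as asserted in the test above) can be checked without any real waiting.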

Example 62 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.

The class WorkerSinkTaskTest, method testPollRedeliveryWithConsumerRebalance.

@Test
public void testPollRedeliveryWithConsumerRebalance() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    expectTaskGetTopic(true);
    expectPollInitialAssignment();
    // If a retriable exception is thrown, we should redeliver the same batch, pausing the consumer in the meantime
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Pause
    EasyMock.expect(consumer.assignment()).andReturn(INITIAL_ASSIGNMENT);
    consumer.pause(INITIAL_ASSIGNMENT);
    PowerMock.expectLastCall();
    // Empty consumer poll (all partitions are paused) with rebalance; one new partition is assigned
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet());
        rebalanceListener.getValue().onPartitionsAssigned(Collections.singleton(TOPIC_PARTITION3));
        return ConsumerRecords.empty();
    });
    Set<TopicPartition> newAssignment = new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3));
    EasyMock.expect(consumer.assignment()).andReturn(newAssignment).times(3);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(Collections.singleton(TOPIC_PARTITION3));
    EasyMock.expectLastCall();
    // All partitions are re-paused in order to pause any newly-assigned partitions so that redelivery efforts can continue
    consumer.pause(newAssignment);
    EasyMock.expectLastCall();
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Next delivery attempt fails again
    expectConsumerPoll(0);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Non-empty consumer poll; all initially-assigned partitions are revoked in rebalance, and new partitions are allowed to resume
    ConsumerRecord<byte[], byte[]> newRecord = new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET, RAW_KEY, RAW_VALUE);
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(INITIAL_ASSIGNMENT);
        rebalanceListener.getValue().onPartitionsAssigned(Collections.emptyList());
        return new ConsumerRecords<>(Collections.singletonMap(TOPIC_PARTITION3, Collections.singletonList(newRecord)));
    });
    newAssignment = Collections.singleton(TOPIC_PARTITION3);
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(newAssignment)).times(3);
    final Map<TopicPartition, OffsetAndMetadata> offsets = INITIAL_ASSIGNMENT.stream().collect(Collectors.toMap(Function.identity(), tp -> new OffsetAndMetadata(FIRST_OFFSET)));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);
    sinkTask.close(INITIAL_ASSIGNMENT);
    EasyMock.expectLastCall();
    // All partitions are resumed, as all previously paused-for-redelivery partitions were revoked
    newAssignment.forEach(tp -> {
        consumer.resume(Collections.singleton(tp));
        EasyMock.expectLastCall();
    });
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    PowerMock.verifyAll();
}
Also used : Arrays(java.util.Arrays) MockTime(org.apache.kafka.common.utils.MockTime) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) Schema(org.apache.kafka.connect.data.Schema) Collections.singleton(java.util.Collections.singleton) Arrays.asList(java.util.Arrays.asList) RecordBatch(org.apache.kafka.common.record.RecordBatch) Converter(org.apache.kafka.connect.storage.Converter) After(org.junit.After) Duration(java.time.Duration) Map(java.util.Map) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) MetricName(org.apache.kafka.common.MetricName) Assert.fail(org.junit.Assert.fail) IExpectationSetters(org.easymock.IExpectationSetters) TimestampType(org.apache.kafka.common.record.TimestampType) TopicPartition(org.apache.kafka.common.TopicPartition) Time(org.apache.kafka.common.utils.Time) WakeupException(org.apache.kafka.common.errors.WakeupException) Collection(java.util.Collection) Set(java.util.Set) RetryWithToleranceOperatorTest(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) PowerMock(org.powermock.api.easymock.PowerMock) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Header(org.apache.kafka.common.header.Header) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assert.assertFalse(org.junit.Assert.assertFalse) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) Whitebox(org.powermock.reflect.Whitebox) ConnectorTaskId(org.apache.kafka.connect.util.ConnectorTaskId) Headers(org.apache.kafka.common.header.Headers) RunWith(org.junit.runner.RunWith) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Assert.assertSame(org.junit.Assert.assertSame) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) StandaloneConfig(org.apache.kafka.connect.runtime.standalone.StandaloneConfig) HeaderConverter(org.apache.kafka.connect.storage.HeaderConverter) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) MetricGroup(org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup) PowerMockRunner(org.powermock.modules.junit4.PowerMockRunner) PowerMockIgnore(org.powermock.core.classloader.annotations.PowerMockIgnore) StringConverter(org.apache.kafka.connect.storage.StringConverter) ExecutorService(java.util.concurrent.ExecutorService) SinkConnector(org.apache.kafka.connect.sink.SinkConnector) SinkTask(org.apache.kafka.connect.sink.SinkTask) Before(org.junit.Before) Capture(org.easymock.Capture) Iterator(java.util.Iterator) PluginClassLoader(org.apache.kafka.connect.runtime.isolation.PluginClassLoader) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue) ClusterConfigState(org.apache.kafka.connect.runtime.distributed.ClusterConfigState) Mock(org.powermock.api.easymock.annotation.Mock) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) StatusBackingStore(org.apache.kafka.connect.storage.StatusBackingStore) 
TimeUnit(java.util.concurrent.TimeUnit) RetriableException(org.apache.kafka.connect.errors.RetriableException) CaptureType(org.easymock.CaptureType) Assert.assertNull(org.junit.Assert.assertNull) ConnectException(org.apache.kafka.connect.errors.ConnectException) SinkTaskMetricsGroup(org.apache.kafka.connect.runtime.WorkerSinkTask.SinkTaskMetricsGroup) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) RetriableException(org.apache.kafka.connect.errors.RetriableException) HashSet(java.util.HashSet) RetryWithToleranceOperatorTest(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
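
The andAnswer callbacks above return either ConsumerRecords.empty() or a ConsumerRecords built directly from a map of partition to record list, which is how a mocked poll() hands batches back to the task. A minimal sketch of both constructions; the topic, partition, and payloads are illustrative, and the single-map constructor is the one used in the test above.

import java.util.Collections;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

public class ConsumerRecordsStubSketch {

    public static void main(String[] args) {
        // An "empty poll" result, e.g. what a stubbed poll() can return while partitions are paused.
        final ConsumerRecords<byte[], byte[]> empty = ConsumerRecords.empty();

        // A single-partition, single-record batch, e.g. what a stubbed poll() can return after a rebalance.
        final TopicPartition tp = new TopicPartition("topic", 0);
        final ConsumerRecord<byte[], byte[]> record =
                new ConsumerRecord<>("topic", 0, 0L, "key".getBytes(), "value".getBytes());
        final ConsumerRecords<byte[], byte[]> oneRecord =
                new ConsumerRecords<>(Collections.singletonMap(tp, Collections.singletonList(record)));

        System.out.println("empty count = " + empty.count());          // 0
        System.out.println("oneRecord count = " + oneRecord.count());  // 1
    }
}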

Example 63 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.

The class WorkerSinkTaskTest, method testCommitWithOutOfOrderCallback.

// Verify that when commitAsync is called but the supplied callback is not called by the consumer before a
// rebalance occurs, the async callback does not reset the last committed offset from the rebalance.
// See KAFKA-5731 for more information.
@Test
public void testCommitWithOutOfOrderCallback() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    expectTaskGetTopic(true);
    // iter 1
    expectPollInitialAssignment();
    // iter 2
    expectConsumerPoll(1);
    expectConversionAndTransformation(4);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    final Map<TopicPartition, OffsetAndMetadata> workerStartingOffsets = new HashMap<>();
    workerStartingOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET));
    workerStartingOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    final Map<TopicPartition, OffsetAndMetadata> workerCurrentOffsets = new HashMap<>();
    workerCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    final List<TopicPartition> originalPartitions = new ArrayList<>(INITIAL_ASSIGNMENT);
    final List<TopicPartition> rebalancedPartitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final Map<TopicPartition, OffsetAndMetadata> rebalanceOffsets = new HashMap<>();
    rebalanceOffsets.put(TOPIC_PARTITION, workerCurrentOffsets.get(TOPIC_PARTITION));
    rebalanceOffsets.put(TOPIC_PARTITION2, workerCurrentOffsets.get(TOPIC_PARTITION2));
    rebalanceOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET));
    final Map<TopicPartition, OffsetAndMetadata> postRebalanceCurrentOffsets = new HashMap<>();
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 3));
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET + 2));
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(originalPartitions)).times(2);
    // iter 3 - note that we return the current offset to indicate they should be committed
    sinkTask.preCommit(workerCurrentOffsets);
    EasyMock.expectLastCall().andReturn(workerCurrentOffsets);
    // We need to delay the result of trying to commit offsets to Kafka via the consumer.commitAsync
    // method. We do this so that we can test that the callback is not called until after the rebalance
    // changes the lastCommittedOffsets. To fake this for tests we have the commitAsync build a function
    // that will call the callback with the appropriate parameters, and we'll run that function later.
    final AtomicReference<Runnable> asyncCallbackRunner = new AtomicReference<>();
    final AtomicBoolean asyncCallbackRan = new AtomicBoolean();
    consumer.commitAsync(EasyMock.eq(workerCurrentOffsets), EasyMock.anyObject());
    EasyMock.expectLastCall().andAnswer(() -> {
        // Grab the arguments passed to the consumer.commitAsync method
        final Object[] args = EasyMock.getCurrentArguments();
        @SuppressWarnings("unchecked") final Map<TopicPartition, OffsetAndMetadata> offsets = (Map<TopicPartition, OffsetAndMetadata>) args[0];
        final OffsetCommitCallback callback = (OffsetCommitCallback) args[1];
        asyncCallbackRunner.set(() -> {
            callback.onComplete(offsets, null);
            asyncCallbackRan.set(true);
        });
        return null;
    });
    // Expect the next poll to discover and perform the rebalance, THEN complete the previous callback handler,
    // and then return one record for TP1 and one for TP3.
    final AtomicBoolean rebalanced = new AtomicBoolean();
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        // Rebalance always begins with revoking current partitions ...
        rebalanceListener.getValue().onPartitionsRevoked(originalPartitions);
        // Respond to the rebalance
        Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(TOPIC_PARTITION, rebalanceOffsets.get(TOPIC_PARTITION).offset());
        offsets.put(TOPIC_PARTITION2, rebalanceOffsets.get(TOPIC_PARTITION2).offset());
        offsets.put(TOPIC_PARTITION3, rebalanceOffsets.get(TOPIC_PARTITION3).offset());
        sinkTaskContext.getValue().offset(offsets);
        rebalanceListener.getValue().onPartitionsAssigned(rebalancedPartitions);
        rebalanced.set(true);
        // Run the previous async commit handler
        asyncCallbackRunner.get().run();
        // And prep the two records to return
        long timestamp = RecordBatch.NO_TIMESTAMP;
        TimestampType timestampType = TimestampType.NO_TIMESTAMP_TYPE;
        List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
        records.add(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 + 1, timestamp, timestampType, 0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()));
        records.add(new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET + recordsReturnedTp3 + 1, timestamp, timestampType, 0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()));
        recordsReturnedTp1 += 1;
        recordsReturnedTp3 += 1;
        return new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(TOPIC, PARTITION), records));
    });
    // onPartitionsRevoked
    sinkTask.preCommit(workerCurrentOffsets);
    EasyMock.expectLastCall().andReturn(workerCurrentOffsets);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    sinkTask.close(new ArrayList<>(workerCurrentOffsets.keySet()));
    EasyMock.expectLastCall();
    consumer.commitSync(workerCurrentOffsets);
    EasyMock.expectLastCall();
    // onPartitionsAssigned - step 1
    final long offsetTp1 = rebalanceOffsets.get(TOPIC_PARTITION).offset();
    final long offsetTp2 = rebalanceOffsets.get(TOPIC_PARTITION2).offset();
    final long offsetTp3 = rebalanceOffsets.get(TOPIC_PARTITION3).offset();
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(offsetTp1);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(offsetTp2);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(offsetTp3);
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(rebalancedPartitions)).times(6);
    // onPartitionsAssigned - step 2
    sinkTask.open(EasyMock.eq(rebalancedPartitions));
    EasyMock.expectLastCall();
    // onPartitionsAssigned - step 3 rewind
    consumer.seek(TOPIC_PARTITION, offsetTp1);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION2, offsetTp2);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION3, offsetTp3);
    EasyMock.expectLastCall();
    // iter 4 - note that we return the current offset to indicate they should be committed
    sinkTask.preCommit(postRebalanceCurrentOffsets);
    EasyMock.expectLastCall().andReturn(postRebalanceCurrentOffsets);
    final Capture<OffsetCommitCallback> callback = EasyMock.newCapture();
    consumer.commitAsync(EasyMock.eq(postRebalanceCurrentOffsets), EasyMock.capture(callback));
    EasyMock.expectLastCall().andAnswer(() -> {
        callback.getValue().onComplete(postRebalanceCurrentOffsets, null);
        return null;
    });
    // no actual consumer.commit() triggered
    expectConsumerPoll(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    // iter 1 -- initial assignment
    workerTask.iteration();
    assertEquals(workerStartingOffsets, Whitebox.getInternalState(workerTask, "currentOffsets"));
    assertEquals(workerStartingOffsets, Whitebox.getInternalState(workerTask, "lastCommittedOffsets"));
    time.sleep(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_DEFAULT);
    // iter 2 -- deliver 2 records
    workerTask.iteration();
    sinkTaskContext.getValue().requestCommit();
    // iter 3 -- commit in progress
    workerTask.iteration();
    assertSinkMetricValue("partition-count", 3);
    assertSinkMetricValue("sink-record-read-total", 3.0);
    assertSinkMetricValue("sink-record-send-total", 3.0);
    assertSinkMetricValue("sink-record-active-count", 4.0);
    assertSinkMetricValue("sink-record-active-count-max", 4.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.71429);
    assertSinkMetricValue("offset-commit-seq-no", 2.0);
    assertSinkMetricValue("offset-commit-completion-total", 1.0);
    assertSinkMetricValue("offset-commit-skip-total", 1.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 2.0);
    assertTaskMetricValue("batch-size-avg", 1.0);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
    assertTrue(asyncCallbackRan.get());
    assertTrue(rebalanced.get());
    // Check that the offsets were not reset by the out-of-order async commit callback
    assertEquals(postRebalanceCurrentOffsets, Whitebox.getInternalState(workerTask, "currentOffsets"));
    assertEquals(rebalanceOffsets, Whitebox.getInternalState(workerTask, "lastCommittedOffsets"));
    time.sleep(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_DEFAULT);
    sinkTaskContext.getValue().requestCommit();
    // iter 4 -- commit in progress
    workerTask.iteration();
    // Check that the offsets were not reset by the out-of-order async commit callback
    assertEquals(postRebalanceCurrentOffsets, Whitebox.getInternalState(workerTask, "currentOffsets"));
    assertEquals(postRebalanceCurrentOffsets, Whitebox.getInternalState(workerTask, "lastCommittedOffsets"));
    assertSinkMetricValue("partition-count", 3);
    assertSinkMetricValue("sink-record-read-total", 4.0);
    assertSinkMetricValue("sink-record-send-total", 4.0);
    assertSinkMetricValue("sink-record-active-count", 0.0);
    assertSinkMetricValue("sink-record-active-count-max", 4.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.5555555);
    assertSinkMetricValue("offset-commit-seq-no", 3.0);
    assertSinkMetricValue("offset-commit-completion-total", 2.0);
    assertSinkMetricValue("offset-commit-skip-total", 1.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 2.0);
    assertTaskMetricValue("batch-size-avg", 1.0);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
    PowerMock.verifyAll();
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) TimestampType(org.apache.kafka.common.record.TimestampType) HashSet(java.util.HashSet) AtomicReference(java.util.concurrent.atomic.AtomicReference) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) TopicPartition(org.apache.kafka.common.TopicPartition) Map(java.util.Map) HashMap(java.util.HashMap) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) RetryWithToleranceOperatorTest(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
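
The scenario exercised here (KAFKA-5731) is that a commitAsync callback may fire only after a rebalance has already committed newer offsets, and the late callback must not win. The sketch below is not the WorkerSinkTask implementation; it is a hypothetical, minimal illustration of the general guard: a monotonically increasing commit sequence number that lets a stale callback be recognised and ignored.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

/**
 * Illustrative sketch only (not the WorkerSinkTask code): protect committed-offset
 * bookkeeping from an async commit callback that arrives after newer offsets were
 * already committed, e.g. synchronously during a rebalance.
 */
public class GuardedAsyncCommitter {

    private final Consumer<byte[], byte[]> consumer;
    private long commitSeqno = 0;         // incremented for every commit request
    private long lastAppliedSeqno = 0;    // highest seqno whose result was applied
    private Map<TopicPartition, OffsetAndMetadata> lastCommittedOffsets = new HashMap<>();

    public GuardedAsyncCommitter(Consumer<byte[], byte[]> consumer) {
        this.consumer = consumer;
    }

    public synchronized void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets) {
        final long seqno = ++commitSeqno;
        consumer.commitAsync(offsets, (committed, error) -> onComplete(seqno, committed, error));
    }

    // Called after a synchronous commit, e.g. from the rebalance listener's onPartitionsRevoked.
    public synchronized void recordSyncCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        lastAppliedSeqno = ++commitSeqno;
        lastCommittedOffsets = new HashMap<>(offsets);
    }

    private synchronized void onComplete(long seqno,
                                         Map<TopicPartition, OffsetAndMetadata> offsets,
                                         Exception error) {
        // A callback older than the last applied commit is stale and must not overwrite newer state.
        if (error == null && seqno > lastAppliedSeqno) {
            lastAppliedSeqno = seqno;
            lastCommittedOffsets = new HashMap<>(offsets);
        }
    }

    public synchronized Map<TopicPartition, OffsetAndMetadata> lastCommittedOffsets() {
        return lastCommittedOffsets;
    }
}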

Example 64 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.

The class WorkerSinkTaskTest, method expectConsumerPoll.

private void expectConsumerPoll(final int numMessages, final long timestamp, final TimestampType timestampType, Headers headers) {
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
        for (int i = 0; i < numMessages; i++) records.add(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 + i, timestamp, timestampType, 0, 0, RAW_KEY, RAW_VALUE, headers, Optional.empty()));
        recordsReturnedTp1 += numMessages;
        return new ConsumerRecords<>(numMessages > 0 ? Collections.singletonMap(new TopicPartition(TOPIC, PARTITION), records) : Collections.emptyMap());
    });
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)
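
expectConsumerPoll stubs consumer.poll with EasyMock; the same effect, feeding deterministic records to the next poll(), can also be achieved with the client library's own MockConsumer and no mocking framework. A minimal sketch under that assumption; topic, offsets, and payloads are illustrative.

import java.time.Duration;
import java.util.Collections;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerPollSketch {

    public static void main(String[] args) {
        final TopicPartition tp = new TopicPartition("topic", 0);
        final MockConsumer<byte[], byte[]> consumer =
                new MockConsumer<>(OffsetResetStrategy.EARLIEST);

        consumer.assign(Collections.singletonList(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));

        // Queue two records to be returned by the next poll(), similar to what expectConsumerPoll stubs above.
        consumer.addRecord(new ConsumerRecord<>("topic", 0, 0L, "k1".getBytes(), "v1".getBytes()));
        consumer.addRecord(new ConsumerRecord<>("topic", 0, 1L, "k2".getBytes(), "v2".getBytes()));

        final ConsumerRecords<byte[], byte[]> batch = consumer.poll(Duration.ofMillis(10));
        for (ConsumerRecord<byte[], byte[]> record : batch) {
            System.out.println(record.topic() + "-" + record.partition()
                    + "@" + record.offset() + ": " + new String(record.value()));
        }
    }
}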

Example 65 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.

The class SmokeTestDriver, method verify.

public static VerificationResult verify(final String kafka, final Map<String, Set<Integer>> inputs, final int maxRecordsPerKey) {
    final Properties props = new Properties();
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "verifier");
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, NumberDeserializer.class);
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    final KafkaConsumer<String, Number> consumer = new KafkaConsumer<>(props);
    final List<TopicPartition> partitions = getAllPartitions(consumer, TOPICS);
    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);
    final int recordsGenerated = inputs.size() * maxRecordsPerKey;
    int recordsProcessed = 0;
    final Map<String, AtomicInteger> processed = Stream.of(TOPICS).collect(Collectors.toMap(t -> t, t -> new AtomicInteger(0)));
    final Map<String, Map<String, LinkedList<ConsumerRecord<String, Number>>>> events = new HashMap<>();
    VerificationResult verificationResult = new VerificationResult(false, "no results yet");
    int retry = 0;
    final long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < TimeUnit.MINUTES.toMillis(6)) {
        final ConsumerRecords<String, Number> records = consumer.poll(Duration.ofSeconds(5));
        if (records.isEmpty() && recordsProcessed >= recordsGenerated) {
            verificationResult = verifyAll(inputs, events, false);
            if (verificationResult.passed()) {
                break;
            } else if (retry++ > MAX_RECORD_EMPTY_RETRIES) {
                System.out.println(Instant.now() + " Didn't get any more results, verification hasn't passed, and out of retries.");
                break;
            } else {
                System.out.println(Instant.now() + " Didn't get any more results, but verification hasn't passed (yet). Retrying..." + retry);
            }
        } else {
            System.out.println(Instant.now() + " Get some more results from " + records.partitions() + ", resetting retry.");
            retry = 0;
            for (final ConsumerRecord<String, Number> record : records) {
                final String key = record.key();
                final String topic = record.topic();
                processed.get(topic).incrementAndGet();
                if (topic.equals("echo")) {
                    recordsProcessed++;
                    if (recordsProcessed % 100 == 0) {
                        System.out.println("Echo records processed = " + recordsProcessed);
                    }
                }
                events.computeIfAbsent(topic, t -> new HashMap<>()).computeIfAbsent(key, k -> new LinkedList<>()).add(record);
            }
            System.out.println(processed);
        }
    }
    consumer.close();
    final long finished = System.currentTimeMillis() - start;
    System.out.println("Verification time=" + finished);
    System.out.println("-------------------");
    System.out.println("Result Verification");
    System.out.println("-------------------");
    System.out.println("recordGenerated=" + recordsGenerated);
    System.out.println("recordProcessed=" + recordsProcessed);
    if (recordsProcessed > recordsGenerated) {
        System.out.println("PROCESSED-MORE-THAN-GENERATED");
    } else if (recordsProcessed < recordsGenerated) {
        System.out.println("PROCESSED-LESS-THAN-GENERATED");
    }
    boolean success;
    final Map<String, Set<Number>> received = events.get("echo").entrySet().stream().map(entry -> mkEntry(entry.getKey(), entry.getValue().stream().map(ConsumerRecord::value).collect(Collectors.toSet()))).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    success = inputs.equals(received);
    if (success) {
        System.out.println("ALL-RECORDS-DELIVERED");
    } else {
        int missedCount = 0;
        for (final Map.Entry<String, Set<Integer>> entry : inputs.entrySet()) {
            missedCount += received.get(entry.getKey()).size();
        }
        System.out.println("missedRecords=" + missedCount);
    }
    // give it one more try if it's not already passing.
    if (!verificationResult.passed()) {
        verificationResult = verifyAll(inputs, events, true);
    }
    success &= verificationResult.passed();
    System.out.println(verificationResult.result());
    System.out.println(success ? "SUCCESS" : "FAILURE");
    return verificationResult;
}
Also used : Arrays(java.util.Arrays) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Exit(org.apache.kafka.common.utils.Exit) ByteArrayOutputStream(java.io.ByteArrayOutputStream) HashMap(java.util.HashMap) Random(java.util.Random) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) Function(java.util.function.Function) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Duration(java.time.Duration) Map(java.util.Map) Deserializer(org.apache.kafka.common.serialization.Deserializer) LinkedList(java.util.LinkedList) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) Utils(org.apache.kafka.common.utils.Utils) PrintStream(java.io.PrintStream) TopicPartition(org.apache.kafka.common.TopicPartition) Collections.emptyMap(java.util.Collections.emptyMap) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Properties(java.util.Properties) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) PartitionInfo(org.apache.kafka.common.PartitionInfo) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Stream(java.util.stream.Stream) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Callback(org.apache.kafka.clients.producer.Callback) Collections(java.util.Collections) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) Properties(java.util.Properties) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) LinkedList(java.util.LinkedList) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TopicPartition(org.apache.kafka.common.TopicPartition) HashMap(java.util.HashMap) Map(java.util.Map) Collections.emptyMap(java.util.Collections.emptyMap)
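
getAllPartitions(consumer, TOPICS) is referenced above but not reproduced on this page. A plausible minimal version, assuming it simply expands each topic into its partitions via the consumer's metadata, could look like the sketch below; the class and method names are hypothetical.

import java.util.ArrayList;
import java.util.List;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class PartitionDiscoverySketch {

    // Hypothetical stand-in for the getAllPartitions(consumer, TOPICS) helper used above:
    // resolve every partition of every topic from the consumer's metadata.
    public static List<TopicPartition> allPartitions(Consumer<?, ?> consumer, String... topics) {
        final List<TopicPartition> partitions = new ArrayList<>();
        for (String topic : topics) {
            for (PartitionInfo info : consumer.partitionsFor(topic)) {
                partitions.add(new TopicPartition(info.topic(), info.partition()));
            }
        }
        return partitions;
    }
}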

Aggregations

Classes most frequently used together with ConsumerRecords across the indexed examples, with usage counts:

ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 67
TopicPartition (org.apache.kafka.common.TopicPartition): 57
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 46
HashMap (java.util.HashMap): 37
ArrayList (java.util.ArrayList): 32
Test (org.junit.Test): 30
List (java.util.List): 29
Map (java.util.Map): 21
Properties (java.util.Properties): 16
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 16
Duration (java.time.Duration): 13
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 12
Collection (java.util.Collection): 11
Collections (java.util.Collections): 11
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 11
Set (java.util.Set): 9
AtomicReference (java.util.concurrent.atomic.AtomicReference): 9
Collectors (java.util.stream.Collectors): 9
HashSet (java.util.HashSet): 8
ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener): 8