Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class GlobalStateManagerImplTest, the method shouldUsePollMsPlusRequestTimeoutInPollDuringRestoreAndTimeoutWhenNoProgressDuringRestore:
@Test
public void shouldUsePollMsPlusRequestTimeoutInPollDuringRestoreAndTimeoutWhenNoProgressDuringRestore() {
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public synchronized ConsumerRecords<byte[], byte[]> poll(final Duration timeout) {
            time.sleep(timeout.toMillis());
            return super.poll(timeout);
        }
    };
    final HashMap<TopicPartition, Long> startOffsets = new HashMap<>();
    startOffsets.put(t1, 1L);
    final HashMap<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(t1, 3L);
    consumer.updatePartitions(t1.topic(), Collections.singletonList(new PartitionInfo(t1.topic(), t1.partition(), null, null, null)));
    consumer.assign(Collections.singletonList(t1));
    consumer.updateBeginningOffsets(startOffsets);
    consumer.updateEndOffsets(endOffsets);
    streamsConfig = new StreamsConfig(mkMap(
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"),
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"),
        mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath())
    ));
    stateManager = new GlobalStateManagerImpl(new LogContext("mock"), time, topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
    processorContext.setStateManger(stateManager);
    stateManager.setGlobalProcessorContext(processorContext);
    final long startTime = time.milliseconds();
    final TimeoutException exception = assertThrows(TimeoutException.class, () -> stateManager.initialize());
    assertThat(exception.getMessage(), equalTo("Global task did not make progress to restore state within 301000 ms. Adjust `task.timeout.ms` if needed."));
    assertThat(time.milliseconds() - startTime, equalTo(331_100L));
}
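The heart of this test is the anonymous MockConsumer subclass: every poll() advances the mock clock by its full timeout, so a restore loop that makes no progress exhausts task.timeout.ms deterministically and without any real sleeping. A minimal standalone sketch of the same trick (the class name, topic, and offsets below are illustrative and not taken from the test; MockTime comes from Kafka's test utilities):

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.MockTime;

public class TimeAdvancingMockConsumerExample {
    public static void main(final String[] args) {
        final MockTime time = new MockTime();
        // poll() consumes mock time instead of wall-clock time, so timeout logic
        // under test reaches its deadline without any real sleeping.
        final MockConsumer<byte[], byte[]> consumer =
            new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
                @Override
                public synchronized ConsumerRecords<byte[], byte[]> poll(final Duration timeout) {
                    time.sleep(timeout.toMillis());
                    return super.poll(timeout);
                }
            };
        final TopicPartition tp = new TopicPartition("topic", 0);
        consumer.assign(Collections.singletonList(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        final long before = time.milliseconds();
        consumer.poll(Duration.ofMillis(30_000)); // returns nothing, burns 30s of mock time
        System.out.println("mock time advanced by " + (time.milliseconds() - before) + " ms");
    }
}

Because the restore loop in the test keeps polling until task.timeout.ms elapses on this mock clock, both the elapsed mock time and the timeout message can be asserted exactly.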
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class WorkerSinkTaskTest, the method testPollRedeliveryWithConsumerRebalance:
@Test
public void testPollRedeliveryWithConsumerRebalance() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    expectTaskGetTopic(true);
    expectPollInitialAssignment();
    // If a retriable exception is thrown, we should redeliver the same batch, pausing the consumer in the meantime
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Pause
    EasyMock.expect(consumer.assignment()).andReturn(INITIAL_ASSIGNMENT);
    consumer.pause(INITIAL_ASSIGNMENT);
    PowerMock.expectLastCall();
    // Empty consumer poll (all partitions are paused) with rebalance; one new partition is assigned
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet());
        rebalanceListener.getValue().onPartitionsAssigned(Collections.singleton(TOPIC_PARTITION3));
        return ConsumerRecords.empty();
    });
    Set<TopicPartition> newAssignment = new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3));
    EasyMock.expect(consumer.assignment()).andReturn(newAssignment).times(3);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(Collections.singleton(TOPIC_PARTITION3));
    EasyMock.expectLastCall();
    // All partitions are re-paused in order to pause any newly-assigned partitions so that redelivery efforts can continue
    consumer.pause(newAssignment);
    EasyMock.expectLastCall();
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Next delivery attempt fails again
    expectConsumerPoll(0);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Non-empty consumer poll; all initially-assigned partitions are revoked in rebalance, and new partitions are allowed to resume
    ConsumerRecord<byte[], byte[]> newRecord = new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET, RAW_KEY, RAW_VALUE);
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(INITIAL_ASSIGNMENT);
        rebalanceListener.getValue().onPartitionsAssigned(Collections.emptyList());
        return new ConsumerRecords<>(Collections.singletonMap(TOPIC_PARTITION3, Collections.singletonList(newRecord)));
    });
    newAssignment = Collections.singleton(TOPIC_PARTITION3);
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(newAssignment)).times(3);
    final Map<TopicPartition, OffsetAndMetadata> offsets = INITIAL_ASSIGNMENT.stream()
        .collect(Collectors.toMap(Function.identity(), tp -> new OffsetAndMetadata(FIRST_OFFSET)));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);
    sinkTask.close(INITIAL_ASSIGNMENT);
    EasyMock.expectLastCall();
    // All partitions are resumed, as all previously paused-for-redelivery partitions were revoked
    newAssignment.forEach(tp -> {
        consumer.resume(Collections.singleton(tp));
        EasyMock.expectLastCall();
    });
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    PowerMock.verifyAll();
}
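The behavior under test is a general consumer pattern: when a batch fails with a retriable exception, pause every assigned partition so poll() returns nothing new, keep retrying the same batch, and re-apply the pause after any rebalance so newly assigned partitions cannot slip records into the retry window. A rough sketch of that pattern in a plain consumer loop (PauseForRedelivery and the deliver callback are hypothetical, not WorkerSinkTask's actual code):

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.connect.errors.RetriableException;

public class PauseForRedelivery {
    // Deliver each batch until it succeeds. While retrying, the whole current
    // assignment is paused so poll() returns no new records; the pause is
    // re-applied after every failure because a rebalance inside poll() may have
    // added partitions that must not deliver new records mid-retry.
    public static <K, V> void run(final Consumer<K, V> consumer,
                                  final java.util.function.Consumer<ConsumerRecords<K, V>> deliver) {
        ConsumerRecords<K, V> pending = null;
        while (true) {
            // While paused this returns an empty batch, but it still drives the
            // rebalance protocol, which is exactly what the test above exercises.
            final ConsumerRecords<K, V> polled = consumer.poll(Duration.ofMillis(500));
            final ConsumerRecords<K, V> batch = pending != null ? pending : polled;
            try {
                deliver.accept(batch);
                pending = null;
                consumer.resume(consumer.assignment());
            } catch (final RetriableException e) {
                pending = batch;
                consumer.pause(consumer.assignment()); // covers newly assigned partitions too
            }
        }
    }
}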
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class WorkerSinkTaskTest, the method testCommitWithOutOfOrderCallback:
// Verify that when commitAsync is called but the supplied callback is not called by the consumer before a
// rebalance occurs, the async callback does not reset the last committed offset from the rebalance.
// See KAFKA-5731 for more information.
@Test
public void testCommitWithOutOfOrderCallback() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    expectTaskGetTopic(true);
    // iter 1
    expectPollInitialAssignment();
    // iter 2
    expectConsumerPoll(1);
    expectConversionAndTransformation(4);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    final Map<TopicPartition, OffsetAndMetadata> workerStartingOffsets = new HashMap<>();
    workerStartingOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET));
    workerStartingOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    final Map<TopicPartition, OffsetAndMetadata> workerCurrentOffsets = new HashMap<>();
    workerCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    final List<TopicPartition> originalPartitions = new ArrayList<>(INITIAL_ASSIGNMENT);
    final List<TopicPartition> rebalancedPartitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final Map<TopicPartition, OffsetAndMetadata> rebalanceOffsets = new HashMap<>();
    rebalanceOffsets.put(TOPIC_PARTITION, workerCurrentOffsets.get(TOPIC_PARTITION));
    rebalanceOffsets.put(TOPIC_PARTITION2, workerCurrentOffsets.get(TOPIC_PARTITION2));
    rebalanceOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET));
    final Map<TopicPartition, OffsetAndMetadata> postRebalanceCurrentOffsets = new HashMap<>();
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 3));
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET + 2));
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(originalPartitions)).times(2);
    // iter 3 - note that we return the current offsets to indicate they should be committed
    sinkTask.preCommit(workerCurrentOffsets);
    EasyMock.expectLastCall().andReturn(workerCurrentOffsets);
    // We need to delay the result of trying to commit offsets to Kafka via the consumer.commitAsync
    // method. We do this so that we can test that the callback is not called until after the rebalance
    // changes the lastCommittedOffsets. To fake this for tests we have the commitAsync build a function
    // that will call the callback with the appropriate parameters, and we'll run that function later.
    final AtomicReference<Runnable> asyncCallbackRunner = new AtomicReference<>();
    final AtomicBoolean asyncCallbackRan = new AtomicBoolean();
    consumer.commitAsync(EasyMock.eq(workerCurrentOffsets), EasyMock.anyObject());
    EasyMock.expectLastCall().andAnswer(() -> {
        // Grab the arguments passed to the consumer.commitAsync method
        final Object[] args = EasyMock.getCurrentArguments();
        @SuppressWarnings("unchecked")
        final Map<TopicPartition, OffsetAndMetadata> offsets = (Map<TopicPartition, OffsetAndMetadata>) args[0];
        final OffsetCommitCallback callback = (OffsetCommitCallback) args[1];
        asyncCallbackRunner.set(() -> {
            callback.onComplete(offsets, null);
            asyncCallbackRan.set(true);
        });
        return null;
    });
    // Expect the next poll to discover and perform the rebalance, THEN complete the previous callback handler,
    // and then return one record for TP1 and one for TP3.
    final AtomicBoolean rebalanced = new AtomicBoolean();
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        // Rebalance always begins with revoking current partitions ...
        rebalanceListener.getValue().onPartitionsRevoked(originalPartitions);
        // Respond to the rebalance
        Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(TOPIC_PARTITION, rebalanceOffsets.get(TOPIC_PARTITION).offset());
        offsets.put(TOPIC_PARTITION2, rebalanceOffsets.get(TOPIC_PARTITION2).offset());
        offsets.put(TOPIC_PARTITION3, rebalanceOffsets.get(TOPIC_PARTITION3).offset());
        sinkTaskContext.getValue().offset(offsets);
        rebalanceListener.getValue().onPartitionsAssigned(rebalancedPartitions);
        rebalanced.set(true);
        // Run the previous async commit handler
        asyncCallbackRunner.get().run();
        // And prep the two records to return
        long timestamp = RecordBatch.NO_TIMESTAMP;
        TimestampType timestampType = TimestampType.NO_TIMESTAMP_TYPE;
        List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
        records.add(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 + 1, timestamp, timestampType, 0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()));
        records.add(new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET + recordsReturnedTp3 + 1, timestamp, timestampType, 0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()));
        recordsReturnedTp1 += 1;
        recordsReturnedTp3 += 1;
        // Note: both records are keyed under TP1 in the map; WorkerSinkTask iterates
        // every record in the batch, so the TP3 record is still delivered.
        return new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(TOPIC, PARTITION), records));
    });
    // onPartitionsRevoked
    sinkTask.preCommit(workerCurrentOffsets);
    EasyMock.expectLastCall().andReturn(workerCurrentOffsets);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    sinkTask.close(new ArrayList<>(workerCurrentOffsets.keySet()));
    EasyMock.expectLastCall();
    consumer.commitSync(workerCurrentOffsets);
    EasyMock.expectLastCall();
    // onPartitionsAssigned - step 1
    final long offsetTp1 = rebalanceOffsets.get(TOPIC_PARTITION).offset();
    final long offsetTp2 = rebalanceOffsets.get(TOPIC_PARTITION2).offset();
    final long offsetTp3 = rebalanceOffsets.get(TOPIC_PARTITION3).offset();
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(offsetTp1);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(offsetTp2);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(offsetTp3);
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(rebalancedPartitions)).times(6);
    // onPartitionsAssigned - step 2
    sinkTask.open(EasyMock.eq(rebalancedPartitions));
    EasyMock.expectLastCall();
    // onPartitionsAssigned - step 3 rewind
    consumer.seek(TOPIC_PARTITION, offsetTp1);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION2, offsetTp2);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION3, offsetTp3);
    EasyMock.expectLastCall();
    // iter 4 - note that we return the current offsets to indicate they should be committed
    sinkTask.preCommit(postRebalanceCurrentOffsets);
    EasyMock.expectLastCall().andReturn(postRebalanceCurrentOffsets);
    final Capture<OffsetCommitCallback> callback = EasyMock.newCapture();
    consumer.commitAsync(EasyMock.eq(postRebalanceCurrentOffsets), EasyMock.capture(callback));
    EasyMock.expectLastCall().andAnswer(() -> {
        callback.getValue().onComplete(postRebalanceCurrentOffsets, null);
        return null;
    });
    // no actual consumer.commit() triggered
    expectConsumerPoll(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    // iter 1 -- initial assignment
    workerTask.iteration();
    assertEquals(workerStartingOffsets, Whitebox.getInternalState(workerTask, "currentOffsets"));
    assertEquals(workerStartingOffsets, Whitebox.getInternalState(workerTask, "lastCommittedOffsets"));
    time.sleep(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_DEFAULT);
    // iter 2 -- deliver 2 records
    workerTask.iteration();
    sinkTaskContext.getValue().requestCommit();
    // iter 3 -- commit in progress
    workerTask.iteration();
    assertSinkMetricValue("partition-count", 3);
    assertSinkMetricValue("sink-record-read-total", 3.0);
    assertSinkMetricValue("sink-record-send-total", 3.0);
    assertSinkMetricValue("sink-record-active-count", 4.0);
    assertSinkMetricValue("sink-record-active-count-max", 4.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.71429);
    assertSinkMetricValue("offset-commit-seq-no", 2.0);
    assertSinkMetricValue("offset-commit-completion-total", 1.0);
    assertSinkMetricValue("offset-commit-skip-total", 1.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 2.0);
    assertTaskMetricValue("batch-size-avg", 1.0);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
    assertTrue(asyncCallbackRan.get());
    assertTrue(rebalanced.get());
    // Check that the offsets were not reset by the out-of-order async commit callback
    assertEquals(postRebalanceCurrentOffsets, Whitebox.getInternalState(workerTask, "currentOffsets"));
    assertEquals(rebalanceOffsets, Whitebox.getInternalState(workerTask, "lastCommittedOffsets"));
    time.sleep(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_DEFAULT);
    sinkTaskContext.getValue().requestCommit();
    // iter 4 -- commit in progress
    workerTask.iteration();
    // Check that the offsets were not reset by the out-of-order async commit callback
    assertEquals(postRebalanceCurrentOffsets, Whitebox.getInternalState(workerTask, "currentOffsets"));
    assertEquals(postRebalanceCurrentOffsets, Whitebox.getInternalState(workerTask, "lastCommittedOffsets"));
    assertSinkMetricValue("partition-count", 3);
    assertSinkMetricValue("sink-record-read-total", 4.0);
    assertSinkMetricValue("sink-record-send-total", 4.0);
    assertSinkMetricValue("sink-record-active-count", 0.0);
    assertSinkMetricValue("sink-record-active-count-max", 4.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.5555555);
    assertSinkMetricValue("offset-commit-seq-no", 3.0);
    assertSinkMetricValue("offset-commit-completion-total", 2.0);
    assertSinkMetricValue("offset-commit-skip-total", 1.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 2.0);
    assertTaskMetricValue("batch-size-avg", 1.0);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
    PowerMock.verifyAll();
}
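The KAFKA-5731 problem this test guards against is an async commit callback that arrives after a rebalance has already established newer committed offsets, and which must not overwrite them. One common defense, and roughly what WorkerSinkTask does, is a commit sequence number that stale callbacks fail to match; a compact sketch with illustrative names:

import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class StaleCommitGuard {
    private final Consumer<byte[], byte[]> consumer;
    private Map<TopicPartition, OffsetAndMetadata> lastCommittedOffsets;
    // Bumped on every commit attempt and on every rebalance commit; commitAsync
    // callbacks run on the polling thread (inside poll/commitSync/close), so a
    // plain int is sufficient here.
    private int commitSeqno = 0;

    public StaleCommitGuard(final Consumer<byte[], byte[]> consumer) {
        this.consumer = consumer;
    }

    public void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        final int seqno = ++commitSeqno;
        consumer.commitAsync(offsets, (committed, error) -> {
            // If a rebalance or a newer commit has bumped the sequence number,
            // this callback is stale: ignore it rather than clobbering the
            // offsets recorded by the rebalance.
            if (error == null && seqno == commitSeqno) {
                lastCommittedOffsets = committed;
            }
        });
    }

    // Called from the rebalance listener after it commits synchronously there.
    public void onRebalanceCommitted(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        commitSeqno++; // invalidate any in-flight async callback
        lastCommittedOffsets = offsets;
    }

    public Map<TopicPartition, OffsetAndMetadata> lastCommitted() {
        return lastCommittedOffsets;
    }
}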
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class WorkerSinkTaskTest, the helper method expectConsumerPoll:
private void expectConsumerPoll(final int numMessages, final long timestamp, final TimestampType timestampType, final Headers headers) {
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
        for (int i = 0; i < numMessages; i++)
            records.add(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 + i, timestamp, timestampType, 0, 0, RAW_KEY, RAW_VALUE, headers, Optional.empty()));
        recordsReturnedTp1 += numMessages;
        return new ConsumerRecords<>(numMessages > 0 ? Collections.singletonMap(new TopicPartition(TOPIC, PARTITION), records) : Collections.emptyMap());
    });
}
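Note how a fake batch is just a ConsumerRecords built from a map of partition to record list; the same construction works outside of any mocking framework. A self-contained example with hypothetical topic and payloads:

import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

public class HandBuiltBatch {
    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("test-topic", 0);
        final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
            // The short (topic, partition, offset, key, value) constructor fills in
            // defaults for timestamp, headers, and the serialized-size fields.
            new ConsumerRecord<>("test-topic", 0, 0L, "k".getBytes(), "v".getBytes()));
        final ConsumerRecords<byte[], byte[]> batch =
            new ConsumerRecords<>(Collections.singletonMap(tp, records));
        System.out.println(batch.count()); // prints 1
    }
}

For an empty batch, ConsumerRecords.empty() (used earlier in testPollRedeliveryWithConsumerRebalance) avoids building a map at all.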
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class SmokeTestDriver, the method verify:
public static VerificationResult verify(final String kafka, final Map<String, Set<Integer>> inputs, final int maxRecordsPerKey) {
    final Properties props = new Properties();
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "verifier");
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, NumberDeserializer.class);
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    final KafkaConsumer<String, Number> consumer = new KafkaConsumer<>(props);
    final List<TopicPartition> partitions = getAllPartitions(consumer, TOPICS);
    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);
    final int recordsGenerated = inputs.size() * maxRecordsPerKey;
    int recordsProcessed = 0;
    final Map<String, AtomicInteger> processed = Stream.of(TOPICS).collect(Collectors.toMap(t -> t, t -> new AtomicInteger(0)));
    final Map<String, Map<String, LinkedList<ConsumerRecord<String, Number>>>> events = new HashMap<>();
    VerificationResult verificationResult = new VerificationResult(false, "no results yet");
    int retry = 0;
    final long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < TimeUnit.MINUTES.toMillis(6)) {
        final ConsumerRecords<String, Number> records = consumer.poll(Duration.ofSeconds(5));
        if (records.isEmpty() && recordsProcessed >= recordsGenerated) {
            verificationResult = verifyAll(inputs, events, false);
            if (verificationResult.passed()) {
                break;
            } else if (retry++ > MAX_RECORD_EMPTY_RETRIES) {
                System.out.println(Instant.now() + " Didn't get any more results, verification hasn't passed, and out of retries.");
                break;
            } else {
                System.out.println(Instant.now() + " Didn't get any more results, but verification hasn't passed (yet). Retrying..." + retry);
            }
        } else {
System.out.println(Instant.now() + " Get some more results from " + records.partitions() + ", resetting retry.");
            retry = 0;
            for (final ConsumerRecord<String, Number> record : records) {
                final String key = record.key();
                final String topic = record.topic();
                processed.get(topic).incrementAndGet();
                if (topic.equals("echo")) {
                    recordsProcessed++;
                    if (recordsProcessed % 100 == 0) {
                        System.out.println("Echo records processed = " + recordsProcessed);
                    }
                }
                events.computeIfAbsent(topic, t -> new HashMap<>()).computeIfAbsent(key, k -> new LinkedList<>()).add(record);
            }
            System.out.println(processed);
        }
    }
    consumer.close();
    final long finished = System.currentTimeMillis() - start;
    System.out.println("Verification time=" + finished);
    System.out.println("-------------------");
    System.out.println("Result Verification");
    System.out.println("-------------------");
    System.out.println("recordGenerated=" + recordsGenerated);
    System.out.println("recordProcessed=" + recordsProcessed);
    if (recordsProcessed > recordsGenerated) {
        System.out.println("PROCESSED-MORE-THAN-GENERATED");
    } else if (recordsProcessed < recordsGenerated) {
        System.out.println("PROCESSED-LESS-THAN-GENERATED");
    }
    boolean success;
    final Map<String, Set<Number>> received = events.get("echo")
        .entrySet()
        .stream()
        .map(entry -> mkEntry(entry.getKey(), entry.getValue().stream().map(ConsumerRecord::value).collect(Collectors.toSet())))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    success = inputs.equals(received);
    if (success) {
        System.out.println("ALL-RECORDS-DELIVERED");
    } else {
        int missedCount = 0;
        for (final Map.Entry<String, Set<Integer>> entry : inputs.entrySet()) {
            // Count the generated records for this key that never showed up on the echo topic.
            final Set<Number> receivedForKey = received.getOrDefault(entry.getKey(), Collections.emptySet());
            missedCount += entry.getValue().size() - receivedForKey.size();
        }
        System.out.println("missedRecords=" + missedCount);
    }
    // give it one more try if it's not already passing.
    if (!verificationResult.passed()) {
        verificationResult = verifyAll(inputs, events, true);
    }
    success &= verificationResult.passed();
    System.out.println(verificationResult.result());
    System.out.println(success ? "SUCCESS" : "FAILURE");
    return verificationResult;
}
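getAllPartitions is a private helper elsewhere in SmokeTestDriver that is not shown here; a plausible implementation (a sketch under the assumption that it simply flattens partitionsFor metadata, not the verified original) would be:

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

final class PartitionLookup {
    // Ask the broker for each topic's partition metadata and flatten it into
    // TopicPartition handles suitable for consumer.assign(...).
    static List<TopicPartition> getAllPartitions(final KafkaConsumer<?, ?> consumer, final String... topics) {
        final List<TopicPartition> partitions = new ArrayList<>();
        for (final String topic : topics) {
            for (final PartitionInfo info : consumer.partitionsFor(topic)) {
                partitions.add(new TopicPartition(info.topic(), info.partition()));
            }
        }
        return partitions;
    }
}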