Use of org.apache.kafka.clients.consumer.ConsumerRecords in project ksql by confluentinc.
From the class KsqlEngineMetricsTest, method consumeMessages:
private void consumeMessages(int numMessages, String groupId) {
    ConsumerCollector collector1 = new ConsumerCollector();
    collector1.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, groupId));
    Map<TopicPartition, List<ConsumerRecord<Object, Object>>> records = new HashMap<>();
    List<ConsumerRecord<Object, Object>> recordList = new ArrayList<>();
    for (int i = 0; i < numMessages; i++) {
        recordList.add(new ConsumerRecord<>("foo", 1, 1, 1L, TimestampType.CREATE_TIME,
                1L, 10, 10, "key", "1234567890"));
    }
    records.put(new TopicPartition("foo", 1), recordList);
    ConsumerRecords<Object, Object> consumerRecords = new ConsumerRecords<>(records);
    collector1.onConsume(consumerRecords);
}
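For reference, a ConsumerRecords instance built this way can be read record-by-record or per partition. A minimal sketch follows; printRecords is a hypothetical helper, not part of the test above, and the topic and types mirror the snippet:

private void printRecords(ConsumerRecords<Object, Object> consumerRecords) {
    // ConsumerRecords is Iterable, so this walks every record across all partitions.
    for (ConsumerRecord<Object, Object> record : consumerRecords) {
        System.out.printf("offset=%d key=%s value=%s%n", record.offset(), record.key(), record.value());
    }
    // Alternatively, fetch the list for a single partition; count() totals all partitions.
    List<ConsumerRecord<Object, Object>> partitionRecords =
            consumerRecords.records(new TopicPartition("foo", 1));
    System.out.println("partition foo-1 holds " + partitionRecords.size()
            + " of " + consumerRecords.count() + " records");
}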
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project apache-kafka-on-k8s by banzaicloud.
From the class MockConsumerInterceptor, method onConsume:
@Override
@SuppressWarnings("deprecation")
public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
    // This will ensure that we get the cluster metadata when onConsume is called for the first time
    // as subsequent compareAndSet operations will fail.
    CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> lst = new ArrayList<>();
        for (ConsumerRecord<String, String> record : records.records(tp)) {
            lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(),
                    record.timestamp(), record.timestampType(), record.checksum(),
                    record.serializedKeySize(), record.serializedValueSize(),
                    record.key(), record.value().toUpperCase(Locale.ROOT)));
        }
        recordMap.put(tp, lst);
    }
    return new ConsumerRecords<>(recordMap);
}
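An interceptor like this is not called directly by application code; it is registered through the consumer configuration and wrapped around every poll. A minimal sketch, where the bootstrap server and group id values are illustrative:

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
// Wire in the interceptor; every ConsumerRecords returned by poll() passes through onConsume() first.
props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);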
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project apache-kafka-on-k8s by banzaicloud.
From the class WorkerSinkTaskTest, method testWakeupInCommitSyncCausesRetry:
@Test
public void testWakeupInCommitSyncCausesRetry() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    expectPollInitialAssignment();
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();
    final List<TopicPartition> partitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2);
    final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    offsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);
    // first one raises wakeup
    consumer.commitSync(EasyMock.<Map<TopicPartition, OffsetAndMetadata>>anyObject());
    EasyMock.expectLastCall().andThrow(new WakeupException());
    // we should retry and complete the commit
    consumer.commitSync(EasyMock.<Map<TopicPartition, OffsetAndMetadata>>anyObject());
    EasyMock.expectLastCall();
    sinkTask.close(new HashSet<>(partitions));
    EasyMock.expectLastCall();
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    sinkTask.open(partitions);
    EasyMock.expectLastCall();
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            rebalanceListener.getValue().onPartitionsRevoked(partitions);
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            return ConsumerRecords.empty();
        }
    });
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(partitions));
    consumer.resume(Collections.singleton(TOPIC_PARTITION));
    EasyMock.expectLastCall();
    consumer.resume(Collections.singleton(TOPIC_PARTITION2));
    EasyMock.expectLastCall();
    statusListener.onResume(taskId);
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    time.sleep(30000L);
    workerTask.initializeAndStart();
    time.sleep(30000L);
    // poll for initial assignment
    workerTask.iteration();
    time.sleep(30000L);
    // first record delivered
    workerTask.iteration();
    // now rebalance with the wakeup triggered
    workerTask.iteration();
    time.sleep(30000L);
    assertSinkMetricValue("partition-count", 2);
    assertSinkMetricValue("sink-record-read-total", 1.0);
    assertSinkMetricValue("sink-record-send-total", 1.0);
    assertSinkMetricValue("sink-record-active-count", 0.0);
    assertSinkMetricValue("sink-record-active-count-max", 1.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.33333);
    assertSinkMetricValue("offset-commit-seq-no", 1.0);
    assertSinkMetricValue("offset-commit-completion-total", 1.0);
    assertSinkMetricValue("offset-commit-skip-total", 0.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 1.0);
    assertTaskMetricValue("batch-size-avg", 1.0);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
    PowerMock.verifyAll();
}
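The behavior under test maps to a simple application-side pattern: consumer.wakeup() makes a blocking commitSync throw WakeupException, after which the commit can simply be retried. A minimal sketch of that pattern, as a hypothetical helper rather than the Connect worker's actual code:

private void commitWithWakeupRetry(Consumer<byte[], byte[]> consumer,
                                   Map<TopicPartition, OffsetAndMetadata> offsets) {
    try {
        consumer.commitSync(offsets);
    } catch (WakeupException e) {
        // wakeup() interrupted the blocking call; the offsets are still valid,
        // so retry the commit before proceeding with shutdown or rebalance handling.
        consumer.commitSync(offsets);
    }
}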
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project apache-kafka-on-k8s by banzaicloud.
From the class WorkerSinkTaskTest, method testCommitWithOutOfOrderCallback:
// Verify that when commitAsync is called but the supplied callback is not called by the consumer before a
// rebalance occurs, the async callback does not reset the last committed offset from the rebalance.
// See KAFKA-5731 for more information.
@Test
public void testCommitWithOutOfOrderCallback() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    // iter 1
    expectPollInitialAssignment();
    // iter 2
    expectConsumerPoll(1);
    expectConversionAndTransformation(4);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();
    final Map<TopicPartition, OffsetAndMetadata> workerStartingOffsets = new HashMap<>();
    workerStartingOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET));
    workerStartingOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    final Map<TopicPartition, OffsetAndMetadata> workerCurrentOffsets = new HashMap<>();
    workerCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    final List<TopicPartition> originalPartitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2);
    final List<TopicPartition> rebalancedPartitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final Map<TopicPartition, OffsetAndMetadata> rebalanceOffsets = new HashMap<>();
    rebalanceOffsets.put(TOPIC_PARTITION, workerCurrentOffsets.get(TOPIC_PARTITION));
    rebalanceOffsets.put(TOPIC_PARTITION2, workerCurrentOffsets.get(TOPIC_PARTITION2));
    rebalanceOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET));
    final Map<TopicPartition, OffsetAndMetadata> postRebalanceCurrentOffsets = new HashMap<>();
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 3));
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET + 2));
    // iter 3 - note that we return the current offset to indicate they should be committed
    sinkTask.preCommit(workerCurrentOffsets);
    EasyMock.expectLastCall().andReturn(workerCurrentOffsets);
    // We need to delay the result of trying to commit offsets to Kafka via the consumer.commitAsync
    // method. We do this so that we can test that the callback is not called until after the rebalance
    // changes the lastCommittedOffsets. To fake this for tests we have the commitAsync build a function
    // that will call the callback with the appropriate parameters, and we'll run that function later.
    final AtomicReference<Runnable> asyncCallbackRunner = new AtomicReference<>();
    final AtomicBoolean asyncCallbackRan = new AtomicBoolean();
    consumer.commitAsync(EasyMock.eq(workerCurrentOffsets), EasyMock.<OffsetCommitCallback>anyObject());
    EasyMock.expectLastCall().andAnswer(new IAnswer<Void>() {
        @SuppressWarnings("unchecked")
        @Override
        public Void answer() throws Throwable {
            // Grab the arguments passed to the consumer.commitAsync method
            final Object[] args = EasyMock.getCurrentArguments();
            final Map<TopicPartition, OffsetAndMetadata> offsets = (Map<TopicPartition, OffsetAndMetadata>) args[0];
            final OffsetCommitCallback callback = (OffsetCommitCallback) args[1];
            asyncCallbackRunner.set(new Runnable() {
                @Override
                public void run() {
                    callback.onComplete(offsets, null);
                    asyncCallbackRan.set(true);
                }
            });
            return null;
        }
    });
    // Expect the next poll to discover and perform the rebalance, THEN complete the previous callback handler,
    // and then return one record for TP1 and one for TP3.
    final AtomicBoolean rebalanced = new AtomicBoolean();
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // Rebalance always begins with revoking current partitions ...
            rebalanceListener.getValue().onPartitionsRevoked(originalPartitions);
            // Respond to the rebalance
            Map<TopicPartition, Long> offsets = new HashMap<>();
            offsets.put(TOPIC_PARTITION, rebalanceOffsets.get(TOPIC_PARTITION).offset());
            offsets.put(TOPIC_PARTITION2, rebalanceOffsets.get(TOPIC_PARTITION2).offset());
            offsets.put(TOPIC_PARTITION3, rebalanceOffsets.get(TOPIC_PARTITION3).offset());
            sinkTaskContext.getValue().offset(offsets);
            rebalanceListener.getValue().onPartitionsAssigned(rebalancedPartitions);
            rebalanced.set(true);
            // Run the previous async commit handler
            asyncCallbackRunner.get().run();
            // And prep the two records to return
            long timestamp = RecordBatch.NO_TIMESTAMP;
            TimestampType timestampType = TimestampType.NO_TIMESTAMP_TYPE;
            List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
            records.add(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 + 1,
                    timestamp, timestampType, 0L, 0, 0, RAW_KEY, RAW_VALUE));
            records.add(new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET + recordsReturnedTp3 + 1,
                    timestamp, timestampType, 0L, 0, 0, RAW_KEY, RAW_VALUE));
            recordsReturnedTp1 += 1;
            recordsReturnedTp3 += 1;
            return new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(TOPIC, PARTITION), records));
        }
    });
    // onPartitionsRevoked
    sinkTask.preCommit(workerCurrentOffsets);
    EasyMock.expectLastCall().andReturn(workerCurrentOffsets);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();
    sinkTask.close(workerCurrentOffsets.keySet());
    EasyMock.expectLastCall();
    consumer.commitSync(workerCurrentOffsets);
    EasyMock.expectLastCall();
    // onPartitionsAssigned - step 1
    final long offsetTp1 = rebalanceOffsets.get(TOPIC_PARTITION).offset();
    final long offsetTp2 = rebalanceOffsets.get(TOPIC_PARTITION2).offset();
    final long offsetTp3 = rebalanceOffsets.get(TOPIC_PARTITION3).offset();
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(offsetTp1);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(offsetTp2);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(offsetTp3);
    // onPartitionsAssigned - step 2
    sinkTask.open(rebalancedPartitions);
    EasyMock.expectLastCall();
    // onPartitionsAssigned - step 3 rewind
    consumer.seek(TOPIC_PARTITION, offsetTp1);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION2, offsetTp2);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION3, offsetTp3);
    EasyMock.expectLastCall();
    // iter 4 - note that we return the current offset to indicate they should be committed
    sinkTask.preCommit(postRebalanceCurrentOffsets);
    EasyMock.expectLastCall().andReturn(postRebalanceCurrentOffsets);
    final Capture<OffsetCommitCallback> callback = EasyMock.newCapture();
    consumer.commitAsync(EasyMock.eq(postRebalanceCurrentOffsets), EasyMock.capture(callback));
    EasyMock.expectLastCall().andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
            callback.getValue().onComplete(postRebalanceCurrentOffsets, null);
            return null;
        }
    });
    // no actual consumer.commit() triggered
    expectConsumerPoll(1);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    // iter 1 -- initial assignment
    workerTask.iteration();
    assertEquals(workerStartingOffsets, Whitebox.getInternalState(workerTask, "currentOffsets"));
    assertEquals(workerStartingOffsets, Whitebox.getInternalState(workerTask, "lastCommittedOffsets"));
    time.sleep(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_DEFAULT);
    // iter 2 -- deliver 2 records
    workerTask.iteration();
    sinkTaskContext.getValue().requestCommit();
    // iter 3 -- commit in progress
    workerTask.iteration();
    assertSinkMetricValue("partition-count", 3);
    assertSinkMetricValue("sink-record-read-total", 3.0);
    assertSinkMetricValue("sink-record-send-total", 3.0);
    assertSinkMetricValue("sink-record-active-count", 4.0);
    assertSinkMetricValue("sink-record-active-count-max", 4.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.71429);
    assertSinkMetricValue("offset-commit-seq-no", 2.0);
    assertSinkMetricValue("offset-commit-completion-total", 1.0);
    assertSinkMetricValue("offset-commit-skip-total", 1.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 2.0);
    assertTaskMetricValue("batch-size-avg", 1.0);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
    assertTrue(asyncCallbackRan.get());
    assertTrue(rebalanced.get());
    // Check that the offsets were not reset by the out-of-order async commit callback
    assertEquals(postRebalanceCurrentOffsets, Whitebox.getInternalState(workerTask, "currentOffsets"));
    assertEquals(rebalanceOffsets, Whitebox.getInternalState(workerTask, "lastCommittedOffsets"));
    time.sleep(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_DEFAULT);
    sinkTaskContext.getValue().requestCommit();
    // iter 4 -- commit in progress
    workerTask.iteration();
    // Check that the offsets were not reset by the out-of-order async commit callback
    assertEquals(postRebalanceCurrentOffsets, Whitebox.getInternalState(workerTask, "currentOffsets"));
    assertEquals(postRebalanceCurrentOffsets, Whitebox.getInternalState(workerTask, "lastCommittedOffsets"));
    assertSinkMetricValue("partition-count", 3);
    assertSinkMetricValue("sink-record-read-total", 4.0);
    assertSinkMetricValue("sink-record-send-total", 4.0);
    assertSinkMetricValue("sink-record-active-count", 0.0);
    assertSinkMetricValue("sink-record-active-count-max", 4.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.5555555);
    assertSinkMetricValue("offset-commit-seq-no", 3.0);
    assertSinkMetricValue("offset-commit-completion-total", 2.0);
    assertSinkMetricValue("offset-commit-skip-total", 1.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 2.0);
    assertTaskMetricValue("batch-size-avg", 1.0);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
    PowerMock.verifyAll();
}
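The defensive pattern this test motivates (see KAFKA-5731) can be expressed directly in application code: tag each async commit with a sequence number and drop callbacks that a newer commit or a rebalance has already superseded. A minimal sketch, where commitSeqNo and lastCommittedOffsets are hypothetical fields, not the Connect worker's actual implementation:

private final AtomicLong commitSeqNo = new AtomicLong();
private final Map<TopicPartition, OffsetAndMetadata> lastCommittedOffsets = new HashMap<>();

private void commitAsyncGuarded(Consumer<byte[], byte[]> consumer,
                                Map<TopicPartition, OffsetAndMetadata> offsets) {
    final long seqNo = commitSeqNo.incrementAndGet();
    consumer.commitAsync(offsets, new OffsetCommitCallback() {
        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> committed, Exception error) {
            if (seqNo != commitSeqNo.get()) {
                return; // a newer commit superseded this one; ignore the stale callback
            }
            if (error == null) {
                lastCommittedOffsets.putAll(committed);
            }
        }
    });
}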
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project core-ng-project by neowu.
From the class KafkaMessageListenerThread, method process:
private void process(Consumer<String, byte[]> consumer, ConsumerRecords<String, byte[]> kafkaRecords) {
    StopWatch watch = new StopWatch();
    int count = 0;
    int size = 0;
    try {
        Map<String, List<ConsumerRecord<String, byte[]>>> messages = Maps.newLinkedHashMap();
        for (ConsumerRecord<String, byte[]> record : kafkaRecords) {
            messages.computeIfAbsent(record.topic(), key -> Lists.newArrayList()).add(record);
            count++;
            size += record.value().length;
        }
        for (Map.Entry<String, List<ConsumerRecord<String, byte[]>>> entry : messages.entrySet()) {
            String topic = entry.getKey();
            List<ConsumerRecord<String, byte[]>> records = entry.getValue();
            KafkaMessageListener.BulkMessageHandlerHolder<?> bulkHandlerHolder = bulkHandlerHolders.get(topic);
            if (bulkHandlerHolder != null) {
                handle(topic, bulkHandlerHolder, records,
                        longProcessThreshold(batchLongProcessThresholdInNano, records.size(), count));
            } else {
                KafkaMessageListener.MessageHandlerHolder<?> handlerHolder = handlerHolders.get(topic);
                if (handlerHolder != null) {
                    handle(topic, handlerHolder, records,
                            longProcessThreshold(batchLongProcessThresholdInNano, 1, count));
                }
            }
        }
    } finally {
        consumer.commitAsync();
        logger.info("process kafka records, count={}, size={}, elapsedTime={}", count, size, watch.elapsedTime());
    }
}
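For context, a method like this is driven from the listener thread's poll loop. A minimal sketch of such a loop; the shutdown flag and poll timeout are illustrative, not core-ng's actual run loop:

while (!shutdown.get()) {
    // poll(Duration) on newer clients; older clients use the deprecated poll(long)
    ConsumerRecords<String, byte[]> kafkaRecords = consumer.poll(Duration.ofSeconds(1));
    if (!kafkaRecords.isEmpty()) {
        process(consumer, kafkaRecords);
    }
}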