Use of org.apache.kafka.clients.consumer.ConsumerRecords in project samza by apache.
The class TestZkStreamProcessorBase, method verifyNumMessages.
/**
* Consumes data from the topic until there are no new messages for a while
* and asserts that the number of consumed messages is as expected.
*/
protected void verifyNumMessages(String topic, final Map<Integer, Boolean> expectedValues, int expectedNumMessages) {
    consumer.subscribe(Collections.singletonList(topic));
    Map<Integer, Boolean> map = new HashMap<>(expectedValues);
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord> iterator = records.iterator();
            while (iterator.hasNext()) {
                ConsumerRecord record = iterator.next();
                String val = new String((byte[]) record.value());
                LOG.info("Got value " + val + "; count = " + count + "; out of " + expectedNumMessages);
                Integer valI = Integer.valueOf(val);
                if (valI < BAD_MESSAGE_KEY) {
                    map.put(valI, true);
                    count++;
                }
            }
        } else {
            emptyPollCount++;
            LOG.warn("empty polls " + emptyPollCount);
        }
    }
    // filter out numbers we did not get
    long numFalse = map.values().stream().filter(v -> !v).count();
    Assert.assertEquals("didn't get this number of events ", 0, numFalse);
    Assert.assertEquals(expectedNumMessages, count);
}
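For context, a caller typically seeds the expected-values map with every key it produced, marked false, and lets the helper flip entries to true as they are consumed. A minimal sketch of such a call site, assuming a test that produced messages keyed 0..4 to a topic named "test-topic" (the keys, topic name, and count are illustrative, not taken from the Samza test):

// Hypothetical call site for verifyNumMessages; keys 0..4, the topic name,
// and the expected count are illustrative values, not from the original test.
Map<Integer, Boolean> expectedValues = new HashMap<>();
for (int i = 0; i < 5; i++) {
    expectedValues.put(i, false); // not yet seen
}
// After producing 5 messages to "test-topic", assert they all arrive.
verifyNumMessages("test-topic", expectedValues, 5);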
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project samza by apache.
The class TestStreamProcessor, method verifyNumMessages.
/**
* Consumes data from the topic until there are no new messages for a while
* and asserts that the number of consumed messages is as expected.
*/
@SuppressWarnings("unchecked")
private void verifyNumMessages(KafkaConsumer consumer, String topic, int expectedNumMessages) {
    consumer.subscribe(Collections.singletonList(topic));
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            for (ConsumerRecord record : (Iterable<ConsumerRecord>) records) {
                Assert.assertEquals(new String((byte[]) record.value()), String.valueOf(count));
                count++;
            }
        } else {
            emptyPollCount++;
        }
    }
    Assert.assertEquals(count, expectedNumMessages);
}
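Because this helper casts record.value() to byte[], the consumer handed to it would need byte-array deserializers. A minimal sketch of building such a consumer, assuming placeholder broker address, group id, and topic name (none of these values come from the original test):

// Sketch of a consumer suitable for this verifyNumMessages variant; the broker
// address, group id, and topic name are placeholders, not values from the test.
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "verify-group");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props);
verifyNumMessages(consumer, "test-topic", 5);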
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project storm by apache.
The class KafkaSpoutRebalanceTest, method emitOneMessagePerPartitionThenRevokeOnePartition.
// Returns messageIds in order of emission
private List<KafkaSpoutMessageId> emitOneMessagePerPartitionThenRevokeOnePartition(KafkaSpout<String, String> spout,
        TopicPartition partitionThatWillBeRevoked, TopicPartition assignedPartition, TopicAssigner topicAssigner) {
    // Setup spout with mock consumer so we can get at the rebalance listener
    spout.open(conf, contextMock, collectorMock);
    spout.activate();
    // Assign partitions to the spout
    ArgumentCaptor<ConsumerRebalanceListener> rebalanceListenerCapture = ArgumentCaptor.forClass(ConsumerRebalanceListener.class);
    verify(topicAssigner).assignPartitions(any(), any(), rebalanceListenerCapture.capture());
    ConsumerRebalanceListener consumerRebalanceListener = rebalanceListenerCapture.getValue();
    Set<TopicPartition> assignedPartitions = new HashSet<>();
    assignedPartitions.add(partitionThatWillBeRevoked);
    assignedPartitions.add(assignedPartition);
    consumerRebalanceListener.onPartitionsAssigned(assignedPartitions);
    when(consumerMock.assignment()).thenReturn(assignedPartitions);
    // Make the consumer return a single message for each partition
    when(consumerMock.poll(anyLong()))
        .thenReturn(new ConsumerRecords<>(Collections.singletonMap(partitionThatWillBeRevoked,
            SpoutWithMockedConsumerSetupHelper.createRecords(partitionThatWillBeRevoked, 0, 1))))
        .thenReturn(new ConsumerRecords<>(Collections.singletonMap(assignedPartition,
            SpoutWithMockedConsumerSetupHelper.createRecords(assignedPartition, 0, 1))))
        .thenReturn(new ConsumerRecords<>(Collections.emptyMap()));
    // Emit the messages
    spout.nextTuple();
    ArgumentCaptor<KafkaSpoutMessageId> messageIdForRevokedPartition = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(anyString(), anyList(), messageIdForRevokedPartition.capture());
    reset(collectorMock);
    spout.nextTuple();
    ArgumentCaptor<KafkaSpoutMessageId> messageIdForAssignedPartition = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(anyString(), anyList(), messageIdForAssignedPartition.capture());
    // Now rebalance
    consumerRebalanceListener.onPartitionsRevoked(assignedPartitions);
    consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(assignedPartition));
    when(consumerMock.assignment()).thenReturn(Collections.singleton(assignedPartition));
    List<KafkaSpoutMessageId> emittedMessageIds = new ArrayList<>();
    emittedMessageIds.add(messageIdForRevokedPartition.getValue());
    emittedMessageIds.add(messageIdForAssignedPartition.getValue());
    return emittedMessageIds;
}
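A test built on this helper might then exercise the revoked partition, for example failing its message id and checking that the spout does not seek back on a partition it no longer owns. The following is only a sketch of such a follow-up, not the original test body:

// Sketch of a possible follow-up to the helper above; these assertions are
// illustrative and not copied from KafkaSpoutRebalanceTest.
List<KafkaSpoutMessageId> emitted = emitOneMessagePerPartitionThenRevokeOnePartition(
    spout, partitionThatWillBeRevoked, assignedPartition, topicAssigner);
KafkaSpoutMessageId idFromRevokedPartition = emitted.get(0);
// Fail the tuple that came from the now-revoked partition.
spout.fail(idFromRevokedPartition);
spout.nextTuple();
// The spout should not seek back on a partition it no longer owns.
verify(consumerMock, never()).seek(eq(partitionThatWillBeRevoked), anyLong());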
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project storm by apache.
The class KafkaSpoutEmitTest, method testSpoutWillSkipPartitionsAtTheMaxUncommittedOffsetsLimit.
@Test
public void testSpoutWillSkipPartitionsAtTheMaxUncommittedOffsetsLimit() {
    // This verifies that partitions can't prevent each other from retrying tuples due to the maxUncommittedOffsets limit.
    try (SimulatedTime simulatedTime = new SimulatedTime()) {
        TopicPartition partitionTwo = new TopicPartition(SingleTopicKafkaSpoutConfiguration.TOPIC, 2);
        KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(spoutConfig, conf, contextMock,
            collectorMock, consumerMock, partition, partitionTwo);
        Map<TopicPartition, List<ConsumerRecord<String, String>>> records = new HashMap<>();
        // This is cheating a bit since maxPollRecords would normally spread this across multiple polls
        records.put(partition, SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, spoutConfig.getMaxUncommittedOffsets()));
        records.put(partitionTwo, SpoutWithMockedConsumerSetupHelper.createRecords(partitionTwo, 0, spoutConfig.getMaxUncommittedOffsets() + 1));
        int numMessages = spoutConfig.getMaxUncommittedOffsets() * 2 + 1;
        when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(records));
        for (int i = 0; i < numMessages; i++) {
            spout.nextTuple();
        }
        ArgumentCaptor<KafkaSpoutMessageId> messageIds = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
        verify(collectorMock, times(numMessages)).emit(anyString(), anyList(), messageIds.capture());
        // Now fail a tuple on partition one and verify that it is allowed to retry, because the failed tuple is below the maxUncommittedOffsets limit
        Optional<KafkaSpoutMessageId> failedMessageIdPartitionOne = messageIds.getAllValues().stream()
            .filter(messageId -> messageId.partition() == partition.partition())
            .findAny();
        spout.fail(failedMessageIdPartitionOne.get());
        // Also fail the last tuple from partition two. Since the failed tuple is beyond the maxUncommittedOffsets limit, it should not be retried until earlier messages are acked.
        Optional<KafkaSpoutMessageId> failedMessagePartitionTwo = messageIds.getAllValues().stream()
            .filter(messageId -> messageId.partition() == partitionTwo.partition())
            .max((msgId, msgId2) -> (int) (msgId.offset() - msgId2.offset()));
        spout.fail(failedMessagePartitionTwo.get());
        reset(collectorMock);
        Time.advanceTime(50);
        when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(partition,
            SpoutWithMockedConsumerSetupHelper.createRecords(partition, failedMessageIdPartitionOne.get().offset(), 1))));
        spout.nextTuple();
        verify(collectorMock, times(1)).emit(anyObject(), anyObject(), anyObject());
        InOrder inOrder = inOrder(consumerMock);
        inOrder.verify(consumerMock).seek(partition, failedMessageIdPartitionOne.get().offset());
        // Should not seek on the paused partition
        inOrder.verify(consumerMock, never()).seek(eq(partitionTwo), anyLong());
        inOrder.verify(consumerMock).pause(Collections.singleton(partitionTwo));
        inOrder.verify(consumerMock).poll(anyLong());
        inOrder.verify(consumerMock).resume(Collections.singleton(partitionTwo));
        reset(collectorMock);
        // Now also check that no more tuples are polled for, since both partitions are at their limits
        spout.nextTuple();
        verify(collectorMock, never()).emit(anyObject(), anyObject(), anyObject());
    }
}
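The test leans on SpoutWithMockedConsumerSetupHelper.createRecords, whose body is not shown here. Purely as an assumption about its shape, such a helper likely builds a list of consecutive ConsumerRecord instances for one partition starting at a given offset; a sketch of that idea (the real helper may differ in signature and in how keys and values are generated):

// Hedged sketch of a createRecords-style helper; not the actual
// SpoutWithMockedConsumerSetupHelper implementation.
private static List<ConsumerRecord<String, String>> createRecords(TopicPartition topicPartition, long startingOffset, int count) {
    List<ConsumerRecord<String, String>> recordsForPartition = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        long offset = startingOffset + i;
        // topic, partition, offset, key, value
        recordsForPartition.add(new ConsumerRecord<>(topicPartition.topic(), topicPartition.partition(), offset, "key-" + offset, "value-" + offset));
    }
    return recordsForPartition;
}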
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
The class MockConsumerInterceptor, method onConsume.
@Override
public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
    // This will ensure that we get the cluster metadata when onConsume is called for the first time
    // as subsequent compareAndSet operations will fail.
    CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> lst = new ArrayList<>();
        for (ConsumerRecord<String, String> record : records.records(tp)) {
            lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), record.timestamp(),
                record.timestampType(), record.serializedKeySize(), record.serializedValueSize(),
                record.key(), record.value().toUpperCase(Locale.ROOT), new RecordHeaders(), Optional.empty()));
        }
        recordMap.put(tp, lst);
    }
    return new ConsumerRecords<>(recordMap);
}
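For context, a consumer interceptor like this is normally activated through consumer configuration rather than called directly; the client then routes every poll result through onConsume, which in this class upper-cases record values. A minimal wiring sketch, with placeholder broker address and group id:

// Sketch of wiring the interceptor into a consumer; the broker address and
// group id are placeholders. interceptor.classes takes a list of class names.
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "interceptor-demo");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
// Records returned here have passed through onConsume, so their values are upper-cased.
ConsumerRecords<String, String> upperCased = consumer.poll(Duration.ofSeconds(1));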