use of org.apache.kafka.clients.consumer.ConsumerRecords in project heron by twitter.
the class KafkaSpoutTest method fail.
@Test
public void fail() {
  when(kafkaConsumerFactory.create()).thenReturn(consumer);
  TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
  List<ConsumerRecord<String, byte[]>> recordList = new ArrayList<>();
  byte[] randomBytes = new byte[1];
  for (int i = 0; i < 5; i++) {
    RANDOM.nextBytes(randomBytes);
    recordList.add(new ConsumerRecord<>(DUMMY_TOPIC_NAME, 0, i, "key",
        Arrays.copyOf(randomBytes, randomBytes.length)));
  }
  ConsumerRecords<String, byte[]> consumerRecords =
      new ConsumerRecords<>(Collections.singletonMap(topicPartition, recordList));
  when(consumer.poll(any(Duration.class))).thenReturn(consumerRecords);
  kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE,
      ATLEAST_ONCE.name()), topologyContext, collector);
  verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)),
      consumerRebalanceListenerArgumentCaptor.capture());
  ConsumerRebalanceListener consumerRebalanceListener =
      consumerRebalanceListenerArgumentCaptor.getValue();
  consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));
  // poll the topic
  kafkaSpout.nextTuple();
  // emit all five records
  for (int i = 0; i < 5; i++) {
    kafkaSpout.nextTuple();
  }
  // acks arrive out of order; the second and third records fail
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 4));
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 0));
  kafkaSpout.fail(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 1));
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 3));
  kafkaSpout.fail(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 2));
  // commit and poll again
  kafkaSpout.nextTuple();
  // offset 0 is the only contiguously acked record, so the spout commits
  // offset 1 (the next offset to consume) and seeks back to 1 to replay
  // the failed records
  verify(consumer).seek(topicPartition, 1);
  verify(consumer).commitAsync(Collections.singletonMap(topicPartition,
      new OffsetAndMetadata(1)), null);
}
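The expected seek and commit offsets follow from the usual at-least-once bookkeeping: only the lowest contiguous run of acked offsets can be committed. A minimal sketch of that logic for one partition, assuming nothing about Heron's actual implementation (the class and method names here are illustrative):

import java.util.TreeSet;

// Sketch: given acks {0, 3, 4} and fails {1, 2}, compute the commit offset.
final class OffsetTrackerSketch {
  private final TreeSet<Long> acked = new TreeSet<>(); // holds out-of-order acks
  private long nextToCommit = 0;                       // next offset to consume

  void ack(long offset) {
    acked.add(offset);
  }

  // Advance past the contiguous acked prefix; everything after the first gap
  // must be replayed, which is why the test expects seek(topicPartition, 1).
  long nextCommitOffset() {
    while (acked.contains(nextToCommit)) {
      acked.remove(nextToCommit);
      nextToCommit++;
    }
    return nextToCommit; // with acks {0, 3, 4} this returns 1
  }
}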
use of org.apache.kafka.clients.consumer.ConsumerRecords in project heron by twitter.
the class KafkaSpoutTest method nextTuple.
@Test
public void nextTuple() {
  when(kafkaConsumerFactory.create()).thenReturn(consumer);
  ConsumerRecords<String, byte[]> consumerRecords = new ConsumerRecords<>(
      Collections.singletonMap(new TopicPartition(DUMMY_TOPIC_NAME, 0),
          Collections.singletonList(new ConsumerRecord<>(DUMMY_TOPIC_NAME, 0, 0, "key",
              new byte[]{0xF}))));
  when(consumer.poll(any(Duration.class))).thenReturn(consumerRecords);
  doReturn(Collections.singletonMap(new MetricName("name", "group", "description",
      Collections.singletonMap("name", "value")), metric)).when(consumer).metrics();
  when(metric.metricValue()).thenReturn("sample value");
  kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE,
      ATMOST_ONCE.name()), topologyContext, collector);
  verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)),
      consumerRebalanceListenerArgumentCaptor.capture());
  ConsumerRebalanceListener consumerRebalanceListener =
      consumerRebalanceListenerArgumentCaptor.getValue();
  TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
  consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));
  // first call polls the topic; in at-most-once mode the offset is committed
  // immediately, before the record is emitted
  kafkaSpout.nextTuple();
  verify(consumer).commitAsync();
  verify(topologyContext).registerMetric(eq("name-group-name-value"),
      kafkaMetricDecoratorArgumentCaptor.capture(), eq(60));
  assertEquals("sample value",
      kafkaMetricDecoratorArgumentCaptor.getValue().getValueAndReset());
  // second call emits the buffered record
  kafkaSpout.nextTuple();
  verify(collector).emit(eq("default"), listArgumentCaptor.capture());
  assertEquals("key", listArgumentCaptor.getValue().get(0));
  assertArrayEquals(new byte[]{0xF}, (byte[]) listArgumentCaptor.getValue().get(1));
}
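The mocked consumerRecords object above is what a real poll returns. Outside a test, draining such a batch partition by partition is the standard pattern; a generic sketch (the handle method is a hypothetical stand-in for the spout's emit logic, not Heron code):

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

final class PollLoopSketch {
  // Drain one poll, partition by partition; offset order is preserved
  // within each partition, which is what the spout relies on.
  static void drainOnce(Consumer<String, byte[]> consumer) {
    ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(100));
    for (TopicPartition tp : records.partitions()) {
      for (ConsumerRecord<String, byte[]> record : records.records(tp)) {
        handle(record); // hypothetical handler
      }
    }
  }

  static void handle(ConsumerRecord<String, byte[]> record) {
    System.out.printf("%s-%d@%d: %s%n",
        record.topic(), record.partition(), record.offset(), record.key());
  }
}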
use of org.apache.kafka.clients.consumer.ConsumerRecords in project hive by apache.
the class TransactionalKafkaWriterTest method checkData.
private void checkData() {
  Set<TopicPartition> assignment = Collections.singleton(new TopicPartition(TOPIC, 0));
  consumer.assign(assignment);
  consumer.seekToBeginning(assignment);
  long numRecords = 0;
  boolean emptyPoll = false;
  while (numRecords < RECORD_NUMBER && !emptyPoll) {
    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(10000));
    // every polled record must match one of the writables that was written,
    // i.e. assertFalse(anyMatch(!contains)) is an allMatch(contains) check
    Assert.assertFalse(records.records(new TopicPartition(TOPIC, 0))
        .stream()
        .anyMatch(consumerRecord -> !RECORDS_WRITABLES.contains(new KafkaWritable(0,
            consumerRecord.timestamp(), consumerRecord.value(), consumerRecord.key()))));
    emptyPoll = records.isEmpty();
    numRecords += records.count();
  }
  Assert.assertEquals(RECORD_NUMBER, numRecords);
}
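The loop above is the usual bounded-read pattern for verifying produced data: poll until the expected count is reached or a poll comes back empty. The same pattern as a generic helper (a sketch with a hypothetical readAtLeast name, not Hive code):

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

final class BoundedReadSketch {
  // Poll until `expected` records arrive or a poll returns nothing within
  // the timeout; the caller asserts on the size of the returned list.
  static <K, V> List<ConsumerRecord<K, V>> readAtLeast(
      Consumer<K, V> consumer, long expected, Duration timeout) {
    List<ConsumerRecord<K, V>> out = new ArrayList<>();
    while (out.size() < expected) {
      ConsumerRecords<K, V> batch = consumer.poll(timeout);
      if (batch.isEmpty()) {
        break; // no more data within the timeout
      }
      batch.forEach(out::add); // ConsumerRecords is Iterable<ConsumerRecord>
    }
    return out;
  }
}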
use of org.apache.kafka.clients.consumer.ConsumerRecords in project canal by alibaba.
the class CanalKafkaConsumer method getMessage.
@SuppressWarnings("unchecked")
@Override
public List<CommonMessage> getMessage(Long timeout, TimeUnit unit) {
  if (!flatMessage) {
    // poll(long) is the overload deprecated since Kafka 2.0; see the
    // Duration-based equivalent below
    ConsumerRecords<String, Message> records =
        (ConsumerRecords<String, Message>) kafkaConsumer.poll(unit.toMillis(timeout));
    if (!records.isEmpty()) {
      currentOffsets.clear();
      List<CommonMessage> messages = new ArrayList<>();
      for (ConsumerRecord<String, Message> record : records) {
        // record the first offset seen for each partition in this batch
        if (currentOffsets.get(record.partition()) == null) {
          currentOffsets.put(record.partition(), record.offset());
        }
        messages.addAll(MessageUtil.convert(record.value()));
      }
      return messages;
    }
  } else {
    ConsumerRecords<String, String> records =
        (ConsumerRecords<String, String>) kafkaConsumer.poll(unit.toMillis(timeout));
    if (!records.isEmpty()) {
      List<CommonMessage> messages = new ArrayList<>();
      currentOffsets.clear();
      for (ConsumerRecord<String, String> record : records) {
        if (currentOffsets.get(record.partition()) == null) {
          currentOffsets.put(record.partition(), record.offset());
        }
        String flatMessageJson = record.value();
        CommonMessage flatMessages = JSON.parseObject(flatMessageJson, CommonMessage.class);
        messages.add(flatMessages);
      }
      return messages;
    }
  }
  return null;
}
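kafkaConsumer.poll(unit.toMillis(timeout)) binds to the long-based poll overload, deprecated since Kafka 2.0 (KIP-266) because it could block past the given timeout while fetching metadata. A sketch of the Duration-based equivalent (not canal code):

import java.time.Duration;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.consumer.ConsumerRecords;

// Sketch: poll(Duration) bounds the total blocking time, including
// metadata fetches, unlike the deprecated poll(long).
ConsumerRecords<String, String> records =
    kafkaConsumer.poll(Duration.ofMillis(unit.toMillis(timeout)));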
use of org.apache.kafka.clients.consumer.ConsumerRecords in project storm by apache.
the class KafkaSpoutRebalanceTest method emitOneMessagePerPartitionThenRevokeOnePartition.
//Returns messageIds in order of emission
private List<KafkaSpoutMessageId> emitOneMessagePerPartitionThenRevokeOnePartition(
    KafkaSpout<String, String> spout,
    TopicPartition partitionThatWillBeRevoked,
    TopicPartition assignedPartition) {
  //Setup spout with mock consumer so we can get at the rebalance listener
  spout.open(conf, contextMock, collectorMock);
  spout.activate();
  ArgumentCaptor<ConsumerRebalanceListener> rebalanceListenerCapture =
      ArgumentCaptor.forClass(ConsumerRebalanceListener.class);
  verify(consumerMock).subscribe(anyCollection(), rebalanceListenerCapture.capture());
  //Assign partitions to the spout
  ConsumerRebalanceListener consumerRebalanceListener = rebalanceListenerCapture.getValue();
  List<TopicPartition> assignedPartitions = new ArrayList<>();
  assignedPartitions.add(partitionThatWillBeRevoked);
  assignedPartitions.add(assignedPartition);
  consumerRebalanceListener.onPartitionsAssigned(assignedPartitions);
  //Make the consumer return a single message for each partition
  Map<TopicPartition, List<ConsumerRecord<String, String>>> firstPartitionRecords =
      new HashMap<>();
  firstPartitionRecords.put(partitionThatWillBeRevoked,
      Collections.singletonList(new ConsumerRecord<>(partitionThatWillBeRevoked.topic(),
          partitionThatWillBeRevoked.partition(), 0L, "key", "value")));
  Map<TopicPartition, List<ConsumerRecord<String, String>>> secondPartitionRecords =
      new HashMap<>();
  secondPartitionRecords.put(assignedPartition,
      Collections.singletonList(new ConsumerRecord<>(assignedPartition.topic(),
          assignedPartition.partition(), 0L, "key", "value")));
  when(consumerMock.poll(anyLong()))
      .thenReturn(new ConsumerRecords<>(firstPartitionRecords))
      .thenReturn(new ConsumerRecords<>(secondPartitionRecords))
      .thenReturn(new ConsumerRecords<>(Collections.emptyMap()));
  //Emit the messages
  spout.nextTuple();
  ArgumentCaptor<KafkaSpoutMessageId> messageIdForRevokedPartition =
      ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
  verify(collectorMock).emit(anyObject(), anyObject(), messageIdForRevokedPartition.capture());
  reset(collectorMock);
  spout.nextTuple();
  ArgumentCaptor<KafkaSpoutMessageId> messageIdForAssignedPartition =
      ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
  verify(collectorMock).emit(anyObject(), anyObject(), messageIdForAssignedPartition.capture());
  //Now rebalance
  consumerRebalanceListener.onPartitionsRevoked(assignedPartitions);
  consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(assignedPartition));
  List<KafkaSpoutMessageId> emittedMessageIds = new ArrayList<>();
  emittedMessageIds.add(messageIdForRevokedPartition.getValue());
  emittedMessageIds.add(messageIdForAssignedPartition.getValue());
  return emittedMessageIds;
}
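The final stubbed poll result, an empty batch, can also be produced with the factory method the client ships; a small sketch of the two equivalent forms (assuming the map-based constructor shown in these snippets):

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecords;

// Both expressions yield an empty batch; ConsumerRecords.empty() is the
// idiomatic form and avoids building a throwaway map.
ConsumerRecords<String, String> viaConstructor =
    new ConsumerRecords<>(Collections.emptyMap());
ConsumerRecords<String, String> viaFactory = ConsumerRecords.empty();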