
Example 36 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project heron by twitter.

From the class KafkaSpoutTest, method fail().

@Test
public void fail() {
    when(kafkaConsumerFactory.create()).thenReturn(consumer);
    TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
    List<ConsumerRecord<String, byte[]>> recordList = new ArrayList<>();
    byte[] randomBytes = new byte[1];
    for (int i = 0; i < 5; i++) {
        RANDOM.nextBytes(randomBytes);
        recordList.add(new ConsumerRecord<>(DUMMY_TOPIC_NAME, 0, i, "key", Arrays.copyOf(randomBytes, randomBytes.length)));
    }
    ConsumerRecords<String, byte[]> consumerRecords = new ConsumerRecords<>(Collections.singletonMap(topicPartition, recordList));
    when(consumer.poll(any(Duration.class))).thenReturn(consumerRecords);
    kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATLEAST_ONCE.name()), topologyContext, collector);
    verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)), consumerRebalanceListenerArgumentCaptor.capture());
    ConsumerRebalanceListener consumerRebalanceListener = consumerRebalanceListenerArgumentCaptor.getValue();
    consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));
    // poll the topic
    kafkaSpout.nextTuple();
    // emit all five records
    for (int i = 0; i < 5; i++) {
        kafkaSpout.nextTuple();
    }
    // acks arrive out of order; the second and third records fail
    kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 4));
    kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 0));
    kafkaSpout.fail(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 1));
    kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 3));
    kafkaSpout.fail(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 2));
    // commit and poll
    kafkaSpout.nextTuple();
    verify(consumer).seek(topicPartition, 1);
    verify(consumer).commitAsync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(1)), null);
}
Also used: ArrayList(java.util.ArrayList) Duration(java.time.Duration) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Test(org.junit.Test)
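
These assertions encode the at-least-once contract: with offsets 0, 3, and 4 acked but 1 and 2 failed, the spout may only commit up to the first gap, so it seeks back to offset 1 and commits OffsetAndMetadata(1). A minimal sketch of that bookkeeping, assuming a per-partition tracker; the class and method names below are hypothetical, not Heron's actual implementation:

import java.util.TreeSet;

// Hypothetical helper: tracks acked offsets for one partition and yields the
// highest offset that is safe to commit (exclusive).
class AckedOffsetTracker {
    private final TreeSet<Long> acked = new TreeSet<>();
    private long committedOffset; // next offset the consumer should read

    AckedOffsetTracker(long startOffset) {
        this.committedOffset = startOffset;
    }

    void ack(long offset) {
        acked.add(offset);
    }

    // Advance past the contiguous run of acked offsets and stop at the first
    // gap (a failed or still-pending record), so failed records get re-polled.
    long offsetToCommit() {
        while (acked.contains(committedOffset)) {
            acked.remove(committedOffset);
            committedOffset++;
        }
        return committedOffset;
    }
}

With only offset 0 acked contiguously, offsetToCommit() returns 1, which is exactly what the seek and commitAsync verifications above assert.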

Example 37 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project heron by twitter.

From the class KafkaSpoutTest, method nextTuple().

@Test
public void nextTuple() {
    when(kafkaConsumerFactory.create()).thenReturn(consumer);
    ConsumerRecords<String, byte[]> consumerRecords = new ConsumerRecords<>(Collections.singletonMap(
            new TopicPartition(DUMMY_TOPIC_NAME, 0),
            Collections.singletonList(new ConsumerRecord<>(DUMMY_TOPIC_NAME, 0, 0, "key", new byte[] { 0xF }))));
    when(consumer.poll(any(Duration.class))).thenReturn(consumerRecords);
    doReturn(Collections.singletonMap(new MetricName("name", "group", "description", Collections.singletonMap("name", "value")), metric)).when(consumer).metrics();
    when(metric.metricValue()).thenReturn("sample value");
    kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATMOST_ONCE.name()), topologyContext, collector);
    verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)), consumerRebalanceListenerArgumentCaptor.capture());
    ConsumerRebalanceListener consumerRebalanceListener = consumerRebalanceListenerArgumentCaptor.getValue();
    TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
    consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));
    kafkaSpout.nextTuple();
    verify(consumer).commitAsync();
    verify(topologyContext).registerMetric(eq("name-group-name-value"), kafkaMetricDecoratorArgumentCaptor.capture(), eq(60));
    assertEquals("sample value", kafkaMetricDecoratorArgumentCaptor.getValue().getValueAndReset());
    kafkaSpout.nextTuple();
    verify(collector).emit(eq("default"), listArgumentCaptor.capture());
    assertEquals("key", listArgumentCaptor.getValue().get(0));
    assertArrayEquals(new byte[] { 0xF }, (byte[]) listArgumentCaptor.getValue().get(1));
}
Also used: MetricName(org.apache.kafka.common.MetricName) TopicPartition(org.apache.kafka.common.TopicPartition) Duration(java.time.Duration) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) Test(org.junit.Test)
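
The metric assertions show two things: the registered name is assembled from the Kafka MetricName's name, group, and tag pairs ("name-group-name-value"), and the registered object forwards metricValue() on every sample. A minimal sketch under those assumptions; the decorator below is hypothetical and only assumed to satisfy Heron's getValueAndReset() contract:

import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

// Hypothetical decorator: adapts a read-only Kafka client metric so a
// topology context can sample it via getValueAndReset().
class KafkaMetricDecorator {
    private final Metric metric;

    KafkaMetricDecorator(Metric metric) {
        this.metric = metric;
    }

    // Kafka gauges cannot be reset, so this simply reads the current value.
    public Object getValueAndReset() {
        return metric.metricValue();
    }

    // Builds "name-group-key-value" identifiers like the one asserted above.
    static String identifierOf(MetricName name) {
        StringBuilder sb = new StringBuilder(name.name()).append('-').append(name.group());
        name.tags().forEach((k, v) -> sb.append('-').append(k).append('-').append(v));
        return sb.toString();
    }
}

For the MetricName in this test (name "name", group "group", one tag "name" mapped to "value"), identifierOf yields "name-group-name-value".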

Example 38 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project hive by apache.

From the class TransactionalKafkaWriterTest, method checkData().

private void checkData() {
    Set<TopicPartition> assignment = Collections.singleton(new TopicPartition(TOPIC, 0));
    consumer.assign(assignment);
    consumer.seekToBeginning(assignment);
    long numRecords = 0;
    boolean emptyPoll = false;
    while (numRecords < RECORD_NUMBER && !emptyPoll) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(10000));
        Assert.assertFalse(records.records(new TopicPartition(TOPIC, 0))
                .stream()
                .anyMatch(consumerRecord -> !RECORDS_WRITABLES.contains(new KafkaWritable(0,
                        consumerRecord.timestamp(), consumerRecord.value(), consumerRecord.key()))));
        emptyPoll = records.isEmpty();
        numRecords += records.count();
    }
    Assert.assertEquals(RECORD_NUMBER, numRecords);
}
Also used: IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) BeforeClass(org.junit.BeforeClass) FileSystem(org.apache.hadoop.fs.FileSystem) HashMap(java.util.HashMap) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) Charset(java.nio.charset.Charset) Configuration(org.apache.hadoop.conf.Configuration) After(org.junit.After) Duration(java.time.Duration) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) Before(org.junit.Before) TopicPartition(org.apache.kafka.common.TopicPartition) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) AfterClass(org.junit.AfterClass) Properties(java.util.Properties) HiveConf(org.apache.hadoop.hive.conf.HiveConf) Set(java.util.Set) Test(org.junit.Test) IOException(java.io.IOException) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) Table(org.apache.hadoop.hive.metastore.api.Table) Mockito(org.mockito.Mockito) List(java.util.List) Rule(org.junit.Rule) Ignore(org.junit.Ignore) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) Assert(org.junit.Assert) Collections(java.util.Collections) TemporaryFolder(org.junit.rules.TemporaryFolder) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer)
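
The while loop is a common pattern for draining a topic in tests: keep polling until the expected record count arrives or a poll comes back empty, then assert on the total. A generic sketch of the same pattern; the helper name and signature are hypothetical:

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

// Hypothetical test helper: drain records until the expected count is reached
// or a poll returns nothing, whichever happens first.
static <K, V> List<ConsumerRecord<K, V>> drain(Consumer<K, V> consumer,
                                               long expected,
                                               Duration pollTimeout) {
    List<ConsumerRecord<K, V>> out = new ArrayList<>();
    boolean emptyPoll = false;
    while (out.size() < expected && !emptyPoll) {
        ConsumerRecords<K, V> records = consumer.poll(pollTimeout);
        records.forEach(out::add);
        emptyPoll = records.isEmpty();
    }
    return out;
}

Bounding the loop on an empty poll rather than on elapsed time alone keeps the test from spinning when fewer records than expected were written.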

Example 39 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project canal by alibaba.

From the class CanalKafkaConsumer, method getMessage().

@SuppressWarnings("unchecked")
@Override
public List<CommonMessage> getMessage(Long timeout, TimeUnit unit) {
    if (!flatMessage) {
        ConsumerRecords<String, Message> records = (ConsumerRecords<String, Message>) kafkaConsumer.poll(unit.toMillis(timeout));
        if (!records.isEmpty()) {
            currentOffsets.clear();
            List<CommonMessage> messages = new ArrayList<>();
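            // keep only the first offset seen per partition, so the start of
            // this batch is known (e.g. for seeking back on rollback)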
            for (ConsumerRecord<String, Message> record : records) {
                if (currentOffsets.get(record.partition()) == null) {
                    currentOffsets.put(record.partition(), record.offset());
                }
                messages.addAll(MessageUtil.convert(record.value()));
            }
            return messages;
        }
    } else {
        ConsumerRecords<String, String> records = (ConsumerRecords<String, String>) kafkaConsumer.poll(unit.toMillis(timeout));
        if (!records.isEmpty()) {
            List<CommonMessage> messages = new ArrayList<>();
            currentOffsets.clear();
            for (ConsumerRecord<String, String> record : records) {
                if (currentOffsets.get(record.partition()) == null) {
                    currentOffsets.put(record.partition(), record.offset());
                }
                String flatMessageJson = record.value();
                CommonMessage flatMessages = JSON.parseObject(flatMessageJson, CommonMessage.class);
                messages.add(flatMessages);
            }
            return messages;
        }
    }
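    // nothing was polled within the timeout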
    return null;
}
Also used: Message(com.alibaba.otter.canal.protocol.Message) CommonMessage(com.alibaba.otter.canal.connector.core.consumer.CommonMessage) ArrayList(java.util.ArrayList) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords)
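
Two notes on this method. First, poll(long) has been deprecated since Kafka 2.0 in favor of poll(Duration). Second, storing only the first offset seen per partition in currentOffsets pairs naturally with a rollback that rewinds to the start of the last batch so it is redelivered. A sketch of what such a rollback might look like; this is an assumption based on the method above, not necessarily canal's actual code:

// Hypothetical rollback: seek each partition back to the first offset of the
// last returned batch so the whole batch is polled again.
public void rollback(String topic, Map<Integer, Long> currentOffsets) {
    for (Map.Entry<Integer, Long> entry : currentOffsets.entrySet()) {
        kafkaConsumer.seek(new TopicPartition(topic, entry.getKey()), entry.getValue());
    }
}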

Example 40 with ConsumerRecords

Use of org.apache.kafka.clients.consumer.ConsumerRecords in project storm by apache.

From the class KafkaSpoutRebalanceTest, method emitOneMessagePerPartitionThenRevokeOnePartition().

//Returns messageIds in order of emission
private List<KafkaSpoutMessageId> emitOneMessagePerPartitionThenRevokeOnePartition(KafkaSpout<String, String> spout, TopicPartition partitionThatWillBeRevoked, TopicPartition assignedPartition) {
    //Setup spout with mock consumer so we can get at the rebalance listener
    spout.open(conf, contextMock, collectorMock);
    spout.activate();
    ArgumentCaptor<ConsumerRebalanceListener> rebalanceListenerCapture = ArgumentCaptor.forClass(ConsumerRebalanceListener.class);
    verify(consumerMock).subscribe(anyCollection(), rebalanceListenerCapture.capture());
    //Assign partitions to the spout
    ConsumerRebalanceListener consumerRebalanceListener = rebalanceListenerCapture.getValue();
    List<TopicPartition> assignedPartitions = new ArrayList<>();
    assignedPartitions.add(partitionThatWillBeRevoked);
    assignedPartitions.add(assignedPartition);
    consumerRebalanceListener.onPartitionsAssigned(assignedPartitions);
    //Make the consumer return a single message for each partition
    Map<TopicPartition, List<ConsumerRecord<String, String>>> firstPartitionRecords = new HashMap<>();
    firstPartitionRecords.put(partitionThatWillBeRevoked, Collections.singletonList(new ConsumerRecord(partitionThatWillBeRevoked.topic(), partitionThatWillBeRevoked.partition(), 0L, "key", "value")));
    Map<TopicPartition, List<ConsumerRecord<String, String>>> secondPartitionRecords = new HashMap<>();
    secondPartitionRecords.put(assignedPartition, Collections.singletonList(new ConsumerRecord(assignedPartition.topic(), assignedPartition.partition(), 0L, "key", "value")));
    when(consumerMock.poll(anyLong()))
            .thenReturn(new ConsumerRecords(firstPartitionRecords))
            .thenReturn(new ConsumerRecords(secondPartitionRecords))
            .thenReturn(new ConsumerRecords(Collections.emptyMap()));
    //Emit the messages
    spout.nextTuple();
    ArgumentCaptor<KafkaSpoutMessageId> messageIdForRevokedPartition = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(anyObject(), anyObject(), messageIdForRevokedPartition.capture());
    reset(collectorMock);
    spout.nextTuple();
    ArgumentCaptor<KafkaSpoutMessageId> messageIdForAssignedPartition = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(anyObject(), anyObject(), messageIdForAssignedPartition.capture());
    //Now rebalance
    consumerRebalanceListener.onPartitionsRevoked(assignedPartitions);
    consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(assignedPartition));
    List<KafkaSpoutMessageId> emittedMessageIds = new ArrayList<>();
    emittedMessageIds.add(messageIdForRevokedPartition.getValue());
    emittedMessageIds.add(messageIdForAssignedPartition.getValue());
    return emittedMessageIds;
}
Also used: HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TopicPartition(org.apache.kafka.common.TopicPartition) List(java.util.List)
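
The test drives the captured ConsumerRebalanceListener by hand to verify that state tied to a revoked partition is discarded. A listener with that behavior might look like the following sketch; the class is hypothetical, not Storm's implementation:

import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

// Hypothetical listener: drops in-flight state for revoked partitions so
// their message ids can no longer be acked or committed by this instance.
class StateCleaningRebalanceListener implements ConsumerRebalanceListener {
    private final Map<TopicPartition, Set<Long>> inFlightOffsets = new ConcurrentHashMap<>();

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        partitions.forEach(inFlightOffsets::remove);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        partitions.forEach(tp -> inFlightOffsets.putIfAbsent(tp, ConcurrentHashMap.newKeySet()));
    }
}

Dropping in-flight offsets on revocation keeps the spout from acking or committing offsets for a partition that another consumer now owns.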

Aggregations

ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 71
TopicPartition (org.apache.kafka.common.TopicPartition): 59
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 48
HashMap (java.util.HashMap): 37
ArrayList (java.util.ArrayList): 32
List (java.util.List): 32
Test (org.junit.Test): 32
Map (java.util.Map): 21
Properties (java.util.Properties): 16
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 16
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 16
Duration (java.time.Duration): 15
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 12
Collection (java.util.Collection): 11
Collections (java.util.Collections): 11
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 11
Set (java.util.Set): 9
AtomicReference (java.util.concurrent.atomic.AtomicReference): 9
Collectors (java.util.stream.Collectors): 9
HashSet (java.util.HashSet): 8