Search in sources:

Example 61 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project samza by apache.

the class CheckpointVersionIntegrationTest method runStatefulApp.

private void runStatefulApp(List<String> inputMessages, List<String> expectedInputTopicMessages, List<String> expectedChangelogMessages, Map<String, String> configs) {
    // create input topic and produce the first batch of input messages
    createTopic(INPUT_TOPIC, 1);
    inputMessages.forEach(m -> produceMessage(INPUT_TOPIC, 0, m, m));
    // verify that the input messages were produced successfully
    if (!inputMessages.isEmpty()) {
        List<ConsumerRecord<String, String>> inputRecords = consumeMessages(INPUT_TOPIC, inputMessages.size());
        List<String> readInputMessages = inputRecords.stream().map(ConsumerRecord::value).collect(Collectors.toList());
        Assert.assertEquals(expectedInputTopicMessages, readInputMessages);
    }
    // run the application
    RunApplicationContext context = runApplication(
        new MyStatefulApplication(INPUT_SYSTEM, INPUT_TOPIC, Collections.singletonMap(STORE_NAME, CHANGELOG_TOPIC)),
        "myApp", configs);
    // wait for the application to finish
    context.getRunner().waitForFinish();
    // consume and verify the changelog messages
    if (!expectedChangelogMessages.isEmpty()) {
        List<ConsumerRecord<String, String>> changelogRecords = consumeMessages(CHANGELOG_TOPIC, expectedChangelogMessages.size());
        List<String> changelogMessages = changelogRecords.stream().map(ConsumerRecord::value).collect(Collectors.toList());
        Assert.assertEquals(expectedChangelogMessages, changelogMessages);
    }
    LOG.info("Finished initial run");
}
Also used: MyStatefulApplication (org.apache.samza.storage.MyStatefulApplication), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)
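
Note that consumeMessages and produceMessage are helpers from Samza's integration-test harness, not Kafka client APIs. A minimal sketch of what such a consume helper might look like, assuming an illustrative consumerProperties() config factory (this is not the actual harness code):

private List<ConsumerRecord<String, String>> consumeMessages(String topic, int expectedCount) {
    // Hypothetical helper; the real implementation lives in Samza's test harness.
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties());
    consumer.subscribe(Collections.singletonList(topic));
    List<ConsumerRecord<String, String>> records = new ArrayList<>();
    long deadline = System.currentTimeMillis() + 30_000; // allow the broker up to 30s
    while (records.size() < expectedCount && System.currentTimeMillis() < deadline) {
        // poll(Duration) is the non-deprecated overload on Kafka clients 2.0+
        consumer.poll(Duration.ofMillis(500)).forEach(records::add);
    }
    consumer.close();
    return records;
}

(Besides the Kafka consumer classes, this sketch needs java.time.Duration and java.util.ArrayList.)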

Example 62 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project samza by apache.

the class TestStreamProcessor method verifyNumMessages.

/**
 * Consumes data from the topic until there are no new messages for a while
 * and asserts that the number of consumed messages is as expected.
 */
@SuppressWarnings("unchecked")
private void verifyNumMessages(KafkaConsumer consumer, String topic, int expectedNumMessages) {
    consumer.subscribe(Collections.singletonList(topic));
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            for (ConsumerRecord record : (Iterable<ConsumerRecord>) records) {
                Assert.assertEquals(new String((byte[]) record.value()), String.valueOf(count));
                count++;
            }
        } else {
            emptyPollCount++;
        }
    }
    Assert.assertEquals(count, expectedNumMessages);
}
Also used: ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)
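
The poll(long) overload used above was deprecated in Kafka 2.0 (KIP-266) in favor of poll(Duration), which enforces the timeout strictly, including time spent fetching metadata. On newer clients the polling step would read, for example:

// Equivalent poll on Kafka clients 2.0+; handleValue is a placeholder for the
// per-record assertion in the test above.
ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
for (ConsumerRecord<byte[], byte[]> record : records) {
    handleValue(new String(record.value())); // value() is byte[] on a typed consumer, no cast needed
}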

Example 63 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project storm by apache.

the class KafkaTridentSpoutEmitterEmitTest method testTimeStampStrategyWhenTopologyIsRedeployed.

@Test
public void testTimeStampStrategyWhenTopologyIsRedeployed() {
    /*
     * The TIMESTAMP strategy should be applied when the emitter is new and the topology
     * has been redeployed (the Storm id has changed). The offset should be reset to the
     * offset corresponding to startTimeStamp.
     */
    long preRestartEmittedOffset = 20;
    int preRestartEmittedRecords = 10;
    long timeStampStartOffset = 2L;
    long pollTimeout = 1L;
    KafkaTridentSpoutBatchMetadata preExecutorRestartLastMeta = new KafkaTridentSpoutBatchMetadata(preRestartEmittedOffset, preRestartEmittedOffset + preRestartEmittedRecords - 1, "Some older topology");
    KafkaConsumer<String, String> kafkaConsumer = Mockito.mock(KafkaConsumer.class);
    when(kafkaConsumer.assignment()).thenReturn(Collections.singleton(partition));
    OffsetAndTimestamp offsetAndTimestamp = new OffsetAndTimestamp(timeStampStartOffset, startTimeStamp);
    HashMap<TopicPartition, OffsetAndTimestamp> map = new HashMap<>();
    map.put(partition, offsetAndTimestamp);
    when(kafkaConsumer.offsetsForTimes(Collections.singletonMap(partition, startTimeStamp))).thenReturn(map);
    HashMap<TopicPartition, List<ConsumerRecord<String, String>>> topicPartitionMap = new HashMap<>();
    List<ConsumerRecord<String, String>> newRecords = SpoutWithMockedConsumerSetupHelper.createRecords(partition, timeStampStartOffset, recordsInKafka);
    topicPartitionMap.put(partition, newRecords);
    when(kafkaConsumer.poll(pollTimeout)).thenReturn(new ConsumerRecords<>(topicPartitionMap));
    KafkaTridentSpoutEmitter<String, String> emitter = createEmitter(kafkaConsumer, FirstPollOffsetStrategy.TIMESTAMP);
    TransactionAttempt txid = new TransactionAttempt(0L, 0);
    KafkaTridentSpoutTopicPartition kttp = new KafkaTridentSpoutTopicPartition(partition);
    Map<String, Object> meta = emitter.emitPartitionBatchNew(txid, collectorMock, kttp, preExecutorRestartLastMeta.toMap());
    verify(collectorMock, times(recordsInKafka)).emit(emitCaptor.capture());
    verify(kafkaConsumer, times(1)).seek(partition, timeStampStartOffset);
    List<List<Object>> emits = emitCaptor.getAllValues();
    assertThat(emits.get(0).get(0), is(timeStampStartOffset));
    KafkaTridentSpoutBatchMetadata deserializedMeta = KafkaTridentSpoutBatchMetadata.fromMap(meta);
    assertThat("The batch should start at the first offset for startTimestamp", deserializedMeta.getFirstOffset(), is(timeStampStartOffset));
}
Also used: HashMap (java.util.HashMap), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), TopicPartition (org.apache.kafka.common.TopicPartition), ArgumentMatchers.anyList (org.mockito.ArgumentMatchers.anyList), List (java.util.List), OffsetAndTimestamp (org.apache.kafka.clients.consumer.OffsetAndTimestamp), TransactionAttempt (org.apache.storm.trident.topology.TransactionAttempt), Test (org.junit.jupiter.api.Test), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
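
The mocked offsetsForTimes and seek calls mirror what the TIMESTAMP strategy does against a live consumer: look up the earliest offset whose record timestamp is at or after the start timestamp, then seek to it. A sketch of that lookup with a real consumer (the topic name and timestamp here are illustrative):

TopicPartition partition = new TopicPartition("my-topic", 0);
long startTimeStamp = System.currentTimeMillis() - 60_000L; // e.g. one minute ago
Map<TopicPartition, OffsetAndTimestamp> offsets =
    consumer.offsetsForTimes(Collections.singletonMap(partition, startTimeStamp));
OffsetAndTimestamp target = offsets.get(partition);
if (target != null) { // null when no record has a timestamp >= startTimeStamp
    consumer.seek(partition, target.offset());
}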

Example 64 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project storm by apache.

the class DefaultRecordTranslatorTest method testBasic.

@Test
public void testBasic() {
    DefaultRecordTranslator<String, String> trans = new DefaultRecordTranslator<>();
    assertEquals(Arrays.asList("default"), trans.streams());
    assertEquals(new Fields("topic", "partition", "offset", "key", "value"), trans.getFieldsFor("default"));
    ConsumerRecord<String, String> cr = new ConsumerRecord<>("TOPIC", 100, 100, "THE KEY", "THE VALUE");
    assertEquals(Arrays.asList("TOPIC", 100, 100l, "THE KEY", "THE VALUE"), trans.apply(cr));
}
Also used: Fields (org.apache.storm.tuple.Fields), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.Test)
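
Note that the expected tuple holds 100L for the offset but a plain 100 for the partition: ConsumerRecord's five-argument constructor takes (topic, int partition, long offset, key, value), so the third argument is a long. The same Func-plus-Fields pattern used by ByTopicRecordTranslator in the next example also builds custom translators; a hedged sketch of a value-only translator using Storm's Values helper:

// Illustrative: emit only the record value, under a single "value" field.
ByTopicRecordTranslator<String, String> valueOnly = new ByTopicRecordTranslator<>(
    (r) -> new Values(r.value()),
    new Fields("value"));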

Example 65 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project storm by apache.

the class ByTopicRecordTranslatorTest method testNullTranslation.

@Test
public void testNullTranslation() {
    ByTopicRecordTranslator<String, String> trans = new ByTopicRecordTranslator<>((r) -> null, new Fields("key"));
    ConsumerRecord<String, String> cr = new ConsumerRecord<>("TOPIC 1", 100, 100, "THE KEY", "THE VALUE");
    assertNull(trans.apply(cr));
}
Also used: Fields (org.apache.storm.tuple.Fields), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.Test)
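
A null return from apply means the record produces no tuple, which is what the assertion checks; translators can use this to filter records before they reach the topology. A sketch of a translator that drops tombstone (null-value) records, reusing the shapes from the test above:

// Illustrative filter: skip records with null values, pass the rest through.
ByTopicRecordTranslator<String, String> filtering = new ByTopicRecordTranslator<>(
    (r) -> r.value() == null ? null : new Values(r.key(), r.value()),
    new Fields("key", "value"));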

Aggregations

ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 314
TopicPartition (org.apache.kafka.common.TopicPartition): 160
Test (org.junit.Test): 145
ArrayList (java.util.ArrayList): 123
List (java.util.List): 100
HashMap (java.util.HashMap): 98
Map (java.util.Map): 70
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 61
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 51
Test (org.junit.jupiter.api.Test): 35
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 33
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 31
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 31
LinkedHashMap (java.util.LinkedHashMap): 30
Header (org.apache.kafka.common.header.Header): 29
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 28
TimeUnit (java.util.concurrent.TimeUnit): 27
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
ByteBuffer (java.nio.ByteBuffer): 22