
Example 36 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the project apache-kafka-on-k8s by banzaicloud.

Class EosTestDriver, method addRecord:

private static void addRecord(final ConsumerRecord<byte[], byte[]> record,
                              final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> recordPerTopicPerPartition,
                              final boolean withRepartitioning) {
    final String topic = record.topic();
    final TopicPartition partition = new TopicPartition(topic, record.partition());
    if (verifyTopic(topic, withRepartitioning)) {
        Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> topicRecordsPerPartition = recordPerTopicPerPartition.get(topic);
        if (topicRecordsPerPartition == null) {
            topicRecordsPerPartition = new HashMap<>();
            recordPerTopicPerPartition.put(topic, topicRecordsPerPartition);
        }
        List<ConsumerRecord<byte[], byte[]>> records = topicRecordsPerPartition.get(partition);
        if (records == null) {
            records = new ArrayList<>();
            topicRecordsPerPartition.put(partition, records);
        }
        records.add(record);
    } else {
        throw new RuntimeException("FAIL: received data from unexpected topic: " + record);
    }
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), ArrayList (java.util.ArrayList), List (java.util.List), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)
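
The nested get-then-put pattern above predates Java 8. A minimal sketch of the same grouping written with Map.computeIfAbsent; the wrapper class name is hypothetical, and the verifyTopic guard is omitted for brevity:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;

public final class RecordGrouping {

    // Same grouping as addRecord above: one list of records per partition, per topic.
    static void addRecord(final ConsumerRecord<byte[], byte[]> record,
                          final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> recordPerTopicPerPartition) {
        final TopicPartition partition = new TopicPartition(record.topic(), record.partition());
        recordPerTopicPerPartition
                .computeIfAbsent(record.topic(), topic -> new HashMap<>())
                .computeIfAbsent(partition, tp -> new ArrayList<>())
                .add(record);
    }
}

Behavior is identical for the happy path; computeIfAbsent simply collapses the two null-check-and-insert branches into one expression each.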

Example 37 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the project apache-kafka-on-k8s by banzaicloud.

Class ProcessorTopologyTestDriver, method process:

/**
 * Send an input message with the given key, value, and timestamp on the specified topic to the topology, and then commit the messages.
 *
 * @param topicName the name of the topic on which the message is to be sent
 * @param key the raw message key
 * @param value the raw message value
 * @param timestamp the raw message timestamp
 */
public void process(final String topicName, final byte[] key, final byte[] value, final long timestamp) {
    final TopicPartition tp = partitionsByTopic.get(topicName);
    if (tp != null) {
        // Add the record ...
        final long offset = offsetsByTopicPartition.get(tp).incrementAndGet();
        task.addRecords(tp, records(new ConsumerRecord<>(tp.topic(), tp.partition(), offset, timestamp,
                TimestampType.CREATE_TIME, 0L, 0, 0, key, value)));
        producer.clear();
        // Process the record ...
        task.process();
        ((InternalProcessorContext) task.context()).setRecordContext(
                new ProcessorRecordContext(timestamp, offset, tp.partition(), topicName));
        task.commit();
        // Capture all the records sent to the producer ...
        for (final ProducerRecord<byte[], byte[]> record : producer.history()) {
            Queue<ProducerRecord<byte[], byte[]>> outputRecords = outputRecordsByTopic.get(record.topic());
            if (outputRecords == null) {
                outputRecords = new LinkedList<>();
                outputRecordsByTopic.put(record.topic(), outputRecords);
            }
            outputRecords.add(record);
            // Forward back into the topology if the produced record is to an internal or a source topic ...
            if (internalTopics.contains(record.topic()) || topology.sourceTopics().contains(record.topic())) {
                process(record.topic(), record.key(), record.value(), record.timestamp());
            }
        }
    } else {
        final TopicPartition global = globalPartitionsByTopic.get(topicName);
        if (global == null) {
            throw new IllegalArgumentException("Unexpected topic: " + topicName);
        }
        final long offset = offsetsByTopicPartition.get(global).incrementAndGet();
        globalStateTask.update(new ConsumerRecord<>(global.topic(), global.partition(), offset, timestamp,
                TimestampType.CREATE_TIME, 0L, 0, 0, key, value));
        globalStateTask.flushState();
    }
}
Also used: ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext), TopicPartition (org.apache.kafka.common.TopicPartition), InternalProcessorContext (org.apache.kafka.streams.processor.internals.InternalProcessorContext), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)
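
All of these examples construct records with the ten-argument pre-2.0 ConsumerRecord constructor (topic, partition, offset, timestamp, timestamp type, checksum, serialized key size, serialized value size, key, value). A small test helper, assuming that same constructor is on the classpath; the class and method names are hypothetical:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.record.TimestampType;

public final class TestRecords {

    // Builds a CREATE_TIME record; the checksum and serialized-size arguments
    // (0L, 0, 0) are placeholders, exactly as in the driver code above.
    static ConsumerRecord<byte[], byte[]> record(final String topic,
                                                 final int partition,
                                                 final long offset,
                                                 final long timestamp,
                                                 final byte[] key,
                                                 final byte[] value) {
        return new ConsumerRecord<>(topic, partition, offset, timestamp,
                TimestampType.CREATE_TIME, 0L, 0, 0, key, value);
    }
}

With such a helper, the calls above shrink to record(tp.topic(), tp.partition(), offset, timestamp, key, value).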

Example 38 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the project apache-kafka-on-k8s by banzaicloud.

Class StreamTaskTest, method shouldReturnOffsetsForRepartitionTopicsForPurging:

@Test
public void shouldReturnOffsetsForRepartitionTopicsForPurging() {
    final TopicPartition repartition = new TopicPartition("repartition", 1);
    final ProcessorTopology topology = ProcessorTopology.withRepartitionTopics(Utils.<ProcessorNode>mkList(source1, source2), new HashMap<String, SourceNode>() {

        {
            put(topic1, source1);
            put(repartition.topic(), source2);
        }
    }, Collections.singleton(repartition.topic()));
    consumer.assign(Arrays.asList(partition1, repartition));
    task = new StreamTask(taskId00, Utils.mkSet(partition1, repartition), topology, consumer, changelogReader,
            config, streamsMetrics, stateDirectory, null, time, producer);
    task.initializeStateStores();
    task.initializeTopology();
    task.addRecords(partition1, Collections.singletonList(new ConsumerRecord<>(partition1.topic(), partition1.partition(),
            5L, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
    task.addRecords(repartition, Collections.singletonList(new ConsumerRecord<>(repartition.topic(), repartition.partition(),
            10L, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
    assertTrue(task.process());
    assertTrue(task.process());
    task.commit();
    Map<TopicPartition, Long> map = task.purgableOffsets();
    assertThat(map, equalTo(Collections.singletonMap(repartition, 11L)));
}
Also used: MockSourceNode (org.apache.kafka.test.MockSourceNode), TopicPartition (org.apache.kafka.common.TopicPartition), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.Test)
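
The offsets returned by purgableOffsets() (here 11L, one past the last committed repartition record) are the kind of input Kafka Streams hands to the broker's delete-records API so fully consumed repartition data can be reclaimed. A minimal sketch of that call, assuming Kafka 1.1+ (KIP-107); the wrapper class name and the bootstrap address are assumptions:

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

public final class RepartitionPurge {

    // Deletes every record below the given offset in each partition,
    // e.g. the map returned by purgableOffsets() in the test above.
    static void purge(final Map<TopicPartition, Long> purgeableOffsets) throws Exception {
        final Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        try (AdminClient admin = AdminClient.create(props)) {
            final Map<TopicPartition, RecordsToDelete> toDelete = new HashMap<>();
            for (final Map.Entry<TopicPartition, Long> entry : purgeableOffsets.entrySet()) {
                toDelete.put(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue()));
            }
            admin.deleteRecords(toDelete).all().get();
        }
    }
}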

Example 39 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the project apache-kafka-on-k8s by banzaicloud.

Class RecordQueueTest, method shouldNotThrowStreamsExceptionWhenKeyDeserializationFailsWithSkipHandler:

@Test
public void shouldNotThrowStreamsExceptionWhenKeyDeserializationFailsWithSkipHandler() throws Exception {
    final byte[] key = Serdes.Long().serializer().serialize("foo", 1L);
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
            new ConsumerRecord<>("topic", 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, key, recordValue));
    final StateSerdes anyStateSerde = StateSerdes.withBuiltinTypes("anyName", Bytes.class, Bytes.class);
    queueThatSkipsDeserializeErrors.addRawRecords(records);
    assertEquals(0, queueThatSkipsDeserializeErrors.size());
}
Also used: StateSerdes (org.apache.kafka.streams.state.StateSerdes), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.Test)
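
The queueThatSkipsDeserializeErrors fixture wires in a LogAndContinueExceptionHandler directly; in an application the same skip-on-bad-record behavior is configured through StreamsConfig. A minimal sketch, assuming Kafka Streams 1.0+ (KIP-161); the application id and broker address are placeholders:

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;

public final class SkipBadRecordsConfig {

    static Properties streamsProps() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "skip-bad-records-app"); // assumed app id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // assumed broker
        // Log and skip records whose key or value fails to deserialize,
        // instead of throwing a StreamsException.
        props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                  LogAndContinueExceptionHandler.class);
        return props;
    }
}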

Example 40 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the project apache-kafka-on-k8s by banzaicloud.

Class RecordQueueTest, method shouldDropOnNegativeTimestamp:

@Test
public void shouldDropOnNegativeTimestamp() {
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
            new ConsumerRecord<>("topic", 1, 1, -1L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue));
    final RecordQueue queue = new RecordQueue(new TopicPartition(topics[0], 1),
            new MockSourceNode<>(topics, intDeserializer, intDeserializer),
            new LogAndSkipOnInvalidTimestamp(), new LogAndContinueExceptionHandler(), null, new LogContext());
    queue.addRawRecords(records);
    assertEquals(0, queue.size());
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), LogAndSkipOnInvalidTimestamp (org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp), LogContext (org.apache.kafka.common.utils.LogContext), LogAndContinueExceptionHandler (org.apache.kafka.streams.errors.LogAndContinueExceptionHandler), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.Test)
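
Here the drop comes from the LogAndSkipOnInvalidTimestamp extractor rather than from a deserialization handler. Configuring it as the default timestamp extractor in an application looks like the sketch below, again with a placeholder application id and broker address:

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp;

public final class DropNegativeTimestampsConfig {

    static Properties streamsProps() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "drop-invalid-ts-app"); // assumed app id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // assumed broker
        // Log and drop records whose extracted timestamp is negative,
        // matching the behavior exercised by the test above.
        props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
                  LogAndSkipOnInvalidTimestamp.class);
        return props;
    }
}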

Aggregations

ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 309
TopicPartition (org.apache.kafka.common.TopicPartition): 158
Test (org.junit.Test): 145
ArrayList (java.util.ArrayList): 120
List (java.util.List): 99
HashMap (java.util.HashMap): 97
Map (java.util.Map): 70
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 61
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 51
Test (org.junit.jupiter.api.Test): 35
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 33
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 31
LinkedHashMap (java.util.LinkedHashMap): 30
Header (org.apache.kafka.common.header.Header): 29
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 28
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 28
TimeUnit (java.util.concurrent.TimeUnit): 27
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
ByteBuffer (java.nio.ByteBuffer): 22