Example 1 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

From class SourceNodeRecordDeserializer, method deserialize:

@Override
public ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord) {
    final Object key;
    try {
        key = sourceNode.deserializeKey(rawRecord.topic(), rawRecord.key());
    } catch (Exception e) {
        throw new StreamsException(format("Failed to deserialize key for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e);
    }
    final Object value;
    try {
        value = sourceNode.deserializeValue(rawRecord.topic(), rawRecord.value());
    } catch (Exception e) {
        throw new StreamsException(format("Failed to deserialize value for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e);
    }
    return new ConsumerRecord<>(rawRecord.topic(), rawRecord.partition(), rawRecord.offset(), rawRecord.timestamp(), TimestampType.CREATE_TIME, rawRecord.checksum(), rawRecord.serializedKeySize(), rawRecord.serializedValueSize(), key, value);
}
Also used: StreamsException(org.apache.kafka.streams.errors.StreamsException) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)
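
For orientation, here is a minimal sketch of where the raw ConsumerRecord<byte[], byte[]> instances handed to a deserializer like the one above typically come from: a plain KafkaConsumer configured with byte-array deserializers. The bootstrap address, group id, topic name, and class name are placeholders, and poll(Duration) is the newer-client signature (older clients use poll(long)).

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class RawRecordPollSketch {
    public static void main(String[] args) {
        final Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // placeholder broker address
        props.put("group.id", "raw-record-sketch");        // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("some-topic"));  // placeholder topic
            // Each polled record carries topic, partition, and offset, which is exactly
            // the context the StreamsException messages above embed on failure.
            final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(100));
            for (final ConsumerRecord<byte[], byte[]> rawRecord : records) {
                System.out.printf("topic=%s, partition=%d, offset=%d%n",
                    rawRecord.topic(), rawRecord.partition(), rawRecord.offset());
            }
        }
    }
}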

Example 2 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

From class ProcessorStateManager, method updateStandbyStates:

public List<ConsumerRecord<byte[], byte[]>> updateStandbyStates(TopicPartition storePartition, List<ConsumerRecord<byte[], byte[]>> records) {
    long limit = offsetLimit(storePartition);
    List<ConsumerRecord<byte[], byte[]>> remainingRecords = null;
    // restore states from changelog records
    StateRestoreCallback restoreCallback = restoreCallbacks.get(storePartition.topic());
    long lastOffset = -1L;
    int count = 0;
    for (ConsumerRecord<byte[], byte[]> record : records) {
        if (record.offset() < limit) {
            try {
                restoreCallback.restore(record.key(), record.value());
            } catch (Exception e) {
                throw new ProcessorStateException(String.format("%s exception caught while trying to restore state from %s", logPrefix, storePartition), e);
            }
            lastOffset = record.offset();
        } else {
            if (remainingRecords == null)
                remainingRecords = new ArrayList<>(records.size() - count);
            remainingRecords.add(record);
        }
        count++;
    }
    // record the restored offset for its change log partition
    restoredOffsets.put(storePartition, lastOffset + 1);
    return remainingRecords;
}
Also used: StateRestoreCallback(org.apache.kafka.streams.processor.StateRestoreCallback) ArrayList(java.util.ArrayList) ProcessorStateException(org.apache.kafka.streams.errors.ProcessorStateException) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint) IOException(java.io.IOException) StreamsException(org.apache.kafka.streams.errors.StreamsException) LockException(org.apache.kafka.streams.errors.LockException)
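
The restoreCallback.restore(key, value) call above is the hook a state store registers so that changelog records can be replayed into it. A minimal sketch of such a callback, assuming a toy in-memory store (InMemoryRestoreCallback is a made-up name; real stores register their own implementations):

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.processor.StateRestoreCallback;

// Toy StateRestoreCallback that replays changelog records into an in-memory map.
public class InMemoryRestoreCallback implements StateRestoreCallback {

    private final Map<Bytes, byte[]> store = new HashMap<>();

    @Override
    public void restore(final byte[] key, final byte[] value) {
        if (value == null) {
            store.remove(Bytes.wrap(key));      // treat a null value as a delete (tombstone)
        } else {
            store.put(Bytes.wrap(key), value);  // overwrite with the latest changelog value
        }
    }
}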

Example 3 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

From class StreamTaskTest, method shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContext:

@SuppressWarnings("unchecked")
@Test
public void shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContext() throws Exception {
    final MockSourceNode processorNode = new MockSourceNode(topic1, intDeserializer, intDeserializer) {

        @Override
        public void process(final Object key, final Object value) {
            throw new KafkaException("KABOOM!");
        }
    };
    final List<ProcessorNode> processorNodes = Collections.<ProcessorNode>singletonList(processorNode);
    final Map<String, SourceNode> sourceNodes = Collections.<String, SourceNode>singletonMap(topic1[0], processorNode);
    final ProcessorTopology topology = new ProcessorTopology(processorNodes, sourceNodes, Collections.<String, SinkNode>emptyMap(), Collections.<StateStore>emptyList(), Collections.<String, String>emptyMap(), Collections.<StateStore>emptyList());
    task.close();
    task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, testCache, time, recordCollector);
    final int offset = 20;
    task.addRecords(partition1, Collections.singletonList(new ConsumerRecord<>(partition1.topic(), partition1.partition(), offset, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
    try {
        task.process();
        fail("Should've thrown StreamsException");
    } catch (StreamsException e) {
        final String message = e.getMessage();
        assertTrue("message=" + message + " should contain topic", message.contains("topic=" + topic1[0]));
        assertTrue("message=" + message + " should contain partition", message.contains("partition=" + partition1.partition()));
        assertTrue("message=" + message + " should contain offset", message.contains("offset=" + offset));
        assertTrue("message=" + message + " should contain processor", message.contains("processor=" + processorNode.name()));
    }
}
Also used: StreamsException(org.apache.kafka.streams.errors.StreamsException) OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) MockSourceNode(org.apache.kafka.test.MockSourceNode) MockProcessorNode(org.apache.kafka.test.MockProcessorNode) KafkaException(org.apache.kafka.common.KafkaException) Test(org.junit.Test)
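
A note on the long ConsumerRecord constructor used in these tests: the positional arguments after the offset are the timestamp, the TimestampType, the checksum, and the serialized key and value sizes, as the deserializer in Example 1 makes explicit. When none of that metadata matters, a shorter constructor is available; a minimal sketch (class name is a placeholder):

import org.apache.kafka.clients.consumer.ConsumerRecord;

public class ShortConstructorSketch {
    public static void main(String[] args) {
        // topic, partition, offset, key, value; timestamp and size fields take default values.
        final ConsumerRecord<Integer, Integer> record =
            new ConsumerRecord<>("topic1", 0, 20L, 1, 100);
        System.out.println(record.topic() + "-" + record.partition() + "@" + record.offset());
    }
}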

Example 4 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

From class StandbyTaskTest, method testUpdateNonPersistentStore:

@SuppressWarnings("unchecked")
@Test(expected = Exception.class)
public void testUpdateNonPersistentStore() throws Exception {
    StreamsConfig config = createConfig(baseDir);
    StandbyTask task = new StandbyTask(taskId, applicationId, topicPartitions, topology, consumer, changelogReader, config, null, stateDirectory);
    restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
    task.update(partition1, records(new ConsumerRecord<>(partition1.topic(), partition1.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
}
Also used: ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
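
The records(...) helper is not shown in this snippet; presumably it only wraps its arguments into the List<ConsumerRecord<byte[], byte[]>> that StandbyTask.update(...) expects. A plausible, hypothetical sketch:

import java.util.Arrays;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;

// Hypothetical test helper: wrap one or more records in a List for StandbyTask.update(...).
private List<ConsumerRecord<byte[], byte[]>> records(final ConsumerRecord<byte[], byte[]>... recs) {
    return Arrays.asList(recs);
}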

Example 5 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

From class StandbyTaskTest, method testUpdateKTable:

@SuppressWarnings("unchecked")
@Test
public void testUpdateKTable() throws Exception {
    consumer.assign(Utils.mkList(ktable));
    Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(0L));
    consumer.commitSync(committedOffsets);
    restoreStateConsumer.updatePartitions("ktable1", Utils.mkList(
        new PartitionInfo("ktable1", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("ktable1", 1, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("ktable1", 2, Node.noNode(), new Node[0], new Node[0])));
    StreamsConfig config = createConfig(baseDir);
    StandbyTask task = new StandbyTask(taskId, applicationId, ktablePartitions, ktableTopology, consumer, changelogReader, config, null, stateDirectory);
    restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
    for (ConsumerRecord<Integer, Integer> record : Arrays.asList(
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, 100),
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, 100),
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 3, 100),
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 40, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 4, 100),
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 50, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 5, 100))) {
        restoreStateConsumer.bufferRecord(record);
    }
    for (Map.Entry<TopicPartition, Long> entry : task.checkpointedOffsets().entrySet()) {
        TopicPartition partition = entry.getKey();
        long offset = entry.getValue();
        if (offset >= 0) {
            restoreStateConsumer.seek(partition, offset);
        } else {
            restoreStateConsumer.seekToBeginning(singleton(partition));
        }
    }
    // The commit offset is at 0L. Records should not be processed
    List<ConsumerRecord<byte[], byte[]>> remaining = task.update(ktable, restoreStateConsumer.poll(100).records(ktable));
    assertEquals(5, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(10L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The commit offset has not been reached yet.
    remaining = task.update(ktable, remaining);
    assertEquals(5, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(11L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // One record should be processed.
    remaining = task.update(ktable, remaining);
    assertEquals(4, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(45L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The commit offset is now 45. All records except the last one should be processed.
    remaining = task.update(ktable, remaining);
    assertEquals(1, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(50L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The commit offset is now 50. The last record (offset 50) still remains.
    remaining = task.update(ktable, remaining);
    assertEquals(1, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(60L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The commit offset is now 60. No record should be left.
    remaining = task.update(ktable, remaining);
    assertNull(remaining);
    task.closeStateManager(true);
    File taskDir = stateDirectory.directoryForTask(taskId);
    OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
    Map<TopicPartition, Long> offsets = checkpoint.read();
    assertEquals(1, offsets.size());
    assertEquals(new Long(51L), offsets.get(ktable));
}
Also used: OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint) HashMap(java.util.HashMap) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) PartitionInfo(org.apache.kafka.common.PartitionInfo) Map(java.util.Map) File(java.io.File) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
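
The final assertion expects offset 51 because updateStandbyStates (Example 2) records lastOffset + 1 per changelog partition, and the last applied record sits at offset 50. The checkpoint itself is a small file handled by OffsetCheckpoint; a minimal sketch of writing one and reading it back (the file path and class name are placeholders, and OffsetCheckpoint is an internal Streams class whose API can shift between versions):

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class CheckpointSketch {
    public static void main(String[] args) throws IOException {
        final File file = new File("/tmp/example-task-dir/.checkpoint");  // placeholder path
        final OffsetCheckpoint checkpoint = new OffsetCheckpoint(file);

        // Persist the next offset to restore from, then read it back.
        checkpoint.write(Collections.singletonMap(new TopicPartition("ktable1", 0), 51L));
        final Map<TopicPartition, Long> offsets = checkpoint.read();
        System.out.println(offsets);  // expected: {ktable1-0=51}
    }
}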

Aggregations

ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 309
TopicPartition (org.apache.kafka.common.TopicPartition): 158
Test (org.junit.Test): 145
ArrayList (java.util.ArrayList): 120
List (java.util.List): 99
HashMap (java.util.HashMap): 97
Map (java.util.Map): 70
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 61
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 51
Test (org.junit.jupiter.api.Test): 35
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 33
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 31
LinkedHashMap (java.util.LinkedHashMap): 30
Header (org.apache.kafka.common.header.Header): 29
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 28
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 28
TimeUnit (java.util.concurrent.TimeUnit): 27
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
ByteBuffer (java.nio.ByteBuffer): 22