use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
the class SourceNodeRecordDeserializer method deserialize.
@Override
public ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord) {
    final Object key;
    try {
        key = sourceNode.deserializeKey(rawRecord.topic(), rawRecord.key());
    } catch (Exception e) {
        throw new StreamsException(format("Failed to deserialize key for record. topic=%s, partition=%d, offset=%d",
            rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e);
    }
    final Object value;
    try {
        value = sourceNode.deserializeValue(rawRecord.topic(), rawRecord.value());
    } catch (Exception e) {
        throw new StreamsException(format("Failed to deserialize value for record. topic=%s, partition=%d, offset=%d",
            rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e);
    }
    return new ConsumerRecord<>(rawRecord.topic(), rawRecord.partition(), rawRecord.offset(), rawRecord.timestamp(),
        TimestampType.CREATE_TIME, rawRecord.checksum(), rawRecord.serializedKeySize(), rawRecord.serializedValueSize(),
        key, value);
}
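For a standalone illustration of the same wrap-and-rethrow pattern, here is a minimal sketch that uses only public client and serialization APIs. The RawRecordDeserializer class name, the StringDeserializer serdes, and the RuntimeException wrapper are illustrative assumptions, not part of the Streams source above.

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class RawRecordDeserializer {

    // Illustrative stand-ins for sourceNode.deserializeKey/deserializeValue in the snippet above.
    private final Deserializer<String> keyDeserializer = new StringDeserializer();
    private final Deserializer<String> valueDeserializer = new StringDeserializer();

    public ConsumerRecord<String, String> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord) {
        try {
            final String key = keyDeserializer.deserialize(rawRecord.topic(), rawRecord.key());
            final String value = valueDeserializer.deserialize(rawRecord.topic(), rawRecord.value());
            // Rebuild the record with typed key/value while keeping its coordinates.
            return new ConsumerRecord<>(rawRecord.topic(), rawRecord.partition(), rawRecord.offset(), key, value);
        } catch (final RuntimeException e) {
            // Attach topic/partition/offset so a poison-pill record can be located.
            throw new RuntimeException(String.format("Failed to deserialize record. topic=%s, partition=%d, offset=%d",
                rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e);
        }
    }
}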
use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
the class ProcessorStateManager method updateStandbyStates.
public List<ConsumerRecord<byte[], byte[]>> updateStandbyStates(TopicPartition storePartition, List<ConsumerRecord<byte[], byte[]>> records) {
    long limit = offsetLimit(storePartition);
    List<ConsumerRecord<byte[], byte[]>> remainingRecords = null;
    // restore states from changelog records
    StateRestoreCallback restoreCallback = restoreCallbacks.get(storePartition.topic());
    long lastOffset = -1L;
    int count = 0;
    for (ConsumerRecord<byte[], byte[]> record : records) {
        if (record.offset() < limit) {
            try {
                restoreCallback.restore(record.key(), record.value());
            } catch (Exception e) {
                throw new ProcessorStateException(String.format("%s exception caught while trying to restore state from %s", logPrefix, storePartition), e);
            }
            lastOffset = record.offset();
        } else {
            if (remainingRecords == null)
                remainingRecords = new ArrayList<>(records.size() - count);
            remainingRecords.add(record);
        }
        count++;
    }
    // record the restored offset for its change log partition
    restoredOffsets.put(storePartition, lastOffset + 1);
    return remainingRecords;
}
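The method applies changelog records only while their offsets stay below the committed offset limit and hands everything at or beyond the limit back to the caller, which can retry them after the limit advances. A minimal, self-contained sketch of that gating pattern follows; the OffsetLimitedRestore class, applyUpTo method, and BiConsumer callback are illustrative assumptions, and unlike the original it returns an empty list rather than null when every record was applied.

import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;

public final class OffsetLimitedRestore {

    // Applies records below the offset limit and returns the rest for a later retry,
    // mirroring the gating logic of updateStandbyStates above.
    public static List<ConsumerRecord<byte[], byte[]>> applyUpTo(final long limit,
                                                                 final List<ConsumerRecord<byte[], byte[]>> records,
                                                                 final BiConsumer<byte[], byte[]> restore) {
        final List<ConsumerRecord<byte[], byte[]>> remaining = new ArrayList<>();
        for (final ConsumerRecord<byte[], byte[]> record : records) {
            if (record.offset() < limit) {
                restore.accept(record.key(), record.value()); // apply to the local state store
            } else {
                remaining.add(record);                        // keep for the next update() call
            }
        }
        return remaining;
    }
}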
use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
the class StreamTaskTest method shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContext.
@SuppressWarnings("unchecked")
@Test
public void shouldWrapKafkaExceptionsWithStreamsExceptionAndAddContext() throws Exception {
final MockSourceNode processorNode = new MockSourceNode(topic1, intDeserializer, intDeserializer) {
@Override
public void process(final Object key, final Object value) {
throw new KafkaException("KABOOM!");
}
};
final List<ProcessorNode> processorNodes = Collections.<ProcessorNode>singletonList(processorNode);
final Map<String, SourceNode> sourceNodes = Collections.<String, SourceNode>singletonMap(topic1[0], processorNode);
final ProcessorTopology topology = new ProcessorTopology(processorNodes, sourceNodes, Collections.<String, SinkNode>emptyMap(), Collections.<StateStore>emptyList(), Collections.<String, String>emptyMap(), Collections.<StateStore>emptyList());
task.close();
task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, testCache, time, recordCollector);
final int offset = 20;
task.addRecords(partition1, Collections.singletonList(new ConsumerRecord<>(partition1.topic(), partition1.partition(), offset, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
try {
task.process();
fail("Should've thrown StreamsException");
} catch (StreamsException e) {
final String message = e.getMessage();
assertTrue("message=" + message + " should contain topic", message.contains("topic=" + topic1[0]));
assertTrue("message=" + message + " should contain partition", message.contains("partition=" + partition1.partition()));
assertTrue("message=" + message + " should contain offset", message.contains("offset=" + offset));
assertTrue("message=" + message + " should contain processor", message.contains("processor=" + processorNode.name()));
}
}
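The test feeds the task a record built with the full-metadata ConsumerRecord constructor. A standalone sketch of constructing such a record and reading its coordinates back is shown below; the topic name and field values are arbitrary placeholders, and this particular constructor (the one with the checksum argument, as used in these tests) may not be available in newer client versions.

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.record.TimestampType;

public final class RecordFixture {
    public static void main(final String[] args) {
        // topic, partition, offset, timestamp, timestampType, checksum, serializedKeySize, serializedValueSize, key, value
        final ConsumerRecord<Integer, Integer> record =
            new ConsumerRecord<>("topic1", 1, 20L, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 42, 100);
        System.out.println(record.topic() + "-" + record.partition() + "@" + record.offset());
        System.out.println("timestamp=" + record.timestamp() + " (" + record.timestampType() + ")");
    }
}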
use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
the class StandbyTaskTest method testUpdateNonPersistentStore.
@SuppressWarnings("unchecked")
@Test(expected = Exception.class)
public void testUpdateNonPersistentStore() throws Exception {
StreamsConfig config = createConfig(baseDir);
StandbyTask task = new StandbyTask(taskId, applicationId, topicPartitions, topology, consumer, changelogReader, config, null, stateDirectory);
restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
task.update(partition1, records(new ConsumerRecord<>(partition1.topic(), partition1.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
}
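The records(...) call above is a test helper whose body is not shown in this snippet. A hypothetical reconstruction, assuming it simply wraps its varargs in a list, might look like this:

import java.util.Arrays;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;

// Hypothetical reconstruction of the records(...) helper referenced in the test above;
// the actual helper's implementation is not part of this snippet.
final class StandbyTaskTestHelpers {
    @SafeVarargs
    static List<ConsumerRecord<byte[], byte[]>> records(final ConsumerRecord<byte[], byte[]>... recs) {
        return Arrays.asList(recs);
    }
}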
use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.
the class StandbyTaskTest method testUpdateKTable.
@SuppressWarnings("unchecked")
@Test
public void testUpdateKTable() throws Exception {
consumer.assign(Utils.mkList(ktable));
Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(0L));
consumer.commitSync(committedOffsets);
restoreStateConsumer.updatePartitions("ktable1", Utils.mkList(new PartitionInfo("ktable1", 0, Node.noNode(), new Node[0], new Node[0]), new PartitionInfo("ktable1", 1, Node.noNode(), new Node[0], new Node[0]), new PartitionInfo("ktable1", 2, Node.noNode(), new Node[0], new Node[0])));
StreamsConfig config = createConfig(baseDir);
StandbyTask task = new StandbyTask(taskId, applicationId, ktablePartitions, ktableTopology, consumer, changelogReader, config, null, stateDirectory);
restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
for (ConsumerRecord<Integer, Integer> record : Arrays.asList(new ConsumerRecord<>(ktable.topic(), ktable.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, 100), new ConsumerRecord<>(ktable.topic(), ktable.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, 100), new ConsumerRecord<>(ktable.topic(), ktable.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 3, 100), new ConsumerRecord<>(ktable.topic(), ktable.partition(), 40, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 4, 100), new ConsumerRecord<>(ktable.topic(), ktable.partition(), 50, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 5, 100))) {
restoreStateConsumer.bufferRecord(record);
}
for (Map.Entry<TopicPartition, Long> entry : task.checkpointedOffsets().entrySet()) {
TopicPartition partition = entry.getKey();
long offset = entry.getValue();
if (offset >= 0) {
restoreStateConsumer.seek(partition, offset);
} else {
restoreStateConsumer.seekToBeginning(singleton(partition));
}
}
// The commit offset is at 0L. Records should not be processed
List<ConsumerRecord<byte[], byte[]>> remaining = task.update(ktable, restoreStateConsumer.poll(100).records(ktable));
assertEquals(5, remaining.size());
committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(10L));
consumer.commitSync(committedOffsets);
// update offset limits
task.commit();
// The commit offset has not reached, yet.
remaining = task.update(ktable, remaining);
assertEquals(5, remaining.size());
committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(11L));
consumer.commitSync(committedOffsets);
// update offset limits
task.commit();
// one record should be processed.
remaining = task.update(ktable, remaining);
assertEquals(4, remaining.size());
committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(45L));
consumer.commitSync(committedOffsets);
// update offset limits
task.commit();
// The commit offset is now 45. All record except for the last one should be processed.
remaining = task.update(ktable, remaining);
assertEquals(1, remaining.size());
committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(50L));
consumer.commitSync(committedOffsets);
// update offset limits
task.commit();
// The commit offset is now 50. Still the last record remains.
remaining = task.update(ktable, remaining);
assertEquals(1, remaining.size());
committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(60L));
consumer.commitSync(committedOffsets);
// update offset limits
task.commit();
// The commit offset is now 60. No record should be left.
remaining = task.update(ktable, remaining);
assertNull(remaining);
task.closeStateManager(true);
File taskDir = stateDirectory.directoryForTask(taskId);
OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
Map<TopicPartition, Long> offsets = checkpoint.read();
assertEquals(1, offsets.size());
assertEquals(new Long(51L), offsets.get(ktable));
}
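The final assertion follows from the updateStandbyStates snippet earlier on this page: the checkpoint stores lastOffset + 1, i.e. the next offset to restore, so after the record at offset 50 is applied the checkpoint file holds 51. A minimal sketch of that convention is below; the CheckpointOffset class and nextOffsetToRestore method are illustrative names, not Kafka APIs.

import java.util.Arrays;
import java.util.List;

public final class CheckpointOffset {

    // The checkpointed value is the offset of the next record to restore,
    // i.e. one past the last changelog record that was applied.
    public static long nextOffsetToRestore(final List<Long> appliedOffsets) {
        long lastOffset = -1L;
        for (final long offset : appliedOffsets) {
            lastOffset = Math.max(lastOffset, offset);
        }
        return lastOffset + 1;
    }

    public static void main(final String[] args) {
        // With records at offsets 10, 20, 30, 40, 50 all applied, the checkpoint is 51.
        System.out.println(nextOffsetToRestore(Arrays.asList(10L, 20L, 30L, 40L, 50L)));
    }
}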