use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
the class EosTestDriver method addRecord.
private static void addRecord(final ConsumerRecord<byte[], byte[]> record,
                              final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> recordPerTopicPerPartition,
                              final boolean withRepartitioning) {
    final String topic = record.topic();
    final TopicPartition partition = new TopicPartition(topic, record.partition());
    if (verifyTopic(topic, withRepartitioning)) {
        Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> topicRecordsPerPartition = recordPerTopicPerPartition.get(topic);
        if (topicRecordsPerPartition == null) {
            topicRecordsPerPartition = new HashMap<>();
            recordPerTopicPerPartition.put(topic, topicRecordsPerPartition);
        }
        List<ConsumerRecord<byte[], byte[]>> records = topicRecordsPerPartition.get(partition);
        if (records == null) {
            records = new ArrayList<>();
            topicRecordsPerPartition.put(partition, records);
        }
        records.add(record);
    } else {
        throw new RuntimeException("FAIL: received data from unexpected topic: " + record);
    }
}
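The two null-check-and-put blocks above implement a group-by of records per topic and per partition. A minimal sketch of the same grouping using Java 8 computeIfAbsent, not taken from EosTestDriver and omitting the verifyTopic guard:
// Sketch only: equivalent grouping of a ConsumerRecord per topic and partition.
private static void addRecordCompact(final ConsumerRecord<byte[], byte[]> record,
                                     final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> recordPerTopicPerPartition) {
    final TopicPartition partition = new TopicPartition(record.topic(), record.partition());
    recordPerTopicPerPartition
        .computeIfAbsent(record.topic(), topic -> new HashMap<>())   // group by topic
        .computeIfAbsent(partition, tp -> new ArrayList<>())         // then by partition
        .add(record);
}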
use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
the class ProcessorTopologyTestDriver method process.
/**
 * Send an input message with the given key, value, and timestamp on the specified topic to the topology, and then commit the message.
 *
 * @param topicName the name of the topic on which the message is to be sent
 * @param key       the raw message key
 * @param value     the raw message value
 * @param timestamp the raw message timestamp
 */
public void process(final String topicName, final byte[] key, final byte[] value, final long timestamp) {
    final TopicPartition tp = partitionsByTopic.get(topicName);
    if (tp != null) {
        // Add the record ...
        final long offset = offsetsByTopicPartition.get(tp).incrementAndGet();
        task.addRecords(tp, records(new ConsumerRecord<>(tp.topic(), tp.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value)));
        producer.clear();
        // Process the record ...
        task.process();
        ((InternalProcessorContext) task.context()).setRecordContext(new ProcessorRecordContext(timestamp, offset, tp.partition(), topicName));
        task.commit();
        // Capture all the records sent to the producer ...
        for (final ProducerRecord<byte[], byte[]> record : producer.history()) {
            Queue<ProducerRecord<byte[], byte[]>> outputRecords = outputRecordsByTopic.get(record.topic());
            if (outputRecords == null) {
                outputRecords = new LinkedList<>();
                outputRecordsByTopic.put(record.topic(), outputRecords);
            }
            outputRecords.add(record);
            // Forward back into the topology if the produced record is to an internal or a source topic ...
            if (internalTopics.contains(record.topic()) || topology.sourceTopics().contains(record.topic())) {
                process(record.topic(), record.key(), record.value(), record.timestamp());
            }
        }
    } else {
        final TopicPartition global = globalPartitionsByTopic.get(topicName);
        if (global == null) {
            throw new IllegalArgumentException("Unexpected topic: " + topicName);
        }
        final long offset = offsetsByTopicPartition.get(global).incrementAndGet();
        globalStateTask.update(new ConsumerRecord<>(global.topic(), global.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value));
        globalStateTask.flushState();
    }
}
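A hypothetical usage sketch for this method, assuming a ProcessorTopologyTestDriver instance named driver and an input topic named "input-topic" (both names are illustrative, not from the class above):
// Serialize a key/value pair and push it through the topology under test.
final Serializer<String> stringSerializer = Serdes.String().serializer();
final byte[] rawKey = stringSerializer.serialize("input-topic", "key-1");
final byte[] rawValue = stringSerializer.serialize("input-topic", "value-1");
driver.process("input-topic", rawKey, rawValue, System.currentTimeMillis());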
use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
the class StreamTaskTest method shouldReturnOffsetsForRepartitionTopicsForPurging.
@Test
public void shouldReturnOffsetsForRepartitionTopicsForPurging() {
    final TopicPartition repartition = new TopicPartition("repartition", 1);
    final ProcessorTopology topology = ProcessorTopology.withRepartitionTopics(
        Utils.<ProcessorNode>mkList(source1, source2),
        new HashMap<String, SourceNode>() {
            {
                put(topic1, source1);
                put(repartition.topic(), source2);
            }
        },
        Collections.singleton(repartition.topic()));
    consumer.assign(Arrays.asList(partition1, repartition));
    task = new StreamTask(taskId00, Utils.mkSet(partition1, repartition), topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, null, time, producer);
    task.initializeStateStores();
    task.initializeTopology();
    task.addRecords(partition1, Collections.singletonList(new ConsumerRecord<>(partition1.topic(), partition1.partition(), 5L, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
    task.addRecords(repartition, Collections.singletonList(new ConsumerRecord<>(repartition.topic(), repartition.partition(), 10L, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)));
    assertTrue(task.process());
    assertTrue(task.process());
    task.commit();
    Map<TopicPartition, Long> map = task.purgableOffsets();
    assertThat(map, equalTo(Collections.singletonMap(repartition, 11L)));
}
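Both addRecords calls above use the long ConsumerRecord constructor (topic, partition, offset, timestamp, timestamp type, checksum, serialized key size, serialized value size, key, value). When the timestamp metadata does not matter, the short constructor is sufficient; a sketch reusing the test's recordKey and recordValue fields:
// Offset-only record; the timestamp defaults to NO_TIMESTAMP.
final ConsumerRecord<byte[], byte[]> simpleRecord =
    new ConsumerRecord<>("repartition", 1, 5L, recordKey, recordValue);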
use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
the class RecordQueueTest method shouldNotThrowStreamsExceptionWhenKeyDeserializationFailsWithSkipHandler.
@Test
public void shouldNotThrowStreamsExceptionWhenKeyDeserializationFailsWithSkipHandler() throws Exception {
    final byte[] key = Serdes.Long().serializer().serialize("foo", 1L);
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
        new ConsumerRecord<>("topic", 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, key, recordValue));
    final StateSerdes anyStateSerde = StateSerdes.withBuiltinTypes("anyName", Bytes.class, Bytes.class);
    queueThatSkipsDeserializeErrors.addRawRecords(records);
    assertEquals(0, queueThatSkipsDeserializeErrors.size());
}
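The skip-on-error queue in this test wraps a LogAndContinueExceptionHandler; in an application the same behavior is selected through the default deserialization exception handler setting. A minimal configuration sketch (application id and bootstrap servers are placeholders):
final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");          // placeholder
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // placeholder
// Skip records whose key or value cannot be deserialized instead of failing the task.
props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
          LogAndContinueExceptionHandler.class);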
use of org.apache.kafka.clients.consumer.ConsumerRecord in project apache-kafka-on-k8s by banzaicloud.
the class RecordQueueTest method shouldDropOnNegativeTimestamp.
@Test
public void shouldDropOnNegativeTimestamp() {
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
        new ConsumerRecord<>("topic", 1, 1, -1L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue));
    final RecordQueue queue = new RecordQueue(
        new TopicPartition(topics[0], 1),
        new MockSourceNode<>(topics, intDeserializer, intDeserializer),
        new LogAndSkipOnInvalidTimestamp(),
        new LogAndContinueExceptionHandler(),
        null,
        new LogContext());
    queue.addRawRecords(records);
    assertEquals(0, queue.size());
}
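The drop comes from LogAndSkipOnInvalidTimestamp, which an application would select as the default timestamp extractor (same Properties setup as in the previous sketch):
// Log and skip records whose extracted timestamp is negative.
props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
          LogAndSkipOnInvalidTimestamp.class);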