Use of org.akhq.models.Record in the akhq project by tchiotludo.
Class TopicControllerTest, method dataDelete.
@Test
@Order(5)
void dataDelete() {
    // Delete the record identified by its base64-encoded key on partition 1.
    String encodedKey = Base64.getEncoder().encodeToString("my-key".getBytes());
    Record deleted = this.retrieve(
        HttpRequest.DELETE(
            CREATE_TOPIC_URL + "/data",
            ImmutableMap.of("key", encodedKey, "partition", 1)
        ),
        Record.class
    );
    // The delete marker (tombstone) is expected at offset 1.
    assertEquals(1, deleted.getOffset());
    // get data
    // @TODO: Failed to see the message
    // records = this.retrieveNextList(HttpRequest.GET(CREATE_TOPIC_URL + "/data"), Record.class);
    // assertEquals(2, records.getResults().size());
    // assertEquals("my-value", records.getResults().get(0).getValue());
    // assertNull(records.getResults().get(1).getValue());
}
Use of org.akhq.models.Record in the akhq project by tchiotludo.
Class RecordRepository, method search.
/**
 * Streams search results over a topic as a sequence of {@link SearchEvent}s.
 * Each generator iteration polls the consumer once, filters the batch through
 * {@code searchFilter}, and emits a progress (or terminal) event. The stream
 * ends after one empty poll or once {@code options.getSize()} matches are found.
 */
public Flowable<Event<SearchEvent>> search(Topic topic, Options options) throws ExecutionException, InterruptedException {
// Total matches accumulated across all polls; shared by every generator step.
AtomicInteger matchesCount = new AtomicInteger();
Properties properties = new Properties();
// Cap each poll at the requested result-page size.
properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, options.getSize());
return Flowable.generate(() -> {
KafkaConsumer<byte[], byte[]> consumer = this.kafkaModule.getConsumer(options.clusterId, properties);
Map<TopicPartition, Long> partitions = getTopicPartitionForSortOldest(topic, options, consumer);
// No partitions to read: a null event makes the first generator step terminate.
if (partitions.size() == 0) {
return new SearchState(consumer, null);
}
consumer.assign(partitions.keySet());
partitions.forEach(consumer::seek);
partitions.forEach((topicPartition, first) -> log.trace("Search [topic: {}] [partition: {}] [start: {}]", topicPartition.topic(), topicPartition.partition(), first));
return new SearchState(consumer, new SearchEvent(topic));
}, (searchState, emitter) -> {
SearchEvent searchEvent = searchState.getSearchEvent();
KafkaConsumer<byte[], byte[]> consumer = searchState.getConsumer();
// end
// emptyPoll == 666 is a sentinel set by the previous step meaning "stop now":
// emit a terminal event, complete the stream, and release the consumer.
if (searchEvent == null || searchEvent.emptyPoll == 666) {
emitter.onNext(new SearchEvent(topic).end());
emitter.onComplete();
consumer.close();
return new SearchState(consumer, searchEvent);
}
SearchEvent currentEvent = new SearchEvent(searchEvent);
ConsumerRecords<byte[], byte[]> records = this.poll(consumer);
// Track consecutive empty polls; any non-empty poll resets the counter.
if (records.isEmpty()) {
currentEvent.emptyPoll++;
} else {
currentEvent.emptyPoll = 0;
}
List<Record> list = new ArrayList<>();
for (ConsumerRecord<byte[], byte[]> record : records) {
currentEvent.updateProgress(record);
Record current = newRecord(record, options, topic);
if (searchFilter(options, current)) {
list.add(current);
matchesCount.getAndIncrement();
log.trace("Record [topic: {}] [partition: {}] [offset: {}] [key: {}]", record.topic(), record.partition(), record.offset(), record.key());
}
}
currentEvent.records = list;
if (currentEvent.emptyPoll >= 1) {
// One empty poll: emit the final batch as a terminal event and arm the sentinel
// so the NEXT step closes the stream.
currentEvent.emptyPoll = 666;
emitter.onNext(currentEvent.end());
} else if (matchesCount.get() >= options.getSize()) {
// Page is full: emit this batch as progress, then stop on the next step.
currentEvent.emptyPoll = 666;
emitter.onNext(currentEvent.progress(options));
} else {
emitter.onNext(currentEvent.progress(options));
}
return new SearchState(consumer, currentEvent);
});
}
Use of org.akhq.models.Record in the akhq project by tchiotludo.
Class RecordRepository, method getLastRecord.
/**
 * Returns, for each named topic, the record with the most recent timestamp
 * among (roughly) the last two records of every partition.
 *
 * @param clusterId  cluster to read from
 * @param topicsName topics to inspect
 * @return map of topic name to its latest observed record; topics with no
 *         readable records are absent from the map
 */
public Map<String, Record> getLastRecord(String clusterId, List<String> topicsName) throws ExecutionException, InterruptedException {
    Map<String, Topic> topics = topicRepository.findByName(clusterId, topicsName).stream()
        .collect(Collectors.toMap(Topic::getName, Function.identity()));
    List<TopicPartition> topicPartitions = topics.values().stream()
        .flatMap(topic -> topic.getPartitions().stream())
        .map(partition -> new TopicPartition(partition.getTopic(), partition.getId()))
        .collect(Collectors.toList());

    // Plain Properties instead of double-brace initialization (the anonymous
    // subclass idiom pins the enclosing instance and defeats equals()).
    Properties properties = new Properties();
    // Enough headroom to fetch the last 2 records of every partition in one poll.
    properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, topicPartitions.size() * 3);

    KafkaConsumer<byte[], byte[]> consumer = kafkaModule.getConsumer(clusterId, properties);
    ConcurrentHashMap<String, Record> records = new ConcurrentHashMap<>();
    // try/finally so the consumer is released even if assign/seek/poll throws.
    try {
        consumer.assign(topicPartitions);
        // Start 2 records before the end of each partition (clamped at 0).
        consumer.endOffsets(consumer.assignment()).forEach((topicPartition, offset) ->
            consumer.seek(topicPartition, Math.max(0, offset - 2))
        );
        this.poll(consumer).forEach(record -> {
            // Keep only the newest record per topic (single lookup instead of
            // containsKey + get).
            Record current = records.get(record.topic());
            if (current == null || current.getTimestamp().toInstant().toEpochMilli() < record.timestamp()) {
                records.put(record.topic(), newRecord(record, clusterId, topics.get(record.topic())));
            }
        });
    } finally {
        consumer.close();
    }
    return records;
}
Use of org.akhq.models.Record in the akhq project by tchiotludo.
Class RecordRepository, method consumeOldest.
/**
 * Consumes one batch of records from the oldest configured offsets of the
 * topic, applies the search filter, and returns the matches sorted by
 * timestamp ascending.
 *
 * @param topic   topic to read
 * @param options read options (cluster, offsets, filters)
 * @return matching records ordered by timestamp; empty when no partition is readable
 */
private List<Record> consumeOldest(Topic topic, Options options) {
    KafkaConsumer<byte[], byte[]> consumer = this.kafkaModule.getConsumer(options.clusterId);
    List<Record> list = new ArrayList<>();
    // try/finally: the original leaked the consumer when getTopicPartitionForSortOldest
    // or poll threw before reaching close().
    try {
        Map<TopicPartition, Long> partitions = getTopicPartitionForSortOldest(topic, options, consumer);
        if (!partitions.isEmpty()) {
            consumer.assign(partitions.keySet());
            partitions.forEach(consumer::seek);
            if (log.isTraceEnabled()) {
                partitions.forEach((topicPartition, first) ->
                    log.trace("Consume [topic: {}] [partition: {}] [start: {}]",
                        topicPartition.topic(), topicPartition.partition(), first));
            }
            for (ConsumerRecord<byte[], byte[]> record : this.poll(consumer)) {
                Record current = newRecord(record, options, topic);
                if (searchFilter(options, current)) {
                    list.add(current);
                }
            }
        }
    } finally {
        consumer.close();
    }
    list.sort(Comparator.comparing(Record::getTimestamp));
    return list;
}
Use of org.akhq.models.Record in the akhq project by tchiotludo.
Class RecordRepository, method newRecord.
/**
 * Builds a {@link Record} model from a raw Kafka consumer record, wiring in the
 * deserializers appropriate for the cluster's schema-registry type.
 *
 * @param record    raw key/value bytes from the consumer
 * @param clusterId cluster the record was read from
 * @param topic     topic metadata attached to the resulting model
 */
private Record newRecord(ConsumerRecord<byte[], byte[]> record, String clusterId, Topic topic) {
    // Resolve the registry type once; the original called
    // getSchemaRegistryType(clusterId) three times for the same value.
    SchemaRegistryType schemaRegistryType = this.schemaRegistryRepository.getSchemaRegistryType(clusterId);
    SchemaRegistryClient client = this.kafkaModule.getRegistryClient(clusterId);
    // JSON and Protobuf registry deserializers only exist for Confluent registries.
    boolean isConfluent = schemaRegistryType == SchemaRegistryType.CONFLUENT;
    return new Record(
        client,
        record,
        schemaRegistryType,
        this.schemaRegistryRepository.getKafkaAvroDeserializer(clusterId),
        isConfluent ? this.schemaRegistryRepository.getKafkaJsonDeserializer(clusterId) : null,
        isConfluent ? this.schemaRegistryRepository.getKafkaProtoDeserializer(clusterId) : null,
        this.avroToJsonSerializer,
        this.customDeserializerRepository.getProtobufToJsonDeserializer(clusterId),
        this.customDeserializerRepository.getAvroToJsonDeserializer(clusterId),
        avroWireFormatConverter.convertValueToWireFormat(record, client, schemaRegistryType),
        topic
    );
}
Aggregations