Use of org.akhq.models.Record in project akhq by tchiotludo.
The class RecordRepository, method newRecord.
private Record newRecord(ConsumerRecord<byte[], byte[]> record, BaseOptions options, Topic topic) {
    SchemaRegistryType schemaRegistryType = this.schemaRegistryRepository.getSchemaRegistryType(options.clusterId);
    SchemaRegistryClient client = this.kafkaModule.getRegistryClient(options.clusterId);

    return new Record(
        client,
        record,
        schemaRegistryType,
        this.schemaRegistryRepository.getKafkaAvroDeserializer(options.clusterId),
        // JSON Schema and Protobuf deserializers only apply to the Confluent registry
        schemaRegistryType == SchemaRegistryType.CONFLUENT ? this.schemaRegistryRepository.getKafkaJsonDeserializer(options.clusterId) : null,
        schemaRegistryType == SchemaRegistryType.CONFLUENT ? this.schemaRegistryRepository.getKafkaProtoDeserializer(options.clusterId) : null,
        this.avroToJsonSerializer,
        this.customDeserializerRepository.getProtobufToJsonDeserializer(options.clusterId),
        this.customDeserializerRepository.getAvroToJsonDeserializer(options.clusterId),
        avroWireFormatConverter.convertValueToWireFormat(record, client, schemaRegistryType),
        topic
    );
}
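For context, a minimal sketch of how a caller inside RecordRepository might map a polled batch through newRecord, in the same way tail below does. The toRecords name is hypothetical; poll and the BaseOptions/Topic types are taken from the snippets on this page.

// Hypothetical sketch: map one polled batch through newRecord.
// toRecords is not a real method in the project; poll(...) is assumed from the tail snippet below.
private List<Record> toRecords(KafkaConsumer<byte[], byte[]> consumer, BaseOptions options, Map<String, Topic> topics) {
    List<Record> result = new ArrayList<>();
    for (ConsumerRecord<byte[], byte[]> record : this.poll(consumer)) {
        // Look up the Topic model for the record's topic so the Record can resolve partitions and schemas
        result.add(newRecord(record, options, topics.get(record.topic())));
    }
    return result;
}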
Use of org.akhq.models.Record in project akhq by tchiotludo.
The class RecordRepository, method tail.
public Flowable<Event<TailEvent>> tail(String clusterId, TailOptions options) {
    return Flowable.generate(
        () -> {
            KafkaConsumer<byte[], byte[]> consumer = this.kafkaModule.getConsumer(options.clusterId);

            Map<String, Topic> topics = topicRepository.findByName(clusterId, options.topics)
                .stream()
                .collect(Collectors.toMap(Topic::getName, Function.identity()));

            consumer.assign(topics.values()
                .stream()
                .flatMap(topic -> topic.getPartitions()
                    .stream()
                    .map(partition -> new TopicPartition(topic.getName(), partition.getId()))
                )
                .collect(Collectors.toList())
            );

            if (options.getAfter() != null) {
                // Each "after" entry has the form "topic,partition,offset"
                options.getAfter().forEach(s -> {
                    String[] split = s.split(",");
                    consumer.seek(new TopicPartition(split[0], Integer.parseInt(split[1])), Long.parseLong(split[2]));
                });
            }

            return new TailState(consumer, new TailEvent(), topics);
        },
        (state, subscriber) -> {
            ConsumerRecords<byte[], byte[]> records = this.poll(state.getConsumer());
            TailEvent tailEvent = state.getTailEvent();
            List<Record> list = new ArrayList<>();

            for (ConsumerRecord<byte[], byte[]> record : records) {
                tailEvent.offsets.put(ImmutableMap.of(record.topic(), record.partition()), record.offset());

                Record current = newRecord(record, options, state.getTopics().get(record.topic()));
                if (searchFilter(options, current)) {
                    list.add(current);
                    log.trace(
                        "Record [topic: {}] [partition: {}] [offset: {}] [key: {}]",
                        record.topic(),
                        record.partition(),
                        record.offset(),
                        record.key()
                    );
                }
            }

            tailEvent.records = list;
            subscriber.onNext(Event.of(tailEvent).name("tailBody"));
            state.tailEvent = tailEvent;

            return state;
        }
    );
}
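A minimal usage sketch for the Flowable this returns: subscribe, handle each tail event, and dispose when done. The TailOptions constructor arguments shown here are assumptions, not the project's exact signature.

// Hypothetical usage sketch; the TailOptions construction is an assumption.
TailOptions options = new TailOptions(environment, "my-cluster", List.of("my-topic"));
Disposable subscription = repository.tail("my-cluster", options)
    .subscribe(event -> event.getData().records.forEach(record ->
        // Print topic-partition@offset for each tailed record
        System.out.println(record.getTopic() + "-" + record.getPartition() + "@" + record.getOffset())
    ));
// ... later, stop tailing and release the consumer
subscription.dispose();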
Use of org.akhq.models.Record in project akhq by tchiotludo.
The class RecordRepositoryTest, method produceAndConsumeRecordUsingJsonSchema.
@Test
void produceAndConsumeRecordUsingJsonSchema() throws ExecutionException, InterruptedException, IOException, RestClientException {
    Schema keyJsonSchema = registerSchema("json_schema/key.json", KafkaTestCluster.TOPIC_JSON_SCHEMA + "-key");
    Schema valueJsonSchema = registerSchema("json_schema/album.json", KafkaTestCluster.TOPIC_JSON_SCHEMA + "-value");

    Album objectSatisfyingJsonSchema = new Album("title", List.of("artist_1", "artist_2"), 1989, List.of("song_1", "song_2"));
    String recordAsJsonString = objectMapper.writeValueAsString(objectSatisfyingJsonSchema);
    String keyJsonString = new JSONObject(Collections.singletonMap("id", "83fff9f8-b47a-4bf7-863b-9942c4369f06")).toString();

    RecordMetadata producedRecordMetadata = repository.produce(
        KafkaTestCluster.CLUSTER_ID,
        KafkaTestCluster.TOPIC_JSON_SCHEMA,
        recordAsJsonString,
        Collections.emptyMap(),
        Optional.of(keyJsonString),
        Optional.empty(),
        Optional.empty(),
        Optional.of(keyJsonSchema.getId()),
        Optional.of(valueJsonSchema.getId())
    );

    RecordRepository.Options options = new RecordRepository.Options(environment, KafkaTestCluster.CLUSTER_ID, KafkaTestCluster.TOPIC_JSON_SCHEMA);
    List<Record> records = consumeAllRecord(options);

    Optional<Record> consumedRecord = records.stream()
        .filter(record -> Objects.equals(record.getKey(), keyJsonString))
        .findFirst();
    assertTrue(consumedRecord.isPresent());

    Record recordToAssert = consumedRecord.get();
    // assertEquals takes the expected value first
    assertEquals(keyJsonString, recordToAssert.getKey());
    assertEquals(recordAsJsonString, recordToAssert.getValue());
    assertEquals(valueJsonSchema.getId(), recordToAssert.getValueSchemaId());

    // Clear the schema registry, as it is shared between tests
    schemaRegistryRepository.delete(KafkaTestCluster.CLUSTER_ID, keyJsonSchema.getSubject());
    schemaRegistryRepository.delete(KafkaTestCluster.CLUSTER_ID, valueJsonSchema.getSubject());
}
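The Album type is defined elsewhere in the test sources; a minimal sketch of what the test assumes it looks like, with field names inferred from the constructor call above (so treat them as assumptions):

// Hypothetical sketch of the Album POJO the test serializes; field names are inferred, not confirmed.
public class Album {
    private final String title;
    private final List<String> artists;
    private final int releaseYear;
    private final List<String> songs;

    public Album(String title, List<String> artists, int releaseYear, List<String> songs) {
        this.title = title;
        this.artists = artists;
        this.releaseYear = releaseYear;
        this.songs = songs;
    }

    // Getters so Jackson's ObjectMapper can serialize the object to JSON
    public String getTitle() { return title; }
    public List<String> getArtists() { return artists; }
    public int getReleaseYear() { return releaseYear; }
    public List<String> getSongs() { return songs; }
}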
Use of org.akhq.models.Record in project akhq by tchiotludo.
The class RecordRepositoryTest, method consumeAllRecord.
private List<Record> consumeAllRecord(RecordRepository.Options options) throws ExecutionException, InterruptedException {
    boolean hasNext = true;
    List<Record> all = new ArrayList<>();

    do {
        List<Record> datas = repository.consume(KafkaTestCluster.CLUSTER_ID, options);
        all.addAll(datas);

        datas.forEach(record -> log.debug(
            "Record [Topic: {}] [Partition: {}] [Offset: {}] [Key: {}] [Value: {}]",
            record.getTopic(),
            record.getPartition(),
            record.getOffset(),
            record.getKey(),
            record.getValue()
        ));
        log.info("Consumed {} records", datas.size());

        URIBuilder after = options.after(datas, URIBuilder.empty());

        if (datas.isEmpty()) {
            hasNext = false;
        } else if (after != null) {
            // Advance the pagination cursor so the next consume call starts after this batch
            options.setAfter(after.getParametersByName("after").get(0).getValue());
        }
    } while (hasNext);

    return all;
}
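A short, hedged usage example, mirroring the JSON-schema test above (the topic name here is a placeholder):

// Hypothetical usage: drain a topic and assert something arrived.
// "my-topic" is a placeholder; the Options constructor matches the test above.
RecordRepository.Options options = new RecordRepository.Options(environment, KafkaTestCluster.CLUSTER_ID, "my-topic");
List<Record> records = consumeAllRecord(options);
assertFalse(records.isEmpty());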
Use of org.akhq.models.Record in project akhq by tchiotludo.
The class SseControllerTest, method searchApi.
@Test
void searchApi() {
    RxSseClient sseClient = embeddedServer.getApplicationContext().createBean(RxSseClient.class, embeddedServer.getURL());

    List<Record> results = sseClient
        .eventStream(
            BASE_URL + "/" + KafkaTestCluster.TOPIC_HUGE + "/data/search?searchByKey=key_100_C",
            TopicController.SearchRecord.class
        )
        .toList()
        .blockingGet()
        .stream()
        .flatMap(r -> r.getData() != null && r.getData().getRecords() != null
            ? r.getData().getRecords().stream()
            : Stream.empty())
        .collect(Collectors.toList());

    assertThat(results.size(), is(3));
}
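The null-guarded flatMap tolerates SSE frames that carry no data; it could be pulled into a small helper for readability. A sketch, with the recordsOf name being an assumption:

// Hypothetical helper: extract records from one SSE event, tolerating empty frames.
private static Stream<Record> recordsOf(Event<TopicController.SearchRecord> event) {
    return event.getData() != null && event.getData().getRecords() != null
        ? event.getData().getRecords().stream()
        : Stream.empty();
}

With it, the pipeline above would read .flatMap(SseControllerTest::recordsOf) instead of the inline lambda.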