Use of io.kestra.runner.kafka.configs.TopicsConfig in project kestra by kestra-io.
From the class KafkaFlowListenersTest, method invalidFlows.
@Test
public void invalidFlows() throws JsonProcessingException, InterruptedException {
    TopicsConfig topicsConfig = KafkaQueue.topicsConfig(applicationContext, Flow.class);

    // Publish two records that cannot be deserialized into valid flows.
    KafkaProducerService.Producer<String> producer = kafkaProducerService.of(KafkaFlowListenersTest.class, Serdes.String());

    producer.send(new ProducerRecord<>(topicsConfig.getName(), "", JacksonMapper.ofJson().writeValueAsString(
        Map.of("id", "invalid", "namespace", "io.kestra.unittest", "revision", 1,
            "tasks", List.of(Map.of("id", "invalid", "type", "io.kestra.core.tasks.debugs.Echo", "level", "invalid")))
    )));

    producer.send(new ProducerRecord<>(topicsConfig.getName(), "", JacksonMapper.ofJson().writeValueAsString(
        Map.of("id", "invalid", "namespace", "io.kestra.unittest", "revision", 1,
            "tasks", List.of(Map.of("id", "invalid", "type", "io.kestra.core.tasks.debugs.Invalid", "level", "invalid")))
    )));

    // A valid flow must still reach the listener despite the invalid records on the same topic.
    String flowId = IdUtils.create();
    Flow flow = create(flowId, IdUtils.create());
    flowRepository.create(flow);

    CountDownLatch countDownLatch = new CountDownLatch(1);
    flowListenersService.listen(flows -> {
        if (flows.stream().anyMatch(f -> f.getId().equals(flowId))) {
            countDownLatch.countDown();
        }
    });

    countDownLatch.await(1, TimeUnit.MINUTES);
    assertThat(countDownLatch.getCount(), is(0L));
}
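To reproduce the invalid-record setup outside of Kestra's test harness, a minimal standalone sketch using only the plain kafka-clients producer API might look like the following. The broker address localhost:9092, the topic name "kestra_flow", the class name InvalidFlowPublisher, and the hand-written JSON payload are assumptions for illustration, not values taken from the project (the real topic name comes from TopicsConfig.getName()).

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class InvalidFlowPublisher {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        // Hand-written JSON standing in for JacksonMapper.ofJson().writeValueAsString(...)
        String invalidFlow = "{\"id\":\"invalid\",\"namespace\":\"io.kestra.unittest\",\"revision\":1,"
            + "\"tasks\":[{\"id\":\"invalid\",\"type\":\"io.kestra.core.tasks.debugs.Echo\",\"level\":\"invalid\"}]}";

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // "kestra_flow" is a placeholder for the topic resolved via TopicsConfig.
            producer.send(new ProducerRecord<>("kestra_flow", "", invalidFlow));
            producer.flush();
        }
    }
}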
Use of io.kestra.runner.kafka.configs.TopicsConfig in project kestra by kestra-io.
From the class KafkaElasticIndexerTest, method run.
@Test
void run() throws IOException, InterruptedException {
    String topic = this.topicsConfig.stream()
        .filter(indicesConfig -> indicesConfig.getCls() == Execution.class)
        .findFirst().orElseThrow().getName();

    CountDownLatch countDownLatch = new CountDownLatch(1);

    // Spy the Elasticsearch client to detect the bulk request issued by the indexer.
    RestHighLevelClient elasticClientSpy = spy(elasticClient);
    doAnswer(invocation -> {
        countDownLatch.countDown();
        return invocation.callRealMethod();
    }).when(elasticClientSpy).bulk(any(), any());

    // Replace the real consumer with a MockConsumer pre-loaded with execution records.
    KafkaConsumerService kafkaConsumerServiceSpy = mock(KafkaConsumerService.class);
    MockConsumer<String, String> mockConsumer = mockConsumer(topic);
    doReturn(mockConsumer).when(kafkaConsumerServiceSpy).of(any(), any(), any());

    ConsumerRecord<String, String> first = buildExecutionRecord(topic, 0);
    mockConsumer.addRecord(first);
    mockConsumer.addRecord(buildExecutionRecord(topic, 1));
    mockConsumer.addRecord(buildExecutionRecord(topic, 2));
    mockConsumer.addRecord(buildExecutionRecord(topic, 3));
    mockConsumer.addRecord(buildExecutionRecord(topic, 4));
    mockConsumer.addRecord(buildRecord(topic, first.key(), null, 5));

    KafkaElasticIndexer indexer = new KafkaElasticIndexer(
        metricRegistry, elasticClientSpy, indexerConfig, topicsConfig,
        indicesConfigs, elasticSearchIndicesService, kafkaConsumerServiceSpy, executorsUtils
    );

    Thread thread = new Thread(indexer);
    thread.start();

    countDownLatch.await();
    assertThat(countDownLatch.getCount(), is(0L));
}
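The helpers mockConsumer(topic) and buildExecutionRecord(topic, offset) are not shown above. One possible shape for them, using Kafka's MockConsumer test utility, is sketched below; it assumes the standard org.apache.kafka.clients.consumer and org.apache.kafka.common imports, a single partition 0, and placeholder keys and JSON payloads, and it is not the project's actual implementation.

// Hypothetical sketch of the helpers used in the test above.
private MockConsumer<String, String> mockConsumer(String topic) {
    MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    TopicPartition partition = new TopicPartition(topic, 0);

    // Assign the single partition and start reading from offset 0,
    // so that addRecord(...) calls for this partition are accepted.
    consumer.assign(Collections.singletonList(partition));
    consumer.updateBeginningOffsets(Map.of(partition, 0L));

    return consumer;
}

private ConsumerRecord<String, String> buildExecutionRecord(String topic, long offset) {
    // A real record would carry a serialized Execution; a placeholder JSON body is used here.
    return new ConsumerRecord<>(topic, 0, offset, "execution-" + offset, "{\"id\": \"execution-" + offset + "\"}");
}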
Use of io.kestra.runner.kafka.configs.TopicsConfig in project kestra by kestra-io.
From the class KafkaDeserializationExceptionHandler, method handle.
@Override
public DeserializationHandlerResponse handle(ProcessorContext context, ConsumerRecord<byte[], byte[]> record, Exception exception) {
    String message = "Exception caught during deserialization, stream will continue! applicationId: {}, taskId: {}, topic: {}, partition: {}, offset: {}";
    Object[] args = {context.applicationId(), context.taskId(), record.topic(), record.partition(), record.offset(), exception};

    TopicsConfig topicsConfig = KafkaQueue.topicsConfigByTopicName(applicationContext, record.topic());

    // Invalid flows and templates are expected, so they are only logged at debug level.
    if (topicsConfig.getCls() == Flow.class || topicsConfig.getCls() == Template.class) {
        if (log.isDebugEnabled()) {
            log.debug(message, args);
        }
    } else {
        log.warn(message, args);
    }

    return DeserializationHandlerResponse.CONTINUE;
}
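For context, a deserialization exception handler like this one is typically registered with Kafka Streams through the default.deserialization.exception.handler property. Below is a minimal sketch of that wiring with a plain StreamsConfig; the application id, broker address, and the pre-built topology variable are assumptions, and the sketch glosses over Kestra's actual Micronaut wiring (the handler above also needs applicationContext injected).

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "kestra-example");    // assumed application id
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
props.put(
    StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
    KafkaDeserializationExceptionHandler.class
);

// With this property set, records that fail to deserialize are routed to handle(...),
// logged, and then skipped because the handler returns DeserializationHandlerResponse.CONTINUE.
KafkaStreams streams = new KafkaStreams(topology, props);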
Use of io.kestra.runner.kafka.configs.TopicsConfig in project kestra by kestra-io.
From the class KafkaAdminService, method createIfNotExist.
@SuppressWarnings("deprecation")
public void createIfNotExist(TopicsConfig topicConfig) {
    NewTopic newTopic = new NewTopic(
        topicConfig.getName(),
        topicConfig.getPartitions() != null ? topicConfig.getPartitions() : topicDefaultsConfig.getPartitions(),
        topicConfig.getReplicationFactor() != null ? topicConfig.getReplicationFactor() : topicDefaultsConfig.getReplicationFactor()
    );

    // Merge default topic properties with topic-specific overrides.
    Map<String, String> properties = new HashMap<>();
    if (topicDefaultsConfig.getProperties() != null) {
        properties.putAll(topicDefaultsConfig.getProperties());
    }
    if (topicConfig.getProperties() != null) {
        properties.putAll(topicConfig.getProperties());
    }
    newTopic.configs(properties);

    try {
        this.of().createTopics(Collections.singletonList(newTopic)).all().get();
        log.info("Topic '{}' created", newTopic.name());
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        if (e.getCause() instanceof TopicExistsException) {
            // The topic already exists: push the merged configuration to it instead.
            try {
                adminClient.alterConfigs(new HashMap<>() {
                    {
                        put(
                            new ConfigResource(ConfigResource.Type.TOPIC, newTopic.name()),
                            new org.apache.kafka.clients.admin.Config(
                                newTopic.configs().entrySet().stream()
                                    .map(config -> new ConfigEntry(config.getKey(), config.getValue()))
                                    .collect(Collectors.toList())
                            )
                        );
                    }
                }).all().get();

                log.info("Topic Config '{}' updated", newTopic.name());
            } catch (ExecutionException | InterruptedException exception) {
                if (!(exception.getCause() instanceof TopicExistsException)) {
                    log.warn("Unable to update topic '{}'", newTopic.name(), exception);
                }
            }
        } else {
            throw new RuntimeException(e);
        }
    }
}
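The @SuppressWarnings("deprecation") annotation is needed because AdminClient.alterConfigs is deprecated. On Kafka clients 2.3+ the same update can be expressed with incrementalAlterConfigs; the following is a sketch of that alternative, not what the project does, and it would sit inside the same try/catch as the original call.

// Alternative sketch using the non-deprecated incremental API.
ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, newTopic.name());

Collection<AlterConfigOp> ops = newTopic.configs().entrySet().stream()
    .map(config -> new AlterConfigOp(
        new ConfigEntry(config.getKey(), config.getValue()),
        AlterConfigOp.OpType.SET
    ))
    .collect(Collectors.toList());

adminClient.incrementalAlterConfigs(Map.of(resource, ops)).all().get();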
Use of io.kestra.runner.kafka.configs.TopicsConfig in project kestra by kestra-io.
From the class KafkaQueueTest, method topicsConfig.
@Test
void topicsConfig() {
    TopicsConfig topicsConfig = KafkaQueue.topicsConfig(applicationContext, Execution.class);
    assertThat(topicsConfig.getCls(), is(Execution.class));

    TopicsConfig byName = KafkaQueue.topicsConfigByTopicName(applicationContext, topicsConfig.getName());
    assertThat(byName, is(topicsConfig));

    topicsConfig = KafkaQueue.topicsConfig(applicationContext, KafkaStreamSourceService.TOPIC_FLOWLAST);
    assertThat(topicsConfig.getKey(), is(KafkaStreamSourceService.TOPIC_FLOWLAST));

    byName = KafkaQueue.topicsConfigByTopicName(applicationContext, topicsConfig.getName());
    assertThat(byName, is(topicsConfig));
}
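This test resolves a TopicsConfig three ways: by the class bound to the topic, by the topic's logical key, and by the physical topic name. A hypothetical stream-based implementation of the class-based lookup, consistent with the filtering used in KafkaElasticIndexerTest above, could look like the sketch below; the method name topicsConfigByClass and the NoSuchElementException are assumptions, not the project's API.

// Hypothetical sketch of a class-based TopicsConfig lookup over a list of configs.
static TopicsConfig topicsConfigByClass(List<TopicsConfig> configs, Class<?> cls) {
    return configs.stream()
        .filter(config -> config.getCls() == cls)
        .findFirst()
        .orElseThrow(() -> new NoSuchElementException("No topic configured for " + cls.getName()));
}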