Use of io.kestra.core.models.triggers.Trigger in project kestra by kestra-io.
From the class ElasticSearchTriggerRepositoryTest, method all(). The test covers findLast(), save(), and findAll() on the trigger repository:
@Test
void all() {
    Trigger.TriggerBuilder<?, ?> builder = trigger();

    // findLast before saving: nothing is stored yet
    Optional<Trigger> find = triggerRepository.findLast(builder.build());
    assertThat(find.isPresent(), is(false));

    // save, then findLast returns the saved trigger state
    Trigger save = triggerRepository.save(builder.build());
    find = triggerRepository.findLast(save);
    assertThat(find.isPresent(), is(true));
    assertThat(find.get().getExecutionId(), is(save.getExecutionId()));

    // saving with a new executionId updates the last state in place
    save = triggerRepository.save(builder.executionId(IdUtils.create()).build());
    find = triggerRepository.findLast(save);
    assertThat(find.isPresent(), is(true));
    assertThat(find.get().getExecutionId(), is(save.getExecutionId()));

    // three more distinct triggers: four documents in total
    triggerRepository.save(trigger().build());
    triggerRepository.save(trigger().build());
    triggerRepository.save(trigger().build());

    List<Trigger> all = triggerRepository.findAll();
    assertThat(all.size(), is(4));
}
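The trigger() helper referenced above is not part of the snippet. A minimal sketch of what such a builder factory could look like, assuming Trigger exposes a standard builder and that each call must produce a distinct trigger context so that findAll() ends up counting four documents (the field values, including the namespace string, are assumptions):

// Hypothetical helper, not shown in the snippet above: returns a builder
// pre-filled with fresh identifiers so every call describes a distinct trigger.
private static Trigger.TriggerBuilder<?, ?> trigger() {
    return Trigger.builder()
        .flowId(IdUtils.create())
        .namespace("io.kestra.unittest") // assumed test namespace
        .flowRevision(1)
        .triggerId(IdUtils.create())
        .executionId(IdUtils.create());
}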
Use of io.kestra.core.models.triggers.Trigger in project kestra by kestra-io.
From the class ExecutorFlowLast, method topology(). The method builds a Kafka Streams topology that maintains a global KTable of the last revision of each flow, emits tombstones for triggers removed from a flow, and forwards every flow to the last-revision topic:
public StreamsBuilder topology() {
    StreamsBuilder builder = new KafkaStreamsBuilder();

    // global KTable holding the last known revision of each flow
    GlobalKTable<String, Flow> flowGlobalKTable = builder.globalTable(
        kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_FLOWLAST),
        Consumed.with(Serdes.String(), JsonSerde.of(Flow.class)).withName("GlobalKTable.FlowLast"),
        Materialized.<String, Flow, KeyValueStore<Bytes, byte[]>>as("last")
            .withKeySerde(Serdes.String())
            .withValueSerde(JsonSerde.of(Flow.class))
    );

    // stream of incoming flow revisions
    KStream<String, Flow> stream = builder.stream(
        kafkaAdminService.getTopicName(Flow.class),
        Consumed.with(Serdes.String(), JsonSerde.of(Flow.class, false)).withName("Stream.Flow")
    );

    // trace logging, when enabled
    stream = KafkaStreamSourceService.logIfEnabled(log, stream,
        (key, value) -> log.trace("Flow in '{}.{}' with revision {}", value.getNamespace(), value.getId(), value.getRevision()),
        "Main");

    // join each flow with its previous revision; drop the flow when it is older than the stored one
    KStream<String, ExecutorFlowLast.FlowWithPrevious> streamWithPrevious = stream
        .filter((key, value) -> value != null, Named.as("Main.notNull"))
        .selectKey((key, value) -> value.uidWithoutRevision(), Named.as("Main.selectKey"))
        .leftJoin(
            flowGlobalKTable,
            (key, value) -> key,
            (readOnlyKey, current, previous) -> {
                if (previous == null) {
                    return new ExecutorFlowLast.FlowWithPrevious(current, null);
                } else if (current.getRevision() < previous.getRevision()) {
                    return null;
                } else {
                    return new ExecutorFlowLast.FlowWithPrevious(current, previous);
                }
            },
            Named.as("Main.join")
        )
        .filter((key, value) -> value != null, Named.as("Main.joinNotNull"));

    // emit a tombstone (null value) for every trigger removed by the new revision or by a flow deletion
    streamWithPrevious
        .flatMap((key, value) -> {
            List<AbstractTrigger> deletedTriggers = new ArrayList<>();
            if (value.getFlow().isDeleted()) {
                deletedTriggers = ListUtils.emptyOnNull(value.getFlow().getTriggers());
            } else if (value.getPrevious() != null) {
                deletedTriggers = FlowService.findRemovedTrigger(value.getFlow(), value.getPrevious());
            }
            return deletedTriggers.stream()
                .map(t -> new KeyValue<>(queueService.key(Trigger.of(value.getFlow(), t)), (Trigger) null))
                .collect(Collectors.toList());
        }, Named.as("DeleteTrigger.flatMap"))
        .to(kafkaAdminService.getTopicName(Trigger.class), Produced.with(Serdes.String(), JsonSerde.of(Trigger.class)).withName("To.Trigger"));

    // send to the last-revision topic; deleted flows are not dropped, so their last version stays available
    streamWithPrevious
        .map((key, value) -> new KeyValue<>(value.getFlow().uidWithoutRevision(), value.getFlow()), Named.as("Main.Map"))
        .to(kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_FLOWLAST), Produced.with(Serdes.String(), JsonSerde.of(Flow.class)).withName("To.FlowLast"));
    return builder;
}
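FlowWithPrevious is a small value holder defined in ExecutorFlowLast but not shown here. A minimal sketch, with its shape inferred from the constructor and the getFlow()/getPrevious() calls above (the actual class may differ, for instance it could be generated with Lombok):

// Sketch of the join's value holder; inferred from usage, not copied from the source.
public static class FlowWithPrevious {
    private final Flow flow;     // current revision coming from the stream
    private final Flow previous; // last stored revision, or null when seen for the first time

    public FlowWithPrevious(Flow flow, Flow previous) {
        this.flow = flow;
        this.previous = previous;
    }

    public Flow getFlow() {
        return this.flow;
    }

    public Flow getPrevious() {
        return this.previous;
    }
}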