Example usage of io.kestra.core.runners.WorkerTask in the kestra project (by kestra-io):
the topology() method of the ExecutorWorkerRunning class.
/**
 * Builds the Kafka Streams topology that detects worker-instance changes and
 * re-dispatches the tasks of evicted workers.
 *
 * <p>The topology: mirrors worker instances into a global state store, keeps a
 * global table of running worker tasks (used purely as a state store), then for
 * every instance change emitted by {@code WorkerInstanceTransformer} it resends
 * the affected worker tasks, clears their "running" entries, and propagates the
 * updated instance back to both instance topics.
 *
 * @return the configured {@link StreamsBuilder}
 */
public StreamsBuilder topology() {
    StreamsBuilder builder = new KafkaStreamsBuilder();

    // Global store mirroring the executor's view of worker instances.
    builder.addGlobalStore(
        Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore(WORKERINSTANCE_STATE_STORE_NAME),
            Serdes.String(),
            JsonSerde.of(WorkerInstance.class)
        ),
        kafkaAdminService.getTopicName(KafkaStreamSourceService.TOPIC_EXECUTOR_WORKERINSTANCE),
        Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class))
            .withName("GlobalStore.ExecutorWorkerInstance"),
        () -> new GlobalStateProcessor<>(WORKERINSTANCE_STATE_STORE_NAME)
    );

    // only used as state store
    builder.globalTable(
        kafkaAdminService.getTopicName(WorkerTaskRunning.class),
        Consumed.with(Serdes.String(), JsonSerde.of(WorkerTaskRunning.class))
            .withName("GlobalKTable.WorkerTaskRunning"),
        Materialized.<String, WorkerTaskRunning, KeyValueStore<Bytes, byte[]>>as(WORKER_RUNNING_STATE_STORE_NAME)
            .withKeySerde(Serdes.String())
            .withValueSerde(JsonSerde.of(WorkerTaskRunning.class))
    );

    KStream<String, WorkerInstance> instances = builder.stream(
        kafkaAdminService.getTopicName(WorkerInstance.class),
        Consumed.with(Serdes.String(), JsonSerde.of(WorkerInstance.class))
            .withName("KStream.WorkerInstance")
    );

    // Forward every instance event to the executor's instance topic.
    instances.to(
        kafkaAdminService.getTopicName(TOPIC_EXECUTOR_WORKERINSTANCE),
        Produced.with(Serdes.String(), JsonSerde.of(WorkerInstance.class))
            .withName("DetectNewWorker.toExecutorWorkerInstance")
    );

    // Detect evicted/new workers; each transformer output is a list of results.
    KStream<String, WorkerInstanceTransformer.Result> transformed = instances
        .transformValues(WorkerInstanceTransformer::new, Named.as("DetectNewWorker.workerInstanceTransformer"))
        .flatMapValues((readOnlyKey, value) -> value, Named.as("DetectNewWorker.flapMapList"));

    // we resend the worker task from evicted worker
    KStream<String, WorkerTask> tasksToResend = transformed.flatMapValues(
        (readOnlyKey, value) -> value.getWorkerTasksToSend(),
        Named.as("DetectNewWorkerTask.flapMapWorkerTaskToSend")
    );

    // and remove from running since already sent
    tasksToResend
        .map(
            (key, value) -> KeyValue.pair(value.getTaskRun().getId(), (WorkerTaskRunning) null),
            Named.as("DetectNewWorkerTask.workerTaskRunningToNull")
        )
        .to(
            kafkaAdminService.getTopicName(WorkerTaskRunning.class),
            Produced.with(Serdes.String(), JsonSerde.of(WorkerTaskRunning.class))
                .withName("DetectNewWorker.toWorkerTaskRunning")
        );

    // Resent tasks go back to the worker task topic (with optional logging).
    KafkaStreamSourceService
        .logIfEnabled(log, tasksToResend, (key, value) -> executorService.log(log, false, value), "DetectNewWorkerTask")
        .to(
            kafkaAdminService.getTopicName(WorkerTask.class),
            Produced.with(Serdes.String(), JsonSerde.of(WorkerTask.class))
                .withName("DetectNewWorkerTask.toWorkerTask")
        );

    // we resend the WorkerInstance update
    KStream<String, WorkerInstance> instanceUpdates = KafkaStreamSourceService
        .logIfEnabled(log, transformed, (key, value) -> log.debug("Instance updated: {}", value), "DetectNewWorkerInstance")
        .map((key, value) -> value.getWorkerInstanceUpdated(), Named.as("DetectNewWorkerInstance.mapInstance"));

    // cleanup executor workerinstance state store
    instanceUpdates
        .filter((key, value) -> value != null, Named.as("DetectNewWorkerInstance.filterNotNull"))
        .to(
            kafkaAdminService.getTopicName(TOPIC_EXECUTOR_WORKERINSTANCE),
            Produced.with(Serdes.String(), JsonSerde.of(WorkerInstance.class))
                .withName("DetectNewWorkerInstance.toExecutorWorkerInstance")
        );

    instanceUpdates.to(
        kafkaAdminService.getTopicName(WorkerInstance.class),
        Produced.with(Serdes.String(), JsonSerde.of(WorkerInstance.class))
            .withName("DetectNewWorkerInstance.toWorkerInstance")
    );

    return builder;
}
Example usage of io.kestra.core.runners.WorkerTask in the kestra project (by kestra-io):
the receive() method of the KafkaWorkerTaskQueue class.
/**
 * Polls the worker task topic on a background thread and hands every received
 * {@link WorkerTask} to the given consumer. For each batch, a
 * {@link WorkerTaskRunning} record plus the consumed offsets are committed in a
 * single Kafka transaction BEFORE any task is submitted to the worker, so a
 * WorkerTaskResult can never be produced ahead of the committed offsets.
 *
 * @param consumerGroup class used to resolve the Kafka consumer group name
 * @param consumer callback invoked with each received worker task, after the transaction commits
 * @return a Runnable that requests the polling loop to stop when invoked
 */
public Runnable receive(Class<?> consumerGroup, Consumer<WorkerTask> consumer) {
// shared with the returned Runnable so callers can request a graceful stop
AtomicBoolean running = new AtomicBoolean(true);
poolExecutor.execute(() -> {
kafkaProducer.initTransactions();
org.apache.kafka.clients.consumer.Consumer<String, WorkerTask> kafkaConsumer = kafkaConsumerService.of(KafkaWorkerTaskQueue.class, JsonSerde.of(WorkerTask.class), consumerRebalanceListener(), consumerGroup);
// register the consumer so it can be woken up / managed from elsewhere
kafkaConsumers.add(kafkaConsumer);
kafkaConsumer.subscribe(Collections.singleton(topicsConfigWorkerTask.getName()));
while (running.get()) {
try {
ConsumerRecords<String, WorkerTask> records = kafkaConsumer.poll(Duration.ofMillis(500));
if (!records.isEmpty()) {
// publish the "running" records and the offsets atomically in one transaction
kafkaProducer.beginTransaction();
records.forEach(record -> {
// the local worker instance may not be registered yet at startup; block until it is
if (workerInstance.get() == null) {
Await.until(() -> workerInstance.get() != null);
}
WorkerTaskRunning workerTaskRunning = WorkerTaskRunning.of(record.value(), workerInstance.get(), record.partition());
this.kafkaProducer.send(new ProducerRecord<>(topicsConfigWorkerTaskRunning.getName(), this.queueService.key(workerTaskRunning), workerTaskRunning));
});
// we commit first all offset before submit task to worker
kafkaProducer.sendOffsetsToTransaction(KafkaConsumerService.maxOffsets(records), kafkaConfigService.getConsumerGroupName(consumerGroup));
kafkaProducer.commitTransaction();
// now, we can submit to worker to be sure we don't have a WorkerTaskResult before commiting the offset!
records.forEach(record -> {
consumer.accept(record.value());
});
}
} catch (WakeupException e) {
log.debug("Received Wakeup on {}!", this.getClass().getName());
// first call, we want to shutdown, so pause the consumer, will be closed after properly on second call
if (kafkaConsumer.paused().size() == 0) {
kafkaConsumer.pause(kafkaConsumer.assignment());
} else {
running.set(false);
}
}
}
// loop exited: deregister and release the consumer
kafkaConsumers.remove(kafkaConsumer);
kafkaConsumer.close();
});
// invoking this stops the polling loop on its next iteration
return () -> running.set(false);
}
Aggregations