use of io.camunda.zeebe.engine.processing.streamprocessor.StreamProcessorLifecycleAware in project zeebe by zeebe-io.
the class TestStreams method buildStreamProcessor.
private StreamProcessor buildStreamProcessor(final SynchronousLogStream stream, final ZeebeDbFactory zeebeDbFactory, final TypedRecordProcessorFactory factory, final boolean awaitOpening) {
  final var storage = createRuntimeFolder(stream);
  final var snapshot = storage.getParent().resolve(SNAPSHOT_FOLDER);

  // latch released once the stream processor has recovered its state
  final var recoveredLatch = new CountDownLatch(1);
  final var recoveredAwaiter =
      new StreamProcessorLifecycleAware() {
        @Override
        public void onRecovered(final ReadonlyProcessingContext context) {
          recoveredLatch.countDown();
        }
      };
  // wrap the given factory so the awaiter is registered alongside the processors
  final TypedRecordProcessorFactory wrappedFactory =
      (ctx) -> factory.createProcessors(ctx).withListener(recoveredAwaiter);

  // open the database from the snapshot folder if a snapshot was taken, otherwise from the runtime folder
  final ZeebeDb<?> zeebeDb;
  if (snapshotWasTaken) {
    zeebeDb = zeebeDbFactory.createDb(snapshot.toFile());
  } else {
    zeebeDb = zeebeDbFactory.createDb(storage.toFile());
  }

  final String logName = stream.getLogName();
  final StreamProcessor streamProcessor =
      StreamProcessor.builder()
          .logStream(stream.getAsyncLogStream())
          .zeebeDb(zeebeDb)
          .actorSchedulingService(actorScheduler)
          .commandResponseWriter(mockCommandResponseWriter)
          .listener(mockStreamProcessorListener)
          .streamProcessorFactory(wrappedFactory)
          .eventApplierFactory(eventApplierFactory)
          .streamProcessorMode(streamProcessorMode)
          .build();
  final var openFuture = streamProcessor.openAsync(false);

  if (awaitOpening) {
    // await recovery in addition to opening
    try {
      recoveredLatch.await(15, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
      // restore the interrupt flag instead of clearing it
      Thread.currentThread().interrupt();
    }
  }
  openFuture.join(15, TimeUnit.SECONDS);

  final LogContext context = logContextMap.get(logName);
  final ProcessorContext processorContext =
      ProcessorContext.createStreamContext(context, streamProcessor, zeebeDb, storage, snapshot);
  streamContextMap.put(logName, processorContext);
  closeables.manage(processorContext);
  return streamProcessor;
}
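The anonymous listener above is the smallest possible use of the interface: only onRecovered is overridden, and it simply releases a latch. The same idea can be packaged as a reusable, future-based barrier. The following sketch is illustrative only: the RecoveryBarrier class and its awaitRecovery method are not part of TestStreams, and the import path for ReadonlyProcessingContext is assumed to match the package named in the heading.

import io.camunda.zeebe.engine.processing.streamprocessor.ReadonlyProcessingContext;
import io.camunda.zeebe.engine.processing.streamprocessor.StreamProcessorLifecycleAware;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

// Hypothetical helper: completes a future once the stream processor reports recovery.
final class RecoveryBarrier implements StreamProcessorLifecycleAware {
  private final CompletableFuture<Void> recovered = new CompletableFuture<>();

  @Override
  public void onRecovered(final ReadonlyProcessingContext context) {
    // invoked by the stream processor after its state has been restored
    recovered.complete(null);
  }

  void awaitRecovery(final long timeout, final TimeUnit unit) throws Exception {
    // unlike CountDownLatch#await, this surfaces a TimeoutException instead of returning silently
    recovered.get(timeout, unit);
  }
}

Such a barrier would be registered exactly like the latch-based awaiter above, e.g. factory.createProcessors(ctx).withListener(barrier).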
use of io.camunda.zeebe.engine.processing.streamprocessor.StreamProcessorLifecycleAware in project zeebe by camunda and in project zeebe by camunda-cloud.
the class TestStreams method buildStreamProcessor (identical to the snippet above).
use of io.camunda.zeebe.engine.processing.streamprocessor.StreamProcessorLifecycleAware in project zeebe by zeebe-io.
the class TestStreams method buildStreamProcessor.
public StreamProcessor buildStreamProcessor(final SynchronousLogStream stream, final ZeebeDbFactory zeebeDbFactory, final TypedRecordProcessorFactory factory, final Function<LogStreamBatchWriter, TypedStreamWriter> streamWriterFactory, final boolean awaitOpening) {
  final var storage = createRuntimeFolder(stream);
  final var snapshot = storage.getParent().resolve(SNAPSHOT_FOLDER);

  // latch released once the stream processor has recovered its state
  final var recoveredLatch = new CountDownLatch(1);
  final var recoveredAwaiter =
      new StreamProcessorLifecycleAware() {
        @Override
        public void onRecovered(final ReadonlyProcessingContext context) {
          recoveredLatch.countDown();
        }
      };
  final TypedRecordProcessorFactory wrappedFactory =
      (ctx) -> factory.createProcessors(ctx).withListener(recoveredAwaiter);

  final ZeebeDb<?> zeebeDb;
  if (snapshotWasTaken) {
    zeebeDb = zeebeDbFactory.createDb(snapshot.toFile());
  } else {
    zeebeDb = zeebeDbFactory.createDb(storage.toFile());
  }

  final String logName = stream.getLogName();
  final var builder =
      StreamProcessor.builder()
          .logStream(stream.getAsyncLogStream())
          .zeebeDb(zeebeDb)
          .actorSchedulingService(actorScheduler)
          .commandResponseWriter(mockCommandResponseWriter)
          .listener(mockStreamProcessorListener)
          .streamProcessorFactory(wrappedFactory)
          .eventApplierFactory(eventApplierFactory)
          .streamProcessorMode(streamProcessorMode);
  // only override the stream writer if the caller provided a factory
  if (streamWriterFactory != null) {
    builder.typedStreamWriterFactory(streamWriterFactory);
  }
  final StreamProcessor streamProcessor = builder.build();
  final var openFuture = streamProcessor.openAsync(false);

  if (awaitOpening) {
    // await recovery in addition to opening
    try {
      recoveredLatch.await(15, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
      // restore the interrupt flag instead of clearing it
      Thread.currentThread().interrupt();
    }
  }
  openFuture.join(15, TimeUnit.SECONDS);

  final LogContext context = logContextMap.get(logName);
  final ProcessorContext processorContext =
      ProcessorContext.createStreamContext(context, streamProcessor, zeebeDb, storage, snapshot);
  streamContextMap.put(logName, processorContext);
  closeables.manage(processorContext);
  return streamProcessor;
}
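This overload differs from the previous one only in the optional streamWriterFactory: the builder applies typedStreamWriterFactory only when a factory is supplied, so callers that want the default writer can pass null. A convenience overload along the following lines is hypothetical (it is not shown in the source) but illustrates how the nullable parameter is meant to be used.

// Hypothetical convenience overload: keeps the default stream writer by passing null.
public StreamProcessor buildStreamProcessor(final SynchronousLogStream stream, final ZeebeDbFactory zeebeDbFactory, final TypedRecordProcessorFactory factory, final boolean awaitOpening) {
  return buildStreamProcessor(stream, zeebeDbFactory, factory, null, awaitOpening);
}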
use of io.camunda.zeebe.engine.processing.streamprocessor.StreamProcessorLifecycleAware in project zeebe by camunda-cloud.
the class JobEventProcessors method addJobProcessors.
public static void addJobProcessors(final TypedRecordProcessors typedRecordProcessors, final MutableZeebeState zeebeState, final Consumer<String> onJobsAvailableCallback, final BpmnEventPublicationBehavior eventPublicationBehavior, final Writers writers, final JobMetrics jobMetrics, final EventTriggerBehavior eventTriggerBehavior) {
  final var jobState = zeebeState.getJobState();
  final var keyGenerator = zeebeState.getKeyGenerator();
  final EventHandle eventHandle =
      new EventHandle(keyGenerator, zeebeState.getEventScopeInstanceState(), writers, zeebeState.getProcessState(), eventTriggerBehavior);
  final var jobBackoffChecker = new JobBackoffChecker(jobState);

  typedRecordProcessors
      .onCommand(ValueType.JOB, JobIntent.COMPLETE, new JobCompleteProcessor(zeebeState, jobMetrics, eventHandle))
      .onCommand(ValueType.JOB, JobIntent.FAIL, new JobFailProcessor(zeebeState, zeebeState.getKeyGenerator(), jobMetrics, jobBackoffChecker))
      .onCommand(ValueType.JOB, JobIntent.THROW_ERROR, new JobThrowErrorProcessor(zeebeState, eventPublicationBehavior, keyGenerator, jobMetrics))
      .onCommand(ValueType.JOB, JobIntent.TIME_OUT, new JobTimeOutProcessor(zeebeState, jobMetrics))
      .onCommand(ValueType.JOB, JobIntent.UPDATE_RETRIES, new JobUpdateRetriesProcessor(zeebeState))
      .onCommand(ValueType.JOB, JobIntent.CANCEL, new JobCancelProcessor(zeebeState, jobMetrics))
      .onCommand(ValueType.JOB, JobIntent.RECUR_AFTER_BACKOFF, new JobRecurProcessor(zeebeState))
      .onCommand(ValueType.JOB_BATCH, JobBatchIntent.ACTIVATE, new JobBatchActivateProcessor(writers, zeebeState, zeebeState.getKeyGenerator(), jobMetrics))
      .withListener(new JobTimeoutTrigger(jobState))
      .withListener(jobBackoffChecker)
      .withListener(
          new StreamProcessorLifecycleAware() {
            @Override
            public void onRecovered(final ReadonlyProcessingContext context) {
              // register the callback only after the job state has been recovered
              jobState.setJobsAvailableCallback(onJobsAvailableCallback);
            }
          });
}
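The final withListener call is where StreamProcessorLifecycleAware comes in: wiring onJobsAvailableCallback only in onRecovered defers the notification hook until the processor has restored its state. The callback itself is just a Consumer<String>. A minimal, hypothetical probe is sketched below; the class and method names are illustrative, and the string argument is assumed to carry the job type.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;

// Hypothetical probe: records every job type reported as having available jobs.
final class AvailableJobTypesProbe implements Consumer<String> {
  private final Set<String> availableTypes = ConcurrentHashMap.newKeySet();

  @Override
  public void accept(final String jobType) {
    availableTypes.add(jobType);
  }

  Set<String> snapshot() {
    return Set.copyOf(availableTypes);
  }
}

Such a probe would be passed as the onJobsAvailableCallback argument of addJobProcessors.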