Use of io.camunda.zeebe.logstreams.util.SynchronousLogStream in project zeebe by zeebe-io.
In class TestStreams, method writeBatch:
public long writeBatch(final String logName, final RecordToWrite[] recordToWrites) {
  final SynchronousLogStream logStream = getLogStream(logName);
  final LogStreamBatchWriter logStreamBatchWriter = logStream.newLogStreamBatchWriter();

  for (final RecordToWrite recordToWrite : recordToWrites) {
    logStreamBatchWriter
        .event()
        .key(recordToWrite.getKey())
        .sourceIndex(recordToWrite.getSourceIndex())
        .metadataWriter(recordToWrite.getRecordMetadata())
        .valueWriter(recordToWrite.getUnifiedRecordValue())
        .done();
  }

  return logStreamBatchWriter.tryWrite();
}
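
For context, a minimal sketch of how a test might drive writeBatch and cope with a failed write, assuming that tryWrite() reports a negative position when the batch could not be claimed. The helper name and the retry/back-off policy are hypothetical and not part of TestStreams.

// Hypothetical retry wrapper around TestStreams#writeBatch (sketch, not project code).
private long writeBatchWithRetry(
    final TestStreams streams, final String logName, final RecordToWrite[] records)
    throws InterruptedException {
  long position;
  do {
    // writeBatch forwards the result of tryWrite(): the position of the last
    // record on success, or (assumed) a negative value when the write failed.
    position = streams.writeBatch(logName, records);
    if (position < 0) {
      Thread.sleep(10); // back off briefly before retrying
    }
  } while (position < 0);
  return position;
}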
Use of io.camunda.zeebe.logstreams.util.SynchronousLogStream in project zeebe by zeebe-io.
In class TestStreams, method buildStreamProcessor:
private StreamProcessor buildStreamProcessor(
    final SynchronousLogStream stream,
    final ZeebeDbFactory zeebeDbFactory,
    final TypedRecordProcessorFactory factory,
    final boolean awaitOpening) {
  final var storage = createRuntimeFolder(stream);
  final var snapshot = storage.getParent().resolve(SNAPSHOT_FOLDER);

  final var recoveredLatch = new CountDownLatch(1);
  final var recoveredAwaiter =
      new StreamProcessorLifecycleAware() {
        @Override
        public void onRecovered(final ReadonlyProcessingContext context) {
          recoveredLatch.countDown();
        }
      };
  final TypedRecordProcessorFactory wrappedFactory =
      (ctx) -> factory.createProcessors(ctx).withListener(recoveredAwaiter);

  final ZeebeDb<?> zeebeDb;
  if (snapshotWasTaken) {
    zeebeDb = zeebeDbFactory.createDb(snapshot.toFile());
  } else {
    zeebeDb = zeebeDbFactory.createDb(storage.toFile());
  }

  final String logName = stream.getLogName();
  final StreamProcessor streamProcessor =
      StreamProcessor.builder()
          .logStream(stream.getAsyncLogStream())
          .zeebeDb(zeebeDb)
          .actorSchedulingService(actorScheduler)
          .commandResponseWriter(mockCommandResponseWriter)
          .listener(mockStreamProcessorListener)
          .streamProcessorFactory(wrappedFactory)
          .eventApplierFactory(eventApplierFactory)
          .streamProcessorMode(streamProcessorMode)
          .build();
  final var openFuture = streamProcessor.openAsync(false);

  if (awaitOpening) { // and recovery
    try {
      recoveredLatch.await(15, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
      Thread.interrupted();
    }
  }
  openFuture.join(15, TimeUnit.SECONDS);

  final LogContext context = logContextMap.get(logName);
  final ProcessorContext processorContext =
      ProcessorContext.createStreamContext(context, streamProcessor, zeebeDb, storage, snapshot);
  streamContextMap.put(logName, processorContext);
  closeables.manage(processorContext);

  return streamProcessor;
}
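
The awaitOpening branch above blocks on a CountDownLatch that is released from onRecovered, but it silently swallows both a timeout and an interrupt. Below is a minimal sketch of the same await pattern with explicit failure handling; the helper is an assumption and not part of TestStreams, using only JDK classes already referenced above.

// Hypothetical helper (sketch): wait for recovery, fail loudly on timeout or interrupt.
private static void awaitRecovery(final CountDownLatch recoveredLatch) {
  try {
    if (!recoveredLatch.await(15, TimeUnit.SECONDS)) {
      throw new IllegalStateException("Stream processor was not recovered within 15s");
    }
  } catch (final InterruptedException e) {
    Thread.currentThread().interrupt(); // restore the interrupt flag instead of clearing it
    throw new IllegalStateException("Interrupted while awaiting recovery", e);
  }
}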
Use of io.camunda.zeebe.logstreams.util.SynchronousLogStream in project zeebe by camunda.
In class StreamProcessorRule, method printAllRecords:
public void printAllRecords() {
  int partitionId = startPartitionId;
  for (int i = 0; i < partitionCount; i++) {
    final SynchronousLogStream logStream = streams.getLogStream(getLogName(partitionId++));
    LogStreamPrinter.printRecords(logStream);
  }
}
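
printAllRecords is mainly a debugging aid. A sketch of wiring it into a JUnit 4 TestWatcher so that every partition's records are dumped when a test fails; the DumpLogOnFailure rule is an assumption, only printAllRecords comes from StreamProcessorRule.

// Hypothetical JUnit 4 rule (sketch, not project code) that dumps all partition logs on failure.
import org.junit.rules.TestWatcher;
import org.junit.runner.Description;

public final class DumpLogOnFailure extends TestWatcher {
  private final StreamProcessorRule streamProcessorRule;

  public DumpLogOnFailure(final StreamProcessorRule streamProcessorRule) {
    this.streamProcessorRule = streamProcessorRule;
  }

  @Override
  protected void failed(final Throwable e, final Description description) {
    // print the records of every partition managed by the rule
    streamProcessorRule.printAllRecords();
  }
}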
Use of io.camunda.zeebe.logstreams.util.SynchronousLogStream in project zeebe by camunda.
In class TestStreams, method events:
public Stream<LoggedEvent> events(final String logName) {
  final SynchronousLogStream logStream = getLogStream(logName);

  final LogStreamReader reader = logStream.newLogStreamReader();
  closeables.manage(reader);

  reader.seekToFirstEvent();
  final Iterable<LoggedEvent> iterable = () -> reader;

  return StreamSupport.stream(iterable.spliterator(), false);
}
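
A sketch of consuming the returned stream, for example asserting that the positions of the logged events are strictly increasing. The assertion helper is hypothetical; only events(...) and LoggedEvent#getPosition come from the code above and the logstreams API.

// Hypothetical assertion helper (sketch) over the events stream.
private static void assertPositionsAreIncreasing(final Stream<LoggedEvent> events) {
  final List<Long> positions = events.map(LoggedEvent::getPosition).collect(Collectors.toList());
  for (int i = 1; i < positions.size(); i++) {
    if (positions.get(i) <= positions.get(i - 1)) {
      throw new AssertionError("Expected strictly increasing positions, but got " + positions);
    }
  }
}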