Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
Class StreamSegmentMapperTests, method testCreateAlreadyExists.
/**
 * General test for verifying the behavior when an attempt is made to create a Segment/Transaction that already exists.
 *
 * @param segmentName   The name of the segment/transaction to create.
 * @param createSegment A BiFunction that, when invoked with a StreamSegmentMapper and a Collection of
 *                      AttributeUpdates, will create the given segment.
 */
private void testCreateAlreadyExists(String segmentName, BiFunction<StreamSegmentMapper, Collection<AttributeUpdate>, CompletableFuture<?>> createSegment) {
    final String stateSegmentName = StreamSegmentNameUtils.getStateSegmentName(segmentName);
    final Map<UUID, Long> correctAttributes = Collections.singletonMap(UUID.randomUUID(), 123L);
    final Collection<AttributeUpdate> correctAttributeUpdates = correctAttributes.entrySet().stream()
            .map(e -> new AttributeUpdate(e.getKey(), AttributeUpdateType.Replace, e.getValue()))
            .collect(Collectors.toList());
    final Map<UUID, Long> badAttributes = Collections.singletonMap(UUID.randomUUID(), 456L);
    final Collection<AttributeUpdate> badAttributeUpdates = badAttributes.entrySet().stream()
            .map(e -> new AttributeUpdate(e.getKey(), AttributeUpdateType.Replace, e.getValue()))
            .collect(Collectors.toList());
    @Cleanup
    TestContext context = new TestContext();
    @Cleanup
    val storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    val store = new SegmentStateStore(storage, executorService());
    val mapper = new StreamSegmentMapper(context.metadata, context.operationLog, store, context.noOpMetadataCleanup,
            storage, executorService());
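    // The SegmentStateStore keeps each segment's attributes in a companion "state segment" in
    // Storage (stateSegmentName above); the scenarios below tamper with that state segment directly.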
    // 1. Segment Exists, and so does State File (and it's not corrupted) -> Exception must be bubbled up.
    createSegment.apply(mapper, correctAttributeUpdates).join();
    AssertExtensions.assertThrows(
            "createNewStreamSegment did not fail when Segment already exists.",
            () -> createSegment.apply(mapper, badAttributeUpdates),
            ex -> ex instanceof StreamSegmentExistsException);
    val state1 = store.get(segmentName, TIMEOUT).join();
    AssertExtensions.assertMapEquals("Unexpected attributes after failed attempt to recreate correctly created segment",
            correctAttributes, state1.getAttributes());

    // 2. Segment Exists, but with empty State File: State file re-created & no exception bubbled up.
    storage.openWrite(stateSegmentName)
           .thenCompose(handle -> storage.delete(handle, TIMEOUT))
           .thenCompose(v -> storage.create(stateSegmentName, TIMEOUT))
           .join();
    Assert.assertNull("Expected a null SegmentState.", store.get(segmentName, TIMEOUT).join());
    createSegment.apply(mapper, correctAttributeUpdates).join();
    val state2 = store.get(segmentName, TIMEOUT).join();
    AssertExtensions.assertMapEquals("Unexpected attributes after successful attempt to complete segment creation (missing state file)",
            correctAttributes, state2.getAttributes());

    // 3. Segment Exists, but with corrupted State File: State file re-created & no exception bubbled up.
    storage.openWrite(stateSegmentName)
           .thenCompose(handle -> storage.delete(handle, TIMEOUT))
           .thenCompose(v -> storage.create(stateSegmentName, TIMEOUT))
           .thenCompose(v -> storage.openWrite(stateSegmentName))
           .thenCompose(handle -> storage.write(handle, 0, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT))
           .join();
    AssertExtensions.assertThrows(
            "Expected a DataCorruptionException when reading a corrupted State File.",
            () -> store.get(segmentName, TIMEOUT),
            ex -> ex instanceof DataCorruptionException);
    createSegment.apply(mapper, correctAttributeUpdates).join();
    val state3 = store.get(segmentName, TIMEOUT).join();
    AssertExtensions.assertMapEquals("Unexpected attributes after successful attempt to complete segment creation (corrupted state file)",
            correctAttributes, state3.getAttributes());

    // 4. Segment Exists with non-zero length, but with empty/corrupted State File: State File re-created and exception thrown.
    storage.openWrite(stateSegmentName)
           .thenCompose(handle -> storage.delete(handle, TIMEOUT))
           .thenCompose(v -> storage.create(stateSegmentName, TIMEOUT))
           .thenCompose(v -> storage.openWrite(segmentName))
           .thenCompose(handle -> storage.write(handle, 0, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT))
           .join();
    AssertExtensions.assertThrows(
            "createNewStreamSegment did not fail when Segment already exists (non-zero length, missing state file).",
            () -> createSegment.apply(mapper, correctAttributeUpdates),
            ex -> ex instanceof StreamSegmentExistsException);
    val state4 = store.get(segmentName, TIMEOUT).join();
    AssertExtensions.assertMapEquals("Unexpected attributes after failed attempt to recreate segment with non-zero length",
            correctAttributes, state4.getAttributes());
}
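
For reference, here is a minimal sketch of the kind of BiFunction a caller might pass as createSegment. The segment name is hypothetical, and the createNewStreamSegment signature (name, attribute updates, timeout) is assumed from the assertion messages above rather than confirmed by this excerpt:

BiFunction<StreamSegmentMapper, Collection<AttributeUpdate>, CompletableFuture<?>> createSegment =
        (mapper, attributes) -> mapper.createNewStreamSegment("segment-name", attributes, TIMEOUT);
testCreateAlreadyExists("segment-name", createSegment);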

Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
Class DurableLogTests, method testRecoveryFailures.
/**
 * Tests the DurableLog recovery process in a scenario where there are failures during the process
 * (these may or may not be DataCorruptionExceptions).
 */
@Test
public void testRecoveryFailures() throws Exception {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Fail DataLog reads after X reads.
    int failReadAfter = 2;

    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(
            new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
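    // The factory publishes each TestDurableDataLog it creates through dataLog::set, which is how
    // the test obtains a handle for injecting read errors further down.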
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;

    // First DurableLog. We use this for generating data.
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        // Process all generated operations and wait for them to complete.
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
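
    // At this point the DurableDataLog holds a complete, valid log; each recovery attempt below
    // replays it through a fresh DurableLog with a fault injected mid-replay.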
    // Recovery failure due to DataLog Failures.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Inject an artificial error into the DataLog read after a few reads.
        ErrorInjector<Exception> readNextInjector = new ErrorInjector<>(count -> count > failReadAfter, () -> new DataLogNotAvailableException("intentional"));
        dataLog.get().setReadErrorInjectors(null, readNextInjector);

        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows(
                "Recovery did not fail properly when expecting DurableDataLogException.",
                () -> durableLog.startAsync().awaitRunning(),
                ex -> {
                    if (ex instanceof IllegalStateException) {
                        ex = ex.getCause();
                    }
                    if (ex == null) {
                        try {
                            // We need this to enter a FAILED state to get its failure cause.
                            durableLog.awaitTerminated();
                        } catch (Exception ex2) {
                            ex = durableLog.failureCause();
                        }
                    }
                    ex = Exceptions.unwrap(ex);
                    return ex instanceof DataLogNotAvailableException && ex.getMessage().equals("intentional");
                });
    }
    // Recovery failure due to DataCorruptionException.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Reset error injectors to nothing.
        dataLog.get().setReadErrorInjectors(null, null);
        AtomicInteger readCounter = new AtomicInteger();
        dataLog.get().setReadInterceptor(readItem -> {
            if (readCounter.incrementAndGet() > failReadAfter && readItem.getLength() > DataFrame.MIN_ENTRY_LENGTH_NEEDED) {
                // Mangle the payload: overwrite its contents with a DataFrame having a bogus
                // previous sequence number.
                DataFrame df = DataFrame.ofSize(readItem.getLength());
                df.seal();
                ArrayView serialization = df.getData();
                return new InjectedReadItem(serialization.getReader(), serialization.getLength(), readItem.getAddress());
            }
            return readItem;
        });

        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows(
                "Recovery did not fail properly when expecting DataCorruptionException.",
                () -> durableLog.startAsync().awaitRunning(),
                ex -> {
                    if (ex instanceof IllegalStateException) {
                        ex = ex.getCause();
                    }
                    return Exceptions.unwrap(ex) instanceof DataCorruptionException;
                });

        // Verify that the underlying DurableDataLog has been disabled.
        val disabledDataLog = dataLogFactory.createDurableDataLog(CONTAINER_ID);
        AssertExtensions.assertThrows(
                "DurableDataLog has not been disabled following a recovery failure with DataCorruptionException.",
                () -> disabledDataLog.initialize(TIMEOUT),
                ex -> ex instanceof DataLogDisabledException);
    }
}
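
Both assertions above use the same pattern for digging the root cause out of a failed Guava Service startup. A standalone sketch of that pattern (assuming only that awaitRunning() throws IllegalStateException when the Service fails, and that Exceptions.unwrap strips CompletionException/ExecutionException layers):

try {
    durableLog.startAsync().awaitRunning();
} catch (IllegalStateException ex) {
    // The real failure is the cause, possibly wrapped in several async-execution layers.
    Throwable root = Exceptions.unwrap(ex.getCause());
    // root is now, e.g., DataLogNotAvailableException or DataCorruptionException.
}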

Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
Class DurableLogTests, method testTruncateWithStorageMetadataCheckpoints.
/**
* Tests the ability of the truncate() method to auto-queue (and wait for) mini-metadata checkpoints containing items
* that are not updated via normal operations. Such items include StorageLength and IsSealedInStorage.
*/
@Test
public void testTruncateWithStorageMetadataCheckpoints() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Setup a DurableLog and start it.
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup
    val readIndex1 = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex1, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata1, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Update the metadata with Storage-related data: set some arbitrary StorageLengths and mark
        // every other already-sealed segment as sealed in Storage.
        long storageOffset = 0;
        for (long segmentId : streamSegmentIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            sm.setStorageLength(Math.min(storageOffset, sm.getLength()));
            storageOffset++;
            if (sm.isSealed() && storageOffset % 2 == 0) {
                sm.markSealedInStorage();
            }
        }
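
        // StorageLength and IsSealedInStorage are not recorded by regular log operations, which is
        // why truncate() must auto-queue a mini metadata checkpoint (a StorageMetadataCheckpointOperation)
        // to preserve them across the truncation point; that is what the recovery below verifies.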
        // Truncate at the last possible truncation point.
        val originalOperations = readUpToSequenceNumber(durableLog, metadata1.getOperationSequenceNumber());
        long lastCheckpointSeqNo = -1;
        for (Operation o : originalOperations) {
            if (o instanceof MetadataCheckpointOperation) {
                lastCheckpointSeqNo = o.getSequenceNumber();
            }
        }
        AssertExtensions.assertGreaterThan("Could not find any truncation points.", 0, lastCheckpointSeqNo);
        durableLog.truncate(lastCheckpointSeqNo, TIMEOUT).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Start a second DurableLog and then verify the metadata.
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    val readIndex2 = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, cacheFactory, storage, cacheManager, executorService());
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex2, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Compare metadata1 against the recovered metadata2.
        for (long segmentId : streamSegmentIds) {
            val sm1 = metadata1.getStreamSegmentMetadata(segmentId);
            val sm2 = metadata2.getStreamSegmentMetadata(segmentId);
            Assert.assertEquals("StorageLength differs for recovered segment " + segmentId, sm1.getStorageLength(), sm2.getStorageLength());
            Assert.assertEquals("IsSealedInStorage differs for recovered segment " + segmentId, sm1.isSealedInStorage(), sm2.isSealedInStorage());
        }
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}

Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
Class DurableLogTests, method testRecoveryWithNoFailures.
// endregion
// region Recovery
/**
 * Tests the DurableLog recovery process in a scenario where there are no failures during the process.
 */
@Test
public void testRecoveryWithNoFailures() throws Exception {
    int streamSegmentCount = 50;
    int transactionsPerStreamSegment = 2;
    int appendsPerStreamSegment = 20;
    boolean mergeTransactions = true;
    boolean sealStreamSegments = true;

    // Setup a DurableLog and start it.
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    HashSet<Long> streamSegmentIds;
    AbstractMap<Long, Long> transactions;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;

    // First DurableLog. We use this for generating data.
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        transactions = createTransactionsWithOperations(streamSegmentIds, transactionsPerStreamSegment, metadata, durableLog, storage);
        List<Operation> operations = generateOperations(streamSegmentIds, transactions, appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, mergeTransactions, sealStreamSegments);
        // Process all generated operations and wait for them to complete.
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before recovery.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Second DurableLog. We use this for recovery.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
         DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        List<Operation> recoveredOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        assertRecoveredOperationsMatch(originalOperations, recoveredOperations);
        performMetadataChecks(streamSegmentIds, new HashSet<>(), transactions, completionFutures, metadata, mergeTransactions, sealStreamSegments);
        performReadIndexChecks(completionFutures, readIndex);
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
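
The tests on this page lean on a readUpToSequenceNumber helper whose body is not shown in this excerpt. A plausible sketch of it (hypothetical implementation, inferred from the durableLog.read(afterSequenceNumber, maxCount, timeout) calls visible in the last example below):

private List<Operation> readUpToSequenceNumber(DurableLog durableLog, long maxSeqNo) {
    // Read from the start of the log (afterSequenceNumber = -1) and keep every operation
    // whose SequenceNumber does not exceed maxSeqNo.
    List<Operation> result = new ArrayList<>();
    Iterator<Operation> reader = durableLog.read(-1, Integer.MAX_VALUE, TIMEOUT).join();
    while (reader.hasNext()) {
        Operation op = reader.next();
        if (op.getSequenceNumber() > maxSeqNo) {
            break;
        }
        result.add(op);
    }
    return result;
}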

Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.
Class DurableLogTests, method testTruncateWithoutRecovery.
// endregion
// region Truncation
/**
* Tests the truncate() method without doing any recovery.
*/
@Test
public void testTruncateWithoutRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup
    ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Hook up a listener to figure out when truncation actually happens.
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        HashSet<Long> streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        // Add one of these at the end to ensure we can truncate everything.
        queuedOperations.add(new MetadataCheckpointOperation());
        List<OperationWithCompletion> completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before truncation.
        List<Operation> originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        boolean fullTruncationPossible = false;
        // At the end, verify all operations and all entries in the DataLog were truncated.
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            truncationOccurred.set(false);
            if (currentOperation instanceof MetadataCheckpointOperation) {
                // Need to figure out if the operation we're about to truncate to is actually the first in the log;
                // in that case, we should not be expecting any truncation.
                boolean isTruncationPointFirstOperation = durableLog.read(-1, 1, TIMEOUT).join().next() instanceof MetadataCheckpointOperation;
                // Perform the truncation.
                durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
                if (!isTruncationPointFirstOperation) {
                    Assert.assertTrue("No truncation occurred even though a valid Truncation Point was passed: " + currentOperation.getSequenceNumber(), truncationOccurred.get());
                }

                // Verify all operations up to, and including, this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                } else {
                    // Sometimes the Truncation Point is on the same DataFrame as other data, and it's the last DataFrame;
                    // in that case, it cannot be truncated, since truncating the frame would mean losing the Checkpoint as well.
                    fullTruncationPossible = !reader.hasNext();
                }
            } else {
                // Verify we are not allowed to truncate at non-valid Truncation Points.
                AssertExtensions.assertThrows(
                        "DurableLog allowed truncation on a non-MetadataCheckpointOperation.",
                        () -> durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT),
                        ex -> ex instanceof IllegalArgumentException);
                // Verify the Operation Log is still intact.
                Iterator<Operation> reader = durableLog.read(-1, 1, TIMEOUT).join();
                Assert.assertTrue("No elements left in the log even though no truncation occurred.", reader.hasNext());
                Operation firstOp = reader.next();
                AssertExtensions.assertLessThanOrEqual("It appears that Operations were removed from the Log even though no truncation happened.", currentOperation.getSequenceNumber(), firstOp.getSequenceNumber());
            }
        }
        // Verify that we can still queue operations to the DurableLog and that they can be read.
        // In this case we'll just queue some StreamSegmentMapOperations.
        StreamSegmentMapOperation newOp = new StreamSegmentMapOperation(StreamSegmentInformation.builder().name("foo").build());
        if (!fullTruncationPossible) {
            // We were not able to do a full truncation before. Do one now, since we are guaranteed to have a new DataFrame available.
            MetadataCheckpointOperation lastCheckpoint = new MetadataCheckpointOperation();
            durableLog.add(lastCheckpoint, TIMEOUT).join();
            durableLog.truncate(lastCheckpoint.getSequenceNumber(), TIMEOUT).join();
        }
        durableLog.add(newOp, TIMEOUT).join();

        // Expected: Full Checkpoint + Storage Checkpoint (auto-added) + the new op.
        final int expectedOperationCount = 3;
        List<Operation> newOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        Assert.assertEquals("Unexpected number of operations added after full truncation.", expectedOperationCount, newOperations.size());
        Assert.assertTrue("Expecting the first operation after full truncation to be a MetadataCheckpointOperation.", newOperations.get(0) instanceof MetadataCheckpointOperation);
        Assert.assertTrue("Expecting a StorageMetadataCheckpointOperation to be auto-added after full truncation.", newOperations.get(1) instanceof StorageMetadataCheckpointOperation);
        Assert.assertEquals("Unexpected Operation encountered after full truncation.", newOp, newOperations.get(2));

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
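
In short, the truncation contract this test pins down, as a two-line sketch (checkpointOp and appendOp are hypothetical operations already present in the log):

// Only the SequenceNumber of a MetadataCheckpointOperation is a valid Truncation Point:
durableLog.truncate(checkpointOp.getSequenceNumber(), TIMEOUT).join();   // succeeds
durableLog.truncate(appendOp.getSequenceNumber(), TIMEOUT);              // fails with IllegalArgumentException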