Usage example of io.pravega.segmentstore.storage.DurableDataLog from the Pravega project (pravega/pravega): class OperationProcessorTests, method testConcurrentStopAndCommit.
/**
 * Verifies that an operation whose DataFrame commit races with an OperationProcessor
 * shutdown is reported as cancelled (or ObjectClosedException), even though the
 * underlying DurableDataLog append itself eventually completes successfully.
 */
@Test
public void testConcurrentStopAndCommit() throws Exception {
    @Cleanup
    TestContext context = new TestContext();

    // Create a single test segment with a single append operation for it.
    long segmentId = createStreamSegmentsInMetadata(1, context.metadata).stream().findFirst().orElse(-1L);
    List<Operation> operations = Collections.singletonList(new StreamSegmentAppendOperation(segmentId, new byte[1], null));

    // The custom DurableDataLog will not acknowledge its append until we complete this future by hand.
    CompletableFuture<LogAddress> manualAppendResult = new CompletableFuture<>();
    @Cleanup
    DurableDataLog dataLog = new ManualAppendOnlyDurableDataLog(() -> manualAppendResult);
    dataLog.initialize(TIMEOUT);
    @Cleanup
    OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();

    // Queue the operation, then begin shutting the processor down before the append is acknowledged.
    OperationWithCompletion completionFuture = processOperations(operations, operationProcessor).stream().findFirst().orElse(null);
    operationProcessor.stopAsync();

    // Let the in-flight DataFrame append succeed while the processor is stopping, then wait for termination.
    manualAppendResult.complete(new TestLogAddress(1));
    operationProcessor.awaitTerminated();

    // The operation must have been cancelled by the shutdown; neither successful completion
    // nor any other exception type is acceptable.
    AssertExtensions.assertThrows(
            "Operation did not fail with the right exception.",
            () -> completionFuture.completion,
            ex -> ex instanceof CancellationException || ex instanceof ObjectClosedException);
}
Usage example of io.pravega.segmentstore.storage.DurableDataLog from the Pravega project (pravega/pravega): class BookKeeperLogTests, method testAppendPermanentFailures.
/**
 * Verifies that when a Bookie stays down for the whole duration of the writes, every append
 * fails: each one must complete with a RetriesExhaustedException/DurableDataLogException or
 * a CancellationException, and once a cancellation is observed, every subsequent failure
 * must also be a cancellation.
 */
@Test
public void testAppendPermanentFailures() throws Exception {
    try (DurableDataLog log = createDurableDataLog()) {
        log.initialize(TIMEOUT);
        List<CompletableFuture<LogAddress>> writeFutures = new ArrayList<>();
        try {
            // Take a bookie down; every write issued from now on is expected to fail.
            stopFirstBookie();

            // Issue all the appends in parallel, without waiting on any of them.
            int writeCount = getWriteCount();
            for (int i = 0; i < writeCount; i++) {
                writeFutures.add(log.append(new ByteArraySegment(getWriteData()), TIMEOUT));
            }

            // Check the futures in order: each must fail, and non-cancellation failures are
            // only acceptable before the first cancellation shows up.
            AtomicBoolean sawCancellation = new AtomicBoolean(false);
            for (CompletableFuture<LogAddress> writeFuture : writeFutures) {
                AssertExtensions.assertThrows(
                        "Write did not fail correctly.",
                        () -> writeFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS),
                        ex -> {
                            if (ex instanceof CancellationException) {
                                sawCancellation.set(true);
                                return true;
                            }
                            return !sawCancellation.get()
                                    && (ex instanceof RetriesExhaustedException || ex instanceof DurableDataLogException);
                        });
            }
        } finally {
            // Bring the bookie back regardless of the outcome, so later tests are unaffected.
            restartFirstBookie();
        }
    }
}
Usage example of io.pravega.segmentstore.storage.DurableDataLog from the Pravega project (pravega/pravega): class BookKeeperLogTests, method testAppendTransientBookieFailure.
/**
 * Verifies that appends issued while a Bookie is temporarily down are retried and eventually
 * succeed once the Bookie is restarted, and that the written data can be read back correctly
 * in the order established by the assigned LogAddresses.
 */
@Test
public void testAppendTransientBookieFailure() throws Exception {
    TreeMap<LogAddress, byte[]> expectedData = new TreeMap<>(Comparator.comparingLong(LogAddress::getSequence));
    try (DurableDataLog log = createDurableDataLog()) {
        log.initialize(TIMEOUT);
        ArrayList<byte[]> payloads = new ArrayList<>();
        ArrayList<CompletableFuture<LogAddress>> writeFutures = new ArrayList<>();
        try {
            // Take a bookie down so that the appends below initially fail and must be retried.
            stopFirstBookie();

            // Issue the appends in parallel, keeping each payload so we can verify it later.
            int writeCount = getWriteCount();
            for (int i = 0; i < writeCount; i++) {
                byte[] payload = getWriteData();
                writeFutures.add(log.append(new ByteArraySegment(payload), TIMEOUT));
                payloads.add(payload);
            }
        } finally {
            // Bring the bookie back while the appends are still in flight.
            restartFirstBookie();
        }

        // All appends should now complete; map each payload to the LogAddress the log assigned it.
        List<LogAddress> addresses = Futures.allOfWithResults(writeFutures).join();
        for (int i = 0; i < payloads.size(); i++) {
            expectedData.put(addresses.get(i), payloads.get(i));
        }
    }

    // Re-open the log and verify the data reads back in sequence order.
    try (DurableDataLog log = createDurableDataLog()) {
        log.initialize(TIMEOUT);
        verifyReads(log, expectedData);
    }
}
End of aggregated usage examples.