Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class DurableLogTests, method performLogOperationChecks.
// endregion
// region Helpers
private void performLogOperationChecks(List<OperationWithCompletion> operations, DurableLog durableLog) {
    // Log Operation based checks.
    long lastSeqNo = -1;
    val successfulOperations = operations.stream()
            .filter(oc -> !oc.completion.isCompletedExceptionally())
            .map(oc -> oc.operation)
            .filter(Operation::canSerialize)
            .collect(Collectors.toList());

    // Writing to the DurableLog is done asynchronously, so wait for the last operation to arrive there before reading.
    durableLog.read(successfulOperations.get(successfulOperations.size() - 1).getSequenceNumber() - 1, 1, TIMEOUT).join();

    // Issue the read for the entire log now.
    Iterator<Operation> logIterator = durableLog.read(-1L, operations.size() + 1, TIMEOUT).join();
    verifyFirstItemIsMetadataCheckpoint(logIterator);
    OperationComparer comparer = new OperationComparer(true);
    for (Operation expectedOp : successfulOperations) {
        // Verify that the operations have been completed and assigned sequential Sequence Numbers.
        AssertExtensions.assertGreaterThan("Operations were not assigned sequential Sequence Numbers.", lastSeqNo, expectedOp.getSequenceNumber());
        lastSeqNo = expectedOp.getSequenceNumber();

        // MemoryLog: verify that the operations match those in the expected list.
        Assert.assertTrue("No more items left to read from DurableLog. Expected: " + expectedOp, logIterator.hasNext());

        // OK to use assertEquals because we are actually expecting the same object here.
        comparer.assertEquals("Unexpected Operation in MemoryLog.", expectedOp, logIterator.next());
    }
}
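OperationWithCompletion is referenced throughout these snippets but not defined in this listing. Judging from the usages (field access to oc.operation and oc.completion, a two-argument constructor, and a static allOf aggregator), it pairs an Operation with the future returned by DurableLog.add. The following is a minimal sketch inferred from those call sites, not the actual Pravega class:

// Hypothetical reconstruction, inferred from the usages in this listing;
// the real OperationWithCompletion in the Pravega test sources may differ.
static class OperationWithCompletion {
    final Operation operation;
    final CompletableFuture<Void> completion;

    OperationWithCompletion(Operation operation, CompletableFuture<Void> completion) {
        this.operation = operation;
        this.completion = completion;
    }

    // Aggregates all completion futures into a single future.
    static CompletableFuture<Void> allOf(Collection<OperationWithCompletion> operations) {
        return CompletableFuture.allOf(operations.stream()
                .map(oc -> oc.completion)
                .toArray(CompletableFuture[]::new));
    }
}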
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class DurableLogTests, method processOperations.
@SneakyThrows
private List<OperationWithCompletion> processOperations(Collection<Operation> operations, DurableLog durableLog, int waitEvery) {
    List<OperationWithCompletion> completionFutures = new ArrayList<>();
    int index = 0;
    for (Operation o : operations) {
        index++;
        CompletableFuture<Void> completionFuture;
        try {
            completionFuture = durableLog.add(o, TIMEOUT);
        } catch (Exception ex) {
            completionFuture = Futures.failedFuture(ex);
        }

        completionFutures.add(new OperationWithCompletion(o, completionFuture));
        if (index % waitEvery == 0) {
            completionFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
        }
    }
    return completionFutures;
}
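The tests below call processOperations with only two arguments, so an overload without the waitEvery parameter evidently exists. A plausible sketch follows; the delegation and the Integer.MAX_VALUE default are assumptions, not shown in this listing:

// Hypothetical two-argument overload, assuming it delegates with a waitEvery
// value large enough that the periodic blocking wait never fires; the real
// overload in DurableLogTests may differ.
private List<OperationWithCompletion> processOperations(Collection<Operation> operations, DurableLog durableLog) {
    return processOperations(operations, durableLog, Integer.MAX_VALUE);
}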
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class DurableLogTests, method testAddWithDataLogFailures.
/**
* Tests the ability of the DurableLog to process Operations when there are DataLog write failures.
*/
@Test
public void testAddWithDataLogFailures() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 80;

    // Fail (asynchronously) after X DataFrame commits (to DataLog).
    int failAsyncAfter = 5;

    // Set up a DurableLog and start it.
    @Cleanup
    ContainerSetup setup = new ContainerSetup(executorService());
    @Cleanup
    DurableLog durableLog = setup.createDurableLog();
    durableLog.startAsync().awaitRunning();
    Assert.assertNotNull("Internal error: could not grab a pointer to the created TestDurableDataLog.", setup.dataLog.get());

    // Generate some test data (we need to do this after we start the DurableLog because, in the process of
    // recovery, it wipes away all existing metadata).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, setup.metadata);
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
    ErrorInjector<Exception> aSyncErrorInjector = new ErrorInjector<>(count -> count >= failAsyncAfter, () -> new DurableDataLogException("intentional"));
    setup.dataLog.get().setAppendErrorInjectors(null, aSyncErrorInjector);

    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, durableLog);

    // Wait for all such operations to complete. We are expecting exceptions, so verify that we do get them.
    AssertExtensions.assertThrows("No operations failed.", OperationWithCompletion.allOf(completionFutures)::join, super::isExpectedExceptionForNonDataCorruption);

    // Wait for the DurableLog to shut down with failure.
    ServiceListeners.awaitShutdown(durableLog, TIMEOUT, false);
    Assert.assertEquals("Expected the DurableLog to fail after DurableDataLogException encountered.", Service.State.FAILED, durableLog.state());

    // We can't really check the DurableLog or the DurableDataLog contents since they are both closed.
    performMetadataChecks(streamSegmentIds, new HashSet<>(), new HashMap<>(), completionFutures, setup.metadata, false, false);
    performReadIndexChecks(completionFutures, setup.readIndex);
}
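The ErrorInjector used above pairs an invocation-count predicate with an exception supplier, so appends start failing once the count crosses failAsyncAfter. A minimal illustration of that shape follows; the class name and method here are invented for illustration, and the real Pravega ErrorInjector test utility may differ in API and semantics:

// Illustrative only: a counting error injector that throws once the
// invocation count satisfies the predicate.
class CountingErrorInjector<T extends Exception> {
    private final java.util.function.Predicate<Integer> countTrigger;
    private final java.util.function.Supplier<T> exceptionSupplier;
    private int count = 0;

    CountingErrorInjector(java.util.function.Predicate<Integer> countTrigger,
                          java.util.function.Supplier<T> exceptionSupplier) {
        this.countTrigger = countTrigger;
        this.exceptionSupplier = exceptionSupplier;
    }

    // Invoked before each append; throws once the trigger condition is met.
    synchronized void throwIfNecessary() throws T {
        count++;
        if (countTrigger.test(count)) {
            throw exceptionSupplier.get();
        }
    }
}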
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class DurableLogTests, method testTruncateWithoutRecovery.
// endregion
// region Truncation
/**
* Tests the truncate() method without doing any recovery.
*/
@Test
public void testTruncateWithoutRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Set up a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup
    ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Hook up a listener to figure out when truncation actually happens.
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));

        // Generate some test data (we need to do this after we start the DurableLog because, in the process of
        // recovery, it wipes away all existing metadata).
        HashSet<Long> streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);

        // Add a MetadataCheckpointOperation at the end to ensure we can truncate everything.
        queuedOperations.add(new MetadataCheckpointOperation());
        List<OperationWithCompletion> completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Get a list of all the operations, before truncation.
        List<Operation> originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        boolean fullTruncationPossible = false;

        // At the end, verify all operations and all entries in the DataLog were truncated.
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            truncationOccurred.set(false);
            if (currentOperation instanceof MetadataCheckpointOperation) {
                // Figure out whether the operation we're about to truncate up to is actually the first in the log;
                // in that case, we should not expect any truncation.
                boolean isTruncationPointFirstOperation = durableLog.read(-1, 1, TIMEOUT).join().next() instanceof MetadataCheckpointOperation;

                // Perform the truncation.
                durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
                if (!isTruncationPointFirstOperation) {
                    Assert.assertTrue("No truncation occurred even though a valid Truncation Point was passed: " + currentOperation.getSequenceNumber(), truncationOccurred.get());
                }

                // Verify that all operations up to, and including, this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                } else {
                    // Sometimes the Truncation Point is on the same DataFrame as other data, and it's the last DataFrame;
                    // in that case, it cannot be truncated, since truncating the frame would mean losing the Checkpoint as well.
                    fullTruncationPossible = !reader.hasNext();
                }
            } else {
                // Verify we are not allowed to truncate on invalid Truncation Points.
                AssertExtensions.assertThrows("DurableLog allowed truncation on a non-MetadataCheckpointOperation.", () -> durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT), ex -> ex instanceof IllegalArgumentException);

                // Verify the Operation Log is still intact.
                Iterator<Operation> reader = durableLog.read(-1, 1, TIMEOUT).join();
                Assert.assertTrue("No elements left in the log even though no truncation occurred.", reader.hasNext());
                Operation firstOp = reader.next();
                AssertExtensions.assertLessThanOrEqual("It appears that Operations were removed from the Log even though no truncation happened.", currentOperation.getSequenceNumber(), firstOp.getSequenceNumber());
            }
        }

        // Verify that we can still queue operations to the DurableLog and that they can be read.
        // In this case we'll just queue some StreamSegmentMapOperations.
        StreamSegmentMapOperation newOp = new StreamSegmentMapOperation(StreamSegmentInformation.builder().name("foo").build());
        if (!fullTruncationPossible) {
            // We were not able to do a full truncation before. Do one now, since we are guaranteed to have a new DataFrame available.
            MetadataCheckpointOperation lastCheckpoint = new MetadataCheckpointOperation();
            durableLog.add(lastCheckpoint, TIMEOUT).join();
            durableLog.truncate(lastCheckpoint.getSequenceNumber(), TIMEOUT).join();
        }

        durableLog.add(newOp, TIMEOUT).join();

        // Full Checkpoint + Storage Checkpoint (auto-added) + new op.
        final int expectedOperationCount = 3;
        List<Operation> newOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        Assert.assertEquals("Unexpected number of operations added after full truncation.", expectedOperationCount, newOperations.size());
        Assert.assertTrue("Expecting the first operation after full truncation to be a MetadataCheckpointOperation.", newOperations.get(0) instanceof MetadataCheckpointOperation);
        Assert.assertTrue("Expecting a StorageMetadataCheckpointOperation to be auto-added after full truncation.", newOperations.get(1) instanceof StorageMetadataCheckpointOperation);
        Assert.assertEquals("Unexpected Operation encountered after full truncation.", newOp, newOperations.get(2));

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
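readUpToSequenceNumber is used in the test above but not shown in this listing. Based on how it is called (it returns all operations currently in the log, up to a given sequence number), a plausible sketch looks like the following; this is an inference from the call sites, not the actual Pravega helper:

// Hypothetical reconstruction of readUpToSequenceNumber, inferred from its
// call sites above; the real implementation in DurableLogTests may differ.
private List<Operation> readUpToSequenceNumber(DurableLog durableLog, long upToSeqNo) {
    List<Operation> result = new ArrayList<>();
    long afterSeqNo = -1;
    while (afterSeqNo < upToSeqNo) {
        Iterator<Operation> reader = durableLog.read(afterSeqNo, Integer.MAX_VALUE, TIMEOUT).join();
        if (!reader.hasNext()) {
            break;
        }
        while (reader.hasNext()) {
            Operation op = reader.next();
            afterSeqNo = op.getSequenceNumber();
            result.add(op);
        }
    }
    return result;
}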
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
From the class DurableLog, method read.
@Override
public CompletableFuture<Iterator<Operation>> read(long afterSequenceNumber, int maxCount, Duration timeout) {
    ensureRunning();
    log.debug("{}: Read (AfterSequenceNumber = {}, MaxCount = {}).", this.traceObjectId, afterSequenceNumber, maxCount);
    Iterator<Operation> logReadResult = this.inMemoryOperationLog.read(afterSequenceNumber, maxCount);
    if (logReadResult.hasNext()) {
        // Data is readily available.
        return CompletableFuture.completedFuture(logReadResult);
    } else {
        // Register a tail read and return its future.
        CompletableFuture<Iterator<Operation>> result = null;
        Operation lastOp;
        synchronized (this.tailReads) {
            lastOp = this.inMemoryOperationLog.getLast();
            if (lastOp == null || lastOp.getSequenceNumber() <= afterSequenceNumber) {
                // We cannot fulfill this at this moment; let it be triggered when we do get a new operation.
                TailRead tailRead = new TailRead(afterSequenceNumber, maxCount, timeout, this.executor);
                result = tailRead.future;
                this.tailReads.add(tailRead);
                result.whenComplete((r, ex) -> unregisterTailRead(tailRead));
            }
        }

        if (result == null) {
            // If we get here, it means that we have since received an operation (after the original call, but before
            // entering the synchronized block above); re-issue the read and return the result.
            logReadResult = this.inMemoryOperationLog.read(afterSequenceNumber, maxCount);
            assert logReadResult.hasNext() : String.format("Unable to read anything after SeqNo %d, even though last operation SeqNo == %d", afterSequenceNumber, lastOp == null ? -1 : lastOp.getSequenceNumber());
            result = CompletableFuture.completedFuture(logReadResult);
        }
        return result;
    }
}
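The tail-read path above means a caller can follow the log: when the read position is already at the tail, read() returns a future that completes only once a new operation arrives (or the timeout elapses). A minimal consumer loop sketching that usage follows; the loop structure, shouldContinue, and process are illustrative, not from the Pravega codebase:

// Illustrative tail-following loop over a DurableLog.
long lastSeqNo = -1;
while (shouldContinue()) { // hypothetical stop condition
    Iterator<Operation> ops = durableLog.read(lastSeqNo, 100, Duration.ofSeconds(10)).join();
    while (ops.hasNext()) {
        Operation op = ops.next();
        lastSeqNo = op.getSequenceNumber();
        process(op); // hypothetical consumer callback
    }
}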