use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
the class DurableLogTests method testTailReads.
/**
* Tests the ability to block reads if the read is at the tail and no more data is available (for now).
*/
@Test
public void testTailReads() throws Exception {
    final int operationCount = 10;
    final long segmentId = 1;
    final String segmentName = Long.toString(segmentId);
    // Set up a DurableLog and start it.
    @Cleanup ContainerSetup setup = new ContainerSetup(executorService());
    @Cleanup DurableLog durableLog = setup.createDurableLog();
    durableLog.startAsync().awaitRunning();
    // Create a segment, which will be used for testing later.
    UpdateableSegmentMetadata segmentMetadata = setup.metadata.mapStreamSegmentId(segmentName, segmentId);
    segmentMetadata.setLength(0);
    segmentMetadata.setStorageLength(0);
    // Issue a bunch of read operations and make sure they are all blocked (since there is no data yet).
    ArrayList<CompletableFuture<Iterator<Operation>>> readFutures = new ArrayList<>();
    for (int i = 0; i < operationCount; i++) {
        long afterSeqNo = i + 1;
        CompletableFuture<Iterator<Operation>> readFuture = durableLog.read(afterSeqNo, operationCount, TIMEOUT);
        Assert.assertFalse("read() returned a completed future when there is no data available (afterSeqNo = " + afterSeqNo + ").", readFuture.isDone());
        readFutures.add(readFuture);
    }
    // Add one operation at a time and, after each, verify that the correct read futures completed.
    OperationComparer operationComparer = new OperationComparer(true);
    for (int appendId = 0; appendId < operationCount; appendId++) {
        Operation operation = new StreamSegmentAppendOperation(segmentId, ("foo" + Integer.toString(appendId)).getBytes(), null);
        durableLog.add(operation, TIMEOUT).join();
        for (int readId = 0; readId < readFutures.size(); readId++) {
            val readFuture = readFutures.get(readId);
            boolean expectedComplete = readId <= appendId;
            if (expectedComplete) {
                // The internal callback happens asynchronously, so give this future a little time to complete.
                readFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            }
            Assert.assertEquals(String.format("Unexpected read completion status for read after seqNo %d after adding op with seqNo %d", readId + 1, operation.getSequenceNumber()), expectedComplete, readFutures.get(readId).isDone());
            if (appendId == readId) {
                // Verify that the read result matches the operation.
                Iterator<Operation> readResult = readFuture.join();
                // Verify that we actually have a non-empty read result.
                Assert.assertTrue(String.format("Empty read result for read after seqNo %d after adding op with seqNo %d", readId + 1, operation.getSequenceNumber()), readResult.hasNext());
                // Verify the read result.
                Operation readOp = readResult.next();
                operationComparer.assertEquals(String.format("Unexpected result operation for read after seqNo %d after adding op with seqNo %d", readId + 1, operation.getSequenceNumber()), operation, readOp);
                // Verify that we don't have more than one read result.
                Assert.assertFalse(String.format("Not expecting more than one result for read after seqNo %d after adding op with seqNo %d", readId + 1, operation.getSequenceNumber()), readResult.hasNext());
            }
        }
    }
    // Verify that pending tail reads are cancelled when the DurableLog is closed.
    CompletableFuture<Iterator<Operation>> readFuture = durableLog.read(operationCount + 2, operationCount, TIMEOUT);
    Assert.assertFalse("read() returned a completed future when there is no data available (afterSeqNo = " + (operationCount + 2) + ").", readFuture.isDone());
    durableLog.stopAsync().awaitTerminated();
    Assert.assertTrue("A tail read was not cancelled when the DurableLog was stopped.", readFuture.isCompletedExceptionally());
}
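The contract exercised by this test, shown as a minimal standalone sketch (not from the Pravega sources; it assumes a started DurableLog with a mapped segment and the same TIMEOUT constant as above): a read positioned at the tail stays incomplete until an operation with a higher sequence number is added.
CompletableFuture<Iterator<Operation>> tailRead = durableLog.read(1, 1, TIMEOUT);
Assert.assertFalse("Tail read completed before any data was available.", tailRead.isDone());
// Adding an operation assigns it a sequence number past the read position, unblocking the read.
durableLog.add(new StreamSegmentAppendOperation(segmentId, "data".getBytes(), null), TIMEOUT).join();
Iterator<Operation> result = tailRead.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
Assert.assertTrue("Expected the unblocked read to return the new operation.", result.hasNext());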
use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
the class DurableLogTests method testAddWithDataCorruptionFailures.
/**
* Tests the ability of the DurableLog to process Operations when a simulated DataCorruptionException
* is generated.
*/
@Test
public void testAddWithDataCorruptionFailures() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 80;
    int failAtOperationIndex = 123;
    // Set up a DurableLog and start it.
    @Cleanup ContainerSetup setup = new ContainerSetup(executorService());
    DurableLogConfig config = setup.durableLogConfig == null ? ContainerSetup.defaultDurableLogConfig() : setup.durableLogConfig;
    CorruptedDurableLog.FAIL_AT_INDEX.set(failAtOperationIndex);
    val durableLog = new CorruptedDurableLog(config, setup);
    durableLog.startAsync().awaitRunning();
    Assert.assertNotNull("Internal error: could not grab a pointer to the created TestDurableDataLog.", setup.dataLog.get());
    // Generate some test data (we need to do this after we start the DurableLog because, in the process of
    // recovery, it wipes away all existing metadata).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, setup.metadata);
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, durableLog);
    // Wait for the service to fail (and make sure it failed).
    AssertExtensions.assertThrows("DurableLog did not shut down with failure.", () -> ServiceListeners.awaitShutdown(durableLog, true), ex -> ex instanceof IllegalStateException);
    Assert.assertEquals("Unexpected service state after encountering DataCorruptionException.", Service.State.FAILED, durableLog.state());
    // Verify that the "right" operations failed, while the others succeeded.
    int successCount = 0;
    boolean encounteredFirstFailure = false;
    for (int i = 0; i < completionFutures.size(); i++) {
        OperationWithCompletion oc = completionFutures.get(i);
        if (!oc.operation.canSerialize()) {
            // Non-serializable operations (i.e., ProbeOperations) always complete normally.
            continue;
        }
        // Once an operation has failed (in our scenario), no subsequent operation can succeed.
        if (encounteredFirstFailure) {
            Assert.assertTrue("Encountered successful operation after a failed operation.", oc.completion.isCompletedExceptionally());
        }
        // Operations are grouped into DataFrames, so the operation at the failure index may drag down
        // neighboring operations that were serialized with it, which is why it's hard to determine
        // precisely what the first expected failed operation is.
        if (oc.completion.isCompletedExceptionally()) {
            // If we do find a failed one in this area, make sure it failed with a DataCorruptionException.
            AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, super::isExpectedExceptionForDataCorruption);
            encounteredFirstFailure = true;
        } else {
            successCount++;
        }
    }
    AssertExtensions.assertGreaterThan("No operation succeeded.", 0, successCount);
    // There is no point in performing any other checks. A DataCorruptionException means the Metadata (and the
    // general state of the Container) is in an undefined state.
}
use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
the class MemoryStateUpdaterTests method populate.
private ArrayList<Operation> populate(MemoryStateUpdater updater, int segmentCount, int operationCountPerType) throws DataCorruptionException {
    ArrayList<Operation> operations = new ArrayList<>();
    long offset = 0;
    for (int i = 0; i < segmentCount; i++) {
        for (int j = 0; j < operationCountPerType; j++) {
            StreamSegmentMapOperation mapOp = new StreamSegmentMapOperation(StreamSegmentInformation.builder().name("a").length(i * j).build());
            mapOp.setStreamSegmentId(i);
            operations.add(mapOp);
            StreamSegmentAppendOperation appendOp = new StreamSegmentAppendOperation(i, Integer.toString(i).getBytes(), null);
            appendOp.setStreamSegmentOffset(offset);
            offset += appendOp.getData().length;
            operations.add(appendOp);
            operations.add(new MergeTransactionOperation(i, j));
        }
    }
    for (int i = 0; i < operations.size(); i++) {
        operations.get(i).setSequenceNumber(i);
    }
    updater.process(operations.iterator());
    return operations;
}
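An illustrative call, mirroring how testProcess() below uses this helper; the expected size follows from the three operations (map, append, merge) added per (segment, iteration) pair.
ArrayList<Operation> operations = populate(updater, 10, 5);
// 10 segments * 5 iterations * 3 operations each = 150 operations, with sequence numbers 0..149.
Assert.assertEquals(150, operations.size());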
use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
the class MemoryStateUpdaterTests method testProcess.
/**
* Tests the functionality of the process() method.
*/
@Test
public void testProcess() throws Exception {
    int segmentCount = 10;
    int operationCountPerType = 5;
    // Add everything to the in-memory operation log and to the ReadIndex (via append and beginMerge).
    SequencedItemList<Operation> opLog = new SequencedItemList<>();
    ArrayList<TestReadIndex.MethodInvocation> methodInvocations = new ArrayList<>();
    TestReadIndex readIndex = new TestReadIndex(methodInvocations::add);
    AtomicInteger flushCallbackCallCount = new AtomicInteger();
    MemoryStateUpdater updater = new MemoryStateUpdater(opLog, readIndex, flushCallbackCallCount::incrementAndGet);
    ArrayList<Operation> operations = populate(updater, segmentCount, operationCountPerType);
    // Verify they were properly processed.
    int triggerFutureCount = (int) methodInvocations.stream().filter(mi -> mi.methodName.equals(TestReadIndex.TRIGGER_FUTURE_READS)).count();
    int addCount = methodInvocations.size() - triggerFutureCount;
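    // populate() adds three operations per (segment, iteration) pair: a map, an append and a merge. Only the
    // two StorageOperations (append, merge) reach the ReadIndex, so the expected number of ReadIndex adds is
    // operations.size() - segmentCount * operationCountPerType.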
Assert.assertEquals("Unexpected number of items added to ReadIndex.", operations.size() - segmentCount * operationCountPerType, addCount);
Assert.assertEquals("Unexpected number of calls to the ReadIndex triggerFutureReads method.", 1, triggerFutureCount);
Assert.assertEquals("Unexpected number of calls to the flushCallback provided in the constructor.", 1, flushCallbackCallCount.get());
// Verify add calls.
Iterator<Operation> logIterator = opLog.read(-1, operations.size());
int currentIndex = -1;
int currentReadIndex = -1;
while (logIterator.hasNext()) {
currentIndex++;
Operation expected = operations.get(currentIndex);
Operation actual = logIterator.next();
if (expected instanceof StorageOperation) {
currentReadIndex++;
TestReadIndex.MethodInvocation invokedMethod = methodInvocations.get(currentReadIndex);
if (expected instanceof StreamSegmentAppendOperation) {
Assert.assertTrue("StreamSegmentAppendOperation was not added as a CachedStreamSegmentAppendOperation to the Memory Log.", actual instanceof CachedStreamSegmentAppendOperation);
StreamSegmentAppendOperation appendOp = (StreamSegmentAppendOperation) expected;
Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was not added to the ReadIndex.", TestReadIndex.APPEND, invokedMethod.methodName);
Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", appendOp.getStreamSegmentId(), invokedMethod.args.get("streamSegmentId"));
Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", appendOp.getStreamSegmentOffset(), invokedMethod.args.get("offset"));
Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", appendOp.getData(), invokedMethod.args.get("data"));
} else if (expected instanceof MergeTransactionOperation) {
MergeTransactionOperation mergeOp = (MergeTransactionOperation) expected;
Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was not added to the ReadIndex.", TestReadIndex.BEGIN_MERGE, invokedMethod.methodName);
Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", mergeOp.getStreamSegmentId(), invokedMethod.args.get("targetStreamSegmentId"));
Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", mergeOp.getStreamSegmentOffset(), invokedMethod.args.get("offset"));
Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", mergeOp.getTransactionSegmentId(), invokedMethod.args.get("sourceStreamSegmentId"));
}
}
}
// Verify triggerFutureReads args.
@SuppressWarnings("unchecked") Collection<Long> triggerSegmentIds = (Collection<Long>) methodInvocations.stream().filter(mi -> mi.methodName.equals(TestReadIndex.TRIGGER_FUTURE_READS)).findFirst().get().args.get("streamSegmentIds");
val expectedSegmentIds = operations.stream().filter(op -> op instanceof SegmentOperation).map(op -> ((SegmentOperation) op).getStreamSegmentId()).collect(Collectors.toSet());
AssertExtensions.assertContainsSameElements("ReadIndex.triggerFutureReads() was called with the wrong set of StreamSegmentIds.", expectedSegmentIds, triggerSegmentIds);
// Test DataCorruptionException.
AssertExtensions.assertThrows("MemoryStateUpdater accepted an operation that was out of order.", // This does not have a SequenceNumber set, so it should trigger a DCE.
() -> updater.process(new MergeTransactionOperation(1, 2)), ex -> ex instanceof DataCorruptionException);
}
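A focused sketch of the append transformation the loop above verifies (hypothetical, assuming a freshly wired MemoryStateUpdater and the TestReadIndex stub from this test class): appends land in the in-memory log as CachedStreamSegmentAppendOperation stubs, while the raw data is routed to the ReadIndex.
SequencedItemList<Operation> freshLog = new SequencedItemList<>();
MemoryStateUpdater freshUpdater = new MemoryStateUpdater(freshLog, new TestReadIndex(mi -> { }), () -> { });
StreamSegmentAppendOperation append = new StreamSegmentAppendOperation(1, "data".getBytes(), null);
append.setStreamSegmentOffset(0);
append.setSequenceNumber(0);
freshUpdater.process(Collections.<Operation>singletonList(append).iterator());
// The memory log now holds a cached stub rather than the original append operation.
Operation stored = freshLog.read(-1, 1).next();
Assert.assertTrue("Expected a CachedStreamSegmentAppendOperation in the memory log.", stored instanceof CachedStreamSegmentAppendOperation);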
use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
the class OperationLogTestBase method generateOperations.
// endregion
// region Operation Generation
/**
 * Generates a List of Log Operations that contains the following operations, in the "correct" order.
 * <ol>
 * <li> A set of StreamSegmentAppend Operations (based on the streamSegmentIds arg).
 * <li> A set of StreamSegmentAppend Operations for the Transactions (based on the transactionIds arg).
 * <li> A set of StreamSegmentSeal and MergeTransaction Operations (based on the transactionIds and mergeTransactions args).
 * <li> A set of StreamSegmentSeal Operations (based on the sealStreamSegments arg).
 * </ol>
 */
List<Operation> generateOperations(Collection<Long> streamSegmentIds, Map<Long, Long> transactionIds, int appendsPerStreamSegment, int metadataCheckpointsEvery, boolean mergeTransactions, boolean sealStreamSegments) {
    List<Operation> result = new ArrayList<>();
    // Add some appends.
    int appendId = 0;
    for (long streamSegmentId : streamSegmentIds) {
        for (int i = 0; i < appendsPerStreamSegment; i++) {
            val attributes = Collections.singletonList(new AttributeUpdate(UUID.randomUUID(), AttributeUpdateType.Replace, i));
            result.add(new StreamSegmentAppendOperation(streamSegmentId, generateAppendData(appendId), attributes));
            addCheckpointIfNeeded(result, metadataCheckpointsEvery);
            appendId++;
        }
    }
    addProbe(result);
    // Add appends to the Transactions.
    for (long transactionId : transactionIds.keySet()) {
        for (int i = 0; i < appendsPerStreamSegment; i++) {
            val attributes = Collections.singletonList(new AttributeUpdate(UUID.randomUUID(), AttributeUpdateType.Replace, i));
            result.add(new StreamSegmentAppendOperation(transactionId, generateAppendData(appendId), attributes));
            addCheckpointIfNeeded(result, metadataCheckpointsEvery);
            appendId++;
        }
    }
    addProbe(result);
    // Merge Transactions.
    if (mergeTransactions) {
        // Key = TransactionId, Value = Parent Id.
        transactionIds.entrySet().forEach(mapping -> {
            result.add(new StreamSegmentSealOperation(mapping.getKey()));
            addCheckpointIfNeeded(result, metadataCheckpointsEvery);
            result.add(new MergeTransactionOperation(mapping.getValue(), mapping.getKey()));
            addCheckpointIfNeeded(result, metadataCheckpointsEvery);
        });
        addProbe(result);
    }
    // Seal the StreamSegments.
    if (sealStreamSegments) {
        streamSegmentIds.forEach(streamSegmentId -> {
            result.add(new StreamSegmentSealOperation(streamSegmentId));
            addCheckpointIfNeeded(result, metadataCheckpointsEvery);
        });
        addProbe(result);
    }
    return result;
}
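An example invocation, matching the call from testAddWithDataCorruptionFailures() above: appends only (no transactions, merges, or seals), with a metadata checkpoint injected every METADATA_CHECKPOINT_EVERY operations.
List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), 80, METADATA_CHECKPOINT_EVERY, false, false);
// 10 segments * 80 appends = 800 appends, plus interleaved MetadataCheckpointOperations and trailing probes.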