Use of io.pravega.test.common.IntentionalException in project pravega by pravega.
The class SegmentAggregatorTests, method testReconcileAppends.
// endregion
// region Unknown outcome operation reconciliation
/**
 * Tests the ability of the SegmentAggregator to reconcile AppendOperations (Cached/NonCached).
 */
@Test
public void testReconcileAppends() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = 1000;
    final int failEvery = 3;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();

    // The writes always succeed, but every few times we return some random error, indicating that they didn't.
    AtomicInteger writeCount = new AtomicInteger();
    AtomicReference<Exception> setException = new AtomicReference<>();
    context.storage.setWriteInterceptor((segmentName, offset, data, length, storage) -> {
        if (writeCount.incrementAndGet() % failEvery == 0) {
            // Time to wreak some havoc.
            return storage.write(writeHandle(segmentName), offset, data, length, TIMEOUT).thenAccept(v -> {
                IntentionalException ex = new IntentionalException(String.format("S=%s,O=%d,L=%d", segmentName, offset, length));
                setException.set(ex);
                throw ex;
            });
        } else {
            setException.set(null);
            return null;
        }
    });

    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    // Force a flush by incrementing the time by a lot.
    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
    while (context.segmentAggregator.mustFlush()) {
        // Call flush() and inspect the result.
        FlushResult flushResult = null;
        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
            } else {
                // Only expecting a BadOffsetException after our own injected exception.
                Throwable realEx = Exceptions.unwrap(ex);
                Assert.assertTrue("Unexpected exception thrown: " + realEx, realEx instanceof BadOffsetException);
            }
        }

        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
        }

        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
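For context, the injected error type, io.pravega.test.common.IntentionalException, is a plain unchecked exception with no special behavior. A minimal stand-in, assuming only the two constructors used in these examples (the real class may have more), would look like this:

// Minimal stand-in for io.pravega.test.common.IntentionalException (sketch only).
// Being unchecked lets the tests throw it from lambdas and interceptors without
// changing any method signatures.
public class IntentionalException extends RuntimeException {

    public IntentionalException() {
        super();
    }

    public IntentionalException(String message) {
        super(message);
    }
}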
Use of io.pravega.test.common.IntentionalException in project pravega by pravega.
The class SegmentAggregatorTests, method testReconcileSeal.
/**
 * Tests the ability of the SegmentAggregator to reconcile StreamSegmentSealOperations.
 */
@Test
public void testReconcileSeal() throws Exception {
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();

    // The seal succeeds, but we throw some random error, indicating that it didn't.
    context.storage.setSealInterceptor((segmentName, storage) -> {
        storage.seal(writeHandle(segmentName), TIMEOUT).join();
        throw new IntentionalException(String.format("S=%s", segmentName));
    });

    // Attempt to seal.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // First time: attempt to flush/seal, which must end in failure.
    AssertExtensions.assertThrows("IntentionalException did not propagate to flush() caller.",
            () -> context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS),
            ex -> Exceptions.unwrap(ex) instanceof IntentionalException);
    context.storage.setSealInterceptor(null);

    // Second time: we are in reconciliation mode, so flush must succeed (and update internal state based on storage).
    context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Verify outcome.
    Assert.assertTrue("Segment not marked as sealed in storage (in metadata).", context.segmentAggregator.getMetadata().isSealedInStorage());
    Assert.assertTrue("SegmentAggregator not closed.", context.segmentAggregator.isClosed());
}
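Both reconciliation tests rely on interceptor hooks on the test storage (setWriteInterceptor, setSealInterceptor) to simulate calls whose outcome is unknown to the writer: the operation is performed for real, then an IntentionalException is reported anyway. A rough sketch of how such a hook could be wired into a delegating wrapper (the class and field names below are illustrative, not Pravega's actual TestStorage) is:

// Illustrative delegating wrapper (hypothetical; not the actual TestStorage used above).
// Before delegating a seal, it consults an optional interceptor that may perform the real
// seal itself and then throw, which is how testReconcileSeal fakes an unknown outcome.
class InterceptableStorage {
    private final Storage wrapped;
    private volatile BiConsumer<String, Storage> sealInterceptor;

    InterceptableStorage(Storage wrapped) {
        this.wrapped = wrapped;
    }

    void setSealInterceptor(BiConsumer<String, Storage> interceptor) {
        this.sealInterceptor = interceptor;
    }

    CompletableFuture<Void> seal(SegmentHandle handle, Duration timeout) {
        BiConsumer<String, Storage> interceptor = this.sealInterceptor;
        if (interceptor != null) {
            // May throw (e.g. IntentionalException) after having already sealed the segment.
            interceptor.accept(handle.getSegmentName(), this.wrapped);
        }
        return this.wrapped.seal(handle, timeout);
    }
}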
Use of io.pravega.test.common.IntentionalException in project pravega by pravega.
The class OperationProcessorTests, method testWithOperationSerializationFailures.
/**
 * Tests the ability of the OperationProcessor to process Operations when Serialization errors happen.
 */
@Test
public void testWithOperationSerializationFailures() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 80;
    // Fail every X appends encountered.
    int failAppendFrequency = 7;
    @Cleanup TestContext context = new TestContext();

    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);

    // Replace some of the Append Operations with a FailedAppendOperation. Some operations fail at the beginning,
    // some at the end of the serialization.
    int appendCount = 0;
    HashSet<Integer> failedOperationIndices = new HashSet<>();
    for (int i = 0; i < operations.size(); i++) {
        if (operations.get(i) instanceof StreamSegmentAppendOperation) {
            if ((appendCount++) % failAppendFrequency == 0) {
                operations.set(i, new FailedStreamSegmentAppendOperation((StreamSegmentAppendOperation) operations.get(i)));
                failedOperationIndices.add(i);
            }
        }
    }

    // Setup an OperationProcessor and start it.
    @Cleanup TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();

    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);

    // Wait for all such operations to complete. We are expecting exceptions, so verify that we do.
    AssertExtensions.assertThrows("No operations failed.", OperationWithCompletion.allOf(completionFutures)::join, ex -> ex instanceof IntentionalException);

    // Verify that the "right" operations failed, while the others succeeded.
    for (int i = 0; i < completionFutures.size(); i++) {
        OperationWithCompletion oc = completionFutures.get(i);
        if (failedOperationIndices.contains(i)) {
            AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, ex -> ex instanceof IntentionalException);
        } else {
            // Verify no exception was thrown.
            oc.completion.join();
        }
    }

    performLogOperationChecks(completionFutures, context.memoryLog, dataLog, context.metadata);
    performMetadataChecks(streamSegmentIds, new HashSet<>(), new HashMap<>(), completionFutures, context.metadata, false, false);
    performReadIndexChecks(completionFutures, context.readIndex);
    operationProcessor.stopAsync().awaitTerminated();
}
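FailedStreamSegmentAppendOperation is a test-only wrapper whose serialization deliberately fails, so the IntentionalException surfaces inside the durable-log append path rather than when the operation is submitted. A self-contained illustration of that idea (the interface and method names below are hypothetical, not Pravega's actual Operation API) might be:

// Hypothetical illustration only: an operation that behaves normally until asked to
// serialize itself, at which point it throws IntentionalException. The real
// FailedStreamSegmentAppendOperation wraps a StreamSegmentAppendOperation instead.
interface SerializableOperation {
    void serialize(java.io.DataOutputStream target) throws java.io.IOException;
}

class FailingOperation implements SerializableOperation {
    @Override
    public void serialize(java.io.DataOutputStream target) {
        // Simulate a serialization error; the OperationProcessor should fail this
        // operation and continue processing the rest.
        throw new IntentionalException("intentional serialization failure");
    }
}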
Use of io.pravega.test.common.IntentionalException in project pravega by pravega.
The class CompletableOperationTests, method testFail.
/**
 * Tests the functionality of the fail() method.
 */
@Test
public void testFail() {
    MetadataCheckpointOperation op = new MetadataCheckpointOperation();
    AtomicBoolean successCallbackCalled = new AtomicBoolean();
    AtomicBoolean failureCallbackCalled = new AtomicBoolean();
    CompletableOperation co = new CompletableOperation(op, seqNo -> successCallbackCalled.set(true), ex -> failureCallbackCalled.set(true));
    co.fail(new IntentionalException());
    Assert.assertTrue("Failure callback was not invoked for valid fail() call.", failureCallbackCalled.get());
    Assert.assertFalse("Success callback invoked for valid fail() call.", successCallbackCalled.get());
}
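A small variation on the same check, using only the constructor and fail() call shown above, captures the Throwable handed to the failure callback so the assertion can verify it is the injected IntentionalException rather than merely noting that some failure callback ran. This is a sketch, not part of the original test class:

// Sketch of a stricter check: capture the exception passed to the failure callback and
// assert it is the exact IntentionalException that was injected via fail().
MetadataCheckpointOperation op = new MetadataCheckpointOperation();
AtomicReference<Throwable> capturedFailure = new AtomicReference<>();
CompletableOperation co = new CompletableOperation(op, seqNo -> { }, capturedFailure::set);
IntentionalException injected = new IntentionalException();
co.fail(injected);
Assert.assertSame("Failure callback did not receive the injected exception.", injected, capturedFailure.get());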
Use of io.pravega.test.common.IntentionalException in project pravega by pravega.
The class RedirectedReadResultEntryTests, method testGetContent.
/**
 * Tests the ability to retry (and switch base) when a failure occurred in getContent().
 */
@Test
public void testGetContent() {
    // More than one retry (by design, it will only retry one time; the next time it will simply throw).
    MockReadResultEntry t1 = new MockReadResultEntry(1, 1);
    RedirectedReadResultEntry e1 = new TestRedirectedReadResultEntry(t1, 0, (o, l) -> t1, executorService());
    t1.getContent().completeExceptionally(new StreamSegmentNotExistsException("foo"));
    AssertExtensions.assertThrows("getContent() did not throw when attempting to retry more than once.", e1::getContent, ex -> ex instanceof StreamSegmentNotExistsException);

    // Ineligible exception.
    MockReadResultEntry t2 = new MockReadResultEntry(1, 1);
    RedirectedReadResultEntry e2 = new TestRedirectedReadResultEntry(t2, 0, (o, l) -> t2, executorService());
    t2.getContent().completeExceptionally(new IntentionalException());
    AssertExtensions.assertThrows("getContent() did not throw when an ineligible exception got thrown.", e2::getContent, ex -> ex instanceof IntentionalException);

    // Given back another Redirect.
    MockReadResultEntry t3 = new MockReadResultEntry(1, 1);
    RedirectedReadResultEntry e3 = new TestRedirectedReadResultEntry(t3, 0, (o, l) -> e1, executorService());
    t3.getContent().completeExceptionally(new StreamSegmentNotExistsException("foo"));
    AssertExtensions.assertThrows("getContent() did not throw when a retry yielded another RedirectReadResultEntry.", e3::getContent, ex -> ex instanceof StreamSegmentNotExistsException);

    // Given redirect function fails.
    MockReadResultEntry t4 = new MockReadResultEntry(1, 1);
    t4.getContent().completeExceptionally(new StreamSegmentNotExistsException("foo"));
    RedirectedReadResultEntry e4 = new TestRedirectedReadResultEntry(t4, 0, (o, l) -> {
        throw new IntentionalException();
    }, executorService());
    AssertExtensions.assertThrows("getContent() did not throw when retry failed.", e4::getContent, ex -> ex instanceof StreamSegmentNotExistsException);

    // One that works correctly.
    MockReadResultEntry t5Bad = new MockReadResultEntry(1, 1);
    t5Bad.getContent().completeExceptionally(new StreamSegmentNotExistsException("foo"));
    MockReadResultEntry t5Good = new MockReadResultEntry(2, 1);
    t1.setCompletionCallback(i -> {
        // Do nothing.
    });
    t5Good.getContent().complete(new ReadResultEntryContents(new ByteArrayInputStream(new byte[1]), 1));
    RedirectedReadResultEntry e5 = new TestRedirectedReadResultEntry(t5Bad, 1, (o, l) -> t5Good, executorService());
    val finalResult = e5.getContent().join();
    Assert.assertEquals("Unexpected result from getCompletionCallback after successful redirect.", t5Bad.getCompletionCallback(), t5Good.getCompletionCallback());
    Assert.assertEquals("Unexpected result from getRequestedReadLength after successful redirect.", t5Bad.getRequestedReadLength(), e5.getRequestedReadLength());
    Assert.assertEquals("Unexpected result from getStreamSegmentOffset after successful redirect.", t5Good.getStreamSegmentOffset(), e5.getStreamSegmentOffset());
    Assert.assertEquals("Unexpected result from getContent after successful redirect.", t5Good.getContent().join(), finalResult);
}
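The behavior exercised above boils down to a retry-once rule: a failure of an eligible type (here StreamSegmentNotExistsException) triggers exactly one switch to a redirected entry, while ineligible exceptions such as IntentionalException, as well as a second failure, propagate to the caller. A condensed, hypothetical sketch of that rule (not Pravega's actual RedirectedReadResultEntry implementation) is:

// Hypothetical condensation of the retry rule (illustrative only): run the first attempt;
// if it fails with an eligible exception, fall back to the redirected attempt exactly once;
// otherwise, or if the fallback also fails, let the error propagate to the caller.
static <T> CompletableFuture<T> retryOnce(Supplier<CompletableFuture<T>> firstAttempt,
                                          Supplier<CompletableFuture<T>> redirectedAttempt) {
    return firstAttempt.get()
            .handle((result, ex) -> {
                if (ex == null) {
                    return CompletableFuture.completedFuture(result);
                }
                Throwable real = (ex instanceof CompletionException && ex.getCause() != null) ? ex.getCause() : ex;
                if (real instanceof StreamSegmentNotExistsException) {
                    // Eligible failure: retry once against the redirected entry.
                    return redirectedAttempt.get();
                }
                // Ineligible failure: propagate as-is.
                CompletableFuture<T> failed = new CompletableFuture<>();
                failed.completeExceptionally(real);
                return failed;
            })
            .thenCompose(f -> f);
}

The final assertions in the test also verify that, after a successful redirect, the new base entry carries the original's completion callback and that the redirected entry reports the original requested read length alongside the new offset and content.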