Use of io.pravega.segmentstore.contracts.StreamSegmentSealedException in project pravega by pravega.
The class OperationProcessorTests, method testWithInvalidOperations:
/**
 * Tests the ability of the OperationProcessor to process Operations when encountering invalid operations (such as
 * appends to StreamSegments that do not exist or to those that are sealed). This covers the following exceptions:
 * * StreamSegmentNotExistsException
 * * StreamSegmentSealedException
 * * General MetadataUpdateException.
 */
@Test
public void testWithInvalidOperations() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 40;

    // We are going to prematurely seal this StreamSegment.
    long sealedStreamSegmentId = 6;

    // We are going to prematurely mark this StreamSegment as deleted.
    long deletedStreamSegmentId = 8;

    // This is a bogus StreamSegment that does not exist.
    long nonExistentStreamSegmentId;
    @Cleanup TestContext context = new TestContext();

    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
    nonExistentStreamSegmentId = streamSegmentIds.size();
    streamSegmentIds.add(nonExistentStreamSegmentId);
    context.metadata.getStreamSegmentMetadata(sealedStreamSegmentId).markSealed();
    context.metadata.getStreamSegmentMetadata(deletedStreamSegmentId).markDeleted();
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);

    // Set up an OperationProcessor and start it.
    @Cleanup TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();

    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);

    // Wait for all such operations to complete. We are expecting exceptions, so verify that we do get them.
    AssertExtensions.assertThrows(
            "No operations failed.",
            OperationWithCompletion.allOf(completionFutures)::join,
            ex -> ex instanceof MetadataUpdateException || ex instanceof StreamSegmentException);

    HashSet<Long> streamSegmentsWithNoContents = new HashSet<>();
    streamSegmentsWithNoContents.add(sealedStreamSegmentId);
    streamSegmentsWithNoContents.add(deletedStreamSegmentId);
    streamSegmentsWithNoContents.add(nonExistentStreamSegmentId);

    // Verify that the "right" operations failed, while the others succeeded.
    for (OperationWithCompletion oc : completionFutures) {
        if (oc.operation instanceof StorageOperation) {
            long streamSegmentId = ((StorageOperation) oc.operation).getStreamSegmentId();
            if (streamSegmentsWithNoContents.contains(streamSegmentId)) {
                Assert.assertTrue("Completion future for invalid StreamSegment " + streamSegmentId + " did not complete exceptionally.", oc.completion.isCompletedExceptionally());
                Predicate<Throwable> errorValidator;
                if (streamSegmentId == sealedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentSealedException;
                } else if (streamSegmentId == deletedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentNotExistsException;
                } else {
                    errorValidator = ex -> ex instanceof MetadataUpdateException;
                }
                AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, errorValidator);
                continue;
            }
        }

        // If we get here, we must verify no exception was thrown.
        oc.completion.join();
    }

    performLogOperationChecks(completionFutures, context.memoryLog, dataLog, context.metadata);
    performMetadataChecks(streamSegmentIds, streamSegmentsWithNoContents, new HashMap<>(), completionFutures, context.metadata, false, false);
    performReadIndexChecks(completionFutures, context.readIndex);
    operationProcessor.stopAsync().awaitTerminated();
}
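For orientation, here is a minimal, hypothetical sketch (not part of the Pravega sources) of how a caller might classify such a failed completion future. Exceptions.unwrap is the same Pravega helper used by PravegaRequestProcessor.handleException below; the helper name classifyFailure and its shape are assumptions for illustration.

    // Hypothetical helper: classify the failure of an operation's completion future.
    // Exceptions.unwrap (io.pravega.common.Exceptions) strips CompletionException/
    // ExecutionException wrappers so the instanceof checks see the root cause.
    static void classifyFailure(CompletableFuture<Void> completion) {
        completion.exceptionally(ex -> {
            Throwable cause = Exceptions.unwrap(ex);
            if (cause instanceof StreamSegmentSealedException) {
                // Appends past a seal are permanently rejected; retrying cannot help.
            } else if (cause instanceof StreamSegmentNotExistsException) {
                // The segment was deleted or never created.
            } else {
                // e.g. a MetadataUpdateException for an unmapped (bogus) segment id.
            }
            return null;
        });
    }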
Use of io.pravega.segmentstore.contracts.StreamSegmentSealedException in project pravega by pravega.
The class ContainerReadIndexTests, method testFutureReads:
/**
 * Tests the behavior of Future Reads. Scenarios tested include:
 * * Regular appends
 * * Segment sealing
 * * Transaction merging.
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testFutureReads() throws Exception {
    // About 40-50% of the entire segment length.
    final int nonSealReadLimit = APPENDS_PER_SEGMENT * 25;

    // Trigger Future Reads every this many appends.
    final int triggerFutureReadsEvery = 3;
    @Cleanup TestContext context = new TestContext();
    ArrayList<Long> segmentIds = createSegments(context);
    HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, context);
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    HashMap<Long, ByteArrayOutputStream> readContents = new HashMap<>();
    HashSet<Long> segmentsToSeal = new HashSet<>();
    ArrayList<AsyncReadResultProcessor> readProcessors = new ArrayList<>();
    HashMap<Long, TestReadResultHandler> entryHandlers = new HashMap<>();

    // 1. Put all segment ids into one list, for easier appends (but still keep the original lists at hand - we'll need them later).
    ArrayList<Long> allSegmentIds = new ArrayList<>(segmentIds);
    transactionsBySegment.values().forEach(allSegmentIds::addAll);
    AtomicInteger writeCount = new AtomicInteger();
    Runnable triggerFutureReadsCallback = () -> {
        if (writeCount.incrementAndGet() % triggerFutureReadsEvery == 0) {
            context.readIndex.triggerFutureReads(segmentIds);
        }
    };
    // 2. Set up tail reads. The first half of the segments read up to Integer.MAX_VALUE and will later be sealed
    // (which ends their tail reads); the other half read up to nonSealReadLimit and should stop upon reaching the limit.
    for (int i = 0; i < segmentIds.size(); i++) {
        long segmentId = segmentIds.get(i);
        ByteArrayOutputStream readContentsStream = new ByteArrayOutputStream();
        readContents.put(segmentId, readContentsStream);
        ReadResult readResult;
        if (i < segmentIds.size() / 2) {
            // We're going to seal this one at one point.
            segmentsToSeal.add(segmentId);
            readResult = context.readIndex.read(segmentId, 0, Integer.MAX_VALUE, TIMEOUT);
        } else {
            // Just a regular one, nothing special.
            readResult = context.readIndex.read(segmentId, 0, nonSealReadLimit, TIMEOUT);
        }

        // The Read callback is only accumulating data in this test; we will then compare it against the real data.
        TestReadResultHandler entryHandler = new TestReadResultHandler(readContentsStream, TIMEOUT);
        entryHandlers.put(segmentId, entryHandler);
        readProcessors.add(AsyncReadResultProcessor.process(readResult, entryHandler, executorService()));
    }

    // 3. Add a bunch of writes.
    appendData(allSegmentIds, segmentContents, context, triggerFutureReadsCallback);

    // 4. Merge all the Transactions.
    beginMergeTransactions(transactionsBySegment, segmentContents, context);
    completeMergeTransactions(transactionsBySegment, context);
    context.readIndex.triggerFutureReads(segmentIds);

    // 5. Add more appends (to the parent segments).
    for (int i = 0; i < 5; i++) {
        for (long segmentId : segmentIds) {
            UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
            byte[] data = getAppendData(segmentMetadata.getName(), segmentId, i, writeCount.incrementAndGet());

            // Make sure we increase the Length prior to appending; the ReadIndex checks for this.
            long offset = segmentMetadata.getLength();
            segmentMetadata.setLength(offset + data.length);
            context.readIndex.append(segmentId, offset, data);
            recordAppend(segmentId, data, segmentContents);
            triggerFutureReadsCallback.run();
        }
    }

    // 6. Seal those segments that we need to seal.
    segmentsToSeal.forEach(segmentId -> context.metadata.getStreamSegmentMetadata(segmentId).markSealed());

    // Trigger future reads on all segments we know about; some may not have had a trigger in a while (see callback above).
    context.readIndex.triggerFutureReads(segmentIds);

    // Now wait for all the reads to complete, and verify their results against the expected output.
    Futures.allOf(entryHandlers.values().stream().map(TestReadResultHandler::getCompleted).collect(Collectors.toList())).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    readProcessors.forEach(AsyncReadResultProcessor::close);
    // Check to see if any errors got thrown (and caught) during the reading process.
    for (Map.Entry<Long, TestReadResultHandler> e : entryHandlers.entrySet()) {
        Throwable err = e.getValue().getError().get();
        if (err != null) {
            // The next check (see below) will verify if the segments were properly read.
            if (!(err instanceof StreamSegmentSealedException && segmentsToSeal.contains(e.getKey()))) {
                Assert.fail("Unexpected error happened while processing Segment " + e.getKey() + ": " + err);
            }
        }
    }
    // Compare, byte-by-byte, the outcome of the tail reads.
    Assert.assertEquals("Unexpected number of segments were read.", segmentContents.size(), readContents.size());
    for (long segmentId : segmentIds) {
        boolean isSealed = segmentsToSeal.contains(segmentId);
        byte[] expectedData = segmentContents.get(segmentId).toByteArray();
        byte[] actualData = readContents.get(segmentId).toByteArray();
        int expectedLength = isSealed ? expectedData.length : nonSealReadLimit;
        Assert.assertEquals("Unexpected read length for segment " + segmentId, expectedLength, actualData.length);
        AssertExtensions.assertArrayEquals("Unexpected read contents for segment " + segmentId, expectedData, 0, actualData, 0, actualData.length);
    }
}
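To make the tail-read mechanism concrete, here is a condensed, hypothetical sketch written against the same ReadIndex API the test calls (readIndex.read, as above). Entry-type names and the content-future details can vary across Pravega versions, so treat this as an illustration rather than the canonical pattern; the test itself delegates this loop to AsyncReadResultProcessor with a TestReadResultHandler.

    // Hypothetical sketch: a read extending past the current end of the segment
    // yields "Future" entries whose content completes only on a later append;
    // sealing the segment first fails them with StreamSegmentSealedException,
    // which is exactly the error tolerated in the verification loop above.
    static void drainTailRead(ReadIndex readIndex, long segmentId, int maxLength, Duration timeout) {
        ReadResult readResult = readIndex.read(segmentId, 0, maxLength, timeout);
        while (readResult.hasNext()) {
            ReadResultEntry entry = readResult.next();
            if (entry.getType() == ReadResultEntryType.EndOfStreamSegment) {
                break; // Reached the end of a sealed segment.
            }
            if (!entry.getContent().isDone()) {
                // Fetch from Storage, or register a future read for data not yet written.
                entry.requestContent(timeout);
            }
            entry.getContent().join(); // Completes on append, or fails once the segment is sealed.
        }
    }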
Use of io.pravega.segmentstore.contracts.StreamSegmentSealedException in project pravega by pravega.
The class PravegaRequestProcessor, method handleException:
private Void handleException(long requestId, String segment, String operation, Throwable u) {
    if (u == null) {
        IllegalStateException exception = new IllegalStateException("No exception to handle.");
        log.error("Error (Segment = '{}', Operation = '{}')", segment, operation, exception);
        throw exception;
    }
    u = Exceptions.unwrap(u);
    if (u instanceof StreamSegmentExistsException) {
        log.info("Segment '{}' already exists and cannot perform operation '{}'.", segment, operation);
        connection.send(new SegmentAlreadyExists(requestId, segment));
    } else if (u instanceof StreamSegmentNotExistsException) {
        log.warn("Segment '{}' does not exist and cannot perform operation '{}'.", segment, operation);
        connection.send(new NoSuchSegment(requestId, segment));
    } else if (u instanceof StreamSegmentSealedException) {
        log.info("Segment '{}' is sealed and cannot perform operation '{}'.", segment, operation);
        connection.send(new SegmentIsSealed(requestId, segment));
    } else if (u instanceof ContainerNotFoundException) {
        int containerId = ((ContainerNotFoundException) u).getContainerId();
        log.warn("Wrong host. Segment = '{}' (Container {}) is not owned. Operation = '{}'.", segment, containerId, operation);
        connection.send(new WrongHost(requestId, segment, ""));
    } else if (u instanceof CancellationException) {
        log.info("Closing connection {} while performing {} due to {}.", connection, operation, u.getMessage());
        connection.close();
    } else if (u instanceof AuthenticationException) {
        log.warn("Authentication error during '{}'.", operation);
        connection.send(new WireCommands.AuthTokenCheckFailed(requestId));
        connection.close();
    } else if (u instanceof UnsupportedOperationException) {
        log.warn("Unsupported Operation '{}'.", operation, u);
        connection.send(new OperationUnsupported(requestId, operation));
    } else if (u instanceof BadOffsetException) {
        BadOffsetException badOffset = (BadOffsetException) u;
        connection.send(new SegmentIsTruncated(requestId, segment, badOffset.getExpectedOffset()));
    } else {
        log.error("Error (Segment = '{}', Operation = '{}')", segment, operation, u);

        // Closing the connection should reinitialize things and hopefully fix the problem.
        connection.close();
        throw new IllegalStateException("Unknown exception.", u);
    }
    return null;
}
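Note that handleException returns Void rather than void so it can terminate a CompletableFuture chain, while truly unexpected errors are rethrown after closing the connection. A hypothetical call site, with the store call and wire command chosen for illustration, would look like:

    // Hypothetical call site: attach handleException as the exceptional stage of
    // an async store call so that every failure is mapped to a wire-level reply.
    segmentStore.sealStreamSegment(segmentName, TIMEOUT)
                .thenAccept(v -> connection.send(new WireCommands.SegmentSealed(requestId, segmentName)))
                .exceptionally(e -> handleException(requestId, segmentName, "seal segment", e));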