Use of io.pravega.common.Exceptions in project pravega by pravega.
Class StreamSegmentContainer, method deleteStreamSegment.
@Override
public CompletableFuture<Void> deleteStreamSegment(String streamSegmentName, Duration timeout) {
    ensureRunning();
    logRequest("deleteStreamSegment", streamSegmentName);
    this.metrics.deleteSegment();
    TimeoutTimer timer = new TimeoutTimer(timeout);

    // metadata.deleteStreamSegment will delete the given StreamSegment and all Transactions associated with it.
    // It returns the metadata for the StreamSegments that were deleted.
    // As soon as this happens, all operations that deal with those segments will start throwing appropriate exceptions
    // or ignore the segments altogether (such as StorageWriter).
    Collection<SegmentMetadata> deletedSegments = this.metadata.deleteStreamSegment(streamSegmentName);

    val deletionFutures = new ArrayList<CompletableFuture<Void>>();
    for (SegmentMetadata toDelete : deletedSegments) {
        deletionFutures.add(this.storage
                .openWrite(toDelete.getName())
                .thenComposeAsync(handle -> this.storage.delete(handle, timer.getRemaining()), this.executor)
                .thenComposeAsync(v -> this.stateStore.remove(toDelete.getName(), timer.getRemaining()), this.executor)
                .exceptionally(ex -> {
                    ex = Exceptions.unwrap(ex);
                    if (ex instanceof StreamSegmentNotExistsException && toDelete.isTransaction()) {
                        // A Transaction may have already been merged into its parent (and deleted from
                        // Storage), but its metadata did not get a chance to get updated.
                        return null;
                    }

                    throw new CompletionException(ex);
                }));
    }

    notifyMetadataRemoved(deletedSegments);
    return Futures.allOf(deletionFutures);
}
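The exceptionally handler above must call Exceptions.unwrap before the instanceof check because CompletableFuture delivers failures wrapped in CompletionException, so testing the raw throwable would never match StreamSegmentNotExistsException. A minimal, self-contained sketch of that unwrapping pattern; NotFoundException and the local unwrap helper are illustrative stand-ins, not Pravega APIs:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class UnwrapSketch {

    // Hypothetical stand-in for a recoverable "not found" condition.
    static class NotFoundException extends RuntimeException {
    }

    // Peels CompletionException wrappers off to expose the original cause,
    // mirroring what io.pravega.common.Exceptions.unwrap is used for above.
    static Throwable unwrap(Throwable ex) {
        while (ex instanceof CompletionException && ex.getCause() != null) {
            ex = ex.getCause();
        }
        return ex;
    }

    public static void main(String[] args) {
        CompletableFuture<Void> f = CompletableFuture
                .runAsync(() -> { throw new NotFoundException(); })
                .exceptionally(ex -> {
                    // Without unwrapping, ex is a CompletionException here, not a NotFoundException.
                    if (unwrap(ex) instanceof NotFoundException) {
                        return null; // Recoverable: treat "already deleted" as success.
                    }
                    throw new CompletionException(ex);
                });
        f.join(); // Completes normally because the failure was deemed recoverable.
    }
}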
Use of io.pravega.common.Exceptions in project pravega by pravega.
Class Futures, method doWhileLoop.
/**
 * Executes a code fragment returning a CompletableFuture while a condition on the returned value is satisfied.
 *
 * @param loopBody  A Supplier that returns a CompletableFuture which represents the body of the loop. This
 *                  supplier is invoked every time the loop body needs to execute.
 * @param condition Predicate that indicates whether to proceed with the loop or not.
 * @param executor  An Executor that is used to execute the condition and the loop support code.
 * @param <T>       Return type of the CompletableFuture returned by the loopBody.
 * @return A CompletableFuture that, when completed, indicates the loop terminated without any exception. If
 * either the loopBody or condition throw/return Exceptions, these will be set as the result of this returned Future.
 */
public static <T> CompletableFuture<Void> doWhileLoop(Supplier<CompletableFuture<T>> loopBody, Predicate<T> condition, Executor executor) {
    CompletableFuture<Void> result = new CompletableFuture<>();

    // We implement the do-while loop using a regular loop, but we execute one iteration before we create the actual Loop object.
    // Since this method has slightly different arguments than loop(), we need to make one adjustment:
    // * After each iteration, we get the result and run it through 'condition' and use that to decide whether to continue.
    AtomicBoolean canContinue = new AtomicBoolean();
    Consumer<T> iterationResultHandler = ir -> canContinue.set(condition.test(ir));
    loopBody.get()
            .thenAccept(iterationResultHandler)
            .thenRunAsync(() -> {
                Loop<T> loop = new Loop<>(canContinue::get, loopBody, iterationResultHandler, result, executor);
                executor.execute(loop);
            }, executor)
            .exceptionally(ex -> {
                // Handle exceptions from the first iteration.
                result.completeExceptionally(ex);
                return null;
            });
    return result;
}
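A usage sketch for doWhileLoop: drain a queue asynchronously, polling until a poll returns nothing. The queue and executor here are hypothetical scaffolding, not part of the Futures API:

import java.util.List;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executor;
import java.util.concurrent.ForkJoinPool;
import io.pravega.common.concurrent.Futures;

public class DoWhileLoopSketch {
    public static void main(String[] args) {
        Queue<String> queue = new ConcurrentLinkedQueue<>(List.of("a", "b", "c"));
        Executor executor = ForkJoinPool.commonPool();

        CompletableFuture<Void> drained = Futures.doWhileLoop(
                () -> CompletableFuture.supplyAsync(queue::poll, executor), // Loop body: dequeue one item.
                item -> item != null,                                       // Continue while something was dequeued.
                executor);
        drained.join(); // Completes once poll() returns null, i.e., the queue is empty.
    }
}

Because this is a do-while, the loop body runs at least once even if the queue starts out empty.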
Use of io.pravega.common.Exceptions in project pravega by pravega.
Class DataFrameReaderTests, method testReadsNoFailure.
/**
 * Tests the happy case: DataFrameReader can read from a DataLog when there are no exceptions.
 */
@Test
public void testReadsNoFailure() throws Exception {
    // Fail every X records (write-wise).
    int failEvery = 7;
    ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(100, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
    records.addAll(DataFrameTestHelpers.generateLogItems(100, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MAX_SIZE, records.size()));

    // Have every 'failEvery'-th record fail after writing 90% of itself.
    for (int i = 0; i < records.size(); i += failEvery) {
        records.get(i).failSerializationAfterComplete(0.9, new IOException("intentional " + i));
    }

    HashSet<Integer> failedIndices = new HashSet<>();
    try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, executorService())) {
        dataLog.initialize(TIMEOUT);
        BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback =
                (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
        val args = new DataFrameBuilder.Args(Callbacks::doNothing, Callbacks::doNothing, errorCallback, executorService());
        try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
            for (int i = 0; i < records.size(); i++) {
                try {
                    b.append(records.get(i));
                } catch (IOException ex) {
                    failedIndices.add(i);
                }
            }
            b.flush();
        }

        TestSerializer logItemFactory = new TestSerializer();
        DataFrameReader<TestLogItem> reader = new DataFrameReader<>(dataLog, logItemFactory, CONTAINER_ID);
        List<TestLogItem> readItems = readAll(reader);
        checkReadResult(records, failedIndices, readItems);
    }
}
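The readAll helper is not part of this excerpt. One plausible shape, assuming the reader behaves as an iterator whose getNext() returns null at the end of the log and whose entries expose the deserialized item via getItem(); both names are assumptions, not verified API:

// Sketch only: getNext() and ReadResult.getItem() are assumed names, not verified against this excerpt's API.
private List<TestLogItem> readAll(DataFrameReader<TestLogItem> reader) throws Exception {
    ArrayList<TestLogItem> result = new ArrayList<>();
    DataFrameReader.ReadResult<TestLogItem> entry;
    while ((entry = reader.getNext()) != null) {
        result.add(entry.getItem());
    }
    return result;
}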
Use of io.pravega.common.Exceptions in project pravega by pravega.
Class StorageWriterTests, method testWithStorageCorruptionErrors.
/**
 * Tests the StorageWriter in a scenario where the Storage component throws data corruption exceptions (i.e.,
 * BadOffsetException) and, even after reconciliation, the data is still corrupt.
 */
@Test
public void testWithStorageCorruptionErrors() throws Exception {
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG);

    // Create a bunch of segments and Transactions.
    ArrayList<Long> segmentIds = createSegments(context);

    // Append data.
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    appendDataBreadthFirst(segmentIds, segmentContents, context);

    // Corrupt (one segment should suffice).
    byte[] corruptionData = "foo".getBytes();
    SegmentHandle corruptedSegmentHandle = InMemoryStorage.newHandle(context.metadata.getStreamSegmentMetadata(segmentIds.get(0)).getName(), false);
    Supplier<Exception> exceptionSupplier = () -> {
        // Corrupt data. We use an internal method (append) to atomically write data at the end of the segment.
        // GetLength+Write would not work well because there may be concurrent writes that modify the data between
        // requesting the length and attempting to write, thus causing the corruption attempt to fail.
        // NOTE: append() is a synchronous method here. If append() were to become async,
        // care must be taken not to block a thread while waiting for it.
        context.storage.append(corruptedSegmentHandle, new ByteArrayInputStream(corruptionData), corruptionData.length);

        // Return some other kind of exception.
        return new TimeoutException("Intentional");
    };

    // We only try to corrupt data once.
    AtomicBoolean corruptionHappened = new AtomicBoolean();
    context.storage.setWriteAsyncErrorInjector(new ErrorInjector<>(c -> !corruptionHappened.getAndSet(true), exceptionSupplier));

    AssertExtensions.assertThrows("StorageWriter did not fail when a fatal data corruption error occurred.", () -> {
        // The corruption may happen early enough that "awaitRunning" isn't complete yet. In that case,
        // the writer will never reach its 'Running' state. As such, we need to make sure at least one of these
        // will throw (either start or, if the failure happened after start, make sure it eventually fails and shuts down).
        context.writer.startAsync().awaitRunning();
        ServiceListeners.awaitShutdown(context.writer, TIMEOUT, true);
    }, ex -> ex instanceof IllegalStateException);
    ServiceListeners.awaitShutdown(context.writer, TIMEOUT, false);
    Assert.assertTrue("Unexpected failure cause for StorageWriter.", Exceptions.unwrap(context.writer.failureCause()) instanceof ReconciliationFailureException);
}
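The one-shot injection above hinges on AtomicBoolean.getAndSet: the predicate c -> !corruptionHappened.getAndSet(true) evaluates to true exactly once, atomically, no matter how many concurrent writes test it. A minimal self-contained sketch of that trick (the surrounding class is illustrative, not Pravega's ErrorInjector):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;

public class OneShotPredicateSketch {
    public static void main(String[] args) {
        // getAndSet(true) returns the previous value, so the negation is true only on the first call.
        AtomicBoolean fired = new AtomicBoolean();
        Predicate<Integer> shouldInject = c -> !fired.getAndSet(true);

        System.out.println(shouldInject.test(1)); // true  -> inject the error on this write
        System.out.println(shouldInject.test(2)); // false -> later writes pass through untouched
        System.out.println(shouldInject.test(3)); // false
    }
}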
Use of io.pravega.common.Exceptions in project pravega by pravega.
Class SegmentAggregator, method flushPendingAppends.
/**
* Flushes all Append Operations that can be flushed up to the maximum allowed flush size.
*
* @param timeout Timeout for the operation.
* @return A CompletableFuture that, when completed, will contain the result from the flush operation.
*/
private CompletableFuture<FlushResult> flushPendingAppends(Duration timeout) {
    // Gather an InputStream made up of all the operations we can flush.
    FlushArgs flushArgs;
    try {
        flushArgs = getFlushArgs();
    } catch (DataCorruptionException ex) {
        return Futures.failedFuture(ex);
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushPendingAppends");
    if (flushArgs.getLength() == 0) {
        // Nothing to flush.
        FlushResult result = new FlushResult();
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
        return CompletableFuture.completedFuture(result);
    }

    // Flush them.
    InputStream inputStream = flushArgs.getStream();
    return this.storage
            .write(this.handle.get(), this.metadata.getStorageLength(), inputStream, flushArgs.getLength(), timeout)
            .thenApplyAsync(v -> {
                FlushResult result = updateStatePostFlush(flushArgs);
                LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
                return result;
            }, this.executor)
            .exceptionally(ex -> {
                if (Exceptions.unwrap(ex) instanceof BadOffsetException) {
                    // We attempted to write at an offset that already contained other data. This can happen for a
                    // number of reasons, but we do not have enough information here to determine why. We need to
                    // enter reconciliation mode, and hope for the best.
                    setState(AggregatorState.ReconciliationNeeded);
                }

                // Rethrow all exceptions.
                throw new CompletionException(ex);
            });
}
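Note how the method converts the checked DataCorruptionException from getFlushArgs() into a failed future instead of throwing: callers that compose on the returned CompletableFuture then observe synchronous and asynchronous failures through the same channel. A minimal sketch of that idiom, using the JDK's CompletableFuture.failedFuture (Java 9+) in place of Pravega's Futures.failedFuture; gatherPayload() is a hypothetical stand-in for getFlushArgs():

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

public class FailFastIntoFutureSketch {

    // Hypothetical synchronous gathering step that can fail, standing in for getFlushArgs().
    static byte[] gatherPayload() throws IOException {
        throw new IOException("corrupt pending data");
    }

    static CompletableFuture<Integer> flush() {
        byte[] payload;
        try {
            payload = gatherPayload();
        } catch (IOException ex) {
            // Surface the synchronous failure through the future, not up the call stack.
            return CompletableFuture.failedFuture(ex);
        }
        return CompletableFuture.completedFuture(payload.length);
    }

    public static void main(String[] args) {
        flush().whenComplete((len, ex) ->
                System.out.println(ex != null ? "failed: " + ex : "flushed " + len + " bytes"));
    }
}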