use of java.util.concurrent.CompletionException in project pravega by pravega.
the class TaskTest method testLocking.
@Test
public void testLocking() {
    TestTasks testTasks = new TestTasks(taskMetadataStore, executor, HOSTNAME);
    CompletableFuture<Void> first = testTasks.testStreamLock(SCOPE, stream1);
    CompletableFuture<Void> second = testTasks.testStreamLock(SCOPE, stream1);
    try {
        first.getNow(null);
        second.getNow(null);
    } catch (CompletionException ce) {
        assertTrue(ce.getCause() instanceof LockFailedException);
    }
}
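The assertion works because CompletableFuture.getNow surfaces an exceptional completion as an unchecked CompletionException wrapping the original cause, and simply returns the supplied default if the future has not completed yet. A minimal, self-contained sketch of that behavior, using IllegalStateException as a stand-in for Pravega's LockFailedException:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class GetNowWrappingSketch {
    public static void main(String[] args) {
        // Complete a future exceptionally, as the second lock attempt would in the test.
        CompletableFuture<Void> second = new CompletableFuture<>();
        second.completeExceptionally(new IllegalStateException("lock already held"));

        try {
            // getNow does not block: it throws CompletionException (wrapping the cause)
            // for an exceptional completion, or returns the default if still pending.
            second.getNow(null);
        } catch (CompletionException ce) {
            System.out.println(ce.getCause() instanceof IllegalStateException); // true
        }
    }
}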
use of java.util.concurrent.CompletionException in project pravega by pravega.
the class OperationProcessor method doRun.
@Override
protected CompletableFuture<Void> doRun() {
    // The QueueProcessor is responsible for processing externally added Operations. It starts when the
    // OperationProcessor starts and is shut down as soon as doStop() is invoked.
    val queueProcessor = Futures.loop(
            this::isRunning,
            () -> throttle()
                    .thenComposeAsync(v -> this.operationQueue.take(MAX_READ_AT_ONCE), this.executor)
                    .thenAcceptAsync(this::processOperations, this.executor),
            this.executor);

    // The CommitProcessor is responsible for processing those Operations that have already been committed to
    // DurableDataLog and now need to be added to the in-memory State.
    // As opposed to the QueueProcessor, this needs to process all pending commits and not discard them, even when
    // we receive a stop signal (from doStop()), otherwise we could be left with an inconsistent in-memory state.
    val commitProcessor = Futures.loop(
            () -> isRunning() || this.commitQueue.size() > 0,
            () -> this.commitQueue.take(MAX_COMMIT_QUEUE_SIZE).thenAcceptAsync(this::processCommits, this.executor),
            this.executor)
            .whenComplete((r, ex) -> {
                // The CommitProcessor is done. Safe to close its queue now, regardless of whether it failed or
                // shut down normally.
                this.commitQueue.close();
                if (ex != null) {
                    throw new CompletionException(ex);
                }
            });
    return CompletableFuture.allOf(queueProcessor, commitProcessor).exceptionally(this::iterationErrorHandler);
}
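The whenComplete block above shows a common cleanup-then-propagate pattern: the close() runs regardless of the outcome, and rethrowing the failure wrapped in a CompletionException keeps the returned stage exceptional, so the final exceptionally(this::iterationErrorHandler) still observes it. A minimal sketch of that pattern in plain java.util.concurrent (Futures.loop is a Pravega helper, so it is replaced here by an already-failed stage):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class WhenCompleteCleanupSketch {
    public static void main(String[] args) {
        // Stand-in for the commit-processing loop; here it has already failed.
        CompletableFuture<Void> processing =
                CompletableFuture.failedFuture(new RuntimeException("commit failed"));

        CompletableFuture<Void> withCleanup = processing.whenComplete((r, ex) -> {
            // Cleanup runs whether the stage failed or finished normally,
            // just like commitQueue.close() above.
            System.out.println("closing queue");
            if (ex != null) {
                // Rethrow so the returned stage stays exceptional and a later
                // exceptionally(...) handler still sees the failure.
                throw new CompletionException(ex);
            }
        });

        System.out.println(withCleanup.isCompletedExceptionally()); // true
    }
}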
use of java.util.concurrent.CompletionException in project pravega by pravega.
the class SegmentAggregator method beginReconciliation.
// endregion
// region Reconciliation
/**
* Initiates the Storage reconciliation procedure. Gets the current state of the Segment from Storage, and based on that,
* does one of the following:
* * Nothing, if the Storage agrees with the Metadata.
* * Throws a show-stopping DataCorruptionException (wrapped in a CompletionException) if the situation is unrecoverable.
* * Initiates the Reconciliation Procedure, which allows the reconcile() method to execute.
*
* @param timer Timer for the operation.
* @return A CompletableFuture that indicates when the operation completed.
*/
private CompletableFuture<Void> beginReconciliation(TimeoutTimer timer) {
    assert this.state.get() == AggregatorState.ReconciliationNeeded : "beginReconciliation cannot be called if state == " + this.state;
    return this.storage.getStreamSegmentInfo(this.metadata.getName(), timer.getRemaining()).thenAcceptAsync(sp -> {
        if (sp.getLength() > this.metadata.getLength()) {
            // The Segment in Storage is longer than the Metadata says it can be, which points to some external
            // actor altering the Segment. We cannot recover automatically from this situation.
            throw new CompletionException(new ReconciliationFailureException(
                    "Actual Segment length in Storage is larger than the Metadata Length.", this.metadata, sp));
        } else if (sp.getLength() < this.metadata.getStorageLength()) {
            // We cannot recover automatically from this situation.
            throw new CompletionException(new ReconciliationFailureException(
                    "Actual Segment length in Storage is smaller than the Metadata StorageLength.", this.metadata, sp));
        } else if (sp.getLength() == this.metadata.getStorageLength()) {
            // Nothing to do.
            return;
        }

        // If we get here, it means we have work to do. Set the state accordingly and move on.
        this.reconciliationState.set(new ReconciliationState(this.metadata, sp));
        setState(AggregatorState.Reconciling);
    }, this.executor);
}
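Because a checked exception cannot escape the Consumer passed to thenAcceptAsync, the method fails the pipeline by throwing the real error wrapped in a CompletionException; downstream handlers then inspect getCause(). A small sketch of that idea under assumed names (ValidationException stands in for ReconciliationFailureException, and the length comparison is simplified):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class AsyncValidationSketch {
    // Hypothetical checked exception, standing in for ReconciliationFailureException.
    static class ValidationException extends Exception {
        ValidationException(String message) {
            super(message);
        }
    }

    public static void main(String[] args) {
        CompletableFuture<Long> storageLength = CompletableFuture.completedFuture(100L);
        long metadataLength = 50L;

        CompletableFuture<Void> result = storageLength.thenAcceptAsync(len -> {
            if (len > metadataLength) {
                // A checked exception cannot be thrown from the Consumer directly, so it is wrapped in
                // CompletionException, which completes the returned future exceptionally.
                throw new CompletionException(
                        new ValidationException("Storage length is larger than the metadata length."));
            }
        });

        result.exceptionally(ex -> {
            // Downstream handlers inspect the cause to find the real failure.
            System.out.println(ex.getCause() instanceof ValidationException); // true
            return null;
        }).join();
    }
}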
use of java.util.concurrent.CompletionException in project pravega by pravega.
the class StreamSegmentContainer method deleteStreamSegment.
@Override
public CompletableFuture<Void> deleteStreamSegment(String streamSegmentName, Duration timeout) {
    ensureRunning();
    logRequest("deleteStreamSegment", streamSegmentName);
    this.metrics.deleteSegment();
    TimeoutTimer timer = new TimeoutTimer(timeout);

    // metadata.deleteStreamSegment will delete the given StreamSegment and all Transactions associated with it.
    // It returns a mapping of segment ids to names of StreamSegments that were deleted.
    // As soon as this happens, all operations that deal with those segments will start throwing appropriate exceptions
    // or ignore the segments altogether (such as StorageWriter).
    Collection<SegmentMetadata> deletedSegments = this.metadata.deleteStreamSegment(streamSegmentName);

    val deletionFutures = new ArrayList<CompletableFuture<Void>>();
    for (SegmentMetadata toDelete : deletedSegments) {
        deletionFutures.add(this.storage
                .openWrite(toDelete.getName())
                .thenComposeAsync(handle -> this.storage.delete(handle, timer.getRemaining()), this.executor)
                .thenComposeAsync(v -> this.stateStore.remove(toDelete.getName(), timer.getRemaining()), this.executor)
                .exceptionally(ex -> {
                    ex = Exceptions.unwrap(ex);
                    if (ex instanceof StreamSegmentNotExistsException && toDelete.isTransaction()) {
                        // A missing Transaction segment may have already been merged into its parent; the Metadata
                        // did not get a chance to get updated. Treat the deletion as successful.
                        return null;
                    }
                    throw new CompletionException(ex);
                }));
    }

    notifyMetadataRemoved(deletedSegments);
    return Futures.allOf(deletionFutures);
}
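The exceptionally handler above unwraps the failure, swallows the one case that is benign, and rethrows everything else wrapped in CompletionException so the aggregate future still fails. A sketch of the same shape in plain Java; Exceptions.unwrap is Pravega-specific, so a hand-rolled unwrap and a NoSuchElementException stand in for the real types:

import java.util.NoSuchElementException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class TolerantDeleteSketch {
    // Hand-rolled stand-in for Pravega's Exceptions.unwrap: peel off CompletionException layers.
    static Throwable unwrap(Throwable t) {
        while (t instanceof CompletionException && t.getCause() != null) {
            t = t.getCause();
        }
        return t;
    }

    static CompletableFuture<Void> deleteIgnoringMissing(CompletableFuture<Void> deletion) {
        return deletion.exceptionally(ex -> {
            Throwable cause = unwrap(ex);
            if (cause instanceof NoSuchElementException) {
                // The item is already gone; treat the deletion as successful.
                return null;
            }
            // Anything else is a real failure; re-wrap so it keeps propagating.
            throw new CompletionException(cause);
        });
    }

    public static void main(String[] args) {
        CompletableFuture<Void> alreadyGone =
                CompletableFuture.failedFuture(new NoSuchElementException("segment not found"));
        CompletableFuture<Void> realFailure =
                CompletableFuture.failedFuture(new RuntimeException("storage unavailable"));

        System.out.println(deleteIgnoringMissing(alreadyGone).isCompletedExceptionally()); // false
        System.out.println(deleteIgnoringMissing(realFailure).isCompletedExceptionally()); // true
    }
}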
use of java.util.concurrent.CompletionException in project pravega by pravega.
the class TxnSweeper method failOverTxn.
private CompletableFuture<Result> failOverTxn(String failedHost, TxnResource txn) {
    String scope = txn.getScope();
    String stream = txn.getStream();
    UUID txnId = txn.getTxnId();
    log.debug("Host = {}, processing transaction {}/{}/{}", failedHost, scope, stream, txnId);
    return streamMetadataStore.getTransactionData(scope, stream, txnId, null, executor).handle((r, e) -> {
        if (e != null) {
            if (Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException) {
                // Transaction not found, which means it should already have completed. We will ignore such txns.
                return VersionedTransactionData.EMPTY;
            } else {
                throw new CompletionException(e);
            }
        }
        return r;
    }).thenComposeAsync(txData -> {
        int epoch = txData.getEpoch();
        switch (txData.getStatus()) {
            case OPEN:
                return failOverOpenTxn(failedHost, txn).handleAsync((v, e) -> new Result(txn, v, e), executor);
            case ABORTING:
                return failOverAbortingTxn(failedHost, epoch, txn).handleAsync((v, e) -> new Result(txn, v, e), executor);
            case COMMITTING:
                return failOverCommittingTxn(failedHost, epoch, txn).handleAsync((v, e) -> new Result(txn, v, e), executor);
            case UNKNOWN:
            default:
                return streamMetadataStore.removeTxnFromIndex(failedHost, txn, true).thenApply(x -> new Result(txn, null, null));
        }
    }, executor).whenComplete((v, e) -> log.debug("Host = {}, processing transaction {}/{}/{} complete", failedHost, scope, stream, txnId));
}
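The first handle stage is a recover-or-rethrow step: a DataNotFoundException is mapped to the sentinel VersionedTransactionData.EMPTY, while any other failure is rethrown as a CompletionException and short-circuits the rest of the chain. A simplified sketch of that step, with String and NoSuchElementException standing in for the Pravega types:

import java.util.NoSuchElementException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class HandleRecoverySketch {
    // Stand-in for VersionedTransactionData.EMPTY: a sentinel used when the record is gone.
    static final String EMPTY = "<empty>";

    // Peel off CompletionException layers, similar in spirit to Pravega's Exceptions.unwrap.
    static Throwable unwrap(Throwable t) {
        while (t instanceof CompletionException && t.getCause() != null) {
            t = t.getCause();
        }
        return t;
    }

    static CompletableFuture<String> lookupOrEmpty(CompletableFuture<String> lookup) {
        return lookup.handle((r, e) -> {
            if (e != null) {
                if (unwrap(e) instanceof NoSuchElementException) {
                    // Record not found: it must have already been cleaned up, so fall back to the sentinel.
                    return EMPTY;
                }
                // Any other failure keeps propagating to downstream stages.
                throw new CompletionException(e);
            }
            return r;
        });
    }

    public static void main(String[] args) {
        CompletableFuture<String> missing =
                CompletableFuture.failedFuture(new NoSuchElementException("txn not found"));
        CompletableFuture<String> failing =
                CompletableFuture.failedFuture(new RuntimeException("store unreachable"));

        System.out.println(lookupOrEmpty(missing).join());                     // <empty>
        System.out.println(lookupOrEmpty(failing).isCompletedExceptionally()); // true
    }
}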