Usage of io.pravega.common.concurrent.Futures in the pravega project:
class Producer, method runOneIteration.
// endregion
// region Producer Implementation
/**
 * Executes one iteration of the Producer.
 * 1. Requests a new ProducerOperation from the DataSource.
 * 2. Executes it.
 * 3. Completes the ProducerOperation with either success or failure based on the outcome of step #2.
 *
 * @return A CompletableFuture that completes when every operation issued in this iteration has finished
 * (successfully or with a handled error), and fails if any operation failed with an unhandled error.
 */
private CompletableFuture<Void> runOneIteration() {
    this.iterationCount.incrementAndGet();
    val futures = new ArrayList<CompletableFuture<Void>>();
    // Issue up to 'producerParallelism' operations concurrently for this iteration.
    for (int i = 0; i < this.config.getProducerParallelism(); i++) {
        ProducerOperation op = this.dataSource.nextOperation();
        if (op == null) {
            // Nothing more to do: the DataSource is exhausted, so stop the Producer entirely.
            this.canContinue.set(false);
            break;
        }
        CompletableFuture<Void> result;
        try {
            CompletableFuture<Void> waitOn = op.getWaitOn();
            if (waitOn != null) {
                // This operation depends on another one: run it only after the dependency settles.
                // A failed dependency is deliberately ignored (exceptionally -> null) so this
                // operation still executes.
                result = waitOn.exceptionally(ex -> null).thenComposeAsync(v -> executeOperation(op), this.executorService);
            } else {
                result = executeOperation(op);
            }
        } catch (Throwable ex) {
            // Catch and handle sync errors.
            // -1: no successful result to report (presumably a failure marker - confirm with ProducerOperation).
            op.completed(-1);
            if (handleOperationError(ex, op)) {
                // Exception handled; skip this iteration since there's nothing more we can do.
                continue;
            } else {
                // Unhandled: surface the sync error through the same async path as everything else.
                result = Futures.failedFuture(ex);
            }
        }
        futures.add(result.exceptionally(ex -> {
            // Catch and handle async errors.
            if (handleOperationError(ex, op)) {
                // Handled: treat this operation as complete so it does not fail the iteration.
                return null;
            }
            // Unhandled: re-throw so the iteration's aggregate future fails.
            throw new CompletionException(ex);
        }));
    }
    return Futures.allOf(futures);
}
Usage of io.pravega.common.concurrent.Futures in the pravega project:
class StreamSegmentContainerTests, method activateAllSegments.
/**
 * Ensures that all Segments defined in the given collection are loaded up into the Container's metadata.
 * This simplifies tests that must not be subject to StreamSegmentMapper's assignment logic: such tests
 * execute operations in a specific order and rely on those operations entering the OperationProcessor
 * queue in that same order, which the Mapper could otherwise perturb.
 */
@SneakyThrows
private void activateAllSegments(Collection<String> segmentNames, TestContext context) {
    // Kick off an activation for every segment, then block until they have all completed.
    val activationFutures = segmentNames
            .stream()
            .map(segmentName -> activateSegment(segmentName, context.container))
            .collect(Collectors.toList());
    Futures.allOf(activationFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
Usage of io.pravega.common.concurrent.Futures in the pravega project:
class StreamSegmentStoreTestBase, method waitForSegmentInStorage.
/**
 * Waits (asynchronously) until the given Segment reaches the expected state in Storage, polling the
 * read-only store every 100ms. The wait is bounded by TIMEOUT.
 *
 * @param sp            The expected Segment state: if sealed, waits for the Storage copy to be sealed;
 *                      otherwise waits for the Storage copy to reach the same length.
 * @param readOnlyStore The store to poll for the Segment's Storage-side properties.
 * @return A CompletableFuture that completes when the Segment has reached the expected state in Storage,
 * or fails with a TimeoutException if that does not happen within the allotted time.
 */
private CompletableFuture<Void> waitForSegmentInStorage(SegmentProperties sp, StreamSegmentStore readOnlyStore) {
    TimeoutTimer timer = new TimeoutTimer(TIMEOUT);
    AtomicBoolean tryAgain = new AtomicBoolean(true);
    return Futures.loop(tryAgain::get, () -> readOnlyStore.getStreamSegmentInfo(sp.getName(), false, TIMEOUT).thenCompose(storageProps -> {
        if (sp.isSealed()) {
            tryAgain.set(!storageProps.isSealed());
        } else {
            tryAgain.set(sp.getLength() != storageProps.getLength());
        }
        if (!tryAgain.get()) {
            // Condition satisfied: finish immediately instead of sleeping one more poll interval.
            return CompletableFuture.completedFuture(null);
        }
        if (!timer.hasRemaining()) {
            return Futures.<Void>failedFuture(new TimeoutException(String.format("Segment %s did not complete in Storage in the allotted time.", sp.getName())));
        }
        // Not there yet: back off briefly before the next poll.
        return Futures.delayedFuture(Duration.ofMillis(100), executorService());
    }), executorService());
}
Usage of io.pravega.common.concurrent.Futures in the pravega project:
class OrderedItemProcessorTests, method testClose.
/**
 * Tests that closing does cancel all pending items, except the processing ones.
 */
@Test
public void testClose() throws Exception {
    final int totalItems = 2 * CAPACITY;
    val seenItems = Collections.synchronizedCollection(new HashSet<Integer>());
    val blocker = new CompletableFuture<Integer>();
    // Every item returns the same (incomplete) future, so all accepted items stay in-flight.
    Function<Integer, CompletableFuture<Integer>> handler = item -> {
        if (!seenItems.add(item)) {
            Assert.fail("Duplicate item detected: " + item);
        }
        return blocker;
    };
    val itemResults = new ArrayList<CompletableFuture<Integer>>();
    @Cleanup val processor = new TestProcessor(CAPACITY, handler, executorService());
    // Queue up twice the capacity, so half the items end up waiting behind the others.
    for (int item = 0; item < totalItems; item++) {
        itemResults.add(processor.process(item));
    }
    // Close on another thread; it should not return while work is still outstanding.
    val closeFuture = CompletableFuture.runAsync(processor::close, executorService());
    // Verify none of the items have been completed (or cancelled for that matter).
    itemResults.forEach(resultFuture ->
            Assert.assertFalse("Future was completed after close() was called.", resultFuture.isDone()));
    Assert.assertFalse("close() returned even if there are pending operations to complete.", closeFuture.isDone());
    // Unblock every item at once; all result futures should now be able to finish.
    blocker.complete(0);
    Futures.allOf(itemResults).join();
    // ... after which close() itself should return.
    closeFuture.join();
}
Usage of io.pravega.common.concurrent.Futures in the pravega project:
class OrderedItemProcessorTests, method testCapacityNotExceeded.
/**
 * Tests a scenario where we add fewer items than the capacity allows. We want to verify that none of the items are
 * queued up: every submitted item must be handed to the item processor immediately.
 */
@Test
public void testCapacityNotExceeded() throws Exception {
    val processedItems = Collections.synchronizedCollection(new HashSet<Integer>());
    val processFutures = Collections.synchronizedList(new ArrayList<CompletableFuture<Integer>>());
    // Item processor: records the item and returns a future completed manually by the test.
    // Items are submitted with values 0, 1, 2, ... in order, so processFutures.get(i) always
    // corresponds to item value i (both before and after the second submission round below).
    Function<Integer, CompletableFuture<Integer>> itemProcessor = i -> {
        if (!processedItems.add(i)) {
            Assert.fail("Duplicate item detected: " + i);
        }
        CompletableFuture<Integer> result = new CompletableFuture<>();
        processFutures.add(result);
        return result;
    };
    val resultFutures = new ArrayList<CompletableFuture<Integer>>();
    @Cleanup val p = new TestProcessor(CAPACITY, itemProcessor, executorService());
    // Fill exactly to capacity; each item must reach the processor synchronously.
    for (int i = 0; i < CAPACITY; i++) {
        resultFutures.add(p.process(i));
        Assert.assertTrue("Item has not been immediately processed when under capacity: " + i, processedItems.contains(i));
    }
    // Finish up half the futures. We need a Semaphore so that we know when the OrderedItemProcessor actually
    // finished cleaning up after these completed tasks, as that happens asynchronously inside the processor and we
    // don't really have a hook into it, except by sub-classing it and intercepting 'executionComplete'.
    val half = CAPACITY / 2;
    val cleanupSignal = new Semaphore(half);
    // Drain all permits up front; each 'executionComplete' callback releases one back.
    cleanupSignal.acquire(half);
    p.setExecutionCompleteCallback(cleanupSignal::release);
    for (int i = 0; i < half; i++) {
        processFutures.get(i).complete(TRANSFORMER.apply(i));
    }
    // Wait until the processor finished internal cleanups.
    cleanupSignal.acquire(half);
    Futures.allOf(resultFutures.subList(0, half)).join();
    // Now add even more and make sure we are under capacity.
    for (int i = 0; i < CAPACITY / 2; i++) {
        val item = CAPACITY + i;
        resultFutures.add(p.process(item));
        Assert.assertTrue("Item has not been immediately processed when under capacity: " + item, processedItems.contains(item));
    }
    // Finish up the remaining futures.
    // (Index i equals the item value here - see the alignment note on itemProcessor above.)
    for (int i = 0; i < processFutures.size(); i++) {
        val f = processFutures.get(i);
        if (!f.isDone()) {
            f.complete(TRANSFORMER.apply(i));
        }
    }
    // Verify they have been executed in order.
    val results = Futures.allOfWithResults(resultFutures).join();
    for (int i = 0; i < results.size(); i++) {
        Assert.assertEquals("Unexpected result at index " + i, TRANSFORMER.apply(i), results.get(i));
    }
}
Aggregations