Use of java.util.function.Supplier in project pravega by pravega.
From the class OrderedItemProcessorTests, method testCapacityExceeded:
/**
* Tests a scenario where we add more items than the capacity allows. We want to verify that the items are queued up
* and when their time comes, they get processed in order.
*/
@Test
public void testCapacityExceeded() {
    final int maxDelayMillis = 20;
    final int itemCount = 20 * CAPACITY;
    val processedItems = Collections.synchronizedCollection(new HashSet<Integer>());
    val processFuture = new CompletableFuture<Void>();
    // Each item waits for a signal to complete. When the signal arrives, each takes a random time to complete.
    val rnd = new Random(0);
    Supplier<Duration> delaySupplier = () -> Duration.ofMillis(rnd.nextInt(maxDelayMillis));
    Function<Integer, CompletableFuture<Integer>> itemProcessor = i -> {
        if (!processedItems.add(i)) {
            Assert.fail("Duplicate item detected: " + i);
        }
        CompletableFuture<Integer> result = new CompletableFuture<>();
        processFuture.thenComposeAsync(v -> Futures.delayedFuture(delaySupplier.get(), executorService()), executorService())
                .whenCompleteAsync((r, ex) -> result.complete(TRANSFORMER.apply(i)));
        return result;
    };
    val resultFutures = new ArrayList<CompletableFuture<Integer>>();
    @Cleanup
    val p = new TestProcessor(CAPACITY, itemProcessor, executorService());
    // Fill up to capacity, and beyond.
    for (int i = 0; i < itemCount; i++) {
        resultFutures.add(p.process(i));
        if (i >= CAPACITY) {
            Assert.assertFalse("Item has been immediately processed when over capacity: " + i, processedItems.contains(i));
        }
    }
    // Finish up the items, and verify the queued ones get processed.
    processFuture.complete(null);
    // Verify they have been executed in order.
    val results = Futures.allOfWithResults(resultFutures).join();
    for (int i = 0; i < results.size(); i++) {
        Assert.assertEquals("Unexpected result at index " + i, TRANSFORMER.apply(i), results.get(i));
    }
}
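The interesting Supplier here is delaySupplier: it hides the randomness behind a functional interface, so the item processor only asks for "a delay" and never touches the Random directly. A minimal standalone sketch of that pattern (illustrative class and variable names, not Pravega code):

import java.time.Duration;
import java.util.Random;
import java.util.function.Supplier;

// A Supplier<Duration> that yields a new, bounded random delay on each call.
// Seeding the Random keeps the delay sequence reproducible across test runs.
public class RandomDelaySupplierExample {
    public static void main(String[] args) {
        final int maxDelayMillis = 20;
        Random rnd = new Random(0); // fixed seed for reproducibility
        Supplier<Duration> delaySupplier = () -> Duration.ofMillis(rnd.nextInt(maxDelayMillis));
        for (int i = 0; i < 3; i++) {
            System.out.println("delay = " + delaySupplier.get().toMillis() + " ms");
        }
    }
}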
Use of java.util.function.Supplier in project pravega by pravega.
From the class ControllerEventProcessors, method handleOrphanedReaders:
private CompletableFuture<Void> handleOrphanedReaders(final EventProcessorGroup<? extends ControllerEvent> group, final Supplier<Set<String>> processes) {
    return withRetriesAsync(() -> CompletableFuture.supplyAsync(() -> {
        try {
            return group.getProcesses();
        } catch (CheckpointStoreException e) {
            if (e.getType().equals(CheckpointStoreException.Type.NoNode)) {
                return Collections.<String>emptySet();
            }
            throw new CompletionException(e);
        }
    }, executor), RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor)
            .thenComposeAsync(groupProcesses -> withRetriesAsync(() -> CompletableFuture.supplyAsync(() -> {
                try {
                    return new ImmutablePair<>(processes.get(), groupProcesses);
                } catch (Exception e) {
                    log.error(String.format("Error fetching current processes %s", group.toString()), e);
                    throw new CompletionException(e);
                }
            }, executor), RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor))
            .thenComposeAsync(pair -> {
                Set<String> activeProcesses = pair.getLeft();
                Set<String> registeredProcesses = pair.getRight();
                if (registeredProcesses == null || registeredProcesses.isEmpty()) {
                    return CompletableFuture.completedFuture(null);
                }
                if (activeProcesses != null) {
                    registeredProcesses.removeAll(activeProcesses);
                }
                List<CompletableFuture<Void>> futureList = new ArrayList<>();
                for (String process : registeredProcesses) {
                    futureList.add(withRetriesAsync(() -> CompletableFuture.runAsync(() -> {
                        try {
                            group.notifyProcessFailure(process);
                        } catch (CheckpointStoreException e) {
                            log.error(String.format("Error notifying failure of process=%s in event processor group %s", process, group.toString()), e);
                            throw new CompletionException(e);
                        }
                    }, executor), RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor));
                }
                return Futures.allOf(futureList);
            });
}
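Every checkpoint-store call above is wrapped in a Supplier<CompletableFuture<T>> before being handed to withRetriesAsync. The reason is that a CompletableFuture represents one already-started computation, so a retry loop needs a factory it can invoke again for each fresh attempt. A hedged sketch of that shape (hypothetical helper; Pravega's actual Retry utility also takes an executor and applies backoff):

import java.util.concurrent.CompletableFuture;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Retries an async operation by re-invoking its Supplier; the supplier,
// not the future, is the retryable unit.
public final class RetrySketch {
    public static <T> CompletableFuture<T> withRetriesAsync(Supplier<CompletableFuture<T>> futureSupplier,
                                                            Predicate<Throwable> isRetryable,
                                                            int attemptsLeft) {
        return futureSupplier.get()
                .handle((result, ex) -> {
                    if (ex == null) {
                        return CompletableFuture.completedFuture(result);
                    }
                    if (attemptsLeft > 1 && isRetryable.test(ex)) {
                        // Start a brand-new attempt by calling the supplier again.
                        return withRetriesAsync(futureSupplier, isRetryable, attemptsLeft - 1);
                    }
                    CompletableFuture<T> failed = new CompletableFuture<T>();
                    failed.completeExceptionally(ex);
                    return failed;
                })
                .thenCompose(f -> f);
    }
}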
Use of java.util.function.Supplier in project pravega by pravega.
From the class SegmentAggregatorTests, method testMergeWithStorageErrors:
/**
* Tests the flush() method with Append and MergeTransactionOperations when there are Storage errors.
*/
@Test
public void testMergeWithStorageErrors() throws Exception {
    // Storage Errors
    // This is the number of appends per Segment/Transaction - there will be a lot of appends here.
    final int appendCount = 100;
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup
    TestContext context = new TestContext(config);
    // Create and initialize all segments.
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        context.storage.create(a.getMetadata().getName(), TIMEOUT).join();
        a.initialize(TIMEOUT).join();
    }
    // Store written data by segment - so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();
    // Add a few appends to each Transaction aggregator and to the parent aggregator, and seal all Transactions.
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);
        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            transactionAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }
        transactionAggregator.add(generateSealAndUpdateMetadata(transactionId, context));
    }
    // Merge all the Transactions into the parent Segment.
    @Cleanup
    ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    for (int transIndex = 0; transIndex < context.transactionAggregators.length; transIndex++) {
        // Merge this Transaction into the parent & record its data in the final parent data array.
        long transactionId = context.transactionAggregators[transIndex].getMetadata().getId();
        context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));
        ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
        parentData.write(transactionData.toByteArray());
        transactionData.close();
    }
    // Have the writes fail every few attempts with a well-known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setConcatSyncErrorInjector(new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setConcatAsyncErrorInjector(new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));
    // Flush all the Aggregators, while checking that the right errors get handled and can be recovered from.
    tryFlushAllSegments(context, () -> setException.set(null), setException::get);
    // Verify that all Transactions are now fully merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        Assert.assertTrue("Merged Transaction was not marked as deleted in metadata.", transactionMetadata.isDeleted());
        Assert.assertFalse("Merged Transaction still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
    }
    // Verify that, in the end, the contents of the parent Segment are as expected.
    byte[] expectedData = parentData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed/merged to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
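Two things make the Supplier<Exception> pattern above useful: exception construction is deferred until the injector actually decides to fail, and the supplier records each created instance in an AtomicReference so the test can later assert that exactly that exception surfaced. A minimal sketch of the injector side (hypothetical class; Pravega's real ErrorInjector lives in its test utilities):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Counts invocations and throws a freshly supplied exception whenever the
// predicate matches the current count. With count -> count % 2 == 0 this
// fails every second call, mirroring failSyncEvery in the test above.
public class ErrorInjectorSketch<T extends Exception> {
    private final Predicate<Integer> shouldFail;
    private final Supplier<T> exceptionSupplier;
    private final AtomicInteger count = new AtomicInteger();

    public ErrorInjectorSketch(Predicate<Integer> shouldFail, Supplier<T> exceptionSupplier) {
        this.shouldFail = shouldFail;
        this.exceptionSupplier = exceptionSupplier;
    }

    public void throwIfNecessary() throws T {
        if (shouldFail.test(count.incrementAndGet())) {
            throw exceptionSupplier.get(); // built lazily, only on an actual failure
        }
    }
}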
Use of java.util.function.Supplier in project pravega by pravega.
From the class SegmentAggregatorTests, method testSealWithStorageErrors:
/**
* Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
*/
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and a seal, then flush them together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length (not bothering with flushing here; testFlushSeal() covers that).
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }
    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    // Have the writes fail every few attempts with a well-known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));
    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times; at least one attempt should succeed.
        setException.set(null);
        try {
            FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }
        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done. We got at least one through.
            break;
        }
    }
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.", context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
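Unlike the modulo-based injectors in the neighboring tests, this test arms each injector exactly once through AtomicBoolean.getAndSet(false): the predicate returns true on the first call and false forever after, so one seal attempt fails and the retries succeed. A small self-contained sketch of that one-shot trigger (illustrative names):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;

// getAndSet(false) atomically returns the previous value and clears the
// flag, which makes the predicate fire exactly once.
public class OneShotPredicateExample {
    public static void main(String[] args) {
        AtomicBoolean generateException = new AtomicBoolean(true);
        Predicate<Integer> failOnce = count -> generateException.getAndSet(false);
        System.out.println(failOnce.test(1)); // true  -> inject the failure
        System.out.println(failOnce.test(2)); // false -> let the retry pass
        System.out.println(failOnce.test(3)); // false
    }
}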
Use of java.util.function.Supplier in project pravega by pravega.
From the class SegmentAggregatorTests, method testFlushAppendWithStorageErrors:
/**
* Tests the behavior of flush() with appends and storage errors (on the write() method).
*/
@Test
public void testFlushAppendWithStorageErrors() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;
    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    // Have the writes fail every few attempts with a well known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setWriteSyncErrorInjector(new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setWriteAsyncErrorInjector(new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));
    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Part 1: flush triggered by accumulated size.
    int exceptionCount = 0;
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        // Call flush() and inspect the result.
        setException.set(null);
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        FlushResult flushResult = null;
        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
                exceptionCount++;
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }
        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
        }
    }
    // Do one last flush at the end to make sure we clear out all the buffers, if there's anything else left.
    // Force a flush by incrementing the time by a lot.
    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
    context.storage.setWriteSyncErrorInjector(null);
    context.storage.setWriteAsyncErrorInjector(null);
    context.segmentAggregator.flush(TIMEOUT).join();
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
    AssertExtensions.assertGreaterThan("Not enough errors injected.", 0, exceptionCount);
}
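One recurring detail across these tests is worth spelling out: Java lambdas can only capture effectively final locals, so the "last injected exception" travels through an AtomicReference that the Supplier writes and the later assertion reads. A minimal sketch of that recording-supplier pattern (illustrative names, not Pravega code):

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// The Supplier both creates and records the exception, so the code that
// triggers it can compare what was thrown against setException.get().
public class RecordingSupplierExample {
    public static void main(String[] args) {
        AtomicReference<Exception> setException = new AtomicReference<>();
        Supplier<Exception> exceptionSupplier = () -> {
            Exception ex = new Exception("intentional");
            setException.set(ex); // remember the instance for later assertions
            return ex;
        };
        Exception thrown = exceptionSupplier.get();
        System.out.println(thrown == setException.get()); // true
    }
}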