Use of java.util.concurrent.TimeUnit.MILLISECONDS in project mule by mulesoft.
From the class ExtensionNotificationsTestCase, the method sourceFiresNotificationsOnBackPressure:
@Test
public void sourceFiresNotificationsOnBackPressure() throws Exception {
    Latch latch = new Latch();
    String batchFailed = "BATCH_FAILED";
    setUpListener(notification -> {
        if (batchFailed.equals(notification.getAction().getIdentifier())) {
            latch.release();
        }
    });

    Flow flow = (Flow) getFlowConstruct("sourceNotificationsBackPressure");
    flow.start();
    latch.await(10000, MILLISECONDS);
    flow.stop();

    assertThat(listener.getNotifications(), hasSize(greaterThan(3)));

    // Find first BATCH_FAILED
    ExtensionNotification backPressureNotification = listener.getNotifications().stream()
        .filter(n -> batchFailed.equals(n.getAction().getIdentifier()))
        .findFirst()
        .get();

    // Find matching event notifications
    List<ExtensionNotification> notifications = listener.getNotifications().stream()
        .filter(n -> backPressureNotification.getEvent().getCorrelationId().equals(n.getEvent().getCorrelationId()))
        .collect(toList());
    assertThat(notifications, hasSize(4));

    int batchNumber = (Integer) backPressureNotification.getData().getValue();
    ExtensionNotification notification1 = notifications.get(0);
    verifyNewBatch(notification1, batchNumber);
    ExtensionNotification notification2 = notifications.get(1);
    verifyNextBatch(notification2, 10L);
    ExtensionNotification notification3 = notifications.get(2);
    verifyNotificationAndValue(notification3, batchFailed, batchNumber);
    ExtensionNotification notification4 = notifications.get(3);
    verifyBatchTerminated(notification4, batchNumber);
}
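The core idiom here is a bounded latch wait: the listener releases the latch when the BATCH_FAILED notification arrives, and latch.await(10000, MILLISECONDS) caps how long the test can block. A minimal sketch of the same idiom with the JDK's own CountDownLatch (Mule's Latch offers an equivalent timed await; the worker thread below is a stand-in for the notification listener):

import static java.util.concurrent.TimeUnit.MILLISECONDS;

import java.util.concurrent.CountDownLatch;

class LatchTimeoutSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);

        // Stand-in for the listener releasing the latch when the event fires
        new Thread(latch::countDown).start();

        // Bounded wait: true if released within 10 seconds, false on timeout,
        // so a missed signal fails the test instead of hanging the build
        boolean released = latch.await(10_000, MILLISECONDS);
        System.out.println("released within timeout: " + released);
    }
}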
Use of java.util.concurrent.TimeUnit.MILLISECONDS in project mule by mulesoft.
From the class HeisenbergRouters, the method concurrentRouteExecutor:
public void concurrentRouteExecutor(WhenRoute when, RouterCompletionCallback callback) {
    Consumer<Chain> processor = (chain) -> {
        final Latch latch = new Latch();
        // Release the latch whether the chain completes successfully or with an error
        chain.process(result -> latch.release(), (error, result) -> latch.release());
        try {
            latch.await(10000, MILLISECONDS);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    };
    Thread first = new Thread(() -> processor.accept(when.getChain()));
    Thread second = new Thread(() -> processor.accept(when.getChain()));
    first.start();
    second.start();
    try {
        first.join();
        second.join();
    } catch (Exception e) {
        callback.error(e);
        return; // don't also signal success after reporting the error
    }
    callback.success(Result.builder().output("SUCCESS").build());
}
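Worth noting: first.join() and second.join() carry no timeout, so a route that never releases its latch would block the router indefinitely. A sketch of a bounded alternative with an ExecutorService, whose awaitTermination takes the same TimeUnit argument (illustrative only, not how Mule's router is actually implemented; route is a placeholder task):

import static java.util.concurrent.TimeUnit.MILLISECONDS;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class BoundedJoinSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        Runnable route = () -> System.out.println("route ran on " + Thread.currentThread().getName());

        // Run the two routes concurrently, as the router does with raw threads
        executor.submit(route);
        executor.submit(route);

        // Bounded equivalent of Thread.join(): wait at most 10 seconds overall
        executor.shutdown();
        boolean finished = executor.awaitTermination(10_000, MILLISECONDS);
        System.out.println("both routes finished: " + finished);
    }
}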
Use of java.util.concurrent.TimeUnit.MILLISECONDS in project presto by prestodb.
From the class AbstractTestParquetReader, the method testCaching:
@Test
public void testCaching() throws Exception {
    Cache<ParquetDataSourceId, ParquetFileMetadata> parquetFileMetadataCache = CacheBuilder.newBuilder()
        .maximumWeight(new DataSize(1, MEGABYTE).toBytes())
        .weigher((id, metadata) -> ((ParquetFileMetadata) metadata).getMetadataSize())
        .expireAfterAccess(new Duration(10, MINUTES).toMillis(), MILLISECONDS)
        .recordStats()
        .build();
    ParquetMetadataSource parquetMetadataSource = new CachingParquetMetadataSource(parquetFileMetadataCache, new MetadataReader());

    try (ParquetTester.TempFile tempFile = new ParquetTester.TempFile("test", "parquet")) {
        Iterable<Integer> values = intsBetween(0, 10);
        Iterator<?>[] readValues = stream(new Iterable<?>[] {values}).map(Iterable::iterator).toArray(size -> new Iterator<?>[size]);
        List<String> columnNames = singletonList("column1");
        List<Type> columnTypes = singletonList(INTEGER);
        writeParquetFileFromPresto(tempFile.getFile(), columnTypes, columnNames, readValues, 10, CompressionCodecName.GZIP);

        // First read misses and populates the cache
        testSingleRead(new Iterable<?>[] {values}, columnNames, columnTypes, parquetMetadataSource, tempFile.getFile());
        assertEquals(parquetFileMetadataCache.stats().missCount(), 1);
        assertEquals(parquetFileMetadataCache.stats().hitCount(), 0);

        // Subsequent reads are served from the cache
        testSingleRead(new Iterable<?>[] {values}, columnNames, columnTypes, parquetMetadataSource, tempFile.getFile());
        assertEquals(parquetFileMetadataCache.stats().missCount(), 1);
        assertEquals(parquetFileMetadataCache.stats().hitCount(), 1);
        testSingleRead(new Iterable<?>[] {values}, columnNames, columnTypes, parquetMetadataSource, tempFile.getFile());
        assertEquals(parquetFileMetadataCache.stats().missCount(), 1);
        assertEquals(parquetFileMetadataCache.stats().hitCount(), 2);

        // Invalidating the cache forces one more miss
        parquetFileMetadataCache.invalidateAll();
        testSingleRead(new Iterable<?>[] {values}, columnNames, columnTypes, parquetMetadataSource, tempFile.getFile());
        assertEquals(parquetFileMetadataCache.stats().missCount(), 2);
        assertEquals(parquetFileMetadataCache.stats().hitCount(), 2);
        testSingleRead(new Iterable<?>[] {values}, columnNames, columnTypes, parquetMetadataSource, tempFile.getFile());
        assertEquals(parquetFileMetadataCache.stats().missCount(), 2);
        assertEquals(parquetFileMetadataCache.stats().hitCount(), 3);
    }
}
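The cache construction is the interesting part: Guava's CacheBuilder combines a weight-based size bound with millisecond-granularity expiry, and recordStats() enables the hit/miss counters the assertions rely on. A self-contained sketch of the same recipe with placeholder key and value types (the 1 MB budget and 10 minute expiry mirror the test above):

import static java.util.concurrent.TimeUnit.MILLISECONDS;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

class WeightedCacheSketch {
    public static void main(String[] args) {
        Cache<String, byte[]> cache = CacheBuilder.newBuilder()
            .maximumWeight(1024 * 1024)                       // ~1 MB total weight budget
            .weigher((String id, byte[] blob) -> blob.length) // entry weight = payload size
            .expireAfterAccess(10 * 60 * 1000, MILLISECONDS)  // 10 minutes since last access
            .recordStats()                                    // enable the stats() counters
            .build();

        cache.put("footer-1", new byte[128]);
        cache.getIfPresent("footer-1"); // records a hit
        System.out.println(cache.stats());
    }
}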
Use of java.util.concurrent.TimeUnit.MILLISECONDS in project presto by prestodb.
From the class TestExchangeClient, the method testInitialRequestLimit:
@Test
public void testInitialRequestLimit() {
    DataSize bufferCapacity = new DataSize(16, MEGABYTE);
    DataSize maxResponseSize = new DataSize(DEFAULT_MAX_PAGE_SIZE_IN_BYTES, BYTE);
    CountDownLatch countDownLatch = new CountDownLatch(1);
    MockExchangeRequestProcessor processor = new MockExchangeRequestProcessor(maxResponseSize) {
        @Override
        public Response handle(Request request) {
            if (!awaitUninterruptibly(countDownLatch, 10, SECONDS)) {
                throw new UncheckedTimeoutException();
            }
            return super.handle(request);
        }
    };

    List<URI> locations = new ArrayList<>();
    int numLocations = 16;
    List<DataSize> expectedMaxSizes = new ArrayList<>();

    // add pages
    for (int i = 0; i < numLocations; i++) {
        URI location = URI.create("http://localhost:" + (8080 + i));
        locations.add(location);
        processor.addPage(location, createPage(DEFAULT_MAX_PAGE_SIZE_IN_BYTES));
        processor.addPage(location, createPage(DEFAULT_MAX_PAGE_SIZE_IN_BYTES));
        processor.addPage(location, createPage(DEFAULT_MAX_PAGE_SIZE_IN_BYTES));
        processor.setComplete(location);
        expectedMaxSizes.add(maxResponseSize);
    }

    try (ExchangeClient exchangeClient = createExchangeClient(processor, bufferCapacity, maxResponseSize)) {
        for (int i = 0; i < numLocations; i++) {
            exchangeClient.addLocation(locations.get(i), TaskId.valueOf("taskid.0.0." + i));
        }
        exchangeClient.noMoreLocations();
        assertFalse(exchangeClient.isClosed());

        long start = System.nanoTime();
        countDownLatch.countDown();

        // wait for a page to be fetched
        do {
            // there is no thread coordination here, so sleep is the best we can do
            assertLessThan(Duration.nanosSince(start), new Duration(5, TimeUnit.SECONDS));
            sleepUninterruptibly(100, MILLISECONDS);
        } while (exchangeClient.getStatus().getBufferedPages() < 16);

        // Client should have sent 16 requests for a single page (0) and gotten them back
        // The memory limit should be hit immediately and then it doesn't fetch the third page from each
        assertEquals(exchangeClient.getStatus().getBufferedPages(), 16);
        assertTrue(exchangeClient.getStatus().getBufferedBytes() > 0);
        List<PageBufferClientStatus> pageBufferClientStatuses = exchangeClient.getStatus().getPageBufferClientStatuses();
        assertEquals(16, pageBufferClientStatuses.stream()
            .filter(status -> status.getPagesReceived() == 1)
            .mapToInt(PageBufferClientStatus::getPagesReceived)
            .sum());
        assertEquals(processor.getRequestMaxSizes(), expectedMaxSizes);

        for (int i = 0; i < numLocations * 3; i++) {
            assertNotNull(getNextPage(exchangeClient));
        }
        do {
            // there is no thread coordination here, so sleep is the best we can do
            assertLessThan(Duration.nanosSince(start), new Duration(5, TimeUnit.SECONDS));
            sleepUninterruptibly(100, MILLISECONDS);
        } while (processor.getRequestMaxSizes().size() < 64);
        for (int i = 0; i < 48; i++) {
            expectedMaxSizes.add(maxResponseSize);
        }
        assertEquals(processor.getRequestMaxSizes(), expectedMaxSizes);
    }
}
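Two helpers from Guava's Uninterruptibles do the waiting here: awaitUninterruptibly bounds the latch wait inside the mock processor, and sleepUninterruptibly backs the deadline-checked polling loops, both taking a TimeUnit. A compact sketch of both patterns under an assumed conditionReached() predicate (a stand-in for checks like getBufferedPages() reaching 16):

import static com.google.common.util.concurrent.Uninterruptibles.awaitUninterruptibly;
import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;

import java.util.concurrent.CountDownLatch;

class DeadlinePollingSketch {
    public static void main(String[] args) {
        CountDownLatch gate = new CountDownLatch(1);
        new Thread(gate::countDown).start();

        // Bounded, interruption-proof wait, as in the mock processor's handle()
        if (!awaitUninterruptibly(gate, 10, SECONDS)) {
            throw new IllegalStateException("gate was never opened");
        }

        // Deadline-bounded polling loop, as in the buffered-pages loop
        long deadline = System.nanoTime() + SECONDS.toNanos(5);
        while (!conditionReached()) {
            if (System.nanoTime() >= deadline) {
                throw new IllegalStateException("condition not reached within 5s");
            }
            sleepUninterruptibly(100, MILLISECONDS);
        }
        System.out.println("condition reached before the deadline");
    }

    // Placeholder predicate; in the test this is e.g. bufferedPages == 16
    private static boolean conditionReached() {
        return true;
    }
}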
Use of java.util.concurrent.TimeUnit.MILLISECONDS in project presto by prestodb.
From the class AbstractTestOrcReader, the method testCaching:
@Test
public void testCaching() throws Exception {
    Cache<OrcDataSourceId, OrcFileTail> orcFileTailCache = CacheBuilder.newBuilder()
        .maximumWeight(new DataSize(1, MEGABYTE).toBytes())
        .weigher((id, tail) -> ((OrcFileTail) tail).getFooterSize() + ((OrcFileTail) tail).getMetadataSize())
        .expireAfterAccess(new Duration(10, MINUTES).toMillis(), MILLISECONDS)
        .recordStats()
        .build();
    OrcFileTailSource orcFileTailSource = new CachingOrcFileTailSource(new StorageOrcFileTailSource(), orcFileTailCache);

    Cache<StripeId, Slice> stripeFooterCache = CacheBuilder.newBuilder()
        .maximumWeight(new DataSize(1, MEGABYTE).toBytes())
        .weigher((id, footer) -> ((Slice) footer).length())
        .expireAfterAccess(new Duration(10, MINUTES).toMillis(), MILLISECONDS)
        .recordStats()
        .build();
    Cache<StripeStreamId, Slice> stripeStreamCache = CacheBuilder.newBuilder()
        .maximumWeight(new DataSize(1, MEGABYTE).toBytes())
        .weigher((id, stream) -> ((Slice) stream).length())
        .expireAfterAccess(new Duration(10, MINUTES).toMillis(), MILLISECONDS)
        .recordStats()
        .build();
    Optional<Cache<StripeStreamId, List<RowGroupIndex>>> rowGroupIndexCache = Optional.of(CacheBuilder.newBuilder()
        .maximumWeight(new DataSize(1, MEGABYTE).toBytes())
        .weigher((id, rowGroupIndices) -> toIntExact(((List<RowGroupIndex>) rowGroupIndices).stream().mapToLong(RowGroupIndex::getRetainedSizeInBytes).sum()))
        .expireAfterAccess(new Duration(10, MINUTES).toMillis(), MILLISECONDS)
        .recordStats()
        .build());
    StripeMetadataSource stripeMetadataSource = new CachingStripeMetadataSource(new StorageStripeMetadataSource(), stripeFooterCache, stripeStreamCache, rowGroupIndexCache);

    try (TempFile tempFile = createTempFile(10001)) {
        // The first reader misses the file tail cache and populates it
        OrcBatchRecordReader storageReader = createCustomOrcRecordReader(tempFile, ORC, OrcPredicate.TRUE, ImmutableList.of(BIGINT), INITIAL_BATCH_SIZE, orcFileTailSource, stripeMetadataSource, true, ImmutableMap.of(), false);
        assertEquals(orcFileTailCache.stats().missCount(), 1);
        assertEquals(orcFileTailCache.stats().hitCount(), 0);

        // The second reader is served from the file tail cache
        OrcBatchRecordReader cacheReader = createCustomOrcRecordReader(tempFile, ORC, OrcPredicate.TRUE, ImmutableList.of(BIGINT), INITIAL_BATCH_SIZE, orcFileTailSource, stripeMetadataSource, true, ImmutableMap.of(), false);
        assertEquals(orcFileTailCache.stats().missCount(), 1);
        assertEquals(orcFileTailCache.stats().hitCount(), 1);
        assertEquals(storageReader.getRetainedSizeInBytes(), cacheReader.getRetainedSizeInBytes());
        assertEquals(storageReader.getFileRowCount(), cacheReader.getFileRowCount());
        assertEquals(storageReader.getSplitLength(), cacheReader.getSplitLength());

        // Reading a batch with the first reader populates the stripe-level caches
        storageReader.nextBatch();
        assertEquals(stripeFooterCache.stats().missCount(), 1);
        assertEquals(stripeFooterCache.stats().hitCount(), 0);
        assertEquals(stripeStreamCache.stats().missCount(), 2);
        assertEquals(stripeStreamCache.stats().hitCount(), 0);
        assertEquals(rowGroupIndexCache.get().stats().missCount(), 1);
        assertEquals(rowGroupIndexCache.get().stats().hitCount(), 0);

        // The second reader hits the stripe-level caches
        cacheReader.nextBatch();
        assertEquals(stripeFooterCache.stats().missCount(), 1);
        assertEquals(stripeFooterCache.stats().hitCount(), 1);
        assertEquals(stripeStreamCache.stats().missCount(), 2);
        assertEquals(stripeStreamCache.stats().hitCount(), 2);
        assertEquals(rowGroupIndexCache.get().stats().missCount(), 1);
        assertEquals(rowGroupIndexCache.get().stats().hitCount(), 1);

        // Both readers must see the same data
        assertEquals(storageReader.readBlock(0).getInt(0), cacheReader.readBlock(0).getInt(0));
    }
}
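Both caching tests lean on the same counters: with recordStats() enabled, every lookup increments either missCount() or hitCount() on the CacheStats snapshot returned by stats(). A tiny sketch showing how the counters move (the key and loader are arbitrary):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

class CacheStatsSketch {
    public static void main(String[] args) throws Exception {
        Cache<String, String> cache = CacheBuilder.newBuilder().recordStats().build();

        cache.get("key", () -> "loaded"); // first access: miss, the loader runs
        cache.get("key", () -> "loaded"); // second access: hit, the loader is skipped

        System.out.println("misses: " + cache.stats().missCount()); // 1
        System.out.println("hits:   " + cache.stats().hitCount());  // 1
    }
}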