Use of java.util.concurrent.TimeUnit.MILLISECONDS in project mule by mulesoft.
Class ExpiryMonitorTestCase, method testNotExpiry.
@Test
public void testNotExpiry() throws InterruptedException {
    Expirable e = () -> expire();
    long startTime = currentTimeMillis();
    monitor.addExpirable(EXPIRE_TIME, MILLISECONDS, e);
    monitor.run();
    // The expirable must not have expired right after a single monitor run.
    assertThat(expired, is(false));
    // Poll until the monitor eventually expires and deregisters the expirable.
    new PollingProber(EXPIRE_TIMEOUT, 50).check(new JUnitLambdaProbe(() -> {
        assertThat(monitor.isRegistered(e), is(false));
        assertThat(expired, is(true));
        return true;
    }, ae -> {
        ae.printStackTrace();
        return "" + currentTimeMillis() + " - " + monitor.toString();
    }));
    // Expiry must not have happened earlier than the configured expiration time (within DELTA_TIME).
    assertThat(expiredTime - startTime, greaterThanOrEqualTo(EXPIRE_TIME - DELTA_TIME));
}
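The PollingProber repeatedly evaluates the JUnitLambdaProbe until its assertions pass or EXPIRE_TIMEOUT elapses, sleeping 50 ms between attempts. A JDK-only sketch of that polling idea, assuming nothing about Mule's prober API (the awaitCondition helper and its parameters are hypothetical):

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class PollingAssert {

    // Poll the condition every pollDelayMillis until it holds or timeoutMillis elapses.
    static void awaitCondition(BooleanSupplier condition, long timeoutMillis, long pollDelayMillis)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError("Condition not met within " + timeoutMillis + " ms");
            }
            TimeUnit.MILLISECONDS.sleep(pollDelayMillis);
        }
    }
}

In the test above, the condition being polled would amount to something like () -> !monitor.isRegistered(e) && expired.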
Use of java.util.concurrent.TimeUnit.MILLISECONDS in project mule by mulesoft.
Class ExpiryMonitorTestCase, method testExpiry.
@Test
public void testExpiry() throws InterruptedException {
    Expirable e = () -> expire();
    monitor.addExpirable(EXPIRE_TIME, MILLISECONDS, e);
    new PollingProber(EXPIRE_TIMEOUT, 50).check(new JUnitLambdaProbe(() -> {
        assertThat(monitor.isRegistered(e), is(false));
        assertThat(expired, is(true));
        return true;
    }, ae -> {
        ae.printStackTrace();
        return "" + currentTimeMillis() + " - " + monitor.toString();
    }));
}
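Both tests register an Expirable with a delay expressed in MILLISECONDS and expect the monitor to fire and deregister it once that delay has passed. As a loose, JDK-only analogy (not Mule's ExpiryMonitor, which checks its registered expirables each time it runs), a sketch that schedules one expiry callback per entry; all names here are illustrative:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import static java.util.concurrent.TimeUnit.MILLISECONDS;

final class SimpleExpiryScheduler {

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    // Fire expiryAction once the given delay (converted to milliseconds) has elapsed.
    void addExpirable(long delay, TimeUnit unit, Runnable expiryAction) {
        scheduler.schedule(expiryAction, MILLISECONDS.convert(delay, unit), MILLISECONDS);
    }

    void stop() {
        scheduler.shutdownNow();
    }
}

A test could then register a callback such as () -> expired = true and assert on the flag after the delay.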
Use of java.util.concurrent.TimeUnit.MILLISECONDS in project mule by mulesoft.
Class RoutersExecutionTestCase, method concurrentRouterExecution.
/**
 * Executes the same flow concurrently to check that no race condition exists, because
 * two different instances of Chain are being used.
 */
@Test
public void concurrentRouterExecution() throws Exception {
    executor = newFixedThreadPool(2);
    final Latch beginLatch = new Latch();
    final CountDownLatch assertLatch = new CountDownLatch(2);
    final Consumer<Reference<CoreEvent>> runner = reference -> {
        try {
            // Both runners block here until the begin latch is released, so they start together.
            beginLatch.await(10000, MILLISECONDS);
            reference.set(flowRunner("singleRouteRouter").withPayload("CustomPayload").run());
            assertLatch.countDown();
        } catch (Exception e) {
            fail(e.getMessage());
        }
    };
    final Reference<CoreEvent> first = new Reference<>();
    final Reference<CoreEvent> second = new Reference<>();
    executor.submit(() -> runner.accept(first));
    executor.submit(() -> runner.accept(second));
    beginLatch.release();
    // Wait up to 10 seconds for both concurrent executions to finish.
    assertLatch.await(10000, MILLISECONDS);
    CoreEvent firstResult = first.get();
    assertThat(firstResult, is(notNullValue()));
    CoreEvent secondResult = second.get();
    assertThat(secondResult, is(notNullValue()));
    assertThat(secondResult, is(not(sameInstance(firstResult))));
    assertThat(firstResult.getMessage().getPayload().getValue(), is("CustomPayload"));
    assertThat(secondResult.getMessage().getPayload().getValue(), is("CustomPayload"));
}
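Both submissions block on beginLatch, are released together, and the test then waits on assertLatch with 10-second timeouts expressed in MILLISECONDS. A self-contained sketch of that start-gate pattern using only JDK classes (the payload string and timeouts are illustrative values):

import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;

// Two tasks released by a shared start gate; results collected and awaited with millisecond timeouts.
public class StartGateExample {

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        CountDownLatch startGate = new CountDownLatch(1);
        CountDownLatch doneGate = new CountDownLatch(2);
        AtomicReference<String> first = new AtomicReference<>();
        AtomicReference<String> second = new AtomicReference<>();

        for (AtomicReference<String> target : List.of(first, second)) {
            executor.submit(() -> {
                try {
                    startGate.await(10_000, TimeUnit.MILLISECONDS); // start both tasks together
                    target.set("CustomPayload");                    // stand-in for the real work
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    doneGate.countDown();
                }
            });
        }

        startGate.countDown();                            // release both tasks
        doneGate.await(10_000, TimeUnit.MILLISECONDS);    // wait for both to finish
        System.out.println(first.get() + " / " + second.get());
        executor.shutdown();
    }
}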
Use of java.util.concurrent.TimeUnit.MILLISECONDS in project presto by prestodb.
Class QueryStats, method create.
public static QueryStats create(
        QueryStateTimer queryStateTimer,
        Optional<StageInfo> rootStage,
        int peakRunningTasks,
        DataSize peakUserMemoryReservation,
        DataSize peakTotalMemoryReservation,
        DataSize peakTaskUserMemory,
        DataSize peakTaskTotalMemory,
        DataSize peakNodeTotalMemory,
        RuntimeStats runtimeStats) {
    int totalTasks = 0;
    int runningTasks = 0;
    int completedTasks = 0;
    int totalDrivers = 0;
    int queuedDrivers = 0;
    int runningDrivers = 0;
    int blockedDrivers = 0;
    int completedDrivers = 0;
    double cumulativeUserMemory = 0;
    double cumulativeTotalMemory = 0;
    long userMemoryReservation = 0;
    long totalMemoryReservation = 0;
    long totalScheduledTime = 0;
    long totalCpuTime = 0;
    long retriedCpuTime = 0;
    long totalBlockedTime = 0;
    long totalAllocation = 0;
    long rawInputDataSize = 0;
    long rawInputPositions = 0;
    long processedInputDataSize = 0;
    long processedInputPositions = 0;
    long outputDataSize = 0;
    long outputPositions = 0;
    long writtenOutputPositions = 0;
    long writtenOutputLogicalDataSize = 0;
    long writtenOutputPhysicalDataSize = 0;
    long writtenIntermediatePhysicalDataSize = 0;
    ImmutableList.Builder<StageGcStatistics> stageGcStatistics = ImmutableList.builder();
    boolean fullyBlocked = rootStage.isPresent();
    Set<BlockedReason> blockedReasons = new HashSet<>();
    ImmutableList.Builder<OperatorStats> operatorStatsSummary = ImmutableList.builder();
    boolean completeInfo = true;
    RuntimeStats mergedRuntimeStats = RuntimeStats.copyOf(runtimeStats);
    for (StageInfo stageInfo : getAllStages(rootStage)) {
        StageExecutionStats stageExecutionStats = stageInfo.getLatestAttemptExecutionInfo().getStats();
        totalTasks += stageExecutionStats.getTotalTasks();
        runningTasks += stageExecutionStats.getRunningTasks();
        completedTasks += stageExecutionStats.getCompletedTasks();
        totalDrivers += stageExecutionStats.getTotalDrivers();
        queuedDrivers += stageExecutionStats.getQueuedDrivers();
        runningDrivers += stageExecutionStats.getRunningDrivers();
        blockedDrivers += stageExecutionStats.getBlockedDrivers();
        completedDrivers += stageExecutionStats.getCompletedDrivers();
        cumulativeUserMemory += stageExecutionStats.getCumulativeUserMemory();
        cumulativeTotalMemory += stageExecutionStats.getCumulativeTotalMemory();
        userMemoryReservation += stageExecutionStats.getUserMemoryReservation().toBytes();
        totalMemoryReservation += stageExecutionStats.getTotalMemoryReservation().toBytes();
        // Durations are accumulated as plain longs in milliseconds and wrapped back into Durations at the end.
        totalScheduledTime += stageExecutionStats.getTotalScheduledTime().roundTo(MILLISECONDS);
        totalCpuTime += stageExecutionStats.getTotalCpuTime().roundTo(MILLISECONDS);
        retriedCpuTime += computeRetriedCpuTime(stageInfo);
        totalBlockedTime += stageExecutionStats.getTotalBlockedTime().roundTo(MILLISECONDS);
        if (!stageInfo.getLatestAttemptExecutionInfo().getState().isDone()) {
            fullyBlocked &= stageExecutionStats.isFullyBlocked();
            blockedReasons.addAll(stageExecutionStats.getBlockedReasons());
        }
        totalAllocation += stageExecutionStats.getTotalAllocation().toBytes();
        if (stageInfo.getPlan().isPresent()) {
            PlanFragment plan = stageInfo.getPlan().get();
            if (!plan.getTableScanSchedulingOrder().isEmpty()) {
                rawInputDataSize += stageExecutionStats.getRawInputDataSize().toBytes();
                rawInputPositions += stageExecutionStats.getRawInputPositions();
                processedInputDataSize += stageExecutionStats.getProcessedInputDataSize().toBytes();
                processedInputPositions += stageExecutionStats.getProcessedInputPositions();
            }
            if (plan.isOutputTableWriterFragment()) {
                writtenOutputPositions += stageExecutionStats.getOperatorSummaries().stream()
                        .filter(stats -> stats.getOperatorType().equals(TableWriterOperator.class.getSimpleName()))
                        .mapToLong(OperatorStats::getInputPositions)
                        .sum();
                writtenOutputLogicalDataSize += stageExecutionStats.getOperatorSummaries().stream()
                        .filter(stats -> stats.getOperatorType().equals(TableWriterOperator.class.getSimpleName()))
                        .mapToLong(stats -> stats.getInputDataSize().toBytes())
                        .sum();
                writtenOutputPhysicalDataSize += stageExecutionStats.getPhysicalWrittenDataSize().toBytes();
            } else {
                writtenIntermediatePhysicalDataSize += stageExecutionStats.getPhysicalWrittenDataSize().toBytes();
            }
        }
        stageGcStatistics.add(stageExecutionStats.getGcInfo());
        completeInfo = completeInfo && stageInfo.isFinalStageInfo();
        operatorStatsSummary.addAll(stageExecutionStats.getOperatorSummaries());
        // We prepend each metric name with the stage id to avoid merging metrics across stages.
        int stageId = stageInfo.getStageId().getId();
        stageExecutionStats.getRuntimeStats().getMetrics().forEach((name, metric) -> {
            String metricName = String.format("S%d-%s", stageId, name);
            mergedRuntimeStats.mergeMetric(metricName, metric);
        });
    }
    if (rootStage.isPresent()) {
        StageExecutionStats outputStageStats = rootStage.get().getLatestAttemptExecutionInfo().getStats();
        outputDataSize += outputStageStats.getOutputDataSize().toBytes();
        outputPositions += outputStageStats.getOutputPositions();
    }
    return new QueryStats(
            queryStateTimer.getCreateTime(),
            queryStateTimer.getExecutionStartTime().orElse(null),
            queryStateTimer.getLastHeartbeat(),
            queryStateTimer.getEndTime().orElse(null),
            queryStateTimer.getElapsedTime(),
            queryStateTimer.getWaitingForPrerequisitesTime(),
            queryStateTimer.getQueuedTime(),
            queryStateTimer.getResourceWaitingTime(),
            queryStateTimer.getSemanticAnalyzingTime(),
            queryStateTimer.getColumnAccessPermissionCheckingTime(),
            queryStateTimer.getDispatchingTime(),
            queryStateTimer.getExecutionTime(),
            queryStateTimer.getAnalysisTime(),
            queryStateTimer.getPlanningTime(),
            queryStateTimer.getFinishingTime(),
            totalTasks,
            runningTasks,
            peakRunningTasks,
            completedTasks,
            totalDrivers,
            queuedDrivers,
            runningDrivers,
            blockedDrivers,
            completedDrivers,
            cumulativeUserMemory,
            cumulativeTotalMemory,
            succinctBytes(userMemoryReservation),
            succinctBytes(totalMemoryReservation),
            peakUserMemoryReservation,
            peakTotalMemoryReservation,
            peakTaskUserMemory,
            peakTaskTotalMemory,
            peakNodeTotalMemory,
            isScheduled(rootStage),
            succinctDuration(totalScheduledTime, MILLISECONDS),
            succinctDuration(totalCpuTime, MILLISECONDS),
            succinctDuration(retriedCpuTime, MILLISECONDS),
            succinctDuration(totalBlockedTime, MILLISECONDS),
            fullyBlocked,
            blockedReasons,
            succinctBytes(totalAllocation),
            succinctBytes(rawInputDataSize),
            rawInputPositions,
            succinctBytes(processedInputDataSize),
            processedInputPositions,
            succinctBytes(outputDataSize),
            outputPositions,
            writtenOutputPositions,
            succinctBytes(writtenOutputLogicalDataSize),
            succinctBytes(writtenOutputPhysicalDataSize),
            succinctBytes(writtenIntermediatePhysicalDataSize),
            stageGcStatistics.build(),
            operatorStatsSummary.build(),
            mergedRuntimeStats);
}
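The loop flattens each stage's scheduled, CPU, and blocked times into long counters via roundTo(MILLISECONDS) and only wraps the totals back into duration objects (succinctDuration) when constructing the QueryStats. A minimal sketch of that accumulate-as-millis pattern using only java.time and TimeUnit (StageStats and its fields are hypothetical, for illustration only):

import java.time.Duration;
import java.util.List;
import java.util.concurrent.TimeUnit;

final class CpuTimeAggregation {

    // Hypothetical per-stage stat holder used only for this illustration.
    record StageStats(Duration cpuTime, Duration scheduledTime) {}

    static Duration totalCpuTime(List<StageStats> stages) {
        long totalCpuMillis = 0;
        for (StageStats stage : stages) {
            // Accumulate as a primitive long in milliseconds, like the counters above.
            totalCpuMillis += stage.cpuTime().toMillis();
        }
        // Wrap the total back into a Duration only once, at the end.
        return Duration.ofMillis(totalCpuMillis);
    }

    public static void main(String[] args) {
        List<StageStats> stages = List.of(
                new StageStats(Duration.ofMillis(1_250), Duration.ofMillis(2_000)),
                new StageStats(Duration.ofMillis(750), Duration.ofMillis(1_500)));
        System.out.println(totalCpuTime(stages));                                      // PT2S
        System.out.println(TimeUnit.MILLISECONDS.toSeconds(totalCpuTime(stages).toMillis())); // 2
    }
}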
Use of java.util.concurrent.TimeUnit.MILLISECONDS in project presto by prestodb.
Class IcebergModule, method createStripeMetadataSourceFactory.
@Singleton
@Provides
public StripeMetadataSourceFactory createStripeMetadataSourceFactory(OrcCacheConfig orcCacheConfig, MBeanExporter exporter) {
    StripeMetadataSource stripeMetadataSource = new StorageStripeMetadataSource();
    if (orcCacheConfig.isStripeMetadataCacheEnabled()) {
        Cache<StripeReader.StripeId, Slice> footerCache = CacheBuilder.newBuilder()
                .maximumWeight(orcCacheConfig.getStripeFooterCacheSize().toBytes())
                .weigher((id, footer) -> toIntExact(((Slice) footer).getRetainedSize()))
                .expireAfterAccess(orcCacheConfig.getStripeFooterCacheTtlSinceLastAccess().toMillis(), MILLISECONDS)
                .recordStats()
                .build();
        Cache<StripeReader.StripeStreamId, Slice> streamCache = CacheBuilder.newBuilder()
                .maximumWeight(orcCacheConfig.getStripeStreamCacheSize().toBytes())
                .weigher((id, stream) -> toIntExact(((Slice) stream).getRetainedSize()))
                .expireAfterAccess(orcCacheConfig.getStripeStreamCacheTtlSinceLastAccess().toMillis(), MILLISECONDS)
                .recordStats()
                .build();
        CacheStatsMBean footerCacheStatsMBean = new CacheStatsMBean(footerCache);
        CacheStatsMBean streamCacheStatsMBean = new CacheStatsMBean(streamCache);
        exporter.export(generatedNameOf(CacheStatsMBean.class, connectorId + "_StripeFooter"), footerCacheStatsMBean);
        exporter.export(generatedNameOf(CacheStatsMBean.class, connectorId + "_StripeStream"), streamCacheStatsMBean);
        Optional<Cache<StripeReader.StripeStreamId, List<RowGroupIndex>>> rowGroupIndexCache = Optional.empty();
        if (orcCacheConfig.isRowGroupIndexCacheEnabled()) {
            rowGroupIndexCache = Optional.of(CacheBuilder.newBuilder()
                    .maximumWeight(orcCacheConfig.getRowGroupIndexCacheSize().toBytes())
                    .weigher((id, rowGroupIndices) -> toIntExact(((List<RowGroupIndex>) rowGroupIndices).stream()
                            .mapToLong(RowGroupIndex::getRetainedSizeInBytes)
                            .sum()))
                    .expireAfterAccess(orcCacheConfig.getStripeStreamCacheTtlSinceLastAccess().toMillis(), MILLISECONDS)
                    .recordStats()
                    .build());
            CacheStatsMBean rowGroupIndexCacheStatsMBean = new CacheStatsMBean(rowGroupIndexCache.get());
            exporter.export(generatedNameOf(CacheStatsMBean.class, connectorId + "_StripeStreamRowGroupIndex"), rowGroupIndexCacheStatsMBean);
        }
        stripeMetadataSource = new CachingStripeMetadataSource(stripeMetadataSource, footerCache, streamCache, rowGroupIndexCache);
    }
    StripeMetadataSourceFactory factory = StripeMetadataSourceFactory.of(stripeMetadataSource);
    if (orcCacheConfig.isDwrfStripeCacheEnabled()) {
        factory = new DwrfAwareStripeMetadataSourceFactory(factory);
    }
    return factory;
}
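Each cache above bounds its size by weight and expires entries a configured number of milliseconds after the last access. A minimal, self-contained Guava sketch of the same cache configuration (the key type, weight cap, and TTL are illustrative values, not the connector's defaults):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import static java.util.concurrent.TimeUnit.MILLISECONDS;

public class ExpiringCacheExample {

    public static void main(String[] args) {
        // Size-bounded cache whose entries expire 10 minutes after last access,
        // with the TTL expressed in milliseconds as in the module above.
        long ttlMillis = 10 * 60 * 1000L;
        Cache<String, byte[]> footerCache = CacheBuilder.newBuilder()
                .maximumWeight(64 * 1024 * 1024L)                      // cap total weight at 64 MB
                .weigher((String key, byte[] value) -> value.length)   // weight = entry size in bytes
                .expireAfterAccess(ttlMillis, MILLISECONDS)
                .recordStats()
                .build();

        footerCache.put("stripe-0", new byte[1024]);
        System.out.println(footerCache.getIfPresent("stripe-0").length); // 1024
        System.out.println(footerCache.stats().hitCount());              // 1
    }
}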