Usage of io.airlift.units.DataSize in the Presto project (prestodb):
class TestResourceGroups, method testFairEligibility.
// Verifies fair admission across sibling groups: when a running slot frees
// up, the query from the group that has been waiting longest is started,
// and a group that just ran is not immediately eligible again.
@Test(timeOut = 10_000)
public void testFairEligibility() {
    RootInternalResourceGroup root = new RootInternalResourceGroup("root", (group, export) -> {
    }, directExecutor());
    configureFairnessLimits(root);
    InternalResourceGroup group1 = root.getOrCreateSubGroup("1");
    configureFairnessLimits(group1);
    InternalResourceGroup group2 = root.getOrCreateSubGroup("2");
    configureFairnessLimits(group2);
    InternalResourceGroup group3 = root.getOrCreateSubGroup("3");
    configureFairnessLimits(group3);
    // Fill the single root slot with group1's first query; everything else queues.
    MockQueryExecution query1a = new MockQueryExecution(0);
    group1.run(query1a);
    assertEquals(query1a.getState(), RUNNING);
    MockQueryExecution query1b = new MockQueryExecution(0);
    group1.run(query1b);
    assertEquals(query1b.getState(), QUEUED);
    MockQueryExecution query2a = new MockQueryExecution(0);
    group2.run(query2a);
    assertEquals(query2a.getState(), QUEUED);
    MockQueryExecution query2b = new MockQueryExecution(0);
    group2.run(query2b);
    assertEquals(query2b.getState(), QUEUED);
    MockQueryExecution query3a = new MockQueryExecution(0);
    group3.run(query3a);
    assertEquals(query3a.getState(), QUEUED);
    query1a.complete();
    root.processQueuedQueries();
    // 2a and not 1b should have started, as group1 was not eligible to start a second query
    assertEquals(query1b.getState(), QUEUED);
    assertEquals(query2a.getState(), RUNNING);
    assertEquals(query2b.getState(), QUEUED);
    assertEquals(query3a.getState(), QUEUED);
    query2a.complete();
    root.processQueuedQueries();
    // group3 has now waited longest, so it goes before group1's and group2's second queries.
    assertEquals(query3a.getState(), RUNNING);
    assertEquals(query2b.getState(), QUEUED);
    assertEquals(query1b.getState(), QUEUED);
    query3a.complete();
    root.processQueuedQueries();
    // Fairness wraps around to group1 before group2 gets a second turn.
    assertEquals(query1b.getState(), RUNNING);
    assertEquals(query2b.getState(), QUEUED);
}

// Applies the limits shared by every group in this test:
// 1MB soft memory, at most 4 queued and 1 running query.
private static void configureFairnessLimits(InternalResourceGroup group) {
    group.setSoftMemoryLimit(new DataSize(1, MEGABYTE));
    group.setMaxQueuedQueries(4);
    group.setMaxRunningQueries(1);
}
Usage of io.airlift.units.DataSize in the Presto project (prestodb):
class TestResourceGroups, method testMemoryLimit.
// Verifies that a query pushing the group past its soft memory limit blocks
// further admissions, and that queued queries start once it completes.
@Test(timeOut = 10_000)
public void testMemoryLimit() {
    RootInternalResourceGroup root = new RootInternalResourceGroup("root", (group, export) -> {
    }, directExecutor());
    // 1 byte soft limit: any query with nonzero memory usage saturates it.
    root.setSoftMemoryLimit(new DataSize(1, BYTE));
    root.setMaxQueuedQueries(4);
    root.setMaxRunningQueries(3);
    MockQueryExecution memoryHog = new MockQueryExecution(1);
    root.run(memoryHog);
    // Process the group so its memory statistics are refreshed.
    root.processQueuedQueries();
    assertEquals(memoryHog.getState(), RUNNING);
    MockQueryExecution waiter1 = new MockQueryExecution(0);
    root.run(waiter1);
    assertEquals(waiter1.getState(), QUEUED);
    MockQueryExecution waiter2 = new MockQueryExecution(0);
    root.run(waiter2);
    assertEquals(waiter2.getState(), QUEUED);
    memoryHog.complete();
    root.processQueuedQueries();
    // With memory freed and 3 running slots, both waiters are admitted.
    assertEquals(waiter1.getState(), RUNNING);
    assertEquals(waiter2.getState(), RUNNING);
}
Usage of io.airlift.units.DataSize in the Presto project (prestodb):
class TestResourceGroups, method testPriorityScheduling.
// Verifies QUERY_PRIORITY scheduling: 100 queries with random distinct
// priorities, spread over two subgroups, must run in strictly descending
// priority order regardless of which subgroup they were submitted to.
@Test(timeOut = 10_000)
public void testPriorityScheduling() {
    RootInternalResourceGroup root = new RootInternalResourceGroup("root", (group, export) -> {
    }, directExecutor());
    root.setSoftMemoryLimit(new DataSize(1, MEGABYTE));
    root.setMaxQueuedQueries(100);
    // Start with zero capacity, so that nothing starts running until we've added all the queries
    root.setMaxRunningQueries(0);
    root.setSchedulingPolicy(QUERY_PRIORITY);
    InternalResourceGroup group1 = root.getOrCreateSubGroup("1");
    group1.setSoftMemoryLimit(new DataSize(1, MEGABYTE));
    group1.setMaxQueuedQueries(100);
    group1.setMaxRunningQueries(1);
    InternalResourceGroup group2 = root.getOrCreateSubGroup("2");
    group2.setSoftMemoryLimit(new DataSize(1, MEGABYTE));
    group2.setMaxQueuedQueries(100);
    group2.setMaxRunningQueries(1);
    NavigableMap<Integer, MockQueryExecution> queriesByPriority = new TreeMap<>();
    Random rng = new Random();
    for (int i = 0; i < 100; i++) {
        // Draw a fresh, unused positive priority so map keys stay distinct.
        int priority;
        do {
            priority = rng.nextInt(1_000_000) + 1;
        } while (queriesByPriority.containsKey(priority));
        MockQueryExecution query = new MockQueryExecution(0, "query_id", priority);
        if (rng.nextBoolean()) {
            group1.run(query);
        }
        else {
            group2.run(query);
        }
        queriesByPriority.put(priority, query);
    }
    // Open a single slot; each processing pass should admit exactly the
    // highest-priority remaining query.
    root.setMaxRunningQueries(1);
    for (MockQueryExecution query : queriesByPriority.descendingMap().values()) {
        root.processQueuedQueries();
        assertEquals(query.getState(), RUNNING);
        query.complete();
    }
}
Usage of io.airlift.units.DataSize in the Presto project (prestodb):
class AbstractBenchmark, method runBenchmark.
/**
 * Runs the benchmark: executes the warmup iterations (results discarded),
 * then the measured iterations, feeding each result map to the optional
 * hook and into a running average, and finally prints a one-line summary
 * of the averaged cpu time, row counts, data sizes, and rates.
 *
 * @param benchmarkResultHook optional sink for per-iteration results; may be null
 */
public void runBenchmark(@Nullable BenchmarkResultHook benchmarkResultHook) {
    AverageBenchmarkResults averages = new AverageBenchmarkResults();
    setUp();
    try {
        // Warmup passes are run purely for their side effects.
        for (int iteration = 0; iteration < warmupIterations; iteration++) {
            runOnce();
        }
        for (int iteration = 0; iteration < measuredIterations; iteration++) {
            Map<String, Long> results = runOnce();
            if (benchmarkResultHook != null) {
                benchmarkResultHook.addResults(results);
            }
            averages.addResults(results);
        }
    }
    catch (Throwable t) {
        throw new RuntimeException("Exception in " + getBenchmarkName(), t);
    }
    finally {
        // tearDown runs even when an iteration throws.
        tearDown();
    }
    if (benchmarkResultHook != null) {
        benchmarkResultHook.finished();
    }
    Map<String, Double> averaged = averages.getAverageResultsValues();
    Duration cpu = new Duration(averaged.get("cpu_nanos"), NANOSECONDS);
    long inputRows = averaged.get("input_rows").longValue();
    DataSize inputBytes = new DataSize(averaged.get("input_bytes"), BYTE);
    long outputRows = averaged.get("output_rows").longValue();
    DataSize outputBytes = new DataSize(averaged.get("output_bytes"), BYTE);
    System.out.printf("%35s :: %8.3f cpu ms :: in %5s, %6s, %8s, %8s :: out %5s, %6s, %8s, %8s%n",
            getBenchmarkName(),
            cpu.getValue(MILLISECONDS),
            formatCount(inputRows),
            formatDataSize(inputBytes, true),
            formatCountRate(inputRows, cpu, true),
            formatDataRate(inputBytes, cpu, true),
            formatCount(outputRows),
            formatDataSize(outputBytes, true),
            formatCountRate(outputRows, cpu, true),
            formatDataRate(outputBytes, cpu, true));
}
Usage of io.airlift.units.DataSize in the Presto project (prestodb):
class AbstractOperatorBenchmark, method runOnce.
/**
 * Executes the operator under a fresh task context and returns the
 * measured statistics (timings, row counts, byte counts, and derived
 * per-second rates) keyed by metric name.
 */
@Override
protected Map<String, Long> runOnce() {
    Session session = testSessionBuilder()
            .setSystemProperty("optimizer.optimize-hash-generation", "true")
            .build();
    ExecutorService executor = localQueryRunner.getExecutor();
    MemoryPool memoryPool = new MemoryPool(new MemoryPoolId("test"), new DataSize(1, GIGABYTE));
    MemoryPool systemMemoryPool = new MemoryPool(new MemoryPoolId("testSystem"), new DataSize(1, GIGABYTE));
    TaskContext taskContext = new QueryContext(new QueryId("test"), new DataSize(256, MEGABYTE), memoryPool, systemMemoryPool, executor)
            .addTaskContext(new TaskStateMachine(new TaskId("query", 0, 0), executor), session, false, false);
    // Time only the execution of the operator itself.
    CpuTimer cpuTimer = new CpuTimer();
    execute(taskContext);
    CpuDuration executionTime = cpuTimer.elapsedTime();
    TaskStats taskStats = taskContext.getTaskStats();
    long inputRows = taskStats.getRawInputPositions();
    long inputBytes = taskStats.getRawInputDataSize().toBytes();
    long outputRows = taskStats.getOutputPositions();
    long outputBytes = taskStats.getOutputDataSize().toBytes();
    double inputMegaBytes = new DataSize(inputBytes, BYTE).getValue(MEGABYTE);
    // Wall time in seconds is reused for every rate computation below.
    double wallSeconds = executionTime.getWall().getValue(SECONDS);
    ImmutableMap.Builder<String, Long> results = ImmutableMap.builder();
    results.put("elapsed_millis", executionTime.getWall().toMillis());
    results.put("input_rows_per_second", (long) (inputRows / wallSeconds));
    results.put("output_rows_per_second", (long) (outputRows / wallSeconds));
    results.put("input_megabytes", (long) inputMegaBytes);
    results.put("input_megabytes_per_second", (long) (inputMegaBytes / wallSeconds));
    results.put("wall_nanos", executionTime.getWall().roundTo(NANOSECONDS));
    results.put("cpu_nanos", executionTime.getCpu().roundTo(NANOSECONDS));
    results.put("user_nanos", executionTime.getUser().roundTo(NANOSECONDS));
    results.put("input_rows", inputRows);
    results.put("input_bytes", inputBytes);
    results.put("output_rows", outputRows);
    results.put("output_bytes", outputBytes);
    return results.build();
}
Aggregations