Use of io.prestosql.spi.memory.MemoryPoolId in project hetu-core by openlookeng.
The class ClusterMemoryManager, method getUsedMemory:
public synchronized Long getUsedMemory() {
    Long usedMemory = 0L;
    for (Entry<String, RemoteNodeMemory> entry : nodes.entrySet()) {
        MemoryInfo memoryInfo = entry.getValue().getInfo().orElse(new MemoryInfo(0, 0, 0, new DataSize(0, DataSize.Unit.BYTE), new HashMap<>()));
        Map<MemoryPoolId, MemoryPoolInfo> memoryPools = memoryInfo.getPools();
        MemoryPoolId general = new MemoryPoolId("general");
        MemoryPoolId reserved = new MemoryPoolId("reserved");
        if (memoryPools.containsKey(general)) {
            usedMemory += memoryPools.get(general).getReservedBytes();
        }
        if (memoryPools.containsKey(reserved)) {
            usedMemory += memoryPools.get(reserved).getReservedBytes();
        }
    }
    return usedMemory;
}
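The lookups above work because MemoryPoolId acts as a value key: freshly constructed "general" and "reserved" ids index the pool map reported by each node. A minimal sketch of the same per-node accumulation, factored into a helper (the class and method names here are illustrative, not part of hetu-core):

import io.prestosql.spi.memory.MemoryPoolId;
import io.prestosql.spi.memory.MemoryPoolInfo;

import java.util.Map;

public final class PoolUsage {
    private PoolUsage() {}

    // Sums the reserved bytes of the "general" and "reserved" pools for a single node,
    // mirroring one iteration of the loop in getUsedMemory() above.
    public static long reservedBytes(Map<MemoryPoolId, MemoryPoolInfo> pools) {
        long used = 0L;
        MemoryPoolId general = new MemoryPoolId("general");
        MemoryPoolId reserved = new MemoryPoolId("reserved");
        if (pools.containsKey(general)) {
            used += pools.get(general).getReservedBytes();
        }
        if (pools.containsKey(reserved)) {
            used += pools.get(reserved).getReservedBytes();
        }
        return used;
    }
}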
Use of io.prestosql.spi.memory.MemoryPoolId in project hetu-core by openlookeng.
The class GroupByHashYieldAssertion, method finishOperatorWithYieldingGroupByHash:
/**
* @param operatorFactory creates an Operator that should directly or indirectly contain GroupByHash
* @param getHashCapacity returns the hash table capacity for the input operator
* @param additionalMemoryInBytes the memory used in addition to the GroupByHash in the operator (e.g., aggregator)
*/
public static GroupByHashYieldResult finishOperatorWithYieldingGroupByHash(
        List<Page> input,
        Type hashKeyType,
        OperatorFactory operatorFactory,
        Function<Operator, Integer> getHashCapacity,
        long additionalMemoryInBytes) {
    assertLessThan(additionalMemoryInBytes, 1L << 21, "additionalMemoryInBytes should be a relatively small number");
    List<Page> result = new LinkedList<>();
    // mock an adjustable memory pool
    QueryId queryId = new QueryId("test_query");
    MemoryPool memoryPool = new MemoryPool(new MemoryPoolId("test"), new DataSize(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(
            queryId,
            new DataSize(512, MEGABYTE),
            new DataSize(1024, MEGABYTE),
            memoryPool,
            new TestingGcMonitor(),
            EXECUTOR,
            SCHEDULED_EXECUTOR,
            new DataSize(512, MEGABYTE),
            new SpillSpaceTracker(new DataSize(512, MEGABYTE)),
            NOOP_SNAPSHOT_UTILS);
    DriverContext driverContext = createTaskContext(queryContext, EXECUTOR, TEST_SESSION)
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
    Operator operator = operatorFactory.createOperator(driverContext);
    // run operator
    int yieldCount = 0;
    long expectedReservedExtraBytes = 0;
    for (Page page : input) {
        // unblocked
        assertTrue(operator.needsInput());
        // saturate the pool with a tiny memory left
        long reservedMemoryInBytes = memoryPool.getFreeBytes() - additionalMemoryInBytes;
        memoryPool.reserve(queryId, "test", reservedMemoryInBytes);
        long oldMemoryUsage = operator.getOperatorContext().getDriverContext().getMemoryUsage();
        int oldCapacity = getHashCapacity.apply(operator);
        // add a page and verify different behaviors
        operator.addInput(page);
        // get output to consume the input
        Page output = operator.getOutput();
        if (output != null) {
            result.add(output);
        }
        long newMemoryUsage = operator.getOperatorContext().getDriverContext().getMemoryUsage();
        // Skip this iteration if the memory usage is not large enough, since we cannot
        // distinguish between a rehash and memory used by the aggregator
        if (newMemoryUsage < new DataSize(4, MEGABYTE).toBytes()) {
            // free the pool for the next iteration
            memoryPool.free(queryId, "test", reservedMemoryInBytes);
            // this is required in case the input is blocked
            operator.getOutput();
            continue;
        }
        long actualIncreasedMemory = newMemoryUsage - oldMemoryUsage;
        if (operator.needsInput()) {
            // We have successfully added a page
            // Assert we are not blocked
            assertTrue(operator.getOperatorContext().isWaitingForMemory().isDone());
            // assert the hash capacity is not changed; otherwise, we should have yielded
            assertTrue(oldCapacity == getHashCapacity.apply(operator));
            // We are not going to rehash; therefore, assert the memory increase only comes from the aggregator
            assertLessThan(actualIncreasedMemory, additionalMemoryInBytes);
            // free the pool for the next iteration
            memoryPool.free(queryId, "test", reservedMemoryInBytes);
        } else {
            // We failed to finish the page processing i.e. we yielded
            yieldCount++;
            // Assert we are blocked
            assertFalse(operator.getOperatorContext().isWaitingForMemory().isDone());
            // Hash table capacity should not change
            assertEquals(oldCapacity, (long) getHashCapacity.apply(operator));
            // Increased memory is no smaller than the hash table size and no greater than the hash table size + the memory used by aggregator
            if (hashKeyType == BIGINT) {
                // groupIds and values double by hashCapacity; while valuesByGroupId double by maxFill = hashCapacity / 0.75
                expectedReservedExtraBytes = oldCapacity * (long) (Long.BYTES * 1.75 + Integer.BYTES) + page.getRetainedSizeInBytes();
            } else {
                // groupAddressByHash, groupIdsByHash, and rawHashByHashPosition double by hashCapacity; while groupAddressByGroupId double by maxFill = hashCapacity / 0.75
                expectedReservedExtraBytes = oldCapacity * (long) (Long.BYTES * 1.75 + Integer.BYTES + Byte.BYTES) + page.getRetainedSizeInBytes();
            }
            assertBetweenInclusive(actualIncreasedMemory, expectedReservedExtraBytes, expectedReservedExtraBytes + additionalMemoryInBytes);
            // Output should be blocked as well
            assertNull(operator.getOutput());
            // Free the pool to unblock
            memoryPool.free(queryId, "test", reservedMemoryInBytes);
            // Trigger a process through getOutput() or needsInput()
            output = operator.getOutput();
            if (output != null) {
                result.add(output);
            }
            assertTrue(operator.needsInput());
            // Hash table capacity has increased
            assertGreaterThan(getHashCapacity.apply(operator), oldCapacity);
            // Assert the estimated reserved memory before rehash is very close to the one after rehash
            long rehashedMemoryUsage = operator.getOperatorContext().getDriverContext().getMemoryUsage();
            assertBetweenInclusive(rehashedMemoryUsage * 1.0 / newMemoryUsage, 0.99, 1.01);
            // unblocked
            assertTrue(operator.needsInput());
        }
    }
    result.addAll(finishOperator(operator));
    return new GroupByHashYieldResult(yieldCount, expectedReservedExtraBytes, result);
}
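For orientation, a call site might look roughly like the sketch below. The input builder, the operator factory, and the capacity accessor are assumptions for illustration; none of them is defined in the snippet above.

// Hypothetical test body: run an aggregation operator under memory pressure and
// check that it yielded at least once. Names marked "assumed" are illustrative.
List<Page> input = createPagesWithDistinctHashKeys(BIGINT, 5_000, 500); // assumed helper building pages with distinct keys
GroupByHashYieldResult result = finishOperatorWithYieldingGroupByHash(
        input,
        BIGINT,
        operatorFactory,                                                // assumed: creates an operator containing a GroupByHash
        operator -> ((HashAggregationOperator) operator).getCapacity(), // assumed accessor for the hash table capacity
        1_048_576);                                                     // extra memory attributed to the aggregator, in bytes
assertGreaterThan(result.getYieldCount(), 0);                           // assumed getter on GroupByHashYieldResult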
Use of io.prestosql.spi.memory.MemoryPoolId in project hetu-core by openlookeng.
The class AbstractOperatorBenchmark, method runOnce:
@Override
protected Map<String, Long> runOnce() {
    Session setSession = testSessionBuilder().setSystemProperty("optimizer.optimize-hash-generation", "true").build();
    MemoryPool memoryPool = new MemoryPool(new MemoryPoolId("test"), new DataSize(1, GIGABYTE));
    SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(new DataSize(1, GIGABYTE));
    TaskContext taskContext = new QueryContext(new QueryId("test"), new DataSize(256, MEGABYTE), new DataSize(512, MEGABYTE), memoryPool, new TestingGcMonitor(), localQueryRunner.getExecutor(), localQueryRunner.getScheduler(), new DataSize(256, MEGABYTE), spillSpaceTracker, NOOP_SNAPSHOT_UTILS)
            .addTaskContext(new TaskStateMachine(new TaskId("query", 0, 0), localQueryRunner.getExecutor()), setSession, false, false, OptionalInt.empty(), Optional.empty(), TESTING_SERDE_FACTORY);
    CpuTimer cpuTimer = new CpuTimer();
    Map<String, Long> executionStats = execute(taskContext);
    CpuDuration executionTime = cpuTimer.elapsedTime();
    TaskStats taskStats = taskContext.getTaskStats();
    long inputRows = taskStats.getRawInputPositions();
    long inputBytes = taskStats.getRawInputDataSize().toBytes();
    long outputRows = taskStats.getOutputPositions();
    long outputBytes = taskStats.getOutputDataSize().toBytes();
    double inputMegaBytes = new DataSize(inputBytes, BYTE).getValue(MEGABYTE);
    return ImmutableMap.<String, Long>builder()
            .putAll(executionStats)
            .put("elapsed_millis", executionTime.getWall().toMillis())
            .put("input_rows_per_second", (long) (inputRows / executionTime.getWall().getValue(SECONDS)))
            .put("output_rows_per_second", (long) (outputRows / executionTime.getWall().getValue(SECONDS)))
            .put("input_megabytes", (long) inputMegaBytes)
            .put("input_megabytes_per_second", (long) (inputMegaBytes / executionTime.getWall().getValue(SECONDS)))
            .put("wall_nanos", executionTime.getWall().roundTo(NANOSECONDS))
            .put("cpu_nanos", executionTime.getCpu().roundTo(NANOSECONDS))
            .put("user_nanos", executionTime.getUser().roundTo(NANOSECONDS))
            .put("input_rows", inputRows)
            .put("input_bytes", inputBytes)
            .put("output_rows", outputRows)
            .put("output_bytes", outputBytes)
            .build();
}
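The keys in the returned map come straight from the builder above; the benchmark framework (or a subclass) could report a few of them like this (the printing itself is purely illustrative):

Map<String, Long> stats = runOnce();
System.out.printf("wall: %d ms, input: %d rows (%d rows/s), output: %d rows%n",
        stats.get("elapsed_millis"),
        stats.get("input_rows"),
        stats.get("input_rows_per_second"),
        stats.get("output_rows"));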
Use of io.prestosql.spi.memory.MemoryPoolId in project hetu-core by openlookeng.
The class TestMemoryPools, method testMoveUnknownQuery:
@Test
public void testMoveUnknownQuery() {
    QueryId testQuery = new QueryId("test_query");
    MemoryPool pool1 = new MemoryPool(new MemoryPoolId("test"), new DataSize(1000, BYTE));
    MemoryPool pool2 = new MemoryPool(new MemoryPoolId("test"), new DataSize(1000, BYTE));
    assertNull(pool1.getTaggedMemoryAllocations().get(testQuery));
    pool1.moveQuery(testQuery, pool2);
    assertNull(pool1.getTaggedMemoryAllocations().get(testQuery));
    assertNull(pool2.getTaggedMemoryAllocations().get(testQuery));
}
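By contrast, when the query does hold a reservation, moving it should transfer its accounting to the target pool. A hedged sketch of that counterpart case (the byte counts and the expected semantics of moveQuery are assumptions, not taken from the test above):

QueryId knownQuery = new QueryId("known_query");
MemoryPool source = new MemoryPool(new MemoryPoolId("source"), new DataSize(1000, BYTE));
MemoryPool target = new MemoryPool(new MemoryPoolId("target"), new DataSize(1000, BYTE));
source.reserve(knownQuery, "test_tag", 100);
source.moveQuery(knownQuery, target);
// expectation (assumed): the reservation now lives in the target pool
assertEquals(source.getReservedBytes(), 0L);
assertEquals(target.getReservedBytes(), 100L);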
Use of io.prestosql.spi.memory.MemoryPoolId in project hetu-core by openlookeng.
The class TestMemoryPools, method setUp:
private void setUp(Supplier<List<Driver>> driversSupplier) {
    checkState(localQueryRunner == null, "Already set up");
    Session session = testSessionBuilder()
            .setCatalog("tpch")
            .setSchema("tiny")
            .setSystemProperty("task_default_concurrency", "1")
            .build();
    localQueryRunner = queryRunnerWithInitialTransaction(session);
    // add tpch
    localQueryRunner.createCatalog("tpch", new TpchConnectorFactory(1), ImmutableMap.of());
    userPool = new MemoryPool(new MemoryPoolId("test"), TEN_MEGABYTES);
    fakeQueryId = new QueryId("fake");
    SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(new DataSize(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(new QueryId("query"), TEN_MEGABYTES, new DataSize(20, MEGABYTE), userPool, new TestingGcMonitor(), localQueryRunner.getExecutor(), localQueryRunner.getScheduler(), TEN_MEGABYTES, spillSpaceTracker, NOOP_SNAPSHOT_UTILS);
    taskContext = createTaskContext(queryContext, localQueryRunner.getExecutor(), session);
    drivers = driversSupplier.get();
}
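Once set up, tests typically create memory pressure through the shared pool before exercising the drivers. A minimal sketch using the fields initialized above (the reserved amount is arbitrary and the elided driver processing is left out):

// Take almost the whole pool with the fake query so the drivers see memory pressure.
userPool.reserve(fakeQueryId, "test", TEN_MEGABYTES.toBytes() - 1);
assertEquals(userPool.getFreeBytes(), 1L);
// ... process the drivers obtained from driversSupplier and assert the expected blocking/yielding behavior ...
userPool.free(fakeQueryId, "test", TEN_MEGABYTES.toBytes() - 1);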