Use of io.prestosql.spiller.SpillSpaceTracker in project hetu-core by openlookeng.
In class TestQueryContext, method testSetMemoryPool:
@Test(dataProvider = "testSetMemoryPoolOptions")
public void testSetMemoryPool(boolean useReservedPool) {
    QueryId secondQuery = new QueryId("second");
    MemoryPool reservedPool = new MemoryPool(RESERVED_POOL, new DataSize(10, BYTE));
    long secondQueryMemory = reservedPool.getMaxBytes() - 1;
    if (useReservedPool) {
        assertTrue(reservedPool.reserve(secondQuery, "test", secondQueryMemory).isDone());
    }
    try (LocalQueryRunner localQueryRunner = new LocalQueryRunner(TEST_SESSION)) {
        QueryContext queryContext = new QueryContext(new QueryId("query"), new DataSize(10, BYTE), new DataSize(20, BYTE), new MemoryPool(GENERAL_POOL, new DataSize(10, BYTE)), new TestingGcMonitor(), localQueryRunner.getExecutor(), localQueryRunner.getScheduler(), new DataSize(0, BYTE), new SpillSpaceTracker(new DataSize(0, BYTE)), NOOP_SNAPSHOT_UTILS);
        // Use memory
        queryContext.getQueryMemoryContext().initializeLocalMemoryContexts("test");
        LocalMemoryContext userMemoryContext = queryContext.getQueryMemoryContext().localUserMemoryContext();
        LocalMemoryContext revocableMemoryContext = queryContext.getQueryMemoryContext().localRevocableMemoryContext();
        assertTrue(userMemoryContext.setBytes(3).isDone());
        assertTrue(revocableMemoryContext.setBytes(5).isDone());
        queryContext.setMemoryPool(reservedPool);
        if (useReservedPool) {
            reservedPool.free(secondQuery, "test", secondQueryMemory);
        }
        // Free memory
        userMemoryContext.close();
        revocableMemoryContext.close();
    }
}
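The test above only constructs the SpillSpaceTracker and hands it to QueryContext. The short sketch below exercises the tracker directly; it assumes the reserve(long) and free(long) methods of the upstream Presto SpillSpaceTracker (where reserve throws once the configured limit would be exceeded), so treat the exact method names as an assumption rather than verified hetu-core API.

// Minimal sketch; method names assumed from upstream Presto's SpillSpaceTracker
SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(new DataSize(1, GIGABYTE));
long spillBytes = new DataSize(128, MEGABYTE).toBytes();
spillSpaceTracker.reserve(spillBytes); // assumed to throw if the 1 GB spill budget would be exceeded
// ... write the spill file ...
spillSpaceTracker.free(spillBytes); // release the reservation once the spill file is deleted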
Use of io.prestosql.spiller.SpillSpaceTracker in project hetu-core by openlookeng.
In class MemoryLocalQueryRunner, method execute:
public List<Page> execute(@Language("SQL") String query) {
    MemoryPool memoryPool = new MemoryPool(new MemoryPoolId("test"), new DataSize(2, GIGABYTE));
    SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(new DataSize(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(new QueryId("test"), new DataSize(1, GIGABYTE), new DataSize(2, GIGABYTE), memoryPool, new TestingGcMonitor(), localQueryRunner.getExecutor(), localQueryRunner.getScheduler(), new DataSize(4, GIGABYTE), spillSpaceTracker, NOOP_SNAPSHOT_UTILS);
    TaskContext taskContext = queryContext.addTaskContext(new TaskStateMachine(new TaskId("query", 0, 0), localQueryRunner.getExecutor()), localQueryRunner.getDefaultSession(), false, false, OptionalInt.empty(), Optional.empty(), TESTING_SERDE_FACTORY);
    // Collect the output pages through a PageConsumerOutputFactory so they can be returned to the caller
    ImmutableList.Builder<Page> output = ImmutableList.builder();
    List<Driver> drivers = localQueryRunner.createDrivers(query, new PageConsumerOperator.PageConsumerOutputFactory(types -> output::add), taskContext);
    boolean done = false;
    while (!done) {
        boolean processed = false;
        for (Driver driver : drivers) {
            if (!driver.isFinished()) {
                driver.process();
                processed = true;
            }
        }
        done = !processed;
    }
    return output.build();
}
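A hedged usage sketch for the method above; the instance name and the queried table are placeholders, not taken from hetu-core.

// Illustrative only: memoryLocalQueryRunner is a placeholder instance of the class above
List<Page> pages = memoryLocalQueryRunner.execute("SELECT * FROM lineitem");
// every returned page was handed to the builder by the PageConsumerOutputFactory
long rows = pages.stream().mapToLong(Page::getPositionCount).sum();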
Use of io.prestosql.spiller.SpillSpaceTracker in project hetu-core by openlookeng.
In class TestSqlTask, method createInitialTask:
private SqlTask createInitialTask() {
    TaskId taskId = new TaskId("query", 0, nextTaskId.incrementAndGet());
    String instanceId = "0-query_test_instance_id";
    URI location = URI.create("fake://task/" + taskId);
    QueryContext queryContext = new QueryContext(new QueryId("query"), new DataSize(1, MEGABYTE), new DataSize(2, MEGABYTE), new MemoryPool(new MemoryPoolId("test"), new DataSize(1, GIGABYTE)), new TestingGcMonitor(), taskNotificationExecutor, driverYieldExecutor, new DataSize(1, MEGABYTE), new SpillSpaceTracker(new DataSize(1, GIGABYTE)), NOOP_SNAPSHOT_UTILS);
    queryContext.addTaskContext(new TaskStateMachine(taskId, taskNotificationExecutor), testSessionBuilder().build(), false, false, OptionalInt.empty(), Optional.empty(), TESTING_SERDE_FACTORY);
    return createSqlTask(taskId, instanceId, location, "fake", queryContext, sqlTaskExecutionFactory, taskNotificationExecutor, Functions.identity(), new DataSize(32, MEGABYTE), new CounterStat(), createTestMetadataManager());
}
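A sketch of how a test might use the helper; the accessors (SqlTask.getTaskInfo(), TaskStatus.getState()) are assumed from the Presto code base hetu-core derives from, so verify them against the actual class before relying on this.

// Illustrative only; accessor names assumed from the upstream Presto SqlTask API
SqlTask sqlTask = createInitialTask();
TaskInfo taskInfo = sqlTask.getTaskInfo();
// a freshly created task should still be running
assertEquals(taskInfo.getTaskStatus().getState(), TaskState.RUNNING);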
Use of io.prestosql.spiller.SpillSpaceTracker in project hetu-core by openlookeng.
In class BufferTestUtils, method newTestingTaskContext:
static TaskContext newTestingTaskContext() {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    QueryContext queryContext = new QueryContext(new QueryId("query_id"), new DataSize(1, MEGABYTE), new DataSize(2, MEGABYTE), new MemoryPool(new MemoryPoolId("test"), new DataSize(1, GIGABYTE)), new TestingGcMonitor(), executor, executor, new DataSize(1, MEGABYTE), new SpillSpaceTracker(new DataSize(1, GIGABYTE)), NOOP_SNAPSHOT_UTILS);
    TaskId taskId = TaskId.valueOf("query_id.1.2");
    TaskContext taskContext = queryContext.addTaskContext(new TaskStateMachine(taskId, executor), TEST_SNAPSHOT_SESSION, false, false, OptionalInt.empty(), Optional.empty(), TESTING_SERDE_FACTORY);
    taskContext.getSnapshotManager().setTotalComponents(100);
    return taskContext;
}
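A minimal sketch of consuming the helper; the assertion is illustrative, and TaskContext.getTaskId() is assumed from the upstream Presto API rather than shown in this snippet.

// Illustrative only; TaskContext.getTaskId() is an assumed accessor
TaskContext taskContext = newTestingTaskContext();
// the helper already registered 100 snapshot components on the task's snapshot manager
assertEquals(taskContext.getTaskId(), TaskId.valueOf("query_id.1.2"));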
Use of io.prestosql.spiller.SpillSpaceTracker in project hetu-core by openlookeng.
In class GroupByHashYieldAssertion, method finishOperatorWithYieldingGroupByHash:
/**
 * @param operatorFactory creates an Operator that should directly or indirectly contain GroupByHash
 * @param getHashCapacity returns the hash table capacity for the input operator
 * @param additionalMemoryInBytes the memory used in addition to the GroupByHash in the operator (e.g., aggregator)
 */
public static GroupByHashYieldResult finishOperatorWithYieldingGroupByHash(List<Page> input, Type hashKeyType, OperatorFactory operatorFactory, Function<Operator, Integer> getHashCapacity, long additionalMemoryInBytes) {
    assertLessThan(additionalMemoryInBytes, 1L << 21, "additionalMemoryInBytes should be a relatively small number");
    List<Page> result = new LinkedList<>();
    // mock an adjustable memory pool
    QueryId queryId = new QueryId("test_query");
    MemoryPool memoryPool = new MemoryPool(new MemoryPoolId("test"), new DataSize(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(queryId, new DataSize(512, MEGABYTE), new DataSize(1024, MEGABYTE), memoryPool, new TestingGcMonitor(), EXECUTOR, SCHEDULED_EXECUTOR, new DataSize(512, MEGABYTE), new SpillSpaceTracker(new DataSize(512, MEGABYTE)), NOOP_SNAPSHOT_UTILS);
    DriverContext driverContext = createTaskContext(queryContext, EXECUTOR, TEST_SESSION).addPipelineContext(0, true, true, false).addDriverContext();
    Operator operator = operatorFactory.createOperator(driverContext);
    // run operator
    int yieldCount = 0;
    long expectedReservedExtraBytes = 0;
    for (Page page : input) {
        // unblocked
        assertTrue(operator.needsInput());
        // saturate the pool, leaving only a tiny amount of memory free
        long reservedMemoryInBytes = memoryPool.getFreeBytes() - additionalMemoryInBytes;
        memoryPool.reserve(queryId, "test", reservedMemoryInBytes);
        long oldMemoryUsage = operator.getOperatorContext().getDriverContext().getMemoryUsage();
        int oldCapacity = getHashCapacity.apply(operator);
        // add a page and verify different behaviors
        operator.addInput(page);
        // get output to consume the input
        Page output = operator.getOutput();
        if (output != null) {
            result.add(output);
        }
        long newMemoryUsage = operator.getOperatorContext().getDriverContext().getMemoryUsage();
        // skip small usages, since below this threshold we cannot distinguish
        // between a rehash and the memory used by the aggregator
        if (newMemoryUsage < new DataSize(4, MEGABYTE).toBytes()) {
            // free the pool for the next iteration
            memoryPool.free(queryId, "test", reservedMemoryInBytes);
            // this is required in case the input is blocked
            operator.getOutput();
            continue;
        }
        long actualIncreasedMemory = newMemoryUsage - oldMemoryUsage;
        if (operator.needsInput()) {
            // We have successfully added a page
            // Assert we are not blocked
            assertTrue(operator.getOperatorContext().isWaitingForMemory().isDone());
            // assert the hash capacity is not changed; otherwise, we should have yielded
            assertTrue(oldCapacity == getHashCapacity.apply(operator));
            // We are not going to rehash; therefore, assert the memory increase only comes from the aggregator
            assertLessThan(actualIncreasedMemory, additionalMemoryInBytes);
            // free the pool for the next iteration
            memoryPool.free(queryId, "test", reservedMemoryInBytes);
        } else {
            // We failed to finish the page processing i.e. we yielded
            yieldCount++;
            // Assert we are blocked
            assertFalse(operator.getOperatorContext().isWaitingForMemory().isDone());
            // Hash table capacity should not change
            assertEquals(oldCapacity, (long) getHashCapacity.apply(operator));
            // Increased memory is no smaller than the hash table size and no greater than the hash table size + the memory used by aggregator
            if (hashKeyType == BIGINT) {
                // groupIds and values double by hashCapacity; while valuesByGroupId double by maxFill = hashCapacity / 0.75
                expectedReservedExtraBytes = oldCapacity * (long) (Long.BYTES * 1.75 + Integer.BYTES) + page.getRetainedSizeInBytes();
            } else {
                // groupAddressByHash, groupIdsByHash, and rawHashByHashPosition double by hashCapacity; while groupAddressByGroupId double by maxFill = hashCapacity / 0.75
                expectedReservedExtraBytes = oldCapacity * (long) (Long.BYTES * 1.75 + Integer.BYTES + Byte.BYTES) + page.getRetainedSizeInBytes();
            }
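            // Worked example (illustrative numbers, not from a real run): for BIGINT keys with
            // oldCapacity = 65_536, the rehash is expected to reserve roughly
            // 65_536 * (8 * 1.75 + 4) = 1_179_648 extra bytes, plus the page's retained size.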
            assertBetweenInclusive(actualIncreasedMemory, expectedReservedExtraBytes, expectedReservedExtraBytes + additionalMemoryInBytes);
            // Output should be blocked as well
            assertNull(operator.getOutput());
            // Free the pool to unblock
            memoryPool.free(queryId, "test", reservedMemoryInBytes);
            // Trigger a process through getOutput() or needsInput()
            output = operator.getOutput();
            if (output != null) {
                result.add(output);
            }
            assertTrue(operator.needsInput());
            // Hash table capacity has increased
            assertGreaterThan(getHashCapacity.apply(operator), oldCapacity);
            // Assert the estimated reserved memory before rehash is very close to the one after rehash
            long rehashedMemoryUsage = operator.getOperatorContext().getDriverContext().getMemoryUsage();
            assertBetweenInclusive(rehashedMemoryUsage * 1.0 / newMemoryUsage, 0.99, 1.01);
            // unblocked
            assertTrue(operator.needsInput());
        }
    }
    result.addAll(finishOperator(operator));
    return new GroupByHashYieldResult(yieldCount, expectedReservedExtraBytes, result);
}
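A hedged example of calling the assertion helper from a test. The input pages, the operator factory, the HashAggregationOperator.getCapacity() accessor, and the 400_000-byte allowance are all placeholders chosen for illustration; only the helper's own signature comes from the code above.

// Illustrative call; input, operatorFactory, and the capacity accessor are placeholders
GroupByHashYieldResult result = finishOperatorWithYieldingGroupByHash(
        input,
        BIGINT,
        operatorFactory,
        operator -> ((HashAggregationOperator) operator).getCapacity(),
        400_000);
// the result carries the yield count, the last expected extra reservation, and the produced pages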