Use of io.trino.spi.QueryId in project trino by trinodb.
From class GroupByHashYieldAssertion, method finishOperatorWithYieldingGroupByHash.
/**
 * @param input the input pages to feed to the operator
 * @param hashKeyType the type of the group-by hash key, which determines the expected hash table layout
 * @param operatorFactory creates an Operator that should directly or indirectly contain a GroupByHash
 * @param getHashCapacity returns the hash table capacity for the given operator
 * @param additionalMemoryInBytes the memory used in addition to the GroupByHash in the operator (e.g., by an aggregator)
 */
public static GroupByHashYieldResult finishOperatorWithYieldingGroupByHash(
        List<Page> input,
        Type hashKeyType,
        OperatorFactory operatorFactory,
        Function<Operator, Integer> getHashCapacity,
        long additionalMemoryInBytes)
{
    assertLessThan(additionalMemoryInBytes, 1L << 21, "additionalMemoryInBytes should be a relatively small number");
    List<Page> result = new LinkedList<>();

    // mock an adjustable memory pool
    QueryId queryId = new QueryId("test_query");
    MemoryPool memoryPool = new MemoryPool(DataSize.of(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(
            queryId,
            DataSize.of(512, MEGABYTE),
            memoryPool,
            new TestingGcMonitor(),
            EXECUTOR,
            SCHEDULED_EXECUTOR,
            DataSize.of(512, MEGABYTE),
            new SpillSpaceTracker(DataSize.of(512, MEGABYTE)));
    DriverContext driverContext = createTaskContext(queryContext, EXECUTOR, TEST_SESSION)
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
    Operator operator = operatorFactory.createOperator(driverContext);

    // run the operator
    int yieldCount = 0;
    long expectedReservedExtraBytes = 0;
    for (Page page : input) {
        // unblocked
        assertTrue(operator.needsInput());

        // saturate the pool, leaving only a tiny amount of memory free
        long reservedMemoryInBytes = memoryPool.getFreeBytes() - additionalMemoryInBytes;
        memoryPool.reserve(queryId, "test", reservedMemoryInBytes);

        long oldMemoryUsage = operator.getOperatorContext().getDriverContext().getMemoryUsage();
        int oldCapacity = getHashCapacity.apply(operator);

        // add a page and verify the different behaviors
        operator.addInput(page);

        // get output to consume the input
        Page output = operator.getOutput();
        if (output != null) {
            result.add(output);
        }

        long newMemoryUsage = operator.getOperatorContext().getDriverContext().getMemoryUsage();

        // skip if the memory usage is not large enough, since we cannot distinguish
        // between a rehash and memory used by the aggregator
        if (newMemoryUsage < DataSize.of(4, MEGABYTE).toBytes()) {
            // free the pool for the next iteration
            memoryPool.free(queryId, "test", reservedMemoryInBytes);
            // this is required in case the input is blocked
            operator.getOutput();
            continue;
        }

        long actualIncreasedMemory = newMemoryUsage - oldMemoryUsage;

        if (operator.needsInput()) {
            // we have successfully added a page

            // assert we are not blocked
            assertTrue(operator.getOperatorContext().isWaitingForMemory().isDone());

            // assert the hash capacity has not changed; otherwise, we should have yielded
            assertTrue(oldCapacity == getHashCapacity.apply(operator));

            // we are not going to rehash; therefore, assert the memory increase only comes from the aggregator
            assertLessThan(actualIncreasedMemory, additionalMemoryInBytes);

            // free the pool for the next iteration
            memoryPool.free(queryId, "test", reservedMemoryInBytes);
        }
        else {
            // we failed to finish the page processing, i.e., we yielded
            yieldCount++;

            // assert we are blocked
            assertFalse(operator.getOperatorContext().isWaitingForMemory().isDone());

            // the hash table capacity should not change while we are yielded
            assertEquals(oldCapacity, (long) getHashCapacity.apply(operator));

            // the increased memory is no smaller than the hash table size and no greater than
            // the hash table size plus the memory used by the aggregator
            if (hashKeyType == BIGINT) {
                // groupIds and values double with hashCapacity, while valuesByGroupId doubles with maxFill = hashCapacity / 0.75
                expectedReservedExtraBytes = oldCapacity * (long) (Long.BYTES * 1.75 + Integer.BYTES) + page.getRetainedSizeInBytes();
            }
            else {
                // groupAddressByHash, groupIdsByHash, and rawHashByHashPosition double with hashCapacity, while groupAddressByGroupId doubles with maxFill = hashCapacity / 0.75
                expectedReservedExtraBytes = oldCapacity * (long) (Long.BYTES * 1.75 + Integer.BYTES + Byte.BYTES) + page.getRetainedSizeInBytes();
            }
            assertBetweenInclusive(actualIncreasedMemory, expectedReservedExtraBytes, expectedReservedExtraBytes + additionalMemoryInBytes);

            // the output should be blocked as well
            assertNull(operator.getOutput());

            // free the pool to unblock
            memoryPool.free(queryId, "test", reservedMemoryInBytes);

            // trigger processing through getOutput() (or needsInput())
            output = operator.getOutput();
            if (output != null) {
                result.add(output);
            }
            assertTrue(operator.needsInput());

            // the hash table capacity has increased
            assertGreaterThan(getHashCapacity.apply(operator), oldCapacity);

            // assert the estimated reserved memory before the rehash is very close to the usage after the rehash
            long rehashedMemoryUsage = operator.getOperatorContext().getDriverContext().getMemoryUsage();
            assertBetweenInclusive(rehashedMemoryUsage * 1.0 / newMemoryUsage, 0.99, 1.01);

            // unblocked again
            assertTrue(operator.needsInput());
        }
    }
    result.addAll(finishOperator(operator));
    return new GroupByHashYieldResult(yieldCount, expectedReservedExtraBytes, result);
}
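A typical call site drives this helper with a concrete operator factory and a way to read the hash table capacity. The following is a minimal usage sketch, not verbatim Trino test code: createHashAggregationOperatorFactory() and getHashCapacity() are hypothetical stand-ins for whatever the concrete operator test provides, the result's accessor name is assumed, and the constants are illustrative.

// a minimal usage sketch; the factory and capacity accessor are assumed helpers
List<Page> input = rowPagesBuilder(BIGINT)
        .addSequencePage(4_000, 0)
        .build();
GroupByHashYieldResult result = finishOperatorWithYieldingGroupByHash(
        input,
        BIGINT,
        createHashAggregationOperatorFactory(),      // hypothetical: must directly or indirectly contain a GroupByHash
        operator -> getHashCapacity(operator),       // hypothetical: reads the operator's hash table capacity
        1_000_000);                                  // non-hash memory bound, well under the 2 MB sanity limit
// with enough input to force at least one rehash, the operator should have yielded
assertGreaterThan(result.getYieldCount(), 0);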
Use of io.trino.spi.QueryId in project trino by trinodb.
From class TestDeduplicatingDirectExchangeBuffer, method testExchangeManagerNotConfigured.
@Test
public void testExchangeManagerNotConfigured()
{
    // no overflow
    try (DirectExchangeBuffer buffer = new DeduplicatingDirectExchangeBuffer(
            directExecutor(),
            DataSize.of(100, BYTE),
            RetryPolicy.QUERY,
            new ExchangeManagerRegistry(new ExchangeHandleResolver()),
            new QueryId("query"),
            createRandomExchangeId())) {
        TaskId task = createTaskId(0, 0);
        Slice page = createPage("1234", DataSize.of(10, BYTE));
        buffer.addTask(task);
        buffer.addPages(task, ImmutableList.of(page));
        buffer.taskFinished(task);
        buffer.noMoreTasks();

        assertFalse(buffer.isFinished());
        assertNotBlocked(buffer.isBlocked());
        assertEquals(buffer.pollPage(), page);
        assertNull(buffer.pollPage());
        assertTrue(buffer.isFinished());
    }

    // overflow
    try (DirectExchangeBuffer buffer = new DeduplicatingDirectExchangeBuffer(
            directExecutor(),
            DataSize.of(100, BYTE),
            RetryPolicy.QUERY,
            new ExchangeManagerRegistry(new ExchangeHandleResolver()),
            new QueryId("query"),
            createRandomExchangeId())) {
        TaskId task = createTaskId(0, 0);
        Slice page1 = utf8Slice("1234");
        Slice page2 = utf8Slice("123456789");
        assertThat(page1.getRetainedSize()).isLessThanOrEqualTo(100);
        assertThat(page1.getRetainedSize() + page2.getRetainedSize()).isGreaterThan(100);

        buffer.addTask(task);
        buffer.addPages(task, ImmutableList.of(page1));
        assertFalse(buffer.isFinished());
        assertBlocked(buffer.isBlocked());
        assertEquals(buffer.getRetainedSizeInBytes(), page1.getRetainedSize());

        buffer.addPages(task, ImmutableList.of(page2));
        assertFalse(buffer.isFinished());
        assertTrue(buffer.isFailed());
        assertNotBlocked(buffer.isBlocked());
        assertEquals(buffer.getRetainedSizeInBytes(), 0);
        assertEquals(buffer.getBufferedPageCount(), 0);
        assertThatThrownBy(buffer::pollPage).isInstanceOf(TrinoException.class);
    }
}
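assertBlocked and assertNotBlocked are private helpers of the test class and are not shown on this page; presumably they only check whether the future returned by isBlocked() has completed, along these lines (a sketch under that assumption, not the verbatim helpers):

private static void assertBlocked(ListenableFuture<Void> blocked)
{
    // a blocked buffer hands out a future that is not yet complete
    assertFalse(blocked.isDone());
}

private static void assertNotBlocked(ListenableFuture<Void> blocked)
{
    // an unblocked buffer hands out an already-completed future
    assertTrue(blocked.isDone());
}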
Use of io.trino.spi.QueryId in project trino by trinodb.
From class TestTotalReservationOnBlockedNodesLowMemoryKiller, method testSkewedQuery.
@Test
public void testSkewedQuery()
{
    int memoryPool = 12;
    // q_1 is neither the query with the largest total memory reservation, nor the query with the largest single-node reservation.
    // This also tests the corner case where a node has an empty memory pool.
    Map<String, Map<String, Long>> queries = ImmutableMap.<String, Map<String, Long>>builder()
            .put("q_1", ImmutableMap.of("n1", 0L, "n2", 8L, "n3", 0L, "n4", 0L, "n5", 0L))
            .put("q_2", ImmutableMap.of("n1", 3L, "n2", 5L, "n3", 2L, "n4", 4L, "n5", 0L))
            .put("q_3", ImmutableMap.of("n1", 0L, "n2", 0L, "n3", 9L, "n4", 0L, "n5", 0L))
            .buildOrThrow();
    assertEquals(
            lowMemoryKiller.chooseQueryToKill(toQueryMemoryInfoList(queries), toNodeMemoryInfoList(memoryPool, queries)),
            Optional.of(KillTarget.wholeQuery(new QueryId("q_1"))));
}
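For context, the expected victim can be derived by hand: only n2 runs out of pool memory (8 + 5 + 0 >= 12), and on that blocked node q_1 holds the largest reservation (8, vs. 5 for q_2 and 0 for q_3), even though q_2 has the largest total reservation (14) and q_3 the largest single-node reservation (9). The standalone sketch below reproduces that selection rule on the same data; it is a simplification of the policy this test exercises, not the killer's actual implementation:

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

class BlockedNodesSketch
{
    static String chooseVictim(Map<String, Map<String, Long>> queries, long poolSize)
    {
        // total reservation per node
        Map<String, Long> nodeTotals = new HashMap<>();
        queries.values().forEach(perNode -> perNode.forEach((node, bytes) -> nodeTotals.merge(node, bytes, Long::sum)));

        // a node is "blocked" when its pool has no free memory left
        Set<String> blockedNodes = nodeTotals.entrySet().stream()
                .filter(entry -> entry.getValue() >= poolSize)
                .map(Map.Entry::getKey)
                .collect(Collectors.toSet());

        // the victim is the query with the largest total reservation across blocked nodes;
        // for the data above, blockedNodes == {n2}, so "q_1" (8 bytes on n2) is chosen
        return queries.entrySet().stream()
                .max(Comparator.comparingLong((Map.Entry<String, Map<String, Long>> query) -> query.getValue().entrySet().stream()
                        .filter(entry -> blockedNodes.contains(entry.getKey()))
                        .mapToLong(Map.Entry::getValue)
                        .sum()))
                .map(Map.Entry::getKey)
                .orElseThrow();
    }
}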
Use of io.trino.spi.QueryId in project trino by trinodb.
From class TestQuerySessionSupplier, method createSession.
private static Session createSession(ListMultimap<String, String> headers, SqlEnvironmentConfig config)
{
    MultivaluedMap<String, String> headerMap = new GuavaMultivaluedMap<>(headers);
    SessionContext context = SESSION_CONTEXT_FACTORY.createSessionContext(headerMap, Optional.empty(), Optional.of("testRemote"), Optional.empty());
    QuerySessionSupplier sessionSupplier = createSessionSupplier(config);
    return sessionSupplier.createSession(new QueryId("test_query_id"), context);
}
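A call site for this helper might look like the following; the X-Trino-* header names are Trino's standard protocol headers, but the values and the assertion are illustrative, not taken from this test class:

// illustrative call; header values are made up for the example
ListMultimap<String, String> headers = ImmutableListMultimap.<String, String>builder()
        .put("X-Trino-User", "testUser")
        .put("X-Trino-Time-Zone", "Asia/Kathmandu")
        .build();
Session session = createSession(headers, new SqlEnvironmentConfig());
assertEquals(session.getIdentity().getUser(), "testUser");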
Use of io.trino.spi.QueryId in project trino by trinodb.
From class TestSessionPropertyDefaults, method testApplyDefaultProperties.
@Test
public void testApplyDefaultProperties()
{
    SessionPropertyDefaults sessionPropertyDefaults = new SessionPropertyDefaults(TEST_NODE_INFO, new AllowAllAccessControlManager());
    SessionPropertyManager sessionPropertyManager = new SessionPropertyManager();
    sessionPropertyManager.addConnectorSessionProperties(
            new CatalogName("testCatalog"),
            ImmutableList.of(
                    PropertyMetadata.stringProperty("explicit_set", "Test property", null, false),
                    PropertyMetadata.stringProperty("catalog_default", "Test property", null, false)));

    SessionPropertyConfigurationManagerFactory factory = new TestingSessionPropertyConfigurationManagerFactory(
            ImmutableMap.<String, String>builder()
                    .put(QUERY_MAX_MEMORY, "2GB") // will be overridden
                    .put(QUERY_MAX_TOTAL_MEMORY, "2GB") // will remain default
                    .buildOrThrow(),
            ImmutableMap.of(
                    "testCatalog",
                    ImmutableMap.<String, String>builder()
                            .put("explicit_set", "override") // will be overridden
                            .put("catalog_default", "catalog_default") // will remain default
                            .buildOrThrow()));
    sessionPropertyDefaults.addConfigurationManagerFactory(factory);
    sessionPropertyDefaults.setConfigurationManager(factory.getName(), ImmutableMap.of());

    Session session = Session.builder(sessionPropertyManager)
            .setQueryId(new QueryId("test_query_id"))
            .setIdentity(Identity.ofUser("testUser"))
            .setSystemProperty(QUERY_MAX_MEMORY, "1GB") // overrides this default system property
            .setSystemProperty(JOIN_DISTRIBUTION_TYPE, "partitioned")
            .setSystemProperty(HASH_PARTITION_COUNT, "43")
            .setCatalogSessionProperty("testCatalog", "explicit_set", "explicit_set") // overrides this default catalog property
            .build();

    assertEquals(
            session.getSystemProperties(),
            ImmutableMap.<String, String>builder()
                    .put(QUERY_MAX_MEMORY, "1GB")
                    .put(JOIN_DISTRIBUTION_TYPE, "partitioned")
                    .put(HASH_PARTITION_COUNT, "43")
                    .buildOrThrow());
    assertEquals(
            session.getCatalogProperties(),
            ImmutableMap.of(
                    "testCatalog",
                    ImmutableMap.<String, String>builder()
                            .put("explicit_set", "explicit_set")
                            .buildOrThrow()));

    session = sessionPropertyDefaults.newSessionWithDefaultProperties(session, Optional.empty(), TEST_RESOURCE_GROUP_ID);

    assertEquals(
            session.getSystemProperties(),
            ImmutableMap.<String, String>builder()
                    .put(QUERY_MAX_MEMORY, "1GB") // user-provided value overrides default value
                    .put(JOIN_DISTRIBUTION_TYPE, "partitioned") // user-provided value is used
                    .put(HASH_PARTITION_COUNT, "43") // user-provided value is used
                    .put(QUERY_MAX_TOTAL_MEMORY, "2GB") // default value is used
                    .buildOrThrow());
    assertEquals(
            session.getCatalogProperties(),
            ImmutableMap.of(
                    "testCatalog",
                    ImmutableMap.<String, String>builder()
                            .put("explicit_set", "explicit_set") // user-provided value overrides default value
                            .put("catalog_default", "catalog_default") // default value is used
                            .buildOrThrow()));
}
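The precedence these assertions verify boils down to a simple merge rule: defaults from the configuration manager fill in only the keys the user did not set explicitly. A minimal standalone sketch of that rule (property names mirror the test; the class itself is hypothetical):

import java.util.HashMap;
import java.util.Map;

class SessionDefaultsMergeSketch
{
    public static void main(String[] args)
    {
        // defaults supplied by the session property configuration manager
        Map<String, String> defaults = Map.of("query_max_memory", "2GB", "query_max_total_memory", "2GB");
        // properties the user set explicitly on the session
        Map<String, String> userProvided = Map.of("query_max_memory", "1GB");

        // start from the defaults, then let user-provided values win
        Map<String, String> merged = new HashMap<>(defaults);
        merged.putAll(userProvided);

        System.out.println(merged); // {query_max_memory=1GB, query_max_total_memory=2GB} (map order may vary)
    }
}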