Use of io.trino.metadata.InMemoryNodeManager in the trinodb/trino project.
Class TestFullNodeCapableNodeAllocator, method testRemoveAcquiredSharedNode.
@Test(timeOut = TEST_TIMEOUT)
public void testRemoveAcquiredSharedNode() throws Exception {
    InMemoryNodeManager nodeManager = testingNodeManager(basicNodesMap(NODE_1));
    setupNodeAllocatorService(nodeManager, 1);
    try (NodeAllocator nodeAllocator = nodeAllocatorService.getNodeAllocator(Q1_SESSION)) {
        NodeAllocator.NodeLease acquire1 = nodeAllocator.acquire(NO_REQUIREMENTS);
        assertAcquired(acquire1, NODE_1);
        // remove acquired node
        nodeManager.removeNode(NODE_1);
        // we should still be able to release lease for removed node
        acquire1.release();
    }
}
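The testingNodeManager and basicNodesMap calls are helpers local to the test class and are not shown on this page. A minimal, self-contained sketch of the InMemoryNodeManager calls the test relies on could look as follows; the catalog name, node identifier, and port are illustrative assumptions, and the CatalogName argument to addNode mirrors the CONNECTOR_ID constant used further down this page:

import io.trino.client.NodeVersion;
import io.trino.connector.CatalogName;
import io.trino.metadata.InMemoryNodeManager;
import io.trino.metadata.InternalNode;

import java.net.URI;

public class InMemoryNodeManagerSketch {
    public static void main(String[] args) {
        InMemoryNodeManager nodeManager = new InMemoryNodeManager();
        // hypothetical worker node and catalog name, for illustration only
        InternalNode worker = new InternalNode("worker-1", URI.create("http://127.0.0.1:8081"), NodeVersion.UNKNOWN, false);
        nodeManager.addNode(new CatalogName("catalog_1"), worker);
        // removing the node while a lease is still outstanding is exactly the situation the test exercises
        nodeManager.removeNode(worker);
    }
}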
Use of io.trino.metadata.InMemoryNodeManager in the trinodb/trino project.
Class TestFullNodeCapableNodeAllocator, method testAllocateFullWithAddressRequirements.
@Test(timeOut = TEST_TIMEOUT)
public void testAllocateFullWithAddressRequirements() throws Exception {
    InMemoryNodeManager nodeManager = testingNodeManager(basicNodesMap(NODE_1, NODE_2, NODE_3));
    setupNodeAllocatorService(nodeManager, 2);
    try (NodeAllocator nodeAllocator = nodeAllocatorService.getNodeAllocator(Q1_SESSION)) {
        NodeAllocator.NodeLease acquire1 = nodeAllocator.acquire(FULL_NODE_1_REQUIREMENTS);
        assertAcquired(acquire1);
        NodeAllocator.NodeLease acquire2 = nodeAllocator.acquire(FULL_NODE_2_REQUIREMENTS);
        assertAcquired(acquire2);
        NodeAllocator.NodeLease acquire3 = nodeAllocator.acquire(FULL_NODE_3_REQUIREMENTS);
        assertNotAcquired(acquire3);
        acquire1.release();
        assertEventually(() -> assertAcquired(acquire3));
    }
}
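assertAcquired and assertNotAcquired are assertion helpers defined in the test class. Judging only from how leases are inspected elsewhere on this page (getNode() returns a future, checked with isDone() and getFutureValue), they could plausibly look like the sketch below; this assumes TestNG assertions and io.airlift.concurrent.MoreFutures.getFutureValue as static imports, and the real Trino helpers may differ:

// Plausible sketch of the lease assertion helpers (assumption, not the actual Trino code).
private static void assertAcquired(NodeAllocator.NodeLease lease, InternalNode expectedNode) {
    assertTrue(lease.getNode().isDone(), "expected node lease to be fulfilled");
    assertEquals(getFutureValue(lease.getNode()), expectedNode);
}

private static void assertAcquired(NodeAllocator.NodeLease lease) {
    assertTrue(lease.getNode().isDone(), "expected node lease to be fulfilled");
}

private static void assertNotAcquired(NodeAllocator.NodeLease lease) {
    assertFalse(lease.getNode().isDone(), "expected node lease to still be pending");
}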
Use of io.trino.metadata.InMemoryNodeManager in the trinodb/trino project.
Class TestFullNodeCapableNodeAllocator, method testNoSharedNodeAvailable.
@Test(timeOut = TEST_TIMEOUT)
public void testNoSharedNodeAvailable() throws Exception {
    InMemoryNodeManager nodeManager = testingNodeManager(nodesMapBuilder()
            .put(NODE_1, ImmutableList.of(CATALOG_2))
            .buildOrThrow());
    setupNodeAllocatorService(nodeManager, 1);
    try (NodeAllocator nodeAllocator = nodeAllocatorService.getNodeAllocator(Q1_SESSION)) {
        // request a node with specific catalog (not present)
        assertThatThrownBy(() -> nodeAllocator.acquire(SHARED_NODE_CATALOG_1_REQUIREMENTS.withMemory(DataSize.of(64, GIGABYTE))))
                .hasMessage("No nodes available to run query");
        // add node with specific catalog
        addNode(nodeManager, NODE_2, CATALOG_1);
        // we should be able to acquire the node now
        NodeAllocator.NodeLease acquire1 = nodeAllocator.acquire(SHARED_NODE_CATALOG_1_REQUIREMENTS.withMemory(DataSize.of(64, GIGABYTE)));
        assertAcquired(acquire1, NODE_2);
        // acquiring one more should block (only one acquire fits a node as we request 64GB)
        NodeAllocator.NodeLease acquire2 = nodeAllocator.acquire(SHARED_NODE_CATALOG_1_REQUIREMENTS.withMemory(DataSize.of(64, GIGABYTE)));
        assertNotAcquired(acquire2);
        // remove node with catalog
        nodeManager.removeNode(NODE_2);
        // TODO: make FullNodeCapableNodeAllocatorService react on node removed automatically
        nodeAllocatorService.wakeupProcessPendingAcquires();
        // pending acquire2 should be completed now but with an exception
        assertEventually(() -> {
            assertFalse(acquire2.getNode().isCancelled());
            assertTrue(acquire2.getNode().isDone());
            assertThatThrownBy(() -> getFutureValue(acquire2.getNode())).hasMessage("No nodes available to run query");
        });
    }
}
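The addNode(nodeManager, NODE_2, CATALOG_1) call is another test-local helper. Assuming the catalog constants are CatalogName values, a minimal sketch that simply delegates to InMemoryNodeManager#addNode per catalog would be:

// Hypothetical helper, delegating to InMemoryNodeManager#addNode for each catalog (assumption).
private static void addNode(InMemoryNodeManager nodeManager, InternalNode node, CatalogName... catalogs) {
    for (CatalogName catalog : catalogs) {
        nodeManager.addNode(catalog, node);
    }
}

Note that after removing the node the test has to call nodeAllocatorService.wakeupProcessPendingAcquires() by hand; as the TODO in the test says, the service does not yet react to node removal on its own.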
Use of io.trino.metadata.InMemoryNodeManager in the trinodb/trino project.
Class TestFullNodeCapableNodeAllocator, method testAllocateFullNodeReleaseBeforeAcquiredWaitingOnMaxFullNodesPerQuery.
@Test(timeOut = TEST_TIMEOUT)
public void testAllocateFullNodeReleaseBeforeAcquiredWaitingOnMaxFullNodesPerQuery() throws Exception {
    InMemoryNodeManager nodeManager = testingNodeManager(basicNodesMap(NODE_1, NODE_2));
    setupNodeAllocatorService(nodeManager, 1);
    try (NodeAllocator nodeAllocator = nodeAllocatorService.getNodeAllocator(Q1_SESSION)) {
        // first full allocation should not block
        NodeAllocator.NodeLease acquire1 = nodeAllocator.acquire(FULL_NODE_REQUIREMENTS);
        assertAcquired(acquire1, NODE_1);
        // next two should block (maxFullNodesPerQuery == 1)
        NodeAllocator.NodeLease acquire2 = nodeAllocator.acquire(FULL_NODE_REQUIREMENTS);
        assertNotAcquired(acquire2);
        NodeAllocator.NodeLease acquire3 = nodeAllocator.acquire(FULL_NODE_REQUIREMENTS);
        assertNotAcquired(acquire3);
        // releasing a blocked one should not unblock anything
        acquire2.release();
        assertNotAcquired(acquire3);
        // releasing an acquired one should unblock one which is still blocked
        acquire1.release();
        assertEventually(() -> assertAcquired(acquire3, NODE_1));
    }
}
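Taken together, these tests exercise the basic NodeAllocator lease lifecycle: acquire, wait on the node future, use the node, release. A condensed sketch of that lifecycle, using only the calls seen above and assuming the same test fixtures (nodeAllocatorService, Q1_SESSION, NO_REQUIREMENTS) are in scope:

// Condensed lease lifecycle (sketch); fixture names come from the test class above.
try (NodeAllocator nodeAllocator = nodeAllocatorService.getNodeAllocator(Q1_SESSION)) {
    NodeAllocator.NodeLease lease = nodeAllocator.acquire(NO_REQUIREMENTS);
    InternalNode node = getFutureValue(lease.getNode()); // blocks until a node is assigned
    try {
        // ... run work against `node` ...
    }
    finally {
        lease.release(); // frees capacity, potentially unblocking pending acquires
    }
}

Releasing a lease that was never fulfilled is also legal and simply drops it from the pending queue, which is why releasing acquire2 above does not unblock acquire3.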
Use of io.trino.metadata.InMemoryNodeManager in the trinodb/trino project.
Class TestSourcePartitionedScheduler, method testStageBalancedSplitAssignment.
@Test
public void testStageBalancedSplitAssignment() {
    // use private node manager so we can add a node later
    InMemoryNodeManager nodeManager = new InMemoryNodeManager();
    nodeManager.addNode(CONNECTOR_ID,
            new InternalNode("other1", URI.create("http://127.0.0.1:11"), NodeVersion.UNKNOWN, false),
            new InternalNode("other2", URI.create("http://127.0.0.1:12"), NodeVersion.UNKNOWN, false),
            new InternalNode("other3", URI.create("http://127.0.0.1:13"), NodeVersion.UNKNOWN, false));
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    // Schedule 15 splits - there are 3 nodes, each node should get 5 splits
    PlanFragment firstPlan = createFragment();
    StageExecution firstStage = createStageExecution(firstPlan, nodeTaskMap);
    QueuedSplitSource firstSplitSource = new QueuedSplitSource(TestingSplit::createRemoteSplit);
    StageScheduler firstScheduler = getSourcePartitionedScheduler(firstSplitSource, firstStage, nodeManager, nodeTaskMap, 200, STAGE);
    firstSplitSource.addSplits(15);
    ScheduleResult scheduleResult = firstScheduler.schedule();
    assertTrue(scheduleResult.getBlocked().isDone());
    assertEquals(scheduleResult.getNewTasks().size(), 3);
    assertEquals(firstStage.getAllTasks().size(), 3);
    for (RemoteTask remoteTask : firstStage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 5);
    }
    // Add new node
    InternalNode additionalNode = new InternalNode("other4", URI.create("http://127.0.0.1:14"), NodeVersion.UNKNOWN, false);
    nodeManager.addNode(CONNECTOR_ID, additionalNode);
    // Schedule 5 splits in first query. Since the new node does not have any splits, all 5 splits are assigned to the new node
    firstSplitSource.addSplits(5);
    firstSplitSource.close();
    scheduleResult = firstScheduler.schedule();
    assertEffectivelyFinished(scheduleResult, firstScheduler);
    assertTrue(scheduleResult.getBlocked().isDone());
    assertEquals(scheduleResult.getNewTasks().size(), 1);
    assertEquals(firstStage.getAllTasks().size(), 4);
    for (RemoteTask remoteTask : firstStage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 5);
    }
    // Add new node
    InternalNode anotherAdditionalNode = new InternalNode("other5", URI.create("http://127.0.0.1:15"), NodeVersion.UNKNOWN, false);
    nodeManager.addNode(CONNECTOR_ID, anotherAdditionalNode);
    // Schedule 5 splits in another query. New query should be balanced across all nodes
    PlanFragment secondPlan = createFragment();
    StageExecution secondStage = createStageExecution(secondPlan, nodeTaskMap);
    StageScheduler secondScheduler = getSourcePartitionedScheduler(createFixedSplitSource(5, TestingSplit::createRemoteSplit), secondStage, nodeManager, nodeTaskMap, 200, STAGE);
    scheduleResult = secondScheduler.schedule();
    assertEffectivelyFinished(scheduleResult, secondScheduler);
    assertEquals(secondStage.getAllTasks().size(), 5);
    for (RemoteTask remoteTask : secondStage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 1);
    }
    firstStage.abort();
    secondStage.abort();
}
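As a small follow-up (not part of the test), the balanced-assignment expectation for the first stage could also be checked in aggregate by summing the partitioned split counts over its tasks; after both batches 20 splits have been scheduled in total:

// Aggregate check (sketch): 15 splits from the first batch plus 5 from the second.
int totalSplits = firstStage.getAllTasks().stream()
        .mapToInt(task -> task.getPartitionedSplitsInfo().getCount())
        .sum();
assertEquals(totalSplits, 20);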