
Example 11 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

From the class TestSourcePartitionedScheduler, the method testStageBalancedSplitAssignment. The test verifies that splits are balanced across worker nodes, that a node added mid-query receives the new splits until assignments even out, and that a fresh query balances across all nodes including the newly added ones.

@Test
public void testStageBalancedSplitAssignment() {
    // use private node manager so we can add a node later
    InMemoryNodeManager nodeManager = new InMemoryNodeManager();
    nodeManager.addNode(
            CONNECTOR_ID,
            new InternalNode("other1", URI.create("http://127.0.0.1:11"), NodeVersion.UNKNOWN, false),
            new InternalNode("other2", URI.create("http://127.0.0.1:12"), NodeVersion.UNKNOWN, false),
            new InternalNode("other3", URI.create("http://127.0.0.1:13"), NodeVersion.UNKNOWN, false));
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    // Schedule 15 splits - there are 3 nodes, each node should get 5 splits
    PlanFragment firstPlan = createFragment();
    StageExecution firstStage = createStageExecution(firstPlan, nodeTaskMap);
    QueuedSplitSource firstSplitSource = new QueuedSplitSource(TestingSplit::createRemoteSplit);
    StageScheduler firstScheduler = getSourcePartitionedScheduler(firstSplitSource, firstStage, nodeManager, nodeTaskMap, 200, STAGE);
    firstSplitSource.addSplits(15);
    ScheduleResult scheduleResult = firstScheduler.schedule();
    assertTrue(scheduleResult.getBlocked().isDone());
    assertEquals(scheduleResult.getNewTasks().size(), 3);
    assertEquals(firstStage.getAllTasks().size(), 3);
    for (RemoteTask remoteTask : firstStage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 5);
    }
    // Add new node
    InternalNode additionalNode = new InternalNode("other4", URI.create("http://127.0.0.1:14"), NodeVersion.UNKNOWN, false);
    nodeManager.addNode(CONNECTOR_ID, additionalNode);
    // Schedule 5 more splits in the first query. Since the new node does not have any splits yet, all 5 are assigned to it
    firstSplitSource.addSplits(5);
    firstSplitSource.close();
    scheduleResult = firstScheduler.schedule();
    assertEffectivelyFinished(scheduleResult, firstScheduler);
    assertTrue(scheduleResult.getBlocked().isDone());
    assertEquals(scheduleResult.getNewTasks().size(), 1);
    assertEquals(firstStage.getAllTasks().size(), 4);
    for (RemoteTask remoteTask : firstStage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 5);
    }
    // Add new node
    InternalNode anotherAdditionalNode = new InternalNode("other5", URI.create("http://127.0.0.1:15"), NodeVersion.UNKNOWN, false);
    nodeManager.addNode(CONNECTOR_ID, anotherAdditionalNode);
    // Schedule 5 splits in a second query. The new query should be balanced across all 5 nodes
    PlanFragment secondPlan = createFragment();
    StageExecution secondStage = createStageExecution(secondPlan, nodeTaskMap);
    StageScheduler secondScheduler = getSourcePartitionedScheduler(
            createFixedSplitSource(5, TestingSplit::createRemoteSplit),
            secondStage,
            nodeManager,
            nodeTaskMap,
            200,
            STAGE);
    scheduleResult = secondScheduler.schedule();
    assertEffectivelyFinished(scheduleResult, secondScheduler);
    assertEquals(secondStage.getAllTasks().size(), 5);
    for (RemoteTask remoteTask : secondStage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 1);
    }
    firstStage.abort();
    secondStage.abort();
}
Also used : NodeTaskMap(io.trino.execution.NodeTaskMap) PipelinedStageExecution.createPipelinedStageExecution(io.trino.execution.scheduler.PipelinedStageExecution.createPipelinedStageExecution) PartitionedSplitsInfo(io.trino.execution.PartitionedSplitsInfo) MockRemoteTask(io.trino.execution.MockRemoteTaskFactory.MockRemoteTask) RemoteTask(io.trino.execution.RemoteTask) InternalNode(io.trino.metadata.InternalNode) TestingSplit(io.trino.testing.TestingSplit) PlanFragment(io.trino.sql.planner.PlanFragment) InMemoryNodeManager(io.trino.metadata.InMemoryNodeManager) SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler(io.trino.execution.scheduler.SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler) Test(org.testng.annotations.Test)
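
The QueuedSplitSource used above is a private helper of TestSourcePartitionedScheduler that this listing omits. A minimal sketch of what such a helper might look like follows; the queue-based structure and field names are assumptions, not the actual Trino test code (the real helper implements ConnectorSplitSource and returns batches of splits as futures).

import io.trino.spi.connector.ConnectorSplit;

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Supplier;

// Hypothetical sketch of a QueuedSplitSource-style helper: splits are fed in on
// demand by the test and the source finishes once close() is called and the queue drains.
class QueuedSplitSourceSketch
{
    private final Supplier<ConnectorSplit> splitFactory;
    private final Queue<ConnectorSplit> queue = new ConcurrentLinkedQueue<>();
    private volatile boolean closed;

    QueuedSplitSourceSketch(Supplier<ConnectorSplit> splitFactory)
    {
        this.splitFactory = splitFactory;
    }

    // enqueue 'count' freshly created splits for the scheduler to pick up
    void addSplits(int count)
    {
        for (int i = 0; i < count; i++) {
            queue.add(splitFactory.get());
        }
    }

    // signal that no more splits will be added
    void close()
    {
        closed = true;
    }

    // the source is exhausted once it is closed and the queue has drained
    boolean isFinished()
    {
        return closed && queue.isEmpty();
    }
}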

Example 12 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

From the class TestSourcePartitionedScheduler, the method testNewTaskScheduledWhenChildStageBufferIsUnderutilized. The test verifies that when the child stage's output buffer is underutilized, adding a worker node leads to a new task being created and pending splits being scheduled on it, even though the existing split queues are full.

@Test
public void testNewTaskScheduledWhenChildStageBufferIsUnderutilized() {
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    // use private node manager so we can add a node later
    InMemoryNodeManager nodeManager = new InMemoryNodeManager();
    nodeManager.addNode(
            CONNECTOR_ID,
            new InternalNode("other1", URI.create("http://127.0.0.1:11"), NodeVersion.UNKNOWN, false),
            new InternalNode("other2", URI.create("http://127.0.0.1:12"), NodeVersion.UNKNOWN, false),
            new InternalNode("other3", URI.create("http://127.0.0.1:13"), NodeVersion.UNKNOWN, false));
    NodeScheduler nodeScheduler = new NodeScheduler(new UniformNodeSelectorFactory(
            nodeManager,
            new NodeSchedulerConfig().setIncludeCoordinator(false),
            nodeTaskMap,
            new Duration(0, SECONDS)));
    PlanFragment plan = createFragment();
    StageExecution stage = createStageExecution(plan, nodeTaskMap);
    // simulate an underutilized child output buffer (the trailing () -> false)
    StageScheduler scheduler = newSourcePartitionedSchedulerAsStageScheduler(
            stage,
            TABLE_SCAN_NODE_ID,
            new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(500, TestingSplit::createRemoteSplit)),
            new DynamicSplitPlacementPolicy(nodeScheduler.createNodeSelector(session, Optional.of(CONNECTOR_ID)), stage::getAllTasks),
            500,
            new DynamicFilterService(metadata, functionManager, typeOperators, new DynamicFilterConfig()),
            new TableExecuteContextManager(),
            () -> false);
    // the queues of 3 running nodes should be full
    ScheduleResult scheduleResult = scheduler.schedule();
    assertEquals(scheduleResult.getBlockedReason().get(), SPLIT_QUEUES_FULL);
    assertEquals(scheduleResult.getNewTasks().size(), 3);
    assertEquals(scheduleResult.getSplitsScheduled(), 300);
    for (RemoteTask remoteTask : scheduleResult.getNewTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 100);
    }
    // new node added - the pending splits should go to it since the child tasks are not blocked
    nodeManager.addNode(CONNECTOR_ID, new InternalNode("other4", URI.create("http://127.0.0.4:14"), NodeVersion.UNKNOWN, false));
    scheduleResult = scheduler.schedule();
    // the split queues are full, but source task creation is still not blocked
    assertEquals(scheduleResult.getBlockedReason().get(), SPLIT_QUEUES_FULL);
    assertEquals(scheduleResult.getNewTasks().size(), 1);
    assertEquals(scheduleResult.getSplitsScheduled(), 100);
}
Also used : NodeTaskMap(io.trino.execution.NodeTaskMap) PipelinedStageExecution.createPipelinedStageExecution(io.trino.execution.scheduler.PipelinedStageExecution.createPipelinedStageExecution) PartitionedSplitsInfo(io.trino.execution.PartitionedSplitsInfo) MockRemoteTask(io.trino.execution.MockRemoteTaskFactory.MockRemoteTask) RemoteTask(io.trino.execution.RemoteTask) Duration(io.airlift.units.Duration) PlanFragment(io.trino.sql.planner.PlanFragment) ConnectorAwareSplitSource(io.trino.split.ConnectorAwareSplitSource) InMemoryNodeManager(io.trino.metadata.InMemoryNodeManager) SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler(io.trino.execution.scheduler.SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler) TableExecuteContextManager(io.trino.execution.TableExecuteContextManager) InternalNode(io.trino.metadata.InternalNode) DynamicFilterService(io.trino.server.DynamicFilterService) TestingSplit(io.trino.testing.TestingSplit) DynamicFilterConfig(io.trino.execution.DynamicFilterConfig) Test(org.testng.annotations.Test)
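
The final () -> false argument to newSourcePartitionedSchedulerAsStageScheduler is a BooleanSupplier reporting whether any downstream task is blocked on an overutilized output buffer; returning false keeps new task creation enabled. The worked arithmetic below shows what the assertions rely on; the per-node queue capacity of 100 is inferred from the assertions, not from any configuration shown in this listing, and the variable names are illustrative.

import java.util.function.BooleanSupplier;

// Worked arithmetic behind the assertions above; the capacity value is an inference.
class SplitQueueArithmeticSketch
{
    public static void main(String[] args)
    {
        BooleanSupplier anyChildBufferOverutilized = () -> false; // underutilized: tasks may be created

        int nodes = 3;
        int perNodeQueueCapacity = 100;               // apparent pending-split limit per node
        int scheduled = nodes * perNodeQueueCapacity; // 300, matching getSplitsScheduled()
        int pending = 500 - scheduled;                // 200 splits left waiting for queue space

        // Adding a fourth node frees one more queue, so the next schedule() places 100 more.
        System.out.printf("scheduled=%d, pending=%d, taskCreationEnabled=%b%n",
                scheduled, pending, !anyChildBufferOverutilized.getAsBoolean());
    }
}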

Example 13 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

From the class TestSourcePartitionedScheduler, the method testNoNewTaskScheduledWhenChildStageBufferIsOverutilized. The test verifies that when a child stage's output buffer is overutilized, adding a worker node creates no new task and no further splits are scheduled.

@Test
public void testNoNewTaskScheduledWhenChildStageBufferIsOverutilized() {
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    // use private node manager so we can add a node later
    InMemoryNodeManager nodeManager = new InMemoryNodeManager();
    nodeManager.addNode(
            CONNECTOR_ID,
            new InternalNode("other1", URI.create("http://127.0.0.1:11"), NodeVersion.UNKNOWN, false),
            new InternalNode("other2", URI.create("http://127.0.0.1:12"), NodeVersion.UNKNOWN, false),
            new InternalNode("other3", URI.create("http://127.0.0.1:13"), NodeVersion.UNKNOWN, false));
    NodeScheduler nodeScheduler = new NodeScheduler(new UniformNodeSelectorFactory(
            nodeManager,
            new NodeSchedulerConfig().setIncludeCoordinator(false),
            nodeTaskMap,
            new Duration(0, SECONDS)));
    PlanFragment plan = createFragment();
    StageExecution stage = createStageExecution(plan, nodeTaskMap);
    // simulate an overutilized child output buffer (the trailing () -> true)
    StageScheduler scheduler = newSourcePartitionedSchedulerAsStageScheduler(
            stage,
            TABLE_SCAN_NODE_ID,
            new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(400, TestingSplit::createRemoteSplit)),
            new DynamicSplitPlacementPolicy(nodeScheduler.createNodeSelector(session, Optional.of(CONNECTOR_ID)), stage::getAllTasks),
            400,
            new DynamicFilterService(metadata, functionManager, typeOperators, new DynamicFilterConfig()),
            new TableExecuteContextManager(),
            () -> true);
    // the queues of 3 running nodes should be full
    ScheduleResult scheduleResult = scheduler.schedule();
    assertEquals(scheduleResult.getBlockedReason().get(), SPLIT_QUEUES_FULL);
    assertEquals(scheduleResult.getNewTasks().size(), 3);
    assertEquals(scheduleResult.getSplitsScheduled(), 300);
    for (RemoteTask remoteTask : scheduleResult.getNewTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 100);
    }
    // a new node was added, but one child's output buffer is overutilized, so task creation is locked down
    nodeManager.addNode(CONNECTOR_ID, new InternalNode("other4", URI.create("http://127.0.0.4:14"), NodeVersion.UNKNOWN, false));
    scheduleResult = scheduler.schedule();
    assertEquals(scheduleResult.getBlockedReason().get(), SPLIT_QUEUES_FULL);
    assertEquals(scheduleResult.getNewTasks().size(), 0);
    assertEquals(scheduleResult.getSplitsScheduled(), 0);
}
Also used : NodeTaskMap(io.trino.execution.NodeTaskMap) PipelinedStageExecution.createPipelinedStageExecution(io.trino.execution.scheduler.PipelinedStageExecution.createPipelinedStageExecution) PartitionedSplitsInfo(io.trino.execution.PartitionedSplitsInfo) MockRemoteTask(io.trino.execution.MockRemoteTaskFactory.MockRemoteTask) RemoteTask(io.trino.execution.RemoteTask) Duration(io.airlift.units.Duration) PlanFragment(io.trino.sql.planner.PlanFragment) ConnectorAwareSplitSource(io.trino.split.ConnectorAwareSplitSource) InMemoryNodeManager(io.trino.metadata.InMemoryNodeManager) SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler(io.trino.execution.scheduler.SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler) TableExecuteContextManager(io.trino.execution.TableExecuteContextManager) InternalNode(io.trino.metadata.InternalNode) DynamicFilterService(io.trino.server.DynamicFilterService) TestingSplit(io.trino.testing.TestingSplit) DynamicFilterConfig(io.trino.execution.DynamicFilterConfig) Test(org.testng.annotations.Test)
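
This test mirrors Example 12 with one difference: the final BooleanSupplier passed to newSourcePartitionedSchedulerAsStageScheduler returns true, signaling an overutilized child output buffer. As the last three assertions show, the scheduler then refuses to create a task on the newly added node and schedules no further splits until the buffer drains.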

Example 14 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

From the class TestFixedCountScheduler, the method testMultipleNodes. The test verifies that FixedCountScheduler creates exactly one task per node, one on each of the 5 generated nodes.

@Test
public void testMultipleNodes() {
    FixedCountScheduler nodeScheduler = new FixedCountScheduler(
            (node, partition) -> Optional.of(taskFactory.createTableScanTask(
                    new TaskId(new StageId("test", 1), 1, 0),
                    node,
                    ImmutableList.of(),
                    new PartitionedSplitCountTracker(delta -> {}))),
            generateRandomNodes(5));
    ScheduleResult result = nodeScheduler.schedule();
    assertTrue(result.isFinished());
    assertTrue(result.getBlocked().isDone());
    assertEquals(result.getNewTasks().size(), 5);
    assertEquals(result.getNewTasks().stream().map(RemoteTask::getNodeId).collect(toImmutableSet()).size(), 5);
}
Also used : IntStream(java.util.stream.IntStream) AfterClass(org.testng.annotations.AfterClass) StageId(io.trino.execution.StageId) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) RemoteTask(io.trino.execution.RemoteTask) Assert.assertEquals(org.testng.Assert.assertEquals) Test(org.testng.annotations.Test) TaskId(io.trino.execution.TaskId) PartitionedSplitCountTracker(io.trino.execution.NodeTaskMap.PartitionedSplitCountTracker) InternalNode(io.trino.metadata.InternalNode) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) Executors.newCachedThreadPool(java.util.concurrent.Executors.newCachedThreadPool) Executors.newScheduledThreadPool(java.util.concurrent.Executors.newScheduledThreadPool) Threads.daemonThreadsNamed(io.airlift.concurrent.Threads.daemonThreadsNamed) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Optional(java.util.Optional) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) Assert.assertTrue(org.testng.Assert.assertTrue) NodeVersion(io.trino.client.NodeVersion) URI(java.net.URI) MockRemoteTaskFactory(io.trino.execution.MockRemoteTaskFactory) ExecutorService(java.util.concurrent.ExecutorService)
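
generateRandomNodes is a private helper of TestFixedCountScheduler that this listing omits. Judging from the imports above (IntStream, InternalNode, NodeVersion, URI, toImmutableList), a plausible reconstruction is sketched below; the node identifiers and port scheme are assumptions.

// Hypothetical reconstruction of the test helper; identifiers and URIs are illustrative.
private static List<InternalNode> generateRandomNodes(int count)
{
    return IntStream.range(0, count)
            .mapToObj(i -> new InternalNode(
                    "node" + i,
                    URI.create("http://127.0.0.1:" + (8080 + i)),
                    NodeVersion.UNKNOWN,
                    false))
            .collect(toImmutableList());
}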

Example 15 with RemoteTask

Use of io.trino.execution.RemoteTask in project trino by trinodb.

From the class TestSourcePartitionedScheduler, the method testScheduleSplitsBlock. The test verifies that the scheduler blocks once every node's split queue reaches its limit of 20 splits, and resumes without blocking again after one task's splits are cleared.

@Test
public void testScheduleSplitsBlock() {
    PlanFragment plan = createFragment();
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    StageExecution stage = createStageExecution(plan, nodeTaskMap);
    StageScheduler scheduler = getSourcePartitionedScheduler(createFixedSplitSource(80, TestingSplit::createRemoteSplit), stage, nodeManager, nodeTaskMap, 1, STAGE);
    // schedule first 60 splits, which will cause the scheduler to block
    for (int i = 0; i <= 60; i++) {
        ScheduleResult scheduleResult = scheduler.schedule();
        assertFalse(scheduleResult.isFinished());
        // blocks at 20 per node
        assertEquals(scheduleResult.getBlocked().isDone(), i != 60);
        // first three splits create new tasks
        assertEquals(scheduleResult.getNewTasks().size(), i < 3 ? 1 : 0);
        assertEquals(stage.getAllTasks().size(), i < 3 ? i + 1 : 3);
        assertPartitionedSplitCount(stage, min(i + 1, 60));
    }
    for (RemoteTask remoteTask : stage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 20);
    }
    // TODO: rewrite MockRemoteTask to fire a state transition when splits are cleared, and then validate that the blocked future completes
    // drop the 20 splits from one node
    ((MockRemoteTask) stage.getAllTasks().get(0)).clearSplits();
    // schedule remaining 20 splits
    for (int i = 0; i < 20; i++) {
        ScheduleResult scheduleResult = scheduler.schedule();
        // finishes when last split is fetched
        if (i == 19) {
            assertEffectivelyFinished(scheduleResult, scheduler);
        } else {
            assertFalse(scheduleResult.isFinished());
        }
        // does not block again
        assertTrue(scheduleResult.getBlocked().isDone());
        // no additional tasks will be created
        assertEquals(scheduleResult.getNewTasks().size(), 0);
        assertEquals(stage.getAllTasks().size(), 3);
        // we dropped 20 splits so start at 40 and count to 60
        assertPartitionedSplitCount(stage, min(i + 41, 60));
    }
    for (RemoteTask remoteTask : stage.getAllTasks()) {
        PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
        assertEquals(splitsInfo.getCount(), 20);
    }
    stage.abort();
}
Also used : NodeTaskMap(io.trino.execution.NodeTaskMap) PipelinedStageExecution.createPipelinedStageExecution(io.trino.execution.scheduler.PipelinedStageExecution.createPipelinedStageExecution) PartitionedSplitsInfo(io.trino.execution.PartitionedSplitsInfo) MockRemoteTask(io.trino.execution.MockRemoteTaskFactory.MockRemoteTask) RemoteTask(io.trino.execution.RemoteTask) PlanFragment(io.trino.sql.planner.PlanFragment) SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler(io.trino.execution.scheduler.SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler) Test(org.testng.annotations.Test)
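
assertPartitionedSplitCount is another private helper of TestSourcePartitionedScheduler omitted from this listing. Its call sites suggest it totals the partitioned split counts across all of the stage's tasks; a sketch under that assumption:

// Hypothetical reconstruction; assumes the helper sums splits across the stage's tasks.
private static void assertPartitionedSplitCount(StageExecution stage, int expectedPartitionedSplitCount)
{
    int totalSplits = stage.getAllTasks().stream()
            .mapToInt(task -> task.getPartitionedSplitsInfo().getCount())
            .sum();
    assertEquals(totalSplits, expectedPartitionedSplitCount);
}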

Aggregations

RemoteTask (io.trino.execution.RemoteTask): 26
InternalNode (io.trino.metadata.InternalNode): 11
Test (org.testng.annotations.Test): 11
NodeTaskMap (io.trino.execution.NodeTaskMap): 9
ImmutableList (com.google.common.collect.ImmutableList): 7
MockRemoteTask (io.trino.execution.MockRemoteTaskFactory.MockRemoteTask): 7
PartitionedSplitsInfo (io.trino.execution.PartitionedSplitsInfo): 7
PipelinedStageExecution.createPipelinedStageExecution (io.trino.execution.scheduler.PipelinedStageExecution.createPipelinedStageExecution): 7
SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler (io.trino.execution.scheduler.SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler): 7
Split (io.trino.metadata.Split): 7
PlanFragment (io.trino.sql.planner.PlanFragment): 7
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 5
List (java.util.List): 5
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 4
ImmutableSet (com.google.common.collect.ImmutableSet): 4
Duration (io.airlift.units.Duration): 4
Lifespan (io.trino.execution.Lifespan): 4
TaskId (io.trino.execution.TaskId): 4
Optional (java.util.Optional): 4
Preconditions.checkArgument (com.google.common.base.Preconditions.checkArgument): 3