Example 56 with InternalNode

Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.

From the class TestNodeScheduler, the method testScheduleLocal:

@Test
public void testScheduleLocal() {
    setUpNodes();
    Split split = new Split(CONNECTOR_ID, new TestSplitLocallyAccessible(), Lifespan.taskWide());
    Set<Split> splits = ImmutableSet.of(split);
    Map.Entry<InternalNode, Split> assignment = Iterables.getOnlyElement(nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values()), Optional.empty()).getAssignments().entries());
    assertEquals(assignment.getKey().getHostAndPort(), split.getAddresses().get(0));
    assertEquals(assignment.getValue(), split);
}
Also used : InternalNode(io.prestosql.metadata.InternalNode) MockSplit(io.prestosql.MockSplit) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit) Split(io.prestosql.metadata.Split) TestingSplit(io.prestosql.testing.TestingSplit) SplitCacheMap(io.prestosql.execution.SplitCacheMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) NodeTaskMap(io.prestosql.execution.NodeTaskMap) Test(org.testng.annotations.Test)
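
The test depends on a helper split, TestSplitLocallyAccessible, that is not shown on this page. The sketch below is a hypothetical stand-in, not the class from hetu-core: it assumes only the core ConnectorSplit methods (isRemotelyAccessible, getAddresses, getInfo), the host 10.0.0.1:11 is illustrative, and the real hetu-core interface may expect additional overrides. Returning false from isRemotelyAccessible is what forces the node selector to place the split on the node matching getAddresses(), which is exactly what the getHostAndPort() assertion checks.

import com.google.common.collect.ImmutableList;
import io.prestosql.spi.HostAddress;
import io.prestosql.spi.connector.ConnectorSplit;

import java.util.List;

// hypothetical locally-accessible split; host and port are made up for illustration
public class LocalOnlySplit implements ConnectorSplit {
    private final List<HostAddress> addresses = ImmutableList.of(HostAddress.fromParts("10.0.0.1", 11));

    @Override
    public boolean isRemotelyAccessible() {
        // false tells the node selector it must honor getAddresses()
        return false;
    }

    @Override
    public List<HostAddress> getAddresses() {
        return addresses;
    }

    @Override
    public Object getInfo() {
        return this;
    }
}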

Example 57 with InternalNode

Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.

From the class TestSourcePartitionedScheduler, the method testBalancedSplitAssignment:

@Test
public void testBalancedSplitAssignment() {
    // use private node manager so we can add a node later
    InMemoryNodeManager memoryNodeManager = new InMemoryNodeManager();
    memoryNodeManager.addNode(CONNECTOR_ID, new InternalNode("other1", URI.create("http://127.0.0.1:11"), NodeVersion.UNKNOWN, false), new InternalNode("other2", URI.create("http://127.0.0.1:12"), NodeVersion.UNKNOWN, false), new InternalNode("other3", URI.create("http://127.0.0.1:13"), NodeVersion.UNKNOWN, false));
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    // Schedule 15 splits - there are 3 nodes, each node should get 5 splits
    StageExecutionPlan firstPlan = createPlan(createFixedSplitSource(15, TestingSplit::createRemoteSplit));
    SqlStageExecution firstStage = createSqlStageExecution(firstPlan, nodeTaskMap);
    StageScheduler firstScheduler = getSourcePartitionedScheduler(firstPlan, firstStage, memoryNodeManager, nodeTaskMap, 200);
    ScheduleResult scheduleResult = firstScheduler.schedule();
    assertEffectivelyFinished(scheduleResult, firstScheduler);
    assertTrue(scheduleResult.getBlocked().isDone());
    assertEquals(scheduleResult.getNewTasks().size(), 3);
    assertEquals(firstStage.getAllTasks().size(), 3);
    for (RemoteTask remoteTask : firstStage.getAllTasks()) {
        assertEquals(remoteTask.getPartitionedSplitCount(), 5);
    }
    // Add new node
    InternalNode additionalNode = new InternalNode("other4", URI.create("http://127.0.0.1:14"), NodeVersion.UNKNOWN, false);
    memoryNodeManager.addNode(CONNECTOR_ID, additionalNode);
    // Schedule 5 splits in another query. Since the new node does not have any splits, all 5 splits are assigned to the new node
    StageExecutionPlan secondPlan = createPlan(createFixedSplitSource(5, TestingSplit::createRemoteSplit));
    SqlStageExecution secondStage = createSqlStageExecution(secondPlan, nodeTaskMap);
    StageScheduler secondScheduler = getSourcePartitionedScheduler(secondPlan, secondStage, memoryNodeManager, nodeTaskMap, 200);
    scheduleResult = secondScheduler.schedule();
    assertEffectivelyFinished(scheduleResult, secondScheduler);
    assertTrue(scheduleResult.getBlocked().isDone());
    assertEquals(scheduleResult.getNewTasks().size(), 1);
    assertEquals(secondStage.getAllTasks().size(), 1);
    RemoteTask task = secondStage.getAllTasks().get(0);
    assertEquals(task.getPartitionedSplitCount(), 5);
    firstStage.abort();
    secondStage.abort();
}
Also used : NodeTaskMap(io.prestosql.execution.NodeTaskMap) StageExecutionPlan(io.prestosql.sql.planner.StageExecutionPlan) MockRemoteTask(io.prestosql.execution.MockRemoteTaskFactory.MockRemoteTask) RemoteTask(io.prestosql.execution.RemoteTask) InternalNode(io.prestosql.metadata.InternalNode) SqlStageExecution(io.prestosql.execution.SqlStageExecution) InMemoryNodeManager(io.prestosql.metadata.InMemoryNodeManager) SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler(io.prestosql.execution.scheduler.SourcePartitionedScheduler.newSourcePartitionedSchedulerAsStageScheduler) Test(org.testng.annotations.Test)
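
The balancing asserted above follows from a simple rule: each split goes to the candidate node with the fewest splits already assigned. The sketch below is a simplified, hypothetical model of that rule, not the actual NodeScheduler or node selector code (which also applies per-node and per-task split limits and locality preferences). It reproduces the two outcomes the test checks: 15 splits over 3 nodes gives 5 per node, and a newly added empty node absorbs the next 5.

import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LeastLoadedPicker {
    private final Map<String, Integer> assignedCounts = new HashMap<>();

    // assign one split: pick the candidate node with the fewest splits so far
    public String pick(List<String> candidateNodes) {
        String chosen = candidateNodes.stream()
                .min(Comparator.comparingInt((String node) -> assignedCounts.getOrDefault(node, 0)))
                .orElseThrow(IllegalStateException::new);
        assignedCounts.merge(chosen, 1, Integer::sum);
        return chosen;
    }

    public static void main(String[] args) {
        LeastLoadedPicker picker = new LeastLoadedPicker();
        List<String> threeNodes = Arrays.asList("other1", "other2", "other3");
        for (int i = 0; i < 15; i++) {
            picker.pick(threeNodes);                      // 15 splits over 3 nodes -> 5 per node
        }
        List<String> fourNodes = Arrays.asList("other1", "other2", "other3", "other4");
        for (int i = 0; i < 5; i++) {
            System.out.println(picker.pick(fourNodes));   // prints other4 five times
        }
    }
}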

Example 58 with InternalNode

Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.

From the class TestDistributedResourceGroup, the method testStateStoreFetchAndUpdate:

@Test
public void testStateStoreFetchAndUpdate() throws JsonProcessingException {
    synchronized (lock) {
        DistributedResourceGroupTemp root = new DistributedResourceGroupTemp(Optional.empty(), "root", (group, export) -> {
        }, directExecutor(), statestore, internalNodeManager);
        resourceGroupBasicSetUp(root, ONE_MEGABYTE, 1, 1);
        MockManagedQueryExecution query1 = new MockManagedQueryExecution(100);
        query1.setResourceGroupId(root.getId());
        MockManagedQueryExecution query2 = new MockManagedQueryExecution(0);
        query2.setResourceGroupId(root.getId());
        Map<String, String> mockMap = new HashMap<>();
        MockStateMap<String, String> mockStateMap = new MockStateMap<>("127.0.0.1-resourceaggrstats", mockMap);
        when(statestore.getOrCreateStateCollection(anyString(), anyObject())).thenReturn(mockStateMap);
        when(internalNodeManager.getCurrentNode()).thenReturn(new InternalNode("node1", URI.create("local://127.0.0.1"), NodeVersion.UNKNOWN, true));
        when(internalNodeManager.getCoordinators()).thenReturn(ImmutableSet.of(new InternalNode("node1", URI.create("local://127.0.0.1"), NodeVersion.UNKNOWN, true)));
        root.run(query1);
        DistributedResourceGroupAggrStats rootStats = MAPPER.readerFor(DistributedResourceGroupAggrStats.class).readValue(mockMap.get("root"));
        assertEquals(rootStats.getRunningQueries(), 1);
        root.run(query2);
        rootStats = MAPPER.readerFor(DistributedResourceGroupAggrStats.class).readValue(mockMap.get("root"));
        assertEquals(rootStats.getQueuedQueries(), 1);
        assertEquals(rootStats.getCachedMemoryUsageBytes(), 100);
        when(internalNodeManager.getCurrentNode()).thenReturn(new InternalNode("node2", URI.create("local://127.0.0.2"), NodeVersion.UNKNOWN, true));
        assertEquals(root.getGlobalCachedMemoryUsageBytes(), 200);
    }
}
Also used : MockStateMap(io.prestosql.statestore.MockStateMap) MockManagedQueryExecution(io.prestosql.execution.MockManagedQueryExecution) HashMap(java.util.HashMap) Matchers.anyString(org.mockito.Matchers.anyString) InternalNode(io.prestosql.metadata.InternalNode) Test(org.testng.annotations.Test)
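
The assertions above read the resource group's aggregated stats back out of the mocked state map with Jackson. The snippet below is a self-contained sketch of that serialize-into-map, read-back-with-readerFor round trip; GroupStats is a hypothetical stand-in for DistributedResourceGroupAggrStats, whose real fields may differ.

import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.HashMap;
import java.util.Map;

public class StateMapRoundTrip {
    // hypothetical stand-in for DistributedResourceGroupAggrStats; the real fields may differ
    public static class GroupStats {
        public int runningQueries;
        public long cachedMemoryUsageBytes;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        Map<String, String> stateMap = new HashMap<>();

        // the resource group serializes its aggregated stats into the shared state map...
        GroupStats written = new GroupStats();
        written.runningQueries = 1;
        written.cachedMemoryUsageBytes = 100;
        stateMap.put("root", mapper.writeValueAsString(written));

        // ...and the test reads them back the same way the example does with MAPPER.readerFor(...)
        GroupStats read = mapper.readerFor(GroupStats.class).readValue(stateMap.get("root"));
        System.out.println(read.runningQueries + " running, " + read.cachedMemoryUsageBytes + " bytes cached");
    }
}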

Example 59 with InternalNode

Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.

From the class TestHiveIntegrationSmokeTest, the method testRuseExchangeGroupSplitsMatchingBetweenProducerConsumer:

@Test
public void testRuseExchangeGroupSplitsMatchingBetweenProducerConsumer() {
    setUpNodes();
    NodeTaskMap nodeTasks = new NodeTaskMap(new FinalizerService());
    StageId stageId = new StageId(new QueryId("query"), 0);
    UUID uuid = UUID.randomUUID();
    PlanFragment testFragmentProducer = createTableScanPlanFragment("build", ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_PRODUCER, uuid, 1);
    PlanNodeId tableScanNodeId = new PlanNodeId("plan_id");
    StageExecutionPlan producerStageExecutionPlan = new StageExecutionPlan(testFragmentProducer, ImmutableMap.of(tableScanNodeId, new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(0, TestingSplit::createRemoteSplit))), ImmutableList.of(), ImmutableMap.of(tableScanNodeId, new TableInfo(new QualifiedObjectName("test", TEST_SCHEMA, "test"), TupleDomain.all())));
    SqlStageExecution producerStage = createSqlStageExecution(stageId, new TestSqlTaskManager.MockLocationFactory().createStageLocation(stageId), producerStageExecutionPlan.getFragment(), producerStageExecutionPlan.getTables(), new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor), TEST_SESSION_REUSE, true, nodeTasks, remoteTaskExecutor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    Set<Split> splits = createAndGetSplits(10);
    Multimap<InternalNode, Split> producerAssignment = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values()), Optional.of(producerStage)).getAssignments();
    PlanFragment testFragmentConsumer = createTableScanPlanFragment("build", ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_CONSUMER, uuid, 1);
    StageExecutionPlan consumerStageExecutionPlan = new StageExecutionPlan(testFragmentConsumer, ImmutableMap.of(tableScanNodeId, new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(0, TestingSplit::createRemoteSplit))), ImmutableList.of(), ImmutableMap.of(tableScanNodeId, new TableInfo(new QualifiedObjectName("test", TEST_SCHEMA, "test"), TupleDomain.all())));
    SqlStageExecution stage = createSqlStageExecution(stageId, new TestSqlTaskManager.MockLocationFactory().createStageLocation(stageId), consumerStageExecutionPlan.getFragment(), consumerStageExecutionPlan.getTables(), new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor), TEST_SESSION_REUSE, true, nodeTasks, remoteTaskExecutor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    Multimap<InternalNode, Split> consumerAssignment = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values()), Optional.of(stage)).getAssignments();
    // the consumer stage should receive the same split assignments, node for node, as the producer stage
    assertEquals(consumerAssignment.size(), producerAssignment.size());
    for (InternalNode node : consumerAssignment.keySet()) {
        List<Split> producerNodeSplits = new ArrayList<>();
        List<Split> consumerNodeSplits = new ArrayList<>();
        Collection<Split> producerSplits = producerAssignment.get(node);
        Collection<Split> consumerSplits = consumerAssignment.get(node);
        producerSplits.forEach(producerNodeSplits::add);
        consumerSplits.forEach(consumerNodeSplits::add);
        // compare the grouped (reuse-exchange) splits of producer and consumer key by key
        List<Split> producerGroupedSplits = producerNodeSplits.get(0).getSplits();
        List<Split> consumerGroupedSplits = consumerNodeSplits.get(0).getSplits();
        int i = 0;
        for (Split producerSplit : producerGroupedSplits) {
            SplitKey producerKey = new SplitKey(producerSplit, TEST_CATALOG, TEST_SCHEMA, TEST_TABLE);
            SplitKey consumerKey = new SplitKey(consumerGroupedSplits.get(i), TEST_CATALOG, TEST_SCHEMA, TEST_TABLE);
            assertEquals(producerKey, consumerKey);
            i++;
        }
    }
}
Also used : NoOpFailureDetector(io.prestosql.failuredetector.NoOpFailureDetector) SplitKey(io.prestosql.execution.SplitKey) StageExecutionPlan(io.prestosql.sql.planner.StageExecutionPlan) StageId(io.prestosql.execution.StageId) ArrayList(java.util.ArrayList) TestPhasedExecutionSchedule.createTableScanPlanFragment(io.prestosql.execution.scheduler.TestPhasedExecutionSchedule.createTableScanPlanFragment) PlanFragment(io.prestosql.sql.planner.PlanFragment) ConnectorAwareSplitSource(io.prestosql.split.ConnectorAwareSplitSource) SqlStageExecution.createSqlStageExecution(io.prestosql.execution.SqlStageExecution.createSqlStageExecution) SqlStageExecution(io.prestosql.execution.SqlStageExecution) QuerySnapshotManager(io.prestosql.snapshot.QuerySnapshotManager) PlanNodeId(io.prestosql.spi.plan.PlanNodeId) LocalStateStoreProvider(io.prestosql.statestore.LocalStateStoreProvider) SeedStoreManager(io.prestosql.seedstore.SeedStoreManager) TableInfo(io.prestosql.execution.TableInfo) DynamicFilterService(io.prestosql.dynamicfilter.DynamicFilterService) UUID(java.util.UUID) NodeTaskMap(io.prestosql.execution.NodeTaskMap) QueryId(io.prestosql.spi.QueryId) QualifiedObjectName(io.prestosql.spi.connector.QualifiedObjectName) ColumnConstraint(io.prestosql.sql.planner.planprinter.IoPlanPrinter.ColumnConstraint) Constraint(io.prestosql.spi.connector.Constraint) FileSystemClientManager(io.prestosql.filesystem.FileSystemClientManager) SplitSchedulerStats(io.prestosql.execution.scheduler.SplitSchedulerStats) FinalizerService(io.prestosql.util.FinalizerService) InternalNode(io.prestosql.metadata.InternalNode) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit) Split(io.prestosql.metadata.Split) TestingSplit(io.prestosql.testing.TestingSplit) MockRemoteTaskFactory(io.prestosql.execution.MockRemoteTaskFactory) Test(org.testng.annotations.Test) AbstractTestIntegrationSmokeTest(io.prestosql.tests.AbstractTestIntegrationSmokeTest)
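
Stripped of the Presto types, the verification loop above checks that the consumer stage was handed the same splits, node for node, as the producer stage. The sketch below shows that per-key comparison on Guava multimaps, with plain strings standing in for InternalNode and Split; the node and split names are made up.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Arrays;

public class AssignmentComparison {
    public static void main(String[] args) {
        // strings stand in for InternalNode and Split
        Multimap<String, String> producer = HashMultimap.create();
        producer.putAll("node1", Arrays.asList("split-a", "split-b"));
        producer.put("node2", "split-c");

        Multimap<String, String> consumer = HashMultimap.create();
        consumer.putAll("node1", Arrays.asList("split-a", "split-b"));
        consumer.put("node2", "split-c");

        // same total number of assignments
        if (consumer.size() != producer.size()) {
            throw new AssertionError("assignment sizes differ");
        }
        // per node, every split given to the consumer was also given to the producer
        for (String node : consumer.keySet()) {
            for (String split : consumer.get(node)) {
                if (!producer.containsEntry(node, split)) {
                    throw new AssertionError("missing " + split + " on " + node);
                }
            }
        }
        System.out.println("assignments match");
    }
}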

Example 60 with InternalNode

Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.

From the class TestSqlStageExecution, the method testFinalStageInfoInternal:

private void testFinalStageInfoInternal() throws Exception {
    NodeTaskMap nodeTaskMap = new NodeTaskMap(new FinalizerService());
    StageId stageId = new StageId(new QueryId("query"), 0);
    SqlStageExecution stage = createSqlStageExecution(stageId, new MockLocationFactory().createStageLocation(stageId), createExchangePlanFragment(), ImmutableMap.of(), new MockRemoteTaskFactory(executor, scheduledExecutor), TEST_SESSION, true, nodeTaskMap, executor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    stage.setOutputBuffers(createInitialEmptyOutputBuffers(ARBITRARY));
    // add listener that fetches stage info when the final status is available
    SettableFuture<StageInfo> finalStageInfo = SettableFuture.create();
    stage.addFinalStageInfoListener(finalStageInfo::set);
    // in a background thread add a ton of tasks
    CountDownLatch latch = new CountDownLatch(1000);
    Future<?> addTasksTask = executor.submit(() -> {
        try {
            for (int i = 0; i < 1_000_000; i++) {
                if (Thread.interrupted()) {
                    return;
                }
                InternalNode node = new InternalNode("source" + i, URI.create("http://10.0.0." + (i / 10_000) + ":" + (i % 10_000)), NodeVersion.UNKNOWN, false);
                stage.scheduleTask(node, i, OptionalInt.empty());
                latch.countDown();
            }
        } finally {
            while (latch.getCount() > 0) {
                latch.countDown();
            }
        }
    });
    // wait for some tasks to be created, and then abort the query
    latch.await(1, MINUTES);
    assertFalse(stage.getStageInfo().getTasks().isEmpty());
    stage.abort();
    // once the final stage info is available, verify that it is complete
    StageInfo stageInfo = finalStageInfo.get(1, MINUTES);
    assertFalse(stageInfo.getTasks().isEmpty());
    assertTrue(stageInfo.isCompleteInfo());
    assertSame(stage.getStageInfo(), stageInfo);
    // cancel the background thread adding tasks
    addTasksTask.cancel(true);
}
Also used : NoOpFailureDetector(io.prestosql.failuredetector.NoOpFailureDetector) QueryId(io.prestosql.spi.QueryId) MockLocationFactory(io.prestosql.execution.TestSqlTaskManager.MockLocationFactory) CountDownLatch(java.util.concurrent.CountDownLatch) SqlStageExecution.createSqlStageExecution(io.prestosql.execution.SqlStageExecution.createSqlStageExecution) FileSystemClientManager(io.prestosql.filesystem.FileSystemClientManager) QuerySnapshotManager(io.prestosql.snapshot.QuerySnapshotManager) LocalStateStoreProvider(io.prestosql.statestore.LocalStateStoreProvider) SplitSchedulerStats(io.prestosql.execution.scheduler.SplitSchedulerStats) SeedStoreManager(io.prestosql.seedstore.SeedStoreManager) FinalizerService(io.prestosql.util.FinalizerService) DynamicFilterService(io.prestosql.dynamicfilter.DynamicFilterService) InternalNode(io.prestosql.metadata.InternalNode)
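
The concurrency pattern in this test is worth isolating: a background task produces work in a loop, a CountDownLatch lets the main thread wait until enough work has happened, and cancelling the Future interrupts the loop once the main thread is done. The sketch below shows just that pattern with no Presto types; the loop body and counts are illustrative.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class WaitForSomeWork {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        CountDownLatch latch = new CountDownLatch(1000);

        Future<?> producer = executor.submit(() -> {
            try {
                for (int i = 0; i < 1_000_000; i++) {
                    if (Thread.interrupted()) {
                        return; // stop promptly once cancelled
                    }
                    // ... do one unit of work here, e.g. schedule a task ...
                    latch.countDown();
                }
            }
            finally {
                // never leave the waiter hanging if the loop exits early
                while (latch.getCount() > 0) {
                    latch.countDown();
                }
            }
        });

        // wait until at least 1000 units of work happened (or a minute passed)
        latch.await(1, TimeUnit.MINUTES);

        // ... inspect the partial results here, as the test inspects the stage info ...

        producer.cancel(true);
        executor.shutdownNow();
    }
}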

Aggregations

InternalNode (io.prestosql.metadata.InternalNode): 61
Split (io.prestosql.metadata.Split): 33
ConnectorSplit (io.prestosql.spi.connector.ConnectorSplit): 23
Test (org.testng.annotations.Test): 22
TestingSplit (io.prestosql.testing.TestingSplit): 20
HashSet (java.util.HashSet): 17
MockSplit (io.prestosql.MockSplit): 16
PlanNodeId (io.prestosql.spi.plan.PlanNodeId): 16
ImmutableList (com.google.common.collect.ImmutableList): 15
HashMap (java.util.HashMap): 15
RemoteTask (io.prestosql.execution.RemoteTask): 14
LinkedHashSet (java.util.LinkedHashSet): 14
NodeTaskMap (io.prestosql.execution.NodeTaskMap): 13
ArrayList (java.util.ArrayList): 12
Map (java.util.Map): 12
MockRemoteTaskFactory (io.prestosql.execution.MockRemoteTaskFactory): 11
SqlStageExecution (io.prestosql.execution.SqlStageExecution): 10
ImmutableSet (com.google.common.collect.ImmutableSet): 9
TaskId (io.prestosql.execution.TaskId): 9
PrestoException (io.prestosql.spi.PrestoException): 9