
Example 11 with SeedStoreManager

Use of io.prestosql.seedstore.SeedStoreManager in project hetu-core by openlookeng.

From the class TestNodeScheduler, the method testRuseExchangeComputeAssignments:

@Test
public void testRuseExchangeComputeAssignments() {
    setUpNodes();
    Split split = new Split(CONNECTOR_ID, new TestSplitLocallyAccessible(), Lifespan.taskWide());
    Set<Split> splits = ImmutableSet.of(split);
    NodeTaskMap newNodeTaskMap = new NodeTaskMap(new FinalizerService());
    StageId stageId = new StageId(new QueryId("query"), 0);
    UUID uuid = UUID.randomUUID();
    PlanFragment testFragmentProducer = createTableScanPlanFragment("build", ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_PRODUCER, uuid, 1);
    PlanNodeId tableScanNodeId = new PlanNodeId("plan_id");
    StageExecutionPlan producerStageExecutionPlan = new StageExecutionPlan(testFragmentProducer, ImmutableMap.of(tableScanNodeId, new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(0, TestingSplit::createRemoteSplit))), ImmutableList.of(), ImmutableMap.of(tableScanNodeId, new TableInfo(new QualifiedObjectName("test", TEST_SCHEMA, "test"), TupleDomain.all())));
    SqlStageExecution producerStage = createSqlStageExecution(stageId, new TestSqlTaskManager.MockLocationFactory().createStageLocation(stageId), producerStageExecutionPlan.getFragment(), producerStageExecutionPlan.getTables(), new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor), TEST_SESSION_REUSE, true, newNodeTaskMap, remoteTaskExecutor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    Map.Entry<InternalNode, Split> producerAssignment = Iterables.getOnlyElement(nodeSelector.computeAssignments(splits, ImmutableList.copyOf(this.taskMap.values()), Optional.of(producerStage)).getAssignments().entries());
    PlanFragment testFragmentConsumer = createTableScanPlanFragment("build", ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_CONSUMER, uuid, 1);
    StageExecutionPlan consumerStageExecutionPlan = new StageExecutionPlan(testFragmentConsumer, ImmutableMap.of(tableScanNodeId, new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(0, TestingSplit::createRemoteSplit))), ImmutableList.of(), ImmutableMap.of(tableScanNodeId, new TableInfo(new QualifiedObjectName("test", TEST_SCHEMA, "test"), TupleDomain.all())));
    SqlStageExecution stage = createSqlStageExecution(stageId, new TestSqlTaskManager.MockLocationFactory().createStageLocation(stageId), consumerStageExecutionPlan.getFragment(), consumerStageExecutionPlan.getTables(), new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor), TEST_SESSION_REUSE, true, newNodeTaskMap, remoteTaskExecutor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    Map.Entry<InternalNode, Split> consumerAssignment = Iterables.getOnlyElement(nodeSelector.computeAssignments(splits, ImmutableList.copyOf(this.taskMap.values()), Optional.of(stage)).getAssignments().entries());
    Split producerSplit = producerAssignment.getValue();
    Split consumerSplit = consumerAssignment.getValue();
    SplitKey splitKeyProducer = new SplitKey(producerSplit, producerSplit.getCatalogName().getCatalogName(), TEST_SCHEMA, "test");
    SplitKey splitKeyConsumer = new SplitKey(consumerSplit, consumerSplit.getCatalogName().getCatalogName(), TEST_SCHEMA, "test");
    // the producer and consumer assignments should resolve to the same SplitKey
    assertEquals(splitKeyConsumer, splitKeyProducer);
}
Also used : NoOpFailureDetector(io.prestosql.failuredetector.NoOpFailureDetector) SplitKey(io.prestosql.execution.SplitKey) StageExecutionPlan(io.prestosql.sql.planner.StageExecutionPlan) StageId(io.prestosql.execution.StageId) TestPhasedExecutionSchedule.createTableScanPlanFragment(io.prestosql.execution.scheduler.TestPhasedExecutionSchedule.createTableScanPlanFragment) PlanFragment(io.prestosql.sql.planner.PlanFragment) ConnectorAwareSplitSource(io.prestosql.split.ConnectorAwareSplitSource) SqlStageExecution.createSqlStageExecution(io.prestosql.execution.SqlStageExecution.createSqlStageExecution) SqlStageExecution(io.prestosql.execution.SqlStageExecution) QuerySnapshotManager(io.prestosql.snapshot.QuerySnapshotManager) PlanNodeId(io.prestosql.spi.plan.PlanNodeId) LocalStateStoreProvider(io.prestosql.statestore.LocalStateStoreProvider) SeedStoreManager(io.prestosql.seedstore.SeedStoreManager) TableInfo(io.prestosql.execution.TableInfo) DynamicFilterService(io.prestosql.dynamicfilter.DynamicFilterService) UUID(java.util.UUID) NodeTaskMap(io.prestosql.execution.NodeTaskMap) QueryId(io.prestosql.spi.QueryId) QualifiedObjectName(io.prestosql.spi.connector.QualifiedObjectName) FileSystemClientManager(io.prestosql.filesystem.FileSystemClientManager) FinalizerService(io.prestosql.util.FinalizerService) InternalNode(io.prestosql.metadata.InternalNode) MockSplit(io.prestosql.MockSplit) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit) Split(io.prestosql.metadata.Split) TestingSplit(io.prestosql.testing.TestingSplit) SplitCacheMap(io.prestosql.execution.SplitCacheMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) NodeTaskMap(io.prestosql.execution.NodeTaskMap) MockRemoteTaskFactory(io.prestosql.execution.MockRemoteTaskFactory) Test(org.testng.annotations.Test)
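Every SqlStageExecution in these tests is built with the same dependency chain spelled out inline: a FileSystemClientManager feeds a SeedStoreManager, which backs a LocalStateStoreProvider, which in turn backs the DynamicFilterService handed to createSqlStageExecution. Purely as a readability aid, that chain can be factored into a helper; the sketch below is illustrative only, and the names TestDependencies and newDynamicFilterService are hypothetical rather than part of hetu-core.

import io.prestosql.dynamicfilter.DynamicFilterService;
import io.prestosql.filesystem.FileSystemClientManager;
import io.prestosql.seedstore.SeedStoreManager;
import io.prestosql.statestore.LocalStateStoreProvider;

final class TestDependencies {
    private TestDependencies() {}

    // Wraps the FileSystemClientManager -> SeedStoreManager -> LocalStateStoreProvider
    // -> DynamicFilterService chain that the tests above construct inline.
    static DynamicFilterService newDynamicFilterService() {
        FileSystemClientManager fileSystemClientManager = new FileSystemClientManager();
        SeedStoreManager seedStoreManager = new SeedStoreManager(fileSystemClientManager);
        LocalStateStoreProvider stateStoreProvider = new LocalStateStoreProvider(seedStoreManager);
        return new DynamicFilterService(stateStoreProvider);
    }
}

With such a helper, each createSqlStageExecution call above could pass TestDependencies.newDynamicFilterService() in place of the inline new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))) expression.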

Example 12 with SeedStoreManager

Use of io.prestosql.seedstore.SeedStoreManager in project hetu-core by openlookeng.

From the class TestNodeScheduler, the method testRuseExchangeComputeAssignmentsSplitsNotMatchProdConsumer:

@Test
public void testRuseExchangeComputeAssignmentsSplitsNotMatchProdConsumer() {
    setUpNodes();
    Split split = new Split(CONNECTOR_ID, new TestSplitLocallyAccessible(), Lifespan.taskWide());
    Set<Split> splits = ImmutableSet.of(split);
    NodeTaskMap newNodeTaskMap = new NodeTaskMap(new FinalizerService());
    StageId stageId = new StageId(new QueryId("query"), 0);
    UUID uuid = UUID.randomUUID();
    PlanFragment testFragmentProducer = createTableScanPlanFragment("build", ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_PRODUCER, uuid, 1);
    PlanNodeId tableScanNodeId = new PlanNodeId("plan_id");
    StageExecutionPlan producerStageExecutionPlan = new StageExecutionPlan(testFragmentProducer, ImmutableMap.of(tableScanNodeId, new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(0, TestingSplit::createRemoteSplit))), ImmutableList.of(), ImmutableMap.of(tableScanNodeId, new TableInfo(new QualifiedObjectName("test", TEST_SCHEMA, "test"), TupleDomain.all())));
    SqlStageExecution producerStage = createSqlStageExecution(stageId, new TestSqlTaskManager.MockLocationFactory().createStageLocation(stageId), producerStageExecutionPlan.getFragment(), producerStageExecutionPlan.getTables(), new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor), TEST_SESSION_REUSE, true, newNodeTaskMap, remoteTaskExecutor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    nodeSelector.computeAssignments(splits, ImmutableList.copyOf(this.taskMap.values()), Optional.of(producerStage)).getAssignments().entries();
    // Consumer
    Split splitConsumer = new Split(CONNECTOR_ID, new TestSplitLocallyAccessibleDifferentIndex(), Lifespan.taskWide());
    Set<Split> splitConsumers = ImmutableSet.of(splitConsumer);
    PlanFragment testFragmentConsumer = createTableScanPlanFragment("build", ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_CONSUMER, uuid, 1);
    StageExecutionPlan consumerStageExecutionPlan = new StageExecutionPlan(testFragmentConsumer, ImmutableMap.of(tableScanNodeId, new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(0, TestingSplit::createRemoteSplit))), ImmutableList.of(), ImmutableMap.of(tableScanNodeId, new TableInfo(new QualifiedObjectName("test", TEST_SCHEMA, "test"), TupleDomain.all())));
    SqlStageExecution stage = createSqlStageExecution(stageId, new TestSqlTaskManager.MockLocationFactory().createStageLocation(stageId), consumerStageExecutionPlan.getFragment(), consumerStageExecutionPlan.getTables(), new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor), TEST_SESSION_REUSE, true, newNodeTaskMap, remoteTaskExecutor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    try {
        nodeSelector.computeAssignments(splitConsumers, ImmutableList.copyOf(this.taskMap.values()), Optional.of(stage)).getAssignments().entries();
    } catch (PrestoException e) {
        assertEquals("Producer & consumer splits are not same", e.getMessage());
        return;
    }
    // reaching this point means no PrestoException was thrown: fail the test
    assertEquals(false, true);
}
Also used : NoOpFailureDetector(io.prestosql.failuredetector.NoOpFailureDetector) StageExecutionPlan(io.prestosql.sql.planner.StageExecutionPlan) StageId(io.prestosql.execution.StageId) PrestoException(io.prestosql.spi.PrestoException) TestPhasedExecutionSchedule.createTableScanPlanFragment(io.prestosql.execution.scheduler.TestPhasedExecutionSchedule.createTableScanPlanFragment) PlanFragment(io.prestosql.sql.planner.PlanFragment) ConnectorAwareSplitSource(io.prestosql.split.ConnectorAwareSplitSource) SqlStageExecution.createSqlStageExecution(io.prestosql.execution.SqlStageExecution.createSqlStageExecution) SqlStageExecution(io.prestosql.execution.SqlStageExecution) QuerySnapshotManager(io.prestosql.snapshot.QuerySnapshotManager) PlanNodeId(io.prestosql.spi.plan.PlanNodeId) LocalStateStoreProvider(io.prestosql.statestore.LocalStateStoreProvider) SeedStoreManager(io.prestosql.seedstore.SeedStoreManager) TableInfo(io.prestosql.execution.TableInfo) DynamicFilterService(io.prestosql.dynamicfilter.DynamicFilterService) UUID(java.util.UUID) NodeTaskMap(io.prestosql.execution.NodeTaskMap) QueryId(io.prestosql.spi.QueryId) QualifiedObjectName(io.prestosql.spi.connector.QualifiedObjectName) FileSystemClientManager(io.prestosql.filesystem.FileSystemClientManager) FinalizerService(io.prestosql.util.FinalizerService) MockSplit(io.prestosql.MockSplit) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit) Split(io.prestosql.metadata.Split) TestingSplit(io.prestosql.testing.TestingSplit) MockRemoteTaskFactory(io.prestosql.execution.MockRemoteTaskFactory) Test(org.testng.annotations.Test)
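If the TestNG version on the test classpath provides Assert.expectThrows (an assumption about the project's dependencies; the original test keeps the explicit try/catch), the expected-failure check above can be written more compactly. The fragment below is a sketch that reuses the splitConsumers and stage variables from the test body:

import static org.testng.Assert.assertEquals;
import static org.testng.Assert.expectThrows;

// Expect the consumer-side assignment to fail because its splits differ from the producer's.
PrestoException e = expectThrows(PrestoException.class,
        () -> nodeSelector.computeAssignments(splitConsumers, ImmutableList.copyOf(this.taskMap.values()), Optional.of(stage)).getAssignments().entries());
assertEquals(e.getMessage(), "Producer & consumer splits are not same");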

Example 13 with SeedStoreManager

Use of io.prestosql.seedstore.SeedStoreManager in project hetu-core by openlookeng.

From the class TestHiveIntegrationSmokeTest, the method testRuseExchangeGroupSplitsMatchingBetweenProducerConsumer:

@Test
public void testRuseExchangeGroupSplitsMatchingBetweenProducerConsumer() {
    setUpNodes();
    NodeTaskMap nodeTasks = new NodeTaskMap(new FinalizerService());
    StageId stageId = new StageId(new QueryId("query"), 0);
    UUID uuid = UUID.randomUUID();
    PlanFragment testFragmentProducer = createTableScanPlanFragment("build", ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_PRODUCER, uuid, 1);
    PlanNodeId tableScanNodeId = new PlanNodeId("plan_id");
    StageExecutionPlan producerStageExecutionPlan = new StageExecutionPlan(testFragmentProducer, ImmutableMap.of(tableScanNodeId, new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(0, TestingSplit::createRemoteSplit))), ImmutableList.of(), ImmutableMap.of(tableScanNodeId, new TableInfo(new QualifiedObjectName("test", TEST_SCHEMA, "test"), TupleDomain.all())));
    SqlStageExecution producerStage = createSqlStageExecution(stageId, new TestSqlTaskManager.MockLocationFactory().createStageLocation(stageId), producerStageExecutionPlan.getFragment(), producerStageExecutionPlan.getTables(), new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor), TEST_SESSION_REUSE, true, nodeTasks, remoteTaskExecutor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    Set<Split> splits = createAndGetSplits(10);
    Multimap<InternalNode, Split> producerAssignment = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values()), Optional.of(producerStage)).getAssignments();
    PlanFragment testFragmentConsumer = createTableScanPlanFragment("build", ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_CONSUMER, uuid, 1);
    StageExecutionPlan consumerStageExecutionPlan = new StageExecutionPlan(testFragmentConsumer, ImmutableMap.of(tableScanNodeId, new ConnectorAwareSplitSource(CONNECTOR_ID, createFixedSplitSource(0, TestingSplit::createRemoteSplit))), ImmutableList.of(), ImmutableMap.of(tableScanNodeId, new TableInfo(new QualifiedObjectName("test", TEST_SCHEMA, "test"), TupleDomain.all())));
    SqlStageExecution stage = createSqlStageExecution(stageId, new TestSqlTaskManager.MockLocationFactory().createStageLocation(stageId), consumerStageExecutionPlan.getFragment(), consumerStageExecutionPlan.getTables(), new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor), TEST_SESSION_REUSE, true, nodeTasks, remoteTaskExecutor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    Multimap<InternalNode, Split> consumerAssignment = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values()), Optional.of(stage)).getAssignments();
    assertEquals(consumerAssignment.size(), producerAssignment.size());
    for (InternalNode node : consumerAssignment.keySet()) {
        List<Split> splitList = new ArrayList<>();
        List<Split> splitList2 = new ArrayList<>();
        boolean b = producerAssignment.containsEntry(node, consumerAssignment.get(node));
        Collection<Split> producerSplits = producerAssignment.get(node);
        Collection<Split> consumerSplits = consumerAssignment.get(node);
        producerSplits.forEach(s -> splitList.add(s));
        consumerSplits.forEach(s -> splitList2.add(s));
        List<Split> splitList1 = splitList.get(0).getSplits();
        List<Split> splitList3 = splitList2.get(0).getSplits();
        int i = 0;
        // each producer-side split should yield the same SplitKey as the consumer-side split at the same position
        for (Split split3 : splitList1) {
            SplitKey splitKey1 = new SplitKey(split3, TEST_CATALOG, TEST_SCHEMA, TEST_TABLE);
            SplitKey splitKey2 = new SplitKey(splitList3.get(i), TEST_CATALOG, TEST_SCHEMA, TEST_TABLE);
            boolean f = splitKey1.equals(splitKey2);
            assertEquals(true, f);
            i++;
        }
    }
}
Also used : NoOpFailureDetector(io.prestosql.failuredetector.NoOpFailureDetector) SplitKey(io.prestosql.execution.SplitKey) StageExecutionPlan(io.prestosql.sql.planner.StageExecutionPlan) StageId(io.prestosql.execution.StageId) ArrayList(java.util.ArrayList) TestPhasedExecutionSchedule.createTableScanPlanFragment(io.prestosql.execution.scheduler.TestPhasedExecutionSchedule.createTableScanPlanFragment) PlanFragment(io.prestosql.sql.planner.PlanFragment) ConnectorAwareSplitSource(io.prestosql.split.ConnectorAwareSplitSource) SqlStageExecution.createSqlStageExecution(io.prestosql.execution.SqlStageExecution.createSqlStageExecution) SqlStageExecution(io.prestosql.execution.SqlStageExecution) QuerySnapshotManager(io.prestosql.snapshot.QuerySnapshotManager) PlanNodeId(io.prestosql.spi.plan.PlanNodeId) LocalStateStoreProvider(io.prestosql.statestore.LocalStateStoreProvider) SeedStoreManager(io.prestosql.seedstore.SeedStoreManager) TableInfo(io.prestosql.execution.TableInfo) DynamicFilterService(io.prestosql.dynamicfilter.DynamicFilterService) UUID(java.util.UUID) NodeTaskMap(io.prestosql.execution.NodeTaskMap) QueryId(io.prestosql.spi.QueryId) QualifiedObjectName(io.prestosql.spi.connector.QualifiedObjectName) ColumnConstraint(io.prestosql.sql.planner.planprinter.IoPlanPrinter.ColumnConstraint) Constraint(io.prestosql.spi.connector.Constraint) FileSystemClientManager(io.prestosql.filesystem.FileSystemClientManager) SplitSchedulerStats(io.prestosql.execution.scheduler.SplitSchedulerStats) FinalizerService(io.prestosql.util.FinalizerService) InternalNode(io.prestosql.metadata.InternalNode) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit) Split(io.prestosql.metadata.Split) TestingSplit(io.prestosql.testing.TestingSplit) MockRemoteTaskFactory(io.prestosql.execution.MockRemoteTaskFactory) Test(org.testng.annotations.Test) AbstractTestIntegrationSmokeTest(io.prestosql.tests.AbstractTestIntegrationSmokeTest)
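The node-by-node comparison in the loop above can be expressed as a reusable assertion. The helper below is a sketch only (the name assertSameSplitKeys is hypothetical); it assumes, as the test does, that each assigned Split exposes its member splits via getSplits() and that SplitKey equality identifies a split within a given catalog, schema, and table.

import java.util.Iterator;
import java.util.List;
import com.google.common.collect.Multimap;
import io.prestosql.execution.SplitKey;
import io.prestosql.metadata.InternalNode;
import io.prestosql.metadata.Split;
import static org.testng.Assert.assertEquals;

final class SplitAssignmentAssertions {
    private SplitAssignmentAssertions() {}

    // Asserts that, node by node, producer and consumer assignments resolve to the same SplitKeys.
    static void assertSameSplitKeys(Multimap<InternalNode, Split> producer, Multimap<InternalNode, Split> consumer,
            String catalog, String schema, String table) {
        assertEquals(consumer.keySet(), producer.keySet());
        for (InternalNode node : producer.keySet()) {
            Iterator<Split> producerSplits = producer.get(node).iterator();
            Iterator<Split> consumerSplits = consumer.get(node).iterator();
            while (producerSplits.hasNext() && consumerSplits.hasNext()) {
                List<Split> producerMembers = producerSplits.next().getSplits();
                List<Split> consumerMembers = consumerSplits.next().getSplits();
                assertEquals(consumerMembers.size(), producerMembers.size());
                for (int i = 0; i < producerMembers.size(); i++) {
                    SplitKey expected = new SplitKey(producerMembers.get(i), catalog, schema, table);
                    SplitKey actual = new SplitKey(consumerMembers.get(i), catalog, schema, table);
                    assertEquals(actual, expected);
                }
            }
        }
    }
}

A call such as assertSameSplitKeys(producerAssignment, consumerAssignment, TEST_CATALOG, TEST_SCHEMA, TEST_TABLE) would then replace the hand-written loop.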

Example 14 with SeedStoreManager

Use of io.prestosql.seedstore.SeedStoreManager in project hetu-core by openlookeng.

From the class LocalQueryRunner, the method createDrivers:

private List<Driver> createDrivers(Session session, Plan plan, OutputFactory outputFactory, TaskContext taskContext) {
    if (printPlan) {
        System.out.println(PlanPrinter.textLogicalPlan(plan.getRoot(), plan.getTypes(), metadata, plan.getStatsAndCosts(), session, 0, false));
    }
    SubPlan subplan = planFragmenter.createSubPlans(session, plan, true, WarningCollector.NOOP);
    if (!subplan.getChildren().isEmpty()) {
        throw new AssertionError("Expected subplan to have no children");
    }
    NodeInfo nodeInfo = new NodeInfo("test");
    FileSystemClientManager fileSystemClientManager = new FileSystemClientManager();
    SeedStoreManager seedStoreManager = new SeedStoreManager(fileSystemClientManager);
    StateStoreProvider stateStoreProvider = new LocalStateStoreProvider(seedStoreManager);
    LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner(metadata, new TypeAnalyzer(sqlParser, metadata), Optional.empty(), pageSourceManager, indexManager, nodePartitioningManager, pageSinkManager, null, expressionCompiler, pageFunctionCompiler, joinFilterFunctionCompiler, new IndexJoinLookupStats(), this.taskManagerConfig, spillerFactory, singleStreamSpillerFactory, partitioningSpillerFactory, new PagesIndex.TestingFactory(false), joinCompiler, new LookupJoinOperators(), new OrderingCompiler(), nodeInfo, stateStoreProvider, new StateStoreListenerManager(stateStoreProvider), new DynamicFilterCacheManager(), heuristicIndexerManager, cubeManager);
    // plan query
    StageExecutionDescriptor stageExecutionDescriptor = subplan.getFragment().getStageExecutionDescriptor();
    LocalExecutionPlan localExecutionPlan = executionPlanner.plan(taskContext, stageExecutionDescriptor, subplan.getFragment().getRoot(), subplan.getFragment().getPartitioningScheme().getOutputLayout(), plan.getTypes(), subplan.getFragment().getPartitionedSources(), null, outputFactory, Optional.empty(), Optional.empty(), null);
    // generate sources
    List<TaskSource> sources = new ArrayList<>();
    long sequenceId = 0;
    for (TableScanNode tableScan : findTableScanNodes(subplan.getFragment().getRoot())) {
        TableHandle table = tableScan.getTable();
        SplitSource splitSource = splitManager.getSplits(session, table, stageExecutionDescriptor.isScanGroupedExecution(tableScan.getId()) ? GROUPED_SCHEDULING : UNGROUPED_SCHEDULING, null, Optional.empty(), Collections.emptyMap(), ImmutableSet.of(), tableScan.getStrategy() != ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT, tableScan.getId());
        ImmutableSet.Builder<ScheduledSplit> scheduledSplits = ImmutableSet.builder();
        while (!splitSource.isFinished()) {
            for (Split split : getNextBatch(splitSource)) {
                scheduledSplits.add(new ScheduledSplit(sequenceId++, tableScan.getId(), split));
            }
        }
        sources.add(new TaskSource(tableScan.getId(), scheduledSplits.build(), true));
    }
    // create drivers
    List<Driver> drivers = new ArrayList<>();
    Map<PlanNodeId, DriverFactory> driverFactoriesBySource = new HashMap<>();
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        for (int i = 0; i < driverFactory.getDriverInstances().orElse(1); i++) {
            if (driverFactory.getSourceId().isPresent()) {
                checkState(driverFactoriesBySource.put(driverFactory.getSourceId().get(), driverFactory) == null);
            } else {
                DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), false).addDriverContext();
                Driver driver = driverFactory.createDriver(driverContext);
                drivers.add(driver);
            }
        }
    }
    // add sources to the drivers
    ImmutableSet<PlanNodeId> partitionedSources = ImmutableSet.copyOf(subplan.getFragment().getPartitionedSources());
    for (TaskSource source : sources) {
        DriverFactory driverFactory = driverFactoriesBySource.get(source.getPlanNodeId());
        checkState(driverFactory != null);
        boolean partitioned = partitionedSources.contains(driverFactory.getSourceId().get());
        for (ScheduledSplit split : source.getSplits()) {
            DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), partitioned).addDriverContext();
            Driver driver = driverFactory.createDriver(driverContext);
            driver.updateSource(new TaskSource(split.getPlanNodeId(), ImmutableSet.of(split), true));
            drivers.add(driver);
        }
    }
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        driverFactory.noMoreDrivers();
    }
    return ImmutableList.copyOf(drivers);
}
Also used : DriverContext(io.prestosql.operator.DriverContext) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) StateStoreListenerManager(io.prestosql.statestore.listener.StateStoreListenerManager) Driver(io.prestosql.operator.Driver) PagesIndex(io.prestosql.operator.PagesIndex) LocalStateStoreProvider(io.prestosql.statestore.LocalStateStoreProvider) StateStoreProvider(io.prestosql.statestore.StateStoreProvider) LocalStateStoreProvider(io.prestosql.statestore.LocalStateStoreProvider) PlanNodeId(io.prestosql.spi.plan.PlanNodeId) SeedStoreManager(io.prestosql.seedstore.SeedStoreManager) ImmutableSet(com.google.common.collect.ImmutableSet) OrderingCompiler(io.prestosql.sql.gen.OrderingCompiler) DriverFactory(io.prestosql.operator.DriverFactory) LookupJoinOperators(io.prestosql.operator.LookupJoinOperators) DynamicFilterCacheManager(io.prestosql.dynamicfilter.DynamicFilterCacheManager) ScheduledSplit(io.prestosql.execution.ScheduledSplit) LocalExecutionPlanner(io.prestosql.sql.planner.LocalExecutionPlanner) IndexJoinLookupStats(io.prestosql.operator.index.IndexJoinLookupStats) StageExecutionDescriptor(io.prestosql.operator.StageExecutionDescriptor) TypeAnalyzer(io.prestosql.sql.planner.TypeAnalyzer) FileSystemClientManager(io.prestosql.filesystem.FileSystemClientManager) LocalExecutionPlan(io.prestosql.sql.planner.LocalExecutionPlanner.LocalExecutionPlan) TableScanNode(io.prestosql.spi.plan.TableScanNode) NodeInfo(io.airlift.node.NodeInfo) TableHandle(io.prestosql.spi.metadata.TableHandle) SplitSource(io.prestosql.split.SplitSource) Split(io.prestosql.metadata.Split) ScheduledSplit(io.prestosql.execution.ScheduledSplit) SubPlan(io.prestosql.sql.planner.SubPlan) TaskSource(io.prestosql.execution.TaskSource)
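LocalQueryRunner consumes the drivers returned by createDrivers by repeatedly polling them until every driver is finished. A minimal sketch of that consumption loop, assuming Driver exposes the isFinished() and process() methods it has in the upstream Presto engine, looks like this:

// Sketch of driving the returned drivers to completion: keep offering each
// unfinished driver a chance to make progress until all report isFinished().
List<Driver> drivers = createDrivers(session, plan, outputFactory, taskContext);
boolean done = false;
while (!done) {
    done = true;
    for (Driver driver : drivers) {
        if (!driver.isFinished()) {
            driver.process();
            done = false;
        }
    }
}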

Example 15 with SeedStoreManager

Use of io.prestosql.seedstore.SeedStoreManager in project hetu-core by openlookeng.

From the class TestSqlStageExecution, the method testFinalStageInfoInternal:

private void testFinalStageInfoInternal() throws Exception {
    NodeTaskMap nodeTaskMap = new NodeTaskMap(new FinalizerService());
    StageId stageId = new StageId(new QueryId("query"), 0);
    SqlStageExecution stage = createSqlStageExecution(stageId, new MockLocationFactory().createStageLocation(stageId), createExchangePlanFragment(), ImmutableMap.of(), new MockRemoteTaskFactory(executor, scheduledExecutor), TEST_SESSION, true, nodeTaskMap, executor, new NoOpFailureDetector(), new SplitSchedulerStats(), new DynamicFilterService(new LocalStateStoreProvider(new SeedStoreManager(new FileSystemClientManager()))), new QuerySnapshotManager(stageId.getQueryId(), NOOP_SNAPSHOT_UTILS, TEST_SESSION));
    stage.setOutputBuffers(createInitialEmptyOutputBuffers(ARBITRARY));
    // add listener that fetches stage info when the final status is available
    SettableFuture<StageInfo> finalStageInfo = SettableFuture.create();
    stage.addFinalStageInfoListener(finalStageInfo::set);
    // in a background thread add a ton of tasks
    CountDownLatch latch = new CountDownLatch(1000);
    Future<?> addTasksTask = executor.submit(() -> {
        try {
            for (int i = 0; i < 1_000_000; i++) {
                if (Thread.interrupted()) {
                    return;
                }
                InternalNode node = new InternalNode("source" + i, URI.create("http://10.0.0." + (i / 10_000) + ":" + (i % 10_000)), NodeVersion.UNKNOWN, false);
                stage.scheduleTask(node, i, OptionalInt.empty());
                latch.countDown();
            }
        } finally {
            while (latch.getCount() > 0) {
                latch.countDown();
            }
        }
    });
    // wait for some tasks to be created, and then abort the query
    latch.await(1, MINUTES);
    assertFalse(stage.getStageInfo().getTasks().isEmpty());
    stage.abort();
    // once the final stage info is available, verify that it is complete
    StageInfo stageInfo = finalStageInfo.get(1, MINUTES);
    assertFalse(stageInfo.getTasks().isEmpty());
    assertTrue(stageInfo.isCompleteInfo());
    assertSame(stage.getStageInfo(), stageInfo);
    // cancel the background thread adding tasks
    addTasksTask.cancel(true);
}
Also used : NoOpFailureDetector(io.prestosql.failuredetector.NoOpFailureDetector) QueryId(io.prestosql.spi.QueryId) MockLocationFactory(io.prestosql.execution.TestSqlTaskManager.MockLocationFactory) CountDownLatch(java.util.concurrent.CountDownLatch) SqlStageExecution.createSqlStageExecution(io.prestosql.execution.SqlStageExecution.createSqlStageExecution) FileSystemClientManager(io.prestosql.filesystem.FileSystemClientManager) QuerySnapshotManager(io.prestosql.snapshot.QuerySnapshotManager) LocalStateStoreProvider(io.prestosql.statestore.LocalStateStoreProvider) SplitSchedulerStats(io.prestosql.execution.scheduler.SplitSchedulerStats) SeedStoreManager(io.prestosql.seedstore.SeedStoreManager) FinalizerService(io.prestosql.util.FinalizerService) DynamicFilterService(io.prestosql.dynamicfilter.DynamicFilterService) InternalNode(io.prestosql.metadata.InternalNode)

Aggregations

SeedStoreManager (io.prestosql.seedstore.SeedStoreManager): 17 uses
LocalStateStoreProvider (io.prestosql.statestore.LocalStateStoreProvider): 14 uses
FileSystemClientManager (io.prestosql.filesystem.FileSystemClientManager): 13 uses
Split (io.prestosql.metadata.Split): 9 uses
PlanNodeId (io.prestosql.spi.plan.PlanNodeId): 9 uses
DynamicFilterService (io.prestosql.dynamicfilter.DynamicFilterService): 8 uses
SqlStageExecution.createSqlStageExecution (io.prestosql.execution.SqlStageExecution.createSqlStageExecution): 8 uses
NoOpFailureDetector (io.prestosql.failuredetector.NoOpFailureDetector): 8 uses
QuerySnapshotManager (io.prestosql.snapshot.QuerySnapshotManager): 8 uses
QueryId (io.prestosql.spi.QueryId): 8 uses
FinalizerService (io.prestosql.util.FinalizerService): 8 uses
Test (org.testng.annotations.Test): 8 uses
MockRemoteTaskFactory (io.prestosql.execution.MockRemoteTaskFactory): 7 uses
SqlStageExecution (io.prestosql.execution.SqlStageExecution): 7 uses
StageId (io.prestosql.execution.StageId): 7 uses
PlanFragment (io.prestosql.sql.planner.PlanFragment): 7 uses
TestingSplit (io.prestosql.testing.TestingSplit): 7 uses
UUID (java.util.UUID): 7 uses
NodeTaskMap (io.prestosql.execution.NodeTaskMap): 6 uses
TableInfo (io.prestosql.execution.TableInfo): 6 uses