Use of io.prestosql.sql.planner.NodePartitioningManager in project hetu-core by openLooKeng.
From the class TaskTestUtils, method createTestingPlanner, which hand-wires a LocalExecutionPlanner for tests and constructs a NodePartitioningManager directly from a NodeScheduler:
public static LocalExecutionPlanner createTestingPlanner()
{
    Metadata metadata = createTestMetadataManager();
    PageSourceManager pageSourceManager = new PageSourceManager();
    HetuMetaStoreManager hetuMetaStoreManager = new HetuMetaStoreManager();
    FeaturesConfig featuresConfig = new FeaturesConfig();
    CubeManager cubeManager = new CubeManager(featuresConfig, hetuMetaStoreManager);
    pageSourceManager.addConnectorPageSourceProvider(CONNECTOR_ID, new TestingPageSourceProvider());

    // we don't start the finalizer so nothing will be collected, which is ok for a test
    FinalizerService finalizerService = new FinalizerService();

    NodeScheduler nodeScheduler = new NodeScheduler(new LegacyNetworkTopology(), new InMemoryNodeManager(), new NodeSchedulerConfig().setIncludeCoordinator(true), new NodeTaskMap(finalizerService));
    NodePartitioningManager nodePartitioningManager = new NodePartitioningManager(nodeScheduler);
    PageFunctionCompiler pageFunctionCompiler = new PageFunctionCompiler(metadata, 0);
    NodeInfo nodeInfo = new NodeInfo("test");
    FileSystemClientManager fileSystemClientManager = new FileSystemClientManager();
    SeedStoreManager seedStoreManager = new SeedStoreManager(fileSystemClientManager);
    StateStoreProvider stateStoreProvider = new LocalStateStoreProvider(seedStoreManager);
    HeuristicIndexerManager heuristicIndexerManager = new HeuristicIndexerManager(new FileSystemClientManager(), new HetuMetaStoreManager());

    return new LocalExecutionPlanner(
            metadata,
            new TypeAnalyzer(new SqlParser(), metadata),
            Optional.empty(),
            pageSourceManager,
            new IndexManager(),
            nodePartitioningManager,
            new PageSinkManager(),
            new MockExchangeClientSupplier(),
            new ExpressionCompiler(metadata, pageFunctionCompiler),
            pageFunctionCompiler,
            new JoinFilterFunctionCompiler(metadata),
            new IndexJoinLookupStats(),
            new TaskManagerConfig(),
            new GenericSpillerFactory((types, spillContext, memoryContext) -> {
                throw new UnsupportedOperationException();
            }),
            (types, spillContext, memoryContext) -> {
                throw new UnsupportedOperationException();
            },
            (types, partitionFunction, spillContext, memoryContext) -> {
                throw new UnsupportedOperationException();
            },
            new PagesIndex.TestingFactory(false),
            new JoinCompiler(metadata),
            new LookupJoinOperators(),
            new OrderingCompiler(),
            nodeInfo,
            stateStoreProvider,
            new StateStoreListenerManager(stateStoreProvider),
            new DynamicFilterCacheManager(),
            heuristicIndexerManager,
            cubeManager);
}
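Note how the test stubs every spiller factory with a lambda that throws UnsupportedOperationException: the planner can be constructed, and any accidental use of spilling fails loudly. A minimal, self-contained sketch of that stubbing pattern (SpillerFactory and Planner here are hypothetical stand-ins, not the hetu-core classes):

import java.util.List;

public class StubFactorySketch
{
    // Hypothetical stand-in for a factory interface such as the spiller factories above.
    @FunctionalInterface
    interface SpillerFactory
    {
        AutoCloseable create(List<String> types);
    }

    // Hypothetical consumer that requires the factory but may never invoke it.
    static final class Planner
    {
        private final SpillerFactory spillerFactory;

        Planner(SpillerFactory spillerFactory)
        {
            this.spillerFactory = spillerFactory;
        }
    }

    public static void main(String[] args)
    {
        // Same idea as createTestingPlanner above: the lambda satisfies the
        // constructor, and fails loudly if the unexercised path is ever hit.
        Planner planner = new Planner(types -> {
            throw new UnsupportedOperationException("spilling not supported in this test");
        });
        System.out.println("planner constructed without a real spiller: " + planner);
    }
}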
Use of io.prestosql.sql.planner.NodePartitioningManager in project hetu-core by openLooKeng.
From the class SqlQueryScheduler, method createStages, which uses the NodePartitioningManager to look up partition handles and bucket-to-node maps when choosing a stage scheduler:
private List<SqlStageExecution> createStages(
        ExchangeLocationsConsumer parent,
        AtomicInteger nextStageId,
        LocationFactory locationFactory,
        StageExecutionPlan plan,
        NodeScheduler nodeScheduler,
        RemoteTaskFactory remoteTaskFactory,
        Session session,
        int splitBatchSize,
        BiFunction<PartitioningHandle, Integer, NodePartitionMap> partitioningCache,
        NodePartitioningManager nodePartitioningManager,
        ExecutorService queryExecutor,
        ScheduledExecutorService schedulerExecutor,
        FailureDetector failureDetector,
        NodeTaskMap nodeTaskMap,
        ImmutableMap.Builder<StageId, StageScheduler> stageSchedulers,
        ImmutableMap.Builder<StageId, StageLinkage> stageLinkages,
        boolean isSnapshotEnabled,
        QuerySnapshotManager snapshotManager,
        Map<StageId, Integer> stageTaskCounts)
{
    ImmutableList.Builder<SqlStageExecution> localStages = ImmutableList.builder();

    StageId stageId = new StageId(queryStateMachine.getQueryId(), nextStageId.getAndIncrement());
    SqlStageExecution stageExecution = createSqlStageExecution(
            stageId,
            locationFactory.createStageLocation(stageId),
            plan.getFragment(),
            plan.getTables(),
            remoteTaskFactory,
            session,
            summarizeTaskInfo,
            nodeTaskMap,
            queryExecutor,
            failureDetector,
            schedulerStats,
            dynamicFilterService,
            snapshotManager);
    localStages.add(stageExecution);

    Optional<int[]> bucketToPartition;
    PartitioningHandle partitioningHandle = plan.getFragment().getPartitioning();
    boolean keepConsumerOnFeederNodes = !plan.getFragment().getFeederCTEId().isPresent() && plan.getFragment().getFeederCTEParentId().isPresent();
    if (partitioningHandle.equals(SOURCE_DISTRIBUTION)) {
        // nodes are selected dynamically based on the constraints of the splits and the system load
        Entry<PlanNodeId, SplitSource> entry = Iterables.getOnlyElement(plan.getSplitSources().entrySet());
        PlanNodeId planNodeId = entry.getKey();
        SplitSource splitSource = entry.getValue();
        CatalogName catalogName = splitSource.getCatalogName();
        if (isInternalSystemConnector(catalogName)) {
            catalogName = null;
        }
        NodeSelector nodeSelector = nodeScheduler.createNodeSelector(catalogName, keepConsumerOnFeederNodes, feederScheduledNodes);
        if (isSnapshotEnabled) {
            // When snapshot is enabled, no task can be added after the query has started running;
            // otherwise assumptions about how many "input channels" there are may be broken.
            nodeSelector.lockDownNodes();
        }
        SplitPlacementPolicy placementPolicy = new DynamicSplitPlacementPolicy(nodeSelector, stageExecution::getAllTasks);

        checkArgument(!plan.getFragment().getStageExecutionDescriptor().isStageGroupedExecution());
        stageSchedulers.put(stageId, newSourcePartitionedSchedulerAsStageScheduler(stageExecution, planNodeId, splitSource, placementPolicy, splitBatchSize, session, heuristicIndexerManager));
        bucketToPartition = Optional.of(new int[1]);
    }
    else if (partitioningHandle.equals(SCALED_WRITER_DISTRIBUTION)) {
        bucketToPartition = Optional.of(new int[1]);
    }
    else {
        Map<PlanNodeId, SplitSource> splitSources = plan.getSplitSources();
        if (!splitSources.isEmpty()) {
            // contains local source
            List<PlanNodeId> schedulingOrder = plan.getFragment().getPartitionedSources();
            CatalogName catalogName = partitioningHandle.getConnectorId().orElseThrow(IllegalStateException::new);
            List<ConnectorPartitionHandle> connectorPartitionHandles;
            boolean groupedExecutionForStage = plan.getFragment().getStageExecutionDescriptor().isStageGroupedExecution();
            if (groupedExecutionForStage) {
                connectorPartitionHandles = nodePartitioningManager.listPartitionHandles(session, partitioningHandle);
                checkState(!ImmutableList.of(NOT_PARTITIONED).equals(connectorPartitionHandles));
            }
            else {
                connectorPartitionHandles = ImmutableList.of(NOT_PARTITIONED);
            }

            BucketNodeMap bucketNodeMap;
            List<InternalNode> stageNodeList;
            if (plan.getFragment().getRemoteSourceNodes().stream().allMatch(node -> node.getExchangeType() == REPLICATE)) {
                // no remote source
                boolean dynamicLifespanSchedule = plan.getFragment().getStageExecutionDescriptor().isDynamicLifespanSchedule();
                if (isSnapshotEnabled) {
                    NodeSelector nodeSelector = nodeScheduler.createNodeSelector(catalogName, keepConsumerOnFeederNodes, feederScheduledNodes);
                    int nodeCount;
                    if (stageTaskCounts != null) {
                        // Resuming: need to create the same number of tasks as the old stage.
                        nodeCount = stageTaskCounts.get(stageId);
                    }
                    else {
                        // Scheduling: reserve some nodes for resuming
                        nodeCount = calculateTaskCount(nodeSelector.selectableNodeCount());
                    }
                    stageNodeList = new ArrayList<>(nodeSelector.selectRandomNodes(nodeCount));
                    checkCondition(stageNodeList.size() == nodeCount, NO_NODES_AVAILABLE, "Snapshot: not enough worker nodes to resume expected number of tasks: " + nodeCount);
                    // Make sure bucketNodeMap uses the same node list
                    bucketNodeMap = nodePartitioningManager.getBucketNodeMap(session, partitioningHandle, dynamicLifespanSchedule, stageNodeList);
                }
                else {
                    bucketNodeMap = nodePartitioningManager.getBucketNodeMap(session, partitioningHandle, dynamicLifespanSchedule);
                    stageNodeList = new ArrayList<>(nodeScheduler.createNodeSelector(catalogName, keepConsumerOnFeederNodes, feederScheduledNodes).allNodes());
                }

                // verify execution is consistent with planner's decision on dynamic lifespan schedule
                verify(bucketNodeMap.isDynamic() == dynamicLifespanSchedule);

                Collections.shuffle(stageNodeList);
                bucketToPartition = Optional.empty();
            }
            else {
                // cannot use dynamic lifespan schedule
                verify(!plan.getFragment().getStageExecutionDescriptor().isDynamicLifespanSchedule());

                // remote source requires nodePartitionMap
                NodePartitionMap nodePartitionMap = partitioningCache.apply(plan.getFragment().getPartitioning(), stageTaskCounts == null ? null : stageTaskCounts.get(stageId));
                if (groupedExecutionForStage) {
                    checkState(connectorPartitionHandles.size() == nodePartitionMap.getBucketToPartition().length);
                }
                stageNodeList = nodePartitionMap.getPartitionToNode();
                bucketNodeMap = nodePartitionMap.asBucketNodeMap();
                bucketToPartition = Optional.of(nodePartitionMap.getBucketToPartition());
            }

            stageSchedulers.put(stageId, new FixedSourcePartitionedScheduler(
                    stageExecution,
                    splitSources,
                    plan.getFragment().getStageExecutionDescriptor(),
                    schedulingOrder,
                    stageNodeList,
                    bucketNodeMap,
                    splitBatchSize,
                    getConcurrentLifespansPerNode(session),
                    nodeScheduler.createNodeSelector(catalogName, keepConsumerOnFeederNodes, feederScheduledNodes),
                    connectorPartitionHandles,
                    session,
                    heuristicIndexerManager));
        }
        else {
            // all sources are remote
            NodePartitionMap nodePartitionMap = partitioningCache.apply(plan.getFragment().getPartitioning(), stageTaskCounts == null ? null : stageTaskCounts.get(stageId));
            List<InternalNode> partitionToNode = nodePartitionMap.getPartitionToNode();
            // todo this should asynchronously wait a standard timeout period before failing
            checkCondition(!partitionToNode.isEmpty(), NO_NODES_AVAILABLE, "No worker nodes available");
            stageSchedulers.put(stageId, new FixedCountScheduler(stageExecution, partitionToNode));
            bucketToPartition = Optional.of(nodePartitionMap.getBucketToPartition());
        }
    }

    ImmutableSet.Builder<SqlStageExecution> childStagesBuilder = ImmutableSet.builder();
    for (StageExecutionPlan subStagePlan : plan.getSubStages()) {
        if (visitedPlanFrags.contains(subStagePlan.getFragment().getId())) {
            continue;
        }
        visitedPlanFrags.add(subStagePlan.getFragment().getId());
        List<SqlStageExecution> subTree = createStages(
                stageExecution::addExchangeLocations,
                nextStageId,
                locationFactory,
                subStagePlan.withBucketToPartition(bucketToPartition),
                nodeScheduler,
                remoteTaskFactory,
                session,
                splitBatchSize,
                partitioningCache,
                nodePartitioningManager,
                queryExecutor,
                schedulerExecutor,
                failureDetector,
                nodeTaskMap,
                stageSchedulers,
                stageLinkages,
                isSnapshotEnabled,
                snapshotManager,
                stageTaskCounts);
        localStages.addAll(subTree);

        SqlStageExecution childStage = subTree.get(0);
        childStagesBuilder.add(childStage);
        Optional<RemoteSourceNode> parentNode = plan.getFragment()
                .getRemoteSourceNodes()
                .stream()
                .filter(x -> x.getSourceFragmentIds().contains(childStage.getFragment().getId()))
                .findAny();

        checkArgument(parentNode.isPresent(), "Couldn't find parent of a CTE node");
        childStage.setParentId(parentNode.get().getId());
    }
    Set<SqlStageExecution> childStages = childStagesBuilder.build();
    stageExecution.addStateChangeListener(newState -> {
        if (newState.isDone() && newState != StageState.RESCHEDULING) {
            // Snapshot: For "rescheduling", tasks are already cancelled (for resume)
            childStages.forEach(SqlStageExecution::cancel);
        }
    });
    stageLinkages.put(stageId, new StageLinkage(plan.getFragment().getId(), parent, childStages));

    if (partitioningHandle.equals(SCALED_WRITER_DISTRIBUTION)) {
        Supplier<Collection<TaskStatus>> sourceTasksProvider = () -> childStages.stream()
                .map(SqlStageExecution::getAllTasks)
                .flatMap(Collection::stream)
                .map(RemoteTask::getTaskStatus)
                .collect(toList());
        Supplier<Collection<TaskStatus>> writerTasksProvider = () -> stageExecution.getAllTasks().stream()
                .map(RemoteTask::getTaskStatus)
                .collect(toList());

        ScaledWriterScheduler scheduler = new ScaledWriterScheduler(
                stageExecution,
                sourceTasksProvider,
                writerTasksProvider,
                nodeScheduler.createNodeSelector(null, keepConsumerOnFeederNodes, feederScheduledNodes),
                schedulerExecutor,
                getWriterMinSize(session),
                isSnapshotEnabled,
                stageTaskCounts != null ? stageTaskCounts.get(stageId) : null);
        whenAllStages(childStages, StageState::isDone).addListener(scheduler::finish, directExecutor());
        stageSchedulers.put(stageId, scheduler);
    }

    return localStages.build();
}
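The branching above reduces to one question: which stage scheduler fits the fragment's partitioning handle and split sources, and what bucket-to-partition mapping should sub-stages inherit. A simplified, hypothetical condensation of that decision (illustrative names only, not the hetu-core API; assumes the three boolean inputs summarize the partitioning handle and the presence of local split sources):

public class SchedulerChoiceSketch
{
    enum Choice { SOURCE_PARTITIONED, SCALED_WRITER, FIXED_SOURCE_PARTITIONED, FIXED_COUNT }

    // Condenses the branching of createStages above into a single decision.
    static Choice choose(boolean sourceDistribution, boolean scaledWriterDistribution, boolean hasLocalSplitSources)
    {
        if (sourceDistribution) {
            // splits drive node placement dynamically; a single output partition (new int[1])
            return Choice.SOURCE_PARTITIONED;
        }
        if (scaledWriterDistribution) {
            // writer task count scales with source throughput (ScaledWriterScheduler)
            return Choice.SCALED_WRITER;
        }
        if (hasLocalSplitSources) {
            // bucketed reads over a fixed node list (FixedSourcePartitionedScheduler)
            return Choice.FIXED_SOURCE_PARTITIONED;
        }
        // all sources remote: one task per entry in the node partition map (FixedCountScheduler)
        return Choice.FIXED_COUNT;
    }

    public static void main(String[] args)
    {
        System.out.println(choose(true, false, false));  // SOURCE_PARTITIONED
        System.out.println(choose(false, false, false)); // FIXED_COUNT
    }
}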
Use of io.prestosql.sql.planner.NodePartitioningManager in project hetu-core by openLooKeng.
From the class ServerMainModule, method setup, which registers NodePartitioningManager (among many other services) with the dependency injector:
@Override
protected void setup(Binder binder)
{
    ServerConfig serverConfig = buildConfigObject(ServerConfig.class);

    if (serverConfig.isCoordinator()) {
        install(new CoordinatorModule());
    }
    else {
        install(new WorkerModule());
    }

    configBinder(binder).bindConfigDefaults(HttpServerConfig.class, httpServerConfig -> {
        httpServerConfig.setAdminEnabled(false);
    });

    install(new InternalCommunicationModule());

    configBinder(binder).bindConfig(FeaturesConfig.class);
    configBinder(binder).bindConfig(PasswordSecurityConfig.class);

    binder.bind(SqlParser.class).in(Scopes.SINGLETON);
    binder.bind(SqlParserOptions.class).toInstance(sqlParserOptions);
    sqlParserOptions.useEnhancedErrorHandler(serverConfig.isEnhancedErrorReporting());

    jaxrsBinder(binder).bind(ThrowableMapper.class);

    configBinder(binder).bindConfig(QueryManagerConfig.class);
    configBinder(binder).bindConfig(HetuConfig.class);
    configBinder(binder).bindConfig(SqlEnvironmentConfig.class);

    newOptionalBinder(binder, ExplainAnalyzeContext.class);

    // GC Monitor
    binder.bind(GcMonitor.class).to(JmxGcMonitor.class).in(Scopes.SINGLETON);

    // session properties
    binder.bind(SessionPropertyManager.class).in(Scopes.SINGLETON);
    binder.bind(SystemSessionProperties.class).in(Scopes.SINGLETON);
    binder.bind(SessionPropertyDefaults.class).in(Scopes.SINGLETON);

    // schema properties
    binder.bind(SchemaPropertyManager.class).in(Scopes.SINGLETON);

    // table properties
    binder.bind(TablePropertyManager.class).in(Scopes.SINGLETON);

    // column properties
    binder.bind(ColumnPropertyManager.class).in(Scopes.SINGLETON);

    // analyze properties
    binder.bind(AnalyzePropertyManager.class).in(Scopes.SINGLETON);

    newSetBinder(binder, Filter.class, TheServlet.class).addBinding().to(HttpServerAvailableCheckFilter.class).in(Scopes.SINGLETON);

    // node manager
    discoveryBinder(binder).bindSelector("presto");
    binder.bind(DiscoveryNodeManager.class).in(Scopes.SINGLETON);
    binder.bind(InternalNodeManager.class).to(DiscoveryNodeManager.class).in(Scopes.SINGLETON);
    newExporter(binder).export(DiscoveryNodeManager.class).withGeneratedName();
    httpClientBinder(binder).bindHttpClient("node-manager", ForNodeManager.class)
            .withTracing()
            .withConfigDefaults(config -> {
                config.setIdleTimeout(serverConfig.getHttpClientIdleTimeout());
                config.setRequestTimeout(serverConfig.getHttpClientRequestTimeout());
            });

    // node scheduler
    // TODO: remove from NodePartitioningManager and move to CoordinatorModule
    configBinder(binder).bindConfig(NodeSchedulerConfig.class);
    binder.bind(NodeScheduler.class).in(Scopes.SINGLETON);
    binder.bind(NodeSchedulerExporter.class).in(Scopes.SINGLETON);
    binder.bind(NodeTaskMap.class).in(Scopes.SINGLETON);
    newExporter(binder).export(NodeScheduler.class).withGeneratedName();

    // network topology
    // TODO: move to CoordinatorModule when NodeScheduler is moved
    install(installModuleIf(
            NodeSchedulerConfig.class,
            config -> LEGACY.equalsIgnoreCase(config.getNetworkTopology()),
            moduleBinder -> moduleBinder.bind(NetworkTopology.class).to(LegacyNetworkTopology.class).in(Scopes.SINGLETON)));
    install(installModuleIf(
            NodeSchedulerConfig.class,
            config -> FLAT.equalsIgnoreCase(config.getNetworkTopology()),
            moduleBinder -> moduleBinder.bind(NetworkTopology.class).to(FlatNetworkTopology.class).in(Scopes.SINGLETON)));

    // task execution
    jaxrsBinder(binder).bind(TaskResource.class);
    newExporter(binder).export(TaskResource.class).withGeneratedName();
    jaxrsBinder(binder).bind(TaskExecutorResource.class);
    newExporter(binder).export(TaskExecutorResource.class).withGeneratedName();
    binder.bind(TaskManagementExecutor.class).in(Scopes.SINGLETON);
    binder.bind(SqlTaskManager.class).in(Scopes.SINGLETON);
    binder.bind(TaskManager.class).to(Key.get(SqlTaskManager.class));

    // memory revoking scheduler
    binder.bind(MemoryRevokingScheduler.class).in(Scopes.SINGLETON);

    // Add monitoring for JVM pauses
    binder.bind(PauseMeter.class).in(Scopes.SINGLETON);
    newExporter(binder).export(PauseMeter.class).withGeneratedName();

    configBinder(binder).bindConfig(MemoryManagerConfig.class);
    configBinder(binder).bindConfig(NodeMemoryConfig.class);
    binder.bind(LocalMemoryManager.class).in(Scopes.SINGLETON);
    binder.bind(LocalMemoryManagerExporter.class).in(Scopes.SINGLETON);
    binder.bind(EmbedVersion.class).in(Scopes.SINGLETON);
    newExporter(binder).export(TaskManager.class).withGeneratedName();
    binder.bind(TaskExecutor.class).in(Scopes.SINGLETON);
    newExporter(binder).export(TaskExecutor.class).withGeneratedName();
    binder.bind(MultilevelSplitQueue.class).in(Scopes.SINGLETON);
    newExporter(binder).export(MultilevelSplitQueue.class).withGeneratedName();
    binder.bind(LocalExecutionPlanner.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(CompilerConfig.class);
    binder.bind(ExpressionCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(ExpressionCompiler.class).withGeneratedName();
    binder.bind(PageFunctionCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(PageFunctionCompiler.class).withGeneratedName();
    configBinder(binder).bindConfig(TaskManagerConfig.class);
    binder.bind(IndexJoinLookupStats.class).in(Scopes.SINGLETON);
    newExporter(binder).export(IndexJoinLookupStats.class).withGeneratedName();
    binder.bind(AsyncHttpExecutionMBean.class).in(Scopes.SINGLETON);
    newExporter(binder).export(AsyncHttpExecutionMBean.class).withGeneratedName();
    binder.bind(JoinFilterFunctionCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(JoinFilterFunctionCompiler.class).withGeneratedName();
    binder.bind(JoinCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(JoinCompiler.class).withGeneratedName();
    binder.bind(OrderingCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(OrderingCompiler.class).withGeneratedName();
    binder.bind(PagesIndex.Factory.class).to(PagesIndex.DefaultFactory.class);
    binder.bind(LookupJoinOperators.class).in(Scopes.SINGLETON);

    jsonCodecBinder(binder).bindJsonCodec(TaskStatus.class);
    jsonCodecBinder(binder).bindJsonCodec(StageInfo.class);
    jsonCodecBinder(binder).bindJsonCodec(TaskInfo.class);
    smileCodecBinder(binder).bindSmileCodec(TaskStatus.class);
    smileCodecBinder(binder).bindSmileCodec(TaskInfo.class);
    jsonCodecBinder(binder).bindJsonCodec(OperatorStats.class);
    jsonCodecBinder(binder).bindJsonCodec(ExecutionFailureInfo.class);
    jaxrsBinder(binder).bind(PagesResponseWriter.class);

    // exchange client
    binder.bind(ExchangeClientSupplier.class).to(ExchangeClientFactory.class).in(Scopes.SINGLETON);
    httpClientBinder(binder).bindHttpClient("exchange", ForExchange.class)
            .withTracing()
            .withFilter(GenerateTraceTokenRequestFilter.class)
            .withConfigDefaults(config -> {
                config.setIdleTimeout(serverConfig.getHttpClientIdleTimeout());
                config.setRequestTimeout(serverConfig.getHttpClientRequestTimeout());
                config.setMaxConnectionsPerServer(250);
                config.setMaxContentLength(new DataSize(32, MEGABYTE));
            });

    configBinder(binder).bindConfig(ExchangeClientConfig.class);
    binder.bind(ExchangeExecutionMBean.class).in(Scopes.SINGLETON);
    newExporter(binder).export(ExchangeExecutionMBean.class).withGeneratedName();

    // execution
    binder.bind(LocationFactory.class).to(HttpLocationFactory.class).in(Scopes.SINGLETON);

    // memory manager
    jaxrsBinder(binder).bind(MemoryResource.class);
    jsonCodecBinder(binder).bindJsonCodec(MemoryInfo.class);
    jsonCodecBinder(binder).bindJsonCodec(MemoryPoolAssignmentsRequest.class);
    smileCodecBinder(binder).bindSmileCodec(MemoryInfo.class);
    smileCodecBinder(binder).bindSmileCodec(MemoryPoolAssignmentsRequest.class);

    // transaction manager
    configBinder(binder).bindConfig(TransactionManagerConfig.class);

    // data stream provider
    binder.bind(PageSourceManager.class).in(Scopes.SINGLETON);
    binder.bind(PageSourceProvider.class).to(PageSourceManager.class).in(Scopes.SINGLETON);

    // page sink provider
    binder.bind(PageSinkManager.class).in(Scopes.SINGLETON);
    binder.bind(PageSinkProvider.class).to(PageSinkManager.class).in(Scopes.SINGLETON);

    binder.bind(StaticCatalogStore.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(StaticCatalogStoreConfig.class);
    binder.bind(FunctionAndTypeManager.class).in(Scopes.SINGLETON);
    binder.bind(MetadataManager.class).in(Scopes.SINGLETON);
    binder.bind(Metadata.class).to(MetadataManager.class).in(Scopes.SINGLETON);
    binder.bind(DomainTranslator.class).to(RowExpressionDomainTranslator.class).in(Scopes.SINGLETON);
    binder.bind(DeterminismEvaluator.class).to(RowExpressionDeterminismEvaluator.class).in(Scopes.SINGLETON);

    // type
    binder.bind(TypeManager.class).to(FunctionAndTypeManager.class).in(Scopes.SINGLETON);
    binder.bind(TypeAnalyzer.class).in(Scopes.SINGLETON);
    jsonBinder(binder).addDeserializerBinding(Type.class).to(TypeDeserializer.class);
    newSetBinder(binder, Type.class);
    binder.bind(Kryo.class).in(Scopes.SINGLETON);

    // split manager
    binder.bind(SplitManager.class).in(Scopes.SINGLETON);

    // node partitioning manager
    binder.bind(NodePartitioningManager.class).in(Scopes.SINGLETON);

    // connector plan optimizer manager
    binder.bind(ConnectorPlanOptimizerManager.class).in(Scopes.SINGLETON);

    // index manager
    binder.bind(IndexManager.class).in(Scopes.SINGLETON);

    // handle resolver
    binder.install(new HandleJsonModule());

    // connector
    binder.bind(ScalarStatsCalculator.class).in(Scopes.SINGLETON);
    binder.bind(StatsNormalizer.class).in(Scopes.SINGLETON);
    binder.bind(FilterStatsCalculator.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorManager.class).in(Scopes.SINGLETON);

    // binding the DataCenterConnectorManager class for loading the DC connector sub-catalogs
    binder.bind(DataCenterConnectorManager.class).in(Scopes.SINGLETON);

    // binding the CatalogConnectorStore class for storing the DC connector objects and their properties
    binder.bind(CatalogConnectorStore.class).in(Scopes.SINGLETON);

    // Cube manager
    binder.bind(CubeManager.class).in(Scopes.SINGLETON);

    // system connector
    binder.install(new SystemConnectorModule());

    // splits
    jsonCodecBinder(binder).bindJsonCodec(TaskUpdateRequest.class);
    jsonCodecBinder(binder).bindJsonCodec(ConnectorSplit.class);
    smileCodecBinder(binder).bindSmileCodec(TaskUpdateRequest.class);
    smileCodecBinder(binder).bindSmileCodec(ConnectorSplit.class);
    smileCodecBinder(binder).bindSmileCodec(PlanFragment.class);
    jsonBinder(binder).addSerializerBinding(Slice.class).to(SliceSerializer.class);
    jsonBinder(binder).addDeserializerBinding(Slice.class).to(SliceDeserializer.class);
    jsonBinder(binder).addSerializerBinding(Expression.class).to(ExpressionSerializer.class);
    jsonBinder(binder).addDeserializerBinding(Expression.class).to(ExpressionDeserializer.class);
    jsonBinder(binder).addDeserializerBinding(FunctionCall.class).to(FunctionCallDeserializer.class);

    // split monitor
    binder.bind(SplitMonitor.class).in(Scopes.SINGLETON);

    // Determine the NodeVersion
    NodeVersion nodeVersion = new NodeVersion(serverConfig.getPrestoVersion());
    binder.bind(NodeVersion.class).toInstance(nodeVersion);

    // presto announcement
    discoveryBinder(binder).bindHttpAnnouncement("presto")
            .addProperty("node_version", nodeVersion.toString())
            .addProperty("coordinator", String.valueOf(serverConfig.isCoordinator()))
            .addProperty("worker", String.valueOf(!serverConfig.isCoordinator() || buildConfigObject(NodeSchedulerConfig.class).isIncludeCoordinator()));

    // server info resource
    jaxrsBinder(binder).bind(ServerInfoResource.class);
    jsonCodecBinder(binder).bindJsonCodec(ServerInfo.class);

    // node status resource
    jaxrsBinder(binder).bind(StatusResource.class);
    jsonCodecBinder(binder).bindJsonCodec(NodeStatus.class);

    // dynamic catalog resource
    jsonCodecBinder(binder).bindJsonCodec(CatalogInfo.class);
    binder.bind(DynamicCatalogStore.class).in(Scopes.SINGLETON);
    binder.bind(DynamicCatalogScanner.class).in(Scopes.SINGLETON);
    binder.bind(CatalogStoreUtil.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(DynamicCatalogConfig.class);

    // plugin manager
    binder.bind(PluginManager.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(PluginManagerConfig.class);

    binder.bind(SeedStoreManager.class).in(Scopes.SINGLETON);
    binder.bind(FileSystemClientManager.class).in(Scopes.SINGLETON);
    binder.bind(CatalogManager.class).in(Scopes.SINGLETON);
    binder.bind(HetuMetaStoreManager.class).in(Scopes.SINGLETON);
    binder.bind(StaticFunctionNamespaceStore.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(StaticFunctionNamespaceStoreConfig.class);

    // block encodings
    jsonBinder(binder).addSerializerBinding(Block.class).to(BlockJsonSerde.Serializer.class);
    jsonBinder(binder).addDeserializerBinding(Block.class).to(BlockJsonSerde.Deserializer.class);

    // thread visualizer
    jaxrsBinder(binder).bind(ThreadResource.class);

    // PageSorter
    binder.bind(PageSorter.class).to(PagesIndexPageSorter.class).in(Scopes.SINGLETON);

    // PageIndexer
    binder.bind(PageIndexerFactory.class).to(GroupByHashPageIndexerFactory.class).in(Scopes.SINGLETON);

    // Finalizer
    binder.bind(FinalizerService.class).in(Scopes.SINGLETON);

    // HeuristicIndexerManager
    binder.bind(HeuristicIndexerManager.class).in(Scopes.SINGLETON);

    // SnapshotUtils
    binder.bind(SnapshotUtils.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(SnapshotConfig.class);

    // Spiller
    binder.bind(SpillerFactory.class).to(GenericSpillerFactory.class).in(Scopes.SINGLETON);
    binder.bind(SingleStreamSpillerFactory.class).to(FileSingleStreamSpillerFactory.class).in(Scopes.SINGLETON);
    binder.bind(PartitioningSpillerFactory.class).to(GenericPartitioningSpillerFactory.class).in(Scopes.SINGLETON);
    binder.bind(SpillerStats.class).in(Scopes.SINGLETON);
    newExporter(binder).export(SpillerFactory.class).withGeneratedName();
    binder.bind(LocalSpillManager.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(NodeSpillConfig.class);

    // cleanup
    binder.bind(ExecutorCleanup.class).in(Scopes.SINGLETON);

    // State store
    binder.bind(StateStoreProvider.class).to(LocalStateStoreProvider.class).in(Scopes.SINGLETON);

    // State store listener manager
    binder.bind(StateStoreListenerManager.class).in(Scopes.SINGLETON);

    // dynamic filter listener service
    binder.bind(DynamicFilterCacheManager.class).in(Scopes.SINGLETON);
}
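Here NodePartitioningManager is simply bound in SINGLETON scope, so every injection point in the server shares one instance. A minimal, runnable Guice sketch of that binding pattern (PartitioningService is a hypothetical stand-in for NodePartitioningManager):

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Scopes;

public class SingletonBindingSketch
{
    // Hypothetical stand-in for a service such as NodePartitioningManager.
    static class PartitioningService {}

    public static void main(String[] args)
    {
        Injector injector = Guice.createInjector(new AbstractModule()
        {
            @Override
            protected void configure()
            {
                // Same pattern as ServerMainModule: one shared instance per injector.
                bind(PartitioningService.class).in(Scopes.SINGLETON);
            }
        });

        PartitioningService a = injector.getInstance(PartitioningService.class);
        PartitioningService b = injector.getInstance(PartitioningService.class);
        System.out.println(a == b); // true: SINGLETON scope reuses the instance
    }
}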