Use of com.facebook.presto.execution.scheduler.NodeScheduler in project presto by prestodb: the class ServerMainModule, method setup.
@Override
protected void setup(Binder binder) {
    ServerConfig serverConfig = buildConfigObject(ServerConfig.class);

    if (serverConfig.isCoordinator()) {
        install(new CoordinatorModule());
        binder.bind(new TypeLiteral<Optional<QueryPerformanceFetcher>>() {})
                .toProvider(QueryPerformanceFetcherProvider.class)
                .in(Scopes.SINGLETON);
    }
    else {
        binder.bind(new TypeLiteral<Optional<QueryPerformanceFetcher>>() {})
                .toInstance(Optional.empty());
        // Install no-op resource group manager on workers, since only coordinators manage resource groups.
        binder.bind(ResourceGroupManager.class).to(NoOpResourceGroupManager.class).in(Scopes.SINGLETON);
        // HACK: this binding is needed by SystemConnectorModule, but will only be used on the coordinator
        binder.bind(QueryManager.class).toInstance(newProxy(QueryManager.class, (proxy, method, args) -> {
            throw new UnsupportedOperationException();
        }));
    }

    configBinder(binder).bindConfig(FeaturesConfig.class);

    binder.bind(SqlParser.class).in(Scopes.SINGLETON);
    binder.bind(SqlParserOptions.class).toInstance(sqlParserOptions);

    bindFailureDetector(binder, serverConfig.isCoordinator());

    jaxrsBinder(binder).bind(ThrowableMapper.class);

    configBinder(binder).bindConfig(QueryManagerConfig.class);

    jsonCodecBinder(binder).bindJsonCodec(ViewDefinition.class);

    // session properties
    binder.bind(SessionPropertyManager.class).in(Scopes.SINGLETON);
    binder.bind(SystemSessionProperties.class).in(Scopes.SINGLETON);

    // schema properties
    binder.bind(SchemaPropertyManager.class).in(Scopes.SINGLETON);

    // table properties
    binder.bind(TablePropertyManager.class).in(Scopes.SINGLETON);

    // node manager
    discoveryBinder(binder).bindSelector("presto");
    binder.bind(DiscoveryNodeManager.class).in(Scopes.SINGLETON);
    binder.bind(InternalNodeManager.class).to(DiscoveryNodeManager.class).in(Scopes.SINGLETON);
    newExporter(binder).export(DiscoveryNodeManager.class).withGeneratedName();
    httpClientBinder(binder).bindHttpClient("node-manager", ForNodeManager.class)
            .withTracing()
            .withConfigDefaults(config -> {
                config.setIdleTimeout(new Duration(30, SECONDS));
                config.setRequestTimeout(new Duration(10, SECONDS));
            });

    // node scheduler
    // TODO: remove from NodePartitioningManager and move to CoordinatorModule
    configBinder(binder).bindConfig(NodeSchedulerConfig.class);
    binder.bind(NodeScheduler.class).in(Scopes.SINGLETON);
    binder.bind(NodeSchedulerExporter.class).in(Scopes.SINGLETON);
    binder.bind(NodeTaskMap.class).in(Scopes.SINGLETON);
    newExporter(binder).export(NodeScheduler.class).withGeneratedName();

    // network topology
    // TODO: move to CoordinatorModule when NodeScheduler is moved
    install(installModuleIf(
            NodeSchedulerConfig.class,
            config -> LEGACY.equalsIgnoreCase(config.getNetworkTopology()),
            moduleBinder -> moduleBinder.bind(NetworkTopology.class).to(LegacyNetworkTopology.class).in(Scopes.SINGLETON)));
    install(installModuleIf(
            NodeSchedulerConfig.class,
            config -> FLAT.equalsIgnoreCase(config.getNetworkTopology()),
            moduleBinder -> moduleBinder.bind(NetworkTopology.class).to(FlatNetworkTopology.class).in(Scopes.SINGLETON)));

    // task execution
    jaxrsBinder(binder).bind(TaskResource.class);
    newExporter(binder).export(TaskResource.class).withGeneratedName();
    binder.bind(TaskManager.class).to(SqlTaskManager.class).in(Scopes.SINGLETON);

    // workaround for CodeCache GC issue
    if (JavaVersion.current().getMajor() == 8) {
        configBinder(binder).bindConfig(CodeCacheGcConfig.class);
        binder.bind(CodeCacheGcTrigger.class).in(Scopes.SINGLETON);
    }

    // Add monitoring for JVM pauses
    binder.bind(PauseMeter.class).in(Scopes.SINGLETON);
    newExporter(binder).export(PauseMeter.class).withGeneratedName();

    configBinder(binder).bindConfig(MemoryManagerConfig.class);
    configBinder(binder).bindConfig(NodeMemoryConfig.class);
    configBinder(binder).bindConfig(ReservedSystemMemoryConfig.class);
    binder.bind(LocalMemoryManager.class).in(Scopes.SINGLETON);
    binder.bind(LocalMemoryManagerExporter.class).in(Scopes.SINGLETON);
    newExporter(binder).export(TaskManager.class).withGeneratedName();
    binder.bind(TaskExecutor.class).in(Scopes.SINGLETON);
    newExporter(binder).export(TaskExecutor.class).withGeneratedName();
    binder.bind(LocalExecutionPlanner.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(CompilerConfig.class);
    binder.bind(ExpressionCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(ExpressionCompiler.class).withGeneratedName();
    configBinder(binder).bindConfig(TaskManagerConfig.class);
    binder.bind(IndexJoinLookupStats.class).in(Scopes.SINGLETON);
    newExporter(binder).export(IndexJoinLookupStats.class).withGeneratedName();
    binder.bind(AsyncHttpExecutionMBean.class).in(Scopes.SINGLETON);
    newExporter(binder).export(AsyncHttpExecutionMBean.class).withGeneratedName();
    binder.bind(JoinFilterFunctionCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(JoinFilterFunctionCompiler.class).withGeneratedName();
    binder.bind(JoinCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(JoinCompiler.class).withGeneratedName();
    binder.bind(OrderingCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(OrderingCompiler.class).withGeneratedName();
    binder.bind(PagesIndex.Factory.class).to(PagesIndex.DefaultFactory.class);
    binder.bind(JoinProbeCompiler.class).in(Scopes.SINGLETON);
    newExporter(binder).export(JoinProbeCompiler.class).withGeneratedName();
    binder.bind(LookupJoinOperators.class).in(Scopes.SINGLETON);

    jsonCodecBinder(binder).bindJsonCodec(TaskStatus.class);
    jsonCodecBinder(binder).bindJsonCodec(StageInfo.class);
    jsonCodecBinder(binder).bindJsonCodec(TaskInfo.class);
    jaxrsBinder(binder).bind(PagesResponseWriter.class);

    // exchange client
    binder.bind(new TypeLiteral<ExchangeClientSupplier>() {}).to(ExchangeClientFactory.class).in(Scopes.SINGLETON);
    httpClientBinder(binder).bindHttpClient("exchange", ForExchange.class)
            .withTracing()
            .withConfigDefaults(config -> {
                config.setIdleTimeout(new Duration(30, SECONDS));
                config.setRequestTimeout(new Duration(10, SECONDS));
                config.setMaxConnectionsPerServer(250);
                config.setMaxContentLength(new DataSize(32, MEGABYTE));
            });
    configBinder(binder).bindConfig(ExchangeClientConfig.class);
    binder.bind(ExchangeExecutionMBean.class).in(Scopes.SINGLETON);
    newExporter(binder).export(ExchangeExecutionMBean.class).withGeneratedName();

    // execution
    binder.bind(LocationFactory.class).to(HttpLocationFactory.class).in(Scopes.SINGLETON);

    // memory manager
    jaxrsBinder(binder).bind(MemoryResource.class);
    jsonCodecBinder(binder).bindJsonCodec(MemoryInfo.class);
    jsonCodecBinder(binder).bindJsonCodec(MemoryPoolAssignmentsRequest.class);

    // transaction manager
    configBinder(binder).bindConfig(TransactionManagerConfig.class);

    // data stream provider
    binder.bind(PageSourceManager.class).in(Scopes.SINGLETON);
    binder.bind(PageSourceProvider.class).to(PageSourceManager.class).in(Scopes.SINGLETON);

    // page sink provider
    binder.bind(PageSinkManager.class).in(Scopes.SINGLETON);
    binder.bind(PageSinkProvider.class).to(PageSinkManager.class).in(Scopes.SINGLETON);

    // metadata
    binder.bind(StaticCatalogStore.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(StaticCatalogStoreConfig.class);
    binder.bind(MetadataManager.class).in(Scopes.SINGLETON);
    binder.bind(Metadata.class).to(MetadataManager.class).in(Scopes.SINGLETON);

    // type
    binder.bind(TypeRegistry.class).in(Scopes.SINGLETON);
    binder.bind(TypeManager.class).to(TypeRegistry.class).in(Scopes.SINGLETON);
    jsonBinder(binder).addDeserializerBinding(Type.class).to(TypeDeserializer.class);
    newSetBinder(binder, Type.class);

    // split manager
    binder.bind(SplitManager.class).in(Scopes.SINGLETON);

    // node partitioning manager
    binder.bind(NodePartitioningManager.class).in(Scopes.SINGLETON);

    // index manager
    binder.bind(IndexManager.class).in(Scopes.SINGLETON);

    // handle resolver
    binder.install(new HandleJsonModule());

    // connector
    binder.bind(ConnectorManager.class).in(Scopes.SINGLETON);

    // system connector
    binder.install(new SystemConnectorModule());

    // splits
    jsonCodecBinder(binder).bindJsonCodec(TaskUpdateRequest.class);
    jsonCodecBinder(binder).bindJsonCodec(ConnectorSplit.class);
    jsonBinder(binder).addSerializerBinding(Slice.class).to(SliceSerializer.class);
    jsonBinder(binder).addDeserializerBinding(Slice.class).to(SliceDeserializer.class);
    jsonBinder(binder).addSerializerBinding(Expression.class).to(ExpressionSerializer.class);
    jsonBinder(binder).addDeserializerBinding(Expression.class).to(ExpressionDeserializer.class);
    jsonBinder(binder).addDeserializerBinding(FunctionCall.class).to(FunctionCallDeserializer.class);

    // query monitor
    configBinder(binder).bindConfig(QueryMonitorConfig.class);
    binder.bind(QueryMonitor.class).in(Scopes.SINGLETON);

    // Determine the NodeVersion
    String prestoVersion = serverConfig.getPrestoVersion();
    if (prestoVersion == null) {
        prestoVersion = getClass().getPackage().getImplementationVersion();
    }
    checkState(prestoVersion != null, "presto.version must be provided when it cannot be automatically determined");
    NodeVersion nodeVersion = new NodeVersion(prestoVersion);
    binder.bind(NodeVersion.class).toInstance(nodeVersion);

    // presto announcement
    discoveryBinder(binder).bindHttpAnnouncement("presto")
            .addProperty("node_version", nodeVersion.toString())
            .addProperty("coordinator", String.valueOf(serverConfig.isCoordinator()))
            .addProperty("connectorIds", nullToEmpty(serverConfig.getDataSources()));

    // server info resource
    jaxrsBinder(binder).bind(ServerInfoResource.class);

    // plugin manager
    binder.bind(PluginManager.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(PluginManagerConfig.class);

    binder.bind(CatalogManager.class).in(Scopes.SINGLETON);

    // optimizers
    binder.bind(PlanOptimizers.class).in(Scopes.SINGLETON);

    // block encodings
    binder.bind(BlockEncodingManager.class).in(Scopes.SINGLETON);
    binder.bind(BlockEncodingSerde.class).to(BlockEncodingManager.class).in(Scopes.SINGLETON);
    newSetBinder(binder, new TypeLiteral<BlockEncodingFactory<?>>() {});
    jsonBinder(binder).addSerializerBinding(Block.class).to(BlockJsonSerde.Serializer.class);
    jsonBinder(binder).addDeserializerBinding(Block.class).to(BlockJsonSerde.Deserializer.class);

    // thread visualizer
    jaxrsBinder(binder).bind(ThreadResource.class);

    // PageSorter
    binder.bind(PageSorter.class).to(PagesIndexPageSorter.class).in(Scopes.SINGLETON);

    // PageIndexer
    binder.bind(PageIndexerFactory.class).to(GroupByHashPageIndexerFactory.class).in(Scopes.SINGLETON);

    // Finalizer
    binder.bind(FinalizerService.class).in(Scopes.SINGLETON);

    // Spiller
    binder.bind(SpillerFactory.class).to(BinarySpillerFactory.class).in(Scopes.SINGLETON);
    newExporter(binder).export(SpillerFactory.class).withGeneratedName();
}
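Because the module binds NodeScheduler as a singleton, any coordinator-side component can receive it by constructor injection, which is how NodePartitioningManager (also bound above) obtains it. A minimal sketch of such a consumer; the class name here is hypothetical and not part of Presto:

import javax.inject.Inject;

import static java.util.Objects.requireNonNull;

// Hypothetical consumer of the NodeScheduler binding shown above.
public class NodeSchedulerConsumer {
    private final NodeScheduler nodeScheduler;

    @Inject
    public NodeSchedulerConsumer(NodeScheduler nodeScheduler) {
        // Guice supplies the singleton instance bound in ServerMainModule
        this.nodeScheduler = requireNonNull(nodeScheduler, "nodeScheduler is null");
    }
}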
Use of com.facebook.presto.execution.scheduler.NodeScheduler in project presto by prestodb: the class TaskTestUtils, method createTestingPlanner.
public static LocalExecutionPlanner createTestingPlanner() {
    MetadataManager metadata = MetadataManager.createTestMetadataManager();

    PageSourceManager pageSourceManager = new PageSourceManager();
    pageSourceManager.addConnectorPageSourceProvider(CONNECTOR_ID, new TestingPageSourceProvider());

    // we don't start the finalizer so nothing will be collected, which is ok for a test
    FinalizerService finalizerService = new FinalizerService();

    NodeScheduler nodeScheduler = new NodeScheduler(
            new LegacyNetworkTopology(),
            new InMemoryNodeManager(),
            new NodeSchedulerConfig().setIncludeCoordinator(true),
            new NodeTaskMap(finalizerService));
    NodePartitioningManager nodePartitioningManager = new NodePartitioningManager(nodeScheduler);

    return new LocalExecutionPlanner(
            metadata,
            new SqlParser(),
            Optional.empty(),
            pageSourceManager,
            new IndexManager(),
            nodePartitioningManager,
            new PageSinkManager(),
            new MockExchangeClientSupplier(),
            new ExpressionCompiler(metadata),
            new JoinFilterFunctionCompiler(metadata),
            new IndexJoinLookupStats(),
            new CompilerConfig(),
            new TaskManagerConfig(),
            new BinarySpillerFactory(new BlockEncodingManager(metadata.getTypeManager()), new FeaturesConfig()),
            new TestingBlockEncodingSerde(new TestingTypeManager()),
            new PagesIndex.TestingFactory(),
            new JoinCompiler(),
            new LookupJoinOperators(new JoinProbeCompiler()));
}
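A test can then obtain a fully wired planner in a single call; a minimal usage sketch, assuming TaskTestUtils is imported in the test class:

// In a test class (sketch):
LocalExecutionPlanner planner = TaskTestUtils.createTestingPlanner();
// planner is backed by an in-memory node manager and a coordinator-inclusive NodeScheduler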
Use of com.facebook.presto.execution.scheduler.NodeScheduler in project presto by prestodb: the class TestNodeScheduler, method testTopologyAwareScheduling.
@Test(timeOut = 60 * 1000)
public void testTopologyAwareScheduling() throws Exception {
    TestingTransactionHandle transactionHandle = TestingTransactionHandle.create();
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    InMemoryNodeManager nodeManager = new InMemoryNodeManager();

    ImmutableList.Builder<Node> nodeBuilder = ImmutableList.builder();
    nodeBuilder.add(new PrestoNode("node1", URI.create("http://host1.rack1:11"), NodeVersion.UNKNOWN, false));
    nodeBuilder.add(new PrestoNode("node2", URI.create("http://host2.rack1:12"), NodeVersion.UNKNOWN, false));
    nodeBuilder.add(new PrestoNode("node3", URI.create("http://host3.rack2:13"), NodeVersion.UNKNOWN, false));
    ImmutableList<Node> nodes = nodeBuilder.build();
    nodeManager.addNode(CONNECTOR_ID, nodes);

    // contents of taskMap indicate the node-task map for the current stage
    Map<Node, RemoteTask> taskMap = new HashMap<>();
    NodeSchedulerConfig nodeSchedulerConfig = new NodeSchedulerConfig()
            .setMaxSplitsPerNode(25)
            .setIncludeCoordinator(false)
            .setNetworkTopology("test")
            .setMaxPendingSplitsPerTask(20);

    TestNetworkTopology topology = new TestNetworkTopology();
    NetworkLocationCache locationCache = new NetworkLocationCache(topology) {
        @Override
        public NetworkLocation get(HostAddress host) {
            // Bypass the cache for workers, since we only look them up once and they would all be unresolved otherwise
            if (host.getHostText().startsWith("host")) {
                return topology.locate(host);
            }
            else {
                return super.get(host);
            }
        }
    };
    NodeScheduler nodeScheduler = new NodeScheduler(locationCache, topology, nodeManager, nodeSchedulerConfig, nodeTaskMap);
    NodeSelector nodeSelector = nodeScheduler.createNodeSelector(CONNECTOR_ID);

    // Fill up the nodes with non-local data
    ImmutableSet.Builder<Split> nonRackLocalBuilder = ImmutableSet.builder();
    for (int i = 0; i < (25 + 11) * 3; i++) {
        nonRackLocalBuilder.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote(HostAddress.fromParts("data.other_rack", 1))));
    }
    Set<Split> nonRackLocalSplits = nonRackLocalBuilder.build();
    Multimap<Node, Split> assignments = nodeSelector.computeAssignments(nonRackLocalSplits, ImmutableList.copyOf(taskMap.values())).getAssignments();
    MockRemoteTaskFactory remoteTaskFactory = new MockRemoteTaskFactory(remoteTaskExecutor);
    int task = 0;
    for (Node node : assignments.keySet()) {
        TaskId taskId = new TaskId("test", 1, task);
        task++;
        MockRemoteTaskFactory.MockRemoteTask remoteTask = remoteTaskFactory.createTableScanTask(
                taskId,
                node,
                ImmutableList.copyOf(assignments.get(node)),
                nodeTaskMap.createPartitionedSplitCountTracker(node, taskId));
        remoteTask.startSplits(25);
        nodeTaskMap.addTask(node, remoteTask);
        taskMap.put(node, remoteTask);
    }

    // Continue assigning to fill up part of the queue
    nonRackLocalSplits = Sets.difference(nonRackLocalSplits, new HashSet<>(assignments.values()));
    assignments = nodeSelector.computeAssignments(nonRackLocalSplits, ImmutableList.copyOf(taskMap.values())).getAssignments();
    for (Node node : assignments.keySet()) {
        RemoteTask remoteTask = taskMap.get(node);
        remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder()
                .putAll(new PlanNodeId("sourceId"), assignments.get(node))
                .build());
    }
    nonRackLocalSplits = Sets.difference(nonRackLocalSplits, new HashSet<>(assignments.values()));
    // Check that 3 of the splits were rejected, since they're non-local
    assertEquals(nonRackLocalSplits.size(), 3);

    // Assign rack-local splits
    ImmutableSet.Builder<Split> rackLocalSplits = ImmutableSet.builder();
    HostAddress dataHost1 = HostAddress.fromParts("data.rack1", 1);
    HostAddress dataHost2 = HostAddress.fromParts("data.rack2", 1);
    for (int i = 0; i < 6 * 2; i++) {
        rackLocalSplits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote(dataHost1)));
    }
    for (int i = 0; i < 6; i++) {
        rackLocalSplits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote(dataHost2)));
    }
    assignments = nodeSelector.computeAssignments(rackLocalSplits.build(), ImmutableList.copyOf(taskMap.values())).getAssignments();
    for (Node node : assignments.keySet()) {
        RemoteTask remoteTask = taskMap.get(node);
        remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder()
                .putAll(new PlanNodeId("sourceId"), assignments.get(node))
                .build());
    }
    Set<Split> unassigned = Sets.difference(rackLocalSplits.build(), new HashSet<>(assignments.values()));

    // Compute the assignments a second time to account for the fact that some splits may not have been assigned
    // due to asynchronous loading of the NetworkLocationCache
    boolean cacheRefreshed = false;
    while (!cacheRefreshed) {
        cacheRefreshed = true;
        if (locationCache.get(dataHost1).equals(ROOT_LOCATION)) {
            cacheRefreshed = false;
        }
        if (locationCache.get(dataHost2).equals(ROOT_LOCATION)) {
            cacheRefreshed = false;
        }
        MILLISECONDS.sleep(10);
    }
    assignments = nodeSelector.computeAssignments(unassigned, ImmutableList.copyOf(taskMap.values())).getAssignments();
    for (Node node : assignments.keySet()) {
        RemoteTask remoteTask = taskMap.get(node);
        remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder()
                .putAll(new PlanNodeId("sourceId"), assignments.get(node))
                .build());
    }
    unassigned = Sets.difference(unassigned, new HashSet<>(assignments.values()));
    assertEquals(unassigned.size(), 3);
    int rack1 = 0;
    int rack2 = 0;
    for (Split split : unassigned) {
        String rack = topology.locate(split.getAddresses().get(0)).getSegments().get(0);
        switch (rack) {
            case "rack1":
                rack1++;
                break;
            case "rack2":
                rack2++;
                break;
            default:
                fail();
        }
    }
    assertEquals(rack1, 2);
    assertEquals(rack2, 1);

    // Assign local splits
    ImmutableSet.Builder<Split> localSplits = ImmutableSet.builder();
    localSplits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote(HostAddress.fromParts("host1.rack1", 1))));
    localSplits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote(HostAddress.fromParts("host2.rack1", 1))));
    localSplits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote(HostAddress.fromParts("host3.rack2", 1))));
    assignments = nodeSelector.computeAssignments(localSplits.build(), ImmutableList.copyOf(taskMap.values())).getAssignments();
    assertEquals(assignments.size(), 3);
    assertEquals(assignments.keySet().size(), 3);
}
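The rack assertions above depend on the test's TestNetworkTopology helper (defined elsewhere in the test class), whose locate method maps a host name such as host1.rack1 or data.rack2 to a network location whose first segment is the rack. A hedged sketch of that host-to-rack mapping; the helper name is hypothetical and only illustrates the convention the test relies on:

// Hypothetical helper illustrating how a test topology can derive the rack
// segment from host names like "host1.rack1" or "data.rack2".
static String rackOf(HostAddress host) {
    String name = host.getHostText();                    // e.g. "host1.rack1"
    int dot = name.indexOf('.');
    return dot < 0 ? name : name.substring(dot + 1);     // -> "rack1"
}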
Use of com.facebook.presto.execution.scheduler.NodeScheduler in project presto by prestodb: the class TestNodeScheduler, method setUp.
@BeforeMethod
public void setUp() throws Exception {
    finalizerService = new FinalizerService();
    nodeTaskMap = new NodeTaskMap(finalizerService);
    nodeManager = new InMemoryNodeManager();

    ImmutableList.Builder<Node> nodeBuilder = ImmutableList.builder();
    nodeBuilder.add(new PrestoNode("other1", URI.create("http://127.0.0.1:11"), NodeVersion.UNKNOWN, false));
    nodeBuilder.add(new PrestoNode("other2", URI.create("http://127.0.0.1:12"), NodeVersion.UNKNOWN, false));
    nodeBuilder.add(new PrestoNode("other3", URI.create("http://127.0.0.1:13"), NodeVersion.UNKNOWN, false));
    ImmutableList<Node> nodes = nodeBuilder.build();
    nodeManager.addNode(CONNECTOR_ID, nodes);

    NodeSchedulerConfig nodeSchedulerConfig = new NodeSchedulerConfig()
            .setMaxSplitsPerNode(20)
            .setIncludeCoordinator(false)
            .setMaxPendingSplitsPerTask(10);
    NodeScheduler nodeScheduler = new NodeScheduler(new LegacyNetworkTopology(), nodeManager, nodeSchedulerConfig, nodeTaskMap);

    // contents of taskMap indicate the node-task map for the current stage
    taskMap = new HashMap<>();
    nodeSelector = nodeScheduler.createNodeSelector(CONNECTOR_ID);
    remoteTaskExecutor = Executors.newCachedThreadPool(daemonThreadsNamed("remoteTaskExecutor-%s"));

    finalizerService.start();
}
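With this fixture in place, each test case asks the nodeSelector for split assignments against the three registered nodes. A minimal usage sketch, mirroring the computeAssignments calls in the topology test above:

// Sketch: ask the selector to assign a single remote split across the three nodes.
Set<Split> splits = ImmutableSet.of(
        new Split(CONNECTOR_ID, TestingTransactionHandle.create(), new TestSplitRemote(HostAddress.fromParts("127.0.0.1", 11))));
Multimap<Node, Split> assignments =
        nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values())).getAssignments();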