Use of com.facebook.presto.execution.scheduler.SplitPlacementResult in project presto by prestodb.
From the class TestNodeScheduler, method testAffinityAssignmentNotSupported.
@Test
public void testAffinityAssignmentNotSupported()
{
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    TestingTransactionHandle transactionHandle = TestingTransactionHandle.create();
    NodeSchedulerConfig nodeSchedulerConfig = new NodeSchedulerConfig()
            .setMaxSplitsPerNode(20)
            .setIncludeCoordinator(false)
            .setMaxPendingSplitsPerTask(10);
    LegacyNetworkTopology legacyNetworkTopology = new LegacyNetworkTopology();
    NodeScheduler nodeScheduler = new NodeScheduler(new NetworkLocationCache(legacyNetworkTopology), legacyNetworkTopology, nodeManager, new NodeSelectionStats(), nodeSchedulerConfig, nodeTaskMap, new Duration(0, SECONDS), new ThrowingNodeTtlFetcherManager(), new NoOpQueryManager(), new SimpleTtlNodeSelectorConfig());
    NodeSelector nodeSelector = nodeScheduler.createNodeSelector(session, CONNECTOR_ID, 2);
    Set<Split> splits = new HashSet<>();
    splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote(HostAddress.fromString("127.0.0.1:10"))));
    splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote(HostAddress.fromString("127.0.0.1:10"))));
    SplitPlacementResult splitPlacementResult = nodeSelector.computeAssignments(splits, ImmutableList.of());
    Set<InternalNode> internalNodes = splitPlacementResult.getAssignments().keySet();
    // The splits do not support affinity scheduling, so the selector falls back to random scheduling
    assertEquals(internalNodes.size(), 2);
}
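For reference, computeAssignments returns a SplitPlacementResult that pairs the chosen placements (a Multimap from node to splits) with a future that completes once the scheduler can accept more work. Below is a minimal sketch of consuming such a result; the printAssignments helper is hypothetical and exists only for illustration:

import com.facebook.presto.execution.scheduler.SplitPlacementResult;
import com.facebook.presto.metadata.InternalNode;
import com.facebook.presto.metadata.Split;
import com.google.common.collect.Multimap;

// Hypothetical debugging helper, not part of Presto.
static void printAssignments(SplitPlacementResult result)
{
    Multimap<InternalNode, Split> assignments = result.getAssignments();
    for (InternalNode node : assignments.keySet()) {
        System.out.printf("%s -> %d split(s)%n", node.getNodeIdentifier(), assignments.get(node).size());
    }
    // getBlocked() is done once the scheduler is ready for more splits.
    System.out.println("blocked: " + !result.getBlocked().isDone());
}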
Use of com.facebook.presto.execution.scheduler.SplitPlacementResult in project presto by prestodb.
From the class TestNodeScheduler, method testMaxTasksPerStageWittLimit.
@Test
public void testMaxTasksPerStageWittLimit()
{
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    TestingTransactionHandle transactionHandle = TestingTransactionHandle.create();
    NodeSchedulerConfig nodeSchedulerConfig = new NodeSchedulerConfig()
            .setMaxSplitsPerNode(20)
            .setIncludeCoordinator(false)
            .setMaxPendingSplitsPerTask(10);
    NodeScheduler nodeScheduler = new NodeScheduler(new LegacyNetworkTopology(), nodeManager, new NodeSelectionStats(), nodeSchedulerConfig, nodeTaskMap, new ThrowingNodeTtlFetcherManager(), new NoOpQueryManager(), new SimpleTtlNodeSelectorConfig());
    NodeSelector nodeSelector = nodeScheduler.createNodeSelector(session, CONNECTOR_ID, 2);
    Set<Split> splits = new HashSet<>();
    splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote()));
    SplitPlacementResult splitPlacementResult = nodeSelector.computeAssignments(splits, ImmutableList.of());
    Set<InternalNode> internalNodes = splitPlacementResult.getAssignments().keySet();
    assertEquals(internalNodes.size(), 1);
    // Add one more split, two in total
    splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote()));
    splitPlacementResult = nodeSelector.computeAssignments(splits, getRemoteTableScanTask(splitPlacementResult));
    Set<InternalNode> internalNodesSecondCall = splitPlacementResult.getAssignments().keySet();
    assertEquals(internalNodesSecondCall.size(), 2);
    assertTrue(internalNodesSecondCall.containsAll(internalNodes));
    // Add one more split, three in total; the two-node limit still holds
    splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote()));
    splitPlacementResult = nodeSelector.computeAssignments(splits, getRemoteTableScanTask(splitPlacementResult));
    assertEquals(splitPlacementResult.getAssignments().keySet().size(), 2);
    assertEquals(splitPlacementResult.getAssignments().keySet(), internalNodesSecondCall);
}
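The third argument to createNodeSelector is the per-stage task limit this test exercises: once two nodes host tasks for the stage, later splits are confined to those two nodes. The sketch below shows the general shape of such a cap under assumed names; it is not Presto's actual selection code:

import com.facebook.presto.metadata.InternalNode;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

// Illustrative only: prefer nodes that already host tasks for this stage,
// and admit new nodes only while the per-stage task limit permits.
static List<InternalNode> capCandidates(Set<InternalNode> nodesWithTasks, List<InternalNode> allNodes, int maxTasksPerStage)
{
    List<InternalNode> candidates = new ArrayList<>(nodesWithTasks);
    for (InternalNode node : allNodes) {
        if (candidates.size() >= maxTasksPerStage) {
            break;
        }
        if (!candidates.contains(node)) {
            candidates.add(node);
        }
    }
    return candidates;
}

This matches the assertions above: the second scheduling call may grow the set from one node to two, but the third call must reuse the same two nodes.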
Use of com.facebook.presto.execution.scheduler.SplitPlacementResult in project presto by prestodb.
From the class TestNodeScheduler, method testAffinityAssignmentWithConsistentHashingWithVirtualNodes.
@Test
public void testAffinityAssignmentWithConsistentHashingWithVirtualNodes()
{
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    TestingTransactionHandle transactionHandle = TestingTransactionHandle.create();
    NodeSchedulerConfig nodeSchedulerConfig = new NodeSchedulerConfig()
            .setNodeSelectionHashStrategy(CONSISTENT_HASHING)
            .setMinVirtualNodeCount(5)
            .setMaxSplitsPerNode(20)
            .setIncludeCoordinator(false)
            .setMaxPendingSplitsPerTask(10);
    NodeScheduler nodeScheduler = new NodeScheduler(new LegacyNetworkTopology(), nodeManager, new NodeSelectionStats(), nodeSchedulerConfig, nodeTaskMap, new ThrowingNodeTtlFetcherManager(), new NoOpQueryManager(), new SimpleTtlNodeSelectorConfig());
    NodeSelector nodeSelector = nodeScheduler.createNodeSelector(session, CONNECTOR_ID, 3);
    Set<Split> splits = new HashSet<>();
    IntStream.range(0, 10).forEach(i -> splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestAffinitySplitRemote(i))));
    InternalNode node1 = new InternalNode("other1", URI.create("http://127.0.0.1:11"), NodeVersion.UNKNOWN, false);
    InternalNode node2 = new InternalNode("other2", URI.create("http://127.0.0.1:12"), NodeVersion.UNKNOWN, false);
    InternalNode node3 = new InternalNode("other3", URI.create("http://127.0.0.1:13"), NodeVersion.UNKNOWN, false);
    InternalNode node4 = new InternalNode("other4", URI.create("http://127.0.0.1:14"), NodeVersion.UNKNOWN, false);
    // In setup, nodes 1-3 are added to the node manager.
    // Consistent hashing ring for the nodes:
    // entry0 (-1907319920): node2
    // entry1 (-1028466245): node3
    // entry2 ( -546736344): node2
    // entry3 ( 1127574531): node3
    // entry4 ( 1166245243): node1
    // entry5 ( 2145381619): node1
    SplitPlacementResult splitPlacementResult = nodeSelector.computeAssignments(splits, ImmutableList.of());
    // Hash values for the splits:
    // 0: -1962219106 -> entry0 -> node2
    // 1:   145569539 -> entry3 -> node3
    // 2: -1599101205 -> entry1 -> node3
    // 3:  -165119218 -> entry3 -> node3
    // 4:  1142216720 -> entry4 -> node1
    // 5:  1347620135 -> entry5 -> node1
    // 6:  1232195252 -> entry5 -> node1
    // 7:   427886318 -> entry3 -> node3
    // 8:  1469878697 -> entry5 -> node1
    // 9:   296801082 -> entry3 -> node3
    assertEquals(splitPlacementResult.getAssignments().keySet().size(), 3);
    // node1: splits 4, 5, 6, 8
    Collection<ConnectorSplit> node1Splits = splitPlacementResult.getAssignments().get(node1).stream().map(Split::getConnectorSplit).collect(toImmutableSet());
    // node2: split 0
    Collection<ConnectorSplit> node2Splits = splitPlacementResult.getAssignments().get(node2).stream().map(Split::getConnectorSplit).collect(toImmutableSet());
    // node3: splits 1, 2, 3, 7, 9
    Collection<ConnectorSplit> node3Splits = splitPlacementResult.getAssignments().get(node3).stream().map(Split::getConnectorSplit).collect(toImmutableSet());
    // Scheduling the same splits on the same set of nodes should produce the same assignment
    nodeSelector = nodeScheduler.createNodeSelector(session, CONNECTOR_ID, 3);
    splitPlacementResult = nodeSelector.computeAssignments(splits, ImmutableList.of());
    assertEquals(splitPlacementResult.getAssignments().get(node1).stream().map(Split::getConnectorSplit).collect(toImmutableSet()), node1Splits);
    assertEquals(splitPlacementResult.getAssignments().get(node2).stream().map(Split::getConnectorSplit).collect(toImmutableSet()), node2Splits);
    assertEquals(splitPlacementResult.getAssignments().get(node3).stream().map(Split::getConnectorSplit).collect(toImmutableSet()), node3Splits);
    // After adding node4, the consistent hashing ring for the nodes becomes:
    // entry0 (-1907319920): node2
    // entry1 (-1616890413): node4
    // entry2 (-1028466245): node3
    // entry3 ( -546736344): node2
    // entry4 ( 1127574531): node3
    // entry5 ( 1166245243): node1
    // entry6 ( 1691928386): node4
    // entry7 ( 2145381619): node1
    nodeManager.addNode(CONNECTOR_ID, node4);
    nodeSelector = nodeScheduler.createNodeSelector(session, CONNECTOR_ID, 3);
    splitPlacementResult = nodeSelector.computeAssignments(splits, ImmutableList.of());
    // Hash values for the splits:
    // 0: -1962219106 -> entry0 -> node2
    // 1:   145569539 -> entry4 -> node3
    // 2: -1599101205 -> entry2 -> node3
    // 3:  -165119218 -> entry4 -> node3
    // 4:  1142216720 -> entry5 -> node1
    // 5:  1347620135 -> entry6 -> node4
    // 6:  1232195252 -> entry6 -> node4
    // 7:   427886318 -> entry4 -> node3
    // 8:  1469878697 -> entry6 -> node4
    // 9:   296801082 -> entry4 -> node3
    assertEquals(splitPlacementResult.getAssignments().keySet().size(), 4);
    assertEquals(splitPlacementResult.getAssignments().get(node1).stream().map(Split::getConnectorSplit).map(ConnectorSplit::getSplitIdentifier).collect(toImmutableSet()), ImmutableSet.of(4));
    assertEquals(splitPlacementResult.getAssignments().get(node2).stream().map(Split::getConnectorSplit).collect(toImmutableSet()), node2Splits);
    assertEquals(splitPlacementResult.getAssignments().get(node3).stream().map(Split::getConnectorSplit).map(ConnectorSplit::getSplitIdentifier).collect(toImmutableSet()), ImmutableSet.of(1, 2, 3, 7, 9));
    assertEquals(splitPlacementResult.getAssignments().get(node4).stream().map(Split::getConnectorSplit).map(ConnectorSplit::getSplitIdentifier).collect(toImmutableSet()), ImmutableSet.of(5, 6, 8));
}
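The expected placements in the comments follow from an ordinary consistent-hashing ring: each node is hashed onto several ring positions (virtual nodes), and a split goes to the node owning the first ring entry at or after the split's hash, wrapping around. The generic sketch below illustrates that lookup; the class name and hash function are assumptions, not Presto's implementation:

import java.util.Map;
import java.util.TreeMap;

// Illustrative consistent-hashing ring with virtual nodes.
final class HashRing
{
    private final TreeMap<Integer, String> ring = new TreeMap<>();

    void addNode(String nodeId, int virtualNodeCount)
    {
        for (int i = 0; i < virtualNodeCount; i++) {
            // Each virtual node claims one position on the ring.
            ring.put((nodeId + "#" + i).hashCode(), nodeId);
        }
    }

    String locate(int splitHash)
    {
        // First entry at or after the split's hash; wrap to the lowest entry.
        // Assumes at least one node has been added.
        Map.Entry<Integer, String> entry = ring.ceilingEntry(splitHash);
        return (entry != null ? entry : ring.firstEntry()).getValue();
    }
}

This lookup rule explains the key property the test verifies: adding node4 inserts two new ring entries, so only the splits whose hashes now land in node4's segments (5, 6, and 8) move, while node2's and node3's assignments are untouched.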
Use of com.facebook.presto.execution.scheduler.SplitPlacementResult in project presto by prestodb.
From the class TestNodeScheduler, method testMaxUnacknowledgedSplitsPerTask.
@Test
public void testMaxUnacknowledgedSplitsPerTask()
{
    int maxUnacknowledgedSplitsPerTask = 5;
    nodeSelector = nodeScheduler.createNodeSelector(sessionWithMaxUnacknowledgedSplitsPerTask(maxUnacknowledgedSplitsPerTask), CONNECTOR_ID, Integer.MAX_VALUE);
    TestingTransactionHandle transactionHandle = TestingTransactionHandle.create();
    ImmutableList.Builder<Split> initialSplits = ImmutableList.builder();
    for (int i = 0; i < maxUnacknowledgedSplitsPerTask; i++) {
        initialSplits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote()));
    }
    List<InternalNode> nodes = new ArrayList<>();
    List<MockRemoteTaskFactory.MockRemoteTask> tasks = new ArrayList<>();
    MockRemoteTaskFactory remoteTaskFactory = new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor);
    int counter = 1;
    for (InternalNode node : nodeManager.getActiveConnectorNodes(CONNECTOR_ID)) {
        // Max out the number of unacknowledged splits on each task
        TaskId taskId = new TaskId("test", 1, 0, counter);
        counter++;
        MockRemoteTaskFactory.MockRemoteTask remoteTask = remoteTaskFactory.createTableScanTask(taskId, node, initialSplits.build(), nodeTaskMap.createTaskStatsTracker(node, taskId));
        nodeTaskMap.addTask(node, remoteTask);
        remoteTask.setMaxUnacknowledgedSplits(maxUnacknowledgedSplitsPerTask);
        remoteTask.setUnacknowledgedSplits(maxUnacknowledgedSplitsPerTask);
        nodes.add(node);
        tasks.add(remoteTask);
    }
    // One split per node
    Set<Split> splits = new HashSet<>();
    for (int i = 0; i < nodes.size(); i++) {
        splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestSplitRemote()));
    }
    SplitPlacementResult splitPlacements = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(tasks));
    // No splits should have been placed; the unacknowledged limit was already reached
    assertEquals(splitPlacements.getAssignments().size(), 0);
    assertFalse(splitPlacements.getBlocked().isDone());
    // Unblock one task
    MockRemoteTaskFactory.MockRemoteTask taskOne = tasks.get(0);
    taskOne.finishSplits(1);
    taskOne.setUnacknowledgedSplits(taskOne.getUnacknowledgedPartitionedSplitCount() - 1);
    assertTrue(splitPlacements.getBlocked().isDone());
    // Attempt to schedule again; only the node with the unblocked task should be chosen
    splitPlacements = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(tasks));
    assertEquals(splitPlacements.getAssignments().size(), 1);
    assertTrue(splitPlacements.getAssignments().keySet().contains(nodes.get(0)));
    // Make the first node appear to have no splits; the unacknowledged limit alone should force the splits to spread across nodes
    taskOne.clearSplits();
    // Give every task room for one more unacknowledged split
    tasks.forEach(task -> task.setUnacknowledgedSplits(maxUnacknowledgedSplitsPerTask - 1));
    splitPlacements = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(tasks));
    // One split placed on each node
    assertEquals(splitPlacements.getAssignments().size(), nodes.size());
    assertTrue(splitPlacements.getAssignments().keySet().containsAll(nodes));
}
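The backpressure exercised here is per task: a node stops receiving new splits once its task's unacknowledged split count reaches the session limit, and becomes eligible again as acknowledgements arrive. A one-line sketch of that eligibility check, using the same accessor the test itself calls; the helper is illustrative:

import com.facebook.presto.execution.RemoteTask;

// Illustrative: a task may accept more splits only while below the limit.
static boolean canReceiveMoreSplits(RemoteTask task, int maxUnacknowledgedSplitsPerTask)
{
    return task.getUnacknowledgedPartitionedSplitCount() < maxUnacknowledgedSplitsPerTask;
}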
Use of com.facebook.presto.execution.scheduler.SplitPlacementResult in project presto by prestodb.
From the class TestNodeScheduler, method testHardAffinityAssignment.
@Test
public void testHardAffinityAssignment()
{
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    TestingTransactionHandle transactionHandle = TestingTransactionHandle.create();
    NodeSchedulerConfig nodeSchedulerConfig = new NodeSchedulerConfig()
            .setMaxSplitsPerNode(20)
            .setIncludeCoordinator(false)
            .setMaxPendingSplitsPerTask(10);
    NodeScheduler nodeScheduler = new NodeScheduler(new LegacyNetworkTopology(), nodeManager, new NodeSelectionStats(), nodeSchedulerConfig, nodeTaskMap, new ThrowingNodeTtlFetcherManager(), new NoOpQueryManager(), new SimpleTtlNodeSelectorConfig());
    NodeSelector nodeSelector = nodeScheduler.createNodeSelector(session, CONNECTOR_ID, 3);
    Set<Split> splits = new HashSet<>();
    // Add three hard-affinity splits; every placement must honor the split's required node
    splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestHardAffinitySplitRemote()));
    splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestHardAffinitySplitRemote()));
    splits.add(new Split(CONNECTOR_ID, transactionHandle, new TestHardAffinitySplitRemote()));
    SplitPlacementResult splitPlacementResult = nodeSelector.computeAssignments(splits, ImmutableList.of());
    for (Split split : splitPlacementResult.getAssignments().values()) {
        assertTrue(split.getSplitContext().isCacheable());
    }
}
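Unlike the soft case in the first example, a hard-affinity split may only run on the nodes it names, and the test further checks that each placed split's SplitContext is marked cacheable. The dispatch rule looks roughly like the sketch below; the parameter and helper names are assumptions for illustration, not prestodb's exact SPI:

import com.facebook.presto.spi.HostAddress;
import java.util.List;

// Illustrative only: hard affinity restricts the candidate set to the
// split's preferred hosts, while soft affinity merely biases toward them.
static List<HostAddress> eligibleHosts(boolean hardAffinity, List<HostAddress> preferredHosts, List<HostAddress> allNodes)
{
    if (hardAffinity) {
        return preferredHosts; // must run on one of these hosts
    }
    return allNodes; // any node may be chosen
}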