Use of io.trino.metadata.InternalNode in project trino by trinodb.
Example from the class NodePartitioningManager, method getNodePartitioningMap:
public NodePartitionMap getNodePartitioningMap(Session session, PartitioningHandle partitioningHandle)
{
    requireNonNull(session, "session is null");
    requireNonNull(partitioningHandle, "partitioningHandle is null");

    if (partitioningHandle.getConnectorHandle() instanceof SystemPartitioningHandle) {
        return ((SystemPartitioningHandle) partitioningHandle.getConnectorHandle()).getNodePartitionMap(session, nodeScheduler);
    }

    ConnectorBucketNodeMap connectorBucketNodeMap = getConnectorBucketNodeMap(session, partitioningHandle);

    // safety check for crazy partitioning
    checkArgument(connectorBucketNodeMap.getBucketCount() < 1_000_000, "Too many buckets in partitioning: %s", connectorBucketNodeMap.getBucketCount());

    List<InternalNode> bucketToNode;
    if (connectorBucketNodeMap.hasFixedMapping()) {
        bucketToNode = getFixedMapping(connectorBucketNodeMap);
    }
    else {
        CatalogName catalogName = partitioningHandle.getConnectorId()
                .orElseThrow(() -> new IllegalArgumentException("No connector ID for partitioning handle: " + partitioningHandle));
        bucketToNode = createArbitraryBucketToNode(
                nodeScheduler.createNodeSelector(session, Optional.of(catalogName)).allNodes(),
                connectorBucketNodeMap.getBucketCount());
    }

    int[] bucketToPartition = new int[connectorBucketNodeMap.getBucketCount()];
    BiMap<InternalNode, Integer> nodeToPartition = HashBiMap.create();
    int nextPartitionId = 0;
    for (int bucket = 0; bucket < bucketToNode.size(); bucket++) {
        InternalNode node = bucketToNode.get(bucket);
        Integer partitionId = nodeToPartition.get(node);
        if (partitionId == null) {
            partitionId = nextPartitionId++;
            nodeToPartition.put(node, partitionId);
        }
        bucketToPartition[bucket] = partitionId;
    }

    List<InternalNode> partitionToNode = IntStream.range(0, nodeToPartition.size())
            .mapToObj(partitionId -> nodeToPartition.inverse().get(partitionId))
            .collect(toImmutableList());

    return new NodePartitionMap(partitionToNode, bucketToPartition, getSplitToBucket(session, partitioningHandle));
}
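The helper createArbitraryBucketToNode referenced above is not shown in this excerpt. A minimal sketch of what such a helper could do, assuming a simple shuffled round-robin assignment of buckets to the available nodes (the method body and its exact distribution strategy are assumptions, not the actual Trino implementation):

    import static com.google.common.base.Preconditions.checkArgument;

    import com.google.common.collect.ImmutableList;
    import io.trino.metadata.InternalNode;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    // Sketch only: cycle through a shuffled copy of the nodes so that buckets
    // are spread evenly rather than piling up on a single node.
    private static List<InternalNode> createArbitraryBucketToNode(List<InternalNode> nodes, int bucketCount)
    {
        checkArgument(!nodes.isEmpty(), "nodes is empty");
        List<InternalNode> shuffled = new ArrayList<>(nodes);
        Collections.shuffle(shuffled);

        ImmutableList.Builder<InternalNode> bucketToNode = ImmutableList.builder();
        for (int bucket = 0; bucket < bucketCount; bucket++) {
            bucketToNode.add(shuffled.get(bucket % shuffled.size()));
        }
        return bucketToNode.build();
    }

Whatever the mapping, the loop in the method above then collapses bucketToNode into the compact bucketToPartition array, assigning one partition id per distinct node and reusing it for every bucket placed on that node.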
Use of io.trino.metadata.InternalNode in project trino by trinodb.
Example from the class TestNodeScheduler, method testEquateDistribution:
@Test
public void testEquateDistribution()
{
    InternalNode node1 = new InternalNode("node1", URI.create("http://10.0.0.1:11"), NodeVersion.UNKNOWN, false);
    nodeManager.addNode(CONNECTOR_ID, node1);
    InternalNode node2 = new InternalNode("node2", URI.create("http://10.0.0.1:12"), NodeVersion.UNKNOWN, false);
    nodeManager.addNode(CONNECTOR_ID, node2);
    InternalNode node3 = new InternalNode("node3", URI.create("http://10.0.0.1:13"), NodeVersion.UNKNOWN, false);
    nodeManager.addNode(CONNECTOR_ID, node3);
    InternalNode node4 = new InternalNode("node4", URI.create("http://10.0.0.1:14"), NodeVersion.UNKNOWN, false);
    nodeManager.addNode(CONNECTOR_ID, node4);

    Set<Split> splits = new LinkedHashSet<>();
    // 20 splits with node1 as local node to be assigned in the first iteration of computeAssignments
    for (int i = 0; i < 20; i++) {
        splits.add(new Split(CONNECTOR_ID, new TestSplitLocal(), Lifespan.taskWide()));
    }

    // check that the splits are spread across all nodes, with the local node (node1) receiving the extra splits
    Multimap<InternalNode, Split> assignment = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values())).getAssignments();
    assertEquals(assignment.size(), 20);
    assertEquals(assignment.keySet().size(), 4);
    assertEquals(assignment.get(node1).size(), 8);
    assertEquals(assignment.get(node2).size(), 4);
    assertEquals(assignment.get(node3).size(), 4);
    assertEquals(assignment.get(node4).size(), 4);
}
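The fixture TestSplitLocal is not shown in this excerpt. A hypothetical sketch of such a split, assuming it simply advertises node1's address (10.0.0.1:11) so the scheduler treats node1 as the split's local node (the class shape and return values are assumptions, not the actual test fixture):

    import com.google.common.collect.ImmutableList;
    import io.trino.spi.HostAddress;
    import io.trino.spi.connector.ConnectorSplit;
    import java.util.List;

    // Hypothetical fixture: remotely accessible, but carrying node1's address,
    // so the scheduler prefers node1 while still being free to place the split elsewhere.
    private static class TestSplitLocal
            implements ConnectorSplit
    {
        @Override
        public boolean isRemotelyAccessible()
        {
            return true;
        }

        @Override
        public List<HostAddress> getAddresses()
        {
            return ImmutableList.of(HostAddress.fromString("10.0.0.1:11"));
        }

        @Override
        public Object getInfo()
        {
            return this;
        }
    }

Because all 20 splits name node1 as their local node, node1 ends up with the extra splits (8) while the remaining splits are spread evenly (4 each) across the other three nodes, as the assertions above verify.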
Use of io.trino.metadata.InternalNode in project trino by trinodb.
Example from the class TestNodeScheduler, method testTaskCompletion:
@Test
public void testTaskCompletion()
        throws Exception
{
    setUpNodes();
    MockRemoteTaskFactory remoteTaskFactory = new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor);
    InternalNode chosenNode = Iterables.get(nodeManager.getActiveConnectorNodes(CONNECTOR_ID), 0);
    TaskId taskId = new TaskId(new StageId("test", 1), 1, 0);
    RemoteTask remoteTask = remoteTaskFactory.createTableScanTask(
            taskId,
            chosenNode,
            ImmutableList.of(new Split(CONNECTOR_ID, new TestSplitRemote(), Lifespan.taskWide())),
            nodeTaskMap.createPartitionedSplitCountTracker(chosenNode, taskId));
    nodeTaskMap.addTask(chosenNode, remoteTask);
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode).getCount(), 1);

    remoteTask.abort();
    // Sleep until cache expires
    MILLISECONDS.sleep(100);
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode), PartitionedSplitsInfo.forZeroSplits());

    remoteTask.abort();
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode), PartitionedSplitsInfo.forZeroSplits());
}
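The fixed 100 ms sleep exists because, per the comment in the test, the split counts reported by nodeTaskMap are briefly cached after a task is aborted. A minimal sketch of a polling variant, not part of the original test and built only from the calls already used above, which waits for the count to clear instead of sleeping a fixed interval:

    // Sketch only (not the original test): poll for up to ~1 s, then let the
    // existing assertEquals report a failure if the count never reaches zero.
    for (int i = 0; i < 100 && nodeTaskMap.getPartitionedSplitsOnNode(chosenNode).getCount() > 0; i++) {
        MILLISECONDS.sleep(10);
    }
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode), PartitionedSplitsInfo.forZeroSplits());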
Use of io.trino.metadata.InternalNode in project trino by trinodb.
Example from the class TestNodeScheduler, method testSplitCount:
@Test
public void testSplitCount()
{
    setUpNodes();
    MockRemoteTaskFactory remoteTaskFactory = new MockRemoteTaskFactory(remoteTaskExecutor, remoteTaskScheduledExecutor);
    InternalNode chosenNode = Iterables.get(nodeManager.getActiveConnectorNodes(CONNECTOR_ID), 0);

    // first task carries two splits, second task carries one
    TaskId taskId1 = new TaskId(new StageId("test", 1), 1, 0);
    RemoteTask remoteTask1 = remoteTaskFactory.createTableScanTask(
            taskId1,
            chosenNode,
            ImmutableList.of(
                    new Split(CONNECTOR_ID, new TestSplitRemote(), Lifespan.taskWide()),
                    new Split(CONNECTOR_ID, new TestSplitRemote(), Lifespan.taskWide())),
            nodeTaskMap.createPartitionedSplitCountTracker(chosenNode, taskId1));
    TaskId taskId2 = new TaskId(new StageId("test", 1), 2, 0);
    RemoteTask remoteTask2 = remoteTaskFactory.createTableScanTask(
            taskId2,
            chosenNode,
            ImmutableList.of(new Split(CONNECTOR_ID, new TestSplitRemote(), Lifespan.taskWide())),
            nodeTaskMap.createPartitionedSplitCountTracker(chosenNode, taskId2));

    nodeTaskMap.addTask(chosenNode, remoteTask1);
    nodeTaskMap.addTask(chosenNode, remoteTask2);
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode).getCount(), 3);

    // aborting a task removes its splits from the node's count
    remoteTask1.abort();
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode).getCount(), 1);
    remoteTask2.abort();
    assertEquals(nodeTaskMap.getPartitionedSplitsOnNode(chosenNode), PartitionedSplitsInfo.forZeroSplits());
}
Use of io.trino.metadata.InternalNode in project trino by trinodb.
Example from the class TestNodeScheduler, method testScheduleLocal:
@Test
public void testScheduleLocal()
{
    setUpNodes();
    Split split = new Split(CONNECTOR_ID, new TestSplitLocallyAccessible(), Lifespan.taskWide());
    Set<Split> splits = ImmutableSet.of(split);

    Map.Entry<InternalNode, Split> assignment = getOnlyElement(nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values())).getAssignments().entries());
    assertEquals(assignment.getKey().getHostAndPort(), split.getAddresses().get(0));
    assertEquals(assignment.getValue(), split);
}
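TestSplitLocallyAccessible is likewise not shown in this excerpt. A hypothetical sketch, assuming it declares itself not remotely accessible and pins a single host address, which is why the assertion above can require that the assigned node's host-and-port equals the split's first address (the class shape, address, and return values are assumptions):

    import com.google.common.collect.ImmutableList;
    import io.trino.spi.HostAddress;
    import io.trino.spi.connector.ConnectorSplit;
    import java.util.List;

    // Hypothetical fixture: not remotely accessible, so the scheduler may only
    // place the split on the node that owns the advertised address.
    private static class TestSplitLocallyAccessible
            implements ConnectorSplit
    {
        @Override
        public boolean isRemotelyAccessible()
        {
            return false;
        }

        @Override
        public List<HostAddress> getAddresses()
        {
            return ImmutableList.of(HostAddress.fromString("10.0.0.1:11"));
        }

        @Override
        public Object getInfo()
        {
            return this;
        }
    }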