Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.
From the class TestDynamicFilterUtil, the method registerDf:
public static void registerDf(String filterId, Session session, JoinNode.DistributionType joinType, DynamicFilterService dynamicFilterService) {
    JoinNode node = mock(JoinNode.class);
    HashMap<String, Symbol> dfs = new HashMap<>();
    List<JoinNode.EquiJoinClause> criteria = new ArrayList<JoinNode.EquiJoinClause>();
    Symbol right = new Symbol("rightCol");
    Symbol left = new Symbol("leftCol");
    JoinNode.EquiJoinClause clause = new JoinNode.EquiJoinClause(left, right);
    criteria.add(clause);
    dfs.put(filterId, right);
    when(node.getCriteria()).thenReturn(criteria);
    when(node.getDynamicFilters()).thenReturn(dfs);
    when(node.getDistributionType()).thenReturn(Optional.of(joinType));
    RemoteSourceNode leftNode = mock(RemoteSourceNode.class);
    when(node.getLeft()).thenReturn(leftNode);
    HashSet<TaskId> tasks = new HashSet<>();
    tasks.add(new TaskId("task1.0"));
    tasks.add(new TaskId("task1.1"));
    StageStateMachine stateMachine = mock(StageStateMachine.class);
    when(stateMachine.getSession()).thenReturn(session);
    InternalNode worker = mock(InternalNode.class);
    InternalNode worker2 = mock(InternalNode.class);
    HashSet<InternalNode> workers = new HashSet<>();
    when(worker.getNodeIdentifier()).thenReturn("w1");
    when(worker2.getNodeIdentifier()).thenReturn("w2");
    workers.add(worker);
    workers.add(worker2);
    dynamicFilterService.registerTasks(node, tasks, workers, stateMachine);
}
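For context, a hedged sketch of how a test might drive this helper. The test method name, the createDynamicFilterService() factory, and the use of testSessionBuilder() are illustrative assumptions and not part of the snippet above.

@Test
public void testRegisterPartitionedJoinFilter() {
    // build a throwaway session and a service instance for the test (assumed helpers)
    Session session = testSessionBuilder().build();
    DynamicFilterService dynamicFilterService = createDynamicFilterService();
    // register filter "df1" for a partitioned join; the service now tracks the mocked tasks and workers
    TestDynamicFilterUtil.registerDf("df1", session, JoinNode.DistributionType.PARTITIONED, dynamicFilterService);
}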
Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.
From the class DistributedQueryRunner, the method isConnectionVisibleToAllNodes:
private boolean isConnectionVisibleToAllNodes(CatalogName catalogName) {
    for (TestingPrestoServer server : servers) {
        server.refreshNodes();
        Set<InternalNode> activeNodesWithConnector = server.getActiveNodesWithConnector(catalogName);
        if (activeNodesWithConnector.size() != servers.size()) {
            return false;
        }
    }
    return true;
}
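This predicate is typically polled after a catalog is registered, so that tests do not issue queries before every node has announced the connector. A minimal sketch of such a wait loop follows; the method name waitForConnectorVisibility, the one-minute timeout, and the java.util.concurrent.TimeUnit import are assumptions rather than part of DistributedQueryRunner's actual API.

// Hypothetical polling loop built on the predicate above.
private void waitForConnectorVisibility(CatalogName catalogName)
        throws InterruptedException {
    long deadline = System.nanoTime() + TimeUnit.MINUTES.toNanos(1);
    while (!isConnectionVisibleToAllNodes(catalogName)) {
        if (System.nanoTime() > deadline) {
            throw new IllegalStateException("Connector " + catalogName + " not visible on all nodes after 1 minute");
        }
        Thread.sleep(100);
    }
}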
Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.
From the class SqlStageExecution, the method scheduleTask:
private synchronized RemoteTask scheduleTask(InternalNode node, TaskId taskId, String instanceId, Multimap<PlanNodeId, Split> sourceSplits, OptionalInt totalPartitions) {
    checkArgument(!allTasks.contains(taskId), "A task with id %s already exists", taskId);
    if (SystemSessionProperties.isSnapshotEnabled(stateMachine.getSession())) {
        // Snapshot: inform snapshot manager so it knows about all tasks,
        // and can determine if a snapshot is complete for all tasks.
        snapshotManager.addNewTask(taskId);
    }
    ImmutableMultimap.Builder<PlanNodeId, Split> initialSplits = ImmutableMultimap.builder();
    initialSplits.putAll(sourceSplits);
    sourceTasks.forEach((planNodeId, task) -> {
        if (task.getTaskStatus().getState() != TaskState.FINISHED) {
            initialSplits.put(planNodeId, newConnectSplit(taskId, task));
        }
    });
    OutputBuffers localOutputBuffers = this.outputBuffers.get();
    checkState(localOutputBuffers != null, "Initial output buffers must be set before a task can be scheduled");
    RemoteTask task = remoteTaskFactory.createRemoteTask(stateMachine.getSession(), taskId, instanceId, node, stateMachine.getFragment(), initialSplits.build(), totalPartitions, localOutputBuffers, nodeTaskMap.createPartitionedSplitCountTracker(node, taskId), summarizeTaskInfo, Optional.ofNullable(parentId), snapshotManager);
    completeSources.forEach(task::noMoreSplits);
    allTasks.add(taskId);
    tasks.computeIfAbsent(node, key -> newConcurrentHashSet()).add(task);
    nodeTaskMap.addTask(node, task);
    task.addStateChangeListener(new StageTaskListener());
    task.addFinalTaskInfoListener(this::updateFinalTaskInfo);
    if (!stateMachine.getState().isDone()) {
        task.start();
    } else {
        // stage finished while we were scheduling this task
        task.abort();
    }
    return task;
}
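For orientation, a hedged sketch of how a stage scheduler might call this method when placing one task per selected node. The selectedNodes collection, the instanceId value, and the task counter are illustrative; only the scheduleTask signature comes from the snippet above.

// Illustrative caller: schedule one task on each selected node with no initial splits.
int nextTaskId = 0;
for (InternalNode node : selectedNodes) {
    TaskId taskId = new TaskId(stateMachine.getStageId(), nextTaskId++);
    RemoteTask task = scheduleTask(node, taskId, instanceId, ImmutableMultimap.of(), OptionalInt.empty());
    // the returned task is already started (or aborted if the stage finished in the meantime)
}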
Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.
From the class DistributedResourceGroupTemp, the method refreshGlobalValues:
private void refreshGlobalValues() {
    synchronized (root) {
        globalTotalRunningQueries = localRunningQueries.size();
        globalTotalQueuedQueries = localQueuedQueries.size();
        globalDescendantQueuedQueries = localDescendantQueuedQueries;
        globalDescendantRunningQueries = localDescendantRunningQueries;
        globalCachedMemoryUsageBytes = localCachedMemoryUsageBytes;
        globalCpuUsageMillis = localCpuUsageMillis;
        internalNodeManager.refreshNodes();
        try {
            for (InternalNode coordinator : internalNodeManager.getCoordinators()) {
                if (coordinator.equals(internalNodeManager.getCurrentNode())) {
                    continue;
                }
                StateMap<String, String> resourceGroupMap = ((StateMap) stateStore.getOrCreateStateCollection(createCoordinatorCollectionName(coordinator), StateCollection.Type.MAP));
                DistributedResourceGroupAggrStats groupAggrStats = resourceGroupMap.containsKey(getId().toString()) ? MAPPER.readerFor(DistributedResourceGroupAggrStats.class).readValue(resourceGroupMap.get(getId().toString())) : null;
                if (groupAggrStats != null) {
                    globalTotalRunningQueries += groupAggrStats.getRunningQueries();
                    globalTotalQueuedQueries += groupAggrStats.getQueuedQueries();
                    globalDescendantQueuedQueries += groupAggrStats.getDescendantQueuedQueries();
                    globalDescendantRunningQueries += groupAggrStats.getDescendantRunningQueries();
                    globalCachedMemoryUsageBytes += groupAggrStats.getCachedMemoryUsageBytes();
                    // Note: the original line added getCachedMemoryUsageBytes() to the CPU counter,
                    // which looks like a copy-paste slip; the CPU-time getter is presumably intended here.
                    globalCpuUsageMillis += groupAggrStats.getCpuUsageMillis();
                }
            }
        } catch (JsonProcessingException e) {
            throw new RuntimeException(String.format("Error fetching resource group state with group id = %s, caused by ObjectMapper: %s", id, e.getMessage()));
        }
    }
}
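The refreshed global counters are what the group's admission logic compares against its configured limits. A hedged sketch of how they would typically be consulted follows; canQueueMore is an illustrative method name and maxQueuedQueries is an assumed field of the group, neither taken from the snippet above.

// Illustrative admission check using the refreshed global counters.
private boolean canQueueMore() {
    synchronized (root) {
        refreshGlobalValues();
        return globalTotalQueuedQueries + globalDescendantQueuedQueries < maxQueuedQueries;
    }
}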
Use of io.prestosql.metadata.InternalNode in project hetu-core by openlookeng.
From the class NodePartitioningManager, the method getNodePartitioningMap:
public NodePartitionMap getNodePartitioningMap(Session session, PartitioningHandle partitioningHandle, boolean isSnapshotEnabled, Integer nodeCount) {
    requireNonNull(session, "session is null");
    requireNonNull(partitioningHandle, "partitioningHandle is null");
    if (partitioningHandle.getConnectorHandle() instanceof SystemPartitioningHandle) {
        return ((SystemPartitioningHandle) partitioningHandle.getConnectorHandle()).getNodePartitionMap(session, nodeScheduler, isSnapshotEnabled, nodeCount);
    }
    CatalogName catalogName = partitioningHandle.getConnectorId().orElseThrow(() -> new IllegalArgumentException("No connector ID for partitioning handle: " + partitioningHandle));
    ConnectorNodePartitioningProvider partitioningProvider = partitioningProviders.get(catalogName);
    checkArgument(partitioningProvider != null, "No partitioning provider for connector %s", catalogName);
    ConnectorBucketNodeMap connectorBucketNodeMap = getConnectorBucketNodeMap(session, partitioningHandle);
    // safety check for crazy partitioning
    checkArgument(connectorBucketNodeMap.getBucketCount() < 1_000_000, "Too many buckets in partitioning: %s", connectorBucketNodeMap.getBucketCount());
    List<InternalNode> bucketToNode;
    if (connectorBucketNodeMap.hasFixedMapping()) {
        bucketToNode = getFixedMapping(connectorBucketNodeMap);
    } else {
        NodeSelector nodeSelector = nodeScheduler.createNodeSelector(catalogName, false, null);
        List<InternalNode> nodes;
        if (isSnapshotEnabled) {
            Integer count = nodeCount;
            if (count == null) {
                // Initial schedule: reserve some nodes
                count = calculateTaskCount(nodeSelector.selectableNodeCount());
            }
            nodes = nodeSelector.selectRandomNodes(count);
            checkCondition(nodes.size() == count, NO_NODES_AVAILABLE, "Snapshot: not enough worker nodes to resume expected number of tasks: " + count);
        } else {
            nodes = nodeSelector.allNodes();
        }
        bucketToNode = createArbitraryBucketToNode(nodes, connectorBucketNodeMap.getBucketCount());
    }
    int[] bucketToPartition = new int[connectorBucketNodeMap.getBucketCount()];
    BiMap<InternalNode, Integer> nodeToPartition = HashBiMap.create();
    int nextPartitionId = 0;
    for (int bucket = 0; bucket < bucketToNode.size(); bucket++) {
        InternalNode node = bucketToNode.get(bucket);
        Integer partitionId = nodeToPartition.get(node);
        if (partitionId == null) {
            partitionId = nextPartitionId++;
            nodeToPartition.put(node, partitionId);
        }
        bucketToPartition[bucket] = partitionId;
    }
    List<InternalNode> partitionToNode = IntStream.range(0, nodeToPartition.size()).mapToObj(partitionId -> nodeToPartition.inverse().get(partitionId)).collect(toImmutableList());
    return new NodePartitionMap(partitionToNode, bucketToPartition, getSplitToBucket(session, partitioningHandle));
}
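A hedged sketch of how a caller typically obtains and consumes the resulting map when laying out a bucketed stage. The nodePartitioningManager and fragment references are illustrative stand-ins for whatever the calling scheduler holds; the accessor calls on NodePartitionMap follow its usual shape but should be checked against the project's version.

// Illustrative consumer of the node partition map (initial schedule, snapshots disabled).
NodePartitionMap map = nodePartitioningManager.getNodePartitioningMap(session, fragment.getPartitioning(), false, null);
List<InternalNode> partitionToNode = map.getPartitionToNode();   // one node per output partition
int[] bucketToPartition = map.getBucketToPartition();            // connector bucket -> partition index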