Use of io.trino.spi.TrinoException in project trino by trinodb.
The class TopologyAwareNodeSelector, method computeAssignments:
@Override
public SplitPlacementResult computeAssignments(Set<Split> splits, List<RemoteTask> existingTasks)
{
    NodeMap nodeMap = this.nodeMap.get().get();
    Multimap<InternalNode, Split> assignment = HashMultimap.create();
    NodeAssignmentStats assignmentStats = new NodeAssignmentStats(nodeTaskMap, nodeMap, existingTasks);

    int[] topologicCounters = new int[topologicalSplitCounters.size()];
    Set<NetworkLocation> filledLocations = new HashSet<>();
    Set<InternalNode> blockedExactNodes = new HashSet<>();
    boolean splitWaitingForAnyNode = false;
    for (Split split : splits) {
        SplitWeight splitWeight = split.getSplitWeight();
        if (!split.isRemotelyAccessible()) {
            List<InternalNode> candidateNodes = selectExactNodes(nodeMap, split.getAddresses(), includeCoordinator);
            if (candidateNodes.isEmpty()) {
                log.debug("No nodes available to schedule %s. Available nodes %s", split, nodeMap.getNodesByHost().keys());
                throw new TrinoException(NO_NODES_AVAILABLE, "No nodes available to run query");
            }
            InternalNode chosenNode = bestNodeSplitCount(splitWeight, candidateNodes.iterator(), minCandidates, maxPendingSplitsWeightPerTask, assignmentStats);
            if (chosenNode != null) {
                assignment.put(chosenNode, split);
                assignmentStats.addAssignedSplit(chosenNode, splitWeight);
            }
            // Exact node set won't matter, if a split is waiting for any node
            else if (!splitWaitingForAnyNode) {
                blockedExactNodes.addAll(candidateNodes);
            }
            continue;
        }

        InternalNode chosenNode = null;
        int depth = topologicalSplitCounters.size() - 1;
        int chosenDepth = 0;
        Set<NetworkLocation> locations = new HashSet<>();
        for (HostAddress host : split.getAddresses()) {
            locations.add(networkTopology.locate(host));
        }
        if (locations.isEmpty()) {
            // Add the root location
            locations.add(ROOT_LOCATION);
            depth = 0;
        }
        // Try each address at progressively shallower network locations
        for (int i = depth; i >= 0 && chosenNode == null; i--) {
            for (NetworkLocation location : locations) {
                // For example, locations which couldn't be located will be at the "root" location
                if (location.getSegments().size() < i) {
                    continue;
                }
                location = location.subLocation(0, i);
                if (filledLocations.contains(location)) {
                    continue;
                }
                Set<InternalNode> nodes = nodeMap.getWorkersByNetworkPath().get(location);
                chosenNode = bestNodeSplitCount(splitWeight, new ResettableRandomizedIterator<>(nodes), minCandidates, calculateMaxPendingSplitsWeightPerTask(i, depth), assignmentStats);
                if (chosenNode != null) {
                    chosenDepth = i;
                    break;
                }
                filledLocations.add(location);
            }
        }
        if (chosenNode != null) {
            assignment.put(chosenNode, split);
            assignmentStats.addAssignedSplit(chosenNode, splitWeight);
            topologicCounters[chosenDepth]++;
        }
        else {
            splitWaitingForAnyNode = true;
        }
    }
    for (int i = 0; i < topologicCounters.length; i++) {
        if (topologicCounters[i] > 0) {
            topologicalSplitCounters.get(i).update(topologicCounters[i]);
        }
    }

    ListenableFuture<Void> blocked;
    long maxPendingForWildcardNetworkAffinity = calculateMaxPendingSplitsWeightPerTask(0, topologicalSplitCounters.size() - 1);
    if (splitWaitingForAnyNode) {
        blocked = toWhenHasSplitQueueSpaceFuture(existingTasks, calculateLowWatermark(maxPendingForWildcardNetworkAffinity));
    }
    else {
        blocked = toWhenHasSplitQueueSpaceFuture(blockedExactNodes, existingTasks, calculateLowWatermark(maxPendingForWildcardNetworkAffinity));
    }
    return new SplitPlacementResult(blocked, assignment);
}
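For context, TrinoException pairs a machine-readable error code (an ErrorCodeSupplier, such as the StandardErrorCode values used throughout these snippets) with a human-readable message. Below is a minimal sketch of the throw-site pattern above; the helper class and method name are hypothetical, not part of Trino:

import io.trino.spi.TrinoException;
import java.util.List;
import static io.trino.spi.StandardErrorCode.NO_NODES_AVAILABLE;

final class NodeSelectionErrors
{
    private NodeSelectionErrors() {}

    // Hypothetical helper mirroring the guard in computeAssignments: fail with a
    // well-known error code when no candidate node can host a split.
    static <T> T requireAnyNode(List<T> candidateNodes)
    {
        if (candidateNodes.isEmpty()) {
            throw new TrinoException(NO_NODES_AVAILABLE, "No nodes available to run query");
        }
        return candidateNodes.get(0);
    }
}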
Use of io.trino.spi.TrinoException in project trino by trinodb.
The class AbstractMinMaxByNAggregationFunction, method input:
public static void input(MethodHandle keyComparisonMethod, Type valueType, Type keyType, MinMaxByNState state, Block value, Block key, long n, int blockIndex)
{
    TypedKeyValueHeap heap = state.getTypedKeyValueHeap();
    if (heap == null) {
        if (n <= 0) {
            throw new TrinoException(INVALID_FUNCTION_ARGUMENT, "third argument of max_by/min_by must be a positive integer");
        }
        checkCondition(n <= MAX_NUMBER_OF_VALUES, INVALID_FUNCTION_ARGUMENT, "third argument of max_by/min_by must be less than or equal to %s; found %s", MAX_NUMBER_OF_VALUES, n);
        heap = new TypedKeyValueHeap(keyComparisonMethod, keyType, valueType, toIntExact(n));
        state.setTypedKeyValueHeap(heap);
    }

    long startSize = heap.getEstimatedSize();
    if (!key.isNull(blockIndex)) {
        heap.add(key, value, blockIndex);
    }
    state.addMemoryUsage(heap.getEstimatedSize() - startSize);
}
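The checkCondition call above is a guard that raises TrinoException when its predicate fails. A minimal sketch of what such a guard expands to, assuming a format-string signature (the real helper lives in Trino's utility code; this version is for illustration):

import io.trino.spi.ErrorCodeSupplier;
import io.trino.spi.TrinoException;
import static java.lang.String.format;

// Sketch only: raises TrinoException with the given error code when the
// condition does not hold, formatting the message with the trailing args.
static void checkCondition(boolean condition, ErrorCodeSupplier errorCode, String formatString, Object... args)
{
    if (!condition) {
        throw new TrinoException(errorCode, format(formatString, args));
    }
}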
Use of io.trino.spi.TrinoException in project trino by trinodb.
The class SingleTypedHistogram, method rehash:
private void rehash()
{
    long newCapacityLong = hashCapacity * 2L;
    if (newCapacityLong > Integer.MAX_VALUE) {
        throw new TrinoException(GENERIC_INSUFFICIENT_RESOURCES, "Size of hash table cannot exceed 1 billion entries");
    }
    int newCapacity = (int) newCapacityLong;

    int newMask = newCapacity - 1;
    IntBigArray newHashPositions = new IntBigArray(-1);
    newHashPositions.ensureCapacity(newCapacity);
    for (int i = 0; i < values.getPositionCount(); i++) {
        // find an empty slot for the address
        int hashPosition = getBucketId(hashCodeOperator.hashCodeNullSafe(values, i), newMask);
        while (newHashPositions.get(hashPosition) != -1) {
            hashPosition = (hashPosition + 1) & newMask;
        }
        // record the mapping
        newHashPositions.set(hashPosition, i);
    }

    hashCapacity = newCapacity;
    mask = newMask;
    maxFill = calculateMaxFill(newCapacity);
    hashPositions = newHashPositions;
    this.counts.ensureCapacity(maxFill);
}
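This rehash doubles a power-of-two capacity, so newCapacity - 1 works as a bit mask and (hashPosition + 1) & newMask wraps linear probing around the table. A self-contained sketch of the same strategy on plain arrays (names and types here are illustrative, not Trino's):

import java.util.Arrays;

// Rebuild the slot table at double capacity: recompute each entry's slot from
// its hash under the new mask and resolve collisions by linear probing.
static int[] rehashPositions(long[] entryHashes, int entryCount, int newCapacity)
{
    int newMask = newCapacity - 1;          // valid because newCapacity is a power of two
    int[] newPositions = new int[newCapacity];
    Arrays.fill(newPositions, -1);          // -1 marks an empty slot
    for (int i = 0; i < entryCount; i++) {
        int slot = (int) (entryHashes[i] & newMask);
        while (newPositions[slot] != -1) {
            slot = (slot + 1) & newMask;    // wrap around the table
        }
        newPositions[slot] = i;
    }
    return newPositions;
}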
Use of io.trino.spi.TrinoException in project trino by trinodb.
The class ValueStore, method rehash:
@VisibleForTesting
void rehash()
{
    ++rehashCount;

    long newBucketCountLong = bucketCount * 2L;
    if (newBucketCountLong > Integer.MAX_VALUE) {
        throw new TrinoException(GENERIC_INSUFFICIENT_RESOURCES, "Size of hash table cannot exceed " + Integer.MAX_VALUE + " entries (" + newBucketCountLong + ")");
    }

    int newBucketCount = (int) newBucketCountLong;
    int newMask = newBucketCount - 1;
    IntBigArray newBuckets = new IntBigArray(-1);
    newBuckets.ensureCapacity(newBucketCount);

    for (int i = 0; i < values.getPositionCount(); i++) {
        long valueHash = valueHashes.get(i);
        int bucketId = getBucketId(valueHash, newMask);
        int probeCount = 1;
        while (newBuckets.get(bucketId) != EMPTY_BUCKET) {
            int probe = nextProbe(probeCount);
            bucketId = nextBucketId(bucketId, newMask, probe);
            probeCount++;
        }
        // record the mapping
        newBuckets.set(bucketId, i);
    }

    buckets = newBuckets;
    // worst case is every bucket has a unique value, so preemptively keep this large enough to have a value for every bucket
    // TODO: could optimize the growth algorithm to resize this only when necessary; this wastes memory but guarantees that if every value has a distinct hash, we have space
    valueHashes.ensureCapacity(newBucketCount);
    bucketCount = newBucketCount;
    maxFill = calculateMaxFill(newBucketCount, MAX_FILL_RATIO);
    mask = newMask;
}
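Unlike the linear probing in SingleTypedHistogram, ValueStore advances by a computed probe step. The step function nextProbe is not shown in this snippet; the sketch below assumes a simple linearly growing step just to show the loop's shape:

// Sketch of the collision loop above. EMPTY_BUCKET is -1 here; the probe step
// is assumed to grow with probeCount (the real step function may differ).
static int findEmptyBucket(int[] buckets, int startBucket, int mask)
{
    int bucketId = startBucket;
    int probeCount = 1;
    while (buckets[bucketId] != -1) {
        int probe = probeCount;                 // assumed: nextProbe(probeCount) == probeCount
        bucketId = (bucketId + probe) & mask;   // advance from the current bucket
        probeCount++;
    }
    return bucketId;
}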
Use of io.trino.spi.TrinoException in project trino by trinodb.
The class GroupedTypedHistogram, method rehash:
private void rehash()
{
    long newBucketCountLong = bucketCount * 2L;
    if (newBucketCountLong > Integer.MAX_VALUE) {
        throw new TrinoException(GENERIC_INSUFFICIENT_RESOURCES, "Size of hash table cannot exceed " + Integer.MAX_VALUE + " entries (" + newBucketCountLong + ")");
    }

    int newBucketCount = computeBucketCount((int) newBucketCountLong, MAX_FILL_RATIO);
    int newMask = newBucketCount - 1;
    IntBigArray newBuckets = new IntBigArray(-1);
    newBuckets.ensureCapacity(newBucketCount);

    for (int i = 0; i < nextNodePointer; i++) {
        // compute the starting bucket for this node under the new mask
        int bucketId = getBucketIdForNode(i, newMask);
        int probeCount = 1;
        int originalBucket = bucketId;
        // probe for a free bucket, stepping from the original bucket each time
        while (newBuckets.get(bucketId) != -1) {
            int probe = nextProbe(probeCount);
            bucketId = nextBucketId(originalBucket, newMask, probe);
            probeCount++;
        }
        // record the mapping
        newBuckets.set(bucketId, i);
    }

    buckets = newBuckets;
    bucketCount = newBucketCount;
    maxFill = calculateMaxFill(newBucketCount, MAX_FILL_RATIO);
    mask = newMask;
    resizeNodeArrays(newBucketCount);
}
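All three rehash methods recompute maxFill from a fill ratio so the table resizes before probing degrades. A sketch of how such a threshold is commonly derived (Trino's calculateMaxFill is analogous; this version is illustrative):

// Derive the resize threshold from a fill ratio, always leaving at least one
// empty bucket so the probe loops are guaranteed to terminate.
static int calculateMaxFill(int bucketCount, float maxFillRatio)
{
    int maxFill = (int) Math.ceil(bucketCount * maxFillRatio);
    if (maxFill == bucketCount) {
        maxFill--;
    }
    return maxFill;
}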