Use of io.airlift.units.DataSize in project presto by prestodb.
The following snippet is the method updatePools of the class ClusterMemoryManager.
/**
 * Refreshes the cluster-wide view of memory pools from the latest per-node snapshots:
 * updates the total cluster memory gauge, drops (and un-exports) pools no longer
 * reported by any node, creates/exports newly seen pools, and notifies registered
 * listeners of each active pool's current state.
 */
private synchronized void updatePools(Map<MemoryPoolId, Integer> queryCounts) {
// Collect the memory snapshot from every node that has reported one.
List<MemoryInfo> nodeMemoryInfos = nodes.values().stream()
        .map(RemoteNodeMemory::getInfo)
        .filter(Optional::isPresent)
        .map(Optional::get)
        .collect(toImmutableList());

long totalClusterMemory = nodeMemoryInfos.stream()
        .map(MemoryInfo::getTotalNodeMemory)
        .mapToLong(DataSize::toBytes)
        .sum();
clusterMemoryBytes.set(totalClusterMemory);

// The set of pools any node currently reports.
Set<MemoryPoolId> activePoolIds = nodeMemoryInfos.stream()
        .flatMap(info -> info.getPools().keySet().stream())
        .collect(toImmutableSet());

// Materialize the difference view up front so we can mutate `pools` while iterating.
Set<MemoryPoolId> removedPools = ImmutableSet.copyOf(difference(pools.keySet(), activePoolIds));
for (MemoryPoolId removed : removedPools) {
    ClusterMemoryPool removedPool = pools.remove(removed);
    unexport(removedPool);
    // Tell listeners the pool is now empty.
    if (changeListeners.containsKey(removed)) {
        for (Consumer<MemoryPoolInfo> listener : changeListeners.get(removed)) {
            listenerExecutor.execute(() -> listener.accept(new MemoryPoolInfo(0, 0, ImmutableMap.of())));
        }
    }
}

for (MemoryPoolId id : activePoolIds) {
    ClusterMemoryPool pool = pools.get(id);
    if (pool == null) {
        // First time this pool has been seen: create it and export it over JMX.
        pool = new ClusterMemoryPool(id);
        String objectName = ObjectNames.builder(ClusterMemoryPool.class, pool.getId().toString()).build();
        try {
            exporter.export(objectName, pool);
        } catch (JmxException e) {
            log.error(e, "Error exporting memory pool %s", id);
        }
        pools.put(id, pool);
    }
    pool.update(nodeMemoryInfos, queryCounts.getOrDefault(pool.getId(), 0));
    // Push the refreshed pool state to any registered listeners.
    if (changeListeners.containsKey(id)) {
        MemoryPoolInfo info = pool.getInfo();
        for (Consumer<MemoryPoolInfo> listener : changeListeners.get(id)) {
            listenerExecutor.execute(() -> listener.accept(info));
        }
    }
}
}
Use of io.airlift.units.DataSize in project presto by prestodb.
The following snippet is the method process of the class ClusterMemoryManager.
/**
 * Enforces cluster-wide resource limits over the given set of running queries:
 * kills queries that exceed their per-query memory limit, optionally kills the
 * biggest general-pool query when the whole cluster has been out of memory for
 * longer than the configured delay, refreshes pool/node assignments, and kills
 * queries over their CPU time limit.
 *
 * @param queries all queries currently tracked by the coordinator; iterated
 *                multiple times, so the iterable must be re-traversable
 */
public synchronized void process(Iterable<QueryExecution> queries) {
if (!enabled) {
    return;
}
boolean outOfMemory = isClusterOutOfMemory();
if (!outOfMemory) {
    // Track the last moment the cluster was healthy; used for the kill delay below.
    lastTimeNotOutOfMemory = System.nanoTime();
}
boolean queryKilled = false;
long totalBytes = 0;
for (QueryExecution query : queries) {
    long bytes = query.getTotalMemoryReservation();
    DataSize sessionMaxQueryMemory = getQueryMaxMemory(query.getSession());
    // Effective limit is the smaller of the cluster-wide and session-level limits.
    long queryMemoryLimit = Math.min(maxQueryMemory.toBytes(), sessionMaxQueryMemory.toBytes());
    totalBytes += bytes;
    if (resourceOvercommit(query.getSession()) && outOfMemory) {
        // If a query has requested resource overcommit, only kill it if the cluster has run out of memory
        DataSize memory = succinctBytes(bytes);
        query.fail(new PrestoException(CLUSTER_OUT_OF_MEMORY, format("The cluster is out of memory and %s=true, so this query was killed. It was using %s of memory", RESOURCE_OVERCOMMIT, memory)));
        queryKilled = true;
    }
    if (!resourceOvercommit(query.getSession()) && bytes > queryMemoryLimit) {
        DataSize maxMemory = succinctBytes(queryMemoryLimit);
        query.fail(exceededGlobalLimit(maxMemory));
        queryKilled = true;
    }
}
clusterMemoryUsageBytes.set(totalBytes);
if (killOnOutOfMemory) {
    boolean shouldKillQuery = nanosSince(lastTimeNotOutOfMemory).compareTo(killOnOutOfMemoryDelay) > 0 && outOfMemory;
    boolean lastKilledQueryIsGone = (lastKilledQuery == null);
    if (!lastKilledQueryIsGone) {
        ClusterMemoryPool generalPool = pools.get(GENERAL_POOL);
        if (generalPool != null) {
            // FIX: the previously killed query is "gone" only when it is NOT still
            // holding a reservation in the general pool. The original code omitted
            // the negation, which inverted the kill throttle: it would kill another
            // query while the last victim was still draining, and stop killing once
            // the victim had actually released its memory.
            lastKilledQueryIsGone = !generalPool.getQueryMemoryReservations().containsKey(lastKilledQuery);
        }
    }
    // Only kill when the OOM condition has persisted past the delay, the last
    // victim has fully released its memory, and nothing was already killed above.
    if (shouldKillQuery && lastKilledQueryIsGone && !queryKilled) {
        // Kill the biggest query in the general pool
        QueryExecution biggestQuery = null;
        long maxMemory = -1;
        for (QueryExecution query : queries) {
            long bytesUsed = query.getTotalMemoryReservation();
            if (bytesUsed > maxMemory && query.getMemoryPool().getId().equals(GENERAL_POOL)) {
                biggestQuery = query;
                maxMemory = bytesUsed;
            }
        }
        if (biggestQuery != null) {
            biggestQuery.fail(new PrestoException(CLUSTER_OUT_OF_MEMORY, "The cluster is out of memory, and your query was killed. Please try again in a few minutes."));
            queriesKilledDueToOutOfMemory.incrementAndGet();
            lastKilledQuery = biggestQuery.getQueryId();
        }
    }
}
// Count queries per pool so pool bookkeeping can attribute demand correctly.
Map<MemoryPoolId, Integer> countByPool = new HashMap<>();
for (QueryExecution query : queries) {
    countByPool.merge(query.getMemoryPool().getId(), 1, Integer::sum);
}
updatePools(countByPool);
updateNodes(updateAssignments(queries));
// check if CPU usage is over limit
for (QueryExecution query : queries) {
    Duration cpuTime = query.getTotalCpuTime();
    Duration sessionLimit = getQueryMaxCpuTime(query.getSession());
    // Effective CPU limit is the smaller of the global and session limits.
    Duration limit = maxQueryCpuTime.compareTo(sessionLimit) < 0 ? maxQueryCpuTime : sessionLimit;
    if (cpuTime.compareTo(limit) > 0) {
        query.fail(new ExceededCpuLimitException(limit));
    }
}
}
Use of io.airlift.units.DataSize in project presto by prestodb.
The following snippet is the method loadPagesIfNecessary of the class ClientBuffer.
/**
 * Loads pages from the supplier on behalf of the current pending read, if any,
 * and re-processes the pending read once new data has been added.
 */
public void loadPagesIfNecessary(PagesSupplier pagesSupplier) {
requireNonNull(pagesSupplier, "pagesSupplier is null");

// Snapshot the max size under the lock. The pending read may be a different
// instance by the time pages are loaded, but that is safe because the size
// is rechecked before pages are returned.
DataSize maxSize;
synchronized (this) {
    if (pendingRead == null) {
        return;
    }
    maxSize = pendingRead.getMaxSize();
}

if (!loadPagesIfNecessary(pagesSupplier, maxSize)) {
    // No new data was added; nothing to wake up.
    return;
}

// Re-read the pending read under the lock (it may have changed or been cleared).
PendingRead currentRead;
synchronized (this) {
    currentRead = this.pendingRead;
}
if (currentRead != null) {
    processRead(currentRead);
}
}
Use of io.airlift.units.DataSize in project presto by prestodb.
The following snippet is the method getInfo of the class InternalResourceGroup.
/**
 * Builds a snapshot of this resource group's state, including recursive
 * snapshots of all sub-groups. Thread-safe: synchronizes on the root group.
 */
public ResourceGroupInfo getInfo() {
synchronized (root) {
    // A leaf group must never report descendant query counts.
    checkState(!subGroups.isEmpty() || (descendantRunningQueries == 0 && descendantQueuedQueries == 0), "Leaf resource group has descendant queries.");

    List<ResourceGroupInfo> childInfos = subGroups.values().stream()
            .map(InternalResourceGroup::getInfo)
            .collect(toImmutableList());

    // CAN_RUN takes precedence over CAN_QUEUE; FULL when neither is possible.
    ResourceGroupState state = canRunMore() ? CAN_RUN : (canQueueMore() ? CAN_QUEUE : FULL);

    return new ResourceGroupInfo(
            id,
            new DataSize(softMemoryLimitBytes, BYTE),
            maxRunningQueries,
            maxQueuedQueries,
            state,
            eligibleSubGroups.size(),
            new DataSize(cachedMemoryUsageBytes, BYTE),
            runningQueries.size() + descendantRunningQueries,
            queuedQueries.size() + descendantQueuedQueries,
            childInfos);
}
}
Use of io.airlift.units.DataSize in project presto by prestodb.
The following snippet is the method getDriverStats of the class DriverContext.
/**
 * Builds a point-in-time statistics snapshot for this driver: wall/CPU/user/blocked
 * time, memory reservations, and input/output data sizes and positions derived from
 * the first and last operators in the pipeline.
 */
public DriverStats getDriverStats() {
// Snapshot the accumulated timing counters (all in nanoseconds).
long totalScheduledTime = processWallNanos.get();
long totalCpuTime = processCpuNanos.get();
long totalUserTime = processUserNanos.get();
long totalBlockedTime = blockedWallNanos.get();
BlockedMonitor blockedMonitor = this.blockedMonitor.get();
if (blockedMonitor != null) {
// A monitor is present while the driver is currently blocked; include the
// in-progress blocked interval in the total.
totalBlockedTime += blockedMonitor.getBlockedTime();
}
List<OperatorStats> operators = ImmutableList.copyOf(transform(operatorContexts, OperatorContext::getOperatorStats));
// The first operator is the driver's input side; null when no operators exist yet.
OperatorStats inputOperator = getFirst(operators, null);
DataSize rawInputDataSize;
long rawInputPositions;
Duration rawInputReadTime;
DataSize processedInputDataSize;
long processedInputPositions;
DataSize outputDataSize;
long outputPositions;
if (inputOperator != null) {
// Raw input = what entered the first operator; processed input = what it emitted.
rawInputDataSize = inputOperator.getInputDataSize();
rawInputPositions = inputOperator.getInputPositions();
rawInputReadTime = inputOperator.getAddInputWall();
processedInputDataSize = inputOperator.getOutputDataSize();
processedInputPositions = inputOperator.getOutputPositions();
// Output is taken from the last operator; non-null here because the list is non-empty.
OperatorStats outputOperator = requireNonNull(getLast(operators, null));
outputDataSize = outputOperator.getOutputDataSize();
outputPositions = outputOperator.getOutputPositions();
} else {
// No operators yet: report zeroed stats.
rawInputDataSize = new DataSize(0, BYTE);
rawInputPositions = 0;
rawInputReadTime = new Duration(0, MILLISECONDS);
processedInputDataSize = new DataSize(0, BYTE);
processedInputPositions = 0;
outputDataSize = new DataSize(0, BYTE);
outputPositions = 0;
}
long startNanos = this.startNanos.get();
if (startNanos < createNanos) {
// The driver has not started yet; treat "now" as the start so queued time
// below measures creation-to-now.
startNanos = System.nanoTime();
}
Duration queuedTime = new Duration(startNanos - createNanos, NANOSECONDS);
long endNanos = this.endNanos.get();
Duration elapsedTime;
if (endNanos >= startNanos) {
// Finished: elapsed covers creation through completion.
elapsedTime = new Duration(endNanos - createNanos, NANOSECONDS);
} else {
// Still running (or not started): elapsed is reported as zero.
elapsedTime = new Duration(0, NANOSECONDS);
}
// Union of blocked reasons across all operators.
ImmutableSet.Builder<BlockedReason> builder = ImmutableSet.builder();
for (OperatorStats operator : operators) {
if (operator.getBlockedReason().isPresent()) {
builder.add(operator.getBlockedReason().get());
}
}
// NOTE(review): the final argument re-snapshots operator stats rather than reusing
// `operators`, so the two snapshots may differ slightly — presumably intentional
// to report the freshest per-operator stats; confirm before changing.
return new DriverStats(createdTime, executionStartTime.get(), executionEndTime.get(), queuedTime.convertToMostSuccinctTimeUnit(), elapsedTime.convertToMostSuccinctTimeUnit(), succinctBytes(memoryReservation.get()), succinctBytes(peakMemoryReservation.get()), succinctBytes(systemMemoryReservation.get()), new Duration(totalScheduledTime, NANOSECONDS).convertToMostSuccinctTimeUnit(), new Duration(totalCpuTime, NANOSECONDS).convertToMostSuccinctTimeUnit(), new Duration(totalUserTime, NANOSECONDS).convertToMostSuccinctTimeUnit(), new Duration(totalBlockedTime, NANOSECONDS).convertToMostSuccinctTimeUnit(), blockedMonitor != null, builder.build(), rawInputDataSize.convertToMostSuccinctDataSize(), rawInputPositions, rawInputReadTime, processedInputDataSize.convertToMostSuccinctDataSize(), processedInputPositions, outputDataSize.convertToMostSuccinctDataSize(), outputPositions, ImmutableList.copyOf(transform(operatorContexts, OperatorContext::getOperatorStats)));
}
Aggregations