Usage of io.prestosql.spi.resourcegroups.KillPolicy in the hetu-core project by openLooKeng.
The class DistributedResourceGroup, method internalCancelQuery:
/**
 * Kills running queries in this distributed resource group when the cached memory usage
 * exceeds the group's soft memory limit, selecting victims according to the configured
 * {@code killPolicy}. Non-leaf groups delegate to their sub-groups; only leaf groups
 * select and kill queries. The caller must hold the lock on {@code root}.
 */
public void internalCancelQuery()
{
    checkState(Thread.holdsLock(root), "Must hold lock to check cancel query");
    synchronized (root) {
        if (!subGroups.isEmpty()) {
            // Non-leaf group: recurse so that leaf groups do the actual killing.
            for (BaseResourceGroup group : subGroups()) {
                ((DistributedResourceGroup) group).internalCancelQuery();
            }
            return;
        }
        if (cachedMemoryUsageBytes <= softMemoryLimitBytes) {
            // Within the memory budget; nothing to kill.
            return;
        }
        Optional<SharedResourceGroupState> resourceGroupState = getSharedResourceGroupState();
        if (!resourceGroupState.isPresent()) {
            return;
        }
        Set<SharedQueryState> globalRunningQueries = resourceGroupState.get().getRunningQueries();
        List<SharedQueryState> sortedQueryList;
        Lock lock = stateStore.getLock(id.toString());
        boolean locked = false;
        try {
            // If lock is not free, then we return immediately, so no need to refresh after taking lock.
            // Before next call of this function, refresh will already happen.
            locked = lock.tryLock();
            if (locked) {
                switch (killPolicy) {
                    case HIGH_MEMORY_QUERIES:
                        double absMemoryMargin = 1 - (double) memoryMarginPercent / 100;
                        double absQueryProgressMargin = 1 - (double) queryProgressMarginPercent / 100;
                        // Order candidates by memory usage (descending); when two queries' memory usage
                        // is within the configured margin, prefer killing the less-progressed query.
                        sortedQueryList = globalRunningQueries.stream().sorted((o1, o2) -> {
                            if (o1.getTotalMemoryReservation().toBytes() < o2.getTotalMemoryReservation().toBytes() * absMemoryMargin
                                    || o2.getTotalMemoryReservation().toBytes() < o1.getTotalMemoryReservation().toBytes() * absMemoryMargin) {
                                return Long.compare(o2.getTotalMemoryReservation().toBytes(), o1.getTotalMemoryReservation().toBytes());
                            }
                            // If query progress differs by more than the configured margin, order by
                            // progress; otherwise fall back to memory usage again.
                            if (o1.getQueryProgress().orElse(0) < o2.getQueryProgress().orElse(0) * absQueryProgressMargin
                                    || o2.getQueryProgress().orElse(0) < o1.getQueryProgress().orElse(0) * absQueryProgressMargin) {
                                return Double.compare(o1.getQueryProgress().orElse(0), o2.getQueryProgress().orElse(0));
                            }
                            return Long.compare(o2.getTotalMemoryReservation().toBytes(), o1.getTotalMemoryReservation().toBytes());
                        }).collect(Collectors.toList());
                        break;
                    case OLDEST_QUERIES:
                        sortedQueryList = globalRunningQueries.stream()
                                .sorted(Comparator.comparing(o -> (o.getExecutionStartTime().get())))
                                .collect(Collectors.toList());
                        break;
                    case RECENT_QUERIES:
                        sortedQueryList = globalRunningQueries.stream()
                                .sorted(Comparator.comparing(o -> (o.getExecutionStartTime().get()), Comparator.reverseOrder()))
                                .collect(Collectors.toList());
                        break;
                    case FINISH_PERCENTAGE_QUERIES:
                        sortedQueryList = globalRunningQueries.stream()
                                .sorted(Comparator.comparing(o -> (o.getQueryProgress().orElse(0))))
                                .collect(Collectors.toList());
                        break;
                    case NO_KILL:
                        // fall through
                    default:
                        sortedQueryList = new ArrayList<>();
                }
                long tempGlobalCachedMemoryUsage = cachedMemoryUsageBytes;
                long tempLocalCachedMemoryUsage = cachedMemoryUsageBytes;
                // Walk the sorted candidates, failing those that run on this coordinator, until the
                // estimated global usage drops under the soft limit.
                // So in this case only Q1 will be killed from local coordinator.
                for (SharedQueryState query : sortedQueryList) {
                    for (ManagedQueryExecution localQuery : runningQueries) {
                        if (query.getBasicQueryInfo().getQueryId().equals(localQuery.getBasicQueryInfo().getQueryId())) {
                            LOG.info("Query " + localQuery.getBasicQueryInfo().getQueryId() + " is getting killed for resource group " + this + " query will be killed with policy " + killPolicy);
                            localQuery.fail(new PrestoException(GENERIC_INSUFFICIENT_RESOURCES, "Memory consumption " + tempLocalCachedMemoryUsage + " exceeds the limit " + softMemoryLimitBytes + " for resource group " + this));
                            queryFinished(localQuery);
                            tempLocalCachedMemoryUsage -= query.getTotalMemoryReservation().toBytes();
                            break;
                        }
                    }
                    tempGlobalCachedMemoryUsage -= query.getTotalMemoryReservation().toBytes();
                    if (tempGlobalCachedMemoryUsage <= softMemoryLimitBytes) {
                        break;
                    }
                }
            }
        }
        catch (RuntimeException e) {
            // Best-effort: a failure during kill selection must not propagate to the caller;
            // the next refresh cycle retries. NOTE(review): the exception is swallowed silently —
            // consider logging it for diagnosability.
            return;
        }
        finally {
            if (locked) {
                lock.unlock();
            }
        }
    }
}
Usage of io.prestosql.spi.resourcegroups.KillPolicy in the hetu-core project by openLooKeng.
The class DistributedResourceGroupTemp, method internalCancelQuery:
/**
 * Kills running queries in this resource group when the globally cached memory usage
 * exceeds the group's soft memory limit, selecting victims according to the configured
 * {@code killPolicy}. Non-leaf groups delegate to their sub-groups; only leaf groups
 * select and kill queries. The caller must hold the lock on {@code root}.
 */
public void internalCancelQuery()
{
    checkState(Thread.holdsLock(root), "Must hold lock to check cancel query");
    synchronized (root) {
        if (!subGroups.isEmpty()) {
            // Non-leaf group: recurse so that leaf groups do the actual killing.
            for (BaseResourceGroup group : subGroups()) {
                ((DistributedResourceGroupTemp) group).internalCancelQuery();
            }
            return;
        }
        long cachedMemoryUsageBytes = getGlobalCachedMemoryUsageBytes();
        if (cachedMemoryUsageBytes <= softMemoryLimitBytes) {
            // Within the memory budget; nothing to kill.
            return;
        }
        Optional<SharedResourceGroupState> resourceGroupState = getSharedResourceGroupState();
        if (!resourceGroupState.isPresent()) {
            return;
        }
        Set<SharedQueryState> globalRunningQueries = resourceGroupState.get().getRunningQueries();
        List<SharedQueryState> sortedQueryList;
        Lock lock = stateStore.getLock(id.toString());
        boolean locked = false;
        try {
            // If lock is not free, then we return immediately, so no need to refresh after taking lock.
            // Before next call of this function, refresh will already happen.
            locked = lock.tryLock();
            if (locked) {
                switch (killPolicy) {
                    case HIGH_MEMORY_QUERIES:
                        double absMemoryMargin = 1 - (double) memoryMarginPercent / 100;
                        double absQueryProgressMargin = 1 - (double) queryProgressMarginPercent / 100;
                        // Order candidates by memory usage (descending); when two queries' memory usage
                        // is within the configured margin, prefer killing the less-progressed query.
                        sortedQueryList = globalRunningQueries.stream().sorted((o1, o2) -> {
                            if (o1.getTotalMemoryReservation().toBytes() < o2.getTotalMemoryReservation().toBytes() * absMemoryMargin
                                    || o2.getTotalMemoryReservation().toBytes() < o1.getTotalMemoryReservation().toBytes() * absMemoryMargin) {
                                return Long.compare(o2.getTotalMemoryReservation().toBytes(), o1.getTotalMemoryReservation().toBytes());
                            }
                            // If query progress differs by more than the configured margin, order by
                            // progress; otherwise fall back to memory usage again.
                            if (o1.getQueryProgress().orElse(0) < o2.getQueryProgress().orElse(0) * absQueryProgressMargin
                                    || o2.getQueryProgress().orElse(0) < o1.getQueryProgress().orElse(0) * absQueryProgressMargin) {
                                return Double.compare(o1.getQueryProgress().orElse(0), o2.getQueryProgress().orElse(0));
                            }
                            return Long.compare(o2.getTotalMemoryReservation().toBytes(), o1.getTotalMemoryReservation().toBytes());
                        }).collect(Collectors.toList());
                        break;
                    case OLDEST_QUERIES:
                        sortedQueryList = globalRunningQueries.stream()
                                .sorted(Comparator.comparing(o -> (o.getExecutionStartTime().get())))
                                .collect(Collectors.toList());
                        break;
                    case RECENT_QUERIES:
                        sortedQueryList = globalRunningQueries.stream()
                                .sorted(Comparator.comparing(o -> (o.getExecutionStartTime().get()), Comparator.reverseOrder()))
                                .collect(Collectors.toList());
                        break;
                    case FINISH_PERCENTAGE_QUERIES:
                        sortedQueryList = globalRunningQueries.stream()
                                .sorted(Comparator.comparing(o -> (o.getQueryProgress().orElse(0))))
                                .collect(Collectors.toList());
                        break;
                    case NO_KILL:
                        // fall through
                    default:
                        sortedQueryList = new ArrayList<>();
                }
                long tempGlobalCachedMemoryUsage = cachedMemoryUsageBytes;
                long tempLocalCachedMemoryUsage = localCachedMemoryUsageBytes;
                // Walk the sorted candidates, failing those that run on this coordinator, until the
                // estimated global usage drops under the soft limit.
                // So in this case only Q1 will be killed from local coordinator.
                for (SharedQueryState query : sortedQueryList) {
                    for (ManagedQueryExecution localQuery : localRunningQueries) {
                        if (query.getBasicQueryInfo().getQueryId().equals(localQuery.getBasicQueryInfo().getQueryId())) {
                            LOG.info("Query " + localQuery.getBasicQueryInfo().getQueryId() + " is getting killed for resource group " + this + " query will be killed with policy " + killPolicy);
                            localQuery.fail(new PrestoException(GENERIC_INSUFFICIENT_RESOURCES, "Memory consumption " + tempLocalCachedMemoryUsage + " exceeds the limit " + softMemoryLimitBytes + " for resource group " + this));
                            queryFinished(localQuery);
                            tempLocalCachedMemoryUsage -= query.getTotalMemoryReservation().toBytes();
                            break;
                        }
                    }
                    tempGlobalCachedMemoryUsage -= query.getTotalMemoryReservation().toBytes();
                    if (tempGlobalCachedMemoryUsage <= softMemoryLimitBytes) {
                        break;
                    }
                }
            }
        }
        catch (RuntimeException e) {
            // Best-effort: a failure during kill selection must not propagate to the caller;
            // the next refresh cycle retries. NOTE(review): the exception is swallowed silently —
            // consider logging it for diagnosability.
            return;
        }
        finally {
            if (locked) {
                lock.unlock();
            }
        }
    }
}
Usage of io.prestosql.spi.resourcegroups.KillPolicy in the hetu-core project by openLooKeng.
The class InternalResourceGroup, method internalCancelQuery:
/**
 * Kills running queries in this resource group when the cached memory usage exceeds the
 * group's soft memory limit, selecting victims according to the configured {@code killPolicy}.
 * Non-leaf groups delegate to their dirty sub-groups; only leaf groups select and kill queries.
 * The caller must hold the lock on {@code root}.
 */
public void internalCancelQuery()
{
    checkState(Thread.holdsLock(root), "Must hold lock to check cancel query");
    synchronized (root) {
        if (!subGroups.isEmpty()) {
            // Non-leaf group: recurse so that leaf groups do the actual killing.
            // (Enhanced for loop — the original explicit Iterator never called remove().)
            for (InternalResourceGroup subGroup : dirtySubGroups) {
                subGroup.internalCancelQuery();
            }
            return;
        }
        if (cachedMemoryUsageBytes <= softMemoryLimitBytes) {
            // Within the memory budget; nothing to kill.
            return;
        }
        List<ManagedQueryExecution> sortedQueryList;
        switch (killPolicy) {
            case HIGH_MEMORY_QUERIES:
                double absMemoryMargin = 1 - (double) memoryMarginPercent / 100;
                double absQueryProgressMargin = 1 - (double) queryProgressMarginPercent / 100;
                // Order candidates by memory usage (descending); when two queries' memory usage
                // is within the configured margin, prefer killing the less-progressed query.
                sortedQueryList = runningQueries.stream().sorted((o1, o2) -> {
                    if (o1.getTotalMemoryReservation().toBytes() < o2.getTotalMemoryReservation().toBytes() * absMemoryMargin
                            || o2.getTotalMemoryReservation().toBytes() < o1.getTotalMemoryReservation().toBytes() * absMemoryMargin) {
                        return Long.compare(o2.getTotalMemoryReservation().toBytes(), o1.getTotalMemoryReservation().toBytes());
                    }
                    // If query progress differs by more than the configured margin, order by
                    // progress; otherwise fall back to memory usage again.
                    if (o1.getQueryProgress().orElse(0) < o2.getQueryProgress().orElse(0) * absQueryProgressMargin
                            || o2.getQueryProgress().orElse(0) < o1.getQueryProgress().orElse(0) * absQueryProgressMargin) {
                        return Double.compare(o1.getQueryProgress().orElse(0), o2.getQueryProgress().orElse(0));
                    }
                    return Long.compare(o2.getTotalMemoryReservation().toBytes(), o1.getTotalMemoryReservation().toBytes());
                }).collect(Collectors.toList());
                break;
            case OLDEST_QUERIES:
                sortedQueryList = runningQueries.stream()
                        .sorted(Comparator.comparing(o -> (o.getQueryExecutionStartTime().get())))
                        .collect(Collectors.toList());
                break;
            case RECENT_QUERIES:
                sortedQueryList = runningQueries.stream()
                        .sorted(Comparator.comparing(o -> (o.getQueryExecutionStartTime().get()), Comparator.reverseOrder()))
                        .collect(Collectors.toList());
                break;
            case FINISH_PERCENTAGE_QUERIES:
                sortedQueryList = runningQueries.stream()
                        .sorted(Comparator.comparing(o -> (o.getQueryProgress().orElse(0))))
                        .collect(Collectors.toList());
                break;
            case NO_KILL:
                // fall through
            default:
                sortedQueryList = new ArrayList<>();
        }
        // Fail queries in sorted order until the estimated usage drops under the soft limit.
        long tempCacheMemoryUsageBytes = cachedMemoryUsageBytes;
        for (ManagedQueryExecution query : sortedQueryList) {
            LOG.info("Query " + query.getBasicQueryInfo().getQueryId() + " is getting killed for resource group " + this + " query will be killed with policy " + killPolicy);
            query.fail(new PrestoException(GENERIC_INSUFFICIENT_RESOURCES, "Memory consumption " + tempCacheMemoryUsageBytes + " exceeds the limit " + softMemoryLimitBytes + " for resource group " + this));
            queryFinished(query);
            tempCacheMemoryUsageBytes -= query.getTotalMemoryReservation().toBytes();
            if (tempCacheMemoryUsageBytes <= softMemoryLimitBytes) {
                break;
            }
        }
    }
}
Aggregations