Use of com.hubspot.singularity.SlavePlacement in project Singularity by HubSpot.
From the class SingularityValidator, method checkResourcesForBounce.
public void checkResourcesForBounce(SingularityRequest request, boolean isIncremental) {
  SlavePlacement placement = request.getSlavePlacement().or(defaultSlavePlacement);

  if ((isAllowBounceToSameHost(request) && placement == SlavePlacement.SEPARATE_BY_REQUEST)
      || (!isAllowBounceToSameHost(request) && placement != SlavePlacement.GREEDY && placement != SlavePlacement.OPTIMISTIC)) {
    int currentActiveSlaveCount = slaveManager.getNumObjectsAtState(MachineState.ACTIVE);
    // An incremental bounce only needs one spare slave; a full bounce starts a complete replacement set alongside the running tasks.
    int requiredSlaveCount = isIncremental ? request.getInstancesSafe() + 1 : request.getInstancesSafe() * 2;

    checkBadRequest(currentActiveSlaveCount >= requiredSlaveCount, "Not enough active slaves to successfully complete a bounce of request %s (minimum required: %s, current: %s). Consider deploying, or changing the slave placement strategy instead.", request.getId(), requiredSlaveCount, currentActiveSlaveCount);
  }
}
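The required-slave arithmetic above is easy to sanity-check in isolation. A minimal sketch, assuming the reading described in the comment (the class and helper names below are hypothetical, not part of Singularity):

// Illustrative sketch only; BounceCapacityExample is not part of Singularity.
public final class BounceCapacityExample {

  // Mirrors the requiredSlaveCount expression in checkResourcesForBounce.
  static int requiredSlaveCount(int instances, boolean isIncremental) {
    return isIncremental ? instances + 1 : instances * 2;
  }

  public static void main(String[] args) {
    System.out.println(requiredSlaveCount(3, true));  // 4: tasks are replaced one at a time, one spare slave suffices
    System.out.println(requiredSlaveCount(3, false)); // 6: the old and new sets of 3 instances must coexist
  }
}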
Use of com.hubspot.singularity.SlavePlacement in project Singularity by HubSpot.
From the class SingularityValidator, method checkScale.
public void checkScale(SingularityRequest request, Optional<Integer> previousScale) {
  SlavePlacement placement = request.getSlavePlacement().or(defaultSlavePlacement);

  if (placement != SlavePlacement.GREEDY && placement != SlavePlacement.OPTIMISTIC) {
    int currentActiveSlaveCount = slaveManager.getNumObjectsAtState(MachineState.ACTIVE);
    int requiredSlaveCount = request.getInstancesSafe();

    // With SEPARATE_BY_REQUEST, the previous instances keep their slaves until the scale completes, so both generations must fit at once.
    if (previousScale.isPresent() && placement == SlavePlacement.SEPARATE_BY_REQUEST) {
      requiredSlaveCount += previousScale.get();
    }

    checkBadRequest(currentActiveSlaveCount >= requiredSlaveCount, "Not enough active slaves to successfully scale request %s to %s instances (minimum required: %s, current: %s).", request.getId(), request.getInstancesSafe(), requiredSlaveCount, currentActiveSlaveCount);
  }
}
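The capacity check reduces to simple addition. A minimal sketch of that arithmetic, with hypothetical names and assuming the SEPARATE_BY_REQUEST reading above:

import java.util.Optional;

// Illustrative sketch only; ScaleCapacityExample is not part of Singularity.
public final class ScaleCapacityExample {

  static int requiredSlaveCount(int newInstances, Optional<Integer> previousScale, boolean separateByRequest) {
    int required = newInstances;
    if (previousScale.isPresent() && separateByRequest) {
      required += previousScale.get();
    }
    return required;
  }

  public static void main(String[] args) {
    System.out.println(requiredSlaveCount(5, Optional.of(3), true));  // 8: old 3 instances plus new 5
    System.out.println(requiredSlaveCount(5, Optional.of(3), false)); // 5: only the new count matters
  }
}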
Use of com.hubspot.singularity.SlavePlacement in project Singularity by HubSpot.
From the class SingularityAutoScaleSpreadAllPoller, method runActionOnPoll.
@Override
public void runActionOnPoll() {
  int currentActiveSlaveCount = slaveManager.getNumObjectsAtState(MachineState.ACTIVE);

  for (SingularityRequestWithState requestWithState : requestManager.getActiveRequests()) {
    lock.runWithRequestLock(() -> {
      SingularityRequest request = requestWithState.getRequest();
      SlavePlacement placement = request.getSlavePlacement().or(defaultSlavePlacement);

      if (placement != SlavePlacement.SPREAD_ALL_SLAVES) {
        return;
      }

      int requestInstanceCount = request.getInstancesSafe();

      if (requestInstanceCount == currentActiveSlaveCount) {
        LOG.trace("Active Request {} is already spread to all {} available slaves", request.getId(), currentActiveSlaveCount);
      } else {
        LOG.info("Scaling request {} from {} instances to {} available slaves", request.getId(), requestInstanceCount, currentActiveSlaveCount);
        submitScaleRequest(requestWithState, currentActiveSlaveCount);
      }
    }, requestWithState.getRequest().getId(), getClass().getSimpleName());
  }
}
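The poller's decision is a single comparison: a SPREAD_ALL_SLAVES request is kept at one instance per active slave and is rescaled whenever the two counts drift apart. A minimal sketch of that comparison, with hypothetical names:

import java.util.Optional;

// Illustrative sketch only; SpreadAllDecisionExample is not part of Singularity.
public final class SpreadAllDecisionExample {

  /** Returns the instance count to scale to, or empty if the request is already spread. */
  static Optional<Integer> desiredScale(int requestInstanceCount, int activeSlaveCount) {
    return requestInstanceCount == activeSlaveCount ? Optional.empty() : Optional.of(activeSlaveCount);
  }

  public static void main(String[] args) {
    System.out.println(desiredScale(3, 5)); // Optional[5]: scale up to match 5 active slaves
    System.out.println(desiredScale(5, 5)); // Optional.empty: already one instance per slave
  }
}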
Use of com.hubspot.singularity.SlavePlacement in project Singularity by HubSpot.
From the class SingularitySlaveAndRackManager, method doesOfferMatch.
SlaveMatchState doesOfferMatch(SingularityOfferHolder offerHolder, SingularityTaskRequest taskRequest, List<SingularityTaskId> activeTaskIdsForRequest) {
  final String host = offerHolder.getHostname();
  final String rackId = offerHolder.getRackId();
  final String slaveId = offerHolder.getSlaveId();

  final MachineState currentSlaveState = slaveManager.getSlave(slaveId).get().getCurrentState().getState();

  if (currentSlaveState == MachineState.FROZEN) {
    return SlaveMatchState.SLAVE_FROZEN;
  }

  if (currentSlaveState.isDecommissioning()) {
    return SlaveMatchState.SLAVE_DECOMMISSIONING;
  }

  final MachineState currentRackState = rackManager.getRack(rackId).get().getCurrentState().getState();

  if (currentRackState == MachineState.FROZEN) {
    return SlaveMatchState.RACK_FROZEN;
  }

  if (currentRackState.isDecommissioning()) {
    return SlaveMatchState.RACK_DECOMMISSIONING;
  }

  if (!taskRequest.getRequest().getRackAffinity().or(Collections.emptyList()).isEmpty()) {
    if (!taskRequest.getRequest().getRackAffinity().get().contains(rackId)) {
      LOG.trace("Task {} requires a rack in {} (current rack {})", taskRequest.getPendingTask().getPendingTaskId(), taskRequest.getRequest().getRackAffinity().get(), rackId);
      return SlaveMatchState.RACK_AFFINITY_NOT_MATCHING;
    }
  }

  if (!isSlaveAttributesMatch(offerHolder, taskRequest)) {
    return SlaveMatchState.SLAVE_ATTRIBUTES_DO_NOT_MATCH;
  }

  final SlavePlacement slavePlacement = taskRequest.getRequest().getSlavePlacement().or(configuration.getDefaultSlavePlacement());

  if (!taskRequest.getRequest().isRackSensitive() && slavePlacement == SlavePlacement.GREEDY) {
    // todo: account for this or let this behavior continue?
    return SlaveMatchState.NOT_RACK_OR_SLAVE_PARTICULAR;
  }

  final int numDesiredInstances = taskRequest.getRequest().getInstancesSafe();
  boolean allowBounceToSameHost = isAllowBounceToSameHost(taskRequest.getRequest());
  Multiset<String> countPerRack = HashMultiset.create(slaveManager.getNumActive());
  double numOnSlave = 0;
  double numCleaningOnSlave = 0;
  double numFromSameBounceOnSlave = 0;
  double numOtherDeploysOnSlave = 0;
  boolean taskLaunchedFromBounceWithActionId = taskRequest.getPendingTask().getPendingTaskId().getPendingType() == PendingType.BOUNCE && taskRequest.getPendingTask().getActionId().isPresent();

  final String sanitizedHost = offerHolder.getSanitizedHost();
  final String sanitizedRackId = offerHolder.getSanitizedRackId();
  Collection<SingularityTaskId> cleaningTasks = leaderCache.getCleanupTaskIds();

  for (SingularityTaskId taskId : activeTaskIdsForRequest) {
    if (!cleaningTasks.contains(taskId) && taskRequest.getDeploy().getId().equals(taskId.getDeployId())) {
      countPerRack.add(taskId.getSanitizedRackId());
    }

    if (!taskId.getSanitizedHost().equals(sanitizedHost)) {
      continue;
    }

    if (taskRequest.getDeploy().getId().equals(taskId.getDeployId())) {
      if (cleaningTasks.contains(taskId)) {
        numCleaningOnSlave++;
      } else {
        numOnSlave++;
      }

      if (taskLaunchedFromBounceWithActionId) {
        Optional<SingularityTask> maybeTask = taskManager.getTask(taskId);
        boolean errorInTaskData = false;
        if (maybeTask.isPresent()) {
          SingularityPendingTask pendingTask = maybeTask.get().getTaskRequest().getPendingTask();
          if (pendingTask.getPendingTaskId().getPendingType() == PendingType.BOUNCE) {
            if (pendingTask.getActionId().isPresent()) {
              if (pendingTask.getActionId().get().equals(taskRequest.getPendingTask().getActionId().get())) {
                numFromSameBounceOnSlave++;
              }
            } else {
              // No actionId present on bounce, fall back to more restrictive placement strategy
              errorInTaskData = true;
            }
          }
        } else {
          // Could not find appropriate task data, fall back to more restrictive placement strategy
          errorInTaskData = true;
        }

        if (errorInTaskData) {
          allowBounceToSameHost = false;
        }
      }
    } else {
      numOtherDeploysOnSlave++;
    }
  }

  if (taskRequest.getRequest().isRackSensitive()) {
    final boolean isRackOk = isRackOk(countPerRack, sanitizedRackId, numDesiredInstances, taskRequest.getRequest().getId(), slaveId, host, numCleaningOnSlave, leaderCache);

    if (!isRackOk) {
      return SlaveMatchState.RACK_SATURATED;
    }
  }

  switch (slavePlacement) {
    case SEPARATE:
    case SEPARATE_BY_DEPLOY:
    case SPREAD_ALL_SLAVES:
      if (allowBounceToSameHost && taskLaunchedFromBounceWithActionId) {
        if (numFromSameBounceOnSlave > 0) {
          LOG.trace("Rejecting SEPARATE task {} from slave {} ({}) due to numFromSameBounceOnSlave {}", taskRequest.getRequest().getId(), slaveId, host, numFromSameBounceOnSlave);
          return SlaveMatchState.SLAVE_SATURATED;
        }
      } else {
        if (numOnSlave > 0 || numCleaningOnSlave > 0) {
          LOG.trace("Rejecting {} task {} from slave {} ({}) due to numOnSlave {} numCleaningOnSlave {}", slavePlacement.name(), taskRequest.getRequest().getId(), slaveId, host, numOnSlave, numCleaningOnSlave);
          return SlaveMatchState.SLAVE_SATURATED;
        }
      }
      break;
    case SEPARATE_BY_REQUEST:
      if (numOnSlave > 0 || numCleaningOnSlave > 0 || numOtherDeploysOnSlave > 0) {
        LOG.trace("Rejecting SEPARATE_BY_REQUEST task {} from slave {} ({}) due to numOnSlave {} numCleaningOnSlave {} numOtherDeploysOnSlave {}", taskRequest.getRequest().getId(), slaveId, host, numOnSlave, numCleaningOnSlave, numOtherDeploysOnSlave);
        return SlaveMatchState.SLAVE_SATURATED;
      }
      break;
    case OPTIMISTIC:
      // If no tasks are active for this request yet, we can fall back to greedy.
      if (activeTaskIdsForRequest.size() > 0) {
        Collection<SingularityPendingTaskId> pendingTasksForRequestClusterwide = leaderCache.getPendingTaskIdsForRequest(taskRequest.getRequest().getId());

        Set<String> currentHostsForRequest = activeTaskIdsForRequest.stream().map(SingularityTaskId::getSanitizedHost).collect(Collectors.toSet());

        final double numPerSlave = activeTaskIdsForRequest.size() / (double) currentHostsForRequest.size();
        final double leniencyCoefficient = configuration.getPlacementLeniency();
        final double threshold = numPerSlave * (1 + (pendingTasksForRequestClusterwide.size() * leniencyCoefficient));

        final boolean isSlaveOk = numOnSlave <= threshold;

        if (!isSlaveOk) {
          LOG.trace("Rejecting OPTIMISTIC task {} from slave {} ({}) because numOnSlave {} violates threshold {} (based on active tasks for request {}, current hosts for request {}, pending tasks for request {})", taskRequest.getRequest().getId(), slaveId, host, numOnSlave, threshold, activeTaskIdsForRequest.size(), currentHostsForRequest.size(), pendingTasksForRequestClusterwide.size());
          return SlaveMatchState.SLAVE_SATURATED;
        }
      }
      break;
    case GREEDY:
  }

  return SlaveMatchState.OK;
}
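The OPTIMISTIC branch above is the only one that uses a numeric threshold rather than a hard per-slave rule: the cap is the request's current average tasks per host, relaxed in proportion to how many of its tasks are still pending cluster-wide. A minimal sketch of that arithmetic (the class and parameter names below are hypothetical, not part of Singularity):

// Illustrative sketch only; OptimisticThresholdExample is not part of Singularity.
public final class OptimisticThresholdExample {

  static boolean slaveHasRoom(int activeTasksForRequest, int distinctHostsForRequest,
                              int pendingTasksForRequest, double leniencyCoefficient,
                              double numOnSlave) {
    // Average tasks per host that this request currently runs on.
    double numPerSlave = activeTasksForRequest / (double) distinctHostsForRequest;
    // The more tasks are pending cluster-wide, the more the per-slave cap is relaxed.
    double threshold = numPerSlave * (1 + (pendingTasksForRequest * leniencyCoefficient));
    return numOnSlave <= threshold;
  }

  public static void main(String[] args) {
    // 8 active tasks on 4 hosts, 2 pending, leniency 0.5 -> threshold = 2 * (1 + 1) = 4.
    System.out.println(slaveHasRoom(8, 4, 2, 0.5, 3)); // true: 3 <= 4, slave accepted
    System.out.println(slaveHasRoom(8, 4, 2, 0.5, 5)); // false: 5 > 4, SLAVE_SATURATED
  }
}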