Use of com.hubspot.singularity.SingularitySlaveUsage in project Singularity by HubSpot.
Class SingularityMesosOfferSchedulerTest, method itAccountsForExpectedTaskUsage:
@Test
public void itAccountsForExpectedTaskUsage() {
  initRequest();
  double cpuReserved = 2;
  double memMbReserved = 1000;
  initFirstDeployWithResources(cpuReserved, memMbReserved);
  saveAndSchedule(requestManager.getRequest(requestId).get().getRequest().toBuilder().setInstances(Optional.of(1)));
  resourceOffers(3);
  SingularityTaskId taskId = taskManager.getActiveTaskIds().get(0);
  String t1 = taskId.getId();
  // 10 CPU-seconds reported 5 seconds after the task started => 2 cpus used
  MesosTaskMonitorObject t1u1 = getTaskMonitor(t1, 10, TimeUnit.MILLISECONDS.toSeconds(taskId.getStartedAt()) + 5, 1000);
  mesosClient.setSlaveResourceUsage("host1", Collections.singletonList(t1u1));
  usagePoller.runActionOnPoll();
  Map<ResourceUsageType, Number> longRunningTasksUsage = new HashMap<>();
  longRunningTasksUsage.put(ResourceUsageType.CPU_USED, 0.1);
  longRunningTasksUsage.put(ResourceUsageType.MEMORY_BYTES_USED, 0.1);
  longRunningTasksUsage.put(ResourceUsageType.DISK_BYTES_USED, 0.1);
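  // A deliberately small usage sample, recorded for all three hosts below; see the labeled sketch after this test for what the positional arguments are assumed to mean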
  SingularitySlaveUsage smallUsage = new SingularitySlaveUsage(0.1, 0.1, Optional.of(10.0), 1, 1, Optional.of(30L), 1, 1, Optional.of(1024L), longRunningTasksUsage, 1, System.currentTimeMillis(), 1, 30000, 10, 0, 0, 0, 0, 107374182);
  usageManager.saveSpecificSlaveUsageAndSetCurrent("host1", smallUsage);
  usageManager.saveSpecificSlaveUsageAndSetCurrent("host2", smallUsage);
  usageManager.saveSpecificSlaveUsageAndSetCurrent("host3", smallUsage);
  requestResource.scale(requestId, new SingularityScaleRequest(Optional.of(3), Optional.absent(), Optional.absent(), Optional.absent(), Optional.absent(), Optional.absent(), Optional.absent(), Optional.absent()), SingularityUser.DEFAULT_USER);
  Assert.assertEquals(2.0, usageManager.getRequestUtilizations().get(requestId).getCpuUsed(), 0.001);
  Offer host2Offer = createOffer(6, 30000, 107374182, "host2", "host2");
  slaveAndRackManager.checkOffer(host2Offer);
  Offer host3Offer = createOffer(6, 30000, 107374182, "host3", "host3");
  slaveAndRackManager.checkOffer(host3Offer);
  Collection<SingularityOfferHolder> offerHolders = offerScheduler.checkOffers(Arrays.asList(host2Offer, host3Offer));
  Assert.assertEquals(2, offerHolders.size());
  // A single offer should only ever get a single task, even though both offers have room for both tasks here.
  // Accepting a task reduces the offer's score for the next check.
  for (SingularityOfferHolder offerHolder : offerHolders) {
    Assert.assertEquals(1, offerHolder.getAcceptedTasks().size());
  }
}
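The twenty positional constructor arguments make the usage sample above hard to read. Below is the same smallUsage call with each argument labeled. The parameter names are assumptions inferred from the test values (used/reserved/total triples for cpu, memory, and disk, then long-running task usage, task count, timestamp, system metrics, and slave disk figures), not the authoritative field names of SingularitySlaveUsage:

// A sketch of the smallUsage construction with each positional argument labeled.
// Names in the comments are assumptions, not the class's actual field names.
SingularitySlaveUsage smallUsage = new SingularitySlaveUsage(
  0.1,                        // cpus used
  0.1,                        // cpus reserved
  Optional.of(10.0),          // total cpus on the slave
  1, 1, Optional.of(30L),     // memory: bytes used, MB reserved, MB total
  1, 1, Optional.of(1024L),   // disk: bytes used, MB reserved, MB total
  longRunningTasksUsage,      // per-ResourceUsageType usage of long-running tasks
  1,                          // number of tasks on the slave
  System.currentTimeMillis(), // timestamp of this usage sample
  1, 30000, 10,               // system memory total, memory free, cpu count
  0, 0, 0,                    // 1/5/15-minute system load averages
  0, 107374182);              // slave disk used and disk total (bytes)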
Use of com.hubspot.singularity.SingularitySlaveUsage in project Singularity by HubSpot.
Class SingularityUsageTest, method itLimitsTheNumberOfTaskCleanupsToCreate:
@Test
public void itLimitsTheNumberOfTaskCleanupsToCreate() {
  try {
    configuration.setShuffleTasksForOverloadedSlaves(true);
    configuration.setMaxTasksToShuffleTotal(1);
    initRequest();
    initFirstDeployWithResources(configuration.getMesosConfiguration().getDefaultCpus(), configuration.getMesosConfiguration().getDefaultMemory());
    saveAndSchedule(requestManager.getRequest(requestId).get().getRequest().toBuilder().setInstances(Optional.of(3)));
    resourceOffers(1);
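    // A usage sample whose cpu figures exceed what is reserved, so the shuffle logic should treat host1 as overloaded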
    SingularitySlaveUsage highUsage = new SingularitySlaveUsage(15, 10, Optional.of(10.0), 1, 1, Optional.of(30L), 1, 1, Optional.of(1024L), Collections.emptyMap(), 1, System.currentTimeMillis(), 1, 30000, 10, 15, 15, 15, 0, 107374182);
    usageManager.saveSpecificSlaveUsageAndSetCurrent("host1", highUsage);
    SingularityTaskId taskId1 = taskManager.getActiveTaskIds().get(0);
    String t1 = taskId1.getId();
    SingularityTaskId taskId2 = taskManager.getActiveTaskIds().get(1);
    String t2 = taskId2.getId();
    statusUpdate(taskManager.getTask(taskId1).get(), TaskState.TASK_STARTING, Optional.of(taskId1.getStartedAt()));
    statusUpdate(taskManager.getTask(taskId2).get(), TaskState.TASK_STARTING, Optional.of(taskId2.getStartedAt()));
    // task 1: 15 CPU-seconds over 5 seconds => 3 cpus used
    MesosTaskMonitorObject t1u1 = getTaskMonitor(t1, 15, TimeUnit.MILLISECONDS.toSeconds(taskId1.getStartedAt()) + 5, 1024);
    // task 2: 10 CPU-seconds over 5 seconds => 2 cpus used
    MesosTaskMonitorObject t2u1 = getTaskMonitor(t2, 10, TimeUnit.MILLISECONDS.toSeconds(taskId2.getStartedAt()) + 5, 1024);
    mesosClient.setSlaveResourceUsage("host1", Arrays.asList(t1u1, t2u1));
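    // Slave-level metrics snapshot; the nonzero positional values (10.0 and 15) mirror the high cpu totals and load reported above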
    mesosClient.setSlaveMetricsSnapshot("host1", new MesosSlaveMetricsSnapshotObject(0, 0, 0, 10.0, 0, 0, 0, 0, 0, 0, 0, 0, 10.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0));
    usagePoller.runActionOnPoll();
    // The first task is cleaned up
    Assert.assertEquals(taskManager.getTaskCleanup(taskId1.getId()).get().getCleanupType(), TaskCleanupType.REBALANCE_CPU_USAGE);
    // The second task is not cleaned up, due to the cluster-wide shuffle limit
    Assert.assertFalse(taskManager.getTaskCleanup(taskId2.getId()).isPresent());
  } finally {
    configuration.setShuffleTasksForOverloadedSlaves(false);
    configuration.setMaxTasksToShuffleTotal(6);
  }
}
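The try/finally wrapper resets the shuffle settings when the test ends, so the configuration change cannot leak into other tests that share the same configuration object.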