Use of org.apache.hadoop.yarn.util.resource.ResourceCalculator in project hadoop by apache.
The class FiCaSchedulerApp, method getResourceUsageReport.
/**
 * Recalculates the per-app percent-of-queue metric, specific to the
 * Capacity Scheduler.
 */
@Override
public ApplicationResourceUsageReport getResourceUsageReport() {
  try {
    // Use the write lock here because
    // SchedulerApplicationAttempt#getResourceUsageReport updates fields.
    // TODO: improve this
    writeLock.lock();
    ApplicationResourceUsageReport report = super.getResourceUsageReport();
    Resource cluster = rmContext.getScheduler().getClusterResource();
    Resource totalPartitionRes = rmContext.getNodeLabelManager()
        .getResourceByLabel(getAppAMNodePartitionName(), cluster);
    ResourceCalculator calc = rmContext.getScheduler().getResourceCalculator();
    if (!calc.isInvalidDivisor(totalPartitionRes)) {
      float queueAbsMaxCapPerPartition = ((AbstractCSQueue) getQueue())
          .getQueueCapacities().getAbsoluteCapacity(getAppAMNodePartitionName());
      float queueUsagePerc = calc.divide(totalPartitionRes,
          report.getUsedResources(),
          Resources.multiply(totalPartitionRes, queueAbsMaxCapPerPartition)) * 100;
      report.setQueueUsagePercentage(queueUsagePerc);
    }
    return report;
  } finally {
    writeLock.unlock();
  }
}
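For context, ResourceCalculator.divide returns the ratio of two resources relative to a cluster resource, which is what produces the percent-of-queue figure above. The snippet below is a minimal, self-contained sketch of the same computation using a DefaultResourceCalculator; the class name, main wrapper, and all numbers are illustrative, not taken from the Hadoop sources.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public class QueueUsageSketch {
  public static void main(String[] args) {
    // Hypothetical partition and usage figures, chosen only for illustration.
    Resource partitionTotal = Resource.newInstance(100 * 1024, 100); // 100 GB, 100 vcores
    Resource appUsed = Resource.newInstance(10 * 1024, 10);          // 10 GB, 10 vcores
    float queueAbsCapacity = 0.5f;                                   // queue gets 50% of the partition

    ResourceCalculator calc = new DefaultResourceCalculator();
    if (!calc.isInvalidDivisor(partitionTotal)) {
      // Same shape as the FiCaSchedulerApp code above: used / (partition * queueCapacity).
      float queueUsagePerc = calc.divide(partitionTotal, appUsed,
          Resources.multiply(partitionTotal, queueAbsCapacity)) * 100;
      System.out.println("App uses " + queueUsagePerc + "% of its queue"); // 20.0
    }
  }
}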
Use of org.apache.hadoop.yarn.util.resource.ResourceCalculator in project hadoop by apache.
The class StageAllocatorLowCostAligned, method computeStageAllocation.
// computeJobAllocation()
@Override
public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan,
    Map<Long, Resource> planLoads, RLESparseResourceAllocation planModifications,
    ReservationRequest rr, long stageEarliestStart, long stageDeadline,
    String user, ReservationId oldId) {
  // Initialize
  ResourceCalculator resCalc = plan.getResourceCalculator();
  Resource capacity = plan.getTotalCapacity();
  long step = plan.getStep();
  // Create allocationRequests
  RLESparseResourceAllocation allocationRequests =
      new RLESparseResourceAllocation(plan.getResourceCalculator());
  // Initialize parameters
  long duration = stepRoundUp(rr.getDuration(), step);
  int windowSizeInDurations =
      (int) ((stageDeadline - stageEarliestStart) / duration);
  int totalGangs = rr.getNumContainers() / rr.getConcurrency();
  int numContainersPerGang = rr.getConcurrency();
  Resource gang = Resources.multiply(rr.getCapability(), numContainersPerGang);
  // Set maxGangsPerUnit
  int maxGangsPerUnit = (int) Math.max(
      Math.floor(((double) totalGangs) / windowSizeInDurations), 1);
  maxGangsPerUnit = Math.max(maxGangsPerUnit / smoothnessFactor, 1);
  // If the window size is too small, return null
  if (windowSizeInDurations <= 0) {
    return null;
  }
  // Initialize a tree sorted by cost
  TreeSet<DurationInterval> durationIntervalsSortedByCost =
      new TreeSet<DurationInterval>(new Comparator<DurationInterval>() {
        @Override
        public int compare(DurationInterval val1, DurationInterval val2) {
          int cmp = Double.compare(val1.getTotalCost(), val2.getTotalCost());
          if (cmp != 0) {
            return cmp;
          }
          return (-1) * Long.compare(val1.getEndTime(), val2.getEndTime());
        }
      });
  // Add durationIntervals that end at (endTime - n*duration) for some n.
  for (long intervalEnd = stageDeadline;
      intervalEnd >= stageEarliestStart + duration; intervalEnd -= duration) {
    long intervalStart = intervalEnd - duration;
    // Get the duration interval [intervalStart, intervalEnd)
    DurationInterval durationInterval = getDurationInterval(intervalStart,
        intervalEnd, planLoads, planModifications, capacity, resCalc, step);
    // If the interval can fit a gang, add it to the tree
    if (durationInterval.canAllocate(gang, capacity, resCalc)) {
      durationIntervalsSortedByCost.add(durationInterval);
    }
  }
  // Allocate
  int remainingGangs = totalGangs;
  while (remainingGangs > 0) {
    // If no durationInterval can fit a gang, break and return null
    if (durationIntervalsSortedByCost.isEmpty()) {
      break;
    }
    // Get the best (cheapest) duration interval
    DurationInterval bestDurationInterval =
        durationIntervalsSortedByCost.first();
    int numGangsToAllocate = Math.min(maxGangsPerUnit, remainingGangs);
    numGangsToAllocate = Math.min(numGangsToAllocate,
        bestDurationInterval.numCanFit(gang, capacity, resCalc));
    // Add it
    remainingGangs -= numGangsToAllocate;
    ReservationInterval reservationInt = new ReservationInterval(
        bestDurationInterval.getStartTime(), bestDurationInterval.getEndTime());
    Resource reservationRes = Resources.multiply(rr.getCapability(),
        rr.getConcurrency() * numGangsToAllocate);
    planModifications.addInterval(reservationInt, reservationRes);
    allocationRequests.addInterval(reservationInt, reservationRes);
    // Remove the interval from the tree
    durationIntervalsSortedByCost.remove(bestDurationInterval);
    // Recompute the interval against the updated plan load
    DurationInterval updatedDurationInterval = getDurationInterval(
        bestDurationInterval.getStartTime(),
        bestDurationInterval.getStartTime() + duration, planLoads,
        planModifications, capacity, resCalc, step);
    // Re-add it to the tree, if it can still fit a gang
    if (updatedDurationInterval.canAllocate(gang, capacity, resCalc)) {
      durationIntervalsSortedByCost.add(updatedDurationInterval);
    }
  }
  // Get the final allocation
  Map<ReservationInterval, Resource> allocations =
      allocationRequests.toIntervalMap();
  // If no gangs are left to place, we succeed and return the allocation
  if (remainingGangs <= 0) {
    return allocations;
  } else {
    // We remove the unwanted side effects from planModifications (needed for ANY)
    for (Map.Entry<ReservationInterval, Resource> tempAllocation
        : allocations.entrySet()) {
      planModifications.removeInterval(tempAllocation.getKey(),
          tempAllocation.getValue());
    }
    // Return null to signal failure of this allocation
    return null;
  }
}
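The sizing logic above splits the request into "gangs" of rr.getConcurrency() containers and spreads them over windows of one rounded-up duration each. Below is a minimal arithmetic sketch of that sizing with made-up numbers; the stepRoundUp helper is inlined under the assumption that it rounds a duration up to the next multiple of the plan step.

public class GangSizingSketch {
  public static void main(String[] args) {
    long step = 1000L;                                    // plan step: 1 second
    long duration = ((17500L + step - 1) / step) * step;  // assumed stepRoundUp: 17500 ms -> 18000 ms
    long stageEarliestStart = 0L;
    long stageDeadline = 90000L;                          // 90 seconds
    int numContainers = 100;
    int concurrency = 10;                                 // containers that must run together (a "gang")
    int smoothnessFactor = 10;

    int windowSizeInDurations = (int) ((stageDeadline - stageEarliestStart) / duration); // 5
    int totalGangs = numContainers / concurrency;                                        // 10
    int maxGangsPerUnit = (int) Math.max(
        Math.floor(((double) totalGangs) / windowSizeInDurations), 1);                   // 2
    maxGangsPerUnit = Math.max(maxGangsPerUnit / smoothnessFactor, 1);                   // 1

    System.out.println(windowSizeInDurations + " windows, " + totalGangs
        + " gangs, at most " + maxGangsPerUnit + " gang(s) placed per window");
  }
}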
Use of org.apache.hadoop.yarn.util.resource.ResourceCalculator in project hadoop by apache.
The class TestSimpleCapacityReplanner, method testReplanningPlanCapacityLoss.
@Test
public void testReplanningPlanCapacityLoss() throws PlanningException {
  Resource clusterCapacity = Resource.newInstance(100 * 1024, 100);
  Resource minAlloc = Resource.newInstance(1024, 1);
  Resource maxAlloc = Resource.newInstance(1024 * 8, 8);
  ResourceCalculator res = new DefaultResourceCalculator();
  long step = 1L;
  Clock clock = mock(Clock.class);
  ReservationAgent agent = mock(ReservationAgent.class);
  SharingPolicy policy = new NoOverCommitPolicy();
  policy.init("root.dedicated", null);
  QueueMetrics queueMetrics = mock(QueueMetrics.class);
  when(clock.getTime()).thenReturn(0L);
  SimpleCapacityReplanner enf = new SimpleCapacityReplanner(clock);
  RMContext context = ReservationSystemTestUtil.createMockRMContext();
  ReservationSchedulerConfiguration conf =
      mock(ReservationSchedulerConfiguration.class);
  when(conf.getEnforcementWindow(any(String.class))).thenReturn(6L);
  enf.init("blah", conf);
  // Initialize the plan with more resources
  InMemoryPlan plan = new InMemoryPlan(queueMetrics, policy, agent,
      clusterCapacity, step, res, minAlloc, maxAlloc, "dedicated", enf, true,
      context, clock);
  // add reservations filling the plan (separated by 1ms, so we are sure
  // s2 follows s1 on acceptance)
  long ts = System.currentTimeMillis();
  ReservationId r1 = ReservationId.newInstance(ts, 1);
  int[] f5 = { 20, 20, 20, 20, 20 };
  ReservationDefinition rDef =
      ReservationSystemTestUtil.createSimpleReservationDefinition(
          0, 0 + f5.length, f5.length);
  assertTrue(plan.toString(),
      plan.addReservation(new InMemoryReservationAllocation(r1, rDef, "u3",
          "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
          minAlloc), false));
  when(clock.getTime()).thenReturn(1L);
  ReservationId r2 = ReservationId.newInstance(ts, 2);
  assertTrue(plan.toString(),
      plan.addReservation(new InMemoryReservationAllocation(r2, rDef, "u4",
          "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
          minAlloc), false));
  when(clock.getTime()).thenReturn(2L);
  ReservationId r3 = ReservationId.newInstance(ts, 3);
  assertTrue(plan.toString(),
      plan.addReservation(new InMemoryReservationAllocation(r3, rDef, "u5",
          "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
          minAlloc), false));
  when(clock.getTime()).thenReturn(3L);
  ReservationId r4 = ReservationId.newInstance(ts, 4);
  assertTrue(plan.toString(),
      plan.addReservation(new InMemoryReservationAllocation(r4, rDef, "u6",
          "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
          minAlloc), false));
  when(clock.getTime()).thenReturn(4L);
  ReservationId r5 = ReservationId.newInstance(ts, 5);
  assertTrue(plan.toString(),
      plan.addReservation(new InMemoryReservationAllocation(r5, rDef, "u7",
          "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
          minAlloc), false));
  int[] f6 = { 50, 50, 50, 50, 50 };
  ReservationId r6 = ReservationId.newInstance(ts, 6);
  assertTrue(plan.toString(),
      plan.addReservation(new InMemoryReservationAllocation(r6, rDef, "u3",
          "dedicated", 10, 10 + f6.length, generateAllocation(10, f6), res,
          minAlloc), false));
  when(clock.getTime()).thenReturn(6L);
  ReservationId r7 = ReservationId.newInstance(ts, 7);
  assertTrue(plan.toString(),
      plan.addReservation(new InMemoryReservationAllocation(r7, rDef, "u4",
          "dedicated", 10, 10 + f6.length, generateAllocation(10, f6), res,
          minAlloc), false));
  // remove some of the resources (requires replanning)
  plan.setTotalCapacity(Resource.newInstance(70 * 1024, 70));
  when(clock.getTime()).thenReturn(0L);
  // run the replanner
  enf.plan(plan, null);
  // check which reservations are still present
  assertNotNull(plan.getReservationById(r1));
  assertNotNull(plan.getReservationById(r2));
  assertNotNull(plan.getReservationById(r3));
  assertNotNull(plan.getReservationById(r6));
  assertNotNull(plan.getReservationById(r7));
  // ... and which ones were removed
  assertNull(plan.getReservationById(r4));
  assertNull(plan.getReservationById(r5));
  // check that the resources at each moment in time no longer exceed capacity
  for (int i = 0; i < 20; i++) {
    long tot = 0;
    for (ReservationAllocation r : plan.getReservationsAtTime(i)) {
      // sum the memory of all reservations active at time i
      tot += r.getResourcesAtTime(i).getMemorySize();
    }
    assertTrue(tot <= 70 * 1024);
  }
}
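generateAllocation is a private helper of this test class and is not shown in the snippet. A plausible stand-in, assuming each array entry is the number of 1 GB / 1-vcore containers wanted during one time step, could look like the sketch below; the name, packaging, and semantics are assumptions made only for illustration.

import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;

public class AllocationSketch {
  // Hypothetical stand-in for the test's generateAllocation(start, alloc) helper:
  // one interval per array entry, each requesting alloc[i] * (1 GB, 1 vcore).
  static Map<ReservationInterval, Resource> generateAllocation(
      int startTime, int[] alloc) {
    Map<ReservationInterval, Resource> allocations =
        new TreeMap<ReservationInterval, Resource>();
    for (int i = 0; i < alloc.length; i++) {
      allocations.put(new ReservationInterval(startTime + i, startTime + i + 1),
          Resource.newInstance(1024 * alloc[i], alloc[i]));
    }
    return allocations;
  }
}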
Use of org.apache.hadoop.yarn.util.resource.ResourceCalculator in project hadoop by apache.
The class TestReservationSystemUtil, method createReservationAllocation.
private ReservationAllocation createReservationAllocation(long startTime,
    long deadline, long step, int[] alloc, ReservationId id,
    Resource minAlloc) {
  Map<ReservationInterval, Resource> allocations =
      ReservationSystemTestUtil.generateAllocation(startTime, step, alloc);
  ResourceCalculator rs = mock(ResourceCalculator.class);
  ReservationDefinition definition =
      ReservationSystemTestUtil.createSimpleReservationDefinition(
          startTime, deadline, step);
  return new InMemoryReservationAllocation(id, definition, "user",
      ReservationSystemTestUtil.reservationQ, startTime, startTime + step,
      allocations, rs, minAlloc, false);
}
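For illustration only, a hypothetical call site inside the same test class might look like this; all argument values are made up and only show the expected argument shapes.

// Build a reservation of 10 containers per step over 5 steps, starting at t=0,
// with a 1 GB / 1 vcore minimum allocation (values are illustrative).
int[] alloc = { 10, 10, 10, 10, 10 };
ReservationId id = ReservationId.newInstance(System.currentTimeMillis(), 1);
ReservationAllocation allocation = createReservationAllocation(
    0L, 5L, 1L, alloc, id, Resource.newInstance(1024, 1));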
Use of org.apache.hadoop.yarn.util.resource.ResourceCalculator in project hadoop by apache.
The class TestRLESparseResourceAllocation, method testRangeOverlapping.
@Test
public void testRangeOverlapping() {
  ResourceCalculator resCalc = new DefaultResourceCalculator();
  RLESparseResourceAllocation r = new RLESparseResourceAllocation(resCalc);
  int[] alloc = { 10, 10, 10, 10, 10, 10 };
  int start = 100;
  Set<Entry<ReservationInterval, Resource>> inputs =
      generateAllocation(start, alloc, false).entrySet();
  for (Entry<ReservationInterval, Resource> ip : inputs) {
    r.addInterval(ip.getKey(), ip.getValue());
  }
  long s = r.getEarliestStartTime();
  long d = r.getLatestNonNullTime();
  // tries to trigger the "out-of-range" bug
  r = r.getRangeOverlapping(s, d);
  r = r.getRangeOverlapping(s - 1, d - 1);
  r = r.getRangeOverlapping(s + 1, d + 1);
}
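As a further illustration of what RLESparseResourceAllocation tracks, the sketch below adds two overlapping intervals and queries the capacity at a few points in time. The expected values in the comments assume that overlapping intervals accumulate, which is how planModifications is used in the stage allocator above; the class name, main wrapper, and numbers are illustrative.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.RLESparseResourceAllocation;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;

public class RleSketch {
  public static void main(String[] args) {
    // Two overlapping intervals; the run-length-encoded view should report
    // their sum where they overlap.
    RLESparseResourceAllocation rle =
        new RLESparseResourceAllocation(new DefaultResourceCalculator());
    rle.addInterval(new ReservationInterval(0, 10),
        Resource.newInstance(1024, 1));
    rle.addInterval(new ReservationInterval(5, 15),
        Resource.newInstance(2048, 2));

    System.out.println(rle.getCapacityAtTime(3));  // expected memory 1024, 1 vcore
    System.out.println(rle.getCapacityAtTime(7));  // expected memory 3072, 3 vcores
    System.out.println(rle.getCapacityAtTime(12)); // expected memory 2048, 2 vcores
    System.out.println(rle.getEarliestStartTime() + " .. "
        + rle.getLatestNonNullTime());             // expected 0 .. 15
  }
}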