Search in sources :

Example 1 with PlanningQuotaException

use of org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningQuotaException in project hadoop by apache.

From the class TestCapacityOverTimePolicy, the method testInstFailBySum:

@Test
public void testInstFailBySum() throws IOException, PlanningException {
    // Build an allocation whose per-step demand is 30% of total capacity;
    // two such reservations fit, but a third exceeds the instantaneous cap.
    int[] alloc = generateData(3600, (int) Math.ceil(0.3 * totCont));
    ReservationDefinition rDef = ReservationSystemTestUtil.createSimpleReservationDefinition(initTime, initTime + alloc.length + 1, alloc.length);
    // The first two identical reservations must both be accepted.
    for (int i = 0; i < 2; i++) {
        assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(ReservationSystemTestUtil.getNewReservationId(), rDef, "u1", "dedicated", initTime, initTime + alloc.length, ReservationSystemTestUtil.generateAllocation(initTime, step, alloc), res, minAlloc), false));
    }
    // The third identical reservation must be rejected with a quota violation.
    try {
        assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(ReservationSystemTestUtil.getNewReservationId(), rDef, "u1", "dedicated", initTime, initTime + alloc.length, ReservationSystemTestUtil.generateAllocation(initTime, step, alloc), res, minAlloc), false));
        Assert.fail();
    } catch (PlanningQuotaException expected) {
        // expected: sum of concurrent reservations exceeds the instantaneous cap
    }
}
Also used : PlanningQuotaException(org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningQuotaException) ReservationDefinition(org.apache.hadoop.yarn.api.records.ReservationDefinition) Test(org.junit.Test)

Example 2 with PlanningQuotaException

use of org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningQuotaException in project hadoop by apache.

From the class CapacityOverTimePolicy, the method validate:

/**
   * The validation algorithm walks over the RLE encoded allocation and
   * checks that for all transition points (when the start or end of the
   * checking window encounters a value in the RLE). At this point it
   * checks whether the integral computed exceeds the quota limit. Note that
   * this might not find the exact time of a violation, but if a violation
   * exists it will find it. The advantage is a much lower number of checks
   * as compared to time-slot by time-slot checks.
   *
   * @param plan the plan to validate against
   * @param reservation the reservation allocation to test.
   * @throws PlanningException if the validation fails.
   */
@Override
public void validate(Plan plan, ReservationAllocation reservation) throws PlanningException {
    // Delegate the instantaneous checks to the superclass: user match,
    // cluster limits, and 3) maxInst (via override of available)
    try {
        super.validate(plan, reservation);
    } catch (PlanningException p) {
        //wrap it in proper quota exception
        throw new PlanningQuotaException(p);
    }
    //---- check for integral violations of capacity --------
    // Gather a view of what to check (curr allocation of user, minus old
    // version of this reservation, plus new version)
    RLESparseResourceAllocation consumptionForUserOverTime = plan.getConsumptionForUserOverTime(reservation.getUser(), reservation.getStartTime() - validWindow, reservation.getEndTime() + validWindow);
    ReservationAllocation old = plan.getReservationById(reservation.getReservationId());
    if (old != null) {
        // NOTE(review): the old version is combined with RLEOperator.add here;
        // confirm against callers whether getConsumptionForUserOverTime already
        // excludes this reservation (otherwise it would be counted twice).
        consumptionForUserOverTime = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), consumptionForUserOverTime, old.getResourcesOverTime(), RLEOperator.add, reservation.getStartTime() - validWindow, reservation.getEndTime() + validWindow);
    }
    // Overlay the proposed reservation on top of the user's consumption.
    RLESparseResourceAllocation resRLE = reservation.getResourcesOverTime();
    RLESparseResourceAllocation toCheck = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), consumptionForUserOverTime, resRLE, RLEOperator.add, Long.MIN_VALUE, Long.MAX_VALUE);
    // integralUp accumulates the windowed integral at each up-front (value
    // start); integralDown records the matching decrement validWindow later.
    NavigableMap<Long, Resource> integralUp = new TreeMap<>();
    NavigableMap<Long, Resource> integralDown = new TreeMap<>();
    long prevTime = toCheck.getEarliestStartTime();
    IntegralResource prevResource = new IntegralResource(0L, 0L);
    IntegralResource runningTot = new IntegralResource(0L, 0L);
    // add intermediate points
    // For any RLE segment longer than validWindow, insert synthetic points
    // every validWindow so the sliding-window integral is still sampled there.
    Map<Long, Resource> temp = new TreeMap<>();
    for (Map.Entry<Long, Resource> pointToCheck : toCheck.getCumulative().entrySet()) {
        Long timeToCheck = pointToCheck.getKey();
        Resource resourceToCheck = pointToCheck.getValue();
        Long nextPoint = toCheck.getCumulative().higherKey(timeToCheck);
        if (nextPoint == null || toCheck.getCumulative().get(nextPoint) == null) {
            continue;
        }
        for (int i = 1; i <= (nextPoint - timeToCheck) / validWindow; i++) {
            temp.put(timeToCheck + (i * validWindow), resourceToCheck);
        }
    }
    // Real transition points override/join the synthetic ones.
    temp.putAll(toCheck.getCumulative());
    // compute point-wise integral for the up-fronts and down-fronts
    for (Map.Entry<Long, Resource> currPoint : temp.entrySet()) {
        Long currTime = currPoint.getKey();
        Resource currResource = currPoint.getValue();
        //add to running total current contribution
        // (previous level held for (currTime - prevTime) units of time)
        prevResource.multiplyBy(currTime - prevTime);
        runningTot.add(prevResource);
        integralUp.put(currTime, normalizeToResource(runningTot, validWindow));
        // the same contribution falls out of the window validWindow later
        integralDown.put(currTime + validWindow, normalizeToResource(runningTot, validWindow));
        if (currResource != null) {
            prevResource.memory = currResource.getMemorySize();
            prevResource.vcores = currResource.getVirtualCores();
        } else {
            // a null value marks the end of an RLE segment: level drops to zero
            prevResource.memory = 0L;
            prevResource.vcores = 0L;
        }
        prevTime = currTime;
    }
    // compute final integral as delta of up minus down transitions
    RLESparseResourceAllocation intUp = new RLESparseResourceAllocation(integralUp, plan.getResourceCalculator());
    RLESparseResourceAllocation intDown = new RLESparseResourceAllocation(integralDown, plan.getResourceCalculator());
    RLESparseResourceAllocation integral = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), intUp, intDown, RLEOperator.subtract, Long.MIN_VALUE, Long.MAX_VALUE);
    // define over-time integral limit
    // note: this is aligned with the normalization done above
    NavigableMap<Long, Resource> tlimit = new TreeMap<>();
    Resource maxAvgRes = Resources.multiply(plan.getTotalCapacity(), maxAvg);
    tlimit.put(toCheck.getEarliestStartTime() - validWindow, maxAvgRes);
    RLESparseResourceAllocation targetLimit = new RLESparseResourceAllocation(tlimit, plan.getResourceCalculator());
    // compare using merge() limit with integral
    // subtractTestNonNegative throws if (limit - integral) goes negative
    // anywhere in the checked window, i.e. the average quota is violated.
    try {
        RLESparseResourceAllocation.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), targetLimit, integral, RLEOperator.subtractTestNonNegative, reservation.getStartTime() - validWindow, reservation.getEndTime() + validWindow);
    } catch (PlanningException p) {
        throw new PlanningQuotaException("Integral (avg over time) quota capacity " + maxAvg + " over a window of " + validWindow / 1000 + " seconds, " + " would be exceeded by accepting reservation: " + reservation.getReservationId(), p);
    }
}
Also used : PlanningQuotaException(org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningQuotaException) Resource(org.apache.hadoop.yarn.api.records.Resource) TreeMap(java.util.TreeMap) PlanningException(org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException) TreeMap(java.util.TreeMap) Map(java.util.Map) NavigableMap(java.util.NavigableMap)

Example 3 with PlanningQuotaException

use of org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningQuotaException in project hadoop by apache.

From the class TestCapacityOverTimePolicy, the method testFailAvgBySum:

@Test
public void testFailAvgBySum() throws IOException, PlanningException {
    // generate an allocation which violates the 25% average by sum:
    // each reservation uses 50% of the containers for just over a quarter
    // of a day, so two of them together break the average-over-window quota.
    Map<ReservationInterval, Resource> req = new TreeMap<ReservationInterval, Resource>();
    long win = 86400000 / 4 + 1;
    int cont = (int) Math.ceil(0.5 * totCont);
    req.put(new ReservationInterval(initTime, initTime + win), ReservationSystemUtil.toResource(ReservationRequest.newInstance(Resource.newInstance(1024, 1), cont)));
    ReservationDefinition rDef = ReservationSystemTestUtil.createSimpleReservationDefinition(initTime, initTime + win, win);
    // The first reservation fits within the average quota and is accepted.
    assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(ReservationSystemTestUtil.getNewReservationId(), rDef, "u1", "dedicated", initTime, initTime + win, req, res, minAlloc), false));
    try {
        // A second identical reservation pushes the average past the quota.
        // Fixed: pass rDef (was null), consistent with the first add above.
        assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(ReservationSystemTestUtil.getNewReservationId(), rDef, "u1", "dedicated", initTime, initTime + win, req, res, minAlloc), false));
        Assert.fail("should not have accepted this");
    } catch (PlanningQuotaException e) {
        // expected: integral (average over time) quota exceeded
    }
}
Also used : PlanningQuotaException(org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningQuotaException) ReservationDefinition(org.apache.hadoop.yarn.api.records.ReservationDefinition) Resource(org.apache.hadoop.yarn.api.records.Resource) TreeMap(java.util.TreeMap) Test(org.junit.Test)

Aggregations

PlanningQuotaException (org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningQuotaException)3 TreeMap (java.util.TreeMap)2 ReservationDefinition (org.apache.hadoop.yarn.api.records.ReservationDefinition)2 Resource (org.apache.hadoop.yarn.api.records.Resource)2 Test (org.junit.Test)2 Map (java.util.Map)1 NavigableMap (java.util.NavigableMap)1 PlanningException (org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException)1