Use of org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval in the Apache Hadoop project.
Class ClientRMService, method listReservations.
/**
 * Lists the reservation allocations that match the request's filters.
 *
 * The request may optionally narrow the results by reservation id, queue,
 * and a [start, end] time window; ACLs on the target queue are enforced
 * before any plan data is returned.
 *
 * @param requestInfo the filters (queue, optional reservation id, time
 *                    window, whether to include per-interval resources)
 * @return a response carrying the matching reservation allocation states
 * @throws YarnException if the reservation system is disabled, validation
 *                       fails, or the caller lacks the required ACLs
 * @throws IOException on RPC-level failures
 */
@Override
public ReservationListResponse listReservations(ReservationListRequest requestInfo) throws YarnException, IOException {
  // Reject the call up front when the reservation system is not enabled.
  checkReservationSytem(AuditConstants.LIST_RESERVATION_REQUEST);
  // Validate the request and resolve the plan backing the target queue.
  Plan plan = rValidator.validateReservationListRequest(reservationSystem, requestInfo);
  // Parse the (optional) reservation-id filter.
  ReservationId reservationId = null;
  String requestedId = requestInfo.getReservationId();
  if (requestedId != null && !requestedId.isEmpty()) {
    reservationId = ReservationId.parseReservationId(requestedId);
  }
  checkReservationACLs(requestInfo.getQueue(), AuditConstants.LIST_RESERVATION_REQUEST, reservationId);
  // Normalize the query window: negative start clamps to 0, and a
  // negative (absent) end time means "no upper bound".
  long windowStart = Math.max(requestInfo.getStartTime(), 0);
  long windowEnd = requestInfo.getEndTime() <= -1 ? Long.MAX_VALUE : requestInfo.getEndTime();
  Set<ReservationAllocation> reservations = plan.getReservations(reservationId, new ReservationInterval(windowStart, windowEnd));
  // Convert the plan's internal allocations into wire-level records.
  List<ReservationAllocationState> info = ReservationSystemUtil.convertAllocationsToReservationInfo(reservations, requestInfo.getIncludeResourceAllocations());
  ReservationListResponse response = recordFactory.newRecordInstance(ReservationListResponse.class);
  response.setReservationAllocationState(info);
  return response;
}
Use of org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval in the Apache Hadoop project.
Class StageAllocatorGreedy, method computeStageAllocation.
/**
 * Greedily places this stage's gangs right-to-left: each batch is packed
 * as close to the stage deadline as possible, then the deadline is pulled
 * back to the most constraining time found and the remaining gangs are
 * placed to its "left".
 *
 * @return a map from reserved time intervals to the resources booked in
 *         each, or {@code null} if not all gangs fit (in which case any
 *         tentative entries added to {@code planModifications} here are
 *         rolled back)
 */
@Override
public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan, Map<Long, Resource> planLoads, RLESparseResourceAllocation planModifications, ReservationRequest rr, long stageEarliestStart, long stageDeadline, String user, ReservationId oldId) throws PlanningException {
Resource totalCapacity = plan.getTotalCapacity();
Map<ReservationInterval, Resource> allocationRequests = new HashMap<ReservationInterval, Resource>();
// compute the gang as a resource and get the duration
Resource gang = Resources.multiply(rr.getCapability(), rr.getConcurrency());
long dur = rr.getDuration();
long step = plan.getStep();
// ceil the duration to the next multiple of the plan step
if (dur % step != 0) {
dur += (step - (dur % step));
}
// we know for sure that this division has no remainder (part of contract
// with user, validated before)
int gangsToPlace = rr.getNumContainers() / rr.getConcurrency();
int maxGang = 0;
// resources still available to this user over the stage window
RLESparseResourceAllocation netAvailable = plan.getAvailableResourceOverTime(user, oldId, stageEarliestStart, stageDeadline);
// subtract what earlier stages of this same request have already claimed
netAvailable = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), plan.getTotalCapacity(), netAvailable, planModifications, RLEOperator.subtract, stageEarliestStart, stageDeadline);
// keep placing gangs until done, or until the remaining window is too
// short to hold a full duration (an invalid range of times)
while (gangsToPlace > 0 && stageDeadline - dur >= stageEarliestStart) {
// as we run along we remember how many gangs we can fit, and what
// was the most constraining moment in time (we will restart just
// after that to place the next batch)
maxGang = gangsToPlace;
long minPoint = stageDeadline;
int curMaxGang = maxGang;
// move backward one plan step at a time across the candidate interval
// [stageDeadline - dur, stageDeadline)
for (long t = stageDeadline - plan.getStep(); t >= stageDeadline - dur && maxGang > 0; t = t - plan.getStep()) {
Resource netAvailableRes = netAvailable.getCapacityAtTime(t);
// compute maximum number of gangs we could fit
curMaxGang = (int) Math.floor(Resources.divide(plan.getResourceCalculator(), totalCapacity, netAvailableRes, gang));
// pick the minimum between available resources in this instant, and how
// many gangs we have to place
curMaxGang = Math.min(gangsToPlace, curMaxGang);
// track the minimum and the time at which it occurs
// (useful for next attempts)
if (curMaxGang <= maxGang) {
maxGang = curMaxGang;
minPoint = t;
}
}
// if at least one gang fit over the whole interval, book it and decrement
// gangsToPlace
if (maxGang > 0) {
gangsToPlace -= maxGang;
ReservationInterval reservationInt = new ReservationInterval(stageDeadline - dur, stageDeadline);
Resource reservationRes = Resources.multiply(rr.getCapability(), rr.getConcurrency() * maxGang);
// remember occupied space (plan is read-only till we find a plausible
// allocation for the entire request). This is needed since we might be
// placing other ReservationRequest within the same
// ReservationDefinition,
// and we must avoid double-counting the available resources
planModifications.addInterval(reservationInt, reservationRes);
allocationRequests.put(reservationInt, reservationRes);
}
// reset our new starting point (curDeadline) to the most constraining
// point so far, we will look "left" of that to find more places where
// to schedule gangs (for sure nothing on the "right" of this point can
// fit a full gang).
stageDeadline = minPoint;
}
// if no gangs are left to place we succeed and return the allocation
if (gangsToPlace == 0) {
return allocationRequests;
} else {
// roll back the tentative bookings made above (presumably needed so a
// failed stage, e.g. of type ANY, leaves no side effects -- confirm)
for (Map.Entry<ReservationInterval, Resource> tempAllocation : allocationRequests.entrySet()) {
planModifications.removeInterval(tempAllocation.getKey(), tempAllocation.getValue());
}
// and return null to signal failure in this allocation
return null;
}
}
Use of org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval in the Apache Hadoop project.
Class StageAllocatorGreedyRLE, method computeStageAllocation.
/**
 * Greedy stage allocation over a run-length-encoded (RLE) view of the net
 * available resources. The {@code allocateLeft} field selects direction:
 * left-to-right packs gangs as early as possible, right-to-left as late
 * as possible. After each batch the search window is shrunk past the most
 * constraining change-point found.
 *
 * @return a map from reserved intervals to resources, or {@code null} if
 *         the stage cannot be fully placed (tentative additions to
 *         {@code planModifications} are rolled back)
 */
@Override
public Map<ReservationInterval, Resource> computeStageAllocation(Plan plan, Map<Long, Resource> planLoads, RLESparseResourceAllocation planModifications, ReservationRequest rr, long stageEarliestStart, long stageDeadline, String user, ReservationId oldId) throws PlanningException {
// abort early if the interval is not satisfiable
if (stageEarliestStart + rr.getDuration() > stageDeadline) {
return null;
}
Map<ReservationInterval, Resource> allocationRequests = new HashMap<ReservationInterval, Resource>();
Resource totalCapacity = plan.getTotalCapacity();
// compute the gang as a resource and get the duration
Resource sizeOfGang = Resources.multiply(rr.getCapability(), rr.getConcurrency());
long dur = rr.getDuration();
long step = plan.getStep();
// ceil the duration to the next multiple of the plan step
if (dur % step != 0) {
dur += (step - (dur % step));
}
// we know for sure that this division has no remainder (part of contract
// with user, validated before)
int gangsToPlace = rr.getNumContainers() / rr.getConcurrency();
// get available resources from plan
RLESparseResourceAllocation netRLERes = plan.getAvailableResourceOverTime(user, oldId, stageEarliestStart, stageDeadline);
// remove plan modifications (what earlier stages of this request claimed)
netRLERes = RLESparseResourceAllocation.merge(plan.getResourceCalculator(), totalCapacity, netRLERes, planModifications, RLEOperator.subtract, stageEarliestStart, stageDeadline);
// keep placing gangs until done, or until the remaining window is too
// short to hold a full duration (an invalid range of times)
while (gangsToPlace > 0 && stageEarliestStart + dur <= stageDeadline) {
// as we run along we remember how many gangs we can fit, and what
// was the most constraining moment in time (we will restart just
// after that to place the next batch)
int maxGang = gangsToPlace;
long minPoint = -1;
// focus our attention to a time-range under consideration
NavigableMap<Long, Resource> partialMap = netRLERes.getRangeOverlapping(stageEarliestStart, stageDeadline).getCumulative();
// revert the map for right-to-left allocation
if (!allocateLeft) {
partialMap = partialMap.descendingMap();
}
Iterator<Entry<Long, Resource>> netIt = partialMap.entrySet().iterator();
long oldT = stageDeadline;
// scan the RLE change-points until no more gangs fit in this
// interval (with outside loop)
while (maxGang > 0 && netIt.hasNext()) {
long t;
Resource curAvailRes;
Entry<Long, Resource> e = netIt.next();
if (allocateLeft) {
t = Math.max(e.getKey(), stageEarliestStart);
curAvailRes = e.getValue();
} else {
// right-to-left: the capacity valid at time t is carried by the
// previous change-point of the reversed map
t = oldT;
oldT = e.getKey();
//attention: higher means lower, because we reversed the map direction
curAvailRes = partialMap.higherEntry(t).getValue();
}
// check exit/skip conditions
if (curAvailRes == null) {
//skip undefined regions (should not happen beside borders)
continue;
}
if (exitCondition(t, stageEarliestStart, stageDeadline, dur)) {
break;
}
// compute maximum number of gangs we could fit
int curMaxGang = (int) Math.floor(Resources.divide(plan.getResourceCalculator(), totalCapacity, curAvailRes, sizeOfGang));
curMaxGang = Math.min(gangsToPlace, curMaxGang);
// track the minimum and the time at which it occurs
// (useful for next attempts)
if (curMaxGang <= maxGang) {
maxGang = curMaxGang;
minPoint = t;
}
}
// update data structures that retain the progress made so far
gangsToPlace = trackProgress(planModifications, rr, stageEarliestStart, stageDeadline, allocationRequests, dur, gangsToPlace, maxGang);
// reset the next range of time-intervals to deal with
if (allocateLeft) {
// no change-point past minPoint: just step past the
// end of this allocation
if (partialMap.higherKey(minPoint) == null) {
stageEarliestStart = stageEarliestStart + dur;
} else {
stageEarliestStart = Math.min(partialMap.higherKey(minPoint), stageEarliestStart + dur);
}
} else {
// same as above moving right-to-left
if (partialMap.higherKey(minPoint) == null) {
stageDeadline = stageDeadline - dur;
} else {
stageDeadline = Math.max(partialMap.higherKey(minPoint), stageDeadline - dur);
}
}
}
// if no gangs are left to place we succeed and return the allocation
if (gangsToPlace == 0) {
return allocationRequests;
} else {
// roll back the tentative bookings made above (presumably needed so a
// failed stage, e.g. of type ANY, leaves no side effects -- confirm)
for (Map.Entry<ReservationInterval, Resource> tempAllocation : allocationRequests.entrySet()) {
planModifications.removeInterval(tempAllocation.getKey(), tempAllocation.getValue());
}
// and return null to signal failure in this allocation
return null;
}
}
Use of org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval in the Apache Hadoop project.
Class StageAllocatorGreedyRLE, method trackProgress.
/**
 * Books the gangs placed in the current round: records the corresponding
 * interval both in the tentative plan modifications and in the stage's
 * allocation map, and returns the updated count of gangs still to place.
 *
 * @return {@code gangsToPlace} reduced by {@code maxGang} when a booking
 *         was made, unchanged otherwise
 */
private int trackProgress(RLESparseResourceAllocation planModifications, ReservationRequest rr, long stageEarliestStart, long stageDeadline, Map<ReservationInterval, Resource> allocationRequests, long dur, int gangsToPlace, int maxGang) {
  if (maxGang <= 0) {
    // nothing fit in this round; leave the remaining count untouched
    return gangsToPlace;
  }
  ReservationInterval placedInterval = computeReservationInterval(stageEarliestStart, stageDeadline, dur);
  Resource placedResources = Resources.multiply(rr.getCapability(), rr.getConcurrency() * maxGang);
  // The plan itself stays read-only until the entire request is proven
  // feasible; planModifications accumulates tentative bookings so that
  // sibling ReservationRequests of the same ReservationDefinition do not
  // double-count the available resources.
  planModifications.addInterval(placedInterval, placedResources);
  allocationRequests.put(placedInterval, placedResources);
  return gangsToPlace - maxGang;
}
Use of org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval in the Apache Hadoop project.
Class IterativePlanner, method isNonPreemptiveAllocation.
/**
 * Checks whether a stage allocation is non-preemptive.
 *
 * Assumption: the intervals are non-intersecting (as returned by
 * computeStageAllocation()). We compute the parity (symmetric difference)
 * of all interval endpoints: interior endpoints of a contiguous run of
 * intervals appear twice and cancel out, so a non-preemptive allocation
 * leaves exactly the two outermost endpoints unmatched.
 *
 * @param curAlloc the stage allocation, interval to assigned resources
 * @return true iff exactly two endpoints remain unmatched
 */
private boolean isNonPreemptiveAllocation(Map<ReservationInterval, Resource> curAlloc) {
  // Loop-invariant zero resource, hoisted so it is not re-created on
  // every iteration.
  Resource zeroRes = Resource.newInstance(0, 0);
  Set<Long> endPoints = new HashSet<Long>(2 * curAlloc.size());
  for (Entry<ReservationInterval, Resource> entry : curAlloc.entrySet()) {
    // Ignore intervals with no allocation
    if (Resources.equals(entry.getValue(), zeroRes)) {
      continue;
    }
    ReservationInterval interval = entry.getKey();
    // Toggle each endpoint's membership: add if unseen, cancel otherwise.
    toggleEndPoint(endPoints, interval.getStartTime());
    toggleEndPoint(endPoints, interval.getEndTime());
  }
  // Non-preemptive only if endPoints is of size 2
  return (endPoints.size() == 2);
}

/** Adds the point if absent, removes it if present (parity toggle). */
private static void toggleEndPoint(Set<Long> endPoints, Long point) {
  // Set.add returns false when the element was already present.
  if (!endPoints.add(point)) {
    endPoints.remove(point);
  }
}
Aggregations