use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestDistributedScheduler method createResourceRequest.
private ResourceRequest createResourceRequest(ExecutionType execType, int numContainers, String resourceName) {
  ResourceRequest opportunisticReq = Records.newRecord(ResourceRequest.class);
  opportunisticReq.setExecutionTypeRequest(ExecutionTypeRequest.newInstance(execType, true));
  opportunisticReq.setNumContainers(numContainers);
  opportunisticReq.setCapability(Resource.newInstance(1024, 4));
  opportunisticReq.setPriority(Priority.newInstance(100));
  opportunisticReq.setRelaxLocality(true);
  opportunisticReq.setResourceName(resourceName);
  return opportunisticReq;
}
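The same request can also be built in a single call. This is a minimal sketch, assuming the ResourceRequest.newInstance overload that accepts an ExecutionTypeRequest (shipped with the opportunistic-container API in recent Hadoop releases); the concrete values simply mirror the helper above.

// Equivalent one-call construction: priority 100, any host, 1024 MB / 4 vcores,
// 4 containers, relaxLocality=true, no node-label expression.
ResourceRequest req = ResourceRequest.newInstance(
    Priority.newInstance(100), "*", Resource.newInstance(1024, 4), 4, true, null,
    ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true));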
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestDistributedScheduler method testDistributedScheduler.
@Test
public void testDistributedScheduler() throws Exception {
  Configuration conf = new Configuration();
  DistributedScheduler distributedScheduler = new DistributedScheduler();
  RequestInterceptor finalReqIntcptr = setup(conf, distributedScheduler);
  registerAM(distributedScheduler, finalReqIntcptr, Arrays.asList(
      RemoteNode.newInstance(NodeId.newInstance("a", 1), "http://a:1"),
      RemoteNode.newInstance(NodeId.newInstance("b", 2), "http://b:2")));
  final AtomicBoolean flipFlag = new AtomicBoolean(true);
  Mockito.when(finalReqIntcptr.allocateForDistributedScheduling(
      Mockito.any(DistributedSchedulingAllocateRequest.class)))
      .thenAnswer(new Answer<DistributedSchedulingAllocateResponse>() {

        @Override
        public DistributedSchedulingAllocateResponse answer(InvocationOnMock invocationOnMock) throws Throwable {
          flipFlag.set(!flipFlag.get());
          if (flipFlag.get()) {
            return createAllocateResponse(Arrays.asList(
                RemoteNode.newInstance(NodeId.newInstance("c", 3), "http://c:3"),
                RemoteNode.newInstance(NodeId.newInstance("d", 4), "http://d:4")));
          } else {
            return createAllocateResponse(Arrays.asList(
                RemoteNode.newInstance(NodeId.newInstance("d", 4), "http://d:4"),
                RemoteNode.newInstance(NodeId.newInstance("c", 3), "http://c:3")));
          }
        }
      });
  AllocateRequest allocateRequest = Records.newRecord(AllocateRequest.class);
  ResourceRequest guaranteedReq = createResourceRequest(ExecutionType.GUARANTEED, 5, "*");
  ResourceRequest opportunisticReq = createResourceRequest(ExecutionType.OPPORTUNISTIC, 4, "*");
  allocateRequest.setAskList(Arrays.asList(guaranteedReq, opportunisticReq));
  // Verify 4 containers were allocated
  AllocateResponse allocateResponse = distributedScheduler.allocate(allocateRequest);
  Assert.assertEquals(4, allocateResponse.getAllocatedContainers().size());
  // Verify equal distribution on hosts a and b, and none on c or d
  Map<NodeId, List<ContainerId>> allocs = mapAllocs(allocateResponse, 4);
  Assert.assertEquals(2, allocs.get(NodeId.newInstance("a", 1)).size());
  Assert.assertEquals(2, allocs.get(NodeId.newInstance("b", 2)).size());
  Assert.assertNull(allocs.get(NodeId.newInstance("c", 3)));
  Assert.assertNull(allocs.get(NodeId.newInstance("d", 4)));
  // New allocate request
  allocateRequest = Records.newRecord(AllocateRequest.class);
  opportunisticReq = createResourceRequest(ExecutionType.OPPORTUNISTIC, 6, "*");
  allocateRequest.setAskList(Arrays.asList(guaranteedReq, opportunisticReq));
  // Verify 6 containers were allocated
  allocateResponse = distributedScheduler.allocate(allocateRequest);
  Assert.assertEquals(6, allocateResponse.getAllocatedContainers().size());
  // Verify the new containers are equally distributed on hosts c and d,
  // and none on a or b
  allocs = mapAllocs(allocateResponse, 6);
  Assert.assertEquals(3, allocs.get(NodeId.newInstance("c", 3)).size());
  Assert.assertEquals(3, allocs.get(NodeId.newInstance("d", 4)).size());
  Assert.assertNull(allocs.get(NodeId.newInstance("a", 1)));
  Assert.assertNull(allocs.get(NodeId.newInstance("b", 2)));
  // Ensure the DistributedScheduler respects the list order:
  // the first request should be allocated to "d" since it is ranked higher,
  // and the second request should be allocated to "c" since the ranking is
  // flipped on every allocate response.
  allocateRequest = Records.newRecord(AllocateRequest.class);
  opportunisticReq = createResourceRequest(ExecutionType.OPPORTUNISTIC, 1, "*");
  allocateRequest.setAskList(Arrays.asList(guaranteedReq, opportunisticReq));
  allocateResponse = distributedScheduler.allocate(allocateRequest);
  allocs = mapAllocs(allocateResponse, 1);
  Assert.assertEquals(1, allocs.get(NodeId.newInstance("d", 4)).size());
  allocateRequest = Records.newRecord(AllocateRequest.class);
  opportunisticReq = createResourceRequest(ExecutionType.OPPORTUNISTIC, 1, "*");
  allocateRequest.setAskList(Arrays.asList(guaranteedReq, opportunisticReq));
  allocateResponse = distributedScheduler.allocate(allocateRequest);
  allocs = mapAllocs(allocateResponse, 1);
  Assert.assertEquals(1, allocs.get(NodeId.newInstance("c", 3)).size());
  allocateRequest = Records.newRecord(AllocateRequest.class);
  opportunisticReq = createResourceRequest(ExecutionType.OPPORTUNISTIC, 1, "*");
  allocateRequest.setAskList(Arrays.asList(guaranteedReq, opportunisticReq));
  allocateResponse = distributedScheduler.allocate(allocateRequest);
  allocs = mapAllocs(allocateResponse, 1);
  Assert.assertEquals(1, allocs.get(NodeId.newInstance("d", 4)).size());
}
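The test relies on a mapAllocs helper that is not reproduced here. The sketch below is a plausible reconstruction inferred from its call sites (group the allocated containers by node after checking the expected total); it is an assumption, not the actual Hadoop test code.

// Hypothetical reconstruction of the mapAllocs helper used above.
private Map<NodeId, List<ContainerId>> mapAllocs(AllocateResponse allocateResponse, int expectedSize) {
  Assert.assertEquals(expectedSize, allocateResponse.getAllocatedContainers().size());
  Map<NodeId, List<ContainerId>> allocs = new HashMap<>();
  for (Container c : allocateResponse.getAllocatedContainers()) {
    // Group container ids by the node they were allocated on.
    allocs.computeIfAbsent(c.getNodeId(), k -> new ArrayList<>()).add(c.getId());
  }
  return allocs;
}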
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class ContainerUpdateContext method matchContainerToOutstandingIncreaseReq.
/**
 * Check if a new container is to be matched up against an outstanding
 * Container increase request.
 * @param node SchedulerNode.
 * @param schedulerKey SchedulerRequestKey.
 * @param rmContainer RMContainer.
 * @return ContainerId of the matched outstanding increase, UNDEFINED if the
 *         pending requests were re-added for reallocation, or null if there
 *         is no outstanding increase for this scheduler key.
 */
public ContainerId matchContainerToOutstandingIncreaseReq(SchedulerNode node, SchedulerRequestKey schedulerKey, RMContainer rmContainer) {
  ContainerId retVal = null;
  Container container = rmContainer.getContainer();
  Map<Resource, Map<NodeId, Set<ContainerId>>> resourceMap = outstandingIncreases.get(schedulerKey);
  if (resourceMap != null) {
    Map<NodeId, Set<ContainerId>> locationMap = resourceMap.get(container.getResource());
    if (locationMap != null) {
      Set<ContainerId> containerIds = locationMap.get(container.getNodeId());
      if (containerIds != null && !containerIds.isEmpty()) {
        retVal = containerIds.iterator().next();
      }
    }
  }
  // If nothing matched, add these requests back so they can be reallocated.
  if (resourceMap != null && retVal == null) {
    Map<SchedulerRequestKey, Map<String, ResourceRequest>> reqsToUpdate = new HashMap<>();
    Map<String, ResourceRequest> resMap = createResourceRequests(rmContainer, node, schedulerKey, rmContainer.getContainer().getResource());
    reqsToUpdate.put(schedulerKey, resMap);
    appSchedulingInfo.addToPlacementSets(true, reqsToUpdate);
    return UNDEFINED;
  }
  return retVal;
}
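For orientation, a caller in the scheduler's allocation path would typically distinguish the three possible return values along these lines. This is an illustrative sketch only (updateContext, node, schedulerKey and rmContainer are assumed to come from the scheduler's state, and it assumes the UNDEFINED sentinel above is visible to the caller); it is not code from the Hadoop source.

// Illustrative only: reacting to the result of the match.
ContainerId matched = updateContext.matchContainerToOutstandingIncreaseReq(node, schedulerKey, rmContainer);
if (matched == null) {
  // No outstanding increase for this scheduler key: treat as a normal new allocation.
} else if (ContainerUpdateContext.UNDEFINED.equals(matched)) {
  // Outstanding increases exist but none match this container; the pending
  // requests were re-added to the placement sets for reallocation.
} else {
  // 'matched' identifies the container whose increase this allocation satisfies;
  // the scheduler can now swap the new container's resources into it.
}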
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class ContainerUpdateContext method checkAndAddToOutstandingIncreases.
/**
 * Add the container to outstanding increases.
 * @param rmContainer RMContainer.
 * @param schedulerNode SchedulerNode.
 * @param updateRequest UpdateContainerRequest.
 * @return true if the update was successfully added to outstanding increases.
 */
public synchronized boolean checkAndAddToOutstandingIncreases(RMContainer rmContainer, SchedulerNode schedulerNode, UpdateContainerRequest updateRequest) {
  Container container = rmContainer.getContainer();
  SchedulerRequestKey schedulerKey = SchedulerRequestKey.create(updateRequest, rmContainer.getAllocatedSchedulerKey());
  Map<Resource, Map<NodeId, Set<ContainerId>>> resourceMap = outstandingIncreases.get(schedulerKey);
  if (resourceMap == null) {
    resourceMap = new HashMap<>();
    outstandingIncreases.put(schedulerKey, resourceMap);
  } else {
    // Updating the Resource for an existing increase of this container
    if (ContainerUpdateType.INCREASE_RESOURCE == updateRequest.getContainerUpdateType()) {
      cancelPreviousRequest(schedulerNode, schedulerKey);
    } else {
      return false;
    }
  }
  Resource resToIncrease = getResourceToIncrease(updateRequest, rmContainer);
  Map<NodeId, Set<ContainerId>> locationMap = resourceMap.get(resToIncrease);
  if (locationMap == null) {
    locationMap = new HashMap<>();
    resourceMap.put(resToIncrease, locationMap);
  }
  Set<ContainerId> containerIds = locationMap.get(container.getNodeId());
  if (containerIds == null) {
    containerIds = new HashSet<>();
    locationMap.put(container.getNodeId(), containerIds);
  }
  if (outstandingDecreases.containsKey(container.getId())) {
    return false;
  }
  containerIds.add(container.getId());
  if (!Resources.isNone(resToIncrease)) {
    Map<SchedulerRequestKey, Map<String, ResourceRequest>> updateResReqs = new HashMap<>();
    Map<String, ResourceRequest> resMap = createResourceRequests(rmContainer, schedulerNode, schedulerKey, resToIncrease);
    updateResReqs.put(schedulerKey, resMap);
    appSchedulingInfo.addToPlacementSets(false, updateResReqs);
  }
  return true;
}
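To place the method in context, here is a minimal caller sketch. It assumes an UpdateContainerRequest built with the standard newInstance factory and that updateContext, rmContainer, container and schedulerNode are already available from the scheduler's state; the target capability is purely illustrative.

// Illustrative only: registering a resource-increase request with the update context.
UpdateContainerRequest increase = UpdateContainerRequest.newInstance(
    container.getVersion(), container.getId(),
    ContainerUpdateType.INCREASE_RESOURCE,
    Resource.newInstance(2048, 2), null);
if (!updateContext.checkAndAddToOutstandingIncreases(rmContainer, schedulerNode, increase)) {
  // A duplicate or conflicting update: the caller would surface this as an update error.
}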
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class ContainerUpdateContext method cancelPreviousRequest.
private void cancelPreviousRequest(SchedulerNode schedulerNode, SchedulerRequestKey schedulerKey) {
  SchedulingPlacementSet<SchedulerNode> schedulingPlacementSet = appSchedulingInfo.getSchedulingPlacementSet(schedulerKey);
  if (schedulingPlacementSet != null) {
    Map<String, ResourceRequest> resourceRequests = schedulingPlacementSet.getResourceRequests();
    ResourceRequest prevReq = resourceRequests.get(ResourceRequest.ANY);
    // Decrement the pending ask by "allocating" a placeholder container with
    // the capability of the previous update request.
    if (prevReq != null) {
      appSchedulingInfo.allocate(NodeType.OFF_SWITCH, schedulerNode, schedulerKey,
          Container.newInstance(UNDEFINED, schedulerNode.getNodeID(), "host:port",
              prevReq.getCapability(), schedulerKey.getPriority(), null));
    }
  }
}