Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
From the class TestDistributedScheduling, method testAMOpportunistic.
/**
 * Check if an AM can ask for opportunistic containers and get them.
 * @throws Exception
 */
@Test
public void testAMOpportunistic() throws Exception {
  // Basic container to request
  Resource capability = Resource.newInstance(1024, 1);
  Priority priority = Priority.newInstance(1);
  // Get the cluster topology
  List<NodeReport> nodeReports = rmClient.getNodeReports(NodeState.RUNNING);
  String node = nodeReports.get(0).getNodeId().getHost();
  String rack = nodeReports.get(0).getRackName();
  String[] nodes = new String[] { node };
  String[] racks = new String[] { rack };
  // Create an AM to request resources
  AMRMClient<AMRMClient.ContainerRequest> amClient = null;
  try {
    amClient = new AMRMClientImpl<AMRMClient.ContainerRequest>(client);
    amClient.init(yarnConf);
    amClient.start();
    amClient.registerApplicationMaster(NetUtils.getHostname(), 1024, "");
    // AM requests an opportunistic container
    ExecutionTypeRequest execTypeRequest =
        ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true);
    ContainerRequest containerRequest = new AMRMClient.ContainerRequest(
        capability, nodes, racks, priority, 0, true, null, execTypeRequest);
    amClient.addContainerRequest(containerRequest);
    // Wait until the container is allocated
    ContainerId opportunisticContainerId = null;
    for (int i = 0; i < 10 && opportunisticContainerId == null; i++) {
      AllocateResponse allocResponse = amClient.allocate(0.1f);
      List<Container> allocatedContainers =
          allocResponse.getAllocatedContainers();
      for (Container allocatedContainer : allocatedContainers) {
        // Check that this is the container we requested
        assertEquals(ExecutionType.OPPORTUNISTIC,
            allocatedContainer.getExecutionType());
        opportunisticContainerId = allocatedContainer.getId();
      }
      sleep(100);
    }
    assertNotNull(opportunisticContainerId);
    // The RM sees the container as OPPORTUNISTIC
    ResourceScheduler scheduler =
        cluster.getResourceManager().getResourceScheduler();
    RMContainer rmContainer =
        scheduler.getRMContainer(opportunisticContainerId);
    assertEquals(ExecutionType.OPPORTUNISTIC, rmContainer.getExecutionType());
    // Release the opportunistic container
    amClient.releaseAssignedContainer(opportunisticContainerId);
    // Wait for the released container to appear among the completed containers
    boolean released = false;
    for (int i = 0; i < 10 && !released; i++) {
      AllocateResponse allocResponse = amClient.allocate(0.1f);
      List<ContainerStatus> completedContainers =
          allocResponse.getCompletedContainersStatuses();
      for (ContainerStatus completedContainer : completedContainers) {
        ContainerId completedContainerId = completedContainer.getContainerId();
        assertEquals(completedContainerId, opportunisticContainerId);
        released = true;
      }
      if (!released) {
        sleep(100);
      }
    }
    assertTrue(released);
    // The RM shouldn't see the container anymore
    rmContainer = scheduler.getRMContainer(opportunisticContainerId);
    assertNull(rmContainer);
    // Clean up the AM
    amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,
        null, null);
  } finally {
    if (amClient != null
        && amClient.getServiceState() == Service.STATE.STARTED) {
      amClient.close();
    }
  }
}
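The ResourceScheduler interaction in this test boils down to two pieces: the ExecutionTypeRequest attached to the ContainerRequest, and the scheduler-side lookup via getRMContainer. Below is a condensed, hedged sketch of that round trip; the helper name is hypothetical, and it assumes an already-started amClient and the RM's ResourceScheduler, exactly as in the test above.

// Hypothetical helper condensing the pattern from the test above; assumes
// amClient is a started AMRMClient and scheduler is the RM's ResourceScheduler.
static void requestAndVerifyOpportunisticContainer(
    AMRMClient<AMRMClient.ContainerRequest> amClient,
    ResourceScheduler scheduler) throws Exception {
  // enforceExecutionType=true asks the scheduler not to downgrade the request
  ExecutionTypeRequest execType =
      ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true);
  amClient.addContainerRequest(new AMRMClient.ContainerRequest(
      Resource.newInstance(1024, 1), null, null, Priority.newInstance(1),
      0, true, null, execType));
  // Poll allocate() until a container arrives, then confirm the RM tracks
  // it with the same execution type.
  for (int i = 0; i < 10; i++) {
    for (Container c : amClient.allocate(0.1f).getAllocatedContainers()) {
      RMContainer rmContainer = scheduler.getRMContainer(c.getId());
      assertEquals(ExecutionType.OPPORTUNISTIC, rmContainer.getExecutionType());
      return;
    }
    Thread.sleep(100);
  }
  fail("No opportunistic container was allocated");
}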
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
From the class RMServerUtils, method checkSchedContainerChangeRequest.
/**
 * Validate an increase/decrease request.
 * <pre>
 * - Throw exception when any other error happens
 * </pre>
 */
public static void checkSchedContainerChangeRequest(
    SchedContainerChangeRequest request, boolean increase)
    throws InvalidResourceRequestException {
  RMContext rmContext = request.getRmContext();
  ContainerId containerId = request.getContainerId();
  RMContainer rmContainer = request.getRMContainer();
  Resource targetResource = request.getTargetCapacity();
  // Compare targetResource and original resource
  Resource originalResource = rmContainer.getAllocatedResource();
  // <20G, 8>
  if (increase) {
    if (originalResource.getMemorySize() > targetResource.getMemorySize()
        || originalResource.getVirtualCores() > targetResource.getVirtualCores()) {
      String msg = "Trying to increase a container, but target resource has some"
          + " resource < original resource, target=" + targetResource
          + " original=" + originalResource + " containerId=" + containerId;
      throw new InvalidResourceRequestException(msg);
    }
  } else {
    if (originalResource.getMemorySize() < targetResource.getMemorySize()
        || originalResource.getVirtualCores() < targetResource.getVirtualCores()) {
      String msg = "Trying to decrease a container, but target resource has "
          + "some resource > original resource, target=" + targetResource
          + " original=" + originalResource + " containerId=" + containerId;
      throw new InvalidResourceRequestException(msg);
    }
  }
  // Target resource of the increase request is more than the NM can offer
  ResourceScheduler scheduler = rmContext.getScheduler();
  RMNode rmNode = request.getSchedulerNode().getRMNode();
  if (!Resources.fitsIn(scheduler.getResourceCalculator(),
      scheduler.getClusterResource(), targetResource,
      rmNode.getTotalCapability())) {
    String msg = "Target resource=" + targetResource + " of containerId="
        + containerId + " is more than node's total resource="
        + rmNode.getTotalCapability();
    throw new InvalidResourceRequestException(msg);
  }
}
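For an increase the target must be greater than or equal to the original allocation in every dimension, and for a decrease less than or equal; a mixed change (memory up, vcores down) is rejected either way. A small illustration with made-up values, using a hypothetical helper that mirrors the increase branch above:

// Illustration only; isValidIncrease is a hypothetical helper mirroring the
// increase branch of checkSchedContainerChangeRequest.
static boolean isValidIncrease(Resource original, Resource target) {
  return original.getMemorySize() <= target.getMemorySize()
      && original.getVirtualCores() <= target.getVirtualCores();
}

Resource original = Resource.newInstance(2048, 2);
// Growing both dimensions (or keeping one equal) is a valid increase.
assertTrue(isValidIncrease(original, Resource.newInstance(4096, 2)));
// Growing memory while shrinking vcores is rejected as an increase
// (and, symmetrically, as a decrease).
assertFalse(isValidIncrease(original, Resource.newInstance(4096, 1)));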
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
From the class RMServerUtils, method validateIncreaseDecreaseRequest.
// Sanity check and normalize target resource
private static boolean validateIncreaseDecreaseRequest(RMContext rmContext,
    UpdateContainerRequest request, Resource maximumAllocation) {
  if (request.getCapability().getMemorySize() < 0
      || request.getCapability().getMemorySize() > maximumAllocation.getMemorySize()) {
    return false;
  }
  if (request.getCapability().getVirtualCores() < 0
      || request.getCapability().getVirtualCores() > maximumAllocation.getVirtualCores()) {
    return false;
  }
  ResourceScheduler scheduler = rmContext.getScheduler();
  request.setCapability(scheduler.getNormalizedResource(request.getCapability()));
  return true;
}
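The only scheduler call here is getNormalizedResource, which rounds the requested capability to the scheduler's allocation granularity, so the capability stored back into the request may differ from what the client sent. A hedged sketch of the effect, assuming a configured minimum allocation of 1024 MB and 1 vcore (the actual rounding depends on the scheduler's configuration):

// Illustration only: the rounded value depends on the configured scheduler's
// minimum/increment allocation; 1024 MB / 1 vcore is an assumption here.
ResourceScheduler scheduler = rmContext.getScheduler();
Resource asked = Resource.newInstance(700, 1);   // client asked for 700 MB
Resource normalized = scheduler.getNormalizedResource(asked);
// With a 1024 MB minimum allocation this would typically come back as
// <memory:1024, vCores:1>; other settings round differently.
System.out.println("asked=" + asked + ", normalized=" + normalized);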
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
From the class RMWebServices, method getSchedulerInfo.
@GET
@Path("/scheduler")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public SchedulerTypeInfo getSchedulerInfo() {
  init();
  ResourceScheduler rs = rm.getResourceScheduler();
  SchedulerInfo sinfo;
  if (rs instanceof CapacityScheduler) {
    CapacityScheduler cs = (CapacityScheduler) rs;
    CSQueue root = cs.getRootQueue();
    sinfo = new CapacitySchedulerInfo(root, cs);
  } else if (rs instanceof FairScheduler) {
    FairScheduler fs = (FairScheduler) rs;
    sinfo = new FairSchedulerInfo(fs);
  } else if (rs instanceof FifoScheduler) {
    sinfo = new FifoSchedulerInfo(this.rm);
  } else {
    throw new NotFoundException("Unknown scheduler configured");
  }
  return new SchedulerTypeInfo(sinfo);
}
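RMWebServices is mounted under /ws/v1/cluster, so this method backs the GET /ws/v1/cluster/scheduler endpoint, and the payload (CapacitySchedulerInfo, FairSchedulerInfo, or FifoSchedulerInfo) depends on which ResourceScheduler the RM is running. A minimal client-side sketch follows, assuming an RM whose web address is rm-host:8088 (an assumption about a particular cluster, not part of the source):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

// Standalone sketch of calling the endpoint served by getSchedulerInfo();
// rm-host:8088 is an assumed ResourceManager web address.
public class SchedulerInfoClient {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://rm-host:8088/ws/v1/cluster/scheduler");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);  // SchedulerTypeInfo serialized as JSON
      }
    }
  }
}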
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
From the class RMWebServices, method getNodes.
/**
 * Returns all nodes in the cluster. If the states param is given, returns
 * all nodes that are in the comma-separated list of states.
 */
@GET
@Path("/nodes")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public NodesInfo getNodes(@QueryParam("states") String states) {
  init();
  ResourceScheduler sched = this.rm.getResourceScheduler();
  if (sched == null) {
    throw new NotFoundException("Null ResourceScheduler instance");
  }
  EnumSet<NodeState> acceptedStates;
  if (states == null) {
    acceptedStates = EnumSet.allOf(NodeState.class);
  } else {
    acceptedStates = EnumSet.noneOf(NodeState.class);
    for (String stateStr : states.split(",")) {
      acceptedStates.add(NodeState.valueOf(StringUtils.toUpperCase(stateStr)));
    }
  }
  Collection<RMNode> rmNodes =
      RMServerUtils.queryRMNodes(this.rm.getRMContext(), acceptedStates);
  NodesInfo nodesInfo = new NodesInfo();
  for (RMNode rmNode : rmNodes) {
    NodeInfo nodeInfo = new NodeInfo(rmNode, sched);
    if (EnumSet.of(NodeState.LOST, NodeState.DECOMMISSIONED,
        NodeState.REBOOTED).contains(rmNode.getState())) {
      nodeInfo.setNodeHTTPAddress(EMPTY);
    }
    nodesInfo.add(nodeInfo);
  }
  return nodesInfo;
}
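Likewise, this method serves GET /ws/v1/cluster/nodes. The states filter is case-insensitive thanks to the toUpperCase call, while a value that does not match any NodeState constant makes NodeState.valueOf throw. A short hedged example of querying the filtered endpoint, reusing the approach from the scheduler sketch above (rm-host:8088 is again an assumed address):

// Only the URL differs from the scheduler example; the states values map to
// NodeState enum names and may be written in lower case.
URL url = new URL(
    "http://rm-host:8088/ws/v1/cluster/nodes?states=running,decommissioned");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestProperty("Accept", "application/xml");  // XML is also supported
// ... read the response exactly as in the scheduler example above.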