use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode in project hadoop by apache.
the class TestNodeLabelContainerAllocation method testQueuesWithoutAccessUsingPartitionedNodes.
@Test
public void testQueuesWithoutAccessUsingPartitionedNodes() throws Exception {
  /**
   * Test case: given the following queue structure:
   *
   * <pre>
   *           root
   *          /    \
   *         a      b
   *        (x)
   * </pre>
   *
   * Only a can access label=x. There are two nodes in the cluster: n1 has
   * label x and n2 has no label.
   *
   * With user-limit-factor=5, an application submitted to queue b that
   * requests an unbounded number of containers should be able to use up all
   * cluster resources.
   */
  CapacitySchedulerConfiguration csConf =
      new CapacitySchedulerConfiguration(this.conf);
  // Define top-level queues
  csConf.setQueues(CapacitySchedulerConfiguration.ROOT,
      new String[] { "a", "b" });
  csConf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100);
  final String A = CapacitySchedulerConfiguration.ROOT + ".a";
  csConf.setCapacity(A, 50);
  csConf.setAccessibleNodeLabels(A, toSet("x"));
  csConf.setCapacityByLabel(A, "x", 100);
  final String B = CapacitySchedulerConfiguration.ROOT + ".b";
  csConf.setCapacity(B, 50);
  csConf.setAccessibleNodeLabels(B, new HashSet<String>());
  csConf.setUserLimitFactor(B, 5);
  // Map node -> label
  mgr.addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x", false),
      NodeLabel.newInstance("y")));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
  // Inject the node label manager
  MockRM rm1 = new MockRM(csConf) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  // label = x
  MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB);
  // label = <empty>
  MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB);
  // app1 -> b
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
  // The application requests 50 * 1GB containers
  am1.allocate("*", 1 * GB, 50, new ArrayList<ContainerId>());
  // Drive 50 heartbeats on NM1/NM2
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
  SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId());
  // How many cycles we waited before anything was allocated on the
  // partitioned node
  int cycleWaited = 0;
  for (int i = 0; i < 50; i++) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    if (schedulerNode1.getNumContainers() == 0) {
      cycleWaited++;
    }
  }
  // We will wait 10 cycles before getting allocated on the partitioned node:
  // NM2 can hold 10 containers in total; excluding the already-allocated AM
  // container, 9 cycles fill up the non-partitioned node, and one more cycle
  // passes before the allocation lands on the partitioned node
  Assert.assertEquals(10, cycleWaited);
  // Both NM1 and NM2 launched 10 containers; cluster resources are exhausted
  checkLaunchedContainerNumOnNode(rm1, nm1.getNodeId(), 10);
  checkLaunchedContainerNumOnNode(rm1, nm2.getNodeId(), 10);
  rm1.close();
}
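The final assertions rely on a checkLaunchedContainerNumOnNode helper that this excerpt omits. A minimal sketch of what such a helper could look like, assuming it simply compares the SchedulerNode's container count (the actual helper in TestNodeLabelContainerAllocation may differ), using only calls already shown above:

private void checkLaunchedContainerNumOnNode(MockRM rm, NodeId nodeId,
    int expectedNum) {
  // Look up the SchedulerNode the CapacityScheduler tracks for this NM
  CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
  SchedulerNode node = cs.getSchedulerNode(nodeId);
  // The launched-container count includes the AM container, if any
  Assert.assertEquals(expectedNum, node.getNumContainers());
}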
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode in project hadoop by apache.
the class CapacitySchedulerPreemptionUtils method deductPreemptableResourcesBasedSelectedCandidates.
public static void deductPreemptableResourcesBasedSelectedCandidates(
    CapacitySchedulerPreemptionContext context,
    Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates) {
  for (Set<RMContainer> containers : selectedCandidates.values()) {
    for (RMContainer c : containers) {
      // Skip containers whose node is no longer tracked by the scheduler
      SchedulerNode schedulerNode =
          context.getScheduler().getSchedulerNode(c.getAllocatedNode());
      if (null == schedulerNode) {
        continue;
      }
      String partition = schedulerNode.getPartition();
      String queue = c.getQueueName();
      TempQueuePerPartition tq = context.getQueueByPartition(queue, partition);
      // Prefer the reserved resource; fall back to the allocated resource
      Resource res = c.getReservedResource();
      if (null == res) {
        res = c.getAllocatedResource();
      }
      if (null != res) {
        tq.deductActuallyToBePreempted(context.getResourceCalculator(),
            tq.totalPartitionResource, res);
        Collection<TempAppPerPartition> tas = tq.getApps();
        if (null == tas || tas.isEmpty()) {
          continue;
        }
        deductPreemptableResourcePerApp(context, tq.totalPartitionResource,
            tas, res, partition);
      }
    }
  }
}
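For context, this utility is typically invoked by a preemption candidate selector after a selection pass, so that later selectors do not double-count resources already marked for preemption. A minimal sketch of the calling shape, assuming a `context` and a selected `container` are in scope (hypothetical variable names, not the selector's actual code):

// Accumulate selected containers per application attempt, then deduct them
Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates = new HashMap<>();
selectedCandidates
    .computeIfAbsent(container.getApplicationAttemptId(), k -> new HashSet<>())
    .add(container);
CapacitySchedulerPreemptionUtils.deductPreemptableResourcesBasedSelectedCandidates(
    context, selectedCandidates);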
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode in project incubator-myriad by apache.
the class OfferLifeCycleManagerTest method setUp.
@Before
public void setUp() throws Exception {
  NodeStore store = new NodeStore();
  NodeIdProto nodeId =
      NodeIdProto.newBuilder().setHost("localhost").setPort(8000).build();
  RMNode rmNode = new RMNodeImpl(new NodeIdPBImpl(nodeId), new MockRMContext(),
      "localhost", 8000, 8070, new NodeBase(), new ResourcePBImpl(), "1.0");
  // FiCaSchedulerNode is the CapacityScheduler-specific SchedulerNode;
  // 'false' means the port is not included in the node name
  SchedulerNode node = new FiCaSchedulerNode(rmNode, false);
  store.add(node);
  manager = new OfferLifecycleManager(store,
      new MyriadDriver(new MockSchedulerDriver()));
}
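As a sanity check, the fixture's SchedulerNode can be asserted on directly at the end of setUp. A small sketch using only getters shown elsewhere on this page (JUnit's Assert is assumed to be imported):

// Verify the node registered above carries the expected identity and
// starts with no running containers
Assert.assertEquals("localhost", node.getNodeID().getHost());
Assert.assertEquals(8000, node.getNodeID().getPort());
Assert.assertEquals(0, node.getNumContainers());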
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode in project incubator-myriad by apache.
the class LeastAMNodesFirstPolicy method onNodeUpdated.
/**
 * Called whenever an NM heartbeats to the RM. The NM's updates will already
 * have been recorded in the SchedulerNode before this method is called.
 *
 * @param event the node update event carrying the heartbeating node
 */
private void onNodeUpdated(NodeUpdateSchedulerEvent event) {
  NodeId nodeID = event.getRMNode().getNodeID();
  SchedulerNode schedulerNode = yarnScheduler.getSchedulerNode(nodeID);
  // Keep track of only one node per host
  schedulerNodes.put(nodeID.getHost(), schedulerNode);
}
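The cached map is what lets the policy rank hosts by how many ApplicationMasters they run. A sketch of how that count could be derived from a SchedulerNode, assuming the standard RMContainer.isAMContainer() flag; the actual Myriad implementation may compute this differently:

// Count the AM containers currently running on a node
private int getNumAMContainers(SchedulerNode node) {
  int count = 0;
  for (RMContainer container : node.getCopiedListOfRunningContainers()) {
    if (container.isAMContainer()) {
      count++;
    }
  }
  return count;
}

Nodes with the lowest count would then be offered for scale-down first, minimizing the number of ApplicationMasters disturbed.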