Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class TestAMRMClient, method triggerSchedulingWithNMHeartBeat.
/**
* Make sure we get allocations regardless of timing issues.
*/
private void triggerSchedulingWithNMHeartBeat() {
  // Simulate fair scheduler update thread
  RMContext context = yarnCluster.getResourceManager().getRMContext();
  if (context.getScheduler() instanceof FairScheduler) {
    FairScheduler scheduler = (FairScheduler) context.getScheduler();
    scheduler.update();
  }
  // Trigger NM's heartbeat to RM and trigger allocations
  for (RMNode rmNode : context.getRMNodes().values()) {
    context.getScheduler().handle(new NodeUpdateSchedulerEvent(rmNode));
  }
  if (context.getScheduler() instanceof FairScheduler) {
    FairScheduler scheduler = (FairScheduler) context.getScheduler();
    scheduler.update();
  }
}
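The core of the trick above is replaying a NodeUpdateSchedulerEvent for every registered node. A minimal, standalone sketch of that step, assuming a started MiniYARNCluster passed in as yarnCluster (the helper class and method names are illustrative, not from the Hadoop sources):

import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;

public final class NodeHeartbeatTrigger {
  // Illustrative helper: pushes one scheduling cycle by handing the scheduler
  // a node-update event for every node the RM currently knows about.
  public static void triggerOnce(MiniYARNCluster yarnCluster) {
    RMContext context = yarnCluster.getResourceManager().getRMContext();
    for (RMNode rmNode : context.getRMNodes().values()) {
      context.getScheduler().handle(new NodeUpdateSchedulerEvent(rmNode));
    }
  }
}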
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class RMWebAppFilter, method ahsRedirectPath.
private String ahsRedirectPath(String uri, RMWebApp rmWebApp) {
  // TODO: Commonize URL parsing code. Will be done in YARN-4642.
  String redirectPath = null;
  if (uri.contains("/cluster/")) {
    String[] parts = uri.split("/");
    if (parts.length > 3) {
      RMContext context = rmWebApp.getRMContext();
      String type = parts[2];
      ApplicationId appId = null;
      ApplicationAttemptId appAttemptId = null;
      ContainerId containerId = null;
      switch (type) {
        case "app":
          try {
            appId = Apps.toAppID(parts[3]);
          } catch (YarnRuntimeException | NumberFormatException e) {
            LOG.debug("Error parsing {} as an ApplicationId", parts[3], e);
            return redirectPath;
          }
          if (!context.getRMApps().containsKey(appId)) {
            redirectPath = pjoin(ahsPageURLPrefix, "app", appId);
          }
          break;
        case "appattempt":
          try {
            appAttemptId = ApplicationAttemptId.fromString(parts[3]);
          } catch (IllegalArgumentException e) {
            LOG.debug("Error parsing {} as an ApplicationAttemptId", parts[3], e);
            return redirectPath;
          }
          if (!context.getRMApps().containsKey(appAttemptId.getApplicationId())) {
            redirectPath = pjoin(ahsPageURLPrefix, "appattempt", appAttemptId);
          }
          break;
        case "container":
          try {
            containerId = ContainerId.fromString(parts[3]);
          } catch (IllegalArgumentException e) {
            LOG.debug("Error parsing {} as a ContainerId", parts[3], e);
            return redirectPath;
          }
          if (!context.getRMApps().containsKey(
              containerId.getApplicationAttemptId().getApplicationId())) {
            redirectPath = pjoin(ahsPageURLPrefix, "container", containerId);
          }
          break;
        default:
          break;
      }
    }
  }
  return redirectPath;
}
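The redirect decision hinges on how the request URI splits into segments: parts[2] names the entity type and parts[3] carries the id, and the id is only looked up in RMContext.getRMApps() to decide whether the RM still knows the application. A tiny self-contained illustration of that index arithmetic (the URI value is made up for the example):

public final class UriSplitDemo {
  public static void main(String[] args) {
    // Hypothetical URI, chosen only to show the indexing used above.
    String uri = "/cluster/app/application_1479266972226_0001";
    String[] parts = uri.split("/");
    // parts[0] = "" (before the leading slash), parts[1] = "cluster",
    // parts[2] = "app" (the entity type), parts[3] = the id to parse.
    System.out.println(parts[2] + " -> " + parts[3]);
  }
}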
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class TestDistributedShellWithNodeLabels, method initializeNodeLabels.
private void initializeNodeLabels() throws IOException {
  RMContext rmContext = distShellTest.yarnCluster.getResourceManager(0).getRMContext();
  // Setup node labels
  RMNodeLabelsManager labelsMgr = rmContext.getNodeLabelManager();
  Set<String> labels = new HashSet<String>();
  labels.add("x");
  labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(labels);
  // Setup queue access to node labels
  distShellTest.conf.set(PREFIX + "root.accessible-node-labels", "x");
  distShellTest.conf.set(PREFIX + "root.accessible-node-labels.x.capacity", "100");
  distShellTest.conf.set(PREFIX + "root.default.accessible-node-labels", "x");
  distShellTest.conf.set(PREFIX + "root.default.accessible-node-labels.x.capacity", "100");
  rmContext.getScheduler().reinitialize(distShellTest.conf, rmContext);
  // Fetch node-ids from yarn cluster
  NodeId[] nodeIds = new NodeId[NUM_NMS];
  for (int i = 0; i < NUM_NMS; i++) {
    NodeManager mgr = distShellTest.yarnCluster.getNodeManager(i);
    nodeIds[i] = mgr.getNMContext().getNodeId();
  }
  // Set label x to NM[1]
  labelsMgr.addLabelsToNode(ImmutableMap.of(nodeIds[1], labels));
}
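After addLabelsToNode, the node-to-label mapping can be read back from the same RMNodeLabelsManager via its getNodeLabels() accessor. A hedged verification sketch (the helper class and method names are illustrative, not from the test):

import java.util.Map;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;

final class NodeLabelCheck {
  // Illustrative helper: returns true if the given node currently carries the label.
  static boolean nodeHasLabel(RMNodeLabelsManager labelsMgr, NodeId nodeId, String label) {
    Map<NodeId, Set<String>> nodeLabels = labelsMgr.getNodeLabels();
    Set<String> labels = nodeLabels.get(nodeId);
    return labels != null && labels.contains(label);
  }
}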
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class TestSystemMetricsPublisherForV2, method setup.
@BeforeClass
public static void setup() throws Exception {
  if (testRootDir.exists()) {
    // cleanup beforehand
    FileContext.getLocalFSFileContext()
        .delete(new Path(testRootDir.getAbsolutePath()), true);
  }
  RMContext rmContext = mock(RMContext.class);
  rmAppsMapInContext = new ConcurrentHashMap<ApplicationId, RMApp>();
  when(rmContext.getRMApps()).thenReturn(rmAppsMapInContext);
  rmTimelineCollectorManager = new RMTimelineCollectorManager(rmContext);
  when(rmContext.getRMTimelineCollectorManager()).thenReturn(rmTimelineCollectorManager);
  Configuration conf = getTimelineV2Conf();
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
      FileSystemTimelineWriterImpl.class, TimelineWriter.class);
  rmTimelineCollectorManager.init(conf);
  rmTimelineCollectorManager.start();
  dispatcher.init(conf);
  dispatcher.start();
  metricsPublisher = new TimelineServiceV2Publisher(rmContext) {
    @Override
    protected Dispatcher getDispatcher() {
      return dispatcher;
    }
  };
  metricsPublisher.init(conf);
  metricsPublisher.start();
}
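The setup starts three YARN services (the collector manager, the dispatcher, and the publisher), so a matching teardown is usually needed. A hedged @AfterClass counterpart, assuming the same static fields as above; it is not part of the quoted excerpt:

@AfterClass
public static void tearDown() throws Exception {
  // Stop services in roughly the reverse order of startup; each is a YARN Service.
  if (metricsPublisher != null) {
    metricsPublisher.stop();
  }
  dispatcher.stop();
  rmTimelineCollectorManager.stop();
  // Remove the local directory the FileSystem-based timeline writer wrote into.
  if (testRootDir.exists()) {
    FileContext.getLocalFSFileContext()
        .delete(new Path(testRootDir.getAbsolutePath()), true);
  }
}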
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
The class TestReservations, method testFindNodeToUnreserve.
@Test
public void testFindNodeToUnreserve() throws Exception {
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
  setup(csConf);
  final String user_0 = "user_0";
  final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
  LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
  FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a,
      mock(ActiveUsersManager.class), spyRMContext);
  String host_1 = "host_1";
  FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
  // Setup resource-requests
  Priority p = TestUtils.createMockPriority(5);
  SchedulerRequestKey priorityMap = toSchedulerKey(p);
  Resource capability = Resources.createResource(2 * GB, 0);
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
  RMContext rmContext = mock(RMContext.class);
  ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
  DrainDispatcher drainDispatcher = new DrainDispatcher();
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
  when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      app_0.getApplicationId(), 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  Container container = TestUtils.getMockContainer(containerId, node_1.getNodeID(),
      Resources.createResource(2 * GB), priorityMap.getPriority());
  RMContainer rmContainer = new RMContainerImpl(container,
      SchedulerRequestKey.extractFrom(container), appAttemptId, node_1.getNodeID(),
      "user", rmContext);
  // nothing reserved
  RMContainer toUnreserveContainer = app_0.findNodeToUnreserve(
      csContext.getClusterResource(), node_1, priorityMap, capability);
  assertTrue(toUnreserveContainer == null);
  // reserved but scheduler doesn't know about that node.
  app_0.reserve(node_1, priorityMap, rmContainer, container);
  node_1.reserveResource(app_0, priorityMap, rmContainer);
  toUnreserveContainer = app_0.findNodeToUnreserve(
      csContext.getClusterResource(), node_1, priorityMap, capability);
  assertTrue(toUnreserveContainer == null);
}
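The block of when(...) stubs above is a recurring pattern in these scheduler tests: RMContainerImpl pulls a handful of collaborators out of RMContext, so a mocked context must stub each getter. A hedged sketch that factors the wiring into a reusable helper (the class name is illustrative, and the import paths follow the Hadoop source layout):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;

final class MockRMContextFactory {
  // Illustrative helper: builds a Mockito-mocked RMContext with the
  // collaborators that RMContainerImpl and friends expect to find.
  static RMContext newMockRMContext() {
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getContainerAllocationExpirer())
        .thenReturn(mock(ContainerAllocationExpirer.class));
    when(rmContext.getDispatcher()).thenReturn(new DrainDispatcher());
    when(rmContext.getRMApplicationHistoryWriter())
        .thenReturn(mock(RMApplicationHistoryWriter.class));
    when(rmContext.getSystemMetricsPublisher())
        .thenReturn(mock(SystemMetricsPublisher.class));
    when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
    return rmContext;
  }
}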