Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler in project hadoop by apache.
The class TestAMRMClient, method triggerSchedulingWithNMHeartBeat.
/**
 * Make sure we get allocations regardless of timing issues.
 */
private void triggerSchedulingWithNMHeartBeat() {
  // Simulate fair scheduler update thread
  RMContext context = yarnCluster.getResourceManager().getRMContext();
  if (context.getScheduler() instanceof FairScheduler) {
    FairScheduler scheduler = (FairScheduler) context.getScheduler();
    scheduler.update();
  }
  // Trigger NM's heartbeat to RM and trigger allocations
  for (RMNode rmNode : context.getRMNodes().values()) {
    context.getScheduler().handle(new NodeUpdateSchedulerEvent(rmNode));
  }
  if (context.getScheduler() instanceof FairScheduler) {
    FairScheduler scheduler = (FairScheduler) context.getScheduler();
    scheduler.update();
  }
}
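A minimal sketch of how a test could pair this helper with an allocate call so that container allocation does not depend on heartbeat timing. The method name, the 1024 MB / 1 vcore capability, and the priority value are assumptions for illustration, not part of the original test:

// Hypothetical companion method; assumes amClient is a started
// AMRMClient<ContainerRequest> already registered with the RM of the
// mini cluster used above.
private List<Container> requestAndAllocate(AMRMClient<ContainerRequest> amClient)
    throws Exception {
  amClient.addContainerRequest(new ContainerRequest(
      Resource.newInstance(1024, 1), null, null, Priority.newInstance(1)));
  // Force a scheduling pass instead of waiting for the next NM heartbeat.
  triggerSchedulingWithNMHeartBeat();
  // The allocate call should now see containers assigned by the forced pass.
  AllocateResponse response = amClient.allocate(0.1f);
  return response.getAllocatedContainers();
}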
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler in project hadoop by apache.
The class RMWebServices, method getSchedulerInfo.
@GET
@Path("/scheduler")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public SchedulerTypeInfo getSchedulerInfo() {
  init();
  ResourceScheduler rs = rm.getResourceScheduler();
  SchedulerInfo sinfo;
  if (rs instanceof CapacityScheduler) {
    CapacityScheduler cs = (CapacityScheduler) rs;
    CSQueue root = cs.getRootQueue();
    sinfo = new CapacitySchedulerInfo(root, cs);
  } else if (rs instanceof FairScheduler) {
    FairScheduler fs = (FairScheduler) rs;
    sinfo = new FairSchedulerInfo(fs);
  } else if (rs instanceof FifoScheduler) {
    sinfo = new FifoSchedulerInfo(this.rm);
  } else {
    throw new NotFoundException("Unknown scheduler configured");
  }
  return new SchedulerTypeInfo(sinfo);
}
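This method backs the ResourceManager's GET /ws/v1/cluster/scheduler REST endpoint, so a client can read the resulting SchedulerTypeInfo as JSON. A minimal client sketch, assuming the RM web UI is reachable at localhost:8088:

// Hypothetical client; the RM address is an assumption.
URL url = new URL("http://localhost:8088/ws/v1/cluster/scheduler");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestProperty("Accept", MediaType.APPLICATION_JSON);
try (BufferedReader reader = new BufferedReader(
    new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
  // For a FairScheduler the payload wraps the FairSchedulerInfo tree.
  reader.lines().forEach(System.out::println);
}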
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler in project hadoop by apache.
The class RMWebServices, method dumpSchedulerLogs.
@POST
@Path("/scheduler/logs")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public String dumpSchedulerLogs(@FormParam("time") String time,
    @Context HttpServletRequest hsr) throws IOException {
  init();
  UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
  ApplicationACLsManager aclsManager = rm.getApplicationACLsManager();
  if (aclsManager.areACLsEnabled()) {
    if (callerUGI == null || !aclsManager.isAdmin(callerUGI)) {
      String msg = "Only admins can carry out this operation.";
      throw new ForbiddenException(msg);
    }
  }
  ResourceScheduler rs = rm.getResourceScheduler();
  int period = Integer.parseInt(time);
  if (period <= 0) {
    throw new BadRequestException("Period must be greater than 0");
  }
  final String logHierarchy =
      "org.apache.hadoop.yarn.server.resourcemanager.scheduler";
  String logfile = "yarn-scheduler-debug.log";
  if (rs instanceof CapacityScheduler) {
    logfile = "yarn-capacity-scheduler-debug.log";
  } else if (rs instanceof FairScheduler) {
    logfile = "yarn-fair-scheduler-debug.log";
  }
  AdHocLogDumper dumper = new AdHocLogDumper(logHierarchy, logfile);
  // The time period is sent to us in seconds.
  dumper.dumpLogs("DEBUG", period * 1000);
  return "Scheduler logs are being created.";
}
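The endpoint behind this method is POST /ws/v1/cluster/scheduler/logs, taking a time form parameter that dumpSchedulerLogs interprets as a period in seconds. A sketch of a caller, where the RM address and the 60-second window are assumptions:

// Hypothetical caller; address and duration are assumptions.
URL url = new URL("http://localhost:8088/ws/v1/cluster/scheduler/logs");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("POST");
conn.setDoOutput(true);
conn.setRequestProperty("Content-Type", MediaType.APPLICATION_FORM_URLENCODED);
try (OutputStream out = conn.getOutputStream()) {
  // "time" is the @FormParam consumed by dumpSchedulerLogs.
  out.write("time=60".getBytes(StandardCharsets.UTF_8));
}
System.out.println(conn.getResponseCode() + " " + conn.getResponseMessage());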
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler in project hadoop by apache.
The class FairSchedulerMetrics, method trackApp.
@Override
public void trackApp(ApplicationAttemptId appAttemptId, String oldAppId) {
  super.trackApp(appAttemptId, oldAppId);
  FairScheduler fair = (FairScheduler) scheduler;
  final FSAppAttempt app = fair.getSchedulerApp(appAttemptId);
  metrics.register("variable.app." + oldAppId + ".demand.memory", new Gauge<Long>() {
    @Override
    public Long getValue() {
      return app.getDemand().getMemorySize();
    }
  });
  metrics.register("variable.app." + oldAppId + ".demand.vcores", new Gauge<Integer>() {
    @Override
    public Integer getValue() {
      return app.getDemand().getVirtualCores();
    }
  });
  metrics.register("variable.app." + oldAppId + ".usage.memory", new Gauge<Long>() {
    @Override
    public Long getValue() {
      return app.getResourceUsage().getMemorySize();
    }
  });
  metrics.register("variable.app." + oldAppId + ".usage.vcores", new Gauge<Integer>() {
    @Override
    public Integer getValue() {
      return app.getResourceUsage().getVirtualCores();
    }
  });
  metrics.register("variable.app." + oldAppId + ".minshare.memory", new Gauge<Long>() {
    @Override
    public Long getValue() {
      return app.getMinShare().getMemorySize();
    }
  });
  metrics.register("variable.app." + oldAppId + ".minshare.vcores", new Gauge<Integer>() {
    @Override
    public Integer getValue() {
      return app.getMinShare().getVirtualCores();
    }
  });
  metrics.register("variable.app." + oldAppId + ".maxshare.memory", new Gauge<Long>() {
    @Override
    public Long getValue() {
      return Math.min(app.getMaxShare().getMemorySize(), totalMemoryMB);
    }
  });
  metrics.register("variable.app." + oldAppId + ".maxshare.vcores", new Gauge<Integer>() {
    @Override
    public Integer getValue() {
      return Math.min(app.getMaxShare().getVirtualCores(), totalVCores);
    }
  });
  metrics.register("variable.app." + oldAppId + ".fairshare.memory", new Gauge<Long>() {
    @Override
    public Long getValue() {
      return app.getFairShare().getMemorySize();
    }
  });
  metrics.register("variable.app." + oldAppId + ".fairshare.vcores", new Gauge<Integer>() {
    @Override
    public Integer getValue() {
      return app.getFairShare().getVirtualCores();
    }
  });
}
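The gauges registered above can be read back from the registry by name. An illustrative lookup, assuming metrics is the com.codahale.metrics.MetricRegistry used by this class and "app_1" is a hypothetical tracked oldAppId:

// Illustrative only; "app_1" stands in for a real oldAppId.
@SuppressWarnings("unchecked")
Gauge<Long> demandMemory =
    (Gauge<Long>) metrics.getGauges().get("variable.app.app_1.demand.memory");
long currentDemandMb = demandMemory.getValue();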
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler in project hadoop by apache.
The class TestWorkPreservingRMRestart, method checkFSQueue.
private void checkFSQueue(ResourceManager rm,
    SchedulerApplication schedulerApp, Resource usedResources,
    Resource availableResources) throws Exception {
  // Wait for the RM to schedule the recovered apps: the root queue's fair
  // share must reach the assumed cluster capacity before we assert anything.
  int retry = 0;
  Resource assumedFairShare = Resource.newInstance(8192, 8);
  while (true) {
    Thread.sleep(100);
    if (assumedFairShare.equals(((FairScheduler) rm.getResourceScheduler())
        .getQueueManager().getRootQueue().getFairShare())) {
      break;
    }
    retry++;
    if (retry > 30) {
      Assert.fail("Apps are not scheduled within assumed timeout");
    }
  }
  FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
  FSParentQueue root = scheduler.getQueueManager().getRootQueue();
  // ************ check cluster used Resources ********
  assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy);
  assertEquals(usedResources, root.getResourceUsage());
  // ************ check app headroom ****************
  FSAppAttempt schedulerAttempt =
      (FSAppAttempt) schedulerApp.getCurrentAppAttempt();
  assertEquals(availableResources, schedulerAttempt.getHeadroom());
  // ************ check queue metrics ****************
  QueueMetrics queueMetrics = scheduler.getRootQueueMetrics();
  assertMetrics(queueMetrics, 1, 0, 1, 0, 2,
      availableResources.getMemorySize(), availableResources.getVirtualCores(),
      usedResources.getMemorySize(), usedResources.getVirtualCores());
}
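The manual retry loop above could also be written with Hadoop's GenericTestUtils.waitFor helper. A sketch under the same assumptions (8192 MB / 8 vcores expected fair share, roughly a 3-second budget); this is an alternative, not the original test code:

// Possible replacement for the retry loop in checkFSQueue (sketch).
FairScheduler fs = (FairScheduler) rm.getResourceScheduler();
Resource expectedFairShare = Resource.newInstance(8192, 8);
GenericTestUtils.waitFor(
    () -> expectedFairShare.equals(
        fs.getQueueManager().getRootQueue().getFairShare()),
    100, 3000);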