Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by Apache.
The class TestFairSchedulerPlanFollower, method setUp.
@Before
public void setUp() throws Exception {
    conf = createConfiguration();
    ReservationSystemTestUtil.setupFSAllocationFile(ALLOC_FILE);
    // Setup
    rmContext = TestUtils.getMockRMContext();
    spyRMContext = spy(rmContext);
    fs = ReservationSystemTestUtil.setupFairScheduler(spyRMContext, conf, 125);
    scheduler = fs;
    ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMApp rmApp = mock(RMApp.class);
    when(rmApp.getRMAppAttempt((ApplicationAttemptId) Matchers.any())).thenReturn(null);
    Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
    when(spyRMContext.getRMApps()).thenReturn(spyApps);
    setupPlanFollower();
}
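The spy map matters because the plan follower looks applications up by ApplicationId, and the stub makes any key resolve to the one mock RMApp. Outside of mocks, ApplicationId behaves as a plain value type, so it is safe as a map key. A minimal sketch, not part of the test (the class name is made up for illustration):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AppIdAsKeyDemo {
    public static void main(String[] args) {
        ConcurrentMap<ApplicationId, String> apps = new ConcurrentHashMap<>();
        apps.put(ApplicationId.newInstance(1234L, 1), "app-1");
        // equals()/hashCode() are value-based (cluster timestamp + sequence),
        // so an equal id constructed elsewhere finds the same entry.
        System.out.println(apps.get(ApplicationId.newInstance(1234L, 1)));  // prints: app-1
    }
}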
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by Apache.
The class TestFSRMStateStore, method testHDFSRMStateStore.
@Test(timeout = 60000)
public void testHDFSRMStateStore() throws Exception {
    final HdfsConfiguration conf = new HdfsConfiguration();
    UserGroupInformation yarnAdmin = UserGroupInformation.createUserForTesting("yarn", new String[] { "admin" });
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.getFileSystem().mkdir(new Path("/yarn"), FsPermission.valueOf("-rwxrwxrwx"));
    cluster.getFileSystem().setOwner(new Path("/yarn"), "yarn", "admin");
    final UserGroupInformation hdfsAdmin = UserGroupInformation.getCurrentUser();
    final StoreStateVerifier verifier = new StoreStateVerifier() {

        @Override
        void afterStoreApp(final RMStateStore store, final ApplicationId appId) {
            try {
                // Wait for things to settle
                Thread.sleep(5000);
                hdfsAdmin.doAs(new PrivilegedExceptionAction<Void>() {

                    @Override
                    public Void run() throws Exception {
                        verifyFilesUnreadablebyHDFS(cluster, ((FileSystemRMStateStore) store).getAppDir(appId));
                        return null;
                    }
                });
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        void afterStoreAppAttempt(final RMStateStore store, final ApplicationAttemptId appAttId) {
            try {
                // Wait for things to settle
                Thread.sleep(5000);
                hdfsAdmin.doAs(new PrivilegedExceptionAction<Void>() {

                    @Override
                    public Void run() throws Exception {
                        verifyFilesUnreadablebyHDFS(cluster, ((FileSystemRMStateStore) store).getAppAttemptDir(appAttId));
                        return null;
                    }
                });
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    try {
        yarnAdmin.doAs(new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                fsTester = new TestFSRMStateStoreTester(cluster, true);
                testRMAppStateStore(fsTester, verifier);
                return null;
            }
        });
    } finally {
        cluster.shutdown();
    }
}
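The verifier resolves per-application state-store paths from the ApplicationId. ApplicationId.toString() renders as application_<clusterTimestamp>_<sequence>, with the sequence zero-padded to four digits, which keeps the directory names stable and human-readable. A small illustration, assuming a made-up root path rather than the store's real one:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AppDirNamingDemo {
    public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(1234567890123L, 1);
        System.out.println(appId);  // prints: application_1234567890123_0001
        // One directory per application under the store root;
        // "/yarn/system/rmstore" is illustrative, not the test's actual path.
        Path appDir = new Path("/yarn/system/rmstore", appId.toString());
        System.out.println(appDir);
    }
}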
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by Apache.
The class TestLeafQueue, method testApplicationQueuePercent.
@Test
public void testApplicationQueuePercent() throws Exception {
    Resource res = Resource.newInstance(10 * 1024, 10);
    CapacityScheduler scheduler = mock(CapacityScheduler.class);
    when(scheduler.getClusterResource()).thenReturn(res);
    when(scheduler.getResourceCalculator()).thenReturn(new DefaultResourceCalculator());
    ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getEpoch()).thenReturn(3L);
    when(rmContext.getScheduler()).thenReturn(scheduler);
    when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMNodeLabelsManager nlm = mock(RMNodeLabelsManager.class);
    when(nlm.getResourceByLabel(any(), any())).thenReturn(res);
    when(rmContext.getNodeLabelManager()).thenReturn(nlm);
    // Queue "test" consumes 100% of the cluster, so its capacity and absolute
    // capacity are both 1.0f.
    Queue queue = createQueue("test", null, 1.0f, 1.0f);
    final String user = "user1";
    FiCaSchedulerApp app = new FiCaSchedulerApp(appAttId, user, queue, queue.getAbstractUsersManager(), rmContext);
    // Resource request
    Resource requestedResource = Resource.newInstance(1536, 2);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In the "test" queue, 1536 used is 15% of both the queue and the cluster.
    assertEquals(15.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
    // Queue "test2" is a child of root and its capacity is 50% of root. As a
    // child of root, its absolute capacity is also 50%.
    queue = createQueue("test2", null, 0.5f, 0.5f);
    app = new FiCaSchedulerApp(appAttId, user, queue, queue.getAbstractUsersManager(), rmContext);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In the "test2" queue, 1536 used is 30% of "test2" and 15% of the cluster.
    assertEquals(30.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
    // Queue "test2.1" is 50% of queue "test2", which is 50% of the cluster.
    // Therefore, "test2.1" capacity is 50% and absolute capacity is 25%.
    AbstractCSQueue qChild = createQueue("test2.1", queue, 0.5f, 0.25f);
    app = new FiCaSchedulerApp(appAttId, user, qChild, qChild.getAbstractUsersManager(), rmContext);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In the "test2.1" queue, 1536 used is 60% of "test2.1" and 15% of the cluster.
    assertEquals(60.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
}
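The asserted percentages are straight arithmetic over the 10 GB cluster: 1536 MB is 15% of 10240 MB, 30% of the 5120 MB that "test2" owns at 50% absolute capacity, and 60% of the 2560 MB that "test2.1" owns at 25%. A quick standalone check (the class name is illustrative):

public class QueuePercentCheck {
    public static void main(String[] args) {
        float clusterMB = 10 * 1024;  // 10240, matching the mocked cluster resource
        float usedMB = 1536;
        System.out.println(usedMB / clusterMB * 100);            // 15.0 -> "test" and cluster
        System.out.println(usedMB / (clusterMB * 0.5f) * 100);   // 30.0 -> "test2" at 50% absolute
        System.out.println(usedMB / (clusterMB * 0.25f) * 100);  // 60.0 -> "test2.1" at 25% absolute
    }
}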
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by Apache.
The class TestTimelineServiceClientIntegration, method testPutExtendedEntities.
@Test
public void testPutExtendedEntities() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(0, 1);
    TimelineV2Client client = TimelineV2Client.createTimelineClient(appId);
    try {
        // set the timeline service address manually
        client.setTimelineServiceAddress(collectorManager.getRestServerBindAddress());
        client.init(conf);
        client.start();
        ClusterEntity cluster = new ClusterEntity();
        cluster.setId(YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
        FlowRunEntity flow = new FlowRunEntity();
        flow.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
        flow.setName("test_flow_name");
        flow.setVersion("test_flow_version");
        flow.setRunId(1L);
        flow.setParent(cluster.getType(), cluster.getId());
        ApplicationEntity app = new ApplicationEntity();
        app.setId(appId.toString());
        flow.addChild(app.getType(), app.getId());
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
        ApplicationAttemptEntity appAttempt = new ApplicationAttemptEntity();
        appAttempt.setId(attemptId.toString());
        ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
        ContainerEntity container = new ContainerEntity();
        container.setId(containerId.toString());
        UserEntity user = new UserEntity();
        user.setId(UserGroupInformation.getCurrentUser().getShortUserName());
        QueueEntity queue = new QueueEntity();
        queue.setId("default_queue");
        client.putEntities(cluster, flow, app, appAttempt, container, user, queue);
        client.putEntitiesAsync(cluster, flow, app, appAttempt, container, user, queue);
    } finally {
        client.stop();
    }
}
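Each entity in the hierarchy is keyed by the string form of its YARN record, which is how the timeline service ties entities back to cluster objects. The string forms nest predictably; a standalone sketch of what the test's ids look like (epoch-0 container ids omit the e<epoch> prefix, and the exact padding shown is from the records' formatters):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class RecordIdFormats {
    public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(0, 1);
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
        ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
        System.out.println(appId);        // application_0_0001
        System.out.println(attemptId);    // appattempt_0_0001_000001
        System.out.println(containerId);  // container_0_0001_01_000001
    }
}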
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by Apache.
The class EntityGroupFSTimelineStore, method scanActiveLogs.
@InterfaceAudience.Private
@VisibleForTesting
int scanActiveLogs() throws IOException {
    long startTime = Time.monotonicNow();
    RemoteIterator<FileStatus> iter = list(activeRootPath);
    int logsToScanCount = 0;
    while (iter.hasNext()) {
        FileStatus stat = iter.next();
        String name = stat.getPath().getName();
        ApplicationId appId = parseApplicationId(name);
        if (appId != null) {
            LOG.debug("scan logs for {} in {}", appId, stat.getPath());
            logsToScanCount++;
            AppLogs logs = getAndSetActiveLog(appId, stat.getPath());
            executor.execute(new ActiveLogParser(logs));
        } else {
            LOG.debug("Unable to parse entry {}", name);
        }
    }
    metrics.addActiveLogDirScanTime(Time.monotonicNow() - startTime);
    return logsToScanCount;
}
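Here parseApplicationId returns null for directory names that are not valid application ids, which the loop logs and skips. The public API supports the same round trip: ApplicationId.fromString parses the application_<timestamp>_<seq> form and throws IllegalArgumentException on anything else. A sketch of equivalent null-tolerant parsing (tryParseAppId is a hypothetical helper, not the store's private method):

import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AppIdParsing {
    static ApplicationId tryParseAppId(String dirName) {
        try {
            // fromString() expects the "application_<timestamp>_<seq>" form
            return ApplicationId.fromString(dirName);
        } catch (IllegalArgumentException e) {
            return null;  // not an app dir; caller skips it, as scanActiveLogs does
        }
    }

    public static void main(String[] args) {
        System.out.println(tryParseAppId("application_1234567890123_0001"));  // parses
        System.out.println(tryParseAppId(".tmp_staging"));                    // null
    }
}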