Example 16 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From the class TestFairSchedulerPlanFollower, method setUp:

@Before
public void setUp() throws Exception {
    conf = createConfiguration();
    ReservationSystemTestUtil.setupFSAllocationFile(ALLOC_FILE);
    // Set up a mocked RM context and spy on it so app lookups can be stubbed
    rmContext = TestUtils.getMockRMContext();
    spyRMContext = spy(rmContext);
    fs = ReservationSystemTestUtil.setupFairScheduler(spyRMContext, conf, 125);
    scheduler = fs;
    ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMApp rmApp = mock(RMApp.class);
    when(rmApp.getRMAppAttempt((ApplicationAttemptId) Matchers.any())).thenReturn(null);
    Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
    when(spyRMContext.getRMApps()).thenReturn(spyApps);
    ReservationSystemTestUtil.setupFSAllocationFile(ALLOC_FILE);
    setupPlanFollower();
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Before (org.junit.Before)
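
As a quick aside (not part of the test above): ApplicationId records are built through a static factory rather than a constructor. A minimal sketch with arbitrary illustration values:

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

// An ApplicationId is a (cluster timestamp, sequence number) pair.
ApplicationId appId = ApplicationId.newInstance(0L, 1);
// Attempt ids are derived from the application id plus an attempt number.
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
// toString() renders the familiar "application_<timestamp>_<seq>" form,
// here "application_0_0001".
System.out.println(appId);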

Example 17 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From the class TestFSRMStateStore, method testHDFSRMStateStore:

@Test(timeout = 60000)
public void testHDFSRMStateStore() throws Exception {
    final HdfsConfiguration conf = new HdfsConfiguration();
    UserGroupInformation yarnAdmin = UserGroupInformation.createUserForTesting("yarn", new String[] { "admin" });
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.getFileSystem().mkdir(new Path("/yarn"), FsPermission.valueOf("-rwxrwxrwx"));
    cluster.getFileSystem().setOwner(new Path("/yarn"), "yarn", "admin");
    final UserGroupInformation hdfsAdmin = UserGroupInformation.getCurrentUser();
    final StoreStateVerifier verifier = new StoreStateVerifier() {

        @Override
        void afterStoreApp(final RMStateStore store, final ApplicationId appId) {
            try {
                // Wait for things to settle
                Thread.sleep(5000);
                hdfsAdmin.doAs(new PrivilegedExceptionAction<Void>() {

                    @Override
                    public Void run() throws Exception {
                        verifyFilesUnreadablebyHDFS(cluster, ((FileSystemRMStateStore) store).getAppDir(appId));
                        return null;
                    }
                });
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        void afterStoreAppAttempt(final RMStateStore store, final ApplicationAttemptId appAttId) {
            try {
                // Wait for things to settle
                Thread.sleep(5000);
                hdfsAdmin.doAs(new PrivilegedExceptionAction<Void>() {

                    @Override
                    public Void run() throws Exception {
                        verifyFilesUnreadablebyHDFS(cluster, ((FileSystemRMStateStore) store).getAppAttemptDir(appAttId));
                        return null;
                    }
                });
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    try {
        yarnAdmin.doAs(new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                fsTester = new TestFSRMStateStoreTester(cluster, true);
                testRMAppStateStore(fsTester, verifier);
                return null;
            }
        });
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), IOException (java.io.IOException), AccessControlException (org.apache.hadoop.security.AccessControlException), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
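
The test above leans on UserGroupInformation.doAs twice: once to run the store operations as the "yarn" admin and once to probe file permissions as the HDFS superuser. A minimal sketch of that pattern, with an illustrative user name:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

// Create a synthetic user for testing and execute code on its behalf.
UserGroupInformation testUser =
    UserGroupInformation.createUserForTesting("alice", new String[] { "users" });
String shortName = testUser.doAs(new PrivilegedExceptionAction<String>() {
    @Override
    public String run() throws Exception {
        // Everything executed here sees "alice" as the current user.
        return UserGroupInformation.getCurrentUser().getShortUserName();
    }
});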

Example 18 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From the class TestLeafQueue, method testApplicationQueuePercent:

@Test
public void testApplicationQueuePercent() throws Exception {
    Resource res = Resource.newInstance(10 * 1024, 10);
    CapacityScheduler scheduler = mock(CapacityScheduler.class);
    when(scheduler.getClusterResource()).thenReturn(res);
    when(scheduler.getResourceCalculator()).thenReturn(new DefaultResourceCalculator());
    ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getEpoch()).thenReturn(3L);
    when(rmContext.getScheduler()).thenReturn(scheduler);
    when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMNodeLabelsManager nlm = mock(RMNodeLabelsManager.class);
    when(nlm.getResourceByLabel(any(), any())).thenReturn(res);
    when(rmContext.getNodeLabelManager()).thenReturn(nlm);
    // Queue "test" consumes 100% of the cluster, so its capacity and absolute
    // capacity are both 1.0f.
    Queue queue = createQueue("test", null, 1.0f, 1.0f);
    final String user = "user1";
    FiCaSchedulerApp app = new FiCaSchedulerApp(appAttId, user, queue, queue.getAbstractUsersManager(), rmContext);
    // Resource request
    Resource requestedResource = Resource.newInstance(1536, 2);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In "test" queue, 1536 used is 15% of both the queue and the cluster
    assertEquals(15.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
    // Queue "test2" is a child of root and its capacity is 50% of root. As a
    // child of root, its absolute capacity is also 50%.
    queue = createQueue("test2", null, 0.5f, 0.5f);
    app = new FiCaSchedulerApp(appAttId, user, queue, queue.getAbstractUsersManager(), rmContext);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In "test2" queue, 1536 used is 30% of "test2" and 15% of the cluster.
    assertEquals(30.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
    // Queue "test2.1" is 50% of queue "test2", which is 50% of the cluster.
    // Therefore, "test2.1" capacity is 50% and absolute capacity is 25%.
    AbstractCSQueue qChild = createQueue("test2.1", queue, 0.5f, 0.25f);
    app = new FiCaSchedulerApp(appAttId, user, qChild, qChild.getAbstractUsersManager(), rmContext);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In "test2.1" queue, 1536 used is 60% of "test2.1" and 15% of the cluster.
    assertEquals(60.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
}
Also used: RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), DefaultResourceCalculator (org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Queue (org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue), RMNodeLabelsManager (org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager), Test (org.junit.Test)
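
The asserted percentages follow directly from the resource figures in the test: the cluster holds 10 * 1024 = 10240 MB and each app attempt uses 1536 MB. A quick check of the arithmetic:

// 1536 MB of a 10240 MB cluster is 15% of cluster capacity.
float clusterPct = 1536f / (10 * 1024) * 100;              // 15.0
// Against a queue entitled to 50% of the cluster, the same usage is 30%.
float halfQueuePct = 1536f / (10 * 1024 * 0.5f) * 100;     // 30.0
// Against "test2.1", whose absolute capacity is 25%, it is 60%.
float quarterQueuePct = 1536f / (10 * 1024 * 0.25f) * 100; // 60.0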

Example 19 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From the class TestTimelineServiceClientIntegration, method testPutExtendedEntities:

@Test
public void testPutExtendedEntities() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(0, 1);
    TimelineV2Client client = TimelineV2Client.createTimelineClient(appId);
    try {
        // set the timeline service address manually
        client.setTimelineServiceAddress(collectorManager.getRestServerBindAddress());
        client.init(conf);
        client.start();
        ClusterEntity cluster = new ClusterEntity();
        cluster.setId(YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
        FlowRunEntity flow = new FlowRunEntity();
        flow.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
        flow.setName("test_flow_name");
        flow.setVersion("test_flow_version");
        flow.setRunId(1L);
        flow.setParent(cluster.getType(), cluster.getId());
        ApplicationEntity app = new ApplicationEntity();
        app.setId(appId.toString());
        flow.addChild(app.getType(), app.getId());
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
        ApplicationAttemptEntity appAttempt = new ApplicationAttemptEntity();
        appAttempt.setId(attemptId.toString());
        ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
        ContainerEntity container = new ContainerEntity();
        container.setId(containerId.toString());
        UserEntity user = new UserEntity();
        user.setId(UserGroupInformation.getCurrentUser().getShortUserName());
        QueueEntity queue = new QueueEntity();
        queue.setId("default_queue");
        client.putEntities(cluster, flow, app, appAttempt, container, user, queue);
        client.putEntitiesAsync(cluster, flow, app, appAttempt, container, user, queue);
    } finally {
        client.stop();
    }
}
Also used: ApplicationAttemptEntity (org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity), TimelineV2Client (org.apache.hadoop.yarn.client.api.TimelineV2Client), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), ContainerEntity (org.apache.hadoop.yarn.api.records.timelineservice.ContainerEntity), ApplicationEntity (org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity), FlowRunEntity (org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity), QueueEntity (org.apache.hadoop.yarn.api.records.timelineservice.QueueEntity), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), UserEntity (org.apache.hadoop.yarn.api.records.timelineservice.UserEntity), ClusterEntity (org.apache.hadoop.yarn.api.records.timelineservice.ClusterEntity), Test (org.junit.Test)
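
The typed entities above (ClusterEntity, FlowRunEntity, and so on) all extend the generic TimelineEntity, so custom data can be published through the same client. A minimal sketch, where the entity type and metric name are illustrative, not part of the test above:

import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;

TimelineEntity entity = new TimelineEntity();
entity.setType("MY_CUSTOM_TYPE"); // illustrative type name
entity.setId("custom_entity_1");
TimelineMetric metric = new TimelineMetric();
metric.setId("memory_mb"); // illustrative metric name
metric.addValue(System.currentTimeMillis(), 2048);
entity.addMetric(metric);
// Publish via the same client as in the test: putEntities blocks until
// the write is handled, while putEntitiesAsync returns immediately.
client.putEntities(entity);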

Example 20 with ApplicationId

Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.

From the class EntityGroupFSTimelineStore, method scanActiveLogs:

@InterfaceAudience.Private
@VisibleForTesting
int scanActiveLogs() throws IOException {
    long startTime = Time.monotonicNow();
    RemoteIterator<FileStatus> iter = list(activeRootPath);
    int logsToScanCount = 0;
    while (iter.hasNext()) {
        FileStatus stat = iter.next();
        String name = stat.getPath().getName();
        ApplicationId appId = parseApplicationId(name);
        if (appId != null) {
            LOG.debug("scan logs for {} in {}", appId, stat.getPath());
            logsToScanCount++;
            AppLogs logs = getAndSetActiveLog(appId, stat.getPath());
            executor.execute(new ActiveLogParser(logs));
        } else {
            LOG.debug("Unable to parse entry {}", name);
        }
    }
    metrics.addActiveLogDirScanTime(Time.monotonicNow() - startTime);
    return logsToScanCount;
}
Also used: FileStatus (org.apache.hadoop.fs.FileStatus), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
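
The parseApplicationId helper invoked above turns a directory name of the form application_&lt;timestamp&gt;_&lt;seq&gt; back into an ApplicationId. A minimal sketch of the same idea using the public parser available in recent Hadoop releases (an illustration, not the store's actual implementation):

import org.apache.hadoop.yarn.api.records.ApplicationId;

// Returns null for names that are not application directories, mirroring
// how scanActiveLogs skips unparseable entries.
static ApplicationId tryParseAppId(String dirName) {
    try {
        return ApplicationId.fromString(dirName);
    } catch (IllegalArgumentException e) {
        return null;
    }
}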

Aggregations

Usage counts across the indexed examples:

ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 849
Test (org.junit.Test): 435
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 255
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 203
Configuration (org.apache.hadoop.conf.Configuration): 190
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 154
IOException (java.io.IOException): 153
Path (org.apache.hadoop.fs.Path): 151
ArrayList (java.util.ArrayList): 106
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 102
ApplicationReport (org.apache.hadoop.yarn.api.records.ApplicationReport): 85
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 81
HashMap (java.util.HashMap): 80
Resource (org.apache.hadoop.yarn.api.records.Resource): 80
File (java.io.File): 70
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 63
Credentials (org.apache.hadoop.security.Credentials): 60
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 60
ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext): 59
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 56