Example 71 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

The class TestAMAuthorization, method testUnauthorizedAccess.

@Test
public void testUnauthorizedAccess() throws Exception {
    MyContainerManager containerManager = new MyContainerManager();
    rm = new MockRMWithAMS(conf, containerManager);
    rm.start();
    MockNM nm1 = rm.registerNode("localhost:1234", 5120);
    RMApp app = rm.submitApp(1024);
    nm1.nodeHeartbeat(true);
    int waitCount = 0;
    while (containerManager.containerTokens == null && waitCount++ < 40) {
        LOG.info("Waiting for AM Launch to happen..");
        Thread.sleep(1000);
    }
    Assert.assertNotNull(containerManager.containerTokens);
    RMAppAttempt attempt = app.getCurrentAppAttempt();
    ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
    waitForLaunchedState(attempt);
    final Configuration conf = rm.getConfig();
    final YarnRPC rpc = YarnRPC.create(conf);
    final InetSocketAddress serviceAddr = conf.getSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
    UserGroupInformation currentUser = UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
    // First try contacting the RM (ApplicationMasterProtocol) without tokens
    ApplicationMasterProtocol client = currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {

        @Override
        public ApplicationMasterProtocol run() {
            return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class, serviceAddr, conf);
        }
    });
    RegisterApplicationMasterRequest request = Records.newRecord(RegisterApplicationMasterRequest.class);
    try {
        client.registerApplicationMaster(request);
        Assert.fail("Should fail with authorization error");
    } catch (Exception e) {
        if (isCause(AccessControlException.class, e)) {
            // Because there are no tokens, the request should be rejected as the
            // server side will assume we are trying simple auth.
            String expectedMessage = "";
            if (UserGroupInformation.isSecurityEnabled()) {
                expectedMessage = "Client cannot authenticate via:[TOKEN]";
            } else {
                expectedMessage = "SIMPLE authentication is not enabled.  Available:[TOKEN]";
            }
            Assert.assertTrue(e.getCause().getMessage().contains(expectedMessage));
        } else {
            throw e;
        }
    }
// TODO: Add validation of invalid authorization when there's more data in
// the AMRMToken
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) InetSocketAddress(java.net.InetSocketAddress) AccessControlException(org.apache.hadoop.security.AccessControlException) ApplicationMasterProtocol(org.apache.hadoop.yarn.api.ApplicationMasterProtocol) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) YarnRPC(org.apache.hadoop.yarn.ipc.YarnRPC) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) IOException(java.io.IOException) AccessControlException(org.apache.hadoop.security.AccessControlException) RegisterApplicationMasterRequest(org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
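The isCause call above relies on a helper defined elsewhere in TestAMAuthorization. A minimal sketch of such a cause-chain check, assuming only standard Java (the actual helper may differ):

// Hedged re-creation of the isCause helper: walks the cause chain of a
// throwable and reports whether any link is an instance of the expected type.
private static boolean isCause(Class<? extends Throwable> expected, Throwable t) {
    while (t != null) {
        if (expected.isInstance(t)) {
            return true;
        }
        t = t.getCause();
    }
    return false;
}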

Example 72 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

The class ApplicationCLI, method failApplicationAttempt.

/**
   * Fails an application attempt.
   *
   * @param attemptId ID of the attempt to fail; the owning application ID is
   *        derived from it.
   * @throws YarnException
   * @throws IOException
   */
private void failApplicationAttempt(String attemptId) throws YarnException, IOException {
    ApplicationId appId;
    ApplicationAttemptId attId;
    attId = ApplicationAttemptId.fromString(attemptId);
    appId = attId.getApplicationId();
    sysout.println("Failing attempt " + attId + " of application " + appId);
    client.failApplicationAttempt(attId);
}
Also used : ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId)
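For reference, a minimal usage sketch of the same parsing step. The timestamp and sequence numbers below are made up; the canonical form is appattempt_&lt;clusterTimestamp&gt;_&lt;appId&gt;_&lt;attemptNumber&gt;:

// Parse a canonical attempt-ID string and recover the enclosing application ID.
ApplicationAttemptId attId =
    ApplicationAttemptId.fromString("appattempt_1504000000000_0001_000001");
ApplicationId appId = attId.getApplicationId(); // application_1504000000000_0001
int attemptNumber = attId.getAttemptId();       // 1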

Example 73 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

The class TestAggregatedLogFormat, method testContainerLogsFileAccess.

@Test(timeout = 10000)
public void testContainerLogsFileAccess() throws IOException {
    // This test will run only if NativeIO is enabled, as SecureIOUtils
    // requires it to be enabled.
    Assume.assumeTrue(NativeIO.isAvailable());
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    File workDir = new File(testWorkDir, "testContainerLogsFileAccess1");
    Path remoteAppLogFile = new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
    Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
    String data = "Log File content for container : ";
    // Creating files for container1. The log aggregator will try to read log
    // files owned by an invalid user.
    ApplicationId applicationId = ApplicationId.newInstance(1, 1);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
    ContainerId testContainerId1 = ContainerId.newContainerId(applicationAttemptId, 1);
    Path appDir = new Path(srcFileRoot, testContainerId1.getApplicationAttemptId().getApplicationId().toString());
    Path srcFilePath1 = new Path(appDir, testContainerId1.toString());
    String stdout = "stdout";
    String stderr = "stderr";
    writeSrcFile(srcFilePath1, stdout, data + testContainerId1.toString() + stdout);
    writeSrcFile(srcFilePath1, stderr, data + testContainerId1.toString() + stderr);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);
    LogKey logKey = new LogKey(testContainerId1);
    String randomUser = "randomUser";
    LogValue logValue = spy(new LogValue(Collections.singletonList(srcFileRoot.toString()), testContainerId1, randomUser));
    // This simulates a situation where the first log file is owned by a
    // different user (probably a symlink) and the second one by the user
    // itself. The first file should not be aggregated, because its owner
    // does not match.
    when(logValue.getUser()).thenReturn(randomUser).thenReturn(ugi.getShortUserName());
    logWriter.append(logKey, logValue);
    logWriter.close();
    BufferedReader in = new BufferedReader(new FileReader(new File(remoteAppLogFile.toUri().getRawPath())));
    String line;
    StringBuffer sb = new StringBuffer("");
    while ((line = in.readLine()) != null) {
        LOG.info(line);
        sb.append(line);
    }
    line = sb.toString();
    String expectedOwner = ugi.getShortUserName();
    if (Path.WINDOWS) {
        final String adminsGroupString = "Administrators";
        if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
            expectedOwner = adminsGroupString;
        }
    }
    // The stderr file should not be aggregated, so its log message must not
    // appear in the output.
    String stdoutFile1 = StringUtils.join(File.separator, Arrays.asList(new String[] { workDir.getAbsolutePath(), "srcFiles", testContainerId1.getApplicationAttemptId().getApplicationId().toString(), testContainerId1.toString(), stderr }));
    // The stdout file is expected to be aggregated.
    String stdoutFile2 = StringUtils.join(File.separator, Arrays.asList(new String[] { workDir.getAbsolutePath(), "srcFiles", testContainerId1.getApplicationAttemptId().getApplicationId().toString(), testContainerId1.toString(), stdout }));
    String message2 = "Owner '" + expectedOwner + "' for path " + stdoutFile2 + " did not match expected owner '" + ugi.getShortUserName() + "'";
    Assert.assertFalse(line.contains(message2));
    Assert.assertFalse(line.contains(data + testContainerId1.toString() + stderr));
    Assert.assertTrue(line.contains(data + testContainerId1.toString() + stdout));
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) LogKey(org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) LogValue(org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue) TestContainerId(org.apache.hadoop.yarn.api.TestContainerId) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) LogWriter(org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter) BufferedReader(java.io.BufferedReader) FileReader(java.io.FileReader) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
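The application/attempt/container ID hierarchy built at the top of this test composes as follows; a short sketch, assuming only the standard YARN factory methods used above:

// Build the three-level YARN ID hierarchy used by the test above.
ApplicationId appId = ApplicationId.newInstance(1, 1); // cluster timestamp, app sequence
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
// Each child ID can recover its parents, which is what the test relies on:
containerId.getApplicationAttemptId().getApplicationId(); // == appId
// Canonical string forms nest the parent IDs:
//   application_1_0001, appattempt_1_0001_000001, container_1_0001_01_000001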

Example 74 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

The class TestAggregatedLogsBlock, method writeLog.

private void writeLog(Configuration configuration, String user) throws Exception {
    ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1);
    ApplicationAttemptId appAttemptId = ApplicationAttemptIdPBImpl.newInstance(appId, 1);
    ContainerId containerId = ContainerIdPBImpl.newContainerId(appAttemptId, 1);
    String path = "target/logs/" + user + "/logs/application_0_0001/localhost_1234";
    File f = new File(path);
    if (!f.getParentFile().exists()) {
        assertTrue(f.getParentFile().mkdirs());
    }
    List<String> rootLogDirs = Arrays.asList("target/logs/logs");
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    AggregatedLogFormat.LogWriter writer = new AggregatedLogFormat.LogWriter(configuration, new Path(path), ugi);
    writer.writeApplicationOwner(ugi.getUserName());
    Map<ApplicationAccessType, String> appAcls = new HashMap<ApplicationAccessType, String>();
    appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
    writer.writeApplicationACLs(appAcls);
    writer.append(new AggregatedLogFormat.LogKey("container_0_0001_01_000001"), new AggregatedLogFormat.LogValue(rootLogDirs, containerId, UserGroupInformation.getCurrentUser().getShortUserName()));
    writer.close();
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
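A reader-side sketch for the file written above, assuming the AggregatedLogFormat.LogReader counterpart (a constructor taking a Configuration and a Path, plus getApplicationOwner, getApplicationAcls, and next); configuration and path reuse the values from writeLog:

// Read back the owner and ACLs that writeLog recorded.
AggregatedLogFormat.LogReader reader =
    new AggregatedLogFormat.LogReader(configuration, new Path(path));
try {
    String owner = reader.getApplicationOwner();                   // ugi.getUserName()
    Map<ApplicationAccessType, String> acls = reader.getApplicationAcls();
    AggregatedLogFormat.LogKey key = new AggregatedLogFormat.LogKey();
    DataInputStream valueStream = reader.next(key);                // first container entry
} finally {
    reader.close();
}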

Example 75 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

The class TestTimelineClientForATS1_5, method testPostEntities.

@Test
public void testPostEntities() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    TimelineEntityGroupId groupId = TimelineEntityGroupId.newInstance(appId, "1");
    TimelineEntityGroupId groupId2 = TimelineEntityGroupId.newInstance(appId, "2");
    // Create two entities: one of a regular entity type and one of the
    // summary type
    TimelineEntity[] entities = new TimelineEntity[2];
    entities[0] = generateEntity("entity_type");
    entities[1] = generateEntity("summary_type");
    try {
        // if attemptId is null, fall back to the original putEntities call
        // and save the entities into the configured levelDB store
        client.putEntities(null, null, entities);
        verify(spyTimelineWriter, times(1)).putEntities(entities);
        reset(spyTimelineWriter);
        // if the attemptId is specified but groupId is null, the summary-type
        // entity is written into the FS summary log, while the other entity
        // falls back to the original putEntities call
        ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance(appId, 1);
        client.putEntities(attemptId1, null, entities);
        TimelineEntity[] entityTDB = new TimelineEntity[1];
        entityTDB[0] = entities[0];
        verify(spyTimelineWriter, times(1)).putEntities(entityTDB);
        Assert.assertTrue(localFS.util().exists(new Path(getAppAttemptDir(attemptId1), "summarylog-" + attemptId1.toString())));
        reset(spyTimelineWriter);
        // if we specified attemptId as well as groupId, the entities are saved
        // into the FileSystem instead of the levelDB store
        ApplicationAttemptId attemptId2 = ApplicationAttemptId.newInstance(appId, 2);
        client.putEntities(attemptId2, groupId, entities);
        client.putEntities(attemptId2, groupId2, entities);
        verify(spyTimelineWriter, times(0)).putEntities(any(TimelineEntity[].class));
        Assert.assertTrue(localFS.util().exists(new Path(getAppAttemptDir(attemptId2), "summarylog-" + attemptId2.toString())));
        Assert.assertTrue(localFS.util().exists(new Path(getAppAttemptDir(attemptId2), "entitylog-" + groupId.toString())));
        Assert.assertTrue(localFS.util().exists(new Path(getAppAttemptDir(attemptId2), "entitylog-" + groupId2.toString())));
        reset(spyTimelineWriter);
    } catch (Exception e) {
        Assert.fail("Exception is not expected. " + e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TimelineEntityGroupId(org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) TimelineEntity(org.apache.hadoop.yarn.api.records.timeline.TimelineEntity) IOException(java.io.IOException) Test(org.junit.Test)
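generateEntity is a test helper not shown in this excerpt; a hypothetical re-creation using the public TimelineEntity setters (the ID and event names are made up):

// Hypothetical sketch of the generateEntity helper: builds a TimelineEntity
// of the given type with a single event attached.
private static TimelineEntity generateEntity(String type) {
    TimelineEntity entity = new TimelineEntity();
    entity.setEntityId("entity id");
    entity.setEntityType(type);
    entity.setStartTime(System.currentTimeMillis());
    // org.apache.hadoop.yarn.api.records.timeline.TimelineEvent
    TimelineEvent event = new TimelineEvent();
    event.setEventType("event type");
    event.setTimestamp(System.currentTimeMillis());
    entity.addEvent(event);
    return entity;
}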

Aggregations

ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId) 514
Test (org.junit.Test) 362
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 222
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId) 170
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 109
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) 104
Configuration (org.apache.hadoop.conf.Configuration) 87
Resource (org.apache.hadoop.yarn.api.records.Resource) 82
ArrayList (java.util.ArrayList) 75
NodeId (org.apache.hadoop.yarn.api.records.NodeId) 74
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) 65
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) 63
NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent) 60
Path (org.apache.hadoop.fs.Path) 55
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId) 53
Priority (org.apache.hadoop.yarn.api.records.Priority) 52
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) 51
Container (org.apache.hadoop.yarn.api.records.Container) 50
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) 49
HashMap (java.util.HashMap) 42