Example 21 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class MRAppMasterTestLaunchTime method testMRAppMasterCredentials.

@Test
public void testMRAppMasterCredentials() throws Exception {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    // Simulate credentials passed to AM via client->RM->NM
    Credentials credentials = new Credentials();
    byte[] identifier = "MyIdentifier".getBytes();
    byte[] password = "MyPassword".getBytes();
    Text kind = new Text("MyTokenKind");
    Text service = new Text("host:port");
    Token<? extends TokenIdentifier> myToken = new Token<TokenIdentifier>(identifier, password, kind, service);
    Text tokenAlias = new Text("myToken");
    credentials.addToken(tokenAlias, myToken);
    Text appTokenService = new Text("localhost:0");
    Token<AMRMTokenIdentifier> appToken = new Token<AMRMTokenIdentifier>(identifier, password, AMRMTokenIdentifier.KIND_NAME, appTokenService);
    credentials.addToken(appTokenService, appToken);
    Text keyAlias = new Text("mySecretKeyAlias");
    credentials.addSecretKey(keyAlias, "mySecretKey".getBytes());
    Token<? extends TokenIdentifier> storedToken = credentials.getToken(tokenAlias);
    JobConf conf = new JobConf();
    Path tokenFilePath = new Path(testDir, "tokens-file");
    Map<String, String> newEnv = new HashMap<String, String>();
    newEnv.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION, tokenFilePath.toUri().getPath());
    setNewEnvironmentHack(newEnv);
    credentials.writeTokenStorageFile(tokenFilePath, conf);
    ApplicationId appId = ApplicationId.newInstance(12345, 56);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, 546);
    String userName = UserGroupInformation.getCurrentUser().getShortUserName();
    // Create staging dir, so MRAppMaster doesn't barf.
    File stagingDir = new File(MRApps.getStagingAreaDir(conf, userName).toString());
    stagingDir.mkdirs();
    // Set login-user to null, as that is how a real-world MRAppMaster starts.
    // This null login-user is the reason the token file is read by UGI.
    UserGroupInformation.setLoginUser(null);
    MRAppMasterTest appMaster = new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis(), false, true);
    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
    // Now validate the task credentials
    Credentials appMasterCreds = appMaster.getCredentials();
    Assert.assertNotNull(appMasterCreds);
    Assert.assertEquals(1, appMasterCreds.numberOfSecretKeys());
    Assert.assertEquals(1, appMasterCreds.numberOfTokens());
    // Validate the tokens - app token should not be present
    Token<? extends TokenIdentifier> usedToken = appMasterCreds.getToken(tokenAlias);
    Assert.assertNotNull(usedToken);
    Assert.assertEquals(storedToken, usedToken);
    // Validate the keys
    byte[] usedKey = appMasterCreds.getSecretKey(keyAlias);
    Assert.assertNotNull(usedKey);
    Assert.assertEquals("mySecretKey", new String(usedKey));
    // The credentials should also be added to conf so that the OutputCommitter
    // can access them - app token should not be present
    Credentials confCredentials = conf.getCredentials();
    Assert.assertEquals(1, confCredentials.numberOfSecretKeys());
    Assert.assertEquals(1, confCredentials.numberOfTokens());
    Assert.assertEquals(storedToken, confCredentials.getToken(tokenAlias));
    Assert.assertEquals("mySecretKey", new String(confCredentials.getSecretKey(keyAlias)));
    // Verify the AM's ugi - app token should be present
    Credentials ugiCredentials = appMaster.getUgi().getCredentials();
    Assert.assertEquals(1, ugiCredentials.numberOfSecretKeys());
    Assert.assertEquals(2, ugiCredentials.numberOfTokens());
    Assert.assertEquals(storedToken, ugiCredentials.getToken(tokenAlias));
    Assert.assertEquals(appToken, ugiCredentials.getToken(appTokenService));
    Assert.assertEquals("mySecretKey", new String(ugiCredentials.getSecretKey(keyAlias)));
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) Token(org.apache.hadoop.security.token.Token) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) Logger(org.apache.log4j.Logger) AMRMTokenIdentifier(org.apache.hadoop.yarn.security.AMRMTokenIdentifier) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JobConf(org.apache.hadoop.mapred.JobConf) File(java.io.File) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)
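
A minimal sketch, not part of the test, of the token-file round trip the test relies on: credentials written with writeTokenStorageFile are what UGI reads back when HADOOP_TOKEN_FILE_LOCATION points at the file. The class name and the /tmp path are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenFileRoundTrip {
    public static void main(String[] args) throws Exception {
        Credentials creds = new Credentials();
        Token<TokenIdentifier> token = new Token<TokenIdentifier>(
                "MyIdentifier".getBytes(), "MyPassword".getBytes(),
                new Text("MyTokenKind"), new Text("host:port"));
        creds.addToken(new Text("myToken"), token);
        creds.addSecretKey(new Text("mySecretKeyAlias"), "mySecretKey".getBytes());
        Configuration conf = new Configuration();
        // Illustrative local path; the test writes under its own test directory.
        Path tokenFile = new Path("file:///tmp/tokens-file");
        creds.writeTokenStorageFile(tokenFile, conf);
        // Read the file back the same way UGI's login path does.
        Credentials readBack = Credentials.readTokenStorageFile(tokenFile, conf);
        System.out.println(readBack.numberOfTokens());     // 1
        System.out.println(readBack.numberOfSecretKeys()); // 1
    }
}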

Example 22 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class MRAppMasterTestLaunchTime method testMRAppMasterSuccessLock.

@Test
public void testMRAppMasterSuccessLock() throws IOException, InterruptedException {
    String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
    String containerIdStr = "container_1317529182569_0004_000002_1";
    String userName = "TestAppMasterUser";
    JobConf conf = new JobConf();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.fromString(applicationAttemptIdStr);
    JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
    Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
    Path end = MRApps.getEndJobCommitSuccessFile(conf, userName, jobId);
    FileSystem fs = FileSystem.get(conf);
    fs.create(start).close();
    fs.create(end).close();
    ContainerId containerId = ContainerId.fromString(containerIdStr);
    MRAppMaster appMaster = new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis(), false, false);
    boolean caught = false;
    try {
        MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
    } catch (IOException e) {
        // The IOException is expected.
        LOG.info("Caught expected Exception", e);
        caught = true;
    }
    assertTrue(caught);
    assertTrue(appMaster.errorHappenedShutDown);
    assertEquals(JobStateInternal.SUCCEEDED, appMaster.forcedState);
    appMaster.stop();
    // verify the final status is SUCCEEDED
    verifyFailedStatus((MRAppMasterTest) appMaster, "SUCCEEDED");
}
Also used : Path(org.apache.hadoop.fs.Path) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FileSystem(org.apache.hadoop.fs.FileSystem) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) IOException(java.io.IOException) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
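
A minimal sketch, reusing the test's ID literals, of the string formats that fromString parses:

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class IdParsing {
    public static void main(String[] args) {
        // appattempt_<clusterTimestamp>_<appId>_<attemptId>
        ApplicationAttemptId attemptId =
                ApplicationAttemptId.fromString("appattempt_1317529182569_0004_000002");
        // container_<clusterTimestamp>_<appId>_<attemptId>_<containerId>
        ContainerId containerId =
                ContainerId.fromString("container_1317529182569_0004_000002_1");
        // The parsed container belongs to the parsed attempt; ids compare by value.
        System.out.println(containerId.getApplicationAttemptId().equals(attemptId)); // true
    }
}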

Example 23 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class MRAppMasterTestLaunchTime method testMRAppMasterFailLock.

@Test
public void testMRAppMasterFailLock() throws IOException, InterruptedException {
    String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
    String containerIdStr = "container_1317529182569_0004_000002_1";
    String userName = "TestAppMasterUser";
    JobConf conf = new JobConf();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.fromString(applicationAttemptIdStr);
    JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
    Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
    Path end = MRApps.getEndJobCommitFailureFile(conf, userName, jobId);
    FileSystem fs = FileSystem.get(conf);
    fs.create(start).close();
    fs.create(end).close();
    ContainerId containerId = ContainerId.fromString(containerIdStr);
    MRAppMaster appMaster = new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis(), false, false);
    boolean caught = false;
    try {
        MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
    } catch (IOException e) {
        // The IOException is expected.
        LOG.info("Caught expected Exception", e);
        caught = true;
    }
    assertTrue(caught);
    assertTrue(appMaster.errorHappenedShutDown);
    assertEquals(JobStateInternal.FAILED, appMaster.forcedState);
    appMaster.stop();
    // verify the final status is FAILED
    verifyFailedStatus((MRAppMasterTest) appMaster, "FAILED");
}
Also used : Path(org.apache.hadoop.fs.Path) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FileSystem(org.apache.hadoop.fs.FileSystem) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) IOException(java.io.IOException) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
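
The only difference from Example 22 is which end-commit marker file exists in the staging area. A minimal sketch, reusing the test's literals, of the three marker paths MRApps derives (the staging dir is illustrative):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;

public class CommitMarkerPaths {
    public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/tmp/staging"); // illustrative dir
        ApplicationAttemptId attemptId =
                ApplicationAttemptId.fromString("appattempt_1317529182569_0004_000002");
        JobId jobId = TypeConverter.toYarn(
                TypeConverter.fromYarn(attemptId.getApplicationId()));
        String user = "TestAppMasterUser";
        // MRAppMaster checks these markers on startup to pick its forcedState:
        // start marker plus success marker -> SUCCEEDED, plus failure marker -> FAILED.
        System.out.println(MRApps.getStartJobCommitFile(conf, user, jobId));
        System.out.println(MRApps.getEndJobCommitSuccessFile(conf, user, jobId));
        System.out.println(MRApps.getEndJobCommitFailureFile(conf, user, jobId));
    }
}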

Example 24 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class MRAppMasterTestLaunchTime method testMRAppMasterJobLaunchTime.

@Test
public void testMRAppMasterJobLaunchTime() throws IOException, InterruptedException {
    String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
    String containerIdStr = "container_1317529182569_0004_000002_1";
    String userName = "TestAppMasterUser";
    JobConf conf = new JobConf();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.NUM_REDUCES, 0);
    conf.set(JHAdminConfig.MR_HS_JHIST_FORMAT, "json");
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.fromString(applicationAttemptIdStr);
    JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
    File dir = new File(MRApps.getStagingAreaDir(conf, userName).toString(), jobId.toString());
    dir.mkdirs();
    File historyFile = new File(JobHistoryUtils.getStagingJobHistoryFile(new Path(dir.toURI().toString()), jobId, (applicationAttemptId.getAttemptId() - 1)).toUri().getRawPath());
    historyFile.createNewFile();
    FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(historyFile), null);
    EventWriter writer = new EventWriter(out, EventWriter.WriteMode.JSON);
    writer.close();
    FileSystem fs = FileSystem.get(conf);
    JobSplitWriter.createSplitFiles(new Path(dir.getAbsolutePath()), conf, fs, new org.apache.hadoop.mapred.InputSplit[0]);
    ContainerId containerId = ContainerId.fromString(containerIdStr);
    MRAppMasterTestLaunchTime appMaster = new MRAppMasterTestLaunchTime(applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis());
    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
    appMaster.stop();
    assertTrue("Job launch time should not be negative.", appMaster.jobLaunchTime.get() >= 0);
}
Also used : Path(org.apache.hadoop.fs.Path) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) EventWriter(org.apache.hadoop.mapreduce.jobhistory.EventWriter) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FileOutputStream(java.io.FileOutputStream) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) JobConf(org.apache.hadoop.mapred.JobConf) File(java.io.File) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
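
Example 21 built its ContainerId with newContainerId, while this test parses one from a string. A minimal sketch, with illustrative values, of how the two forms round-trip:

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class IdRoundTrip {
    public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(1317529182569L, 4);
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 2);
        ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
        // toString pads: appId to 4 digits, attemptId to 2, containerId to 6,
        // e.g. container_1317529182569_0004_02_000001
        String s = containerId.toString();
        // Parsing the printed form yields an equal id.
        System.out.println(ContainerId.fromString(s).equals(containerId)); // true
    }
}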

Example 25 with ContainerId

use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.

the class MockJobs method newTaskAttempt.

public static TaskAttempt newTaskAttempt(TaskId tid, int i) {
    final TaskAttemptId taid = Records.newRecord(TaskAttemptId.class);
    taid.setTaskId(tid);
    taid.setId(i);
    final TaskAttemptReport report = newTaskAttemptReport(taid);
    return new TaskAttempt() {

        @Override
        public NodeId getNodeId() throws UnsupportedOperationException {
            throw new UnsupportedOperationException();
        }

        @Override
        public TaskAttemptId getID() {
            return taid;
        }

        @Override
        public TaskAttemptReport getReport() {
            return report;
        }

        @Override
        public long getLaunchTime() {
            return report.getStartTime();
        }

        @Override
        public long getFinishTime() {
            return report.getFinishTime();
        }

        @Override
        public int getShufflePort() {
            return ShuffleHandler.DEFAULT_SHUFFLE_PORT;
        }

        @Override
        public Counters getCounters() {
            if (report != null && report.getCounters() != null) {
                return new Counters(TypeConverter.fromYarn(report.getCounters()));
            }
            return null;
        }

        @Override
        public float getProgress() {
            return report.getProgress();
        }

        @Override
        public Phase getPhase() {
            return report.getPhase();
        }

        @Override
        public TaskAttemptState getState() {
            return report.getTaskAttemptState();
        }

        @Override
        public boolean isFinished() {
            switch(report.getTaskAttemptState()) {
                case SUCCEEDED:
                case FAILED:
                case KILLED:
                    return true;
            }
            return false;
        }

        @Override
        public ContainerId getAssignedContainerID() {
            ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(taid.getTaskId().getJobId().getAppId(), 0);
            ContainerId id = ContainerId.newContainerId(appAttemptId, 0);
            return id;
        }

        @Override
        public String getNodeHttpAddress() {
            return "localhost:8042";
        }

        @Override
        public List<String> getDiagnostics() {
            return Lists.newArrayList(report.getDiagnosticInfo());
        }

        @Override
        public String getAssignedContainerMgrAddress() {
            return "localhost:9998";
        }

        @Override
        public long getShuffleFinishTime() {
            return report.getShuffleFinishTime();
        }

        @Override
        public long getSortFinishTime() {
            return report.getSortFinishTime();
        }

        @Override
        public String getNodeRackName() {
            return "/default-rack";
        }
    };
}
Also used : TaskAttemptReport(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) Counters(org.apache.hadoop.mapreduce.Counters) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId)
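
getAssignedContainerID above rebuilds its ContainerId on every call, which is safe because YARN record ids compare by value. A minimal sketch:

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class ContainerIdEquality {
    public static void main(String[] args) {
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
                ApplicationId.newInstance(1317529182569L, 4), 0);
        // Two separately constructed ids with the same fields are equal,
        // so the mock can rebuild the id each time without surprising callers.
        ContainerId a = ContainerId.newContainerId(attemptId, 0);
        ContainerId b = ContainerId.newContainerId(attemptId, 0);
        System.out.println(a.equals(b) && a.hashCode() == b.hashCode()); // true
    }
}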

Aggregations

ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)588 Test (org.junit.Test)339 ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)173 ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId)169 ArrayList (java.util.ArrayList)161 RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp)119 Container (org.apache.hadoop.yarn.api.records.Container)104 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)94 MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM)79 MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM)78 MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM)77 Path (org.apache.hadoop.fs.Path)76 HashMap (java.util.HashMap)74 Configuration (org.apache.hadoop.conf.Configuration)74 IOException (java.io.IOException)72 RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer)68 Resource (org.apache.hadoop.yarn.api.records.Resource)67 ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus)66 ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest)61 NodeId (org.apache.hadoop.yarn.api.records.NodeId)59