Example 51 with ApplicationAccessType

Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.

Class TestNMLeveldbStateStoreService, method testUnexpectedKeyDoesntThrowException; a short sketch of the ACL round trip follows the example.

@Test
public void testUnexpectedKeyDoesntThrowException() throws IOException {
    // test empty when no state
    List<RecoveredContainerState> recoveredContainers = stateStore.loadContainersState();
    assertTrue(recoveredContainers.isEmpty());
    // create a container request
    ApplicationId appId = ApplicationId.newInstance(1234, 3);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 4);
    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 5);
    LocalResource lrsrc = LocalResource.newInstance(URL.newInstance("hdfs", "somehost", 12345, "/some/path/to/rsrc"), LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, 123L, 1234567890L);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("rsrc", lrsrc);
    Map<String, String> env = new HashMap<String, String>();
    env.put("somevar", "someval");
    List<String> containerCmds = new ArrayList<String>();
    containerCmds.add("somecmd");
    containerCmds.add("somearg");
    Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
    serviceData.put("someservice", ByteBuffer.wrap(new byte[] { 0x1, 0x2, 0x3 }));
    ByteBuffer containerTokens = ByteBuffer.wrap(new byte[] { 0x7, 0x8, 0x9, 0xa });
    Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>();
    acls.put(ApplicationAccessType.VIEW_APP, "viewuser");
    acls.put(ApplicationAccessType.MODIFY_APP, "moduser");
    ContainerLaunchContext clc = ContainerLaunchContext.newInstance(localResources, env, containerCmds, serviceData, containerTokens, acls);
    Resource containerRsrc = Resource.newInstance(1357, 3);
    ContainerTokenIdentifier containerTokenId = new ContainerTokenIdentifier(containerId, "host", "user", containerRsrc, 9876543210L, 42, 2468, Priority.newInstance(7), 13579);
    Token containerToken = Token.newInstance(containerTokenId.getBytes(), ContainerTokenIdentifier.KIND.toString(), "password".getBytes(), "tokenservice");
    StartContainerRequest containerReq = StartContainerRequest.newInstance(clc, containerToken);
    stateStore.storeContainer(containerId, 0, containerReq);
    // add an invalid key
    byte[] invalidKey = ("ContainerManager/containers/" + containerId.toString() + "/invalidKey1234").getBytes();
    stateStore.getDB().put(invalidKey, new byte[1]);
    restartStateStore();
    recoveredContainers = stateStore.loadContainersState();
    assertEquals(1, recoveredContainers.size());
    RecoveredContainerState rcs = recoveredContainers.get(0);
    assertEquals(RecoveredContainerStatus.REQUESTED, rcs.getStatus());
    assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
    assertEquals(false, rcs.getKilled());
    assertEquals(containerReq, rcs.getStartRequest());
    assertTrue(rcs.getDiagnostics().isEmpty());
    assertEquals(RecoveredContainerType.KILL, rcs.getRecoveryType());
    // assert the unknown key survives recovery and is cleaned up when the container is removed
    assertNotNull(stateStore.getDB().get(invalidKey));
    stateStore.removeContainer(containerId);
    assertNull(stateStore.getDB().get(invalidKey));
}
Also used : HashMap(java.util.HashMap) RecoveredContainerState(org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerState) ArrayList(java.util.ArrayList) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) Token(org.apache.hadoop.yarn.api.records.Token) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ByteBuffer(java.nio.ByteBuffer) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)
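
The recurring pattern in the example above is a Map keyed by ApplicationAccessType whose values name the users allowed to view or modify the application, passed as the last argument of ContainerLaunchContext.newInstance and read back with getApplicationACLs(). Below is a minimal, hedged sketch of that round trip using only the API calls already shown; the class and method names (AclRoundTripSketch, buildAndReadBack) are illustrative, not part of Hadoop.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;

public class AclRoundTripSketch {
    public static Map<ApplicationAccessType, String> buildAndReadBack() {
        // ACLs keyed by access type: who may view and who may modify the application.
        Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>();
        acls.put(ApplicationAccessType.VIEW_APP, "viewuser");
        acls.put(ApplicationAccessType.MODIFY_APP, "moduser");
        // Attach the ACLs as the last argument, as the state-store test above does;
        // the container tokens argument is left null in this sketch.
        ContainerLaunchContext clc = ContainerLaunchContext.newInstance(
                new HashMap<String, LocalResource>(), new HashMap<String, String>(),
                new ArrayList<String>(), new HashMap<String, ByteBuffer>(), null, acls);
        // The ACLs travel with the launch context and can be read back later.
        return clc.getApplicationACLs();
    }
}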

Example 52 with ApplicationAccessType

Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.

Class TestTaskAttemptContainerRequest, method testAttemptContainerRequest; a note on the ACL string format follows the example.

// WARNING: This test must be the only test in this file. This is because
// there is an optimization where the credentials passed in are cached
// statically, so they do not need to be recomputed when creating a new
// ContainerLaunchContext. If other tests run first, this code will cache
// their credentials and this test will fail when it looks for the
// credentials it inserted.
@Test
public void testAttemptContainerRequest() throws Exception {
    final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
    final byte[] SECRET_KEY = ("secretkey").getBytes();
    Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>(1);
    acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
    ApplicationId appId = ApplicationId.newInstance(1, 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    Path jobFile = mock(Path.class);
    EventHandler eventHandler = mock(EventHandler.class);
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
    JobConf jobConf = new JobConf();
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
    // setup UGI for security so tokens and keys are preserved
    jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(jobConf);
    Credentials credentials = new Credentials();
    credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
    Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(("tokenid").getBytes(), ("tokenpw").getBytes(), new Text("tokenkind"), new Text("tokenservice"));
    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, mock(TaskSplitMetaInfo.class), jobConf, taListener, jobToken, credentials, SystemClock.getInstance(), null);
    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());
    ContainerLaunchContext launchCtx = TaskAttemptImpl.createContainerLaunchContext(acls, jobConf, jobToken, taImpl.createRemoteTask(), TypeConverter.fromYarn(jobId), mock(WrappedJvmID.class), taListener, credentials);
    Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
    Credentials launchCredentials = new Credentials();
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    dibb.reset(launchCtx.getTokens());
    launchCredentials.readTokenStorageStream(dibb);
    // verify all tokens specified for the task attempt are in the launch context
    for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
        Token<? extends TokenIdentifier> launchToken = launchCredentials.getToken(token.getService());
        Assert.assertNotNull("Token " + token.getService() + " is missing", launchToken);
        Assert.assertEquals("Token " + token.getService() + " mismatch", token, launchToken);
    }
    // verify the secret key is in the launch context
    Assert.assertNotNull("Secret key missing", launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
    Assert.assertTrue("Secret key mismatch", Arrays.equals(SECRET_KEY, launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
Also used : TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) HashMap(java.util.HashMap) TaskAttemptListener(org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) InetSocketAddress(java.net.InetSocketAddress) DataInputByteBuffer(org.apache.hadoop.io.DataInputByteBuffer) EventHandler(org.apache.hadoop.yarn.event.EventHandler) Token(org.apache.hadoop.security.token.Token) WrappedJvmID(org.apache.hadoop.mapred.WrappedJvmID) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Path(org.apache.hadoop.fs.Path) JobTokenIdentifier(org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) Text(org.apache.hadoop.io.Text) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) TaskSplitMetaInfo(org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)
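
In the example above the VIEW_APP value is a single user name ("otheruser"). More generally, the ACL value is a string in the usual Hadoop access-control-list form: a comma-separated list of users, optionally followed by a space and a comma-separated list of groups, with "*" granting access to everyone. A small illustrative sketch (the user and group names are made up, and AclFormatSketch is not a Hadoop class):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;

public class AclFormatSketch {
    public static Map<ApplicationAccessType, String> exampleAcls() {
        Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>();
        // Comma-separated users, then a space, then comma-separated groups.
        acls.put(ApplicationAccessType.VIEW_APP, "alice,bob analysts");
        // A single user may modify the application; "*" would instead grant access to everyone.
        acls.put(ApplicationAccessType.MODIFY_APP, "alice");
        return acls;
    }
}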

Example 53 with ApplicationAccessType

Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.

Class AMSimulator, method submitApp; a sketch of the empty-ACL case follows the example.

private void submitApp() throws YarnException, InterruptedException, IOException {
    // ask for new application
    GetNewApplicationRequest newAppRequest = Records.newRecord(GetNewApplicationRequest.class);
    GetNewApplicationResponse newAppResponse = rm.getClientRMService().getNewApplication(newAppRequest);
    appId = newAppResponse.getApplicationId();
    // submit the application
    final SubmitApplicationRequest subAppRequest = Records.newRecord(SubmitApplicationRequest.class);
    ApplicationSubmissionContext appSubContext = Records.newRecord(ApplicationSubmissionContext.class);
    appSubContext.setApplicationId(appId);
    appSubContext.setMaxAppAttempts(1);
    appSubContext.setQueue(queue);
    appSubContext.setPriority(Priority.newInstance(0));
    ContainerLaunchContext conLauContext = Records.newRecord(ContainerLaunchContext.class);
    conLauContext.setApplicationACLs(new HashMap<ApplicationAccessType, String>());
    conLauContext.setCommands(new ArrayList<String>());
    conLauContext.setEnvironment(new HashMap<String, String>());
    conLauContext.setLocalResources(new HashMap<String, LocalResource>());
    conLauContext.setServiceData(new HashMap<String, ByteBuffer>());
    appSubContext.setAMContainerSpec(conLauContext);
    appSubContext.setResource(Resources.createResource(MR_AM_CONTAINER_RESOURCE_MEMORY_MB, MR_AM_CONTAINER_RESOURCE_VCORES));
    subAppRequest.setApplicationSubmissionContext(appSubContext);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    ugi.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws YarnException, IOException {
            rm.getClientRMService().submitApplication(subAppRequest);
            return null;
        }
    });
    LOG.info(MessageFormat.format("Submit a new application {0}", appId));
}
Also used : GetNewApplicationResponse(org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) SubmitApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) GetNewApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
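
Here AMSimulator submits the AM container spec with an empty ACL map, and Example 54 below does the same via BuilderUtils. With no explicit entries, view and modify access effectively falls back to the submitting user and cluster administrators (the default owner/admin check). A minimal sketch of that empty-ACL setup, using only the setters already shown above; EmptyAclSketch is an illustrative name:

import java.util.HashMap;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

public class EmptyAclSketch {
    public static ContainerLaunchContext amSpecWithoutAcls() {
        ContainerLaunchContext clc = Records.newRecord(ContainerLaunchContext.class);
        // No per-application ACLs: only the owner (and admins) retain view/modify access.
        clc.setApplicationACLs(new HashMap<ApplicationAccessType, String>());
        return clc;
    }
}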

Example 54 with ApplicationAccessType

Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.

Class BaseAMRMProxyE2ETest, method createApp.

protected ApplicationAttemptId createApp(YarnClient yarnClient, MiniYARNCluster yarnCluster, Configuration conf) throws Exception {
    ApplicationSubmissionContext appContext = yarnClient.createApplication().getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName("Test");
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(0);
    appContext.setPriority(pri);
    appContext.setQueue("default");
    ContainerLaunchContext amContainer = BuilderUtils.newContainerLaunchContext(Collections.<String, LocalResource>emptyMap(), new HashMap<String, String>(), Arrays.asList("sleep", "10000"), new HashMap<String, ByteBuffer>(), null, new HashMap<ApplicationAccessType, String>());
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(Resource.newInstance(1024, 1));
    SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class);
    appRequest.setApplicationSubmissionContext(appContext);
    yarnClient.submitApplication(appContext);
    RMAppAttempt appAttempt = null;
    ApplicationAttemptId attemptId = null;
    while (true) {
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
            attemptId = appReport.getCurrentApplicationAttemptId();
            appAttempt = yarnCluster.getResourceManager().getRMContext().getRMApps().get(attemptId.getApplicationId()).getCurrentAppAttempt();
            while (true) {
                if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
                    break;
                }
            }
            break;
        }
    }
    Thread.sleep(1000);
    // Dig into the ResourceManager and get the AMRMToken, just for the sake
    // of testing.
    UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
    // emulate RM setup of AMRM token in credentials by adding the token
    // *before* setting the token service
    UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
    appAttempt.getAMRMToken().setService(ClientRMProxy.getAMRMTokenService(conf));
    return attemptId;
}
Also used : RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) Priority(org.apache.hadoop.yarn.api.records.Priority) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ByteBuffer(java.nio.ByteBuffer) SubmitApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId)

Example 55 with ApplicationAccessType

Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.

Class TestYarnClient, method testApplicationTypeLimit.

@Test(timeout = 30000)
public void testApplicationTypeLimit() throws Exception {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    MockRM rm = new MockRM();
    rm.start();
    RMApp app1 = rm.submitApp(200, "name", "user", new HashMap<ApplicationAccessType, String>(), false, "default", -1, null, "MAPREDUCE-LENGTH-IS-20");
    Assert.assertEquals("MAPREDUCE-LENGTH-IS-", app1.getApplicationType());
    rm.stop();
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) Logger(org.apache.log4j.Logger) Test(org.junit.Test)

Aggregations

ApplicationAccessType (org.apache.hadoop.yarn.api.records.ApplicationAccessType): 64 usages
Test (org.junit.Test): 32 usages
HashMap (java.util.HashMap): 27 usages
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 24 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 23 usages
Credentials (org.apache.hadoop.security.Credentials): 19 usages
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 19 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 18 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 17 usages
ByteBuffer (java.nio.ByteBuffer): 14 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 14 usages
ArrayList (java.util.ArrayList): 13 usages
RMAppAttempt (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt): 12 usages
MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM): 11 usages
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 10 usages
MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM): 10 usages
Path (org.apache.hadoop.fs.Path): 9 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 9 usages
ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext): 9 usages
MemoryRMStateStore (org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore): 9 usages