Example 56 with ApplicationAttemptId

use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

the class TestQueueStateManager method getMockApplication.

private FiCaSchedulerApp getMockApplication(ApplicationId appId, String user, Resource amResource) {
    FiCaSchedulerApp application = mock(FiCaSchedulerApp.class);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    doReturn(applicationAttemptId.getApplicationId()).when(application).getApplicationId();
    doReturn(applicationAttemptId).when(application).getApplicationAttemptId();
    doReturn(user).when(application).getUser();
    doReturn(amResource).when(application).getAMResource();
    doReturn(Priority.newInstance(0)).when(application).getPriority();
    doReturn(CommonNodeLabelsManager.NO_LABEL).when(application).getAppAMNodePartitionName();
    doReturn(amResource).when(application).getAMResource(CommonNodeLabelsManager.NO_LABEL);
    when(application.compareInputOrderTo(any(FiCaSchedulerApp.class))).thenCallRealMethod();
    return application;
}
Also used : FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId)
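The attempt id in the mock is built directly from the ApplicationId under test, so the two records stay consistent. A minimal standalone sketch of that construction (the class name and literal values below are illustrative only, not part of the test):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AttemptIdSketch {
    public static void main(String[] args) {
        // Arbitrary cluster timestamp and application sequence number.
        ApplicationId appId = ApplicationId.newInstance(123456789L, 1);
        // Attempt number 0, matching the mock built in getMockApplication above.
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 0);
        // The attempt id always carries its parent application id.
        System.out.println(attemptId.getApplicationId().equals(appId)); // true
        System.out.println(attemptId); // something like appattempt_123456789_0001_000000
    }
}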

Example 57 with ApplicationAttemptId

use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

the class TestTimelineServiceClientIntegration method testPutExtendedEntities.

@Test
public void testPutExtendedEntities() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(0, 1);
    TimelineV2Client client = TimelineV2Client.createTimelineClient(appId);
    try {
        // set the timeline service address manually
        client.setTimelineServiceAddress(collectorManager.getRestServerBindAddress());
        client.init(conf);
        client.start();
        ClusterEntity cluster = new ClusterEntity();
        cluster.setId(YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
        FlowRunEntity flow = new FlowRunEntity();
        flow.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
        flow.setName("test_flow_name");
        flow.setVersion("test_flow_version");
        flow.setRunId(1L);
        flow.setParent(cluster.getType(), cluster.getId());
        ApplicationEntity app = new ApplicationEntity();
        app.setId(appId.toString());
        flow.addChild(app.getType(), app.getId());
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
        ApplicationAttemptEntity appAttempt = new ApplicationAttemptEntity();
        appAttempt.setId(attemptId.toString());
        ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
        ContainerEntity container = new ContainerEntity();
        container.setId(containerId.toString());
        UserEntity user = new UserEntity();
        user.setId(UserGroupInformation.getCurrentUser().getShortUserName());
        QueueEntity queue = new QueueEntity();
        queue.setId("default_queue");
        client.putEntities(cluster, flow, app, appAttempt, container, user, queue);
        client.putEntitiesAsync(cluster, flow, app, appAttempt, container, user, queue);
    } finally {
        client.stop();
    }
}
Also used : ApplicationAttemptEntity(org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity) TimelineV2Client(org.apache.hadoop.yarn.client.api.TimelineV2Client) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ContainerEntity(org.apache.hadoop.yarn.api.records.timelineservice.ContainerEntity) ApplicationEntity(org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity) FlowRunEntity(org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity) QueueEntity(org.apache.hadoop.yarn.api.records.timelineservice.QueueEntity) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) UserEntity(org.apache.hadoop.yarn.api.records.timelineservice.UserEntity) ClusterEntity(org.apache.hadoop.yarn.api.records.timelineservice.ClusterEntity) Test(org.junit.Test)
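The timeline entity ids for the attempt and container are simply the toString() forms of the corresponding YARN records. A small sketch of that mapping in isolation, without the timeline client (the class name and printed values are illustrative):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.ContainerEntity;

public class TimelineIdSketch {
    public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(0, 1);
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
        ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
        // Entity ids are the string forms of the YARN records.
        ApplicationAttemptEntity appAttempt = new ApplicationAttemptEntity();
        appAttempt.setId(attemptId.toString());
        ContainerEntity container = new ContainerEntity();
        container.setId(containerId.toString());
        System.out.println(appAttempt.getId()); // e.g. appattempt_0_0001_000001
        System.out.println(container.getId());  // e.g. container_0_0001_01_000001
    }
}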

Example 58 with ApplicationAttemptId

use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

the class NMTokenSecretManagerInNM method recover.

public synchronized void recover() throws IOException {
    RecoveredNMTokensState state = stateStore.loadNMTokensState();
    MasterKey key = state.getCurrentMasterKey();
    if (key != null) {
        super.currentMasterKey = new MasterKeyData(key, createSecretKey(key.getBytes().array()));
    }
    key = state.getPreviousMasterKey();
    if (key != null) {
        previousMasterKey = new MasterKeyData(key, createSecretKey(key.getBytes().array()));
    }
    // restore the serial number from the current master key
    if (super.currentMasterKey != null) {
        super.serialNo = super.currentMasterKey.getMasterKey().getKeyId() + 1;
    }
    for (Map.Entry<ApplicationAttemptId, MasterKey> entry : state.getApplicationMasterKeys().entrySet()) {
        key = entry.getValue();
        oldMasterKeys.put(entry.getKey(), new MasterKeyData(key, createSecretKey(key.getBytes().array())));
    }
    // reconstruct app to app attempts map
    appToAppAttemptMap.clear();
    for (ApplicationAttemptId attempt : oldMasterKeys.keySet()) {
        ApplicationId app = attempt.getApplicationId();
        List<ApplicationAttemptId> attempts = appToAppAttemptMap.get(app);
        if (attempts == null) {
            attempts = new ArrayList<ApplicationAttemptId>();
            appToAppAttemptMap.put(app, attempts);
        }
        attempts.add(attempt);
    }
}
Also used : RecoveredNMTokensState(org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokensState) MasterKey(org.apache.hadoop.yarn.server.api.records.MasterKey) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) HashMap(java.util.HashMap) Map(java.util.Map) MasterKeyData(org.apache.hadoop.yarn.server.security.MasterKeyData)
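The final loop rebuilds the application-to-attempts index by grouping every recovered attempt id under its parent ApplicationId. The same grouping can be written more compactly with Map.computeIfAbsent; the helper below is only a sketch of that idiom (class and method names are invented for illustration), not the NodeManager's code:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class GroupAttemptsSketch {
    // Group attempt ids by their parent application id.
    static Map<ApplicationId, List<ApplicationAttemptId>> group(Iterable<ApplicationAttemptId> attempts) {
        Map<ApplicationId, List<ApplicationAttemptId>> byApp = new HashMap<>();
        for (ApplicationAttemptId attempt : attempts) {
            byApp.computeIfAbsent(attempt.getApplicationId(), k -> new ArrayList<>()).add(attempt);
        }
        return byApp;
    }
}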

Example 59 with ApplicationAttemptId

use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

the class NMLeveldbStateStoreService method loadNMTokensState.

@Override
public RecoveredNMTokensState loadNMTokensState() throws IOException {
    RecoveredNMTokensState state = new RecoveredNMTokensState();
    state.applicationMasterKeys = new HashMap<ApplicationAttemptId, MasterKey>();
    LeveldbIterator iter = null;
    try {
        iter = new LeveldbIterator(db);
        iter.seek(bytes(NM_TOKENS_KEY_PREFIX));
        while (iter.hasNext()) {
            Entry<byte[], byte[]> entry = iter.next();
            String fullKey = asString(entry.getKey());
            if (!fullKey.startsWith(NM_TOKENS_KEY_PREFIX)) {
                break;
            }
            String key = fullKey.substring(NM_TOKENS_KEY_PREFIX.length());
            if (key.equals(CURRENT_MASTER_KEY_SUFFIX)) {
                state.currentMasterKey = parseMasterKey(entry.getValue());
            } else if (key.equals(PREV_MASTER_KEY_SUFFIX)) {
                state.previousMasterKey = parseMasterKey(entry.getValue());
            } else if (key.startsWith(ApplicationAttemptId.appAttemptIdStrPrefix)) {
                ApplicationAttemptId attempt;
                try {
                    attempt = ApplicationAttemptId.fromString(key);
                } catch (IllegalArgumentException e) {
                    throw new IOException("Bad application master key state for " + fullKey, e);
                }
                state.applicationMasterKeys.put(attempt, parseMasterKey(entry.getValue()));
            }
        }
    } catch (DBException e) {
        throw new IOException(e);
    } finally {
        if (iter != null) {
            iter.close();
        }
    }
    return state;
}
Also used : DBException(org.iq80.leveldb.DBException) LeveldbIterator(org.apache.hadoop.yarn.server.utils.LeveldbIterator) MasterKey(org.apache.hadoop.yarn.server.api.records.MasterKey) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) JniDBFactory.asString(org.fusesource.leveldbjni.JniDBFactory.asString) IOException(java.io.IOException)
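Each per-application key in the store is named after the attempt's string form, so recovery relies on ApplicationAttemptId.fromString accepting exactly what toString produces and rejecting anything else with an IllegalArgumentException. A quick round-trip sketch (class name and sample ids are illustrative):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AttemptIdParseSketch {
    public static void main(String[] args) {
        ApplicationAttemptId original =
            ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234L, 7), 2);
        // Round trip: fromString() parses the exact format produced by toString().
        ApplicationAttemptId parsed = ApplicationAttemptId.fromString(original.toString());
        System.out.println(parsed.equals(original)); // true
        try {
            ApplicationAttemptId.fromString("appattempt_not_a_valid_id");
        } catch (IllegalArgumentException e) {
            // Malformed ids fail fast; loadNMTokensState wraps this in an IOException.
            System.out.println("rejected malformed id");
        }
    }
}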

Example 60 with ApplicationAttemptId

use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

the class NMTokenSecretManagerInNM method retrievePassword.

/**
   * This method will be used to verify NMTokens generated by different master
   * keys.
   */
@Override
public synchronized byte[] retrievePassword(NMTokenIdentifier identifier) throws InvalidToken {
    int keyId = identifier.getKeyId();
    ApplicationAttemptId appAttemptId = identifier.getApplicationAttemptId();
    /*
     * MasterKey used for retrieving password will be as follows. 1) By default
     * older saved master key will be used. 2) If identifier's master key id
     * matches that of previous master key id then previous key will be used. 3)
     * If identifier's master key id matches that of current master key id then
     * current key will be used.
     */
    MasterKeyData oldMasterKey = oldMasterKeys.get(appAttemptId);
    MasterKeyData masterKeyToUse = oldMasterKey;
    if (previousMasterKey != null && keyId == previousMasterKey.getMasterKey().getKeyId()) {
        masterKeyToUse = previousMasterKey;
    } else if (keyId == currentMasterKey.getMasterKey().getKeyId()) {
        masterKeyToUse = currentMasterKey;
    }
    if (nodeId != null && !identifier.getNodeId().equals(nodeId)) {
        throw new InvalidToken("Given NMToken for application : " + appAttemptId.toString() + " is not valid for current node manager." + "expected : " + nodeId.toString() + " found : " + identifier.getNodeId().toString());
    }
    if (masterKeyToUse != null) {
        byte[] password = retrivePasswordInternal(identifier, masterKeyToUse);
        LOG.debug("NMToken password retrieved successfully!!");
        return password;
    }
    throw new InvalidToken("Given NMToken for application : " + appAttemptId.toString() + " seems to have been generated illegally.");
}
Also used : ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) MasterKeyData(org.apache.hadoop.yarn.server.security.MasterKeyData)
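The key lookup follows a fixed precedence: start from the master key saved for this application attempt, then prefer the previous or current master key when its key id matches the one embedded in the token identifier. The hypothetical helper below (names invented for illustration) restates that precedence on its own:

import org.apache.hadoop.yarn.server.security.MasterKeyData;

public class KeySelectionSketch {
    // Mirrors the precedence used by retrievePassword above.
    static MasterKeyData selectKey(int identifierKeyId, MasterKeyData oldAppKey,
                                   MasterKeyData previousKey, MasterKeyData currentKey) {
        // 1) By default, use the key saved for this application attempt.
        MasterKeyData keyToUse = oldAppKey;
        // 2) If the token was signed with the previous master key, use that.
        if (previousKey != null && identifierKeyId == previousKey.getMasterKey().getKeyId()) {
            keyToUse = previousKey;
        // 3) If the token was signed with the current master key, use that.
        } else if (currentKey != null && identifierKeyId == currentKey.getMasterKey().getKeyId()) {
            keyToUse = currentKey;
        }
        return keyToUse;
    }
}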

Aggregations

ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId) 514
Test (org.junit.Test) 362
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 222
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId) 170
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 109
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) 104
Configuration (org.apache.hadoop.conf.Configuration) 87
Resource (org.apache.hadoop.yarn.api.records.Resource) 82
ArrayList (java.util.ArrayList) 75
NodeId (org.apache.hadoop.yarn.api.records.NodeId) 74
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) 65
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) 63
NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent) 60
Path (org.apache.hadoop.fs.Path) 55
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId) 53
Priority (org.apache.hadoop.yarn.api.records.Priority) 52
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) 51
Container (org.apache.hadoop.yarn.api.records.Container) 50
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) 49
HashMap (java.util.HashMap) 42