Search in sources :

Example 6 with YarnConfiguration

use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.

Source: the testNoRetryOnAMAuthorizationException method of the TestClientServiceDelegate class.

/**
 * Verifies that an AuthorizationException from the AM is surfaced to the
 * caller immediately (wrapped in an IOException) without consuming any of
 * the client's retry budget: the AM proxy must be invoked exactly once and
 * maxClientRetry must stay at its configured value.
 */
@Test
public void testNoRetryOnAMAuthorizationException() throws Exception {
    if (!isAMReachableFromClient) {
        return;
    }
    // RM delegate reports a running AM ("am1") for the job under test.
    ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
    when(rmDelegate.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
        .thenReturn(getRunningApplicationReport("am1", 78));
    // The AM proxy denies the very first getJobReport call.
    final MRClientProtocol deniedAmProxy = mock(MRClientProtocol.class);
    when(deniedAmProxy.getJobReport(any(GetJobReportRequest.class)))
        .thenThrow(new AuthorizationException("Denied"));
    Configuration conf = new YarnConfiguration();
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, !isAMReachableFromClient);
    // Delegate whose AM-proxy factory is overridden to hand back the mock.
    ClientServiceDelegate delegate =
        new ClientServiceDelegate(conf, rmDelegate, oldJobId, null) {

            @Override
            MRClientProtocol instantiateAMProxy(final InetSocketAddress serviceAddr)
                    throws IOException {
                super.instantiateAMProxy(serviceAddr);
                return deniedAmProxy;
            }
        };
    try {
        delegate.getJobStatus(oldJobId);
        Assert.fail("Exception should be thrown upon AuthorizationException");
    } catch (IOException e) {
        Assert.assertEquals(AuthorizationException.class.getName() + ": Denied",
            e.getMessage());
    }
    // assert maxClientRetry is not decremented.
    Assert.assertEquals(
        conf.getInt(MRJobConfig.MR_CLIENT_MAX_RETRIES,
            MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES),
        delegate.getMaxClientRetry());
    verify(deniedAmProxy, times(1)).getJobReport(any(GetJobReportRequest.class));
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) AuthorizationException(org.apache.hadoop.security.authorize.AuthorizationException) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) InetSocketAddress(java.net.InetSocketAddress) IOException(java.io.IOException) GetJobReportRequest(org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest) MRClientProtocol(org.apache.hadoop.mapreduce.v2.api.MRClientProtocol) Test(org.junit.Test)

Example 7 with YarnConfiguration

use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.

Source: the testHadoopArchiveLogs method of the TestHadoopArchiveLogsRunner class.

/**
 * End-to-end test: writes three fake aggregated log files for one
 * application on a mini YARN/DFS cluster, runs HadoopArchiveLogsRunner, and
 * verifies the logs were replaced by a single .har archive whose entries
 * retain their names, sizes, permissions, and owner, and that the working
 * directory is left empty.
 */
@Test(timeout = 50000)
public void testHadoopArchiveLogs() throws Exception {
    MiniDFSCluster dfsCluster = null;
    FileSystem fs = null;
    try (MiniYARNCluster yarnCluster = new MiniYARNCluster(TestHadoopArchiveLogsRunner.class.getSimpleName(), 1, 2, 1, 1)) {
        Configuration conf = new YarnConfiguration();
        conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
        conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
        yarnCluster.init(conf);
        yarnCluster.start();
        // Pick up the addresses/ports the mini cluster actually bound to.
        conf = yarnCluster.getConfig();
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        conf = new JobConf(conf);
        ApplicationId app1 = ApplicationId.newInstance(System.currentTimeMillis(), 1);
        fs = FileSystem.get(conf);
        Path remoteRootLogDir = new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
        Path workingDir = new Path(remoteRootLogDir, "archive-logs-work");
        String suffix = "logs";
        Path logDir = new Path(remoteRootLogDir, new Path(System.getProperty("user.name"), suffix));
        fs.mkdirs(logDir);
        Path app1Path = new Path(logDir, app1.toString());
        fs.mkdirs(app1Path);
        // Three fake log files sized as multiples of FILE_SIZE_INCREMENT.
        createFile(fs, new Path(app1Path, "log1"), 3);
        createFile(fs, new Path(app1Path, "log2"), 4);
        createFile(fs, new Path(app1Path, "log3"), 2);
        FileStatus[] app1Files = fs.listStatus(app1Path);
        Assert.assertEquals(3, app1Files.length);
        String[] args = new String[] { "-appId", app1.toString(), "-user", System.getProperty("user.name"), "-workingDir", workingDir.toString(), "-remoteRootLogDir", remoteRootLogDir.toString(), "-suffix", suffix };
        final HadoopArchiveLogsRunner halr = new HadoopArchiveLogsRunner(conf);
        // Consistency fix: was a bare assertEquals while every other
        // assertion in this test uses the Assert.-qualified form.
        Assert.assertEquals(0, ToolRunner.run(halr, args));
        fs = FileSystem.get(conf);
        // After archiving, only the .har file should remain in the app dir.
        app1Files = fs.listStatus(app1Path);
        Assert.assertEquals(1, app1Files.length);
        FileStatus harFile = app1Files[0];
        Assert.assertEquals(app1.toString() + ".har", harFile.getPath().getName());
        Path harPath = new Path("har:///" + harFile.getPath().toUri().getRawPath());
        FileStatus[] harLogs = HarFs.get(harPath.toUri(), conf).listStatus(harPath);
        Assert.assertEquals(3, harLogs.length);
        // Sort by name so the entries can be checked positionally.
        Arrays.sort(harLogs, new Comparator<FileStatus>() {

            @Override
            public int compare(FileStatus o1, FileStatus o2) {
                return o1.getPath().getName().compareTo(o2.getPath().getName());
            }
        });
        assertHarEntry(harLogs[0], "log1", 3);
        assertHarEntry(harLogs[1], "log2", 4);
        assertHarEntry(harLogs[2], "log3", 2);
        // The archiver must clean up its working directory.
        Assert.assertEquals(0, fs.listStatus(workingDir).length);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (dfsCluster != null) {
            dfsCluster.shutdown();
        }
    }
}

/**
 * Asserts one entry inside the generated HAR: expected file name, a length
 * of {@code sizeFactor * FILE_SIZE_INCREMENT} bytes, rw-/r--/--- (640)
 * permissions, and ownership by the current user.
 */
private void assertHarEntry(FileStatus entry, String name, int sizeFactor) {
    Assert.assertEquals(name, entry.getPath().getName());
    Assert.assertEquals(sizeFactor * FILE_SIZE_INCREMENT, entry.getLen());
    Assert.assertEquals(new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.NONE), entry.getPermission());
    Assert.assertEquals(System.getProperty("user.name"), entry.getOwner());
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileStatus(org.apache.hadoop.fs.FileStatus) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) FsPermission(org.apache.hadoop.fs.permission.FsPermission) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JobConf(org.apache.hadoop.mapred.JobConf) Test(org.junit.Test)

Example 8 with YarnConfiguration

use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.

Source: the startRM method of the SLSRunner class.

/**
 * Creates and starts the simulated ResourceManager. The configured
 * scheduler is swapped for an SLS-instrumented one: CapacityScheduler is
 * replaced by SLSCapacityScheduler; any other scheduler is wrapped in
 * ResourceSchedulerWrapper, with the real class recorded under the SLS key
 * so the wrapper can delegate to it.
 */
private void startRM() throws IOException, ClassNotFoundException {
    Configuration rmConf = new YarnConfiguration();
    String configuredScheduler = rmConf.get(YarnConfiguration.RM_SCHEDULER);
    // exercise/track behaviors that are not common to the scheduler api
    if (Class.forName(configuredScheduler) != CapacityScheduler.class) {
        rmConf.set(YarnConfiguration.RM_SCHEDULER, ResourceSchedulerWrapper.class.getName());
        rmConf.set(SLSConfiguration.RM_SCHEDULER, configuredScheduler);
    } else {
        rmConf.set(YarnConfiguration.RM_SCHEDULER, SLSCapacityScheduler.class.getName());
    }
    rmConf.set(SLSConfiguration.METRICS_OUTPUT_DIR, metricsOutputDir);
    final SLSRunner runner = this;
    rm = new ResourceManager() {

        @Override
        protected ApplicationMasterLauncher createAMLauncher() {
            // Hand AM containers to the simulator instead of launching real AMs.
            return new MockAMLauncher(runner, this.rmContext, amMap);
        }
    };
    rm.init(rmConf);
    rm.start();
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) SLSConfiguration(org.apache.hadoop.yarn.sls.conf.SLSConfiguration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) MockAMLauncher(org.apache.hadoop.yarn.sls.resourcemanager.MockAMLauncher) ApplicationMasterLauncher(org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher) ResourceManager(org.apache.hadoop.yarn.server.resourcemanager.ResourceManager) ResourceSchedulerWrapper(org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper) SLSCapacityScheduler(org.apache.hadoop.yarn.sls.scheduler.SLSCapacityScheduler)

Example 9 with YarnConfiguration

use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.

Source: the setup method of the TestOpportunisticContainerAllocation class.

/**
 * One-time fixture: starts a mini YARN cluster configured for
 * opportunistic container allocation, starts a YarnClient against it, and
 * caches node/rack/priority/capability data used by the tests.
 */
@BeforeClass
public static void setup() throws Exception {
    // Cluster configuration: token rolling, AM expiry, fast NM heartbeats.
    conf = new YarnConfiguration();
    conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, ROLLING_INTERVAL_SEC);
    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, AM_EXPIRE_MS);
    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1000);
    // set the minimum allocation so that resource decrease can go under 1024
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
    // Enable the feature under test and bound the NM's opportunistic queue.
    conf.setBoolean(YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
    conf.setInt(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 10);
    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
    // Bring up the mini cluster and a client connected to it.
    yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    // Cache topology and request parameters shared by the test cases.
    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    node = nodeReports.get(0).getNodeId().getHost();
    rack = nodeReports.get(0).getRackName();
    nodes = new String[] { node };
    racks = new String[] { rack };
    capability = Resource.newInstance(512, 1);
    priority = Priority.newInstance(1);
    priority2 = Priority.newInstance(2);
    priority3 = Priority.newInstance(3);
    priority4 = Priority.newInstance(4);
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) BeforeClass(org.junit.BeforeClass)

Example 10 with YarnConfiguration

use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.

Source: the setup method of the TestNMClient class.

/**
 * Per-test fixture: starts a mini YARN cluster, submits an unmanaged-AM
 * application, waits for its attempt to reach LAUNCHED, grabs the AMRMToken
 * directly from the RM, and wires up AMRM/NM clients that share a single
 * NMTokenCache so container operations can be driven from the test.
 */
@Before
public void setup() throws YarnException, IOException {
    // start minicluster
    conf = new YarnConfiguration();
    yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();
    assertNotNull(yarnCluster);
    assertEquals(STATE.STARTED, yarnCluster.getServiceState());
    // start rm client
    yarnClient = (YarnClientImpl) YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    assertNotNull(yarnClient);
    assertEquals(STATE.STARTED, yarnClient.getServiceState());
    // get node info
    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    // submit new app
    ApplicationSubmissionContext appContext = yarnClient.createApplication().getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    // set the application name
    appContext.setApplicationName("Test");
    // Set the priority for the application master
    Priority pri = Priority.newInstance(0);
    appContext.setPriority(pri);
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue("default");
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    appContext.setAMContainerSpec(amContainer);
    // unmanaged AM: the test process itself will act as the AM
    appContext.setUnmanagedAM(true);
    // Create the request to send to the applications manager
    SubmitApplicationRequest appRequest = Records.newRecord(SubmitApplicationRequest.class);
    appRequest.setApplicationSubmissionContext(appContext);
    // Submit the application to the applications manager
    yarnClient.submitApplication(appContext);
    // wait (up to ~30s) for the app to be ACCEPTED and its attempt LAUNCHED
    int iterationsLeft = 30;
    RMAppAttempt appAttempt = null;
    while (iterationsLeft > 0) {
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
            attemptId = appReport.getCurrentApplicationAttemptId();
            appAttempt = yarnCluster.getResourceManager().getRMContext().getRMApps().get(attemptId.getApplicationId()).getCurrentAppAttempt();
            while (appAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
                // Fix: the original spun in a tight while(true) loop with no
                // pause, pegging a CPU core while waiting; back off briefly.
                sleep(100);
            }
            break;
        }
        sleep(1000);
        --iterationsLeft;
    }
    if (iterationsLeft == 0) {
        // Fix: message typo ("bee" -> "been").
        fail("Application hasn't been started");
    }
    // Just dig into the ResourceManager and get the AMRMToken just for the sake
    // of testing.
    UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
    UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
    //creating an instance NMTokenCache
    nmTokenCache = new NMTokenCache();
    // start am rm client
    rmClient = (AMRMClientImpl<ContainerRequest>) AMRMClient.<ContainerRequest>createAMRMClient();
    //setting an instance NMTokenCache
    rmClient.setNMTokenCache(nmTokenCache);
    rmClient.init(conf);
    rmClient.start();
    assertNotNull(rmClient);
    assertEquals(STATE.STARTED, rmClient.getServiceState());
    // start am nm client
    nmClient = (NMClientImpl) NMClient.createNMClient();
    //propagating the AMRMClient NMTokenCache instance
    nmClient.setNMTokenCache(rmClient.getNMTokenCache());
    nmClient.init(conf);
    nmClient.start();
    assertNotNull(nmClient);
    assertEquals(STATE.STARTED, nmClient.getServiceState());
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Priority(org.apache.hadoop.yarn.api.records.Priority) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) ContainerRequest(org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) MiniYARNCluster(org.apache.hadoop.yarn.server.MiniYARNCluster) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) NMTokenCache(org.apache.hadoop.yarn.client.api.NMTokenCache) SubmitApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) Before(org.junit.Before)

Aggregations

YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)477 Test (org.junit.Test)300 Configuration (org.apache.hadoop.conf.Configuration)179 Before (org.junit.Before)65 IOException (java.io.IOException)63 RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp)51 ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)48 ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)47 ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId)41 Path (org.apache.hadoop.fs.Path)39 MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM)38 File (java.io.File)37 ArrayList (java.util.ArrayList)28 HashMap (java.util.HashMap)27 RMContainerTokenSecretManager (org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager)27 MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM)26 YarnException (org.apache.hadoop.yarn.exceptions.YarnException)25 YarnClient (org.apache.hadoop.yarn.client.api.YarnClient)24 YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException)24 RMContextImpl (org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl)24