Search in sources :

Example 26 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class TestNetworkedJob defines the method testNetworkedJob.

/**
 * End-to-end smoke test of the deprecated mapred {@code JobClient} /
 * {@code NetworkedJob} API: submits a trivial map-only job to a mini cluster
 * running the capacity scheduler, then exercises the getters on
 * {@code RunningJob}, {@code ClusterStatus}, queue information and
 * {@code JobClient} itself.
 *
 * @throws Exception if the mini cluster, HDFS setup or job submission fails
 */
@SuppressWarnings("deprecation")
@Test(timeout = 500000)
public void testNetworkedJob() throws Exception {
    // mock creation
    MiniMRClientCluster mr = null;
    FileSystem fileSys = null;
    try {
        mr = createMiniClusterWithCapacityScheduler();
        JobConf job = new JobConf(mr.getConfig());
        fileSys = FileSystem.get(job);
        fileSys.delete(testDir, true);
        FSDataOutputStream out = fileSys.create(inFile, true);
        out.writeBytes("This is a test file");
        out.close();
        FileInputFormat.setInputPaths(job, inFile);
        FileOutputFormat.setOutputPath(job, outDir);
        job.setInputFormat(TextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setMapperClass(IdentityMapper.class);
        job.setReducerClass(IdentityReducer.class);
        // Map-only job: the reduce-side progress getters below must report 0.
        job.setNumReduceTasks(0);
        JobClient client = new JobClient(mr.getConfig());
        RunningJob rj = client.submitJob(job);
        JobID jobId = rj.getID();
        NetworkedJob runningJob = (NetworkedJob) client.getJob(jobId);
        runningJob.setJobPriority(JobPriority.HIGH.name());
        // test getters
        assertTrue(runningJob.getConfiguration().toString().endsWith("0001/job.xml"));
        assertEquals(jobId, runningJob.getID());
        assertEquals(jobId.toString(), runningJob.getJobID());
        assertEquals("N/A", runningJob.getJobName());
        assertTrue(runningJob.getJobFile().endsWith(".staging/" + runningJob.getJobID() + "/job.xml"));
        assertTrue(runningJob.getTrackingURL().length() > 0);
        assertTrue(runningJob.mapProgress() == 0.0f);
        assertTrue(runningJob.reduceProgress() == 0.0f);
        assertTrue(runningJob.cleanupProgress() == 0.0f);
        assertTrue(runningJob.setupProgress() == 0.0f);
        TaskCompletionEvent[] tce = runningJob.getTaskCompletionEvents(0);
        // Fixed argument order: the expected value comes first in assertEquals.
        assertEquals(0, tce.length);
        assertEquals("", runningJob.getHistoryUrl());
        assertFalse(runningJob.isRetired());
        assertEquals("", runningJob.getFailureInfo());
        assertEquals("N/A", runningJob.getJobStatus().getJobName());
        assertEquals(0, client.getMapTaskReports(jobId).length);
        try {
            client.getSetupTaskReports(jobId);
            // Previously the test passed silently when no exception was thrown;
            // AssertionError propagates (it is not a YarnRuntimeException).
            throw new AssertionError("Expected YarnRuntimeException for JOB_SETUP task type");
        } catch (YarnRuntimeException e) {
            assertEquals("Unrecognized task type: JOB_SETUP", e.getMessage());
        }
        try {
            client.getCleanupTaskReports(jobId);
            throw new AssertionError("Expected YarnRuntimeException for JOB_CLEANUP task type");
        } catch (YarnRuntimeException e) {
            assertEquals("Unrecognized task type: JOB_CLEANUP", e.getMessage());
        }
        assertEquals(0, client.getReduceTaskReports(jobId).length);
        // test ClusterStatus
        ClusterStatus status = client.getClusterStatus(true);
        assertEquals(2, status.getActiveTrackerNames().size());
        // These methods are not implemented and always return an empty
        // collection, zero or null.
        assertEquals(0, status.getBlacklistedTrackers());
        assertEquals(0, status.getBlacklistedTrackerNames().size());
        assertEquals(0, status.getBlackListedTrackersInfo().size());
        assertEquals(JobTrackerStatus.RUNNING, status.getJobTrackerStatus());
        assertEquals(1, status.getMapTasks());
        assertEquals(20, status.getMaxMapTasks());
        assertEquals(4, status.getMaxReduceTasks());
        assertEquals(0, status.getNumExcludedNodes());
        assertEquals(1, status.getReduceTasks());
        assertEquals(2, status.getTaskTrackers());
        assertEquals(0, status.getTTExpiryInterval());
        assertEquals(JobTrackerStatus.RUNNING, status.getJobTrackerStatus());
        assertEquals(0, status.getGraylistedTrackers());
        // test read and write: round-trip ClusterStatus through Writable
        // serialization and compare the fields that survive it.
        ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
        status.write(new DataOutputStream(dataOut));
        ClusterStatus status2 = new ClusterStatus();
        status2.readFields(new DataInputStream(new ByteArrayInputStream(dataOut.toByteArray())));
        assertEquals(status.getActiveTrackerNames(), status2.getActiveTrackerNames());
        assertEquals(status.getBlackListedTrackersInfo(), status2.getBlackListedTrackersInfo());
        assertEquals(status.getMapTasks(), status2.getMapTasks());
        // test taskStatusfilter
        JobClient.setTaskOutputFilter(job, TaskStatusFilter.ALL);
        assertEquals(TaskStatusFilter.ALL, JobClient.getTaskOutputFilter(job));
        // runningJob.setJobPriority(JobPriority.HIGH.name());
        // test default map
        assertEquals(20, client.getDefaultMaps());
        assertEquals(4, client.getDefaultReduces());
        assertEquals("jobSubmitDir", client.getSystemDir().getName());
        // test queue information
        JobQueueInfo[] rootQueueInfo = client.getRootQueues();
        assertEquals(1, rootQueueInfo.length);
        assertEquals("default", rootQueueInfo[0].getQueueName());
        JobQueueInfo[] qinfo = client.getQueues();
        assertEquals(1, qinfo.length);
        assertEquals("default", qinfo[0].getQueueName());
        assertEquals(0, client.getChildQueues("default").length);
        assertEquals(1, client.getJobsFromQueue("default").length);
        assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith("/job.xml"));
        JobQueueInfo qi = client.getQueueInfo("default");
        assertEquals("default", qi.getQueueName());
        assertEquals("running", qi.getQueueState());
        QueueAclsInfo[] aai = client.getQueueAclsForCurrentUser();
        assertEquals(2, aai.length);
        assertEquals("root", aai[0].getQueueName());
        assertEquals("default", aai[1].getQueueName());
        // test JobClient
        // The following asserts read JobStatus twice and ensure the returned
        // JobStatus objects correspond to the same Job.
        assertEquals("Expected matching JobIDs", jobId, client.getJob(jobId).getJobStatus().getJobID());
        assertEquals("Expected matching startTimes", rj.getJobStatus().getStartTime(), client.getJob(jobId).getJobStatus().getStartTime());
    } finally {
        // Best-effort cleanup: remove test data and shut the mini cluster down
        // even when an assertion above fails.
        if (fileSys != null) {
            fileSys.delete(testDir, true);
        }
        if (mr != null) {
            mr.stop();
        }
    }
}
Also used : FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) DataInputStream(java.io.DataInputStream) YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) ByteArrayInputStream(java.io.ByteArrayInputStream) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) NetworkedJob(org.apache.hadoop.mapred.JobClient.NetworkedJob) Test(org.junit.Test)

Example 27 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class WebServer defines the method serviceStart.

@Override
protected void serviceStart() throws Exception {
    // Resolve the address the NM web application should bind to.
    Configuration conf = getConfig();
    String webAppAddress = WebAppUtils.getWebAppBindURL(conf, YarnConfiguration.NM_BIND_HOST, WebAppUtils.getNMWebAppURLWithoutScheme(conf));
    // Propagate the CORS opt-in to the cross-origin filter initializer.
    if (conf.getBoolean(YarnConfiguration.NM_WEBAPP_ENABLE_CORS_FILTER, YarnConfiguration.DEFAULT_NM_WEBAPP_ENABLE_CORS_FILTER)) {
        getConfig().setBoolean(HttpCrossOriginFilterInitializer.PREFIX + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
    }
    // Always load pseudo authentication filter to parse "user.name" in an URL
    // to identify a HTTP request's user.
    String filterInitializerConfKey = "hadoop.http.filter.initializers";
    Class<?>[] configuredInitializers = conf.getClasses(filterInitializerConfKey);
    boolean authFilterConfigured = false;
    List<String> initializerNames = new ArrayList<String>();
    if (configuredInitializers != null) {
        for (Class<?> clazz : configuredInitializers) {
            if (AuthenticationFilterInitializer.class.getName().equals(clazz.getName())) {
                authFilterConfigured = true;
                // Already present; no need to rewrite the configuration.
                break;
            }
            initializerNames.add(clazz.getName());
        }
    }
    if (!authFilterConfigured) {
        // Append the auth filter to whatever initializers were configured.
        initializerNames.add(AuthenticationFilterInitializer.class.getName());
        conf.set(filterInitializerConfKey, StringUtils.join(",", initializerNames));
    }
    LOG.info("Instantiating NMWebApp at " + webAppAddress);
    try {
        this.webApp = WebApps.$for("node", Context.class, this.nmContext, "ws").at(webAppAddress).with(conf).withHttpSpnegoPrincipalKey(YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY).withHttpSpnegoKeytabKey(YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY).withCSRFProtection(YarnConfiguration.NM_CSRF_PREFIX).withXFSProtection(YarnConfiguration.NM_XFS_PREFIX).start(this.nmWebApp);
        this.port = this.webApp.httpServer().getConnectorAddress(0).getPort();
    } catch (Exception e) {
        // Wrap any startup failure so the service framework surfaces it.
        String msg = "NMWebapps failed to start.";
        LOG.error(msg, e);
        throw new YarnRuntimeException(msg, e);
    }
    super.serviceStart();
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) AuthenticationFilterInitializer(org.apache.hadoop.security.AuthenticationFilterInitializer) YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException)

Example 28 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class TestLocalDirsHandlerService defines the method testValidPathsDirHandlerService.

@Test
public void testValidPathsDirHandlerService() throws Exception {
    // Configure one valid file:// local dir and one hdfs:// dir, which is
    // not a legal local-dir URI and must make service init fail.
    Configuration conf = new YarnConfiguration();
    String validLocalDir = new File("file:///" + testDir, "localDir1").getPath();
    String invalidLocalDir = new File("hdfs:///" + testDir, "localDir2").getPath();
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, validLocalDir + "," + invalidLocalDir);
    conf.set(YarnConfiguration.NM_LOG_DIRS, new File("file:///" + testDir, "logDir1").getPath());
    LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
    try {
        dirSvc.init(conf);
        Assert.fail("Service should have thrown an exception due to wrong URI");
    } catch (YarnRuntimeException expected) {
        // Expected: the hdfs:// entry is rejected during init.
    }
    Assert.assertEquals("Service should not be inited", STATE.STOPPED, dirSvc.getServiceState());
    dirSvc.close();
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) File(java.io.File) Test(org.junit.Test)

Example 29 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class LeveldbRMStateStore defines the method createAttemptState.

private ApplicationAttemptStateData createAttemptState(String itemName, byte[] data) throws IOException {
    // The database key (itemName) encodes the attempt id; the stored value
    // must describe the same attempt, otherwise the store is corrupt.
    ApplicationAttemptId keyAttemptId = ApplicationAttemptId.fromString(itemName);
    ApplicationAttemptStateDataPBImpl parsedState = new ApplicationAttemptStateDataPBImpl(ApplicationAttemptStateDataProto.parseFrom(data));
    if (!keyAttemptId.equals(parsedState.getAttemptId())) {
        throw new YarnRuntimeException("The database entry for " + keyAttemptId + " contains data for " + parsedState.getAttemptId());
    }
    return parsedState;
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) ApplicationAttemptStateDataPBImpl(org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId)

Example 30 with YarnRuntimeException

use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.

The class MemoryRMStateStore defines the method removeApplicationStateInternal.

@Override
public synchronized void removeApplicationStateInternal(ApplicationStateData appState) throws Exception {
    // The application id lives inside the submission context of the state.
    ApplicationId appId = appState.getApplicationSubmissionContext().getApplicationId();
    // Map#remove returns null when there was no entry to delete.
    if (state.appState.remove(appId) == null) {
        throw new YarnRuntimeException("Removing non-existing application state");
    }
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) ApplicationStateData(org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId)

Aggregations

YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException)147 IOException (java.io.IOException)56 Configuration (org.apache.hadoop.conf.Configuration)38 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)28 Test (org.junit.Test)28 YarnException (org.apache.hadoop.yarn.exceptions.YarnException)17 InetSocketAddress (java.net.InetSocketAddress)12 Path (org.apache.hadoop.fs.Path)12 ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)9 InvocationTargetException (java.lang.reflect.InvocationTargetException)8 Server (org.apache.hadoop.ipc.Server)8 FileSystem (org.apache.hadoop.fs.FileSystem)7 FsPermission (org.apache.hadoop.fs.permission.FsPermission)7 UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)7 FileNotFoundException (java.io.FileNotFoundException)6 ArrayList (java.util.ArrayList)6 HashMap (java.util.HashMap)6 JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)6 Job (org.apache.hadoop.mapreduce.v2.app.job.Job)6 ConnectException (java.net.ConnectException)5