
Example 1 with NetworkedJob

Use of org.apache.hadoop.mapred.JobClient.NetworkedJob in project hadoop by apache.

From the class TestNetworkedJob, method testNetworkedJob.

/**
 * Tests JobClient.NetworkedJob: its getters, the cluster status, task report,
 * and queue information exposed through JobClient, and the ClusterStatus
 * read/write round trip.
 * @throws Exception
 */
@SuppressWarnings("deprecation")
@Test(timeout = 500000)
public void testNetworkedJob() throws Exception {
    // mini cluster and file system handles; created below, cleaned up in finally
    MiniMRClientCluster mr = null;
    FileSystem fileSys = null;
    try {
        mr = createMiniClusterWithCapacityScheduler();
        JobConf job = new JobConf(mr.getConfig());
        fileSys = FileSystem.get(job);
        fileSys.delete(testDir, true);
        FSDataOutputStream out = fileSys.create(inFile, true);
        out.writeBytes("This is a test file");
        out.close();
        FileInputFormat.setInputPaths(job, inFile);
        FileOutputFormat.setOutputPath(job, outDir);
        job.setInputFormat(TextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setMapperClass(IdentityMapper.class);
        job.setReducerClass(IdentityReducer.class);
        job.setNumReduceTasks(0);
        JobClient client = new JobClient(mr.getConfig());
        RunningJob rj = client.submitJob(job);
        JobID jobId = rj.getID();
        NetworkedJob runningJob = (NetworkedJob) client.getJob(jobId);
        runningJob.setJobPriority(JobPriority.HIGH.name());
        // test getters
        assertTrue(runningJob.getConfiguration().toString().endsWith("0001/job.xml"));
        assertEquals(jobId, runningJob.getID());
        assertEquals(jobId.toString(), runningJob.getJobID());
        assertEquals("N/A", runningJob.getJobName());
        assertTrue(runningJob.getJobFile().endsWith(".staging/" + runningJob.getJobID() + "/job.xml"));
        assertTrue(runningJob.getTrackingURL().length() > 0);
        assertTrue(runningJob.mapProgress() == 0.0f);
        assertTrue(runningJob.reduceProgress() == 0.0f);
        assertTrue(runningJob.cleanupProgress() == 0.0f);
        assertTrue(runningJob.setupProgress() == 0.0f);
        TaskCompletionEvent[] tce = runningJob.getTaskCompletionEvents(0);
        assertEquals(0, tce.length);
        assertEquals("", runningJob.getHistoryUrl());
        assertFalse(runningJob.isRetired());
        assertEquals("", runningJob.getFailureInfo());
        assertEquals("N/A", runningJob.getJobStatus().getJobName());
        assertEquals(0, client.getMapTaskReports(jobId).length);
        try {
            client.getSetupTaskReports(jobId);
        } catch (YarnRuntimeException e) {
            assertEquals("Unrecognized task type: JOB_SETUP", e.getMessage());
        }
        try {
            client.getCleanupTaskReports(jobId);
        } catch (YarnRuntimeException e) {
            assertEquals("Unrecognized task type: JOB_CLEANUP", e.getMessage());
        }
        assertEquals(0, client.getReduceTaskReports(jobId).length);
        // test ClusterStatus
        ClusterStatus status = client.getClusterStatus(true);
        assertEquals(2, status.getActiveTrackerNames().size());
        // these methods are not implemented and always return zero or an empty collection
        assertEquals(0, status.getBlacklistedTrackers());
        assertEquals(0, status.getBlacklistedTrackerNames().size());
        assertEquals(0, status.getBlackListedTrackersInfo().size());
        assertEquals(JobTrackerStatus.RUNNING, status.getJobTrackerStatus());
        assertEquals(1, status.getMapTasks());
        assertEquals(20, status.getMaxMapTasks());
        assertEquals(4, status.getMaxReduceTasks());
        assertEquals(0, status.getNumExcludedNodes());
        assertEquals(1, status.getReduceTasks());
        assertEquals(2, status.getTaskTrackers());
        assertEquals(0, status.getTTExpiryInterval());
        assertEquals(JobTrackerStatus.RUNNING, status.getJobTrackerStatus());
        assertEquals(0, status.getGraylistedTrackers());
        // test read and write
        ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
        status.write(new DataOutputStream(dataOut));
        ClusterStatus status2 = new ClusterStatus();
        status2.readFields(new DataInputStream(new ByteArrayInputStream(dataOut.toByteArray())));
        assertEquals(status.getActiveTrackerNames(), status2.getActiveTrackerNames());
        assertEquals(status.getBlackListedTrackersInfo(), status2.getBlackListedTrackersInfo());
        assertEquals(status.getMapTasks(), status2.getMapTasks());
        // test taskStatusfilter
        JobClient.setTaskOutputFilter(job, TaskStatusFilter.ALL);
        assertEquals(TaskStatusFilter.ALL, JobClient.getTaskOutputFilter(job));
        // runningJob.setJobPriority(JobPriority.HIGH.name());
        // test default map
        assertEquals(20, client.getDefaultMaps());
        assertEquals(4, client.getDefaultReduces());
        assertEquals("jobSubmitDir", client.getSystemDir().getName());
        // test queue information
        JobQueueInfo[] rootQueueInfo = client.getRootQueues();
        assertEquals(1, rootQueueInfo.length);
        assertEquals("default", rootQueueInfo[0].getQueueName());
        JobQueueInfo[] qinfo = client.getQueues();
        assertEquals(1, qinfo.length);
        assertEquals("default", qinfo[0].getQueueName());
        assertEquals(0, client.getChildQueues("default").length);
        assertEquals(1, client.getJobsFromQueue("default").length);
        assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith("/job.xml"));
        JobQueueInfo qi = client.getQueueInfo("default");
        assertEquals("default", qi.getQueueName());
        assertEquals("running", qi.getQueueState());
        QueueAclsInfo[] aai = client.getQueueAclsForCurrentUser();
        assertEquals(2, aai.length);
        assertEquals("root", aai[0].getQueueName());
        assertEquals("default", aai[1].getQueueName());
        // test JobClient
        // The following asserts read JobStatus twice and ensure the returned
        // JobStatus objects correspond to the same Job.
        assertEquals("Expected matching JobIDs", jobId, client.getJob(jobId).getJobStatus().getJobID());
        assertEquals("Expected matching startTimes", rj.getJobStatus().getStartTime(), client.getJob(jobId).getJobStatus().getStartTime());
    } finally {
        if (fileSys != null) {
            fileSys.delete(testDir, true);
        }
        if (mr != null) {
            mr.stop();
        }
    }
}
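
For quick reference, the client-side pattern this test exercises can be distilled into a short standalone sketch: obtain a JobClient, ask it for the RunningJob handle of a submitted job (backed by JobClient.NetworkedJob), and read the job's state through that handle. This is a minimal, hypothetical sketch, not part of the Hadoop sources; the class name, the command-line job id, and the assumption that the cluster configuration is on the classpath are illustrative.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapred.RunningJob;

// Hypothetical helper class, not part of the Hadoop sources.
public class NetworkedJobLookupSketch {

    public static void main(String[] args) throws Exception {
        // Assumes mapred-site.xml / yarn-site.xml are on the classpath.
        JobConf conf = new JobConf();
        JobClient client = new JobClient(conf);
        try {
            // e.g. "job_1234567890123_0001", passed in by the caller
            JobID jobId = JobID.forName(args[0]);
            // getJob returns a JobClient.NetworkedJob behind the RunningJob interface
            RunningJob job = client.getJob(jobId);
            if (job != null) {
                System.out.println("name        : " + job.getJobName());
                System.out.println("job file    : " + job.getJobFile());
                System.out.println("tracking URL: " + job.getTrackingURL());
                System.out.println("map progress: " + job.mapProgress());
                // same priority call the test makes against the NetworkedJob
                job.setJobPriority(JobPriority.HIGH.name());
            }
        } finally {
            client.close();
        }
    }
}
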
Also used:
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)
DataOutputStream (java.io.DataOutputStream)
ByteArrayOutputStream (java.io.ByteArrayOutputStream)
DataInputStream (java.io.DataInputStream)
YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException)
ByteArrayInputStream (java.io.ByteArrayInputStream)
FileSystem (org.apache.hadoop.fs.FileSystem)
NetworkedJob (org.apache.hadoop.mapred.JobClient.NetworkedJob)
Test (org.junit.Test)
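
The ClusterStatus assertions in the test follow the same pattern. Below is a similarly minimal, hypothetical sketch of those queries, again assuming a reachable cluster whose configuration is on the classpath.

import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

// Hypothetical helper class, not part of the Hadoop sources.
public class ClusterStatusSketch {

    public static void main(String[] args) throws Exception {
        JobClient client = new JobClient(new JobConf());
        try {
            // 'true' requests the detailed status, including active tracker names
            ClusterStatus status = client.getClusterStatus(true);
            System.out.println("task trackers   : " + status.getTaskTrackers());
            System.out.println("active trackers : " + status.getActiveTrackerNames());
            System.out.println("running maps    : " + status.getMapTasks()
                + " of " + status.getMaxMapTasks() + " slots");
            System.out.println("running reduces : " + status.getReduceTasks()
                + " of " + status.getMaxReduceTasks() + " slots");
            System.out.println("tracker state   : " + status.getJobTrackerStatus());
        } finally {
            client.close();
        }
    }
}
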
