
Example 11 with Counters

Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache, in class JHEventHandlerForSigtermTest, method testProcessDoneFilesOnLastAMRetry.

// On the last AM retry, done files should be processed for every type of terminal event
@Test(timeout = 50000)
public void testProcessDoneFilesOnLastAMRetry() throws Exception {
    TestParams t = new TestParams(true);
    Configuration conf = new Configuration();
    JHEvenHandlerForTest realJheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
    JHEvenHandlerForTest jheh = spy(realJheh);
    jheh.init(conf);
    EventWriter mockWriter = null;
    try {
        jheh.start();
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
        verify(jheh, times(0)).processDoneFiles(any(JobId.class));
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, JobStateInternal.ERROR.toString())));
        verify(jheh, times(1)).processDoneFiles(any(JobId.class));
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), new Counters(), new Counters())));
        verify(jheh, times(2)).processDoneFiles(any(JobId.class));
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, JobStateInternal.FAILED.toString())));
        verify(jheh, times(3)).processDoneFiles(any(JobId.class));
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, JobStateInternal.KILLED.toString())));
        verify(jheh, times(4)).processDoneFiles(any(JobId.class));
        mockWriter = jheh.getEventWriter();
        verify(mockWriter, times(5)).write(any(HistoryEvent.class));
    } finally {
        jheh.stop();
        verify(mockWriter).close();
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Counters (org.apache.hadoop.mapreduce.Counters), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
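
The handleEvent helper called throughout this test is defined elsewhere in the test class and is not part of this snippet. A minimal sketch of what it plausibly does, assuming the handler exposes its event queue and a synchronous handleEvent method to the test subclass (both are assumptions about code not shown here):

private void handleEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event) throws InterruptedException {
    // Enqueue the event exactly as production callers would.
    jheh.handle(event);
    // Then drain and process it on the test thread, so the verify() calls
    // above can observe the side effects immediately.
    jheh.handleEvent(jheh.eventQueue.take());
}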

Example 12 with Counters

Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache, in class JHEventHandlerForSigtermTest, method testDefaultFsIsUsedForHistory.

@Test(timeout = 50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
    // Create default configuration pointing to the minicluster
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, dfsCluster.getURI().toString());
    FileOutputStream os = new FileOutputStream(coreSitePath);
    conf.writeXml(os);
    os.close();
    // simulate execution under a non-default namenode
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "file:///");
    TestParams t = new TestParams();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
    JHEvenHandlerForTest realJheh = new JHEvenHandlerForTest(t.mockAppContext, 0, false);
    JHEvenHandlerForTest jheh = spy(realJheh);
    jheh.init(conf);
    try {
        jheh.start();
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), new Counters(), new Counters())));
        // Reaching this point means the event handler ran, but not which file
        // system it wrote to. Verify the history files went to the minicluster.
        FileSystem dfsFileSystem = dfsCluster.getFileSystem();
        assertTrue("Minicluster contains some history files", dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
        FileSystem localFileSystem = LocalFileSystem.get(conf);
        assertFalse("No history directory on non-default file system", localFileSystem.exists(new Path(t.dfsWorkDir)));
    } finally {
        jheh.stop();
        purgeHdfsHistoryIntermediateDoneDirectory(conf);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), FileOutputStream (java.io.FileOutputStream), FileSystem (org.apache.hadoop.fs.FileSystem), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), Counters (org.apache.hadoop.mapreduce.Counters), Test (org.junit.Test)
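
The point of the test is Hadoop's default file system resolution: a path with no scheme is qualified against fs.defaultFS, so history files land on whichever file system the handler's configuration actually names. A minimal, self-contained illustration of that resolution (hdfs://localhost:8020 is a placeholder URI, not a value from the test above):

public static void illustrateDefaultFsResolution() throws Exception {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "hdfs://localhost:8020");
    FileSystem defaultFs = FileSystem.get(conf);    // resolved from fs.defaultFS
    FileSystem localFs = FileSystem.getLocal(conf); // always file:///
    // A path with no scheme picks up the default file system when qualified:
    Path staging = new Path("/tmp/hadoop-yarn/staging");
    System.out.println(defaultFs.makeQualified(staging)); // hdfs://localhost:8020/tmp/hadoop-yarn/staging
    System.out.println(localFs.makeQualified(staging));   // file:/tmp/hadoop-yarn/staging
}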

Example 13 with Counters

Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache, in class JHEventHandlerForSigtermTest, method testCountersToJSON.

@Test(timeout = 50000)
public void testCountersToJSON() throws Exception {
    JobHistoryEventHandler jheh = new JobHistoryEventHandler(null, 0);
    Counters counters = new Counters();
    CounterGroup group1 = counters.addGroup("DOCTORS", "Incarnations of the Doctor");
    group1.addCounter("PETER_CAPALDI", "Peter Capaldi", 12);
    group1.addCounter("MATT_SMITH", "Matt Smith", 11);
    group1.addCounter("DAVID_TENNANT", "David Tennant", 10);
    CounterGroup group2 = counters.addGroup("COMPANIONS", "Companions of the Doctor");
    group2.addCounter("CLARA_OSWALD", "Clara Oswald", 6);
    group2.addCounter("RORY_WILLIAMS", "Rory Williams", 5);
    group2.addCounter("AMY_POND", "Amy Pond", 4);
    group2.addCounter("MARTHA_JONES", "Martha Jones", 3);
    group2.addCounter("DONNA_NOBLE", "Donna Noble", 2);
    group2.addCounter("ROSE_TYLER", "Rose Tyler", 1);
    JsonNode jsonNode = JobHistoryEventUtils.countersToJSON(counters);
    String jsonStr = new ObjectMapper().writeValueAsString(jsonNode);
    String expected = "[{\"NAME\":\"COMPANIONS\",\"DISPLAY_NAME\":\"Companions " + "of the Doctor\",\"COUNTERS\":[{\"NAME\":\"AMY_POND\",\"DISPLAY_NAME\"" + ":\"Amy Pond\",\"VALUE\":4},{\"NAME\":\"CLARA_OSWALD\"," + "\"DISPLAY_NAME\":\"Clara Oswald\",\"VALUE\":6},{\"NAME\":" + "\"DONNA_NOBLE\",\"DISPLAY_NAME\":\"Donna Noble\",\"VALUE\":2}," + "{\"NAME\":\"MARTHA_JONES\",\"DISPLAY_NAME\":\"Martha Jones\"," + "\"VALUE\":3},{\"NAME\":\"RORY_WILLIAMS\",\"DISPLAY_NAME\":\"Rory " + "Williams\",\"VALUE\":5},{\"NAME\":\"ROSE_TYLER\",\"DISPLAY_NAME\":" + "\"Rose Tyler\",\"VALUE\":1}]},{\"NAME\":\"DOCTORS\",\"DISPLAY_NAME\"" + ":\"Incarnations of the Doctor\",\"COUNTERS\":[{\"NAME\":" + "\"DAVID_TENNANT\",\"DISPLAY_NAME\":\"David Tennant\",\"VALUE\":10}," + "{\"NAME\":\"MATT_SMITH\",\"DISPLAY_NAME\":\"Matt Smith\",\"VALUE\":" + "11},{\"NAME\":\"PETER_CAPALDI\",\"DISPLAY_NAME\":\"Peter Capaldi\"," + "\"VALUE\":12}]}]";
    Assert.assertEquals(expected, jsonStr);
}
Also used: CounterGroup (org.apache.hadoop.mapreduce.CounterGroup), Counters (org.apache.hadoop.mapreduce.Counters), JsonNode (com.fasterxml.jackson.databind.JsonNode), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper), Test (org.junit.Test)
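
The expected string lists groups, and the counters within each group, in alphabetical order; that ordering falls out of Counters iterating its contents by sorted name. The conversion itself can be approximated with Jackson tree nodes (ArrayNode and ObjectNode come from com.fasterxml.jackson.databind.node). A sketch along the lines of what JobHistoryEventUtils.countersToJSON has to do, written for illustration rather than copied from the real method:

public static JsonNode countersToJSON(Counters counters) {
    ObjectMapper mapper = new ObjectMapper();
    ArrayNode nodes = mapper.createArrayNode();
    if (counters != null) {
        // Counters iterates its groups (and each group its counters) in sorted
        // name order, which produces the alphabetical JSON asserted above.
        for (CounterGroup group : counters) {
            ObjectNode groupNode = nodes.addObject();
            groupNode.put("NAME", group.getName());
            groupNode.put("DISPLAY_NAME", group.getDisplayName());
            ArrayNode countersNode = groupNode.putArray("COUNTERS");
            for (Counter counter : group) {
                ObjectNode counterNode = countersNode.addObject();
                counterNode.put("NAME", counter.getName());
                counterNode.put("DISPLAY_NAME", counter.getDisplayName());
                counterNode.put("VALUE", counter.getValue());
            }
        }
    }
    return nodes;
}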

Example 14 with Counters

Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache, in class MockJobs, method newTaskAttempt.

public static TaskAttempt newTaskAttempt(TaskId tid, int i) {
    final TaskAttemptId taid = Records.newRecord(TaskAttemptId.class);
    taid.setTaskId(tid);
    taid.setId(i);
    final TaskAttemptReport report = newTaskAttemptReport(taid);
    return new TaskAttempt() {

        @Override
        public NodeId getNodeId() throws UnsupportedOperationException {
            throw new UnsupportedOperationException();
        }

        @Override
        public TaskAttemptId getID() {
            return taid;
        }

        @Override
        public TaskAttemptReport getReport() {
            return report;
        }

        @Override
        public long getLaunchTime() {
            return report.getStartTime();
        }

        @Override
        public long getFinishTime() {
            return report.getFinishTime();
        }

        @Override
        public int getShufflePort() {
            return ShuffleHandler.DEFAULT_SHUFFLE_PORT;
        }

        @Override
        public Counters getCounters() {
            if (report != null && report.getCounters() != null) {
                return new Counters(TypeConverter.fromYarn(report.getCounters()));
            }
            return null;
        }

        @Override
        public float getProgress() {
            return report.getProgress();
        }

        @Override
        public Phase getPhase() {
            return report.getPhase();
        }

        @Override
        public TaskAttemptState getState() {
            return report.getTaskAttemptState();
        }

        @Override
        public boolean isFinished() {
            switch(report.getTaskAttemptState()) {
                case SUCCEEDED:
                case FAILED:
                case KILLED:
                    return true;
            }
            return false;
        }

        @Override
        public ContainerId getAssignedContainerID() {
            ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(taid.getTaskId().getJobId().getAppId(), 0);
            ContainerId id = ContainerId.newContainerId(appAttemptId, 0);
            return id;
        }

        @Override
        public String getNodeHttpAddress() {
            return "localhost:8042";
        }

        @Override
        public List<String> getDiagnostics() {
            return Lists.newArrayList(report.getDiagnosticInfo());
        }

        @Override
        public String getAssignedContainerMgrAddress() {
            return "localhost:9998";
        }

        @Override
        public long getShuffleFinishTime() {
            return report.getShuffleFinishTime();
        }

        @Override
        public long getSortFinishTime() {
            return report.getSortFinishTime();
        }

        @Override
        public String getNodeRackName() {
            return "/default-rack";
        }
    };
}
Also used: TaskAttemptReport (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), Counters (org.apache.hadoop.mapreduce.Counters), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId)
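
A brief, hypothetical usage sketch for this factory; the taskId below is assumed to come from MockJobs' other id helpers, which are not shown in this excerpt:

// Hypothetical usage of the mock attempt above.
TaskAttempt attempt = MockJobs.newTaskAttempt(taskId, 0);
TaskAttemptReport report = attempt.getReport();
Counters counters = attempt.getCounters(); // converted from YARN counters, or null
float progress = attempt.getProgress();    // delegates to the mocked report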

Example 15 with Counters

Use of org.apache.hadoop.mapreduce.Counters in project hadoop by apache, in class MockJobs, method getCounters.

public static Counters getCounters(Collection<Task> tasks) {
    List<Task> completedTasks = new ArrayList<Task>();
    for (Task task : tasks) {
        if (task.getCounters() != null) {
            completedTasks.add(task);
        }
    }
    Counters counters = new Counters();
    return JobImpl.incrTaskCounters(counters, completedTasks);
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), ArrayList (java.util.ArrayList), Counters (org.apache.hadoop.mapreduce.Counters)
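
The aggregation itself is delegated to JobImpl.incrTaskCounters (org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl). A minimal sketch of that kind of accumulation, assuming the method simply folds each task's counters into the running total:

public static Counters incrTaskCounters(Counters counters, Collection<Task> tasks) {
    for (Task task : tasks) {
        // incrAllCounters merges every group and counter from the argument into
        // the receiver, summing the values of counters that already exist.
        counters.incrAllCounters(task.getCounters());
    }
    return counters;
}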

Aggregations

Counters (org.apache.hadoop.mapreduce.Counters): 72 uses
Test (org.junit.Test): 24 uses
Job (org.apache.hadoop.mapreduce.Job): 21 uses
Path (org.apache.hadoop.fs.Path): 14 uses
Configuration (org.apache.hadoop.conf.Configuration): 13 uses
Counter (org.apache.hadoop.mapreduce.Counter): 11 uses
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 8 uses
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 7 uses
PhoenixScrutinyJobCounters (org.apache.phoenix.mapreduce.index.PhoenixScrutinyJobCounters): 7 uses
BaseTest (org.apache.phoenix.query.BaseTest): 7 uses
IOException (java.io.IOException): 6 uses
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 6 uses
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 6 uses
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 6 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5 uses
TableName (org.apache.hadoop.hbase.TableName): 4 uses
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 4 uses
File (java.io.File): 3 uses
URI (java.net.URI): 3 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 3 uses