Search in sources :

Example 21 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project ignite by apache.

Class HadoopClientProtocolSelfTest, method testJobCounters.

/**
 * Tests job counters retrieval: a client-side counter is incremented before the job runs,
 * and the final counter values are checked after completion.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
    igfs.mkdirs(new IgfsPath(PATH_INPUT));
    // Nine input lines: three occurrences each of "alpha", "beta" and "gamma".
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n");
    }
    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
    final Job job = Job.getInstance(conf);
    try {
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(TestCountingMapper.class);
        job.setReducerClass(TestCountingReducer.class);
        job.setCombinerClass(TestCountingCombiner.class);
        FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@" + PATH_OUTPUT));
        job.submit();
        // The client-side increment below must be reflected in the final counter total (10 + 5 = 15).
        final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);
        assertEquals(0, cntr.getValue());
        cntr.increment(10);
        assertEquals(10, cntr.getValue());
        // Transferring to map phase.
        setupLockFile.delete();
        // Transferring to reduce phase.
        mapLockFile.delete();
        job.waitForCompletion(false);
        assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());
        final Counters counters = job.getCounters();
        assertNotNull("counters cannot be null", counters);
        assertEquals("wrong counters count", 3, counters.countCounters());
        assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
    } finally {
        // No catch block: the previous catch (Throwable) { log.error(...) } swallowed
        // AssertionErrors, so the test passed even when its assertions failed. Failures now
        // propagate to the test runner; the cluster is still closed on every path.
        job.getCluster().close();
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) IgfsPath(org.apache.ignite.igfs.IgfsPath) Path(org.apache.hadoop.fs.Path) Counter(org.apache.hadoop.mapreduce.Counter) Configuration(org.apache.hadoop.conf.Configuration) IgniteFileSystem(org.apache.ignite.IgniteFileSystem) OutputStreamWriter(java.io.OutputStreamWriter) Counters(org.apache.hadoop.mapreduce.Counters) Job(org.apache.hadoop.mapreduce.Job) BufferedWriter(java.io.BufferedWriter)

Example 22 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project cdap by caskdata.

Class MapReduceMetricsWriter, method reportReduceTaskMetrics.

/**
 * Publishes metrics for a single reduce task: input record count, output record count and
 * completion percentage, read from the task's Hadoop counters and reported progress.
 */
private void reportReduceTaskMetrics(TaskReport taskReport) {
    MetricsContext taskContext = reduceTaskMetricsCollectors.getUnchecked(taskReport.getTaskId());
    Counters taskCounters = taskReport.getTaskCounters();
    long completionPct = (long) (taskReport.getProgress() * 100);
    taskContext.gauge(MapReduceMetrics.METRIC_TASK_INPUT_RECORDS, getTaskCounter(taskCounters, TaskCounter.REDUCE_INPUT_RECORDS));
    taskContext.gauge(MapReduceMetrics.METRIC_TASK_OUTPUT_RECORDS, getTaskCounter(taskCounters, TaskCounter.REDUCE_OUTPUT_RECORDS));
    taskContext.gauge(MapReduceMetrics.METRIC_TASK_COMPLETION, completionPct);
}
Also used : MetricsContext(co.cask.cdap.api.metrics.MetricsContext) Counters(org.apache.hadoop.mapreduce.Counters)

Example 23 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project incubator-gobblin by apache.

Class CompactionSlaEventHelper, method getRecordCount.

/**
 * Extracts the record count from the job's counters, preferring the reducer counter and
 * falling back to the mapper counter.
 *
 * @param job the (possibly absent) Hadoop job to inspect
 * @return the first non-zero RECORD_COUNT value found, or {@code -1L} when the job is absent,
 *         its counters cannot be fetched, or no non-zero count exists
 */
private static long getRecordCount(Optional<Job> job) {
    if (!job.isPresent()) {
        // Uppercase L suffix: the original "-1l" reads as "-11" in many fonts.
        return -1L;
    }
    Counters counters;
    try {
        counters = job.get().getCounters();
    } catch (IOException e) {
        LOG.debug("Failed to get job counters. Record count will not be set. ", e);
        return -1L;
    }
    // Check the reducer-side counter first, then fall back to the mapper-side counter;
    // a single loop replaces the two duplicated lookup-and-test stanzas.
    for (Enum<?> counterKey : new Enum<?>[] { AvroKeyDedupReducer.EVENT_COUNTER.RECORD_COUNT, AvroKeyMapper.EVENT_COUNTER.RECORD_COUNT }) {
        Counter recordCounter = counters.findCounter(counterKey);
        if (recordCounter != null && recordCounter.getValue() != 0) {
            return recordCounter.getValue();
        }
    }
    LOG.debug("Non zero record count not found in both mapper and reducer counters");
    return -1L;
}
Also used : Counter(org.apache.hadoop.mapreduce.Counter) Counters(org.apache.hadoop.mapreduce.Counters) IOException(java.io.IOException)

Example 24 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project incubator-gobblin by apache.

Class MRJobLauncher, method countersToMetrics.

/**
 * Create a {@link org.apache.gobblin.metrics.GobblinMetrics} instance for this job run from the Hadoop counters.
 *
 * @param metrics the Gobblin metrics holder that receives the counter values
 * @throws IOException if the Hadoop counters cannot be retrieved from the job
 */
@VisibleForTesting
void countersToMetrics(GobblinMetrics metrics) throws IOException {
    Optional<Counters> counters = Optional.fromNullable(this.job.getCounters());
    if (counters.isPresent()) {
        // Job-level and task-level counters are copied through the same code path;
        // the two previously duplicated loops now live in one helper.
        copyCounterGroup(metrics, counters.get().getGroup(MetricGroup.JOB.name()));
        copyCounterGroup(metrics, counters.get().getGroup(MetricGroup.TASK.name()));
    }
}

/** Copies every Hadoop counter in {@code group} into the Gobblin metric counter of the same name. */
private static void copyCounterGroup(GobblinMetrics metrics, CounterGroup group) {
    for (Counter counter : group) {
        metrics.getCounter(counter.getName()).inc(counter.getValue());
    }
}
Also used : Counter(org.apache.hadoop.mapreduce.Counter) CounterGroup(org.apache.hadoop.mapreduce.CounterGroup) Counters(org.apache.hadoop.mapreduce.Counters) VisibleForTesting(com.google.common.annotations.VisibleForTesting)

Example 25 with Counters

Use of org.apache.hadoop.mapreduce.Counters in the project tez by apache.

Class TezTypeConverters, method fromTez.

/**
 * Converts a Tez counters object into an equivalent MapReduce {@link Counters} instance,
 * copying every group (name and display name) and every counter value.
 *
 * @param tezCounters the Tez counters to convert; may be {@code null}
 * @return the converted MapReduce counters, or {@code null} when the input is {@code null}
 */
public static Counters fromTez(TezCounters tezCounters) {
    if (tezCounters == null) {
        return null;
    }
    Counters mrCounters = new Counters();
    for (CounterGroup group : tezCounters) {
        mrCounters.addGroup(group.getName(), group.getDisplayName());
        for (TezCounter source : group) {
            Counter target = mrCounters.findCounter(group.getName(), source.getName());
            target.setValue(source.getValue());
        }
    }
    return mrCounters;
}
Also used : TezCounter(org.apache.tez.common.counters.TezCounter) Counter(org.apache.hadoop.mapreduce.Counter) CounterGroup(org.apache.tez.common.counters.CounterGroup) TezCounters(org.apache.tez.common.counters.TezCounters) Counters(org.apache.hadoop.mapreduce.Counters) TezCounter(org.apache.tez.common.counters.TezCounter)

Aggregations

Counters (org.apache.hadoop.mapreduce.Counters)72 Test (org.junit.Test)24 Job (org.apache.hadoop.mapreduce.Job)21 Path (org.apache.hadoop.fs.Path)14 Configuration (org.apache.hadoop.conf.Configuration)13 Counter (org.apache.hadoop.mapreduce.Counter)11 Task (org.apache.hadoop.mapreduce.v2.app.job.Task)8 TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId)7 PhoenixScrutinyJobCounters (org.apache.phoenix.mapreduce.index.PhoenixScrutinyJobCounters)7 BaseTest (org.apache.phoenix.query.BaseTest)7 IOException (java.io.IOException)6 TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId)6 TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)6 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)6 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)5 TableName (org.apache.hadoop.hbase.TableName)4 JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)4 File (java.io.File)3 URI (java.net.URI)3 FileSystem (org.apache.hadoop.fs.FileSystem)3