Example usage of org.apache.hadoop.mapreduce.Counters in the Apache Ignite project: the testJobCounters method of the HadoopClientProtocolSelfTest class.
/**
 * Tests job counters retrieval.
 * <p>
 * Submits a job over a small IGFS input file, verifies that a counter can be read and
 * incremented while the job is in the setup phase, then checks the final counter values
 * after the job completes.
 *
 * @throws Exception If failed.
 */
public void testJobCounters() throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    // Input: three occurrences each of "alpha", "beta" and "gamma".
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n");
    }

    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    // NOTE: previously failures were caught as Throwable and only logged, which made the
    // test pass even when its assertions failed. Let them propagate; 'finally' still
    // releases the cluster connection.
    try {
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(TestCountingMapper.class);
        job.setReducerClass(TestCountingReducer.class);
        job.setCombinerClass(TestCountingCombiner.class);

        FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@" + PATH_OUTPUT));

        job.submit();

        // Counter must be readable and mutable before the job leaves the setup phase.
        final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);

        assertEquals(0, cntr.getValue());

        cntr.increment(10);

        assertEquals(10, cntr.getValue());

        // Transferring to map phase.
        setupLockFile.delete();

        // Transferring to reduce phase.
        mapLockFile.delete();

        job.waitForCompletion(false);

        assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());

        final Counters counters = job.getCounters();

        assertNotNull("counters cannot be null", counters);
        assertEquals("wrong counters count", 3, counters.countCounters());
        assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
        assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
    }
    finally {
        job.getCluster().close();
    }
}
Example usage of org.apache.hadoop.mapreduce.Counters in the CDAP project (caskdata): the reportReduceTaskMetrics method of the MapReduceMetricsWriter class.
/**
 * Publishes gauges for a single reduce task: its input/output record counts and
 * its completion percentage.
 */
private void reportReduceTaskMetrics(TaskReport taskReport) {
    Counters taskCounters = taskReport.getTaskCounters();

    // Each task gets its own metrics context, cached per task id.
    MetricsContext taskContext = reduceTaskMetricsCollectors.getUnchecked(taskReport.getTaskId());

    taskContext.gauge(MapReduceMetrics.METRIC_TASK_INPUT_RECORDS,
                      getTaskCounter(taskCounters, TaskCounter.REDUCE_INPUT_RECORDS));
    taskContext.gauge(MapReduceMetrics.METRIC_TASK_OUTPUT_RECORDS,
                      getTaskCounter(taskCounters, TaskCounter.REDUCE_OUTPUT_RECORDS));

    // Progress is reported as a whole-number percentage.
    taskContext.gauge(MapReduceMetrics.METRIC_TASK_COMPLETION, (long) (taskReport.getProgress() * 100));
}
Example usage of org.apache.hadoop.mapreduce.Counters in the Apache incubator-gobblin project: the getRecordCount method of the CompactionSlaEventHelper class.
/**
 * Extracts the record count from the job's counters, preferring the reducer-side
 * counter and falling back to the mapper-side one.
 *
 * @param job the (possibly absent) Hadoop job to inspect
 * @return the first non-zero RECORD_COUNT counter value, or -1 if the job is absent,
 *         its counters are unavailable, or no non-zero count is found
 */
private static long getRecordCount(Optional<Job> job) {
    if (!job.isPresent()) {
        return -1L;
    }

    Counters counters;
    try {
        counters = job.get().getCounters();
    } catch (IOException e) {
        LOG.debug("Failed to get job counters. Record count will not be set. ", e);
        return -1L;
    }

    // Job.getCounters() may return null (e.g. for a retired job); guard against NPE.
    if (counters == null) {
        LOG.debug("Job counters are unavailable. Record count will not be set.");
        return -1L;
    }

    // Reducer-side count is authoritative when present and non-zero.
    Counter recordCounter = counters.findCounter(AvroKeyDedupReducer.EVENT_COUNTER.RECORD_COUNT);
    if (recordCounter != null && recordCounter.getValue() != 0) {
        return recordCounter.getValue();
    }

    // Fall back to the mapper-side count.
    recordCounter = counters.findCounter(AvroKeyMapper.EVENT_COUNTER.RECORD_COUNT);
    if (recordCounter != null && recordCounter.getValue() != 0) {
        return recordCounter.getValue();
    }

    LOG.debug("Non zero record count not found in both mapper and reducer counters");
    return -1L;
}
Example usage of org.apache.hadoop.mapreduce.Counters in the Apache incubator-gobblin project: the countersToMetrics method of the MRJobLauncher class.
/**
 * Create a {@link org.apache.gobblin.metrics.GobblinMetrics} instance for this job run from the Hadoop counters.
 */
@VisibleForTesting
void countersToMetrics(GobblinMetrics metrics) throws IOException {
    Optional<Counters> counters = Optional.fromNullable(this.job.getCounters());

    // Nothing to report if the job exposed no counters.
    if (!counters.isPresent()) {
        return;
    }

    // Write job-level counters
    for (Counter jobCounter : counters.get().getGroup(MetricGroup.JOB.name())) {
        metrics.getCounter(jobCounter.getName()).inc(jobCounter.getValue());
    }

    // Write task-level counters
    for (Counter taskCounter : counters.get().getGroup(MetricGroup.TASK.name())) {
        metrics.getCounter(taskCounter.getName()).inc(taskCounter.getValue());
    }
}
Example usage of org.apache.hadoop.mapreduce.Counters in the Apache Tez project: the fromTez method of the TezTypeConverters class.
/**
 * Converts a {@link TezCounters} instance into its MapReduce {@link Counters}
 * equivalent, preserving group names, display names and counter values.
 *
 * @param tezCounters the Tez counters to convert, may be {@code null}
 * @return the converted counters, or {@code null} when the input is {@code null}
 */
public static Counters fromTez(TezCounters tezCounters) {
    if (tezCounters == null) {
        return null;
    }

    Counters result = new Counters();

    for (CounterGroup group : tezCounters) {
        result.addGroup(group.getName(), group.getDisplayName());

        // findCounter creates the counter in the target group if it does not exist yet.
        for (TezCounter src : group) {
            result.findCounter(group.getName(), src.getName()).setValue(src.getValue());
        }
    }

    return result;
}
Aggregations