Example 6 with Counter

use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache.

the class JSONHistoryViewerPrinter method printJobCounters.

private void printJobCounters(Counters totalCounters, Counters mapCounters, Counters reduceCounters) throws JSONException {
    // Killed jobs might not have counters
    if (totalCounters != null) {
        JSONObject jGroups = new JSONObject();
        for (String groupName : totalCounters.getGroupNames()) {
            CounterGroup totalGroup = totalCounters.getGroup(groupName);
            CounterGroup mapGroup = mapCounters.getGroup(groupName);
            CounterGroup reduceGroup = reduceCounters.getGroup(groupName);
            Iterator<Counter> ctrItr = totalGroup.iterator();
            JSONArray jGroup = new JSONArray();
            while (ctrItr.hasNext()) {
                JSONObject jCounter = new JSONObject();
                org.apache.hadoop.mapreduce.Counter counter = ctrItr.next();
                String name = counter.getName();
                long mapValue = mapGroup.findCounter(name).getValue();
                long reduceValue = reduceGroup.findCounter(name).getValue();
                long totalValue = counter.getValue();
                jCounter.put("counterName", name);
                jCounter.put("mapValue", mapValue);
                jCounter.put("reduceValue", reduceValue);
                jCounter.put("totalValue", totalValue);
                jGroup.put(jCounter);
            }
            jGroups.put(fixGroupNameForShuffleErrors(totalGroup.getName()), jGroup);
        }
        json.put("counters", jGroups);
    }
}
Also used : Counter(org.apache.hadoop.mapreduce.Counter) JSONObject(org.codehaus.jettison.json.JSONObject) CounterGroup(org.apache.hadoop.mapreduce.CounterGroup) JSONArray(org.codehaus.jettison.json.JSONArray)
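
As a side note, the same group/counter traversal works from plain client code. A minimal sketch (assuming an already-completed Job handle named job; not part of the printer above) that walks every group and prints counter values:

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;

public class CounterDump {

    // Walks all counter groups of a completed job and prints each counter.
    // "job" is a hypothetical, already-submitted Job; error handling is minimal.
    public static void dump(Job job) throws Exception {
        Counters counters = job.getCounters();
        if (counters == null) {
            // Killed jobs might not have counters, as noted in the printer above.
            return;
        }
        for (String groupName : counters.getGroupNames()) {
            CounterGroup group = counters.getGroup(groupName);
            System.out.println(group.getDisplayName());
            for (Counter counter : group) {
                System.out.println("  " + counter.getName() + " = " + counter.getValue());
            }
        }
    }
}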

Example 7 with Counter

use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache.

the class FileSystemCounterGroup method incrAllCounters.

@Override
@SuppressWarnings("unchecked")
public void incrAllCounters(CounterGroupBase<C> other) {
    if (checkNotNull(other.getUnderlyingGroup(), "other group") instanceof FileSystemCounterGroup<?>) {
        for (Counter counter : other) {
            FSCounter c = (FSCounter) ((Counter) counter).getUnderlyingCounter();
            findCounter(c.scheme, c.key).increment(counter.getValue());
        }
    }
}
Also used : Counter(org.apache.hadoop.mapreduce.Counter) FileSystemCounter(org.apache.hadoop.mapreduce.FileSystemCounter)
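
For reference, file-system counters are addressed by a (scheme, FileSystemCounter) pair, which is what the merge above relies on. A rough standalone sketch of that addressing on the public Counters API (the "hdfs" scheme and the values are illustrative, not taken from the example):

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.FileSystemCounter;

public class FsCounterMergeSketch {

    public static void main(String[] args) {
        Counters total = new Counters();
        Counters delta = new Counters();

        // Bump a per-scheme counter in "delta": HDFS bytes read.
        delta.findCounter("hdfs", FileSystemCounter.BYTES_READ).increment(4096);

        // Merge everything from "delta" into "total"; file-system groups are
        // combined per (scheme, key), ending up in incrAllCounters above.
        total.incrAllCounters(delta);

        Counter merged = total.findCounter("hdfs", FileSystemCounter.BYTES_READ);
        System.out.println(merged.getValue());   // 4096
    }
}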

Example 8 with Counter

use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache.

the class FrameworkCounterGroup method readFields.

@Override
public void readFields(DataInput in) throws IOException {
    clear();
    int len = WritableUtils.readVInt(in);
    T[] enums = enumClass.getEnumConstants();
    for (int i = 0; i < len; ++i) {
        int ord = WritableUtils.readVInt(in);
        Counter counter = newCounter(enums[ord]);
        counter.setValue(WritableUtils.readVLong(in));
        counters[ord] = counter;
    }
}
Also used : Counter(org.apache.hadoop.mapreduce.Counter)
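
This readFields is the mirror of the write method in the next example: each counter travels as a vint ordinal followed by a vlong value. A small self-contained sketch of that WritableUtils round trip (not tied to any counter class):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.WritableUtils;

public class VIntRoundTrip {

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);

        // Same wire shape as FrameworkCounterGroup: ordinal as vint, value as vlong.
        WritableUtils.writeVInt(out, 7);
        WritableUtils.writeVLong(out, 123456789L);
        out.flush();

        DataInputStream in =
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        int ord = WritableUtils.readVInt(in);
        long value = WritableUtils.readVLong(in);
        System.out.println(ord + " -> " + value);   // 7 -> 123456789
    }
}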

Example 9 with Counter

use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache.

the class FrameworkCounterGroup method write.

/**
   * FrameworkGroup ::= #counter (key value)*
   */
@Override
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, size());
    for (int i = 0; i < counters.length; ++i) {
        Counter counter = (C) counters[i];
        if (counter != null) {
            WritableUtils.writeVInt(out, i);
            WritableUtils.writeVLong(out, counter.getValue());
        }
    }
}
Also used : Counter(org.apache.hadoop.mapreduce.Counter)
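
The write/readFields pair gives the group a sparse wire format: the live-counter count first, then an (index, value) pair for each non-null slot. At the user level the same Writable contract is exposed by Counters itself, so a whole counter set can be round-tripped through a byte stream. A minimal sketch (the group and counter names are made up):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.mapreduce.Counters;

public class CountersRoundTrip {

    public static void main(String[] args) throws IOException {
        Counters original = new Counters();
        // Hypothetical user-defined counter: group "my.group", name "records".
        original.findCounter("my.group", "records").increment(42);

        // Serialize via the Writable interface implemented by Counters.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Deserialize into a fresh instance and read the value back.
        Counters copy = new Counters();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.findCounter("my.group", "records").getValue());   // 42
    }
}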

Example 10 with Counter

use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache.

the class TestGridMixClasses method testSleepReducer.

/*
   * test SleepReducer
   */
@Test(timeout = 3000)
public void testSleepReducer() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(JobContext.NUM_REDUCES, 2);
    CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
    conf.setBoolean(FileOutputFormat.COMPRESS, true);
    CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
    conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
    TaskAttemptID taskId = new TaskAttemptID();
    RawKeyValueIterator input = new FakeRawKeyValueReducerIterator();
    Counter counter = new GenericCounter();
    Counter inputValueCounter = new GenericCounter();
    RecordWriter<NullWritable, NullWritable> output = new LoadRecordReduceWriter();
    OutputCommitter committer = new CustomOutputCommitter();
    StatusReporter reporter = new DummyReporter();
    RawComparator<GridmixKey> comparator = new FakeRawComparator();
    ReduceContext<GridmixKey, NullWritable, NullWritable, NullWritable> reducecontext = new ReduceContextImpl<GridmixKey, NullWritable, NullWritable, NullWritable>(conf, taskId, input, counter, inputValueCounter, output, committer, reporter, comparator, GridmixKey.class, NullWritable.class);
    org.apache.hadoop.mapreduce.Reducer<GridmixKey, NullWritable, NullWritable, NullWritable>.Context context = new WrappedReducer<GridmixKey, NullWritable, NullWritable, NullWritable>().getReducerContext(reducecontext);
    SleepReducer test = new SleepReducer();
    long start = System.currentTimeMillis();
    test.setup(context);
    long sleeper = context.getCurrentKey().getReduceOutputBytes();
    // status has been changed
    assertEquals("Sleeping... " + sleeper + " ms left", context.getStatus());
    // should sleep 0.9 sec
    assertTrue(System.currentTimeMillis() >= (start + sleeper));
    test.cleanup(context);
    // status has been changed again
    assertEquals("Slept for " + sleeper, context.getStatus());
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ReduceContextImpl(org.apache.hadoop.mapreduce.task.ReduceContextImpl) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) GenericCounter(org.apache.hadoop.mapreduce.counters.GenericCounter) Counter(org.apache.hadoop.mapreduce.Counter) CustomOutputCommitter(org.apache.hadoop.CustomOutputCommitter) OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) DummyReporter(org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl.DummyReporter) NullWritable(org.apache.hadoop.io.NullWritable) RawKeyValueIterator(org.apache.hadoop.mapred.RawKeyValueIterator) SleepReducer(org.apache.hadoop.mapred.gridmix.SleepJob.SleepReducer) WrappedReducer(org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer) StatusReporter(org.apache.hadoop.mapreduce.StatusReporter) Test(org.junit.Test)
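
The counter and inputValueCounter passed to ReduceContextImpl in this test are free-standing GenericCounter instances, not counters owned by a running job. A tiny sketch of using GenericCounter on its own (the names are illustrative):

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.counters.GenericCounter;

public class GenericCounterSketch {

    public static void main(String[] args) {
        // A free-standing counter, like the ones handed to ReduceContextImpl above.
        Counter inputKeys = new GenericCounter("REDUCE_INPUT_KEYS", "Reduce input keys");
        inputKeys.increment(3);
        inputKeys.increment(1);
        System.out.println(inputKeys.getDisplayName() + " = " + inputKeys.getValue());   // 4
    }
}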

Aggregations

Counter (org.apache.hadoop.mapreduce.Counter) 51
Configuration (org.apache.hadoop.conf.Configuration) 15
CounterGroup (org.apache.hadoop.mapreduce.CounterGroup) 13
Job (org.apache.hadoop.mapreduce.Job) 12
Counters (org.apache.hadoop.mapreduce.Counters) 11
IOException (java.io.IOException) 8
Path (org.apache.hadoop.fs.Path) 7
Map (java.util.Map) 4
FileSystem (org.apache.hadoop.fs.FileSystem) 4
Test (org.junit.Test) 4
TaskCounter (org.apache.hadoop.mapreduce.TaskCounter) 3
FileNotFoundException (java.io.FileNotFoundException) 2
SimpleDateFormat (java.text.SimpleDateFormat) 2
ArrayList (java.util.ArrayList) 2
ExecutionException (java.util.concurrent.ExecutionException) 2
RejectedExecutionException (java.util.concurrent.RejectedExecutionException) 2
TimeoutException (java.util.concurrent.TimeoutException) 2
Schema (org.apache.avro.Schema) 2
CustomOutputCommitter (org.apache.hadoop.CustomOutputCommitter) 2
BytesWritable (org.apache.hadoop.io.BytesWritable) 2