Use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache: class JSONHistoryViewerPrinter, method printJobCounters.
private void printJobCounters(Counters totalCounters, Counters mapCounters,
    Counters reduceCounters) throws JSONException {
  // Killed jobs might not have counters
  if (totalCounters != null) {
    JSONObject jGroups = new JSONObject();
    for (String groupName : totalCounters.getGroupNames()) {
      CounterGroup totalGroup = totalCounters.getGroup(groupName);
      CounterGroup mapGroup = mapCounters.getGroup(groupName);
      CounterGroup reduceGroup = reduceCounters.getGroup(groupName);
      Iterator<Counter> ctrItr = totalGroup.iterator();
      JSONArray jGroup = new JSONArray();
      while (ctrItr.hasNext()) {
        JSONObject jCounter = new JSONObject();
        org.apache.hadoop.mapreduce.Counter counter = ctrItr.next();
        String name = counter.getName();
        long mapValue = mapGroup.findCounter(name).getValue();
        long reduceValue = reduceGroup.findCounter(name).getValue();
        long totalValue = counter.getValue();
        jCounter.put("counterName", name);
        jCounter.put("mapValue", mapValue);
        jCounter.put("reduceValue", reduceValue);
        jCounter.put("totalValue", totalValue);
        jGroup.put(jCounter);
      }
      jGroups.put(fixGroupNameForShuffleErrors(totalGroup.getName()), jGroup);
    }
    json.put("counters", jGroups);
  }
}
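The same group-then-counter traversal works on any Counters object. Below is a minimal standalone sketch (the CounterDump class and dump method are hypothetical names, not part of Hadoop) that walks every group and prints each counter's name and value:

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;

public class CounterDump {
  // Walks every counter group and prints "group / counter = value".
  static void dump(Counters counters) {
    for (String groupName : counters.getGroupNames()) {
      CounterGroup group = counters.getGroup(groupName);
      for (Counter counter : group) {
        System.out.println(group.getDisplayName() + " / "
            + counter.getName() + " = " + counter.getValue());
      }
    }
  }
}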
Use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache: class FileSystemCounterGroup, method incrAllCounters.
@Override
@SuppressWarnings("unchecked")
public void incrAllCounters(CounterGroupBase<C> other) {
  if (checkNotNull(other.getUnderlyingGroup(), "other group")
      instanceof FileSystemCounterGroup<?>) {
    for (Counter counter : other) {
      FSCounter c = (FSCounter) ((Counter) counter).getUnderlyingCounter();
      findCounter(c.scheme, c.key).increment(counter.getValue());
    }
  }
}
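This is the per-group merge that runs when two Counters objects are combined. A minimal sketch of merging two counter sets and reading back a file-system counter (CounterMergeSketch and the parameters a and b are illustrative names; the Hadoop calls themselves are real):

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.FileSystemCounter;

public class CounterMergeSketch {
  // Accumulates two Counters objects into a fresh one and reads back
  // the merged HDFS BYTES_READ file-system counter.
  static long mergedHdfsBytesRead(Counters a, Counters b) {
    Counters merged = new Counters();
    merged.incrAllCounters(a);
    merged.incrAllCounters(b);
    return merged.findCounter("hdfs", FileSystemCounter.BYTES_READ).getValue();
  }
}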
Use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache: class FrameworkCounterGroup, method readFields.
@Override
public void readFields(DataInput in) throws IOException {
  clear();
  int len = WritableUtils.readVInt(in);
  T[] enums = enumClass.getEnumConstants();
  for (int i = 0; i < len; ++i) {
    int ord = WritableUtils.readVInt(in);
    Counter counter = newCounter(enums[ord]);
    counter.setValue(WritableUtils.readVLong(in));
    counters[ord] = counter;
  }
}
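Both readFields and the write method below rely on the variable-length encoding in WritableUtils, which keeps small ordinals and counter values compact on the wire. A self-contained sketch of that encoding round trip (the VIntSketch class name and the literal values are made up for illustration):

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.WritableUtils;

public class VIntSketch {
  public static void main(String[] args) throws IOException {
    // Small values such as counter ordinals encode in very few bytes.
    DataOutputBuffer out = new DataOutputBuffer();
    WritableUtils.writeVInt(out, 3);           // a counter ordinal
    WritableUtils.writeVLong(out, 123456789L); // a counter value

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    System.out.println(WritableUtils.readVInt(in));  // 3
    System.out.println(WritableUtils.readVLong(in)); // 123456789
  }
}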
Use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache: class FrameworkCounterGroup, method write.
/**
 * FrameworkGroup ::= #counter (key value)*
 */
@Override
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, size());
  for (int i = 0; i < counters.length; ++i) {
    Counter counter = (C) counters[i];
    if (counter != null) {
      WritableUtils.writeVInt(out, i);
      WritableUtils.writeVLong(out, counter.getValue());
    }
  }
}
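Together, write and readFields make the framework counter group, and the enclosing Counters object, serializable as a Writable. A round-trip sketch under those assumptions (CounterRoundTripSketch and roundTrip are illustrative names; the buffer classes and counter APIs are standard Hadoop):

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CounterRoundTripSketch {
  // Serializes a Counters object and reads it back into a fresh instance.
  static Counters roundTrip(Counters original) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    Counters copy = new Counters();
    copy.readFields(in);
    return copy;
  }

  public static void main(String[] args) throws IOException {
    Counters counters = new Counters();
    counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).increment(42);
    // Prints 42: the value survives the write/readFields round trip.
    System.out.println(
        roundTrip(counters).findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue());
  }
}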
Use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache: class TestGridMixClasses, method testSleepReducer.
/*
 * test SleepReducer
 */
@Test(timeout = 3000)
public void testSleepReducer() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskId = new TaskAttemptID();
  RawKeyValueIterator input = new FakeRawKeyValueReducerIterator();
  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  RecordWriter<NullWritable, NullWritable> output = new LoadRecordReduceWriter();
  OutputCommitter committer = new CustomOutputCommitter();
  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();
  ReduceContext<GridmixKey, NullWritable, NullWritable, NullWritable> reducecontext =
      new ReduceContextImpl<GridmixKey, NullWritable, NullWritable, NullWritable>(
          conf, taskId, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, NullWritable.class);
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, NullWritable, NullWritable, NullWritable>.Context context =
      new WrappedReducer<GridmixKey, NullWritable, NullWritable, NullWritable>()
          .getReducerContext(reducecontext);
  SleepReducer test = new SleepReducer();
  long start = System.currentTimeMillis();
  test.setup(context);
  long sleeper = context.getCurrentKey().getReduceOutputBytes();
  // status has been changed
  assertEquals("Sleeping... " + sleeper + " ms left", context.getStatus());
  // should sleep 0.9 sec
  assertTrue(System.currentTimeMillis() >= (start + sleeper));
  test.cleanup(context);
  // status has been changed again
  assertEquals("Slept for " + sleeper, context.getStatus());
}
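The hand-wired reduce context above uses GenericCounter wherever a plain Counter is needed. A small standalone sketch of that class (GenericCounterSketch and the counter name are made up; GenericCounter itself is the real Hadoop class):

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.counters.GenericCounter;

public class GenericCounterSketch {
  public static void main(String[] args) {
    // GenericCounter is a simple in-memory Counter implementation, which is
    // why the test can pass it directly into ReduceContextImpl.
    Counter counter = new GenericCounter("records", "records processed");
    counter.increment(1);
    System.out.println(counter.getName() + " = " + counter.getValue()); // records = 1
  }
}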