Use of org.apache.hadoop.mapred.Counters.Counter in project hadoop by apache.
From the class TestShuffleScheduler, method TestSucceedAndFailedCopyMap.
@SuppressWarnings("rawtypes")
@Test
public <K, V> void TestSucceedAndFailedCopyMap() throws Exception {
JobConf job = new JobConf();
job.setNumMapTasks(2);
//mock creation
TaskUmbilicalProtocol mockUmbilical = mock(TaskUmbilicalProtocol.class);
Reporter mockReporter = mock(Reporter.class);
FileSystem mockFileSystem = mock(FileSystem.class);
Class<? extends org.apache.hadoop.mapred.Reducer> combinerClass = job.getCombinerClass();
// needed for mock with generic
@SuppressWarnings("unchecked") CombineOutputCollector<K, V> mockCombineOutputCollector = (CombineOutputCollector<K, V>) mock(CombineOutputCollector.class);
org.apache.hadoop.mapreduce.TaskAttemptID mockTaskAttemptID = mock(org.apache.hadoop.mapreduce.TaskAttemptID.class);
LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class);
CompressionCodec mockCompressionCodec = mock(CompressionCodec.class);
Counter mockCounter = mock(Counter.class);
TaskStatus mockTaskStatus = mock(TaskStatus.class);
Progress mockProgress = mock(Progress.class);
MapOutputFile mockMapOutputFile = mock(MapOutputFile.class);
Task mockTask = mock(Task.class);
@SuppressWarnings("unchecked") MapOutput<K, V> output = mock(MapOutput.class);
ShuffleConsumerPlugin.Context<K, V> context = new ShuffleConsumerPlugin.Context<K, V>(mockTaskAttemptID, job, mockFileSystem, mockUmbilical, mockLocalDirAllocator, mockReporter, mockCompressionCodec, combinerClass, mockCombineOutputCollector, mockCounter, mockCounter, mockCounter, mockCounter, mockCounter, mockCounter, mockTaskStatus, mockProgress, mockProgress, mockTask, mockMapOutputFile, null);
TaskStatus status = new TaskStatus() {
@Override
public boolean getIsMap() {
return false;
}
@Override
public void addFetchFailedMap(TaskAttemptID mapTaskId) {
}
};
Progress progress = new Progress();
ShuffleSchedulerImpl<K, V> scheduler = new ShuffleSchedulerImpl<K, V>(job, status, null, null, progress, context.getShuffledMapsCounter(), context.getReduceShuffleBytes(), context.getFailedShuffleCounter());
MapHost host1 = new MapHost("host1", null);
TaskAttemptID failedAttemptID = new TaskAttemptID(new org.apache.hadoop.mapred.TaskID(new JobID("test", 0), TaskType.MAP, 0), 0);
TaskAttemptID succeedAttemptID = new TaskAttemptID(new org.apache.hadoop.mapred.TaskID(new JobID("test", 0), TaskType.MAP, 1), 1);
// handle output fetch failure for failedAttemptID, part I
scheduler.hostFailed(host1.getHostName());
// handle output fetch succeed for succeedAttemptID
long bytes = (long) 500 * 1024 * 1024;
scheduler.copySucceeded(succeedAttemptID, host1, bytes, 0, 500000, output);
// handle output fetch failure for failedAttemptID, part II
// for MAPREDUCE-6361: verify no NPE exception get thrown out
scheduler.copyFailed(failedAttemptID, host1, true, false);
}
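For context, the six mocked counters passed into ShuffleConsumerPlugin.Context stand in for ordinary org.apache.hadoop.mapred.Counters.Counter instances that a real reduce task pulls from its counter registry. A minimal sketch, assuming only the mapred Counters API used elsewhere on this page and the org.apache.hadoop.mapreduce.TaskCounter enum:

// Sketch (not part of the test above): a real counter of the kind the
// mocks replace, looked up from a mapred Counters registry by enum key.
Counters counters = new Counters();
Counter shuffledMaps = counters.findCounter(TaskCounter.SHUFFLED_MAPS);
shuffledMaps.increment(1);
// shuffledMaps.getValue() is now 1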
Use of org.apache.hadoop.mapred.Counters.Counter in project hadoop by apache.
From the class GroupFactoryForTest, method testFileSystemGroupIteratorConcurrency.
@Test
public void testFileSystemGroupIteratorConcurrency() {
  Counters counters = new Counters();
  // create 2 filesystem counter groups
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);
  // iterate over the counters in this group while updating counters in
  // the group
  Group group = counters.getGroup(FileSystemCounter.class.getName());
  Iterator<Counter> iterator = group.iterator();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
  counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
  assertTrue(iterator.hasNext());
  iterator.next();
}
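The behavior under test is that the FileSystemCounter group's iterator keeps working while new counters are added to the group. For contrast, this is exactly the pattern a fail-fast JDK collection rejects; a minimal JDK-only sketch (illustrative, not from the Hadoop sources; needs java.util imports):

Map<String, Long> plain = new HashMap<String, Long>();
plain.put("fs1", 1L);
Iterator<Map.Entry<String, Long>> it = plain.entrySet().iterator();
// structural modification after the iterator was created
plain.put("fs2", 1L);
it.next(); // throws ConcurrentModificationException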
Use of org.apache.hadoop.mapred.Counters.Counter in project hadoop by apache.
From the class GroupFactoryForTest, method testCounterValue.
/**
 * Verify that counter values are initialized, incremented, and set correctly.
 */
@SuppressWarnings("deprecation")
@Test
public void testCounterValue() {
  Counters counters = new Counters();
  final int NUMBER_TESTS = 100;
  final int NUMBER_INC = 10;
  final Random rand = new Random();
  for (int i = 0; i < NUMBER_TESTS; i++) {
    long initValue = rand.nextInt();
    long expectedValue = initValue;
    Counter counter = counters.findCounter("foo", "bar");
    counter.setValue(initValue);
    assertEquals("Counter value is not initialized correctly",
        expectedValue, counter.getValue());
    for (int j = 0; j < NUMBER_INC; j++) {
      int incValue = rand.nextInt();
      counter.increment(incValue);
      expectedValue += incValue;
      assertEquals("Counter value is not incremented correctly",
          expectedValue, counter.getValue());
    }
    expectedValue = rand.nextInt();
    counter.setValue(expectedValue);
    assertEquals("Counter value is not set correctly",
        expectedValue, counter.getValue());
  }
}
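Since rand.nextInt() is negative about half the time, the loop above also exercises decrements: Counter.increment takes a (possibly negative) long delta, and the test's running expectedValue bookkeeping stays exact either way. A minimal sketch of the arithmetic being checked:

Counters cs = new Counters();
Counter c = cs.findCounter("foo", "bar");
c.setValue(10L);
c.increment(-3L); // a negative delta decrements the counter
// c.getValue() is now 7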
Use of org.apache.hadoop.mapred.Counters.Counter in project hadoop by apache.
From the class TestPipes, method runProgram.
static void runProgram(MiniMRCluster mr, MiniDFSCluster dfs, Path program,
    Path inputPath, Path outputPath, int numMaps, int numReduces,
    String[] expectedResults, JobConf conf) throws IOException {
  Path wordExec = new Path("testing/bin/application");
  JobConf job = null;
  if (conf == null) {
    job = mr.createJobConf();
  } else {
    job = new JobConf(conf);
  }
  job.setNumMapTasks(numMaps);
  job.setNumReduceTasks(numReduces);
  {
    FileSystem fs = dfs.getFileSystem();
    fs.delete(wordExec.getParent(), true);
    fs.copyFromLocalFile(program, wordExec);
    Submitter.setExecutable(job, fs.makeQualified(wordExec).toString());
    Submitter.setIsJavaRecordReader(job, true);
    Submitter.setIsJavaRecordWriter(job, true);
    FileInputFormat.setInputPaths(job, inputPath);
    FileOutputFormat.setOutputPath(job, outputPath);
    RunningJob rJob = null;
    if (numReduces == 0) {
      rJob = Submitter.jobSubmit(job);
      while (!rJob.isComplete()) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
      }
    } else {
      rJob = Submitter.runJob(job);
    }
    assertTrue("pipes job failed", rJob.isSuccessful());
    Counters counters = rJob.getCounters();
    Counters.Group wordCountCounters = counters.getGroup("WORDCOUNT");
    int numCounters = 0;
    for (Counter c : wordCountCounters) {
      System.out.println(c);
      ++numCounters;
    }
    assertTrue("No counters found!", numCounters > 0);
  }
  List<String> results = new ArrayList<String>();
  for (Path p : FileUtil.stat2Paths(dfs.getFileSystem().listStatus(outputPath,
      new Utils.OutputFileUtils.OutputFilesFilter()))) {
    results.add(MapReduceTestUtil.readOutput(p, job));
  }
  assertEquals("number of reduces is wrong",
      expectedResults.length, results.size());
  for (int i = 0; i < results.size(); i++) {
    assertEquals("pipes program " + program + " output " + i + " wrong",
        expectedResults[i], results.get(i));
  }
}
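Besides iterating the whole group as the test does, a single counter in a user-defined group can be read back by name once the job completes. A hedged sketch using the same findCounter(group, name) lookup seen in the other snippets on this page; the counter name "WORDS" is illustrative, not taken from the pipes word-count binary:

Counters counters = rJob.getCounters();
Counter words = counters.findCounter("WORDCOUNT", "WORDS"); // hypothetical counter name
long value = words.getValue();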
Use of org.apache.hadoop.mapred.Counters.Counter in project hadoop by apache.
From the class TestTaskAttemptListenerImpl, method testCheckpointIDTracking.
@Test
public void testCheckpointIDTracking() throws IOException, InterruptedException {
  SystemClock clock = SystemClock.getInstance();
  org.apache.hadoop.mapreduce.v2.app.job.Task mockTask =
      mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
  when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true);
  Job mockJob = mock(Job.class);
  when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask);
  Dispatcher dispatcher = mock(Dispatcher.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  AppContext appCtx = mock(AppContext.class);
  when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
  when(appCtx.getClock()).thenReturn(clock);
  when(appCtx.getEventHandler()).thenReturn(ea);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  TaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy) {

        @Override
        protected void registerHeartbeatHandler(Configuration conf) {
          taskHeartbeatHandler = hbHandler;
        }
      };
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.TASK_PREEMPTION, true);
  //conf.setBoolean("preemption.reduce", true);
  listener.init(conf);
  listener.start();
  TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0);
  List<Path> partialOut = new ArrayList<Path>();
  partialOut.add(new Path("/prev1"));
  partialOut.add(new Path("/prev2"));
  Counters counters = mock(Counters.class);
  final long CBYTES = 64L * 1024 * 1024;
  final long CTIME = 4344L;
  final Path CLOC = new Path("/test/1");
  Counter cbytes = mock(Counter.class);
  when(cbytes.getValue()).thenReturn(CBYTES);
  Counter ctime = mock(Counter.class);
  when(ctime.getValue()).thenReturn(CTIME);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_BYTES))).thenReturn(cbytes);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_MS))).thenReturn(ctime);
  // propagate a task status that contains a checkpoint ID
  TaskCheckpointID incid =
      new TaskCheckpointID(new FSCheckpointID(CLOC), partialOut, counters);
  listener.setCheckpointID(
      org.apache.hadoop.mapred.TaskID.downgrade(tid.getTaskID()), incid);
  // and try to get it back
  CheckpointID outcid = listener.getCheckpointID(tid.getTaskID());
  TaskCheckpointID tcid = (TaskCheckpointID) outcid;
  assertEquals(CBYTES, tcid.getCheckpointBytes());
  assertEquals(CTIME, tcid.getCheckpointTime());
  assertTrue(partialOut.containsAll(tcid.getPartialCommittedOutput()));
  assertTrue(tcid.getPartialCommittedOutput().containsAll(partialOut));
  // assert the same checkpoint ID instance comes back
  assert outcid == incid;
  listener.stop();
}
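The mocked Counters above only have to answer the two findCounter lookups that the checkpoint bytes and time accessors perform. A minimal sketch of the same round trip with a real Counters instance instead of mocks, assuming only the EnumCounter keys and the TaskCheckpointID constructor used in the test (java.util.Collections for the list):

Counters real = new Counters();
real.findCounter(EnumCounter.CHECKPOINT_BYTES).setValue(64L * 1024 * 1024);
real.findCounter(EnumCounter.CHECKPOINT_MS).setValue(4344L);
TaskCheckpointID id = new TaskCheckpointID(
    new FSCheckpointID(new Path("/test/1")),
    Collections.singletonList(new Path("/prev1")), real);
// id.getCheckpointBytes() == 64L * 1024 * 1024
// id.getCheckpointTime() == 4344L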