Use of org.apache.hadoop.mapreduce.TaskAttemptID in project hadoop by apache.
The class TestCombineFileRecordReader, method testProgressIsReportedIfInputASeriesOfEmptyFiles.
@SuppressWarnings("unchecked")
@Test
public void testProgressIsReportedIfInputASeriesOfEmptyFiles() throws IOException, InterruptedException {
  JobConf conf = new JobConf();
  Path[] paths = new Path[3];
  File[] files = new File[3];
  long[] fileLength = new long[3];
  try {
    // Create three empty files; the split records lengths 0, 1 and 2
    // even though each file contains no data.
    for (int i = 0; i < 3; i++) {
      File dir = new File(outDir.toString());
      dir.mkdir();
      files[i] = new File(dir, "testfile" + i);
      FileWriter fileWriter = new FileWriter(files[i]);
      fileWriter.flush();
      fileWriter.close();
      fileLength[i] = i;
      paths[i] = new Path(outDir + "/testfile" + i);
    }
    CombineFileSplit combineFileSplit = new CombineFileSplit(paths, fileLength);
    TaskAttemptID taskAttemptID = Mockito.mock(TaskAttemptID.class);
    TaskReporter reporter = Mockito.mock(TaskReporter.class);
    TaskAttemptContextImpl taskAttemptContext =
        new TaskAttemptContextImpl(conf, taskAttemptID, reporter);
    CombineFileRecordReader cfrr = new CombineFileRecordReader(
        combineFileSplit, taskAttemptContext, TextRecordReaderWrapper.class);
    cfrr.initialize(combineFileSplit, taskAttemptContext);
    // One progress call so far (from initialize); three in total once
    // all of the empty sub-splits have been consumed.
    verify(reporter).progress();
    Assert.assertFalse(cfrr.nextKeyValue());
    verify(reporter, times(3)).progress();
  } finally {
    FileUtil.fullyDelete(new File(outDir.toString()));
  }
}
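For context, a hedged sketch of what the mocked TaskAttemptID stands in for: the real class can be constructed from its five components, and its toString() yields the canonical attempt string. The class name TaskAttemptIdDemo and the index values below are illustrative, not part of the test.

import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskAttemptIdDemo {
  public static void main(String[] args) {
    // jtIdentifier "jt", job 1, map task 0, first attempt (all example values)
    TaskAttemptID id = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
    // Should print something like: attempt_jt_0001_m_000000_0
    System.out.println(id);
  }
}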
Use of org.apache.hadoop.mapreduce.TaskAttemptID in project hadoop by apache.
The class HsController, method logs.
/**
* Render the logs page.
*/
public void logs() {
  String logEntity = $(ENTITY_STRING);
  JobID jid = null;
  try {
    jid = JobID.forName(logEntity);
    set(JOB_ID, logEntity);
    requireJob();
  } catch (Exception e) {
    // Not a job ID; fall through and try it as a task attempt ID below.
  }
  if (jid == null) {
    try {
      TaskAttemptID taskAttemptId = TaskAttemptID.forName(logEntity);
      set(TASK_ID, taskAttemptId.getTaskID().toString());
      set(JOB_ID, taskAttemptId.getJobID().toString());
      requireTask();
      requireJob();
    } catch (Exception e) {
      // Not a task attempt ID either; render the page without IDs set.
    }
  }
  render(HsLogsPage.class);
}
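To illustrate the fallback parsing above, a minimal sketch: JobID.forName() and TaskAttemptID.forName() each throw an exception on strings that are not in canonical form, which is what drives the try/catch cascade. The ID strings and the class name ForNameDemo below are made-up examples.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;

public class ForNameDemo {
  public static void main(String[] args) {
    // Succeeds only for a canonical job ID string...
    JobID jid = JobID.forName("job_1499348259200_0003");
    // ...and this only for a canonical attempt ID string.
    TaskAttemptID tid =
        TaskAttemptID.forName("attempt_1499348259200_0003_m_000005_0");
    System.out.println(tid.getJobID().equals(jid)); // true
    System.out.println(tid.getTaskID()); // task_1499348259200_0003_m_000005
  }
}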
Use of org.apache.hadoop.mapreduce.TaskAttemptID in project hadoop by apache.
The class TestCompletedTask, method testCompletedTaskAttempt.
/**
 * Test some methods of CompletedTaskAttempt.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {
  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);
  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);
  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
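As a side note, a hedged sketch of the ID hierarchy the test builds (JobID contains TaskID contains TaskAttemptID), reusing the same example values; the class name IdHierarchyDemo is illustrative.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class IdHierarchyDemo {
  public static void main(String[] args) {
    JobID jobId = new JobID("12345", 0);
    TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
    TaskAttemptID attemptId = new TaskAttemptID(taskId, 0);
    // Should print something like: attempt_12345_0000_r_000000_0
    System.out.println(attemptId);
    System.out.println(attemptId.getTaskID().equals(taskId)); // true
  }
}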
Use of org.apache.hadoop.mapreduce.TaskAttemptID in project hadoop by apache.
The class TestCompletedTask, method testTaskStartTimes.
@Test(timeout = 5000)
public void testTaskStartTimes() {
  TaskId taskId = mock(TaskId.class);
  TaskInfo taskInfo = mock(TaskInfo.class);
  Map<TaskAttemptID, TaskAttemptInfo> taskAttempts =
      new TreeMap<TaskAttemptID, TaskAttemptInfo>();
  TaskAttemptID id = new TaskAttemptID("0", 0, TaskType.MAP, 0, 0);
  TaskAttemptInfo info = mock(TaskAttemptInfo.class);
  when(info.getAttemptId()).thenReturn(id);
  when(info.getStartTime()).thenReturn(10L);
  taskAttempts.put(id, info);
  id = new TaskAttemptID("1", 0, TaskType.MAP, 1, 1);
  info = mock(TaskAttemptInfo.class);
  when(info.getAttemptId()).thenReturn(id);
  when(info.getStartTime()).thenReturn(20L);
  taskAttempts.put(id, info);
  when(taskInfo.getAllTaskAttempts()).thenReturn(taskAttempts);
  CompletedTask task = new CompletedTask(taskId, taskInfo);
  TaskReport report = task.getReport();
  // Make sure the startTime returned by the report is the lesser of the
  // attempt launch times.
  assertTrue(report.getStartTime() == 10);
}
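The TreeMap above works because TaskAttemptID is Comparable (inherited from org.apache.hadoop.mapreduce.ID), so attempts iterate in a deterministic sorted order. A minimal sketch under that assumption; OrderingDemo and the values are illustrative.

import java.util.TreeMap;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;

public class OrderingDemo {
  public static void main(String[] args) {
    TreeMap<TaskAttemptID, Long> startTimes = new TreeMap<>();
    // Insert in reverse order; the map keeps keys sorted regardless.
    startTimes.put(new TaskAttemptID("1", 0, TaskType.MAP, 1, 1), 20L);
    startTimes.put(new TaskAttemptID("0", 0, TaskType.MAP, 0, 0), 10L);
    // Should print something like: attempt_0_0000_m_000000_0
    System.out.println(startTimes.firstKey());
  }
}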
Use of org.apache.hadoop.mapreduce.TaskAttemptID in project hadoop by apache.
The class TestCombineFileInputFormat, method testReinit.
@Test
public void testReinit() throws Exception {
  // Test that a split containing multiple files works correctly,
  // with the child RecordReader getting its initialize() method
  // called a second time.
  TaskAttemptID taskId = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 0);
  Configuration conf = new Configuration();
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, taskId);
  // This will create a CombineFileRecordReader that itself contains a
  // DummyRecordReader.
  InputFormat inputFormat = new ChildRRInputFormat();
  Path[] files = { new Path("file1"), new Path("file2") };
  long[] lengths = { 1, 1 };
  CombineFileSplit split = new CombineFileSplit(files, lengths);
  RecordReader rr = inputFormat.createRecordReader(split, context);
  assertTrue("Unexpected RR type!", rr instanceof CombineFileRecordReader);
  // The first initialize() call comes from MapTask. We'll do it here.
  rr.initialize(split, context);
  // First value is the first filename.
  assertTrue(rr.nextKeyValue());
  assertEquals("file1", rr.getCurrentValue().toString());
  // The inner RR will return false, because it only emits one (k, v) pair.
  // But there's another sub-split to process. This returns true to us.
  assertTrue(rr.nextKeyValue());
  // And the 2nd RR will have its initialize method called correctly.
  assertEquals("file2", rr.getCurrentValue().toString());
  // But after both child RRs have returned their singleton (k, v), this
  // should also return false.
  assertFalse(rr.nextKeyValue());
}
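For reference, a small hedged sketch of the CombineFileSplit accessors that CombineFileRecordReader walks when it switches from one child reader to the next; the class name SplitDemo is illustrative and the paths mirror the test above.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

public class SplitDemo {
  public static void main(String[] args) throws Exception {
    Path[] files = { new Path("file1"), new Path("file2") };
    long[] lengths = { 1, 1 };
    CombineFileSplit split = new CombineFileSplit(files, lengths);
    // One child reader is created per (path, length) pair in the split.
    for (int i = 0; i < split.getNumPaths(); i++) {
      System.out.println(split.getPath(i) + " : " + split.getLength(i));
    }
  }
}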