Use of org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl in project Hadoop by Apache.
The class MultipleOutputs, method write:
/**
* Write key value to an output file name.
*
* Gets the record writer from job's output format.
* Job's output format should be a FileOutputFormat.
*
* @param key the key
* @param value the value
* @param baseOutputPath base-output path to write the record to.
* Note: Framework will generate unique filename for the baseOutputPath
* <b>Warning</b>: when the baseOutputPath is a path that resolves
* outside of the final job output directory, the directory is created
* immediately and then persists through subsequent task retries, breaking
* the concept of output committing.
*/
@SuppressWarnings("unchecked")
public void write(KEYOUT key, VALUEOUT value, String baseOutputPath)
    throws IOException, InterruptedException {
  checkBaseOutputPath(baseOutputPath);
  if (jobOutputFormatContext == null) {
    jobOutputFormatContext = new TaskAttemptContextImpl(context.getConfiguration(),
        context.getTaskAttemptID(), new WrappedStatusReporter(context));
  }
  getRecordWriter(jobOutputFormatContext, baseOutputPath).write(key, value);
}
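For context, here is a minimal sketch of how this write(key, value, baseOutputPath) overload is typically driven from a reducer. The reducer types, the summing logic, and the "reports/part" base path are illustrative assumptions, not taken from the snippet above; the only requirement stated by the javadoc is that the job's output format is a FileOutputFormat.

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

public class ReportReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

  private MultipleOutputs<Text, IntWritable> mos;

  @Override
  protected void setup(Context context) {
    // MultipleOutputs builds the TaskAttemptContextImpl shown above lazily on first write
    mos = new MultipleOutputs<>(context);
  }

  @Override
  protected void reduce(Text key, Iterable<IntWritable> values, Context context)
      throws IOException, InterruptedException {
    int sum = 0;
    for (IntWritable v : values) {
      sum += v.get();
    }
    // the framework appends a unique suffix (e.g. "-r-00000") to the base path "reports/part"
    mos.write(key, new IntWritable(sum), "reports/part");
  }

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    // closes every record writer opened by write(...); without this, output may be lost
    mos.close();
  }
}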
Use of org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl in project Hadoop by Apache.
The class TestLineRecordReader, method testStripBOM:
@Test
public void testStripBOM() throws IOException {
  // the test data contains a BOM at the start of the file
  // confirm the BOM is skipped by LineRecordReader
  String UTF8_BOM = "\uFEFF";
  URL testFileUrl = getClass().getClassLoader().getResource("testBOM.txt");
  assertNotNull("Cannot find testBOM.txt", testFileUrl);
  File testFile = new File(testFileUrl.getFile());
  Path testFilePath = new Path(testFile.getAbsolutePath());
  long testFileSize = testFile.length();
  Configuration conf = new Configuration();
  conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH,
      Integer.MAX_VALUE);
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
  // read the data and check whether BOM is skipped
  FileSplit split = new FileSplit(testFilePath, 0, testFileSize, (String[]) null);
  LineRecordReader reader = new LineRecordReader();
  reader.initialize(split, context);
  int numRecords = 0;
  boolean firstLine = true;
  boolean skipBOM = true;
  while (reader.nextKeyValue()) {
    if (firstLine) {
      firstLine = false;
      if (reader.getCurrentValue().toString().startsWith(UTF8_BOM)) {
        skipBOM = false;
      }
    }
    ++numRecords;
  }
  reader.close();
  assertTrue("BOM is not skipped", skipBOM);
}
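The testBOM.txt resource itself is not part of this excerpt. As a rough illustration of what such a fixture contains, the following hypothetical generator (file name and line contents are assumptions) writes a UTF-8 byte order mark followed by ordinary lines:

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

public class MakeBomFixture {
  public static void main(String[] args) throws IOException {
    // hypothetical fixture generator; not part of the Hadoop test sources
    try (Writer w = new OutputStreamWriter(new FileOutputStream("testBOM.txt"),
        StandardCharsets.UTF_8)) {
      w.write('\uFEFF');   // UTF-8 byte order mark, stored on disk as 0xEF 0xBB 0xBF
      w.write("BOM\n");    // LineRecordReader should return this line without the BOM prefix
      w.write("data\n");
    }
  }
}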
Use of org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl in project Hadoop by Apache.
The class TestLineRecordReader, method testUncompressedInputCustomDelimiterPosValue:
@Test
public void testUncompressedInputCustomDelimiterPosValue() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt("io.file.buffer.size", 10);
  conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH,
      Integer.MAX_VALUE);
  String inputData = "abcdefghij++kl++mno";
  Path inputFile = createInputFile(conf, inputData);
  String delimiter = "++";
  byte[] recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
  int splitLength = 15;
  FileSplit split = new FileSplit(inputFile, 0, splitLength, (String[]) null);
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
  LineRecordReader reader = new LineRecordReader(recordDelimiterBytes);
  reader.initialize(split, context);
  // Get first record: "abcdefghij"
  assertTrue("Expected record got nothing", reader.nextKeyValue());
  LongWritable key = reader.getCurrentKey();
  Text value = reader.getCurrentValue();
  assertEquals("Wrong length for record value", 10, value.getLength());
  assertEquals("Wrong position after record read", 0, key.get());
  // Get second record: "kl"
  assertTrue("Expected record got nothing", reader.nextKeyValue());
  assertEquals("Wrong length for record value", 2, value.getLength());
  // Key should be 12 right after "abcdefghij++"
  assertEquals("Wrong position after record read", 12, key.get());
  // Get third record: "mno"
  assertTrue("Expected record got nothing", reader.nextKeyValue());
  assertEquals("Wrong length for record value", 3, value.getLength());
  // Key should be 16 right after "abcdefghij++kl++"
  assertEquals("Wrong position after record read", 16, key.get());
  assertFalse(reader.nextKeyValue());
  // Key should be 19 right after "abcdefghij++kl++mno"
  assertEquals("Wrong position after record read", 19, key.get());
  // after refresh should be empty
  key = reader.getCurrentKey();
  assertNull("Unexpected key returned", key);
  reader.close();
  split = new FileSplit(inputFile, splitLength, inputData.length() - splitLength,
      (String[]) null);
  reader = new LineRecordReader(recordDelimiterBytes);
  reader.initialize(split, context);
  // No record is in the second split because the second split dropped
  // the first record, which was already reported by the first split.
  assertFalse("Unexpected record returned", reader.nextKeyValue());
  key = reader.getCurrentKey();
  assertNull("Unexpected key returned", key);
  reader.close();
  // multi-char delimiter with a prefix of the delimiter appearing in the data
  inputData = "abcd+efgh++ijk++mno";
  inputFile = createInputFile(conf, inputData);
  splitLength = 5;
  split = new FileSplit(inputFile, 0, splitLength, (String[]) null);
  reader = new LineRecordReader(recordDelimiterBytes);
  reader.initialize(split, context);
  // Get first record: "abcd+efgh"
  assertTrue("Expected record got nothing", reader.nextKeyValue());
  key = reader.getCurrentKey();
  value = reader.getCurrentValue();
  assertEquals("Wrong position after record read", 0, key.get());
  assertEquals("Wrong length for record value", 9, value.getLength());
  // should have jumped over the delimiter, no record
  assertFalse(reader.nextKeyValue());
  assertEquals("Wrong position after record read", 11, key.get());
  // after refresh should be empty
  key = reader.getCurrentKey();
  assertNull("Unexpected key returned", key);
  reader.close();
  // next split: check for duplicate or dropped records
  split = new FileSplit(inputFile, splitLength, inputData.length() - splitLength,
      (String[]) null);
  reader = new LineRecordReader(recordDelimiterBytes);
  reader.initialize(split, context);
  assertTrue("Expected record got nothing", reader.nextKeyValue());
  key = reader.getCurrentKey();
  value = reader.getCurrentValue();
  // Get second record: "ijk" first in this split
  assertEquals("Wrong position after record read", 11, key.get());
  assertEquals("Wrong length for record value", 3, value.getLength());
  // Get third record: "mno" second in this split
  assertTrue("Expected record got nothing", reader.nextKeyValue());
  assertEquals("Wrong position after record read", 16, key.get());
  assertEquals("Wrong length for record value", 3, value.getLength());
  // should be at the end of the input
  assertFalse(reader.nextKeyValue());
  assertEquals("Wrong position after record read", 19, key.get());
  reader.close();
  inputData = "abcd|efgh|+|ij|kl|+|mno|pqr";
  inputFile = createInputFile(conf, inputData);
  delimiter = "|+|";
  recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
  // walk over all buffer and split sizes to check the handling
  // of the ambiguous bytes of the delimiter
  for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
    for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
      // track where we are in the input data
      int keyPosition = 0;
      conf.setInt("io.file.buffer.size", bufferSize);
      split = new FileSplit(inputFile, 0, bufferSize, (String[]) null);
      reader = new LineRecordReader(recordDelimiterBytes);
      reader.initialize(split, context);
      // Get the first record: "abcd|efgh" always possible
      assertTrue("Expected record got nothing", reader.nextKeyValue());
      key = reader.getCurrentKey();
      value = reader.getCurrentValue();
      assertTrue("abcd|efgh".equals(value.toString()));
      // Position should be 0 right at the start
      assertEquals("Wrong position after record read", keyPosition, key.get());
      // Position should be 12 right after the first "|+|"
      keyPosition = 12;
      // get the next record: "ij|kl" if the split/buffer allows it
      if (reader.nextKeyValue()) {
        // check the record info: "ij|kl"
        assertTrue("ij|kl".equals(value.toString()));
        assertEquals("Wrong position after record read", keyPosition, key.get());
        // Position should be 20 after the second "|+|"
        keyPosition = 20;
      }
      // get the third record: "mno|pqr" if the split/buffer allows it
      if (reader.nextKeyValue()) {
        // check the record info: "mno|pqr"
        assertTrue("mno|pqr".equals(value.toString()));
        assertEquals("Wrong position after record read", keyPosition, key.get());
        // Position should be the end of the input
        keyPosition = inputData.length();
      }
      assertFalse("Unexpected record returned", reader.nextKeyValue());
      // no more records can be read; we should be at the last position
      assertEquals("Wrong position after record read", keyPosition, key.get());
      // after refresh should be empty
      key = reader.getCurrentKey();
      assertNull("Unexpected key returned", key);
      reader.close();
    }
  }
}
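The test relies on a createInputFile helper that is not part of this excerpt. A plausible shape, sketched here as an assumption (the working-directory convention, file name, and overwrite behavior are guesses, and the usual java.io, java.nio.charset, and org.apache.hadoop.fs imports are assumed), simply writes the string to a file on the local file system and returns its Path:

// hypothetical reconstruction of the helper used above; the actual Hadoop test may differ
private static final Path workDir = new Path(
    System.getProperty("test.build.data", "target/test-data"), "TestLineRecordReader");

private Path createInputFile(Configuration conf, String data) throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);
  Path file = new Path(workDir, "test.txt");
  // overwrite any file left behind by a previous run
  try (Writer writer = new OutputStreamWriter(localFs.create(file, true),
      StandardCharsets.UTF_8)) {
    writer.write(data);
  }
  return file;
}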
Use of org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl in project Hadoop by Apache.
The class TestLineRecordReader, method testSplitRecordsForFile:
private void testSplitRecordsForFile(Configuration conf, long firstSplitLength,
    long testFileSize, Path testFilePath) throws IOException {
  conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH,
      Integer.MAX_VALUE);
  assertTrue("unexpected test data at " + testFilePath, testFileSize > firstSplitLength);
  String delimiter = conf.get("textinputformat.record.delimiter");
  byte[] recordDelimiterBytes = null;
  if (null != delimiter) {
    recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
  }
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
  // read the data without splitting to count the records
  FileSplit split = new FileSplit(testFilePath, 0, testFileSize, (String[]) null);
  LineRecordReader reader = new LineRecordReader(recordDelimiterBytes);
  reader.initialize(split, context);
  int numRecordsNoSplits = 0;
  while (reader.nextKeyValue()) {
    ++numRecordsNoSplits;
  }
  reader.close();
  // count the records in the first split
  split = new FileSplit(testFilePath, 0, firstSplitLength, (String[]) null);
  reader = new LineRecordReader(recordDelimiterBytes);
  reader.initialize(split, context);
  int numRecordsFirstSplit = 0;
  while (reader.nextKeyValue()) {
    ++numRecordsFirstSplit;
  }
  reader.close();
  // count the records in the second split
  split = new FileSplit(testFilePath, firstSplitLength, testFileSize - firstSplitLength,
      (String[]) null);
  reader = new LineRecordReader(recordDelimiterBytes);
  reader.initialize(split, context);
  int numRecordsRemainingSplits = 0;
  while (reader.nextKeyValue()) {
    ++numRecordsRemainingSplits;
  }
  reader.close();
  assertEquals("Unexpected number of records in split ", numRecordsNoSplits,
      numRecordsFirstSplit + numRecordsRemainingSplits);
}
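For context, a hypothetical caller of this helper; the resource name, delimiter, and split point below are illustrative assumptions, not taken from the Hadoop test suite. It shows the intended contract: splitting the file at an arbitrary boundary must neither drop nor duplicate records.

@Test
public void testRecordSpanningSplitsWithCustomDelimiter() throws IOException {
  Configuration conf = new Configuration();
  // records end with a two-byte delimiter that may straddle the split boundary
  conf.set("textinputformat.record.delimiter", "\r\n");
  URL testFileUrl = getClass().getClassLoader().getResource("testCRLF.txt");
  assertNotNull("Cannot find testCRLF.txt", testFileUrl);
  File testFile = new File(testFileUrl.getFile());
  Path testFilePath = new Path(testFile.getAbsolutePath());
  // split roughly in the middle; the helper asserts that the per-split record
  // counts add up to the unsplit count
  testSplitRecordsForFile(conf, testFile.length() / 2, testFile.length(), testFilePath);
}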
Use of org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl in project Hadoop by Apache.
The class TestCombineFileRecordReader, method testProgressIsReportedIfInputASeriesOfEmptyFiles:
@SuppressWarnings("unchecked")
@Test
public void testProgressIsReportedIfInputASeriesOfEmptyFiles()
    throws IOException, InterruptedException {
  JobConf conf = new JobConf();
  Path[] paths = new Path[3];
  File[] files = new File[3];
  long[] fileLength = new long[3];
  try {
    for (int i = 0; i < 3; i++) {
      File dir = new File(outDir.toString());
      dir.mkdir();
      files[i] = new File(dir, "testfile" + i);
      FileWriter fileWriter = new FileWriter(files[i]);
      fileWriter.flush();
      fileWriter.close();
      fileLength[i] = i;
      paths[i] = new Path(outDir + "/testfile" + i);
    }
    CombineFileSplit combineFileSplit = new CombineFileSplit(paths, fileLength);
    TaskAttemptID taskAttemptID = Mockito.mock(TaskAttemptID.class);
    TaskReporter reporter = Mockito.mock(TaskReporter.class);
    TaskAttemptContextImpl taskAttemptContext =
        new TaskAttemptContextImpl(conf, taskAttemptID, reporter);
    CombineFileRecordReader cfrr = new CombineFileRecordReader(combineFileSplit,
        taskAttemptContext, TextRecordReaderWrapper.class);
    cfrr.initialize(combineFileSplit, taskAttemptContext);
    verify(reporter).progress();
    Assert.assertFalse(cfrr.nextKeyValue());
    verify(reporter, times(3)).progress();
  } finally {
    FileUtil.fullyDelete(new File(outDir.toString()));
  }
}
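The TextRecordReaderWrapper class handed to CombineFileRecordReader is not shown in this excerpt. CombineFileRecordReader instantiates its wrapper reflectively, once per file in the CombineFileSplit, so the wrapper must expose a (CombineFileSplit, TaskAttemptContext, Integer) constructor. A typical shape, sketched here as an assumption rather than the test's actual code, delegates to CombineFileRecordReaderWrapper with a TextInputFormat:

// plausible shape of the wrapper referenced above; the real test class may differ
private static class TextRecordReaderWrapper
    extends CombineFileRecordReaderWrapper<LongWritable, Text> {

  // this exact constructor signature is required, because CombineFileRecordReader
  // looks it up via reflection to build one underlying reader per file in the split
  public TextRecordReaderWrapper(CombineFileSplit split, TaskAttemptContext context,
      Integer idx) throws IOException, InterruptedException {
    super(new TextInputFormat(), split, context, idx);
  }
}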