use of org.apache.hadoop.mapreduce.lib.input.CompressedSplitLineReader in project hydrator-plugins by cdapio.
the class CharsetTransformingLineRecordReaderTest method setUpRecordReaderForTest.
public void setUpRecordReaderForTest(SplitCompressionInputStream splitCompressionInputStream) throws IOException {
// Set up the record reader to assume we'll read the file from the beginning, with a partition size of 32 bytes,
// which is 8 characters in UTF-32, meaning we expect to read the first 2 lines for this partition.
recordReader = spy(new CharsetTransformingLineRecordReader(
    fixedLengthCharset, null,
    new CompressedSplitLineReader(splitCompressionInputStream, conf, null),
    0, 0, 32, 4096));
// We will calculate position based on the number of bytes consumed from the input stream.
doAnswer(a -> (long) availableBytes - inputStream.available()).when(recordReader).getFilePosition();
}
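A test built on this setup would then typically pull records and verify that only the lines covered by the 32-byte partition come back. The following is a hypothetical sketch, assuming a JUnit 4 test and a fixture whose first two UTF-32 lines fit inside the partition; it is not taken from the project's actual test class.
@Test
public void testReadsOnlyLinesInPartition() throws Exception {
  // Hypothetical fixture: the first two lines lie inside the 32-byte partition...
  Assert.assertTrue(recordReader.nextKeyValue());
  Assert.assertTrue(recordReader.nextKeyValue());
  // ...and the reader stops once the stubbed getFilePosition() moves past the partition end.
  Assert.assertFalse(recordReader.nextKeyValue());
}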
use of org.apache.hadoop.mapreduce.lib.input.CompressedSplitLineReader in project hydrator-plugins by cdapio.
the class CharsetTransformingLineRecordReader method initialize.
/**
* Initialize method from the parent class, simplified from the base class for our use case.
*
* @param genericSplit File Split
* @param context Execution context
* @throws IOException if the underlying file or decompression operations fail.
*/
public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
FileSplit split = (FileSplit) genericSplit;
Configuration job = context.getConfiguration();
this.maxLineLength = job.getInt(MAX_LINE_LENGTH, Integer.MAX_VALUE);
start = split.getStart();
end = start + split.getLength();
final Path file = split.getPath();
// open the file and seek to the start of the split
final FileSystem fs = file.getFileSystem(job);
FSDataInputStream fileIn = fs.open(file);
SplittableCompressionCodec codec = new FixedLengthCharsetTransformingCodec(fixedLengthCharset);
decompressor = codec.createDecompressor();
final SplitCompressionInputStream cIn = codec.createInputStream(
    fileIn, decompressor, start, end, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
in = new CompressedSplitLineReader(cIn, job, this.recordDelimiterBytes);
start = cIn.getAdjustedStart();
end = cIn.getAdjustedEnd();
filePosition = cIn;
// If this is not the first split, we always throw away the first record, because the reader of the
// previous split always (except for the last split) reads one extra line in the next() method.
if (start != 0) {
Text t = new Text();
start += in.readLine(t, 4096, Integer.MAX_VALUE);
LOG.info("Discarded line: " + t.toString());
}
this.pos = start;
}
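The line discarded when start != 0 exists because the previous split's reader always consumes one extra line past its boundary. A simplified read loop in the style of Hadoop's stock LineRecordReader.nextKeyValue() illustrates this; it is a hedged sketch, not the exact override in CharsetTransformingLineRecordReader.
public boolean nextKeyValue() throws IOException {
  if (key == null) {
    key = new LongWritable();
  }
  key.set(pos);
  if (value == null) {
    value = new Text();
  }
  int newSize = 0;
  // We keep reading while we have not passed the end of the split, so the line that straddles
  // the boundary is still read here; that is why the next split discards its first (partial) line.
  while (getFilePosition() <= end) {
    newSize = in.readLine(value, maxLineLength, Integer.MAX_VALUE);
    pos += newSize;
    if (newSize == 0 || newSize < maxLineLength) {
      break;
    }
    // Line longer than maxLineLength: skip it and try the next one.
    LOG.info("Skipped line of size " + newSize + " at pos " + (pos - newSize));
  }
  if (newSize == 0) {
    key = null;
    value = null;
    return false;
  }
  return true;
}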
use of org.apache.hadoop.mapreduce.lib.input.CompressedSplitLineReader in project shifu by ShifuML.
the class CombineRecordReader method initializeOne.
public void initializeOne(FileSplit split, TaskAttemptContext context) throws IOException {
Configuration job = context.getConfiguration();
this.maxLineLength = job.getInt(MAX_LINE_LENGTH, Integer.MAX_VALUE);
start = split.getStart();
end = start + split.getLength();
final Path file = split.getPath();
// open the file and seek to the start of the split
final FileSystem fs = file.getFileSystem(job);
fileIn = fs.open(file);
CompressionCodec codec = new CompressionCodecFactory(job).getCodec(file);
if (null != codec) {
isCompressedInput = true;
decompressor = CodecPool.getDecompressor(codec);
if (codec instanceof SplittableCompressionCodec) {
final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec).createInputStream(
    fileIn, decompressor, start, end, SplittableCompressionCodec.READ_MODE.BYBLOCK);
in = new CompressedSplitLineReader(cIn, job, this.recordDelimiterBytes);
start = cIn.getAdjustedStart();
end = cIn.getAdjustedEnd();
filePosition = cIn;
} else {
in = new SplitLineReader(codec.createInputStream(fileIn, decompressor), job, this.recordDelimiterBytes);
filePosition = fileIn;
}
} else {
fileIn.seek(start);
in = new SplitLineReader(fileIn, job, this.recordDelimiterBytes);
filePosition = fileIn;
}
// If this is not the first split, we always throw away the first record, because the reader of the
// previous split always (except for the last split) reads one extra line in the next() method.
if (start != 0) {
start += in.readLine(new Text(), 0, maxBytesToConsume(start));
}
this.pos = start;
}
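The maxBytesToConsume(start) helper used above is not part of this snippet. In Hadoop's stock LineRecordReader, from which this logic is borrowed, it caps how many bytes a single readLine call may consume; the sketch below reflects that behavior and is an assumption about this class rather than its verbatim source.
private int maxBytesToConsume(long pos) {
  // Compressed streams cannot be bounded by the split's byte range, so let readLine consume
  // whatever it needs; otherwise stop near the split end, but never below the maximum line length.
  return isCompressedInput
      ? Integer.MAX_VALUE
      : (int) Math.max(Math.min(Integer.MAX_VALUE, end - pos), maxLineLength);
}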