Use of org.apache.hadoop.io.compress.Decompressor in project hadoop by apache.
The class CompressionEmulationUtil, method getPossiblyDecompressedInputStream.
/**
 * Returns an {@link InputStream} for a file that might be compressed.
 */
static InputStream getPossiblyDecompressedInputStream(Path file, Configuration conf, long offset)
    throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  if (isCompressionEmulationEnabled(conf) && isInputCompressionEmulationEnabled(conf)) {
    CompressionCodecFactory compressionCodecs = new CompressionCodecFactory(conf);
    CompressionCodec codec = compressionCodecs.getCodec(file);
    if (codec != null) {
      Decompressor decompressor = CodecPool.getDecompressor(codec);
      if (decompressor != null) {
        CompressionInputStream in = codec.createInputStream(fs.open(file), decompressor);
        // TODO: seek() does not work on a compressed input stream, so 'offset'
        // is ignored on this path. Use SplittableCompressionCodec?
        return in;
      }
    }
  }
  FSDataInputStream in = fs.open(file);
  in.seek(offset);
  return in;
}
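As a usage illustration (not from the Gridmix source): a hypothetical caller might look like the sketch below. The path and offset are made up, and the call assumes code in the same package, since the helper is package-private.
Configuration conf = new Configuration();
Path input = new Path("/gridmix/input/part-00000.gz"); // hypothetical path
try (InputStream in =
    CompressionEmulationUtil.getPossiblyDecompressedInputStream(input, conf, 0L)) {
  byte[] buf = new byte[8192];
  int n;
  while ((n = in.read(buf)) > 0) {
    // consume the (possibly decompressed) bytes
  }
}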
Use of org.apache.hadoop.io.compress.Decompressor in project hbase by apache.
The class CellBlockBuilder, method decompress.
private ByteBuffer decompress(CompressionCodec compressor, InputStream cellBlockStream,
    int osInitialSize) throws IOException {
  // GzipCodec fails with an NPE if it has no configuration.
  if (compressor instanceof Configurable) {
    ((Configurable) compressor).setConf(this.conf);
  }
  Decompressor poolDecompressor = CodecPool.getDecompressor(compressor);
  CompressionInputStream cis = compressor.createInputStream(cellBlockStream, poolDecompressor);
  ByteBufferOutputStream bbos;
  try {
    // TODO: This is ugly. The buffer will be resized on us if we guess wrong.
    // TODO: Reuse buffers.
    bbos = new ByteBufferOutputStream(osInitialSize);
    IOUtils.copy(cis, bbos);
    bbos.close();
    return bbos.getByteBuffer();
  } finally {
    CodecPool.returnDecompressor(poolDecompressor);
  }
}
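The part worth imitating here is the get/return pairing around CodecPool. A minimal standalone sketch of the same pattern, assuming a GzipCodec and an in-memory source; gzippedBytes is a placeholder variable holding some gzip-compressed payload:
Configuration conf = new Configuration();
GzipCodec codec = new GzipCodec();
codec.setConf(conf); // as above: the codec needs a configuration
Decompressor decompressor = CodecPool.getDecompressor(codec);
try (CompressionInputStream cis =
    codec.createInputStream(new ByteArrayInputStream(gzippedBytes), decompressor)) {
  byte[] buf = new byte[4096];
  int n;
  while ((n = cis.read(buf)) > 0) {
    // consume decompressed bytes
  }
} finally {
  CodecPool.returnDecompressor(decompressor); // always return pooled decompressors
}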
Use of org.apache.hadoop.io.compress.Decompressor in project hbase by apache.
The class Compression, method decompress.
/**
 * Decompresses data from the given stream using the given compression
 * algorithm. Throws an {@link IllegalArgumentException} if the destination
 * buffer does not have enough space to hold the decompressed data.
 *
 * @param dest the output byte buffer
 * @param destOffset start writing position of the output buffer
 * @param bufferedBoundedStream a stream to read compressed data from,
 *          bounded to the exact amount of compressed data
 * @param compressedSize compressed data size, header not included
 * @param uncompressedSize uncompressed data size, header not included
 * @param compressAlgo compression algorithm used
 * @throws IOException if reading from the stream fails
 */
public static void decompress(byte[] dest, int destOffset, InputStream bufferedBoundedStream,
    int compressedSize, int uncompressedSize, Compression.Algorithm compressAlgo)
    throws IOException {
  if (dest.length - destOffset < uncompressedSize) {
    throw new IllegalArgumentException("Output buffer does not have enough space to hold "
        + uncompressedSize + " decompressed bytes, available: " + (dest.length - destOffset));
  }
  Decompressor decompressor = null;
  try {
    decompressor = compressAlgo.getDecompressor();
    InputStream is = compressAlgo.createDecompressionStream(bufferedBoundedStream, decompressor, 0);
    IOUtils.readFully(is, dest, destOffset, uncompressedSize);
    is.close();
  } finally {
    if (decompressor != null) {
      compressAlgo.returnDecompressor(decompressor);
    }
  }
}
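A hedged caller sketch: compressedSize, uncompressedSize, and boundedStream are placeholders, the GZ algorithm is chosen only for illustration, and boundedStream is assumed to be limited to exactly compressedSize bytes, as the contract above requires.
byte[] dest = new byte[uncompressedSize]; // placeholder size from the block header
Compression.decompress(dest, 0, boundedStream, compressedSize,
    uncompressedSize, Compression.Algorithm.GZ);
// dest[0 .. uncompressedSize) now holds the decompressed block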
Use of org.apache.hadoop.io.compress.Decompressor in project hive by apache.
The class CodecPool, method getDecompressor.
/**
 * Get a {@link Decompressor} for the given {@link CompressionCodec} from the
 * pool, or a new one if the pool is empty.
 *
 * @param codec the <code>CompressionCodec</code> for which to get the
 *          <code>Decompressor</code>
 * @return a <code>Decompressor</code> for the given
 *         <code>CompressionCodec</code>, from the pool or a new one
 */
public static Decompressor getDecompressor(CompressionCodec codec) {
  Decompressor decompressor = borrow(DECOMPRESSOR_POOL, codec.getDecompressorType());
  if (decompressor == null) {
    decompressor = codec.createDecompressor();
    LOG.info("Got brand-new decompressor");
  } else {
    LOG.debug("Got recycled decompressor");
  }
  return decompressor;
}
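Callers are expected to hand the decompressor back when done; a minimal sketch, assuming a DefaultCodec purely for illustration and assuming the pool exposes the usual returnDecompressor counterpart, as Hadoop's own CodecPool does:
DefaultCodec codec = new DefaultCodec();
codec.setConf(new Configuration()); // codecs generally need a Configuration
Decompressor decompressor = CodecPool.getDecompressor(codec);
try {
  // use it, e.g. codec.createInputStream(compressedIn, decompressor)
} finally {
  CodecPool.returnDecompressor(decompressor);
}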
Use of org.apache.hadoop.io.compress.Decompressor in project hadoop by apache.
The class TestZlibCompressorDecompressor, method testZlibCompressorDecompressorSetDictionary.
@Test
public void testZlibCompressorDecompressorSetDictionary() {
  Configuration conf = new Configuration();
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
    checkSetDictionaryNullPointerException(zlibCompressor);
    checkSetDictionaryNullPointerException(zlibDecompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(zlibDecompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(zlibCompressor);
  } else {
    // Native zlib is not loaded on this branch, so the assertion always
    // fails, deliberately failing the test with a diagnostic message.
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
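The helper assertions are not shown on this page; a hypothetical reconstruction of one of them (the real helpers live in the same test class), to show what the test exercises:
private void checkSetDictionaryNullPointerException(Decompressor decompressor) {
  try {
    decompressor.setDictionary(null, 0, 1); // a null dictionary must be rejected
    fail("setDictionary should have thrown a NullPointerException");
  } catch (NullPointerException expected) {
    // expected: the zlib wrapper rejects a null dictionary up front
  }
}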