use of org.apache.hadoop.io.compress.lz4.Lz4Decompressor in project hadoop by apache.
the class TestLz4CompressorDecompressor method testDecompressorSetInputNullPointerException.
// test on NullPointerException in {@code decompressor.setInput()}
@Test
public void testDecompressorSetInputNullPointerException() {
  try {
    Lz4Decompressor decompressor = new Lz4Decompressor();
    decompressor.setInput(null, 0, 10);
    fail("testDecompressorSetInputNullPointerException error !!!");
  } catch (NullPointerException ex) {
    // expected
  } catch (Exception e) {
    fail("testDecompressorSetInputNullPointerException ex error !!!");
  }
}
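The test exercises the null check in setInput(byte[], int, int). For contrast, a well-formed call looks like this (the buffer and sizes are illustrative, not taken from the Hadoop source):

// Illustrative only: a valid setInput call passes a non-null buffer with an in-range offset and length.
Lz4Decompressor decompressor = new Lz4Decompressor();
byte[] compressed = new byte[64];
decompressor.setInput(compressed, 0, compressed.length);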
use of org.apache.hadoop.io.compress.lz4.Lz4Decompressor in project presto by prestodb.
the class ParquetCompressionUtils method decompressLz4.
private static Slice decompressLz4(Slice input, int uncompressedSize) {
  byte[] buffer = new byte[uncompressedSize];
  decompress(new Lz4Decompressor(), input, 0, input.length(), buffer, 0);
  return wrappedBuffer(buffer);
}
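The private decompress(...) helper is not part of this snippet. As a rough sketch of what it might do, assuming a decompressor exposing the io.airlift.compress.Decompressor-style decompress(byte[], int, int, byte[], int, int) method and the Slice.getBytes accessor; the signature is inferred from the call above, not copied from the Presto source:

// Hypothetical sketch of the private decompress(...) helper called above;
// the real Presto implementation may differ.
private static void decompress(Decompressor decompressor, Slice input, int inputOffset, int inputLength, byte[] output, int outputOffset) {
  // Copy the compressed bytes out of the Slice into a plain array ...
  byte[] compressed = new byte[inputLength];
  input.getBytes(inputOffset, compressed, 0, inputLength);
  // ... and let the codec write the uncompressed bytes into the caller-supplied buffer.
  decompressor.decompress(compressed, 0, inputLength, output, outputOffset, output.length - outputOffset);
}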
use of org.apache.hadoop.io.compress.lz4.Lz4Decompressor in project presto by prestodb.
the class PageFileWriterFactory method createPagesSerdeForPageFile.
public static PagesSerde createPagesSerdeForPageFile(BlockEncodingSerde blockEncodingSerde, Optional<HiveCompressionCodec> compressionCodec) {
  if (!compressionCodec.isPresent()) {
    return new PagesSerde(blockEncodingSerde, Optional.empty(), Optional.empty(), Optional.empty());
  }
  PageCompressor pageCompressor = null;
  PageDecompressor pageDecompressor = null;
  switch (compressionCodec.get()) {
    case NONE:
      break;
    case SNAPPY:
      pageCompressor = new AirliftCompressorAdapter(new SnappyCompressor());
      pageDecompressor = new AirliftDecompressorAdapter(new SnappyDecompressor());
      break;
    case LZ4:
      pageCompressor = new AirliftCompressorAdapter(new Lz4Compressor());
      pageDecompressor = new AirliftDecompressorAdapter(new Lz4Decompressor());
      break;
    case GZIP:
      pageCompressor = new AirliftCompressorAdapter(new DeflateCompressor(OptionalInt.empty()));
      pageDecompressor = new AirliftDecompressorAdapter(new InflateDecompressor());
      break;
    case ZSTD:
      pageCompressor = new AirliftCompressorAdapter(new ZstdJniCompressor(OptionalInt.empty()));
      pageDecompressor = new AirliftDecompressorAdapter(new ZstdJniDecompressor());
      break;
    default:
      throw new PrestoException(GENERIC_INTERNAL_ERROR, format("%s compression is not supported for %s", compressionCodec.get().name(), PAGEFILE.getOutputFormat()));
  }
  return new PagesSerde(blockEncodingSerde, Optional.ofNullable(pageCompressor), Optional.ofNullable(pageDecompressor), Optional.empty());
}
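For reference, a minimal usage sketch; blockEncodingSerde is assumed to be an existing BlockEncodingSerde instance, and the variable names are illustrative:

// Illustrative only: build an LZ4-compressing serde for page files.
PagesSerde lz4Serde = PageFileWriterFactory.createPagesSerdeForPageFile(
    blockEncodingSerde,
    Optional.of(HiveCompressionCodec.LZ4));

// An absent codec yields a serde without compression support.
PagesSerde plainSerde = PageFileWriterFactory.createPagesSerdeForPageFile(
    blockEncodingSerde,
    Optional.empty());

Note that HiveCompressionCodec.NONE and an empty Optional end up with the same result: the compressor and decompressor stay null and collapse to Optional.empty() via Optional.ofNullable in the final constructor call.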
use of org.apache.hadoop.io.compress.lz4.Lz4Decompressor in project hadoop by apache.
the class TestCompressorDecompressor method testCompressorDecompressor.
@Test
public void testCompressorDecompressor() {
  // no more for this data
  int SIZE = 44 * 1024;
  byte[] rawData = generate(SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(new SnappyCompressor(), new SnappyDecompressor())
        .withCompressDecompressPair(new Lz4Compressor(), new Lz4Decompressor())
        .withCompressDecompressPair(new BuiltInZlibDeflater(), new BuiltInZlibInflater())
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    GenericTestUtils.assertExceptionContains("testCompressorDecompressor error !!!", ex);
  }
}
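The generate(int) helper referenced above is a test-data generator that is not shown in this snippet. A minimal stand-in could look like the following (a sketch, not the actual Hadoop utility):

// Hypothetical stand-in for the test-data generator used above; it draws from a
// small byte alphabet so the generated buffer is actually compressible.
private static byte[] generate(int size) {
  java.util.Random random = new java.util.Random(12345L); // fixed seed for reproducibility
  byte[] data = new byte[size];
  for (int i = 0; i < size; i++) {
    data[i] = (byte) random.nextInt(16);
  }
  return data;
}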
use of org.apache.hadoop.io.compress.lz4.Lz4Decompressor in project hadoop by apache.
the class TestLz4CompressorDecompressor method testDecompressorSetInputAIOUBException.
// test on ArrayIndexOutOfBoundsException in {@code decompressor.setInput()}
@Test
public void testDecompressorSetInputAIOUBException() {
  try {
    Lz4Decompressor decompressor = new Lz4Decompressor();
    decompressor.setInput(new byte[] {}, -5, 10);
    fail("testDecompressorSetInputAIOUBException error !!!");
  } catch (ArrayIndexOutOfBoundsException ex) {
    // expected
  } catch (Exception e) {
    fail("testDecompressorSetInputAIOUBException ex error !!!");
  }
}