Use of io.airlift.compress.lz4.Lz4Compressor in project hadoop by apache.
The class TestCompressorDecompressor, method testCompressorDecompressorWithExeedBufferLimit.
@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
  int BYTE_SIZE = 100 * 1024;
  byte[] rawData = generate(BYTE_SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(
            new SnappyCompressor(BYTE_SIZE + BYTE_SIZE / 2),
            new SnappyDecompressor(BYTE_SIZE + BYTE_SIZE / 2))
        .withCompressDecompressPair(
            new Lz4Compressor(BYTE_SIZE),
            new Lz4Decompressor(BYTE_SIZE))
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    GenericTestUtils.assertExceptionContains("testCompressorDecompressorWithExeedBufferLimit error !!!", ex);
  }
}
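The tests in this section call a generate helper whose body is not shown here. A minimal sketch of what such a helper could look like, assuming it only needs to fill a buffer of the requested size with pseudo-random bytes (the name, seed, and byte range are illustrative, not the project's actual implementation):

// Hypothetical stand-in for the generate(int) helper used by these tests.
private static byte[] generate(int size) {
  byte[] data = new byte[size];
  java.util.Random rnd = new java.util.Random(12345L); // fixed seed keeps runs reproducible
  rnd.nextBytes(data);
  return data;
}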
Use of io.airlift.compress.lz4.Lz4Compressor in project hadoop by apache.
The class TestLz4CompressorDecompressor, method testCompressorDecompressorEmptyStreamLogic.
// test compress/decompress with empty stream
@Test
public void testCompressorDecompressorEmptyStreamLogic() {
  ByteArrayInputStream bytesIn = null;
  ByteArrayOutputStream bytesOut = null;
  byte[] buf = null;
  BlockDecompressorStream blockDecompressorStream = null;
  try {
    // compress empty stream
    bytesOut = new ByteArrayOutputStream();
    BlockCompressorStream blockCompressorStream =
        new BlockCompressorStream(bytesOut, new Lz4Compressor(), 1024, 0);
    // close without write
    blockCompressorStream.close();
    // check compressed output: even an empty stream carries a 4-byte block-length header
    buf = bytesOut.toByteArray();
    assertEquals("empty stream compressed output size != 4", 4, buf.length);
    // use compressed output as input for decompression
    bytesIn = new ByteArrayInputStream(buf);
    // create decompression stream
    blockDecompressorStream =
        new BlockDecompressorStream(bytesIn, new Lz4Decompressor(), 1024);
    // nothing was written, so the stream immediately reports end-of-stream
    assertEquals("return value is not -1", -1, blockDecompressorStream.read());
  } catch (Exception e) {
    fail("testCompressorDecompressorEmptyStreamLogic ex error !!!" + e.getMessage());
  } finally {
    if (blockDecompressorStream != null) {
      try {
        bytesIn.close();
        bytesOut.close();
        blockDecompressorStream.close();
      } catch (IOException e) {
        // ignore close failures during cleanup
      }
    }
  }
}
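For contrast, a minimal sketch of the non-empty case through the same Hadoop stream classes, showing that data written before close() comes back intact (the test name and payload are illustrative; the constructors match the usage above):

// Sketch: non-empty round trip through BlockCompressorStream/BlockDecompressorStream.
@Test
public void testCompressorDecompressorNonEmptyRoundTrip() throws IOException {
  byte[] original = "hello lz4".getBytes(java.nio.charset.StandardCharsets.UTF_8);
  ByteArrayOutputStream sink = new ByteArrayOutputStream();
  BlockCompressorStream out = new BlockCompressorStream(sink, new Lz4Compressor(), 1024, 0);
  out.write(original);
  // close() flushes the final block and its length header
  out.close();
  BlockDecompressorStream in = new BlockDecompressorStream(
      new ByteArrayInputStream(sink.toByteArray()), new Lz4Decompressor(), 1024);
  byte[] restored = new byte[original.length];
  org.apache.hadoop.io.IOUtils.readFully(in, restored, 0, restored.length);
  in.close();
  assertArrayEquals(original, restored);
}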
Use of io.airlift.compress.lz4.Lz4Compressor in project hadoop by apache.
The class TestLz4CompressorDecompressor, method testCompressorDecopressorLogicWithCompressionStreams.
// test compress/decompress process through the CompressionOutputStream/CompressionInputStream API
@Test
public void testCompressorDecopressorLogicWithCompressionStreams() {
  DataOutputStream deflateOut = null;
  DataInputStream inflateIn = null;
  int BYTE_SIZE = 1024 * 100;
  byte[] bytes = generate(BYTE_SIZE);
  int bufferSize = 262144;
  int compressionOverhead = (bufferSize / 6) + 32;
  try {
    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
    CompressionOutputStream deflateFilter = new BlockCompressorStream(
        compressedDataBuffer, new Lz4Compressor(bufferSize), bufferSize, compressionOverhead);
    deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(bytes, 0, bytes.length);
    deflateOut.flush();
    deflateFilter.finish();
    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0, compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter = new BlockDecompressorStream(
        deCompressedDataBuffer, new Lz4Decompressor(bufferSize), bufferSize);
    inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
    byte[] result = new byte[BYTE_SIZE];
    // note: a single read() is not guaranteed to fill the buffer; readFully would be safer
    inflateIn.read(result);
    assertArrayEquals("original array not equals compress/decompressed array", result, bytes);
  } catch (IOException e) {
    fail("testLz4CompressorDecopressorLogicWithCompressionStreams ex error !!!");
  } finally {
    try {
      if (deflateOut != null)
        deflateOut.close();
      if (inflateIn != null)
        inflateIn.close();
    } catch (Exception e) {
      // ignore close failures during cleanup
    }
  }
}
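Because InputStream.read(byte[]) may return before the buffer is full, a more robust version of the read step uses DataInputStream.readFully, which blocks until the whole buffer is filled or throws EOFException. A sketch of the replacement lines:

byte[] result = new byte[BYTE_SIZE];
// readFully either fills all BYTE_SIZE bytes or throws EOFException
inflateIn.readFully(result);
assertArrayEquals("original array not equals compress/decompressed array", result, bytes);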
Use of io.airlift.compress.lz4.Lz4Compressor in project hadoop by apache.
The class TestLz4CompressorDecompressor, method testCompressDecompress.
// test compress/decompress process
@Test
public void testCompressDecompress() {
  int BYTE_SIZE = 1024 * 54;
  byte[] bytes = generate(BYTE_SIZE);
  Lz4Compressor compressor = new Lz4Compressor();
  try {
    compressor.setInput(bytes, 0, bytes.length);
    assertTrue("Lz4CompressDecompress getBytesRead error !!!", compressor.getBytesRead() > 0);
    assertTrue("Lz4CompressDecompress getBytesWritten before compress error !!!",
        compressor.getBytesWritten() == 0);
    byte[] compressed = new byte[BYTE_SIZE];
    int cSize = compressor.compress(compressed, 0, compressed.length);
    assertTrue("Lz4CompressDecompress getBytesWritten after compress error !!!",
        compressor.getBytesWritten() > 0);
    Lz4Decompressor decompressor = new Lz4Decompressor();
    // feed the decompressor only the cSize bytes of actual compressed data
    decompressor.setInput(compressed, 0, cSize);
    byte[] decompressed = new byte[BYTE_SIZE];
    decompressor.decompress(decompressed, 0, decompressed.length);
    assertTrue("testLz4CompressDecompress finished error !!!", decompressor.finished());
    assertArrayEquals(bytes, decompressed);
    compressor.reset();
    decompressor.reset();
    assertTrue("decompressor getRemaining error !!!", decompressor.getRemaining() == 0);
  } catch (Exception e) {
    fail("testLz4CompressDecompress ex error!!!");
  }
}
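This test drives a single compress() call, which works because the input fits the compressor's buffer. For larger inputs, the Hadoop Compressor contract is a loop over finished() after finish() is signaled. A minimal sketch of that loop, assuming the org.apache.hadoop.io.compress.Compressor interface used above (the helper name and chunk size are illustrative):

// Drain a Compressor into an output stream until all input is consumed (sketch).
static void compressAll(Compressor compressor, byte[] input, OutputStream out) throws IOException {
  compressor.setInput(input, 0, input.length);
  compressor.finish(); // signal that no more input will arrive
  byte[] chunk = new byte[64 * 1024];
  while (!compressor.finished()) {
    int n = compressor.compress(chunk, 0, chunk.length);
    if (n > 0) {
      out.write(chunk, 0, n);
    }
  }
}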
Use of io.airlift.compress.lz4.Lz4Compressor in project SilverKing by Morgan-Stanley.
The class LZ4, method compress.
public byte[] compress(byte[] rawValue, int offset, int length) throws IOException {
  LZ4Compressor compressor;
  int maxCompressedLength;
  byte[] compressed;
  int compressedLength;
  byte[] buf;
  compressor = factory.fastCompressor();
  maxCompressedLength = compressor.maxCompressedLength(length);
  compressed = new byte[maxCompressedLength];
  // note: the offset parameter is not applied; compression always starts at index 0
  compressedLength = compressor.compress(rawValue, 0, length, compressed, 0, maxCompressedLength);
  // copy into a right-sized array
  buf = new byte[compressedLength];
  System.arraycopy(compressed, 0, buf, 0, compressedLength);
  // FUTURE - eliminate the copy
  return buf;
}
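This snippet appears to use the lz4-java API (LZ4Factory.fastCompressor()) rather than airlift's compressor. With LZ4 block compression the output does not record the original length, so the decompress side must be told how many bytes to restore. A sketch of a matching counterpart under that assumption (the method and its originalLength parameter are hypothetical, not SilverKing's actual code):

// Hypothetical counterpart: decompress, assuming the caller tracked the original length.
public byte[] decompress(byte[] compressedValue, int originalLength) {
  LZ4FastDecompressor decompressor = factory.fastDecompressor();
  byte[] restored = new byte[originalLength];
  // reads exactly the compressed bytes needed to produce originalLength output bytes
  decompressor.decompress(compressedValue, 0, restored, 0, originalLength);
  return restored;
}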