Use of net.jpountz.lz4.LZ4Compressor in project chunkstories by Hugobros3.
The class ChunkHolderImplementation, method compressChunkData.
/**
 * This method is called assuming the chunk is properly locked.
 */
private CompressedData compressChunkData(final CubicChunk chunk) {
    final int changesTakenIntoAccount = chunk.compr_uncomittedBlockModifications.get();

    // Stage 1: Compress the actual voxel data
    byte[] voxelCompressedData;
    if (!chunk.isAirChunk()) {
        // Heuristic value for the size of the buffer: fixed voxel size + factor of components & entities
        // + chunk.voxelComponents.size() * 1024 + chunk.localEntities.size() * 2048;
        int uncompressedStuffBufferSize = 32 * 32 * 32 * 4;
        ByteBuffer uncompressedStuff = MemoryUtil.memAlloc(uncompressedStuffBufferSize);
        uncompressedStuff.asIntBuffer().put(chunk.chunkVoxelData);
        // uncompressedStuff.flip();

        ByteBuffer compressedStuff = MemoryUtil.memAlloc(uncompressedStuffBufferSize + 2048);
        LZ4Compressor compressor = factory.fastCompressor();
        compressor.compress(uncompressedStuff, compressedStuff);

        // No longer need that buffer
        MemoryUtil.memFree(uncompressedStuff);

        // Make a Java byte[] array to put the final stuff in
        voxelCompressedData = new byte[compressedStuff.position()];
        compressedStuff.flip();
        compressedStuff.get(voxelCompressedData);

        // No longer need that buffer either
        MemoryUtil.memFree(compressedStuff);
    } else {
        // Just use a symbolic null here
        voxelCompressedData = null;
    }

    // Stage 2: Take care of the voxel components
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream daos = new DataOutputStream(baos);

    // ByteBuffer smallBuffer = MemoryUtil.memAlloc(4096);
    // byte[] smallArray = new byte[4096];
    // ByteBufferOutputStream bbos = new ByteBufferOutputStream(smallBuffer);
    ByteArrayOutputStream bbos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bbos);
    try {
        // For all cells that have components
        for (CellComponentsHolder voxelComponents : chunk.allCellComponents.values()) {
            // Write a 1 then their in-chunk index
            daos.writeByte((byte) 0x01);
            daos.writeInt(voxelComponents.getIndex());
            // For all components in this cell
            for (Entry<String, VoxelComponent> entry : voxelComponents.all()) {
                // Write component name
                daos.writeUTF(entry.getKey());
                // Push the component in the temporary buffer
                entry.getValue().push(region.handler, dos);
                // smallBuffer.flip();
                byte[] bytesPushed = bbos.toByteArray();
                bbos.reset();
                // Write how many bytes the temporary buffer now contains
                // int bytesPushed = smallBuffer.limit();
                daos.writeShort(bytesPushed.length);
                // Get those bytes as an array then write it in the compressed stuff
                // smallBuffer.get(smallArray);
                daos.write(bytesPushed, 0, bytesPushed.length);
                // Reset the temporary buffer
                // smallBuffer.clear();
            }
            daos.writeUTF("\n");
        }
        // Write the final 00 to mark the end of the voxel components
        daos.writeByte((byte) 0x00);
        // Since we output to a local buffer, any failure is viewed as catastrophic
    } catch (IOException e) {
        assert false;
    }
    // Extract the byte array from the baos
    byte[] voxelComponentsData = baos.toByteArray();
    // MemoryUtil.memFree(smallBuffer);

    // Stage 3: Compress entities
    baos.reset();
    for (Entity entity : chunk.localEntities) {
        // Don't save controllable entities
        if (entity.exists() && !(entity instanceof EntityUnsaveable && !((EntityUnsaveable) entity).shouldSaveIntoRegion())) {
            EntitySerializer.writeEntityToStream(daos, region.handler, entity);
        }
    }
    EntitySerializer.writeEntityToStream(daos, region.handler, null);
    byte[] entityData = baos.toByteArray();

    // Subtract only the modifications that were counted when this method started, so that
    // concurrent modifications made in the meantime are still taken into account
    chunk.compr_uncomittedBlockModifications.addAndGet(-changesTakenIntoAccount);
    return new CompressedData(voxelCompressedData, voxelComponentsData, entityData);
}
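For reference, the voxel payload produced in Stage 1 can be restored with the same lz4-java (net.jpountz.lz4) API. A minimal sketch, assuming the fixed uncompressed size of 32 * 32 * 32 * 4 bytes used above; the helper method is illustrative and not part of chunkstories:

import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4FastDecompressor;

// Illustrative inverse of Stage 1 above (assumed helper, not actual chunkstories code)
byte[] decompressVoxelData(byte[] voxelCompressedData) {
    if (voxelCompressedData == null)
        return null; // air chunk: nothing was stored
    int uncompressedSize = 32 * 32 * 32 * 4; // same fixed size the compressor assumed
    LZ4FastDecompressor decompressor = LZ4Factory.fastestInstance().fastDecompressor();
    // decompress(src, destLen) allocates and returns destLen uncompressed bytes
    return decompressor.decompress(voxelCompressedData, uncompressedSize);
}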
Use of net.jpountz.lz4.LZ4Compressor in project vespa by vespa-engine.
The class BasicPacket, method encodeAndCompressBody.
protected void encodeAndCompressBody(ByteBuffer buffer, int startPosition) {
    int startOfBody = buffer.position();
    encodeBody(buffer);
    setEncodedBody(buffer, startOfBody, buffer.position() - startOfBody);
    length = buffer.position() - startPosition;
    if (compressionLimit != 0 && length - 4 > compressionLimit) {
        byte[] compressedBody;
        compressionType = CompressionType.LZ4;
        LZ4Factory factory = LZ4Factory.fastestInstance();
        LZ4Compressor compressor = factory.fastCompressor();
        compressedBody = compressor.compress(encodedBody);
        log.log(LogLevel.DEBUG, "Uncompressed size: " + encodedBody.length + ", Compressed size: " + compressedBody.length);
        if (compressedBody.length + 4 < encodedBody.length) {
            buffer.position(startPosition);
            // +4 for compressed size
            buffer.putInt(compressedBody.length + startOfBody - startPosition + 4 - 4);
            buffer.putInt(getCompressedCode(compressionType));
            buffer.position(startOfBody);
            buffer.putInt(encodedBody.length);
            buffer.put(compressedBody);
            buffer.limit(buffer.position());
            return;
        }
    }
    // Encoded length is 4 less than the actual length
    buffer.putInt(startPosition, length - 4);
    buffer.limit(buffer.position());
}
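The receiving side would read the stored uncompressed length and inflate the body with the matching lz4-java decompressor. A minimal sketch under the layout written above ([uncompressed length: 4 bytes][LZ4 block]); the method name and the compressedBlockLength parameter are assumptions, not Vespa's actual decode path:

import java.nio.ByteBuffer;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4FastDecompressor;

// Illustrative inverse of encodeAndCompressBody above
byte[] decodeCompressedBody(ByteBuffer buffer, int compressedBlockLength) {
    int uncompressedLength = buffer.getInt();          // written at startOfBody above
    byte[] compressed = new byte[compressedBlockLength];
    buffer.get(compressed);                            // the LZ4 block itself
    LZ4FastDecompressor decompressor = LZ4Factory.fastestInstance().fastDecompressor();
    return decompressor.decompress(compressed, uncompressedLength);
}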
Use of net.jpountz.lz4.LZ4Compressor in project vespa by vespa-engine.
The class NormalSketch, method onSerialize.
/**
 * Serializes the Sketch.
 *
 * Serialization format:
 *   Original size:    4 bytes
 *   Compressed size:  4 bytes
 *   Compressed data:  N * 1 bytes
 *
 * Invariant:
 *   compressed size <= original size
 *
 * Special case:
 *   compressed size == original size => data is uncompressed
 *
 * @param buf Serializer
 */
@Override
protected void onSerialize(Serializer buf) {
    super.onSerialize(buf);
    buf.putInt(null, data.length);
    try {
        LZ4Compressor c = LZ4Factory.safeInstance().highCompressor();
        byte[] compressedData = new byte[data.length];
        int compressedSize = c.compress(data, compressedData);
        serializeDataArray(compressedData, compressedSize, buf);
    } catch (LZ4Exception e) {
        // LZ4Compressor.compress will throw this exception if it is unable to compress
        // into compressedData (when compressed size >= original size)
        serializeDataArray(data, data.length, buf);
    }
}
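The documented format makes the matching read side simple: read both sizes, and only run the LZ4 decompressor when they differ. A minimal sketch under that assumption, using a plain ByteBuffer in place of Vespa's Serializer; this is not the actual NormalSketch deserialization code:

import java.nio.ByteBuffer;
import net.jpountz.lz4.LZ4Factory;

// Illustrative read side of the format documented above
byte[] readSketchData(ByteBuffer buf) {
    int originalSize = buf.getInt();
    int compressedSize = buf.getInt();
    byte[] stored = new byte[compressedSize];
    buf.get(stored);
    if (compressedSize == originalSize) {
        // Special case: the data was stored uncompressed
        return stored;
    }
    // Otherwise inflate the LZ4 block back to its original size
    return LZ4Factory.safeInstance().fastDecompressor().decompress(stored, originalSize);
}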
Use of io.airlift.compress.lz4.Lz4Compressor in project presto by prestodb.
The class TestParquetCompressionUtils, method performTest.
private void performTest(CompressionCodecName codec, int inputLength) throws IOException {
    Compressor compressor = null;
    if (codec.equals(ZSTD)) {
        compressor = new ZstdCompressor();
    } else if (codec.equals(LZ4)) {
        compressor = new Lz4Compressor();
    }
    byte[] input = createArray(inputLength);
    byte[] output = new byte[inputLength + 512];
    int retLength = compress(compressor, input, inputLength, output, 0);
    Slice decompressedSlice = ParquetCompressionUtils.decompress(codec, wrappedBuffer(output, 0, retLength), inputLength);
    assertEquals(decompressedSlice, wrappedBuffer(input));
}
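The compress helper is not shown in this excerpt. With airlift's Compressor interface it plausibly reduces to a single call; the sketch below mirrors the arguments used above but is an assumption, not the test's actual helper:

import io.airlift.compress.Compressor;

// Assumed shape of the compress helper called above; returns the number of compressed bytes written
private static int compress(Compressor compressor, byte[] input, int inputLength, byte[] output, int outputOffset) {
    return compressor.compress(input, 0, inputLength, output, outputOffset, output.length - outputOffset);
}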
Use of io.airlift.compress.lz4.Lz4Compressor in project presto by prestodb.
The class PageFileWriterFactory, method createPagesSerdeForPageFile.
public static PagesSerde createPagesSerdeForPageFile(BlockEncodingSerde blockEncodingSerde, Optional<HiveCompressionCodec> compressionCodec) {
    if (!compressionCodec.isPresent()) {
        return new PagesSerde(blockEncodingSerde, Optional.empty(), Optional.empty(), Optional.empty());
    }
    PageCompressor pageCompressor = null;
    PageDecompressor pageDecompressor = null;
    switch (compressionCodec.get()) {
        case NONE:
            break;
        case SNAPPY:
            pageCompressor = new AirliftCompressorAdapter(new SnappyCompressor());
            pageDecompressor = new AirliftDecompressorAdapter(new SnappyDecompressor());
            break;
        case LZ4:
            pageCompressor = new AirliftCompressorAdapter(new Lz4Compressor());
            pageDecompressor = new AirliftDecompressorAdapter(new Lz4Decompressor());
            break;
        case GZIP:
            pageCompressor = new AirliftCompressorAdapter(new DeflateCompressor(OptionalInt.empty()));
            pageDecompressor = new AirliftDecompressorAdapter(new InflateDecompressor());
            break;
        case ZSTD:
            pageCompressor = new AirliftCompressorAdapter(new ZstdJniCompressor(OptionalInt.empty()));
            pageDecompressor = new AirliftDecompressorAdapter(new ZstdJniDecompressor());
            break;
        default:
            throw new PrestoException(GENERIC_INTERNAL_ERROR, format("%s compression is not supported for %s", compressionCodec.get().name(), PAGEFILE.getOutputFormat()));
    }
    return new PagesSerde(blockEncodingSerde, Optional.ofNullable(pageCompressor), Optional.ofNullable(pageDecompressor), Optional.empty());
}
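For the LZ4 branch, the adapters wrap airlift's (aircompressor) block codec. A minimal round-trip sketch with the raw Lz4Compressor and Lz4Decompressor, independent of the Presto adapter classes; the helper name is illustrative:

import io.airlift.compress.lz4.Lz4Compressor;
import io.airlift.compress.lz4.Lz4Decompressor;

// Round-trip sketch with the raw airlift LZ4 codec that the adapters above wrap
static byte[] lz4RoundTrip(byte[] data) {
    Lz4Compressor compressor = new Lz4Compressor();
    byte[] compressed = new byte[compressor.maxCompressedLength(data.length)];
    int compressedSize = compressor.compress(data, 0, data.length, compressed, 0, compressed.length);

    Lz4Decompressor decompressor = new Lz4Decompressor();
    byte[] restored = new byte[data.length];
    decompressor.decompress(compressed, 0, compressedSize, restored, 0, restored.length);
    return restored;
}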