Example 1 with LZFOutputStream

use of com.ning.compress.lzf.LZFOutputStream in project camel by apache.

the class LZFDataFormat method marshal.

@Override
public void marshal(final Exchange exchange, final Object graph, final OutputStream stream) throws Exception {
    InputStream is = exchange.getContext().getTypeConverter().mandatoryConvertTo(InputStream.class, exchange, graph);
    OutputStream compressedOutput = parallelCompression ? new PLZFOutputStream(stream) : new LZFOutputStream(stream);
    try {
        IOHelper.copy(is, compressedOutput);
    } finally {
        // close both streams; closing the compressed output also finishes the LZF stream
        IOHelper.close(is, compressedOutput);
    }
}
Also used : PLZFOutputStream(com.ning.compress.lzf.parallel.PLZFOutputStream) LZFInputStream(com.ning.compress.lzf.LZFInputStream) InputStream(java.io.InputStream) OutputStream(java.io.OutputStream) LZFOutputStream(com.ning.compress.lzf.LZFOutputStream)
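
For comparison, a minimal sketch of the matching decompression path. This is illustrative only, not Camel's unmarshal implementation; since PLZFOutputStream writes the same chunk format as LZFOutputStream, a single LZFInputStream can read the output of either.

import com.ning.compress.lzf.LZFInputStream;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

public final class LzfDecompressSketch {
    // Reads an LZF-compressed stream (written by LZFOutputStream or
    // PLZFOutputStream) and returns the decompressed bytes.
    public static byte[] decompress(InputStream compressedInput) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (InputStream in = new LZFInputStream(compressedInput)) {
            byte[] buffer = new byte[8192];
            int n;
            while ((n = in.read(buffer)) != -1) {
                out.write(buffer, 0, n);
            }
        }
        return out.toByteArray();
    }
}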

Example 2 with LZFOutputStream

use of com.ning.compress.lzf.LZFOutputStream in project cassandra by apache.

the class StreamWriter method write.

/**
     * Stream file of specified sections to given channel.
     *
     * StreamWriter uses LZF compression on wire to decrease size to transfer.
     *
     * @param output where this writes data to
     * @throws IOException on any I/O error
     */
public void write(DataOutputStreamPlus output) throws IOException {
    long totalSize = totalSize();
    logger.debug("[Stream #{}] Start streaming file {} to {}, repairedAt = {}, totalSize = {}", session.planId(), sstable.getFilename(), session.peer, sstable.getSSTableMetadata().repairedAt, totalSize);
    try (RandomAccessReader file = sstable.openDataReader();
        ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists() ? DataIntegrityMetadata.checksumValidator(sstable.descriptor) : null) {
        transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];
        // setting up data compression stream
        compressedOutput = new LZFOutputStream(output);
        long progress = 0L;
        // stream each of the required sections of the file
        for (Pair<Long, Long> section : sections) {
            long start = validator == null ? section.left : validator.chunkStart(section.left);
            int readOffset = (int) (section.left - start);
            // seek to the beginning of the section
            file.seek(start);
            if (validator != null)
                validator.seek(start);
            // length of the section to read
            long length = section.right - start;
            // tracks write progress
            long bytesRead = 0;
            while (bytesRead < length) {
                long lastBytesRead = write(file, validator, readOffset, length, bytesRead);
                bytesRead += lastBytesRead;
                progress += (lastBytesRead - readOffset);
                session.progress(sstable.descriptor.filenameFor(Component.DATA), ProgressInfo.Direction.OUT, progress, totalSize);
                readOffset = 0;
            }
            // make sure that current section is sent
            compressedOutput.flush();
        }
        logger.debug("[Stream #{}] Finished streaming file {} to {}, bytesTransferred = {}, totalSize = {}", session.planId(), sstable.getFilename(), session.peer, FBUtilities.prettyPrintMemory(progress), FBUtilities.prettyPrintMemory(totalSize));
    }
}
Also used : RandomAccessReader(org.apache.cassandra.io.util.RandomAccessReader) ChecksumValidator(org.apache.cassandra.io.util.DataIntegrityMetadata.ChecksumValidator) File(java.io.File) LZFOutputStream(com.ning.compress.lzf.LZFOutputStream)
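
On the receiving side, LZFInputStream from the same library undoes the on-wire compression; flush() on the LZFOutputStream compresses and writes whatever is currently buffered, so each section reaches the peer as complete LZF chunks. A minimal receiving-side sketch, not Cassandra's actual StreamReader, with socketInput and targetPath as hypothetical names:

import com.ning.compress.lzf.LZFInputStream;

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public final class LzfReceiveSketch {
    // Decompresses an LZF-framed stream from the peer into a local file.
    public static void receive(InputStream socketInput, String targetPath) throws IOException {
        try (InputStream in = new LZFInputStream(socketInput);
             OutputStream file = new FileOutputStream(targetPath)) {
            byte[] buffer = new byte[64 * 1024];
            int n;
            while ((n = in.read(buffer)) != -1) {
                file.write(buffer, 0, n);
            }
        }
    }
}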

Example 3 with LZFOutputStream

use of com.ning.compress.lzf.LZFOutputStream in project eiger by wlloyd.

the class FileStreamTask method stream.

/**
     * Stream the file by the sections specified by this.header
     * @throws IOException on any I/O error
     */
private void stream() throws IOException {
    ByteBuffer headerBuffer = MessagingService.instance().constructStreamHeader(header, false, Gossiper.instance.getVersion(to));
    // write header (this should not be compressed for compatibility with other messages)
    output.write(ByteBufferUtil.getArray(headerBuffer));
    if (header.file == null)
        return;
    // TODO just use a raw RandomAccessFile since we're managing our own buffer here
    // try to skip kernel page cache if possible
    RandomAccessReader file = (header.file.sstable.compression)
            ? CompressedRandomAccessReader.open(header.file.getFilename(), header.file.sstable.getCompressionMetadata(), true)
            : RandomAccessReader.open(new File(header.file.getFilename()), true);
    // setting up data compression stream
    compressedoutput = new LZFOutputStream(output);
    MessagingService.instance().incrementActiveStreamsOutbound();
    try {
        // stream each of the required sections of the file
        for (Pair<Long, Long> section : header.file.sections) {
            // seek to the beginning of the section
            file.seek(section.left);
            // length of the section to stream
            long length = section.right - section.left;
            // tracks write progress
            long bytesTransferred = 0;
            while (bytesTransferred < length) {
                long lastWrite = write(file, length, bytesTransferred);
                bytesTransferred += lastWrite;
                // store streaming progress
                header.file.progress += lastWrite;
            }
            // make sure that the current section is sent
            compressedoutput.flush();
            if (logger.isDebugEnabled())
                logger.debug("Bytes transferred " + bytesTransferred + "/" + header.file.size);
        }
        // receive reply confirmation
        receiveReply();
    } finally {
        MessagingService.instance().decrementActiveStreamsOutbound();
        // no matter what happens close file
        FileUtils.closeQuietly(file);
    }
}
Also used : CompressedRandomAccessReader(org.apache.cassandra.io.compress.CompressedRandomAccessReader) RandomAccessReader(org.apache.cassandra.io.util.RandomAccessReader) ByteBuffer(java.nio.ByteBuffer) LZFOutputStream(com.ning.compress.lzf.LZFOutputStream)

Example 4 with LZFOutputStream

use of com.ning.compress.lzf.LZFOutputStream in project GeoGig by boundlessgeo.

the class AbstractObjectDatabase method writeObject.

protected void writeObject(RevObject object, OutputStream target) {
    ObjectWriter<RevObject> writer = serializationFactory.createObjectWriter(object.getType());
    LZFOutputStream cOut = new LZFOutputStream(target);
    try {
        writer.write(object, cOut);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    } finally {
        try {
            cOut.flush();
            cOut.close();
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
    }
// int size = ((ByteArrayOutputStream) target).size();
// System.err.printf("%d,%s,%s\n", size, object.getId(), object.getType());
}
Also used : RevObject(org.locationtech.geogig.api.RevObject) IOException(java.io.IOException) IOException(java.io.IOException) LZFOutputStream(com.ning.compress.lzf.LZFOutputStream)

Example 5 with LZFOutputStream

use of com.ning.compress.lzf.LZFOutputStream in project GeoGig by boundlessgeo.

the class MongoObjectDatabase method toBytes.

private byte[] toBytes(RevObject object) {
    ObjectWriter<RevObject> writer = serializers.createObjectWriter(object.getType());
    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
    LZFOutputStream cOut = new LZFOutputStream(byteStream);
    try {
        writer.write(object, cOut);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    try {
        cOut.close();
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    return byteStream.toByteArray();
}
Also used : RevObject(org.locationtech.geogig.api.RevObject) ByteArrayOutputStream(java.io.ByteArrayOutputStream) IOException(java.io.IOException) LZFOutputStream(com.ning.compress.lzf.LZFOutputStream)
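
A matching fromBytes would reverse this; a minimal sketch, not GeoGig's actual reader path, assuming the stored bytes are exactly what toBytes produced. LZFDecoder.decode understands the chunked format written by LZFOutputStream; any further deserialization through an ObjectReader is omitted here.

import com.ning.compress.lzf.LZFDecoder;

import java.io.IOException;

public final class LzfBytesSketch {
    // Decompresses bytes produced by toBytes() back into the raw serialized form.
    // fromBytes is a hypothetical name used only for this illustration.
    public static byte[] fromBytes(byte[] compressed) throws IOException {
        return LZFDecoder.decode(compressed);
    }
}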

Aggregations

LZFOutputStream (com.ning.compress.lzf.LZFOutputStream) 5
IOException (java.io.IOException) 2
RandomAccessReader (org.apache.cassandra.io.util.RandomAccessReader) 2
RevObject (org.locationtech.geogig.api.RevObject) 2
LZFInputStream (com.ning.compress.lzf.LZFInputStream) 1
PLZFOutputStream (com.ning.compress.lzf.parallel.PLZFOutputStream) 1
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 1
File (java.io.File) 1
InputStream (java.io.InputStream) 1
OutputStream (java.io.OutputStream) 1
ByteBuffer (java.nio.ByteBuffer) 1
CompressedRandomAccessReader (org.apache.cassandra.io.compress.CompressedRandomAccessReader) 1
ChecksumValidator (org.apache.cassandra.io.util.DataIntegrityMetadata.ChecksumValidator) 1