use of com.ning.compress.lzf.LZFOutputStream in project camel by apache.
the class LZFDataFormat method marshal.
@Override
public void marshal(final Exchange exchange, final Object graph, final OutputStream stream) throws Exception {
    InputStream is = exchange.getContext().getTypeConverter().mandatoryConvertTo(InputStream.class, exchange, graph);
    OutputStream compressedOutput = parallelCompression ? new PLZFOutputStream(stream) : new LZFOutputStream(stream);
    try {
        IOHelper.copy(is, compressedOutput);
    } finally {
        // must close both the input and the compressed output stream
        IOHelper.close(is, compressedOutput);
    }
}
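The marshal method converts the payload to an InputStream and copies it through either a parallel (PLZFOutputStream) or single-threaded (LZFOutputStream) compressor. A minimal standalone sketch of the same round trip outside Camel, assuming only the compress-lzf library on the classpath (the class name LzfRoundTrip and sample payload are illustrative):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

import com.ning.compress.lzf.LZFInputStream;
import com.ning.compress.lzf.LZFOutputStream;

public class LzfRoundTrip {
    public static void main(String[] args) throws Exception {
        byte[] original = "payload to compress".getBytes(StandardCharsets.UTF_8);

        // compress: wrap the sink in LZFOutputStream and write through it
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try (LZFOutputStream compressedOutput = new LZFOutputStream(sink)) {
            compressedOutput.write(original);
        }

        // decompress: wrap the compressed bytes in LZFInputStream
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        try (LZFInputStream in = new LZFInputStream(new ByteArrayInputStream(sink.toByteArray()))) {
            byte[] buf = new byte[8192];
            for (int n; (n = in.read(buf)) != -1; ) {
                restored.write(buf, 0, n);
            }
        }
        System.out.println(Arrays.equals(original, restored.toByteArray())); // true
    }
}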
use of com.ning.compress.lzf.LZFOutputStream in project cassandra by apache.
the class StreamWriter method write.
/**
 * Stream the specified sections of the file to the given channel.
 *
 * StreamWriter uses LZF compression on the wire to reduce the transfer size.
 *
 * @param output where this writes data to
 * @throws IOException on any I/O error
 */
public void write(DataOutputStreamPlus output) throws IOException {
    long totalSize = totalSize();
    logger.debug("[Stream #{}] Start streaming file {} to {}, repairedAt = {}, totalSize = {}",
                 session.planId(), sstable.getFilename(), session.peer,
                 sstable.getSSTableMetadata().repairedAt, totalSize);
    try (RandomAccessReader file = sstable.openDataReader();
         ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
                                       ? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
                                       : null) {
        transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];
        // set up the on-wire data compression stream
        compressedOutput = new LZFOutputStream(output);
        long progress = 0L;
        // stream each of the required sections of the file
        for (Pair<Long, Long> section : sections) {
            long start = validator == null ? section.left : validator.chunkStart(section.left);
            int readOffset = (int) (section.left - start);
            // seek to the beginning of the section
            file.seek(start);
            if (validator != null)
                validator.seek(start);
            // length of the section to read
            long length = section.right - start;
            // tracks write progress
            long bytesRead = 0;
            while (bytesRead < length) {
                long lastBytesRead = write(file, validator, readOffset, length, bytesRead);
                bytesRead += lastBytesRead;
                progress += (lastBytesRead - readOffset);
                session.progress(sstable.descriptor.filenameFor(Component.DATA), ProgressInfo.Direction.OUT, progress, totalSize);
                readOffset = 0;
            }
            // make sure the current section is sent
            compressedOutput.flush();
        }
        logger.debug("[Stream #{}] Finished streaming file {} to {}, bytesTransferred = {}, totalSize = {}",
                     session.planId(), sstable.getFilename(), session.peer,
                     FBUtilities.prettyPrintMemory(progress), FBUtilities.prettyPrintMemory(totalSize));
    }
}
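The key pattern here is wrapping the wire-level output in an LZFOutputStream and flushing after each section, so buffered data is compressed and pushed to the peer at section boundaries. A minimal sketch of that copy-and-flush loop, with a plain InputStream standing in for Cassandra's RandomAccessReader (the streamSection helper and buffer size are illustrative, not part of the original):

import java.io.IOException;
import java.io.InputStream;

import com.ning.compress.lzf.LZFOutputStream;

public final class SectionStreamer {
    /** Copies exactly length bytes from in to out through LZF, flushing at the end of the section. */
    static void streamSection(InputStream in, LZFOutputStream out, long length) throws IOException {
        byte[] buffer = new byte[64 * 1024];
        long remaining = length;
        while (remaining > 0) {
            int n = in.read(buffer, 0, (int) Math.min(buffer.length, remaining));
            if (n == -1)
                throw new IOException("unexpected end of stream");
            out.write(buffer, 0, n);
            remaining -= n;
        }
        // push any buffered, compressed data to the peer before moving to the next section
        out.flush();
    }
}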
use of com.ning.compress.lzf.LZFOutputStream in project eiger by wlloyd.
the class FileStreamTask method stream.
/**
 * Stream the file by the sections specified in this.header
 * @throws IOException on any I/O error
 */
private void stream() throws IOException {
    ByteBuffer HeaderBuffer = MessagingService.instance().constructStreamHeader(header, false, Gossiper.instance.getVersion(to));
    // write header (this should not be compressed for compatibility with other messages)
    output.write(ByteBufferUtil.getArray(HeaderBuffer));
    if (header.file == null)
        return;
    // TODO just use a raw RandomAccessFile since we're managing our own buffer here
    // try to skip the kernel page cache if possible
    RandomAccessReader file = (header.file.sstable.compression)
                              ? CompressedRandomAccessReader.open(header.file.getFilename(), header.file.sstable.getCompressionMetadata(), true)
                              : RandomAccessReader.open(new File(header.file.getFilename()), true);
    // set up the data compression stream
    compressedoutput = new LZFOutputStream(output);
    MessagingService.instance().incrementActiveStreamsOutbound();
    try {
        // stream each of the required sections of the file
        for (Pair<Long, Long> section : header.file.sections) {
            // seek to the beginning of the section
            file.seek(section.left);
            // length of the section to stream
            long length = section.right - section.left;
            // tracks write progress
            long bytesTransferred = 0;
            while (bytesTransferred < length) {
                long lastWrite = write(file, length, bytesTransferred);
                bytesTransferred += lastWrite;
                // store streaming progress
                header.file.progress += lastWrite;
            }
            // make sure the current section is sent
            compressedoutput.flush();
            if (logger.isDebugEnabled())
                logger.debug("Bytes transferred " + bytesTransferred + "/" + header.file.size);
        }
        // receive reply confirmation
        receiveReply();
    } finally {
        MessagingService.instance().decrementActiveStreamsOutbound();
        // no matter what happens, close the file
        FileUtils.closeQuietly(file);
    }
}
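Note that the header is written to the raw output stream before the LZFOutputStream wrapper is created, so only the file body is compressed. A minimal sketch of that framing under the same assumption, with hypothetical header and body byte arrays (the send helper is illustrative):

import java.io.IOException;
import java.io.OutputStream;

import com.ning.compress.lzf.LZFOutputStream;

public final class HeaderThenCompressedBody {
    static void send(OutputStream output, byte[] header, byte[] body) throws IOException {
        // the header goes out uncompressed so the peer can parse it without LZF
        output.write(header);
        // everything written from here on is LZF-compressed
        LZFOutputStream compressed = new LZFOutputStream(output);
        compressed.write(body);
        // flush rather than close, so the underlying stream stays usable for replies
        compressed.flush();
    }
}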
use of com.ning.compress.lzf.LZFOutputStream in project GeoGig by boundlessgeo.
the class AbstractObjectDatabase method writeObject.
protected void writeObject(RevObject object, OutputStream target) {
    ObjectWriter<RevObject> writer = serializationFactory.createObjectWriter(object.getType());
    LZFOutputStream cOut = new LZFOutputStream(target);
    try {
        writer.write(object, cOut);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    } finally {
        try {
            cOut.flush();
            cOut.close();
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
    }
    // int size = ((ByteArrayOutputStream) target).size();
    // System.err.printf("%d,%s,%s\n", size, object.getId(), object.getType());
}
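Since LZFOutputStream is Closeable, the explicit flush/close in the finally block can also be written with try-with-resources; close() flushes any remaining compressed data before closing the underlying stream. A sketch of the equivalent, with the GeoGig serializer replaced by a plain byte-array payload (CompressedWrite and writeCompressed are illustrative names):

import java.io.IOException;
import java.io.OutputStream;
import java.io.UncheckedIOException;

import com.ning.compress.lzf.LZFOutputStream;

public final class CompressedWrite {
    static void writeCompressed(byte[] payload, OutputStream target) {
        // try-with-resources flushes and closes cOut even if the write throws
        try (LZFOutputStream cOut = new LZFOutputStream(target)) {
            cOut.write(payload);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}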
use of com.ning.compress.lzf.LZFOutputStream in project GeoGig by boundlessgeo.
the class MongoObjectDatabase method toBytes.
private byte[] toBytes(RevObject object) {
    ObjectWriter<RevObject> writer = serializers.createObjectWriter(object.getType());
    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
    LZFOutputStream cOut = new LZFOutputStream(byteStream);
    try {
        writer.write(object, cOut);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    try {
        cOut.close();
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    return byteStream.toByteArray();
}
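For whole-buffer compression to an in-memory byte array like this, compress-lzf also offers a block API that avoids the stream wrappers. A sketch of the round trip using LZFEncoder and LZFDecoder (the sample payload is illustrative):

import java.util.Arrays;

import com.ning.compress.lzf.LZFDecoder;
import com.ning.compress.lzf.LZFEncoder;
import com.ning.compress.lzf.LZFException;

public final class BlockApiExample {
    public static void main(String[] args) throws LZFException {
        byte[] original = new byte[4096]; // zero-filled buffer compresses well
        byte[] compressed = LZFEncoder.encode(original);
        byte[] restored = LZFDecoder.decode(compressed);
        System.out.println(compressed.length + " bytes compressed, round trip ok: "
                + Arrays.equals(original, restored));
    }
}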