Example usage of com.ning.compress.lzf.LZFInputStream in the Apache Camel project:
class LZFDataFormat, method unmarshal.
@Override
public Object unmarshal(final Exchange exchange, final InputStream inputStream) throws Exception {
InputStream compressedInput = null;
OutputStreamBuilder osb = OutputStreamBuilder.withExchange(exchange);
try {
compressedInput = new LZFInputStream(inputStream);
IOHelper.copy(compressedInput, osb);
return osb.build();
} finally {
// must close all input streams
IOHelper.close(osb, compressedInput, inputStream);
}
}
Example usage of com.ning.compress.lzf.LZFInputStream in the GeoGig project (boundlessgeo):
class HeapObjectDatabse, method getAll.
@Override
public Iterator<RevObject> getAll(final Iterable<ObjectId> ids, final BulkOpListener listener) {
return new AbstractIterator<RevObject>() {
final Iterator<ObjectId> iterator = ids.iterator();
@Override
protected RevObject computeNext() {
RevObject found = null;
ObjectId id;
byte[] raw;
while (iterator.hasNext() && found == null) {
id = iterator.next();
raw = objects.get(id);
if (raw != null) {
try {
found = serializationFactory.createObjectReader().read(id, new LZFInputStream(new ByteArrayInputStream(raw)));
} catch (IOException e) {
throw Throwables.propagate(e);
}
listener.found(found.getId(), raw.length);
} else {
listener.notFound(id);
}
}
return found == null ? endOfData() : found;
}
};
}
Example usage of com.ning.compress.lzf.LZFInputStream in the Apache Cassandra project:
class StreamReader, method read.
/**
* @param channel where this reads data from
* @return SSTable transferred
* @throws IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
*/
// channel needs to remain open, streams on top of it can't be closed
@SuppressWarnings("resource")
public SSTableMultiWriter read(ReadableByteChannel channel) throws IOException {
long totalSize = totalSize();
ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(tableId);
if (cfs == null) {
// schema was dropped during streaming
throw new IOException("CF " + tableId + " was dropped during streaming");
}
logger.debug("[Stream #{}] Start receiving file #{} from {}, repairedAt = {}, size = {}, ks = '{}', table = '{}', pendingRepair = '{}'.", session.planId(), fileSeqNum, session.peer, repairedAt, totalSize, cfs.keyspace.getName(), cfs.getTableName(), session.getPendingRepair());
TrackedInputStream in = new TrackedInputStream(new LZFInputStream(Channels.newInputStream(channel)));
StreamDeserializer deserializer = new StreamDeserializer(cfs.metadata(), in, inputVersion, getHeader(cfs.metadata()));
SSTableMultiWriter writer = null;
try {
writer = createWriter(cfs, totalSize, repairedAt, session.getPendingRepair(), format);
while (in.getBytesRead() < totalSize) {
writePartition(deserializer, writer);
// TODO move this to BytesReadTracker
session.progress(writer.getFilename(), ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
}
logger.debug("[Stream #{}] Finished receiving file #{} from {} readBytes = {}, totalSize = {}", session.planId(), fileSeqNum, session.peer, FBUtilities.prettyPrintMemory(in.getBytesRead()), FBUtilities.prettyPrintMemory(totalSize));
return writer;
} catch (Throwable e) {
logger.warn("[Stream {}] Error while reading partition {} from stream on ks='{}' and table='{}'.", session.planId(), deserializer.partitionKey(), cfs.keyspace.getName(), cfs.getTableName(), e);
if (writer != null) {
writer.abort(e);
}
throw Throwables.propagate(e);
}
}
Example usage of com.ning.compress.lzf.LZFInputStream in the Apache Camel project:
class LZFDataFormatTest, method testMarshalTextToLzf.
@Test
public void testMarshalTextToLzf() throws Exception {
byte[] output = sendText("direct:textToLzf");
InputStream stream = new LZFInputStream(new ByteArrayInputStream(output));
String result = IOConverter.toString(stream, null);
assertEquals("Uncompressed something different than compressed", TEXT, result);
}
Example usage of com.ning.compress.lzf.LZFInputStream in the eiger project (wlloyd):
class IncomingStreamReader, method read.
public void read() throws IOException {
if (remoteFile != null) {
if (logger.isDebugEnabled()) {
logger.debug("Receiving stream");
logger.debug("Creating file for {} with {} estimated keys", localFile.getFilename(), remoteFile.estimatedKeys);
}
assert remoteFile.estimatedKeys > 0;
SSTableReader reader = null;
logger.debug("Estimated keys {}", remoteFile.estimatedKeys);
DataInputStream dis = new DataInputStream(new LZFInputStream(socket.getInputStream()));
try {
reader = streamIn(dis, localFile, remoteFile);
session.finished(remoteFile, reader);
} catch (IOException ex) {
retry();
throw ex;
}
}
session.closeIfFinished();
}
Aggregations