
Example 1 with StreamCompressionInputStream

Use of org.apache.cassandra.streaming.compress.StreamCompressionInputStream in project cassandra by apache.

From the class CassandraStreamReader, method read:

/**
 * @param inputPlus where this reads data from
 * @return SSTable transferred
 * @throws IOException if reading the remote sstable fails; a RuntimeException is thrown if the local write fails.
 */
@SuppressWarnings("resource") // input needs to remain open, streams on top of it can't be closed
@Override
public SSTableMultiWriter read(DataInputPlus inputPlus) throws Throwable {
    long totalSize = totalSize();
    ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(tableId);
    if (cfs == null)
        // schema was dropped during streaming
        throw new IllegalStateException("Table " + tableId + " was dropped during streaming");
    logger.debug("[Stream #{}] Start receiving file #{} from {}, repairedAt = {}, size = {}, ks = '{}', table = '{}', pendingRepair = '{}'.", session.planId(), fileSeqNum, session.peer, repairedAt, totalSize, cfs.keyspace.getName(), cfs.getTableName(), pendingRepair);
    StreamDeserializer deserializer = null;
    SSTableMultiWriter writer = null;
    try (StreamCompressionInputStream streamCompressionInputStream = new StreamCompressionInputStream(inputPlus, current_version)) {
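        // TrackedDataInputPlus counts the uncompressed bytes consumed, driving
        // both the read loop below and the per-file progress reporting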
        TrackedDataInputPlus in = new TrackedDataInputPlus(streamCompressionInputStream);
        deserializer = new StreamDeserializer(cfs.metadata(), in, inputVersion, getHeader(cfs.metadata()));
        writer = createWriter(cfs, totalSize, repairedAt, pendingRepair, format);
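        // consume partitions until the advertised uncompressed size has been read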
        while (in.getBytesRead() < totalSize) {
            writePartition(deserializer, writer);
            // TODO move this to BytesReadTracker
            session.progress(writer.getFilename() + '-' + fileSeqNum, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
        }
        logger.debug("[Stream #{}] Finished receiving file #{} from {} readBytes = {}, totalSize = {}", session.planId(), fileSeqNum, session.peer, FBUtilities.prettyPrintMemory(in.getBytesRead()), FBUtilities.prettyPrintMemory(totalSize));
        return writer;
    } catch (Throwable e) {
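        // the partition key is only known if deserialization had already begun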
        Object partitionKey = deserializer != null ? deserializer.partitionKey() : "";
        logger.warn("[Stream {}] Error while reading partition {} from stream on ks='{}' and table='{}'.", session.planId(), partitionKey, cfs.keyspace.getName(), cfs.getTableName(), e);
        if (writer != null)
            e = writer.abort(e);
        throw e;
    }
}
Also used: ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore), StreamCompressionInputStream (org.apache.cassandra.streaming.compress.StreamCompressionInputStream), TrackedDataInputPlus (org.apache.cassandra.io.util.TrackedDataInputPlus), SSTableMultiWriter (org.apache.cassandra.io.sstable.SSTableMultiWriter)
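
Stripped of the sstable-specific pieces, the wrap-then-track pattern in read above reduces to a short sketch. This is a minimal illustration, not Cassandra code: CompressedStreamDrain, drain, and messagingVersion are hypothetical names, while the StreamCompressionInputStream and TrackedDataInputPlus calls are the same ones used in the example, and the try-with-resources likewise closes only the decompressing wrapper, leaving the underlying inputPlus open.

import java.io.IOException;

import org.apache.cassandra.io.util.DataInputPlus;
import org.apache.cassandra.io.util.TrackedDataInputPlus;
import org.apache.cassandra.streaming.compress.StreamCompressionInputStream;

public class CompressedStreamDrain {
    // Hypothetical helper: consumes totalSize uncompressed bytes from a
    // compressed stream, mirroring the structure of CassandraStreamReader.read.
    public static long drain(DataInputPlus inputPlus, int messagingVersion, long totalSize) throws IOException {
        try (StreamCompressionInputStream compressed = new StreamCompressionInputStream(inputPlus, messagingVersion)) {
            // The tracker counts uncompressed bytes, so the loop terminates
            // once the advertised payload size has been consumed.
            TrackedDataInputPlus in = new TrackedDataInputPlus(compressed);
            while (in.getBytesRead() < totalSize)
                in.readByte(); // a real reader would deserialize partitions here
            return in.getBytesRead();
        }
    }
}

Keeping the byte tracker separate from the decompressor is what lets read report progress against totalSize without depending on the wire compression format.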

Aggregations

ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 1 usage
SSTableMultiWriter (org.apache.cassandra.io.sstable.SSTableMultiWriter): 1 usage
TrackedDataInputPlus (org.apache.cassandra.io.util.TrackedDataInputPlus): 1 usage
StreamCompressionInputStream (org.apache.cassandra.streaming.compress.StreamCompressionInputStream): 1 usage