
Example 1 with BytesReadTracker

Use of org.apache.cassandra.utils.BytesReadTracker in project eiger by wlloyd.

The streamIn method of the IncomingStreamReader class wraps the incoming DataInput in a BytesReadTracker so it can advance the per-section and overall progress counters by exactly the number of bytes each streamed row consumed:

private SSTableReader streamIn(DataInput input, PendingFile localFile, PendingFile remoteFile) throws IOException {
    ColumnFamilyStore cfs = Table.open(localFile.desc.ksname).getColumnFamilyStore(localFile.desc.cfname);
    DecoratedKey key;
    SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
    CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
    try {
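        // Wrap the raw stream so every read below goes through the byte counter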
        BytesReadTracker in = new BytesReadTracker(input);
        for (Pair<Long, Long> section : localFile.sections) {
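            // Each section is a (start, end) byte range; stream rows until the whole range is consumed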
            long length = section.right - section.left;
            long bytesRead = 0;
            while (bytesRead < length) {
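                // Zero the counter so getBytesRead() below reports only this row's bytes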
                in.reset(0);
                key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                long dataSize = SSTableReader.readRowSize(in, localFile.desc);
                ColumnFamily cached = cfs.getRawCachedRow(key);
                if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit()) {
                    // need to update row cache
                    // Note: because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, unlike appendFromStream below
                    SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                    PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                    // We don't expire anything so the row shouldn't be empty
                    assert !row.isEmpty();
                    writer.append(row);
                    // row append does not update the max timestamp on its own
                    writer.updateMaxTimestamp(row.maxTimestamp());
                    // update cache
                    ColumnFamily cf = row.getFullColumnFamily();
                    cfs.updateRowCache(key, cf);
                } else {
                    writer.appendFromStream(key, cfs.metadata, dataSize, in);
                    cfs.invalidateCachedRow(key);
                }
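                // The tracker has now counted the key, the row-size header, and the row data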
                bytesRead += in.getBytesRead();
                remoteFile.progress += in.getBytesRead();
            }
        }
        return writer.closeAndOpenReader();
    } catch (Exception e) {
        writer.abort();
        if (e instanceof IOException)
            throw (IOException) e;
        else
            throw FBUtilities.unchecked(e);
    }
}
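
For context, here is a minimal, self-contained sketch of the same tracking pattern in isolation. It assumes BytesReadTracker implements java.io.DataInput and exposes the BytesReadTracker(DataInput) constructor, reset(long), and getBytesRead() exactly as the method above uses them; the buffer layout and the class name BytesReadTrackerDemo are invented for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.cassandra.utils.BytesReadTracker;

public class BytesReadTrackerDemo {
    public static void main(String[] args) throws IOException {
        // Build a buffer holding two length-prefixed "keys", each followed by a long,
        // mimicking the key + row-size header that streamIn reads per row
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer);
        for (String key : new String[] { "key1", "key2" }) {
            byte[] bytes = key.getBytes(StandardCharsets.UTF_8);
            out.writeShort(bytes.length); // short length prefix
            out.write(bytes);
            out.writeLong(1024L);         // stand-in for the row-size header
        }

        BytesReadTracker in = new BytesReadTracker(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        for (int row = 0; row < 2; row++) {
            in.reset(0); // zero the counter so each row is measured independently
            byte[] key = new byte[in.readShort()];
            in.readFully(key);
            long rowSize = in.readLong();
            // 2 (short) + 4 (key) + 8 (long) = 14 bytes per row
            System.out.println(new String(key, StandardCharsets.UTF_8) + " consumed " + in.getBytesRead() + " bytes, rowSize=" + rowSize);
        }
    }
}

Resetting the counter at the top of each iteration is what lets streamIn advance bytesRead and remoteFile.progress by exactly the bytes the current row consumed.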
Also used:
CompactionController (org.apache.cassandra.db.compaction.CompactionController)
DecoratedKey (org.apache.cassandra.db.DecoratedKey)
ColumnFamily (org.apache.cassandra.db.ColumnFamily)
BytesReadTracker (org.apache.cassandra.utils.BytesReadTracker)
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)
PrecompactedRow (org.apache.cassandra.db.compaction.PrecompactedRow)
