Use of org.apache.cassandra.streaming.StreamReceiver in project cassandra by apache.
From the class CassandraEntireSSTableStreamReader, the method createWriter:
@SuppressWarnings("resource")
protected BigTableZeroCopyWriter createWriter(ColumnFamilyStore cfs, long totalSize, Collection<Component> components) throws IOException
{
    File dataDir = getDataDir(cfs, totalSize);

    // The session hands back the per-table StreamReceiver; only the Cassandra
    // implementation can create the LifecycleNewTracker that registers the
    // incoming SSTable with the streaming transaction.
    StreamReceiver streamReceiver = session.getAggregator(tableId);
    assert streamReceiver instanceof CassandraStreamReceiver;
    LifecycleNewTracker lifecycleNewTracker = CassandraStreamReceiver.fromReceiver(session.getAggregator(tableId)).createLifecycleNewTracker();

    Descriptor desc = cfs.newSSTableDescriptor(dataDir, header.version, header.format);

    logger.debug("[Table #{}] {} Components to write: {}", cfs.metadata(), desc.filenameFor(Component.DATA), components);

    // The zero-copy writer streams raw component files straight to disk.
    return new BigTableZeroCopyWriter(desc, cfs.metadata, lifecycleNewTracker, components);
}
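
The lookup-and-narrow step above, which fetches the per-table StreamReceiver from the session, verifies it is the Cassandra implementation, and asks it for a LifecycleNewTracker, also appears in the next example. A minimal sketch of that shared step follows; the helper name trackerFor and the wrapper class StreamReceiverExample are assumptions for illustration, not part of the Cassandra codebase.

import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
import org.apache.cassandra.db.streaming.CassandraStreamReceiver;
import org.apache.cassandra.schema.TableId;
import org.apache.cassandra.streaming.StreamReceiver;
import org.apache.cassandra.streaming.StreamSession;

// Hypothetical wrapper class for illustration only.
final class StreamReceiverExample
{
    // Resolve the receiver registered for this table on the streaming session
    // and narrow it to CassandraStreamReceiver, the implementation that can
    // create the LifecycleNewTracker tying new SSTables to the transaction.
    static LifecycleNewTracker trackerFor(StreamSession session, TableId tableId)
    {
        StreamReceiver receiver = session.getAggregator(tableId);
        if (!(receiver instanceof CassandraStreamReceiver))
            throw new IllegalStateException("Expected CassandraStreamReceiver for table " + tableId);
        return CassandraStreamReceiver.fromReceiver(receiver).createLifecycleNewTracker();
    }
}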
Use of org.apache.cassandra.streaming.StreamReceiver in project cassandra by apache.
From the class CassandraStreamReader, the method createWriter:
@SuppressWarnings("resource")
protected SSTableMultiWriter createWriter(ColumnFamilyStore cfs, long totalSize, long repairedAt, UUID pendingRepair, SSTableFormat.Type format) throws IOException
{
    // Pick a data directory with enough free space for the incoming stream.
    Directories.DataDirectory localDir = cfs.getDirectories().getWriteableLocation(totalSize);
    if (localDir == null)
        throw new IOException(String.format("Insufficient disk space to store %s", FBUtilities.prettyPrintMemory(totalSize)));

    StreamReceiver streamReceiver = session.getAggregator(tableId);
    Preconditions.checkState(streamReceiver instanceof CassandraStreamReceiver);
    LifecycleNewTracker lifecycleNewTracker = CassandraStreamReceiver.fromReceiver(session.getAggregator(tableId)).createLifecycleNewTracker();

    // The range-aware writer places incoming data according to local token ranges.
    RangeAwareSSTableWriter writer = new RangeAwareSSTableWriter(cfs, estimatedKeys, repairedAt, pendingRepair, false, format, sstableLevel, totalSize, lifecycleNewTracker, getHeader(cfs.metadata()));
    return writer;
}
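
The disk-space guard at the top of this example is worth isolating: getWriteableLocation(totalSize) returns null when no data directory can hold the stream, and the reader turns that into a readable IOException. A minimal sketch of that guard follows; the class name WriteLocationExample and the method name requireWriteableLocation are assumptions for illustration.

import java.io.IOException;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Directories;
import org.apache.cassandra.utils.FBUtilities;

// Hypothetical wrapper class for illustration only.
final class WriteLocationExample
{
    // Ask Directories for a data directory with room for totalSize bytes,
    // failing fast with a human-readable size when none qualifies.
    static Directories.DataDirectory requireWriteableLocation(ColumnFamilyStore cfs, long totalSize) throws IOException
    {
        Directories.DataDirectory localDir = cfs.getDirectories().getWriteableLocation(totalSize);
        if (localDir == null)
            throw new IOException(String.format("Insufficient disk space to store %s", FBUtilities.prettyPrintMemory(totalSize)));
        return localDir;
    }
}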