Usage of org.apache.cassandra.io.sstable.format.big.BigTableZeroCopyWriter in the Apache Cassandra project.
Class CassandraEntireSSTableStreamReader, method read:
/**
 * Receives an entire SSTable from the stream: every component listed in the header's
 * manifest is written locally, byte-for-byte, through a {@link BigTableZeroCopyWriter},
 * then the on-disk stats metadata is rewritten to reflect this node's level/repair state.
 *
 * @param in where this reads data from
 * @return SSTable transferred
 * @throws IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 */
// input needs to remain open, streams on top of it can't be closed
@SuppressWarnings("resource")
@Override
public SSTableMultiWriter read(DataInputPlus in) throws Throwable
{
    ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(tableId);
    if (cfs == null)
    {
        // schema was dropped during streaming
        throw new IOException("Table " + tableId + " was dropped during streaming");
    }

    ComponentManifest manifest = header.componentManifest;
    long totalSize = manifest.totalSize();
    logger.debug("[Stream #{}] Started receiving sstable #{} from {}, size = {}, table = {}",
                 session.planId(), fileSequenceNumber, session.peer,
                 prettyPrintMemory(totalSize), cfs.metadata());

    BigTableZeroCopyWriter writer = null;
    try
    {
        writer = createWriter(cfs, totalSize, manifest.components());

        long bytesRead = 0;
        for (Component component : manifest.components())
        {
            long length = manifest.sizeOf(component);

            logger.debug("[Stream #{}] Started receiving {} component from {}, componentSize = {}, readBytes = {}, totalSize = {}",
                         session.planId(), component, session.peer,
                         prettyPrintMemory(length), prettyPrintMemory(bytesRead), prettyPrintMemory(totalSize));

            writer.writeComponent(component.type, in, length);
            // Each component is consumed whole, so progress is reported as length/length.
            session.progress(writer.descriptor.filenameFor(component), ProgressInfo.Direction.IN, length, length);
            bytesRead += length;

            logger.debug("[Stream #{}] Finished receiving {} component from {}, componentSize = {}, readBytes = {}, totalSize = {}",
                         session.planId(), component, session.peer,
                         prettyPrintMemory(length), prettyPrintMemory(bytesRead), prettyPrintMemory(totalSize));
        }

        // The streamed stats metadata reflects the sender; rewrite level and repair
        // info so the sstable is registered correctly on this node.
        UnaryOperator<StatsMetadata> transform =
            stats -> stats.mutateLevel(header.sstableLevel)
                          .mutateRepairedMetadata(messageHeader.repairedAt, messageHeader.pendingRepair, false);
        String description = String.format("level %s and repairedAt time %s and pendingRepair %s",
                                           header.sstableLevel, messageHeader.repairedAt, messageHeader.pendingRepair);
        writer.descriptor.getMetadataSerializer().mutate(writer.descriptor, description, transform);
        return writer;
    }
    catch (Throwable e)
    {
        // Log tag fixed from "[Stream {}]" to "[Stream #{}]" to match every other
        // log line in this method, so log correlation by stream id works.
        logger.error("[Stream #{}] Error while reading sstable from stream for table = {}",
                     session.planId(), cfs.metadata(), e);
        if (writer != null)
            e = writer.abort(e); // abort cleans up partially-written files and folds its own failures into e
        throw e;
    }
}
Usage of org.apache.cassandra.io.sstable.format.big.BigTableZeroCopyWriter in the Apache Cassandra project.
Class CassandraEntireSSTableStreamReader, method createWriter:
/**
 * Creates a {@link BigTableZeroCopyWriter} for the incoming sstable, placing it in a
 * data directory with enough space for {@code totalSize} bytes and registering the new
 * sstable with the receiver's lifecycle tracker.
 *
 * @param cfs        the table the sstable belongs to
 * @param totalSize  total size in bytes of all components to be received
 * @param components the sstable components that will be written
 * @return a writer the caller streams the raw component bytes into
 * @throws IOException if no data directory with sufficient space is available
 */
@SuppressWarnings("resource")
protected BigTableZeroCopyWriter createWriter(ColumnFamilyStore cfs, long totalSize, Collection<Component> components) throws IOException
{
    File dataDir = getDataDir(cfs, totalSize);

    StreamReceiver streamReceiver = session.getAggregator(tableId);
    assert streamReceiver instanceof CassandraStreamReceiver;

    // Reuse the receiver fetched (and assert-checked) above rather than calling
    // session.getAggregator(tableId) a second time — guarantees the validated
    // instance is the one whose lifecycle tracker we use.
    LifecycleNewTracker lifecycleNewTracker =
        CassandraStreamReceiver.fromReceiver(streamReceiver).createLifecycleNewTracker();

    Descriptor desc = cfs.newSSTableDescriptor(dataDir, header.version, header.format);

    logger.debug("[Table #{}] {} Components to write: {}", cfs.metadata(), desc.filenameFor(Component.DATA), components);

    return new BigTableZeroCopyWriter(desc, cfs.metadata, lifecycleNewTracker, components);
}
Aggregations