Use of org.apache.cassandra.io.sstable.Component in the Apache Cassandra project.
From the class DirectoriesTest, method createFakeSSTable:
/**
 * Creates empty placeholder files for the DATA, PRIMARY_INDEX and FILTER
 * components of a BIG-format sstable of the given generation, and records
 * each created file in {@code addTo}.
 */
private static void createFakeSSTable(File dir, String cf, int gen, List<File> addTo) throws IOException {
    Descriptor desc = new Descriptor(dir, KS, cf, gen, SSTableFormat.Type.BIG);
    Component[] required = { Component.DATA, Component.PRIMARY_INDEX, Component.FILTER };
    for (Component component : required) {
        File componentFile = new File(desc.filenameFor(component));
        componentFile.createNewFile();
        addTo.add(componentFile);
    }
}
Use of org.apache.cassandra.io.sstable.Component in the Apache Cassandra project.
From the class MockSchema, method sstable:
/**
 * Creates a mock sstable on disk for {@code cfs}: empty DATA, PRIMARY_INDEX,
 * FILTER and TOC component files (DATA padded to {@code size} bytes when
 * positive) and returns an {@link SSTableReader} opened over them with
 * mocked file handles and stats.
 *
 * @param generation sstable generation used in the descriptor and reader bounds
 * @param size       desired on-disk length of the DATA component; ignored if <= 0
 * @param keepRef    if false, the reader's self-ref is released before returning
 * @param cfs        table the mock sstable belongs to
 */
public static SSTableReader sstable(int generation, int size, boolean keepRef, ColumnFamilyStore cfs) {
    Descriptor descriptor = new Descriptor(cfs.getDirectories().getDirectoryForNewSSTables(), cfs.keyspace.getName(), cfs.getTableName(), generation, SSTableFormat.Type.BIG);
    Set<Component> components = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
    for (Component component : components) {
        File file = new File(descriptor.filenameFor(component));
        try {
            file.createNewFile();
        } catch (IOException e) {
            // FIX: this exception was previously swallowed silently, so a failed
            // create surfaced later as a confusing reader-open error. Fail fast
            // instead, consistent with the RuntimeException wrapping below.
            throw new RuntimeException("Failed to create component file " + file, e);
        }
    }
    if (size > 0) {
        try {
            // Pad the data component so the reader observes the requested on-disk size.
            File file = new File(descriptor.filenameFor(Component.DATA));
            try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
                raf.setLength(size);
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    SerializationHeader header = SerializationHeader.make(cfs.metadata(), Collections.emptyList());
    StatsMetadata metadata = (StatsMetadata) new MetadataCollector(cfs.metadata().comparator).finalizeMetadata(cfs.metadata().partitioner.getClass().getCanonicalName(), 0.01f, -1, null, header).get(MetadataType.STATS);
    // Both data and index handles use the shared mock reader factory; the summary
    // and filter are likewise mocks — only the component files above are real.
    SSTableReader reader = SSTableReader.internalOpen(descriptor, components, cfs.metadata, RANDOM_ACCESS_READER_FACTORY.sharedCopy(), RANDOM_ACCESS_READER_FACTORY.sharedCopy(), indexSummary.sharedCopy(), new AlwaysPresentFilter(), 1L, metadata, SSTableReader.OpenReason.NORMAL, header);
    reader.first = reader.last = readerBounds(generation);
    if (!keepRef)
        reader.selfRef().release();
    return reader;
}
Use of org.apache.cassandra.io.sstable.Component in the Apache Cassandra project.
From the class LogTransactionTest, method sstable:
/**
 * Creates a real on-disk sstable skeleton in {@code dataFolder} — DATA,
 * PRIMARY_INDEX, FILTER and TOC components, each padded to {@code size}
 * bytes — and opens an {@link SSTableReader} over it with real file handles
 * for the data and index components (other parts are mocked via MockSchema).
 *
 * @throws IOException if a component file cannot be created
 */
private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int generation, int size) throws IOException {
Descriptor descriptor = new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), generation, SSTableFormat.Type.BIG);
Set<Component> components = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
for (Component component : components) {
File file = new File(descriptor.filenameFor(component));
// Create missing component files; every component is padded to `size` so the
// log-transaction machinery under test sees non-empty files.
if (!file.exists())
assertTrue(file.createFileIfNotExists());
Util.setFileLength(file, size);
}
// Real file handles for data and index — these must be closed by releasing the reader.
FileHandle dFile = new FileHandle.Builder(descriptor.filenameFor(Component.DATA)).complete();
FileHandle iFile = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX)).complete();
SerializationHeader header = SerializationHeader.make(cfs.metadata(), Collections.emptyList());
// NOTE(review): the extra `false` argument (vs the MockSchema variant) is presumably
// the isTransient flag — confirm against this branch's MetadataCollector signature.
StatsMetadata metadata = (StatsMetadata) new MetadataCollector(cfs.metadata().comparator).finalizeMetadata(cfs.metadata().partitioner.getClass().getCanonicalName(), 0.01f, -1, null, false, header).get(MetadataType.STATS);
SSTableReader reader = SSTableReader.internalOpen(descriptor, components, cfs.metadata, iFile, dFile, MockSchema.indexSummary.sharedCopy(), new AlwaysPresentFilter(), 1L, metadata, SSTableReader.OpenReason.NORMAL, header);
reader.first = reader.last = MockSchema.readerBounds(generation);
return reader;
}
Use of org.apache.cassandra.io.sstable.Component in the Apache Cassandra project.
From the class ScrubTest, method testScrubOutOfOrder:
// Verifies that Scrubber can salvage an sstable whose partitions are out of
// order on disk — a condition SSTableReader.open rejects during validation.
@Test
public void testScrubOutOfOrder() {
// This test assumes ByteOrderPartitioner to create out-of-order SSTable
IPartitioner oldPartitioner = DatabaseDescriptor.getPartitioner();
DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());
// Create out-of-order SSTable
File tempDir = FileUtils.createTempFile("ScrubTest.testScrubOutOfOrder", "").parent();
// create ks/cf directory
// NOTE(review): pathSeparator() is conventionally the path-LIST separator (':'/';'),
// not the directory separator — verify this File wrapper's semantics; if it mirrors
// java.io.File, File.separator is likely what was intended here.
File tempDataDir = new File(tempDir, String.join(File.pathSeparator(), ksName, CF));
assertTrue(tempDataDir.tryCreateDirectories());
try {
CompactionManager.instance.disableAutoCompaction();
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
// Keys are deliberately not in byte order, producing out-of-order partitions on disk.
List<String> keys = Arrays.asList("t", "a", "b", "z", "c", "y", "d");
Descriptor desc = cfs.newSSTableDescriptor(tempDataDir);
// Write the rows with a test writer that skips the usual ordering enforcement.
try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
SSTableTxnWriter writer = new SSTableTxnWriter(txn, createTestWriter(desc, keys.size(), cfs.metadata, txn))) {
for (String k : keys) {
PartitionUpdate update = UpdateBuilder.create(cfs.metadata(), Util.dk(k)).newRow("someName").add("val", "someValue").build();
writer.append(update.unfilteredIterator());
}
writer.finish(false);
}
// A normal open must detect the corruption introduced above.
try {
SSTableReader.open(desc, cfs.metadata);
fail("SSTR validation should have caught the out-of-order rows");
} catch (CorruptSSTableException ise) {
/* this is expected */
}
// open without validation for scrubbing
Set<Component> components = new HashSet<>();
// COMPRESSION_INFO exists only when the table is compressed, so add it conditionally.
if (new File(desc.filenameFor(Component.COMPRESSION_INFO)).exists())
components.add(Component.COMPRESSION_INFO);
components.add(Component.DATA);
components.add(Component.PRIMARY_INDEX);
components.add(Component.FILTER);
components.add(Component.STATS);
components.add(Component.SUMMARY);
components.add(Component.TOC);
SSTableReader sstable = SSTableReader.openNoValidation(desc, components, cfs);
// Repair the reader's bounds so scrubbing can proceed over the disordered data.
if (sstable.last.compareTo(sstable.first) < 0)
sstable.last = sstable.first;
// skipCorrupted=false, checkData=true: scrub should reorder rather than drop rows.
try (LifecycleTransaction scrubTxn = LifecycleTransaction.offline(OperationType.SCRUB, sstable);
Scrubber scrubber = new Scrubber(cfs, scrubTxn, false, true)) {
scrubber.scrub();
}
LifecycleTransaction.waitForDeletions();
cfs.loadNewSSTables();
// All 7 rows must survive the scrub and come back in order.
assertOrderedAll(cfs, 7);
} finally {
FileUtils.deleteRecursive(tempDataDir);
// reset partitioner
DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
}
}
Use of org.apache.cassandra.io.sstable.Component in the Apache Cassandra project.
From the class CassandraEntireSSTableStreamReader, method read:
/**
 * Receives an entire sstable from the stream: each component listed in the
 * header's manifest is written verbatim to a zero-copy writer, then the
 * received STATS metadata is mutated to carry the sender's level and repair
 * information.
 *
 * @param in where this reads data from
 * @return SSTable transferred
 * @throws IOException if reading the remote sstable fails. Will throw an RTE if local write fails.
 */
// input needs to remain open, streams on top of it can't be closed
@SuppressWarnings("resource")
@Override
public SSTableMultiWriter read(DataInputPlus in) throws Throwable {
ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(tableId);
if (cfs == null) {
// schema was dropped during streaming
throw new IOException("Table " + tableId + " was dropped during streaming");
}
ComponentManifest manifest = header.componentManifest;
long totalSize = manifest.totalSize();
logger.debug("[Stream #{}] Started receiving sstable #{} from {}, size = {}, table = {}", session.planId(), fileSequenceNumber, session.peer, prettyPrintMemory(totalSize), cfs.metadata());
BigTableZeroCopyWriter writer = null;
try {
writer = createWriter(cfs, totalSize, manifest.components());
long bytesRead = 0;
// Components arrive back-to-back on the stream in manifest order; each is
// copied straight through to the writer with per-component progress reporting.
for (Component component : manifest.components()) {
long length = manifest.sizeOf(component);
logger.debug("[Stream #{}] Started receiving {} component from {}, componentSize = {}, readBytes = {}, totalSize = {}", session.planId(), component, session.peer, prettyPrintMemory(length), prettyPrintMemory(bytesRead), prettyPrintMemory(totalSize));
writer.writeComponent(component.type, in, length);
session.progress(writer.descriptor.filenameFor(component), ProgressInfo.Direction.IN, length, length);
bytesRead += length;
logger.debug("[Stream #{}] Finished receiving {} component from {}, componentSize = {}, readBytes = {}, totalSize = {}", session.planId(), component, session.peer, prettyPrintMemory(length), prettyPrintMemory(bytesRead), prettyPrintMemory(totalSize));
}
// Rewrite the received stats to reflect the sender's level and repair state,
// since the on-disk metadata was produced on the remote node.
// NOTE(review): the trailing `false` presumably marks the sstable non-transient — confirm.
UnaryOperator<StatsMetadata> transform = stats -> stats.mutateLevel(header.sstableLevel).mutateRepairedMetadata(messageHeader.repairedAt, messageHeader.pendingRepair, false);
String description = String.format("level %s and repairedAt time %s and pendingRepair %s", header.sstableLevel, messageHeader.repairedAt, messageHeader.pendingRepair);
writer.descriptor.getMetadataSerializer().mutate(writer.descriptor, description, transform);
return writer;
} catch (Throwable e) {
logger.error("[Stream {}] Error while reading sstable from stream for table = {}", session.planId(), cfs.metadata(), e);
// Aborting the writer cleans up partial files; it may return a combined throwable.
if (writer != null)
e = writer.abort(e);
throw e;
}
}
Aggregations