use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class LeveledCompactionStrategyTest method testMutateLevel.
@Test
public void testMutateLevel() throws Exception {
    cfs.disableAutoCompaction();
    // 100 KB value, to make it easy to have multiple files
    ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]);
    // Enough data to have a level 1 and 2
    int rows = 40;
    int columns = 20;
    // Add enough data to trigger multiple sstables per level
    for (int r = 0; r < rows; r++) {
        UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), String.valueOf(r));
        for (int c = 0; c < columns; c++)
            update.newRow("column" + c).add("val", value);
        update.applyUnsafe();
        cfs.forceBlockingFlush();
    }
    cfs.forceBlockingFlush();
    LeveledCompactionStrategy strategy = (LeveledCompactionStrategy) cfs.getCompactionStrategyManager().getStrategies().get(1).get(0);
    cfs.forceMajorCompaction();
    for (SSTableReader s : cfs.getLiveSSTables()) {
        assertTrue(s.getSSTableLevel() != 6 && s.getSSTableLevel() > 0);
        strategy.manifest.remove(s);
        s.descriptor.getMetadataSerializer().mutateLevel(s.descriptor, 6);
        s.reloadSSTableMetadata();
        strategy.manifest.add(s);
    }
    // verify that all sstables in the changed set are at level 6
    for (SSTableReader s : cfs.getLiveSSTables())
        assertEquals(6, s.getSSTableLevel());
    int[] levels = strategy.manifest.getAllLevelSize();
    // verify that the manifest has the correct number of sstables
    assertEquals(cfs.getLiveSSTables().size(), levels[6]);
}
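The level mutation at the heart of this test follows a fixed pattern: take the reader out of the leveled manifest, rewrite the level in the on-disk stats metadata, reload the metadata, and put the reader back. Below is a minimal sketch of that pattern as a standalone helper, using only the calls shown above; the helper name moveToLevel is hypothetical, and it assumes the caller can reach the strategy's manifest the same way this test does.

private static void moveToLevel(LeveledCompactionStrategy strategy, SSTableReader sstable, int newLevel) throws IOException {
    // take the reader out of the leveled manifest before touching its metadata
    strategy.manifest.remove(sstable);
    // rewrite the level in the stats metadata on disk, then reload it into the reader
    sstable.descriptor.getMetadataSerializer().mutateLevel(sstable.descriptor, newLevel);
    sstable.reloadSSTableMetadata();
    // re-register the reader so the manifest reflects the new level
    strategy.manifest.add(sstable);
}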
use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class LogTransactionTest method testGetTemporaryFilesThrowsIfCompletingAfterObsoletion.
@Test
public void testGetTemporaryFilesThrowsIfCompletingAfterObsoletion() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File dataFolder = new Directories(cfs.metadata()).getDirectoryForNewSSTables();
    SSTableReader sstable = sstable(dataFolder, cfs, 0, 128);
    LogTransaction logs = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(logs);
    LogTransaction.SSTableTidier tidier = logs.obsoleted(sstable);
    sstable.markObsolete(tidier);
    sstable.selfRef().release();
    LogTransaction.waitForDeletions();
    try {
        // This should race with the asynchronous deletion of the txn log files;
        // it should throw because we are violating the requirement that a transaction must
        // finish before files are deleted (i.e. before sstables are released)
        getTemporaryFiles(dataFolder);
        fail("Expected runtime exception");
    } catch (RuntimeException e) {
        // pass, as long as the cause is not an assertion error
        assertFalse(e.getCause() instanceof AssertionError);
    }
    logs.finish();
}
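For contrast with the deliberate violation above, here is a sketch of the ordering the test's comment describes as required, reusing the same names and calls: finish the transaction before releasing the sstable, so a later getTemporaryFiles scan never races with the asynchronous deletion of the txn log files. This is a sketch of the contract, not code from the project.

LogTransaction logs = new LogTransaction(OperationType.COMPACTION);
LogTransaction.SSTableTidier tidier = logs.obsoleted(sstable);
sstable.markObsolete(tidier);
// finish the transaction first...
logs.finish();
// ...and only then release the sstable and wait for the deletions to run
sstable.selfRef().release();
LogTransaction.waitForDeletions();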
use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class LogTransactionTest method testRemoveUnfinishedLeftovers_abort_multipleFolders.
@Test
public void testRemoveUnfinishedLeftovers_abort_multipleFolders() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File origiFolder = new Directories(cfs.metadata()).getDirectoryForNewSSTables();
    File dataFolder1 = new File(origiFolder, "1");
    File dataFolder2 = new File(origiFolder, "2");
    Files.createDirectories(dataFolder1.toPath());
    Files.createDirectories(dataFolder2.toPath());
    SSTableReader[] sstables = { sstable(dataFolder1, cfs, 0, 128), sstable(dataFolder1, cfs, 1, 128), sstable(dataFolder2, cfs, 2, 128), sstable(dataFolder2, cfs, 3, 128) };
    LogTransaction log = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(log);
    LogTransaction.SSTableTidier[] tidiers = { log.obsoleted(sstables[0]), log.obsoleted(sstables[2]) };
    log.trackNew(sstables[1]);
    log.trackNew(sstables[3]);
    Collection<File> logFiles = log.logFiles();
    Assert.assertEquals(2, logFiles.size());
    // fake an abort
    log.txnFile().abort();
    Arrays.stream(sstables).forEach(s -> s.selfRef().release());
    // test listing: the newly tracked sstables are reported as temporary
    Assert.assertEquals(sstables[1].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()), getTemporaryFiles(dataFolder1));
    Assert.assertEquals(sstables[3].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()), getTemporaryFiles(dataFolder2));
    // normally called at startup
    assertTrue(LogTransaction.removeUnfinishedLeftovers(Arrays.asList(dataFolder1, dataFolder2)));
    // the old sstables should be the only files left
    assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[0].getAllFilePaths()));
    assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[2].getAllFilePaths()));
    // complete the transaction to avoid LEAK errors
    Arrays.stream(tidiers).forEach(LogTransaction.SSTableTidier::run);
    assertNull(log.complete(null));
}
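The abort contract exercised here can be summed up in a few lines: files registered with trackNew become the temporary files once the transaction aborts, while the obsoleted originals are left alone. A compact sketch of that contract, with oldSSTable and newSSTable as hypothetical readers standing in for the arrays above:

LogTransaction txn = new LogTransaction(OperationType.COMPACTION);
LogTransaction.SSTableTidier tidier = txn.obsoleted(oldSSTable);   // kept on abort
txn.trackNew(newSSTable);                                          // removed on abort
txn.txnFile().abort();
// getTemporaryFiles(dataFolder) would now report newSSTable's components,
// and removeUnfinishedLeftovers would delete them at startup
tidier.run();   // complete the tidier to avoid LEAK errors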
use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class LogTransactionTest method testRemoveUnfinishedLeftovers_commit_multipleFolders.
@Test
public void testRemoveUnfinishedLeftovers_commit_multipleFolders() throws Throwable {
    ColumnFamilyStore cfs = MockSchema.newCFS(KEYSPACE);
    File origiFolder = new Directories(cfs.metadata()).getDirectoryForNewSSTables();
    File dataFolder1 = new File(origiFolder, "1");
    File dataFolder2 = new File(origiFolder, "2");
    Files.createDirectories(dataFolder1.toPath());
    Files.createDirectories(dataFolder2.toPath());
    SSTableReader[] sstables = { sstable(dataFolder1, cfs, 0, 128), sstable(dataFolder1, cfs, 1, 128), sstable(dataFolder2, cfs, 2, 128), sstable(dataFolder2, cfs, 3, 128) };
    LogTransaction log = new LogTransaction(OperationType.COMPACTION);
    assertNotNull(log);
    LogTransaction.SSTableTidier[] tidiers = { log.obsoleted(sstables[0]), log.obsoleted(sstables[2]) };
    log.trackNew(sstables[1]);
    log.trackNew(sstables[3]);
    Collection<File> logFiles = log.logFiles();
    Assert.assertEquals(2, logFiles.size());
    // fake a commit
    log.txnFile().commit();
    Arrays.stream(sstables).forEach(s -> s.selfRef().release());
    // test listing: the obsoleted sstables are reported as temporary
    Assert.assertEquals(sstables[0].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()), getTemporaryFiles(dataFolder1));
    Assert.assertEquals(sstables[2].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()), getTemporaryFiles(dataFolder2));
    // normally called at startup
    assertTrue(LogTransaction.removeUnfinishedLeftovers(Arrays.asList(dataFolder1, dataFolder2)));
    // the new sstables should be the only files left
    assertFiles(dataFolder1.getPath(), new HashSet<>(sstables[1].getAllFilePaths()));
    assertFiles(dataFolder2.getPath(), new HashSet<>(sstables[3].getAllFilePaths()));
    // complete the transaction to avoid LEAK errors
    Arrays.stream(tidiers).forEach(LogTransaction.SSTableTidier::run);
    assertNull(log.complete(null));
}
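The commit case is the mirror image of the abort case: the obsoleted originals become the temporary files and the tracked new sstables survive. A hypothetical helper summarising both outcomes, built only from calls already used in these tests (expectedLeftovers is not a project method):

static Set<File> expectedLeftovers(SSTableReader obsoleted, SSTableReader tracked, boolean committed) {
    // on commit the obsoleted sstable's files are the leftovers; on abort the tracked one's are
    SSTableReader leftover = committed ? obsoleted : tracked;
    return leftover.getAllFilePaths().stream().map(File::new).collect(Collectors.toSet());
}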
use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
the class LogTransactionTest method sstable.
private static SSTableReader sstable(File dataFolder, ColumnFamilyStore cfs, int generation, int size) throws IOException {
    Descriptor descriptor = new Descriptor(dataFolder, cfs.keyspace.getName(), cfs.getTableName(), generation, SSTableFormat.Type.BIG);
    Set<Component> components = ImmutableSet.of(Component.DATA, Component.PRIMARY_INDEX, Component.FILTER, Component.TOC);
    // create empty on-disk component files of the requested size so the reader references real files
    for (Component component : components) {
        File file = new File(descriptor.filenameFor(component));
        if (!file.exists())
            assertTrue(file.createNewFile());
        try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
            raf.setLength(size);
        }
    }
    FileHandle dFile = new FileHandle.Builder(descriptor.filenameFor(Component.DATA)).complete();
    FileHandle iFile = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX)).complete();
    SerializationHeader header = SerializationHeader.make(cfs.metadata(), Collections.emptyList());
    StatsMetadata metadata = (StatsMetadata) new MetadataCollector(cfs.metadata().comparator).finalizeMetadata(cfs.metadata().partitioner.getClass().getCanonicalName(), 0.01f, -1, null, header).get(MetadataType.STATS);
    // open the mock components directly, bypassing the normal flush/write path
    SSTableReader reader = SSTableReader.internalOpen(descriptor, components, cfs.metadata, dFile, iFile, MockSchema.indexSummary.sharedCopy(), new AlwaysPresentFilter(), 1L, metadata, SSTableReader.OpenReason.NORMAL, header);
    reader.first = reader.last = MockSchema.readerBounds(generation);
    return reader;
}
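This fixture writes empty component files of the requested size and opens them through SSTableReader.internalOpen, so the tests above get a real SSTableReader without running a flush. A typical usage sketch inside these tests (the values are arbitrary, and the try/finally is only there to make the reference release explicit):

SSTableReader reader = sstable(dataFolder, cfs, 0, 128);
try {
    // exercise LogTransaction / compaction code against the mock reader
} finally {
    // every test above releases the self reference once it is done with the reader
    reader.selfRef().release();
}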