Use of org.apache.cassandra.db.compaction.writers.SplittingSizeTieredCompactionWriter in project cassandra by apache.
From the testSplittingSizeTieredCompactionWriter method of the CompactionAwareWriterTest class: the test compacts 10,000 rows through the splitting writer and asserts that the output sstables halve in size (50%, 25%, 12.5%, ... of the input), with the remainder collected in the last file.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

import org.junit.Test;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.compaction.writers.CompactionAwareWriter;
import org.apache.cassandra.db.compaction.writers.SplittingSizeTieredCompactionWriter;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.io.sstable.format.SSTableReader;

import static org.junit.Assert.assertEquals;

@Test
public void testSplittingSizeTieredCompactionWriter() throws Throwable
{
    ColumnFamilyStore cfs = getColumnFamilyStore();
    cfs.disableAutoCompaction();
    int rowCount = 10000;
    populate(rowCount);
    // mark all live sstables as compacting and get the transaction that tracks them
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getLiveSSTables(), OperationType.COMPACTION);
    long beforeSize = txn.originals().iterator().next().onDiskLength();
    // the last argument is the smallest sstable size in bytes; 0 lets the writer split all the way down
    CompactionAwareWriter writer = new SplittingSizeTieredCompactionWriter(cfs, cfs.getDirectories(), txn, txn.originals(), 0);
    int rows = compact(cfs, txn, writer);
    // the largest output sstable should hold roughly half of the input data
    long expectedSize = beforeSize / 2;
    // sort the outputs largest-first so the halving sequence can be checked in order
    List<SSTableReader> sortedSSTables = new ArrayList<>(cfs.getLiveSSTables());
    sortedSSTables.sort(Comparator.comparingLong(SSTableReader::onDiskLength).reversed());
    for (SSTableReader sstable : sortedSSTables)
    {
        // we don't create smaller files than this; everything else will be in the last file
        if (expectedSize > SplittingSizeTieredCompactionWriter.DEFAULT_SMALLEST_SSTABLE_BYTES)
            // allow 1% difference between estimated and actual size
            assertEquals(expectedSize, sstable.onDiskLength(), expectedSize / 100);
        expectedSize /= 2;
    }
    assertEquals(rowCount, rows);
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
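For reference, the halving schedule that the assertion loop above walks down can be written out as a standalone helper. This is a minimal illustrative sketch, not code from Cassandra: the SplitSizeSchedule class and expectedSizes method are hypothetical names, and the 50 MiB floor used in main merely stands in for SplittingSizeTieredCompactionWriter.DEFAULT_SMALLEST_SSTABLE_BYTES.

import java.util.ArrayList;
import java.util.List;

// Hypothetical helper: models the split sizes the test expects, i.e. each
// output sstable is half the previous one until the next slice would fall
// below the smallest-sstable floor, at which point the remainder goes into
// a single final sstable.
public final class SplitSizeSchedule
{
    public static List<Long> expectedSizes(long totalBytes, long smallestSSTableBytes)
    {
        List<Long> sizes = new ArrayList<>();
        long remaining = totalBytes;
        long slice = totalBytes / 2;      // first sstable: ~50% of the input
        while (slice > smallestSSTableBytes)
        {
            sizes.add(slice);
            remaining -= slice;
            slice /= 2;                   // each subsequent sstable halves again
        }
        sizes.add(remaining);             // everything left lands in the last file
        return sizes;
    }

    public static void main(String[] args)
    {
        // a 1 GiB input with a hypothetical 50 MiB floor
        System.out.println(expectedSizes(1L << 30, 50L << 20));
    }
}

For a 1 GiB input this prints [536870912, 268435456, 134217728, 67108864, 67108864]: the 512 MiB, 256 MiB, 128 MiB, 64 MiB halving sequence the test's loop checks, plus the 64 MiB remainder in the last file.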