Use of org.apache.cassandra.db.Keyspace in project cassandra (palantir fork):
class AntiCompactionTest, method prepareColumnFamilyStore.
/**
 * Populates CF with 10 rows of 10 empty-valued cells each (all written with the
 * same wall-clock timestamp), flushes them to disk, and returns the store with
 * auto-compaction disabled so tests control compaction explicitly.
 */
private ColumnFamilyStore prepareColumnFamilyStore() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF);
    cfs.disableAutoCompaction();
    long now = System.currentTimeMillis();
    for (int row = 0; row < 10; row++) {
        DecoratedKey partitionKey = Util.dk(Integer.toString(row));
        Mutation mutation = new Mutation(KEYSPACE1, partitionKey.getKey());
        for (int col = 0; col < 10; col++) {
            mutation.add("Standard1", Util.cellname(Integer.toString(col)), ByteBufferUtil.EMPTY_BYTE_BUFFER, now, 0);
        }
        mutation.apply();
    }
    // Persist the memtable so the test starts from on-disk sstables.
    cfs.forceBlockingFlush();
    return cfs;
}
Use of org.apache.cassandra.db.Keyspace in project cassandra (palantir fork):
class CompactionAwareWriterTest, method clear.
@Before
public void clear() {
    // Reset CF before each test so one test's sstables cannot affect the next.
    Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF).clearUnsafe();
}
Use of org.apache.cassandra.db.Keyspace in project cassandra (palantir fork):
class CompactionAwareWriterTest, method testMaxSSTableSizeWriter.
/**
 * Verifies that MaxSSTableSizeWriter splits one compacted sstable into ~10
 * output sstables when the size cap is set to a tenth of the input size,
 * without losing any rows.
 */
@Test
public void testMaxSSTableSizeWriter() {
    Keyspace ks = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(CF);
    cfs.disableAutoCompaction();
    int rowCount = 1000;
    populate(cfs, rowCount);
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getSSTables(), OperationType.COMPACTION);
    long beforeSize = txn.originals().iterator().next().onDiskLength();
    // Divide BEFORE narrowing to int: "(int) beforeSize / 10" casts first and
    // silently overflows for inputs larger than Integer.MAX_VALUE bytes.
    int sstableSize = (int) (beforeSize / 10);
    CompactionAwareWriter writer = new MaxSSTableSizeWriter(cfs, txn, txn.originals(), sstableSize, 0, false, OperationType.COMPACTION);
    int rows = compact(cfs, txn, writer);
    // Capping each output at 1/10th of the input should yield 10 sstables.
    assertEquals(10, cfs.getSSTables().size());
    assertEquals(rowCount, rows);
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
Use of org.apache.cassandra.db.Keyspace in project cassandra (palantir fork):
class CompactionAwareWriterTest, method testMajorLeveledCompactionWriter.
/**
 * Verifies that MajorLeveledCompactionWriter produces the expected number of
 * output sstables and distributes them across LCS levels (nothing in L0, a
 * full L1 of 10, the remainder in L2) without losing any rows.
 */
@Test
public void testMajorLeveledCompactionWriter() {
    Keyspace ks = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(CF);
    cfs.disableAutoCompaction();
    int rowCount = 20000;
    int targetSSTableCount = 50;
    populate(cfs, rowCount);
    LifecycleTransaction txn = cfs.getTracker().tryModify(cfs.getSSTables(), OperationType.COMPACTION);
    long beforeSize = txn.originals().iterator().next().onDiskLength();
    // Divide BEFORE narrowing to int: "(int) beforeSize / targetSSTableCount"
    // casts first and silently overflows for inputs > Integer.MAX_VALUE bytes.
    int sstableSize = (int) (beforeSize / targetSSTableCount);
    CompactionAwareWriter writer = new MajorLeveledCompactionWriter(cfs, txn, txn.originals(), sstableSize, false, OperationType.COMPACTION);
    int rows = compact(cfs, txn, writer);
    assertEquals(targetSSTableCount, cfs.getSSTables().size());
    int[] levelCounts = new int[5];
    assertEquals(rowCount, rows);
    for (SSTableReader sstable : cfs.getSSTables()) {
        levelCounts[sstable.getSSTableLevel()]++;
    }
    // A major leveled compaction leaves nothing in L0; L1 holds exactly 10
    // sstables and the rest land in L2.
    assertEquals(0, levelCounts[0]);
    assertEquals(10, levelCounts[1]);
    // note that if we want more levels, fix this
    assertEquals(targetSSTableCount - 10, levelCounts[2]);
    for (int i = 3; i < levelCounts.length; i++) assertEquals(0, levelCounts[i]);
    validateData(cfs, rowCount);
    cfs.truncateBlocking();
}
Use of org.apache.cassandra.db.Keyspace in project cassandra (palantir fork):
class CompactionControllerTest, method testGetFullyExpiredSSTables.
// Verifies CompactionController.getFullyExpiredSSTables: an sstable whose only
// content is a tombstone is "fully expired" when every overlapping sstable (and
// the memtable) contains strictly newer data, but ceases to be expired as soon
// as an older mutation exists that the tombstone could still shadow.
// NOTE(review): the precise flush/mutation ordering below is what the test
// exercises — the final applyMutation is deliberately NOT flushed so the older
// data lives only in the memtable.
@Test
public void testGetFullyExpiredSSTables() {
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF2);
cfs.truncateBlocking();
ByteBuffer rowKey = ByteBufferUtil.bytes("k1");
// latest timestamp
long timestamp1 = FBUtilities.timestampMicros();
long timestamp2 = timestamp1 - 5;
// oldest timestamp
long timestamp3 = timestamp2 - 5;
// create sstable with tombstone that should be expired in no older timestamps
applyDeleteMutation(CF2, rowKey, timestamp2);
cfs.forceBlockingFlush();
// first sstable with tombstone is compacting
Set<SSTableReader> compacting = Sets.newHashSet(cfs.getSSTables());
// create another sstable with more recent timestamp
applyMutation(CF2, rowKey, timestamp1);
cfs.forceBlockingFlush();
// second sstable is overlapping (everything on disk minus the compacting set)
Set<SSTableReader> overlapping = Sets.difference(Sets.newHashSet(cfs.getSSTables()), compacting);
// the first sstable should be expired because the overlapping sstable is newer and the gc period is later
int gcBefore = (int) (System.currentTimeMillis() / 1000) + 5;
Set<SSTableReader> expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
assertNotNull(expired);
// exactly the tombstone-only sstable is reported as droppable
assertEquals(1, expired.size());
assertEquals(compacting.iterator().next(), expired.iterator().next());
// however if we add an older mutation to the memtable then the sstable should not be expired
// (the tombstone at timestamp2 still shadows the timestamp3 write, so it must be kept)
applyMutation(CF2, rowKey, timestamp3);
expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
assertNotNull(expired);
assertEquals(0, expired.size());
}
Aggregations