Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project: the testHelper method of the NeverPurgeTest class.
private void testHelper(String deletionStatement) throws Throwable {
    // gc_grace_seconds = 0 makes tombstones eligible for purging immediately.
    createTable("CREATE TABLE %s (a int, b int, c text, PRIMARY KEY (a, b)) WITH gc_grace_seconds = 0");
    ColumnFamilyStore store = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());

    // Write a single row, then delete (part of) it with the caller-supplied statement.
    execute("INSERT INTO %s (a, b, c) VALUES (1, 2, '3')");
    execute(deletionStatement);

    // Let wall-clock time advance past the deletion before flushing and compacting.
    Thread.sleep(1000);
    store.forceBlockingFlush();
    store.forceMajorCompaction();

    // The tombstone must survive the major compaction despite being purgeable.
    verifyContainsTombstones(store.getLiveSSTables(), 1);
}
Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project: the minorNeverPurgeTombstonesTest method of the NeverPurgeTest class.
@Test
public void minorNeverPurgeTombstonesTest() throws Throwable {
    // gc_grace_seconds = 0 makes tombstones eligible for purging immediately.
    createTable("CREATE TABLE %s (a int, b int, c text, PRIMARY KEY (a, b)) WITH gc_grace_seconds = 0");
    ColumnFamilyStore cfs = Keyspace.open(keyspace()).getColumnFamilyStore(currentTable());
    cfs.disableAutoCompaction();

    // Build 4 sstables of 1000 rows each so minor compaction has work to do.
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 1000; j++) {
            execute("INSERT INTO %s (a, b, c) VALUES (" + j + ", 2, '3')");
        }
        cfs.forceBlockingFlush();
    }

    // Three tombstone flavors: cell deletion, row deletion, partition deletion.
    execute("UPDATE %s SET c = null WHERE a=1 AND b=2");
    execute("DELETE FROM %s WHERE a=2 AND b=2");
    execute("DELETE FROM %s WHERE a=3");
    cfs.forceBlockingFlush();
    cfs.enableAutoCompaction();

    // Fix: bound the wait. The original looped forever if minor compactions
    // never reduced the table to a single sstable, hanging the whole suite.
    long deadline = System.currentTimeMillis() + 60_000L;
    while (cfs.getLiveSSTables().size() > 1) {
        if (System.currentTimeMillis() > deadline)
            throw new AssertionError("Timed out waiting for minor compactions to reach a single live sstable; still have " + cfs.getLiveSSTables().size());
        Thread.sleep(100);
    }

    // All three tombstones must survive the minor compactions.
    verifyContainsTombstones(cfs.getLiveSSTables(), 3);
}
Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project: the testCompaction method of the OneCompactionTest class.
private void testCompaction(String columnFamilyName, int insertsPerTable) {
    CompactionManager.instance.disableAutoCompaction();
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(columnFamilyName);

    // Each iteration writes one key and flushes, producing one sstable per insert.
    Set<String> writtenKeys = new HashSet<>();
    for (int i = 0; i < insertsPerTable; i++) {
        String key = String.valueOf(i);
        new RowUpdateBuilder(cfs.metadata(), i, key)
            .clustering("0")
            .add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
            .build()
            .applyUnsafe();
        writtenKeys.add(key);
        cfs.forceBlockingFlush();
        // Every key written so far must remain readable after the flush.
        assertEquals(writtenKeys.size(), Util.getAll(Util.cmd(cfs).build()).size());
    }

    // A maximal compaction must collapse everything into exactly one sstable.
    CompactionManager.instance.performMaximal(cfs, false);
    assertEquals(1, cfs.getLiveSSTables().size());
}
Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project: the testCompactionHook method of the SnapshotDeletingTest class.
@Test
public void testCompactionHook() throws Exception {
    // Windows-only scenario: snapshot deletion can fail while files are held open.
    Assume.assumeTrue(FBUtilities.isWindows);

    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    cfs.clearUnsafe();
    populate(10000);
    cfs.snapshot("snapshot1");

    // Give the SnapshotDeletingTask a moment to run (and fail), then verify the
    // deletion is still pending after clearing the snapshot.
    Thread.sleep(500);
    cfs.clearSnapshot("snapshot1");
    assertEquals(1, SnapshotDeletingTask.pendingDeletionCount());

    // Compacting the table should fire the executor's after-hook, which
    // reschedules the pending deletion; it should complete this time.
    populate(20000);
    cfs.forceBlockingFlush();
    cfs.forceMajorCompaction();
    long deadline = System.currentTimeMillis() + 1000;
    while (SnapshotDeletingTask.pendingDeletionCount() > 0 && System.currentTimeMillis() < deadline) {
        Thread.yield();
    }
    assertEquals(0, SnapshotDeletingTask.pendingDeletionCount());
}
Example usage of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project: the testPrepBucket method of the TimeWindowCompactionStrategyTest class.
// Exercises TimeWindowCompactionStrategy bucket preparation: sstables are
// grouped into per-hour buckets keyed by window start, and newestBucket(...)
// must (a) reject a newest bucket below the min threshold, (b) accept one at
// or above it, and (c) trim an overfull bucket down to the max threshold.
@Test
public void testPrepBucket() {
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
// Start from a clean table with no background compactions interfering.
cfs.truncateBlocking();
cfs.disableAutoCompaction();
ByteBuffer value = ByteBuffer.wrap(new byte[100]);
// "Now" in millis, and a point two hours earlier used to key the older bucket.
Long tstamp = System.currentTimeMillis();
Long tstamp2 = tstamp - (2L * 3600L * 1000L);
// First 3 of 5 sstables: one row each, flushed individually.
// NOTE(review): the write timestamp here is the loop index r, not tstamp.
// Bucket membership below is assigned manually, so this only affects the
// min/max-timestamp assertions further down — confirm this is intentional.
for (int r = 0; r < 3; r++) {
DecoratedKey key = Util.dk(String.valueOf(r));
new RowUpdateBuilder(cfs.metadata(), r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
}
// Two more sstables, destined for the bucket keyed two hours back; again one
// row each, flushed individually, still using the index r as the timestamp.
for (int r = 3; r < 5; r++) {
DecoratedKey key = Util.dk(String.valueOf(r));
new RowUpdateBuilder(cfs.metadata(), r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
}
cfs.forceBlockingFlush();
HashMultimap<Long, SSTableReader> buckets = HashMultimap.create();
List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
// Manually place the first 3 sstables into the newest (current-hour) bucket.
for (int i = 0; i < 3; i++) {
Pair<Long, Long> bounds = getWindowBoundsInMillis(TimeUnit.HOURS, 1, tstamp);
buckets.put(bounds.left, sstrs.get(i));
}
// 3 sstables < min threshold 4: nothing should be selected for compaction.
List<SSTableReader> newBucket = newestBucket(buckets, 4, 32, new SizeTieredCompactionStrategyOptions(), getWindowBoundsInMillis(TimeUnit.HOURS, 1, System.currentTimeMillis()).left);
assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.isEmpty());
// 3 sstables >= min threshold 2: the newest bucket should be selected.
newBucket = newestBucket(buckets, 2, 32, new SizeTieredCompactionStrategyOptions(), getWindowBoundsInMillis(TimeUnit.HOURS, 1, System.currentTimeMillis()).left);
assertTrue("incoming bucket should be accepted when it is larger than the min threshold SSTables", !newBucket.isEmpty());
// Place the remaining 2 sstables into the bucket keyed two hours back (tstamp2).
for (int i = 3; i < 5; i++) {
Pair<Long, Long> bounds = getWindowBoundsInMillis(TimeUnit.HOURS, 1, tstamp2);
buckets.put(bounds.left, sstrs.get(i));
}
// Each of the first three sstables was flushed after a single write, so its
// min and max cell timestamps must coincide.
assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(1).getMinTimestamp(), sstrs.get(1).getMaxTimestamp());
assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(2).getMinTimestamp(), sstrs.get(2).getMaxTimestamp());
// Trim test: flush 35 more sstables (indices 5..39), the r-th holding r cells
// all written at timestamp tstamp + r.
int numSSTables = 40;
for (int r = 5; r < numSSTables; r++) {
DecoratedKey key = Util.dk(String.valueOf(r));
for (int i = 0; i < r; i++) {
new RowUpdateBuilder(cfs.metadata(), tstamp + r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
}
cfs.forceBlockingFlush();
}
// Re-read all 40 live sstables and bucket each by its own max-timestamp
// window, overfilling the newest bucket.
// NOTE(review): the multimap is not actually cleared here — entries added
// above remain (HashMultimap silently ignores exact duplicate key/value
// pairs), so "reset" only applies to the sstrs list. Confirm intent.
sstrs = new ArrayList<>(cfs.getLiveSSTables());
for (int i = 0; i < 40; i++) {
Pair<Long, Long> bounds = getWindowBoundsInMillis(TimeUnit.HOURS, 1, sstrs.get(i).getMaxTimestamp());
buckets.put(bounds.left, sstrs.get(i));
}
// With max threshold 32, the selected bucket must be capped at 32 sstables.
newBucket = newestBucket(buckets, 4, 32, new SizeTieredCompactionStrategyOptions(), getWindowBoundsInMillis(TimeUnit.HOURS, 1, System.currentTimeMillis()).left);
assertEquals("new bucket should be trimmed to max threshold of 32", newBucket.size(), 32);
}
End of aggregated ColumnFamilyStore usage examples.