use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
the class LeveledCompactionStrategyTest method testTokenRangeCompaction.
@Test
public void testTokenRangeCompaction() throws Exception {
// Remove any existing data so we can start out clean with a predictable number of sstables
cfs.truncateBlocking();
// Disable auto compaction so Cassandra does not compact in the background
CompactionManager.instance.disableAutoCompaction();
// 100 KiB value, to make it easy to produce multiple files
ByteBuffer value = ByteBuffer.wrap(new byte[100 * 1024]);
DecoratedKey key1 = Util.dk(String.valueOf(1));
DecoratedKey key2 = Util.dk(String.valueOf(2));
List<DecoratedKey> keys = new ArrayList<>(Arrays.asList(key1, key2));
int numIterations = 10;
int columns = 2;
// create 10 sstables that contain data for both key1 and key2
for (int i = 0; i < numIterations; i++) {
for (DecoratedKey key : keys) {
UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key);
for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
update.applyUnsafe();
}
cfs.forceBlockingFlush();
}
// create 20 more sstables, 10 containing data for key1 and the other 10 containing data for key2
for (int i = 0; i < numIterations; i++) {
for (DecoratedKey key : keys) {
UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key);
for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
update.applyUnsafe();
cfs.forceBlockingFlush();
}
}
// We should have a total of 30 sstables by now
assertEquals(30, cfs.getLiveSSTables().size());
// Compact just the tables with key2
// A bit hackish to use key1's token as the prior (exclusive) bound, but it works with BytesToken
Range<Token> tokenRange = new Range<>(key1.getToken(), key2.getToken());
Collection<Range<Token>> tokenRanges = new ArrayList<>(Arrays.asList(tokenRange));
cfs.forceCompactionForTokenRange(tokenRanges);
while (CompactionManager.instance.isCompacting(Arrays.asList(cfs), (sstable) -> true)) {
Thread.sleep(100);
}
// The 20 tables that have key2 should have been compacted into 1 table, resulting in 11 (30 - 20 + 1)
assertEquals(11, cfs.getLiveSSTables().size());
// Compact just the tables with key1. At this point all 11 tables should have key1
Range<Token> tokenRange2 = new Range<>(key2.getToken(), key1.getToken());
Collection<Range<Token>> tokenRanges2 = new ArrayList<>(Arrays.asList(tokenRange2));
cfs.forceCompactionForTokenRange(tokenRanges2);
while (CompactionManager.instance.isCompacting(Arrays.asList(cfs), (sstable) -> true)) {
Thread.sleep(100);
}
// the 11 tables containing key1 should all compact to 1 table
assertEquals(1, cfs.getLiveSSTables().size());
// Set it up again
cfs.truncateBlocking();
// create 10 sstables that contain data for both key1 and key2
for (int i = 0; i < numIterations; i++) {
for (DecoratedKey key : keys) {
UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key);
for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
update.applyUnsafe();
}
cfs.forceBlockingFlush();
}
// create 20 more sstables, 10 containing data for key1 and the other 10 containing data for key2
for (int i = 0; i < numIterations; i++) {
for (DecoratedKey key : keys) {
UpdateBuilder update = UpdateBuilder.create(cfs.metadata(), key);
for (int c = 0; c < columns; c++) update.newRow("column" + c).add("val", value);
update.applyUnsafe();
cfs.forceBlockingFlush();
}
}
// We should have a total of 30 sstables again
assertEquals(30, cfs.getLiveSSTables().size());
// This time, we're going to make sure the token range wraps around, to cover the full range
Range<Token> wrappingRange;
if (key1.getToken().compareTo(key2.getToken()) < 0) {
wrappingRange = new Range<>(key2.getToken(), key1.getToken());
} else {
wrappingRange = new Range<>(key1.getToken(), key2.getToken());
}
Collection<Range<Token>> wrappingRanges = new ArrayList<>(Arrays.asList(wrappingRange));
cfs.forceCompactionForTokenRange(wrappingRanges);
while (CompactionManager.instance.isCompacting(Arrays.asList(cfs), (sstable) -> true)) {
Thread.sleep(100);
}
// should all compact to 1 table
assertEquals(1, cfs.getLiveSSTables().size());
}
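The single-key compactions above lean on Cassandra's half-open Range semantics: a Range<Token> covers (left, right], so (key1.token, key2.token] selects key2 but not key1 under the byte-ordered tokens this test relies on, while a range whose two endpoints are equal wraps around and covers the entire ring. A minimal sketch of those semantics, using Murmur3Partitioner.LongToken purely for illustration (not part of the test above):
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;

public class RangeSemanticsSketch {
    public static void main(String[] args) {
        Token t1 = new Murmur3Partitioner.LongToken(10L);
        Token t2 = new Murmur3Partitioner.LongToken(20L);
        Range<Token> r = new Range<>(t1, t2);
        System.out.println(r.contains(t1)); // false: the left bound is exclusive
        System.out.println(r.contains(t2)); // true: the right bound is inclusive
        // Equal endpoints denote a wrap-around range covering the whole ring
        Range<Token> full = new Range<>(t1, t1);
        System.out.println(full.contains(t2)); // true
    }
}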
use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
the class TimeWindowCompactionStrategyTest method testDropExpiredSSTables.
@Test
public void testDropExpiredSSTables() throws InterruptedException {
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
cfs.truncateBlocking();
cfs.disableAutoCompaction();
ByteBuffer value = ByteBuffer.wrap(new byte[100]);
// Create an expiring sstable with a TTL
DecoratedKey key = Util.dk("expired");
new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), TTL_SECONDS, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
SSTableReader expiredSSTable = cfs.getLiveSSTables().iterator().next();
Thread.sleep(10);
// Create a second sstable without TTL
key = Util.dk("nonexpired");
new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
assertEquals(2, cfs.getLiveSSTables().size());
Map<String, String> options = new HashMap<>();
options.put(TimeWindowCompactionStrategyOptions.COMPACTION_WINDOW_SIZE_KEY, "30");
options.put(TimeWindowCompactionStrategyOptions.COMPACTION_WINDOW_UNIT_KEY, "SECONDS");
options.put(TimeWindowCompactionStrategyOptions.TIMESTAMP_RESOLUTION_KEY, "MILLISECONDS");
options.put(TimeWindowCompactionStrategyOptions.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY, "0");
TimeWindowCompactionStrategy twcs = new TimeWindowCompactionStrategy(cfs, options);
for (SSTableReader sstable : cfs.getLiveSSTables()) twcs.addSSTable(sstable);
twcs.startup();
assertNull(twcs.getNextBackgroundTask(nowInSeconds()));
// Wait for the expiration of the first sstable
Thread.sleep(TimeUnit.SECONDS.toMillis(TTL_SECONDS + 1));
AbstractCompactionTask t = twcs.getNextBackgroundTask(nowInSeconds());
assertNotNull(t);
assertEquals(1, Iterables.size(t.transaction.originals()));
SSTableReader sstable = t.transaction.originals().iterator().next();
assertEquals(sstable, expiredSSTable);
t.transaction.abort();
}
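Because expired_sstable_check_frequency_seconds is set to 0, every getNextBackgroundTask call re-checks for fully expired sstables before normal bucketing, which is why the expired sstable surfaces as a single-sstable task. A hedged sketch of the core condition (a simplification; the real logic lives in CompactionController.getFullyExpiredSSTables and also accounts for overlapping sstables and memtables):
import org.apache.cassandra.io.sstable.format.SSTableReader;

final class ExpirationCheckSketch {
    // An sstable is a drop candidate once every cell in it has expired, i.e. its
    // latest local deletion time falls before the GC grace cutoff.
    static boolean maybeFullyExpired(SSTableReader sstable, int gcBefore) {
        return sstable.getSSTableMetadata().maxLocalDeletionTime < gcBefore;
    }
}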
use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
the class TimeWindowCompactionStrategyTest method testDropOverlappingExpiredSSTables.
@Test
public void testDropOverlappingExpiredSSTables() throws InterruptedException {
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
cfs.truncateBlocking();
cfs.disableAutoCompaction();
long timestamp = System.currentTimeMillis();
ByteBuffer value = ByteBuffer.wrap(new byte[100]);
// Create an expiring sstable with a TTL
DecoratedKey key = Util.dk("expired");
new RowUpdateBuilder(cfs.metadata(), timestamp, TTL_SECONDS, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
SSTableReader expiredSSTable = cfs.getLiveSSTables().iterator().next();
Thread.sleep(10);
// Create a second sstable without a TTL, containing a row superseded by the expiring row
new RowUpdateBuilder(cfs.metadata(), timestamp - 1000, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
key = Util.dk("nonexpired");
new RowUpdateBuilder(cfs.metadata(), timestamp, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
assertEquals(2, cfs.getLiveSSTables().size());
Map<String, String> options = new HashMap<>();
options.put(TimeWindowCompactionStrategyOptions.COMPACTION_WINDOW_SIZE_KEY, "30");
options.put(TimeWindowCompactionStrategyOptions.COMPACTION_WINDOW_UNIT_KEY, "SECONDS");
options.put(TimeWindowCompactionStrategyOptions.TIMESTAMP_RESOLUTION_KEY, "MILLISECONDS");
options.put(TimeWindowCompactionStrategyOptions.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY, "0");
TimeWindowCompactionStrategy twcs = new TimeWindowCompactionStrategy(cfs, options);
for (SSTableReader sstable : cfs.getLiveSSTables()) twcs.addSSTable(sstable);
twcs.startup();
assertNull(twcs.getNextBackgroundTask(nowInSeconds()));
// Wait for the expiration of the first sstable
Thread.sleep(TimeUnit.SECONDS.toMillis(TTL_SECONDS + 1));
assertNull(twcs.getNextBackgroundTask(nowInSeconds()));
options.put(TimeWindowCompactionStrategyOptions.UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_KEY, "true");
twcs = new TimeWindowCompactionStrategy(cfs, options);
for (SSTableReader sstable : cfs.getLiveSSTables()) twcs.addSSTable(sstable);
twcs.startup();
AbstractCompactionTask t = twcs.getNextBackgroundTask(nowInSeconds());
assertNotNull(t);
assertEquals(1, Iterables.size(t.transaction.originals()));
SSTableReader sstable = t.transaction.originals().iterator().next();
assertEquals(sstable, expiredSSTable);
twcs.shutdown();
t.transaction.abort();
}
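The difference from the previous test is the overlap: the non-TTL'd sstable holds an older row for the "expired" key, so dropping the expired sstable outright could resurrect shadowed data. A sketch of the safety condition this implies, as a simplification of the overlap check (unsafe_aggressive_sstable_expiration simply opts out of it):
import java.util.Collection;
import org.apache.cassandra.io.sstable.format.SSTableReader;

final class OverlapSafetySketch {
    // Dropping an expired sstable is only safe when no overlapping sstable
    // contains data older than what the expired sstable's cells might shadow.
    static boolean safeToDrop(SSTableReader expired, Collection<SSTableReader> overlapping) {
        return overlapping.stream()
                          .allMatch(other -> other.getMinTimestamp() > expired.getMaxTimestamp());
    }
}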
use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
the class TimeWindowCompactionStrategyTest method testPrepBucket.
@Test
public void testPrepBucket() {
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
cfs.truncateBlocking();
cfs.disableAutoCompaction();
ByteBuffer value = ByteBuffer.wrap(new byte[100]);
long tstamp = System.currentTimeMillis();
long tstamp2 = tstamp - (2L * 3600L * 1000L);
// create 5 sstables (3 here, 2 more in the next loop), using the row index as the write timestamp
for (int r = 0; r < 3; r++) {
DecoratedKey key = Util.dk(String.valueOf(r));
new RowUpdateBuilder(cfs.metadata(), r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
}
// The remaining 2 sstables will later be bucketed two hours back via tstamp2
for (int r = 3; r < 5; r++) {
DecoratedKey key = Util.dk(String.valueOf(r));
new RowUpdateBuilder(cfs.metadata(), r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
}
cfs.forceBlockingFlush();
HashMultimap<Long, SSTableReader> buckets = HashMultimap.create();
List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
// We'll put 3 sstables into the newest bucket
for (int i = 0; i < 3; i++) {
Pair<Long, Long> bounds = getWindowBoundsInMillis(TimeUnit.HOURS, 1, tstamp);
buckets.put(bounds.left, sstrs.get(i));
}
TimeWindowCompactionStrategy.NewestBucket newBucket = newestBucket(buckets, 4, 32, new SizeTieredCompactionStrategyOptions(), getWindowBoundsInMillis(TimeUnit.HOURS, 1, System.currentTimeMillis()).left);
assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.sstables.isEmpty());
assertEquals("there should be no estimated remaining tasks when bucket is below min threshold SSTables", 0, newBucket.estimatedRemainingTasks);
newBucket = newestBucket(buckets, 2, 32, new SizeTieredCompactionStrategyOptions(), getWindowBoundsInMillis(TimeUnit.HOURS, 1, System.currentTimeMillis()).left);
assertFalse("incoming bucket should be accepted when it is larger than the min threshold SSTables", newBucket.sstables.isEmpty());
assertEquals("there should be one estimated remaining task when bucket is larger than the min threshold SSTables", 1, newBucket.estimatedRemainingTasks);
// And 2 into the second bucket (2 hours back)
for (int i = 3; i < 5; i++) {
Pair<Long, Long> bounds = getWindowBoundsInMillis(TimeUnit.HOURS, 1, tstamp2);
buckets.put(bounds.left, sstrs.get(i));
}
assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(1).getMinTimestamp(), sstrs.get(1).getMaxTimestamp());
assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(2).getMinTimestamp(), sstrs.get(2).getMaxTimestamp());
// Test trim: add 35 more sstables, each containing progressively more rows
int numSSTables = 40;
for (int r = 5; r < numSSTables; r++) {
DecoratedKey key = Util.dk(String.valueOf(r));
for (int i = 0; i < r; i++) {
new RowUpdateBuilder(cfs.metadata(), tstamp + r, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
}
cfs.forceBlockingFlush();
}
// Rebuild the sstable list and overfill the buckets this time
sstrs = new ArrayList<>(cfs.getLiveSSTables());
for (int i = 0; i < numSSTables; i++) {
Pair<Long, Long> bounds = getWindowBoundsInMillis(TimeUnit.HOURS, 1, sstrs.get(i).getMaxTimestamp());
buckets.put(bounds.left, sstrs.get(i));
}
newBucket = newestBucket(buckets, 4, 32, new SizeTieredCompactionStrategyOptions(), getWindowBoundsInMillis(TimeUnit.HOURS, 1, System.currentTimeMillis()).left);
assertEquals("new bucket should be trimmed to max threshold of 32", newBucket.sstables.size(), 32);
// one per bucket because they are all eligible and one more for the sstables that were trimmed
assertEquals("there should be one estimated remaining task per eligible bucket", buckets.keySet().size() + 1, newBucket.estimatedRemainingTasks);
}
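Every bucket key above is the left bound returned by getWindowBoundsInMillis. A hedged sketch of what that boundary computation amounts to (the real method in TimeWindowCompactionStrategy switches on the supported window units; this simply floors a timestamp to the start of its window):
import java.util.concurrent.TimeUnit;

final class WindowBoundsSketch {
    // Floor a millisecond timestamp to the start of its compaction window, so all
    // sstables whose max timestamp lands in the same window share one bucket key.
    static long windowLowerBound(TimeUnit windowUnit, int windowSize, long timestampMillis) {
        long windowMillis = windowUnit.toMillis(windowSize);
        return (timestampMillis / windowMillis) * windowMillis;
    }
}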
use of org.apache.cassandra.db.DecoratedKey in project cassandra by apache.
the class CompactionsTest method populate.
public static long populate(String ks, String cf, int startRowKey, int endRowKey, int ttl) {
long timestamp = System.currentTimeMillis();
TableMetadata cfm = Keyspace.open(ks).getColumnFamilyStore(cf).metadata();
for (int i = startRowKey; i <= endRowKey; i++) {
DecoratedKey key = Util.dk(Integer.toString(i));
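// Give every row except the first a TTL so part of each partition can expire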
for (int j = 0; j < 10; j++) {
new RowUpdateBuilder(cfm, timestamp, j > 0 ? ttl : 0, key.getKey()).clustering(Integer.toString(j)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
}
}
return timestamp;
}
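A hypothetical call site (KEYSPACE1 and "Standard1" are assumed test fixtures, named here for illustration only) showing how the helper is typically used before exercising a compaction:
static void populateAndFlush() {
    // Write rows 0..9 into Standard1 with a 60-second TTL, then flush so the
    // data lands in an sstable the compaction under test can see.
    long writtenAt = CompactionsTest.populate(KEYSPACE1, "Standard1", 0, 9, 60);
    Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1").forceBlockingFlush();
}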