Use of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project.
Source: class ViewTest, method testCompaction.
// Exercises the View compacting-set machinery: permitCompacting (predicate),
// updateCompacting (state transition) and updateLiveSet. Walks a single View
// instance through a sequence of transitions, asserting at each step that the
// compacting set, the uncompacting view and the NONCOMPACTING selection agree.
@Test
public void testCompaction() {
ColumnFamilyStore cfs = MockSchema.newCFS();
// fakeView(0, 5, ...) builds a view over 5 mock sstables, none compacting
View initialView = fakeView(0, 5, cfs, true);
View cur = initialView;
List<SSTableReader> readers = ImmutableList.copyOf(initialView.sstables);
// nothing is compacting yet, so any subset may be marked
Assert.assertTrue(View.permitCompacting(readers).apply(cur));
// check we permit compacting duplicates in the predicate, so we don't spin infinitely if there is a screw up
Assert.assertTrue(View.permitCompacting(ImmutableList.copyOf(concat(readers, readers))).apply(cur));
// check we fail in the application in the presence of duplicates
testFailure(View.updateCompacting(emptySet(), concat(readers.subList(0, 1), readers.subList(0, 1))), cur);
// do lots of trivial checks that the compacting set and related methods behave properly for a simple update
cur = View.updateCompacting(emptySet(), readers.subList(0, 2)).apply(cur);
// readers 0-1 are now compacting: 2-4 remain permissible, any overlap with 0-1 is not
Assert.assertTrue(View.permitCompacting(readers.subList(2, 5)).apply(cur));
Assert.assertFalse(View.permitCompacting(readers.subList(0, 2)).apply(cur));
Assert.assertFalse(View.permitCompacting(readers.subList(0, 1)).apply(cur));
Assert.assertFalse(View.permitCompacting(readers.subList(1, 2)).apply(cur));
// getUncompacting and select(NONCOMPACTING) must both report exactly readers 2-4
Assert.assertTrue(readers.subList(2, 5).containsAll(copyOf(cur.getUncompacting(readers))));
Assert.assertEquals(3, copyOf(cur.getUncompacting(readers)).size());
Assert.assertTrue(ImmutableSet.copyOf(cur.select(SSTableSet.NONCOMPACTING)).containsAll(readers.subList(2, 5)));
Assert.assertEquals(3, ImmutableSet.copyOf(cur.select(SSTableSet.NONCOMPACTING)).size());
// check marking already compacting readers fails with an exception
testFailure(View.updateCompacting(emptySet(), readers.subList(0, 1)), cur);
testFailure(View.updateCompacting(emptySet(), readers.subList(1, 2)), cur);
testFailure(View.updateCompacting(copyOf(readers.subList(0, 1)), readers.subList(1, 2)), cur);
// make equivalents of readers.subList(0, 3) that are different instances
SSTableReader r0 = MockSchema.sstable(0, cfs), r1 = MockSchema.sstable(1, cfs), r2 = MockSchema.sstable(2, cfs);
// attempt to mark compacting a version not in the live set
testFailure(View.updateCompacting(emptySet(), of(r2)), cur);
// update one compacting, one non-compacting, of the liveset to another instance of the same readers;
// confirm liveset changes but compacting does not
cur = View.updateLiveSet(copyOf(readers.subList(1, 3)), of(r1, r2)).apply(cur);
// r0 was not replaced (maps to the original instance); r1/r2 were replaced
Assert.assertSame(readers.get(0), cur.sstablesMap.get(r0));
Assert.assertSame(r1, cur.sstablesMap.get(r1));
Assert.assertSame(r2, cur.sstablesMap.get(r2));
// reader 2's old instance is compacting, so re-marking the new instance must fail
testFailure(View.updateCompacting(emptySet(), readers.subList(2, 3)), cur);
// the compacting set still holds the ORIGINAL instance of reader 1, not r1
Assert.assertSame(readers.get(1), Iterables.getFirst(Iterables.filter(cur.compacting, Predicates.equalTo(r1)), null));
// unmark compacting, and check our methods are all correctly updated
cur = View.updateCompacting(copyOf(readers.subList(0, 1)), emptySet()).apply(cur);
Assert.assertTrue(View.permitCompacting(concat(readers.subList(0, 1), of(r2), readers.subList(3, 5))).apply(cur));
Assert.assertFalse(View.permitCompacting(readers.subList(1, 2)).apply(cur));
testFailure(View.updateCompacting(emptySet(), readers.subList(1, 2)), cur);
// unmarking a reader that is not compacting must also fail
testFailure(View.updateCompacting(copyOf(readers.subList(0, 2)), emptySet()), cur);
// only reader 1 remains compacting, so 4 of the 5 are uncompacting
Assert.assertTrue(copyOf(concat(readers.subList(0, 1), readers.subList(2, 5))).containsAll(copyOf(cur.getUncompacting(readers))));
Assert.assertEquals(4, copyOf(cur.getUncompacting(readers)).size());
Set<SSTableReader> nonCompacting = ImmutableSet.copyOf(cur.select(SSTableSet.NONCOMPACTING));
Assert.assertTrue(nonCompacting.containsAll(readers.subList(2, 5)));
Assert.assertTrue(nonCompacting.containsAll(readers.subList(0, 1)));
Assert.assertEquals(4, nonCompacting.size());
// release the mock sstables so the test leaves no reference leaks
for (SSTableReader sstable : initialView.sstables) sstable.selfRef().release();
}
Use of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project.
Source: class DateTieredCompactionStrategyTest, method testPrepBucket.
// Verifies that newestBucket() rejects an incoming-window bucket that is below
// the minimum threshold but accepts it once the threshold is met, and that a
// freshly flushed single-row sstable has identical min and max timestamps.
@Test
public void testPrepBucket() {
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
store.disableAutoCompaction();
ByteBuffer payload = ByteBuffer.wrap(new byte[100]);
// write and flush three single-row sstables, one per timestamp 0..2
final int sstableCount = 3;
for (int i = 0; i < sstableCount; i++) {
DecoratedKey partitionKey = Util.dk(String.valueOf(i));
new RowUpdateBuilder(store.metadata(), i, partitionKey.getKey()).clustering("column").add("val", payload).build().applyUnsafe();
store.forceBlockingFlush();
}
store.forceBlockingFlush();
List<SSTableReader> flushed = new ArrayList<>(store.getLiveSSTables());
// a 2-sstable bucket must be rejected while minThreshold (9) is not met
List<SSTableReader> candidates = newestBucket(Collections.singletonList(flushed.subList(0, 2)), 4, 32, 9, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", candidates.isEmpty());
// outside the incoming window (minThreshold == base) two sstables suffice
candidates = newestBucket(Collections.singletonList(flushed.subList(0, 2)), 4, 32, 10, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
assertFalse("non-incoming bucket should be accepted when it has at least 2 SSTables", candidates.isEmpty());
// each sstable holds exactly one write, so its timestamp range is a point
for (int i = 0; i < sstableCount; i++)
assertEquals("an sstable with a single value should have equal min/max timestamps", flushed.get(i).getMinTimestamp(), flushed.get(i).getMaxTimestamp());
store.truncateBlocking();
}
Use of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project.
Source: class DateTieredCompactionStrategyTest, method testDropExpiredSSTables.
// Verifies that DTCS schedules a compaction task containing exactly the fully
// expired sstable (the one written with a 1-second TTL) once the expiry check
// frequency allows it, and returns no task before anything has expired.
@Test
public void testDropExpiredSSTables() throws InterruptedException {
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
cfs.disableAutoCompaction();
ByteBuffer value = ByteBuffer.wrap(new byte[100]);
// create 2 sstables: one with a 1-second TTL (will expire), one without
DecoratedKey key = Util.dk("expired");
new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), 1, key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
SSTableReader expiredSSTable = cfs.getLiveSSTables().iterator().next();
// ensure the second sstable gets a strictly later timestamp
Thread.sleep(10);
key = Util.dk("nonexpired");
new RowUpdateBuilder(cfs.metadata(), System.currentTimeMillis(), key.getKey()).clustering("column").add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
assertEquals(2, cfs.getLiveSSTables().size());
Map<String, String> options = new HashMap<>();
options.put(DateTieredCompactionStrategyOptions.BASE_TIME_KEY, "30");
options.put(DateTieredCompactionStrategyOptions.TIMESTAMP_RESOLUTION_KEY, "MILLISECONDS");
options.put(DateTieredCompactionStrategyOptions.MAX_SSTABLE_AGE_KEY, Double.toString((1d / (24 * 60 * 60))));
// check for expired sstables on every call, not on the default cadence
options.put(DateTieredCompactionStrategyOptions.EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY, "0");
DateTieredCompactionStrategy dtcs = new DateTieredCompactionStrategy(cfs, options);
for (SSTableReader sstable : cfs.getLiveSSTables()) dtcs.addSSTable(sstable);
dtcs.startup();
// nothing has expired yet, so no background task should be produced
assertNull(dtcs.getNextBackgroundTask((int) (System.currentTimeMillis() / 1000)));
// wait past the 1-second TTL so the first sstable becomes fully expired
Thread.sleep(2000);
AbstractCompactionTask t = dtcs.getNextBackgroundTask((int) (System.currentTimeMillis() / 1000));
assertNotNull(t);
// the task must target exactly the expired sstable
assertEquals(1, Iterables.size(t.transaction.originals()));
SSTableReader sstable = t.transaction.originals().iterator().next();
assertEquals(sstable, expiredSSTable);
t.transaction.abort();
cfs.truncateBlocking();
}
Use of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project.
Source: class LogTransactionTest, method testGetTemporaryFilesThrowsIfCompletingAfterObsoletion.
// Releasing an sstable before its transaction completes violates the log
// transaction contract: listing temporary files while the txn log files are
// being deleted asynchronously must surface a RuntimeException whose cause is
// anything but an AssertionError.
@Test
public void testGetTemporaryFilesThrowsIfCompletingAfterObsoletion() throws Throwable {
ColumnFamilyStore store = MockSchema.newCFS(KEYSPACE);
File folder = new Directories(store.metadata()).getDirectoryForNewSSTables();
SSTableReader reader = sstable(folder, store, 0, 128);
LogTransaction txn = new LogTransaction(OperationType.COMPACTION);
assertNotNull(txn);
LogTransaction.SSTableTidier tidier = txn.obsoleted(reader);
// release the sstable before the transaction has finished (contract violation)
reader.markObsolete(tidier);
reader.selfRef().release();
LogTransaction.waitForDeletions();
// This should race with the asynchronous deletion of txn log files;
// it must throw because files were deleted before the txn finished.
boolean threw = false;
try {
getTemporaryFiles(folder);
} catch (RuntimeException e) {
// pass as long as the cause is not an assertion
threw = true;
assertFalse(e.getCause() instanceof AssertionError);
}
if (!threw)
fail("Expected runtime exception");
txn.finish();
}
Use of org.apache.cassandra.db.ColumnFamilyStore in the Apache Cassandra project.
Source: class LogTransactionTest, method testRemoveUnfinishedLeftovers_abort_multipleFolders.
// After an aborted transaction spanning two data folders, the transaction's
// NEW sstables must be reported as temporary and deleted by
// removeUnfinishedLeftovers(), while the obsoleted originals survive.
@Test
public void testRemoveUnfinishedLeftovers_abort_multipleFolders() throws Throwable {
ColumnFamilyStore store = MockSchema.newCFS(KEYSPACE);
File baseFolder = new Directories(store.metadata()).getDirectoryForNewSSTables();
File folder1 = new File(baseFolder, "1");
File folder2 = new File(baseFolder, "2");
Files.createDirectories(folder1.toPath());
Files.createDirectories(folder2.toPath());
// per folder: one sstable to obsolete (indices 0, 2) and one new one (1, 3)
SSTableReader[] readers = { sstable(folder1, store, 0, 128), sstable(folder1, store, 1, 128), sstable(folder2, store, 2, 128), sstable(folder2, store, 3, 128) };
LogTransaction txn = new LogTransaction(OperationType.COMPACTION);
assertNotNull(txn);
LogTransaction.SSTableTidier[] tidiers = { txn.obsoleted(readers[0]), txn.obsoleted(readers[2]) };
txn.trackNew(readers[1]);
txn.trackNew(readers[3]);
// the transaction keeps one log file per data folder it touches
Collection<File> logFiles = txn.logFiles();
Assert.assertEquals(2, logFiles.size());
// fake an abort
txn.txnFile().abort();
for (SSTableReader reader : readers)
reader.selfRef().release();
// listing must report the aborted transaction's new sstables as temporary
Assert.assertEquals(readers[1].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()), getTemporaryFiles(folder1));
Assert.assertEquals(readers[3].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()), getTemporaryFiles(folder2));
// normally called at startup: removes leftovers of the aborted transaction
assertTrue(LogTransaction.removeUnfinishedLeftovers(Arrays.asList(folder1, folder2)));
// only the old (pre-existing) tables should remain in each folder
assertFiles(folder1.getPath(), new HashSet<>(readers[0].getAllFilePaths()));
assertFiles(folder2.getPath(), new HashSet<>(readers[2].getAllFilePaths()));
// run the tidiers to complete the transaction and avoid LEAK errors
for (LogTransaction.SSTableTidier tidier : tidiers)
tidier.run();
assertNull(txn.complete(null));
}
Aggregations