Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class CompactionStrategyManagerPendingRepairTest, method sstableRepairStatusChanged.
@Test
public void sstableRepairStatusChanged() {
    UUID repairID = registerSession(cfs, true, true);
    LocalSessionAccessor.prepareUnsafe(repairID, COORDINATOR, PARTICIPANTS);
    // add as unrepaired
    SSTableReader sstable = makeSSTable(false);
    Assert.assertTrue(unrepairedContains(sstable));
    Assert.assertFalse(repairedContains(sstable));
    csm.getForPendingRepair(repairID).forEach(Assert::assertNull);
    SSTableRepairStatusChanged notification;
    // change to pending repair
    mutateRepaired(sstable, repairID);
    notification = new SSTableRepairStatusChanged(Collections.singleton(sstable));
    csm.handleNotification(notification, cfs.getTracker());
    Assert.assertFalse(unrepairedContains(sstable));
    Assert.assertFalse(repairedContains(sstable));
    csm.getForPendingRepair(repairID).forEach(Assert::assertNotNull);
    Assert.assertTrue(pendingContains(repairID, sstable));
    // change to repaired
    mutateRepaired(sstable, System.currentTimeMillis());
    notification = new SSTableRepairStatusChanged(Collections.singleton(sstable));
    csm.handleNotification(notification, cfs.getTracker());
    Assert.assertFalse(unrepairedContains(sstable));
    Assert.assertTrue(repairedContains(sstable));
    Assert.assertFalse(pendingContains(repairID, sstable));
}
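The mutateRepaired helper the test calls with either a repair session id or a wall-clock timestamp is not shown on this page. A minimal sketch of what it likely does, assuming the MetadataSerializer.mutateRepaired(descriptor, repairedAt, pendingRepair) overload from this era of trunk (the real helper may differ):

private static void mutateRepaired(SSTableReader sstable, UUID pendingRepair) throws IOException {
    // mark the sstable as pending repair for the given session, leaving repairedAt unset
    sstable.descriptor.getMetadataSerializer()
                      .mutateRepaired(sstable.descriptor, ActiveRepairService.UNREPAIRED_SSTABLE, pendingRepair);
    sstable.reloadSSTableMetadata();
}

private static void mutateRepaired(SSTableReader sstable, long repairedAt) throws IOException {
    // mark the sstable as repaired at the given time, clearing any pending session
    sstable.descriptor.getMetadataSerializer()
                      .mutateRepaired(sstable.descriptor, repairedAt, null);
    sstable.reloadSSTableMetadata();
}

Either way, the on-disk stats metadata is rewritten first and the reader reloaded, which is what lets the subsequent SSTableRepairStatusChanged notification observe the new status.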
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class CompactionsPurgeTest, method testMinTimestampPurge.
/**
 * Verify that a minor compaction does not drop tombstones that might still be relevant.
 */
@Test
public void testMinTimestampPurge() {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE2);
    String cfName = "Standard1";
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    String key3 = "key3";
    // inserts
    new RowUpdateBuilder(cfs.metadata(), 8, key3).clustering("c1").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    new RowUpdateBuilder(cfs.metadata(), 8, key3).clustering("c2").add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER).build().applyUnsafe();
    cfs.forceBlockingFlush();
    // delete c1
    RowUpdateBuilder.deleteRow(cfs.metadata(), 10, key3, "c1").applyUnsafe();
    cfs.forceBlockingFlush();
    Collection<SSTableReader> sstablesIncomplete = cfs.getLiveSSTables();
    // delete c2 so we have a newer delete in a different sstable
    RowUpdateBuilder.deleteRow(cfs.metadata(), 9, key3, "c2").applyUnsafe();
    cfs.forceBlockingFlush();
    // compact the sstables with the c1/c2 data and the c1 tombstone
    List<AbstractCompactionTask> tasks = cfs.getCompactionStrategyManager().getUserDefinedTasks(sstablesIncomplete, Integer.MAX_VALUE);
    assertEquals(1, tasks.size());
    tasks.get(0).execute(null);
    // We should still have both the c1 and c2 tombstones. Since the min timestamp of the
    // sstable holding the c2 tombstone is older than the c1 tombstone, it would be invalid
    // to throw out the c1 tombstone.
    ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key3).build());
    assertEquals(2, partition.rowCount());
    for (Row row : partition)
        assertFalse(row.hasLiveData(FBUtilities.nowInSeconds()));
}
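The invariant behind the final assertions can be stated as a one-line rule; the helper below is hypothetical (not Cassandra API) and only illustrates the check compaction performs. Here the sstable holding the c2 tombstone sits outside the compaction with min timestamp 9, so the c1 tombstone (timestamp 10) may still shadow data there and must survive:

// Hypothetical illustration: a tombstone written at time T may be purged only
// if no overlapping sstable outside the compaction holds data as old as T.
static boolean mayPurgeTombstone(long tombstoneTimestamp, Collection<SSTableReader> overlappingOutsideCompaction) {
    for (SSTableReader sstable : overlappingOutsideCompaction)
        if (sstable.getMinTimestamp() <= tombstoneTimestamp)
            return false; // sstable may contain older cells this tombstone shadows
    return true;
}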
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class CompactionsTest, method assertMaxTimestamp.
/*
@Test
public void testSingleSSTableCompactionWithLeveledCompaction() throws Exception
{
    ColumnFamilyStore store = testSingleSSTableCompaction(LeveledCompactionStrategy.class.getCanonicalName());
    CompactionStrategyManager strategyManager = store.getCompactionStrategyManager();
    // tombstone removal compaction should not promote level
    assert strategyManager.getSSTableCountPerLevel()[0] == 1;
}

@Test
public void testSuperColumnTombstones()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Super1");
    CFMetaData table = cfs.metadata;
    cfs.disableAutoCompaction();
    DecoratedKey key = Util.dk("tskey");
    ByteBuffer scName = ByteBufferUtil.bytes("TestSuperColumn");
    // a subcolumn
    new RowUpdateBuilder(table, FBUtilities.timestampMicros(), key.getKey())
        .clustering(ByteBufferUtil.bytes("cols"))
        .add("val", "val1")
        .build().applyUnsafe();
    cfs.forceBlockingFlush();
    // shadow the subcolumn with a supercolumn tombstone
    RowUpdateBuilder.deleteRow(table, FBUtilities.timestampMicros(), key.getKey(), ByteBufferUtil.bytes("cols")).applyUnsafe();
    cfs.forceBlockingFlush();
    CompactionManager.instance.performMaximal(cfs);
    assertEquals(1, cfs.getLiveSSTables().size());
    // check that the shadowed column is gone
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    AbstractBounds<PartitionPosition> bounds = new Bounds<PartitionPosition>(key, sstable.partitioner.getMinimumToken().maxKeyBound());
    ISSTableScanner scanner = sstable.getScanner(FBUtilities.nowInSeconds());
    UnfilteredRowIterator ai = scanner.next();
    assertTrue(ai.next() instanceof RangeTombstone);
    assertFalse(ai.hasNext());
}

@Test
public void testUncheckedTombstoneSizeTieredCompaction() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
    store.clearUnsafe();
    store.metadata.gcGraceSeconds(1);
    store.metadata.compactionStrategyOptions.put("tombstone_compaction_interval", "1");
    store.metadata.compactionStrategyOptions.put("unchecked_tombstone_compaction", "false");
    store.reload();
    store.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getName());
    // disable compaction while flushing
    store.disableAutoCompaction();
    // populate sstable1 with keys [0..9]
    populate(KEYSPACE1, CF_STANDARD1, 0, 9, 3); // ttl=3s
    store.forceBlockingFlush();
    // populate sstable2 with keys [10..19] (keys do not overlap with sstable1)
    long timestamp2 = populate(KEYSPACE1, CF_STANDARD1, 10, 19, 3); // ttl=3s
    store.forceBlockingFlush();
    assertEquals(2, store.getLiveSSTables().size());
    Iterator<SSTableReader> it = store.getLiveSSTables().iterator();
    long originalSize1 = it.next().uncompressedLength();
    long originalSize2 = it.next().uncompressedLength();
    // wait long enough for the ttl'd data (ttl=3s, gc_grace=1s) to expire and become purgeable
    TimeUnit.SECONDS.sleep(5);
    // enable compaction, submit background and wait for it to complete
    store.enableAutoCompaction();
    FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(store));
    do
    {
        TimeUnit.SECONDS.sleep(1);
    } while (CompactionManager.instance.getPendingTasks() > 0 || CompactionManager.instance.getActiveCompactions() > 0);
    // even though both sstables were candidates for tombstone compaction,
    // none was executed because their token ranges overlap
    assertEquals(2, store.getLiveSSTables().size());
    it = store.getLiveSSTables().iterator();
    long newSize1 = it.next().uncompressedLength();
    long newSize2 = it.next().uncompressedLength();
    assertEquals("candidate sstable should not be tombstone-compacted because its key range overlaps with another sstable",
                 originalSize1, newSize1);
    assertEquals("candidate sstable should not be tombstone-compacted because its key range overlaps with another sstable",
                 originalSize2, newSize2);
    // now let's enable the magic property
    store.metadata.compactionStrategyOptions.put("unchecked_tombstone_compaction", "true");
    store.reload();
    // submit a background task again and wait for it to complete
    FBUtilities.waitOnFutures(CompactionManager.instance.submitBackground(store));
    do
    {
        TimeUnit.SECONDS.sleep(1);
    } while (CompactionManager.instance.getPendingTasks() > 0 || CompactionManager.instance.getActiveCompactions() > 0);
    // we still have 2 sstables, since they were not compacted against each other
    assertEquals(2, store.getLiveSSTables().size());
    it = store.getLiveSSTables().iterator();
    newSize1 = it.next().uncompressedLength();
    newSize2 = it.next().uncompressedLength();
    assertTrue("should be less than " + originalSize1 + ", but was " + newSize1, newSize1 < originalSize1);
    assertTrue("should be less than " + originalSize2 + ", but was " + newSize2, newSize2 < originalSize2);
    // make sure the max timestamp of compacted sstables is recorded properly after compaction
    assertMaxTimestamp(store, timestamp2);
}
*/
public static void assertMaxTimestamp(ColumnFamilyStore cfs, long maxTimestampExpected) {
    long maxTimestampObserved = Long.MIN_VALUE;
    for (SSTableReader sstable : cfs.getLiveSSTables())
        maxTimestampObserved = Math.max(sstable.getMaxTimestamp(), maxTimestampObserved);
    assertEquals(maxTimestampExpected, maxTimestampObserved);
}
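A typical call site, mirroring the tail of the commented-out test above (store, populate, KEYSPACE1 and CF_STANDARD1 are that test's fixtures, not globals):

// after a major compaction, the surviving sstables must still record the
// newest write timestamp that was inserted
long latestTimestamp = populate(KEYSPACE1, CF_STANDARD1, 0, 9, 3);
store.forceBlockingFlush();
CompactionManager.instance.performMaximal(store);
assertMaxTimestamp(store, latestTimestamp);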
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class CompactionsTest, method testNeedsCleanup.
@Test
@Ignore("making ranges based on the keys, not on the tokens")
public void testNeedsCleanup() {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_STANDARD1);
    store.clearUnsafe();
    // disable compaction while flushing
    store.disableAutoCompaction();
    // write three groups of nine keys: 1,   2,   ... 8,   9
    //                                  101, 102, ... 108, 109
    //                                  201, 202, ... 208, 209
    for (int i = 1; i < 10; i++) {
        insertRowWithKey(i);
        insertRowWithKey(i + 100);
        insertRowWithKey(i + 200);
    }
    store.forceBlockingFlush();
    assertEquals(1, store.getLiveSSTables().size());
    SSTableReader sstable = store.getLiveSSTables().iterator().next();
    // a contiguous range spans all data
    assertFalse(CompactionManager.needsCleanup(sstable, makeRanges(0, 209)));
    assertFalse(CompactionManager.needsCleanup(sstable, makeRanges(0, 210)));
    // separate ranges span all data
    assertFalse(CompactionManager.needsCleanup(sstable, makeRanges(0, 9, 100, 109, 200, 209)));
    assertFalse(CompactionManager.needsCleanup(sstable, makeRanges(0, 109, 200, 210)));
    assertFalse(CompactionManager.needsCleanup(sstable, makeRanges(0, 9, 100, 210)));
    // one range is missing completely
    assertTrue(CompactionManager.needsCleanup(sstable, makeRanges(100, 109, 200, 209)));
    assertTrue(CompactionManager.needsCleanup(sstable, makeRanges(0, 9, 200, 209)));
    assertTrue(CompactionManager.needsCleanup(sstable, makeRanges(0, 9, 100, 109)));
    // the beginning of one range is missing
    assertTrue(CompactionManager.needsCleanup(sstable, makeRanges(1, 9, 100, 109, 200, 209)));
    assertTrue(CompactionManager.needsCleanup(sstable, makeRanges(0, 9, 101, 109, 200, 209)));
    assertTrue(CompactionManager.needsCleanup(sstable, makeRanges(0, 9, 100, 109, 201, 209)));
    // the end of one range is missing
    assertTrue(CompactionManager.needsCleanup(sstable, makeRanges(0, 8, 100, 109, 200, 209)));
    assertTrue(CompactionManager.needsCleanup(sstable, makeRanges(0, 9, 100, 108, 200, 209)));
    assertTrue(CompactionManager.needsCleanup(sstable, makeRanges(0, 9, 100, 109, 200, 208)));
    // some ranges don't contain any data
    assertFalse(CompactionManager.needsCleanup(sstable, makeRanges(0, 0, 0, 9, 50, 51, 100, 109, 150, 199, 200, 209, 300, 301)));
    // same case, but with a middle range not covering some of the existing data
    assertFalse(CompactionManager.needsCleanup(sstable, makeRanges(0, 0, 0, 9, 50, 51, 100, 103, 150, 199, 200, 209, 300, 301)));
}
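The makeRanges helper is not reproduced on this page. A plausible sketch, assuming token ranges built directly from long values with Murmur3Partitioner.LongToken (the @Ignore note says the real helper builds ranges from keys rather than tokens, so treat this as illustrative only):

// Hypothetical: each consecutive pair of values becomes one (start, end] token range.
private static Collection<Range<Token>> makeRanges(long... boundaries) {
    assert boundaries.length % 2 == 0 : "boundaries come in (start, end) pairs";
    Collection<Range<Token>> ranges = new ArrayList<>(boundaries.length / 2);
    for (int i = 0; i < boundaries.length; i += 2)
        ranges.add(new Range<>(new Murmur3Partitioner.LongToken(boundaries[i]),
                               new Murmur3Partitioner.LongToken(boundaries[i + 1])));
    return ranges;
}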
Use of org.apache.cassandra.io.sstable.format.SSTableReader in project cassandra by apache.
From the class RealTransactionsTest, method testRewriteAborted.
@Test
public void testRewriteAborted() throws IOException {
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(REWRITE_ABORTED_CF);
    SSTableReader oldSSTable = getSSTable(cfs, 1);
    LifecycleTransaction txn = cfs.getTracker().tryModify(oldSSTable, OperationType.COMPACTION);
    replaceSSTable(cfs, txn, true);
    LogTransaction.waitForDeletions();
    assertFiles(oldSSTable.descriptor.directory.getPath(), new HashSet<>(oldSSTable.getAllFilePaths()));
}
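replaceSSTable(cfs, txn, true) is this test class's helper; the boolean makes the rewrite fail partway, so the transaction is aborted rather than committed. A minimal sketch of the rollback guarantee the closing assertFiles call depends on (the write step is elided; abort() is the real LifecycleTransaction API):

LifecycleTransaction txn = cfs.getTracker().tryModify(oldSSTable, OperationType.COMPACTION);
try {
    // ... write replacement sstables through the transaction, then fail ...
    throw new RuntimeException("simulated mid-rewrite failure");
} catch (RuntimeException e) {
    txn.abort(); // rolls back: oldSSTable stays live, partially written files are deleted
}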