Use of org.apache.cassandra.io.sstable.ISSTableScanner in project cassandra by apache.
The class LeveledCompactionStrategy, method getScanners.
public ScannerList getScanners(Collection<SSTableReader> sstables, Collection<Range<Token>> ranges) {
    Set<SSTableReader>[] sstablesPerLevel = manifest.getSStablesPerLevelSnapshot();
    Multimap<Integer, SSTableReader> byLevel = ArrayListMultimap.create();
    for (SSTableReader sstable : sstables) {
        int level = sstable.getSSTableLevel();
        // If an sstable is not on the manifest, it was recently added or removed,
        // so we add it to level -1 and create exclusive scanners for it - see below (CASSANDRA-9935)
        if (level >= sstablesPerLevel.length || !sstablesPerLevel[level].contains(sstable)) {
            logger.warn("Live sstable {} from level {} is not on corresponding level in the leveled manifest."
                        + " This is not a problem per se, but may indicate an orphaned sstable due to a failed"
                        + " compaction not cleaned up properly.",
                        sstable.getFilename(), level);
            level = -1;
        }
        byLevel.get(level).add(sstable);
    }
    List<ISSTableScanner> scanners = new ArrayList<ISSTableScanner>(sstables.size());
    try {
        for (Integer level : byLevel.keySet()) {
            // Level can be -1 when an sstable has been added to the Tracker but not yet to the
            // leveled manifest; since we don't know which level those sstables belong to yet,
            // we simply treat them the same as L0 sstables.
            if (level <= 0) {
                // L0 makes no guarantees about overlapping-ness. Just create a direct scanner for each.
                for (SSTableReader sstable : byLevel.get(level))
                    scanners.add(sstable.getScanner(ranges));
            } else {
                // Create a LeveledScanner that only opens one sstable at a time, in sorted order
                Collection<SSTableReader> intersecting = LeveledScanner.intersecting(byLevel.get(level), ranges);
                if (!intersecting.isEmpty()) {
                    // The ScannerList will be in charge of closing (and we close properly on errors)
                    @SuppressWarnings("resource")
                    ISSTableScanner scanner = new LeveledScanner(cfs.metadata(), intersecting, ranges);
                    scanners.add(scanner);
                }
            }
        }
    } catch (Throwable t) {
        ISSTableScanner.closeAllAndPropagate(scanners, t);
    }
    return new ScannerList(scanners);
}
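A minimal caller-side sketch, not taken from the Cassandra source (the strategy, sstables and ranges variables are assumed to be in scope): ScannerList implements AutoCloseable, so opening it in try-with-resources guarantees that every underlying ISSTableScanner is closed even if iteration fails partway through.

// Hedged usage sketch; strategy, sstables and ranges are assumptions for illustration
try (AbstractCompactionStrategy.ScannerList scannerList = strategy.getScanners(sstables, ranges)) {
    for (ISSTableScanner scanner : scannerList.scanners) {
        while (scanner.hasNext()) {
            try (UnfilteredRowIterator partition = scanner.next()) {
                // consume the partition here
            }
        }
    }
}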
Use of org.apache.cassandra.io.sstable.ISSTableScanner in project cassandra by apache.
The class PendingRepairManager, method getScanners.
@SuppressWarnings("resource")
synchronized Set<ISSTableScanner> getScanners(Collection<SSTableReader> sstables, Collection<Range<Token>> ranges) {
    if (sstables.isEmpty()) {
        return Collections.emptySet();
    }
    // Group the sstables by their pending repair session id
    Map<UUID, Set<SSTableReader>> sessionSSTables = new HashMap<>();
    for (SSTableReader sstable : sstables) {
        UUID sessionID = sstable.getSSTableMetadata().pendingRepair;
        checkPendingID(sessionID);
        sessionSSTables.computeIfAbsent(sessionID, k -> new HashSet<>()).add(sstable);
    }
    // Delegate scanner creation to each session's strategy; on failure, close whatever
    // scanners were already created before propagating the error
    Set<ISSTableScanner> scanners = new HashSet<>(sessionSSTables.size());
    try {
        for (Map.Entry<UUID, Set<SSTableReader>> entry : sessionSSTables.entrySet()) {
            scanners.addAll(getOrCreate(entry.getKey()).getScanners(entry.getValue(), ranges).scanners);
        }
    } catch (Throwable t) {
        ISSTableScanner.closeAllAndPropagate(scanners, t);
    }
    return scanners;
}
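Unlike the ScannerList variant above, this method hands back a raw Set<ISSTableScanner>, so the caller owns the scanners' lifecycle. A minimal sketch, assuming a PendingRepairManager named pendingRepairs is in scope; the consumption loop is illustrative:

// Hedged caller sketch; pendingRepairs, sstables and ranges are assumptions
Set<ISSTableScanner> scanners = pendingRepairs.getScanners(sstables, ranges);
try {
    for (ISSTableScanner scanner : scanners) {
        while (scanner.hasNext()) {
            try (UnfilteredRowIterator partition = scanner.next()) {
                // consume the partition here
            }
        }
    }
} catch (Throwable t) {
    // closes every scanner, then propagates t (matching the error handling above)
    ISSTableScanner.closeAllAndPropagate(scanners, t);
}
// close on the success path as well
for (ISSTableScanner scanner : scanners)
    scanner.close();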
Use of org.apache.cassandra.io.sstable.ISSTableScanner in project cassandra by apache.
The class NeverPurgeTest, method verifyContainsTombstones.
private void verifyContainsTombstones(Collection<SSTableReader> sstables, int expectedTombstoneCount) throws Exception {
    // Always run a major compaction before calling this, so exactly one sstable remains
    assertEquals(1, sstables.size());
    SSTableReader sstable = sstables.iterator().next();
    int tombstoneCount = 0;
    try (ISSTableScanner scanner = sstable.getScanner()) {
        while (scanner.hasNext()) {
            try (UnfilteredRowIterator iter = scanner.next()) {
                // Count partition-level deletions...
                if (!iter.partitionLevelDeletion().isLive())
                    tombstoneCount++;
                while (iter.hasNext()) {
                    Unfiltered atom = iter.next();
                    if (atom.isRow()) {
                        Row r = (Row) atom;
                        // ...row deletions...
                        if (!r.deletion().isLive())
                            tombstoneCount++;
                        // ...and individual cell tombstones
                        for (Cell<?> c : r.cells())
                            if (c.isTombstone())
                                tombstoneCount++;
                    }
                }
            }
        }
    }
    assertEquals(expectedTombstoneCount, tombstoneCount);
}
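A hedged usage sketch (the call site below is illustrative, not taken from NeverPurgeTest): since the helper presumes the table has already been collapsed to a single live sstable, a caller would typically force a major compaction first.

// Illustrative call site; the expected count depends on the tombstones the test wrote
cfs.forceMajorCompaction();
verifyContainsTombstones(cfs.getLiveSSTables(), 1);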
Use of org.apache.cassandra.io.sstable.ISSTableScanner in project cassandra by apache.
The class TTLExpiryTest, method testNoExpire.
@Test
public void testNoExpire() throws InterruptedException, IOException {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore("Standard1");
    cfs.truncateBlocking();
    cfs.disableAutoCompaction();
    MigrationManager.announceTableUpdate(cfs.metadata().unbuild().gcGraceSeconds(0).build(), true);
    long timestamp = System.currentTimeMillis();
    String key = "ttl";
    // Three updates written with a 1-second TTL, each flushed into its own sstable
    new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key)
        .add("col", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .add("col7", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    cfs.forceBlockingFlush();
    new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key)
        .add("col2", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    cfs.forceBlockingFlush();
    new RowUpdateBuilder(cfs.metadata(), timestamp, 1, key)
        .add("col3", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    cfs.forceBlockingFlush();
    // One update without a TTL - this partition must survive compaction
    String noTTLKey = "nottl";
    new RowUpdateBuilder(cfs.metadata(), timestamp, noTTLKey)
        .add("col311", ByteBufferUtil.EMPTY_BYTE_BUFFER)
        .build()
        .applyUnsafe();
    cfs.forceBlockingFlush();
    // Wait for the TTL to expire
    Thread.sleep(2000);
    assertEquals(4, cfs.getLiveSSTables().size());
    cfs.enableAutoCompaction(true);
    assertEquals(1, cfs.getLiveSSTables().size());
    // Only the no-TTL partition should remain in the compacted sstable
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(cfs.metadata()),
                                                 DataRange.allData(cfs.getPartitioner()),
                                                 SSTableReadsListener.NOOP_LISTENER);
    assertTrue(scanner.hasNext());
    while (scanner.hasNext()) {
        UnfilteredRowIterator iter = scanner.next();
        assertEquals(Util.dk(noTTLKey), iter.partitionKey());
    }
    scanner.close();
}
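One caveat worth noting: the test closes the scanner manually, so the handle would leak if an assertion threw first. A minimal sketch of the same scan rewritten with try-with-resources (behavior otherwise unchanged; this is an editorial variant, not the project's code):

try (ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(cfs.metadata()),
                                                  DataRange.allData(cfs.getPartitioner()),
                                                  SSTableReadsListener.NOOP_LISTENER)) {
    assertTrue(scanner.hasNext());
    while (scanner.hasNext()) {
        // scanner is closed even if this assertion fails
        assertEquals(Util.dk(noTTLKey), scanner.next().partitionKey());
    }
}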
Use of org.apache.cassandra.io.sstable.ISSTableScanner in project cassandra by apache.
The class CompactionsTest, method testDontPurgeAccidentally.
private void testDontPurgeAccidentally(String k, String cfname) throws InterruptedException {
    // This test catches the regression of CASSANDRA-2786
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
    TableMetadata table = cfs.metadata();
    // Disable compaction while flushing
    cfs.clearUnsafe();
    cfs.disableAutoCompaction();
    // Add a test row
    DecoratedKey key = Util.dk(k);
    RowUpdateBuilder rowUpdateBuilder = new RowUpdateBuilder(table, 0, key);
    rowUpdateBuilder.clustering("c").add("val", "a");
    rowUpdateBuilder.build().applyUnsafe();
    cfs.forceBlockingFlush();
    Collection<SSTableReader> sstablesBefore = cfs.getLiveSSTables();
    ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    assertFalse(partition.isEmpty());
    // Remove the cell, creating a tombstone
    RowUpdateBuilder deleteRowBuilder = new RowUpdateBuilder(table, 2, key);
    deleteRowBuilder.clustering("c").delete("val");
    deleteRowBuilder.build().applyUnsafe();
    partition = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, key).build());
    assertTrue(partition.iterator().next().cells().iterator().next().isTombstone());
    // Sleep one second so that the removal is indeed purgeable even with gcGraceSeconds == 0
    Thread.sleep(1000);
    cfs.forceBlockingFlush();
    // Compact only the sstables written after the first flush
    Collection<SSTableReader> sstablesAfter = cfs.getLiveSSTables();
    Collection<SSTableReader> toCompact = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : sstablesAfter)
        if (!sstablesBefore.contains(sstable))
            toCompact.add(sstable);
    Util.compact(cfs, toCompact);
    SSTableReader newSSTable = null;
    for (SSTableReader reader : cfs.getLiveSSTables()) {
        assert !toCompact.contains(reader);
        if (!sstablesBefore.contains(reader))
            newSSTable = reader;
    }
    // We cannot read the data through a normal query, since {@link ReadCommand#withoutPurgeableTombstones}
    // would purge droppable tombstones. We just want to check here that compaction did *NOT* drop the
    // tombstone, so we read from the sstable directly instead.
    ISSTableScanner scanner = newSSTable.getScanner();
    assertTrue(scanner.hasNext());
    UnfilteredRowIterator rowIt = scanner.next();
    assertTrue(rowIt.hasNext());
    Unfiltered unfiltered = rowIt.next();
    assertTrue(unfiltered.isRow());
    Row row = (Row) unfiltered;
    assertTrue(row.cells().iterator().next().isTombstone());
    assertFalse(rowIt.hasNext());
    assertFalse(scanner.hasNext());
}
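A minimal cleanup sketch (an editorial variant, not part of the original test): both the scanner and the partition iterator are AutoCloseable, so the direct-read block above could also be written with try-with-resources to avoid leaking file handles when an assertion fails.

try (ISSTableScanner scanner = newSSTable.getScanner()) {
    assertTrue(scanner.hasNext());
    try (UnfilteredRowIterator rowIt = scanner.next()) {
        // Both resources are closed whether or not the assertions pass
        Unfiltered unfiltered = rowIt.next();
        assertTrue(unfiltered.isRow());
        assertTrue(((Row) unfiltered).cells().iterator().next().isTombstone());
    }
}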