Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From class AntiCompactionTest, method antiCompactOne:
private void antiCompactOne(long repairedAt, UUID pendingRepair) throws Exception {
    assert repairedAt != UNREPAIRED_SSTABLE || pendingRepair != null;
    ColumnFamilyStore store = prepareColumnFamilyStore();
    Collection<SSTableReader> sstables = getUnrepairedSSTables(store);
    assertEquals(store.getLiveSSTables().size(), sstables.size());
    Range<Token> range = new Range<Token>(new BytesToken("0".getBytes()), new BytesToken("4".getBytes()));
    List<Range<Token>> ranges = Arrays.asList(range);
    int repairedKeys = 0;
    int pendingKeys = 0;
    int nonRepairedKeys = 0;
    // Run anticompaction over the given range, marking the affected sstables either
    // repaired or pending repair depending on the arguments.
    try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.ANTICOMPACTION);
         Refs<SSTableReader> refs = Refs.ref(sstables)) {
        if (txn == null)
            throw new IllegalStateException();
        UUID parentRepairSession = UUID.randomUUID();
        CompactionManager.instance.performAnticompaction(store, ranges, refs, txn, repairedAt, pendingRepair, parentRepairSession);
    }
    assertEquals(2, store.getLiveSSTables().size());
    // Keys inside the anticompacted range must end up in repaired/pending sstables,
    // everything else stays in unrepaired ones.
    for (SSTableReader sstable : store.getLiveSSTables()) {
        try (ISSTableScanner scanner = sstable.getScanner()) {
            while (scanner.hasNext()) {
                UnfilteredRowIterator row = scanner.next();
                if (sstable.isRepaired() || sstable.isPendingRepair()) {
                    assertTrue(range.contains(row.partitionKey().getToken()));
                    repairedKeys += sstable.isRepaired() ? 1 : 0;
                    pendingKeys += sstable.isPendingRepair() ? 1 : 0;
                } else {
                    assertFalse(range.contains(row.partitionKey().getToken()));
                    nonRepairedKeys++;
                }
            }
        }
    }
    for (SSTableReader sstable : store.getLiveSSTables()) {
        assertFalse(sstable.isMarkedCompacted());
        assertEquals(1, sstable.selfRef().globalCount());
    }
    assertEquals(0, store.getTracker().getCompacting().size());
    assertEquals(repairedAt != UNREPAIRED_SSTABLE ? 4 : 0, repairedKeys);
    assertEquals(pendingRepair != NO_PENDING_REPAIR ? 4 : 0, pendingKeys);
    assertEquals(6, nonRepairedKeys);
}
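For context, callers exercise both branches of this helper: one with a real repairedAt timestamp, one with a pending incremental repair session. The sketch below shows how such invocations might look; the test method names and the literal timestamp are illustrative assumptions, not part of the excerpt.

// Illustrative sketch only: driving the helper above from tests.
// The method names and the literal repairedAt value are assumptions.
@Test
public void antiCompactOneRepairedAt() throws Exception {
    antiCompactOne(1000, NO_PENDING_REPAIR); // mark the range as repaired at a fixed time
}

@Test
public void antiCompactOnePendingRepair() throws Exception {
    antiCompactOne(UNREPAIRED_SSTABLE, UUID.randomUUID()); // mark the range as pending an incremental repair
}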
Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From class RepairedDataTombstonesTest, method readTestPartitionTombstones:
@Test
public void readTestPartitionTombstones() throws Throwable {
    createTable("create table %s (id int, id2 int, t text, t2 text, primary key (id, id2)) with gc_grace_seconds=0 and compaction = {'class':'SizeTieredCompactionStrategy', 'only_purge_repaired_tombstones':true}");
    // Write partition tombstones for keys 0..9 and mark the resulting sstable repaired.
    for (int i = 0; i < 10; i++) {
        execute("delete from %s where id=?", i);
    }
    flush();
    SSTableReader repairedSSTable = getCurrentColumnFamilyStore().getSSTables(SSTableSet.LIVE).iterator().next();
    repair(getCurrentColumnFamilyStore(), repairedSSTable);
    Thread.sleep(2000);
    // Write partition tombstones for keys 10..19 into an unrepaired sstable.
    for (int i = 10; i < 20; i++) {
        execute("delete from %s where id=?", i);
    }
    flush();
    Thread.sleep(1000);
    // With gc_grace_seconds=0 and only_purge_repaired_tombstones=true, the repaired
    // tombstones (keys 0..9) are purgeable on read, so only the unrepaired ones (10..19) show up.
    ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore()).build();
    int partitionsFound = 0;
    try (ReadExecutionController executionController = cmd.executionController();
         UnfilteredPartitionIterator iterator = cmd.executeLocally(executionController)) {
        while (iterator.hasNext()) {
            partitionsFound++;
            try (UnfilteredRowIterator rowIter = iterator.next()) {
                int val = ByteBufferUtil.toInt(rowIter.partitionKey().getKey());
                assertTrue("val=" + val, val >= 10 && val < 20);
            }
        }
    }
    assertEquals(10, partitionsFound);
}
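The snippets in this group all follow the same local-read shape: build a ReadCommand, open a ReadExecutionController, execute locally, then walk partitions and rows with try-with-resources so every UnfilteredRowIterator is closed. The condensed sketch below shows that pattern as a generic helper; the name countRowsLocally is an illustration, not taken from the excerpt.

// Generic sketch of the read pattern used throughout these tests (illustrative only).
private int countRowsLocally(ColumnFamilyStore cfs) {
    ReadCommand cmd = Util.cmd(cfs).build();
    int rows = 0;
    try (ReadExecutionController controller = cmd.executionController();
         UnfilteredPartitionIterator partitions = cmd.executeLocally(controller)) {
        while (partitions.hasNext()) {
            // Each partition iterator must be closed, hence the nested try-with-resources.
            try (UnfilteredRowIterator partition = partitions.next()) {
                while (partition.hasNext()) {
                    if (partition.next().isRow())
                        rows++;
                }
            }
        }
    }
    return rows;
}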
Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From class RepairedDataTombstonesTest, method verify2:
private void verify2(int key, int expectedRows, int minVal, int maxVal, boolean includePurgeable) {
    // Read a single partition. includePurgeable selects the read path: queryStorage reads the
    // underlying storage as-is, while executeLocally goes through the normal local read path,
    // which can drop tombstones that are already purgeable.
    ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore(), Util.dk(ByteBufferUtil.bytes(key))).build();
    int foundRows = 0;
    try (ReadExecutionController executionController = cmd.executionController();
         UnfilteredPartitionIterator iterator = includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), executionController) : cmd.executeLocally(executionController)) {
        while (iterator.hasNext()) {
            try (UnfilteredRowIterator rowIter = iterator.next()) {
                while (rowIter.hasNext()) {
                    AbstractRow row = (AbstractRow) rowIter.next();
                    for (int i = 0; i < row.clustering().size(); i++) {
                        foundRows++;
                        int val = ByteBufferUtil.toInt(row.clustering().get(i));
                        assertTrue("val=" + val, val >= minVal && val < maxVal);
                    }
                }
            }
        }
    }
    assertEquals(expectedRows, foundRows);
}
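A hedged usage sketch follows; the key, expected row count, and value range are illustrative assumptions rather than values taken from the test class.

// Illustrative only: read partition key 1 through the purging path (executeLocally)
// and expect ten clustering values in the range [10, 20).
verify2(1, 10, 10, 20, false);
// The same partition read straight from storage (queryStorage), including purgeable data.
verify2(1, 10, 10, 20, true);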
Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From class RepairedDataTombstonesTest, method verify:
private void verify(int expectedRows, int minVal, int maxVal, boolean includePurgeable) {
    ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore()).build();
    int foundRows = 0;
    try (ReadExecutionController executionController = cmd.executionController();
         UnfilteredPartitionIterator iterator = includePurgeable ? cmd.queryStorage(getCurrentColumnFamilyStore(), executionController) : cmd.executeLocally(executionController)) {
        while (iterator.hasNext()) {
            try (UnfilteredRowIterator rowIter = iterator.next()) {
                // partition key 999 is 'live' and used to avoid sstables from being dropped
                if (!rowIter.partitionKey().equals(Util.dk(ByteBufferUtil.bytes(999)))) {
                    while (rowIter.hasNext()) {
                        AbstractRow row = (AbstractRow) rowIter.next();
                        for (int i = 0; i < row.clustering().size(); i++) {
                            foundRows++;
                            int val = ByteBufferUtil.toInt(row.clustering().get(i));
                            assertTrue("val=" + val, val >= minVal && val < maxVal);
                        }
                    }
                }
            }
        }
    }
    assertEquals(expectedRows, foundRows);
}
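The includePurgeable flag is what distinguishes the two read paths here: queryStorage returns whatever is still on disk, while executeLocally applies the normal purging logic, which with only_purge_repaired_tombstones only drops tombstones that sit in repaired sstables. The sketch below shows how a test might use that to check purging after marking an sstable repaired; the row counts and value range are illustrative assumptions.

// Illustrative only: after marking an sstable with row tombstones repaired and letting
// gc_grace expire, the purging read path should drop those tombstones while raw storage
// still contains them.
verify(0, 10, 20, false);  // executeLocally: repaired tombstones purged on read
verify(10, 10, 20, true);  // queryStorage: tombstones still present on disk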
Use of org.apache.cassandra.db.rows.UnfilteredRowIterator in project cassandra by apache.
From class CassandraIndexTest, method assertIndexRowTtl:
// This is slightly annoying, but we cannot read rows via the methods in Util, as
// ReadCommand#executeInternal uses metadata retrieved via the tableId, which the index
// CFS inherits from the base CFS. That gives it the 'wrong' partitioner (the index table
// uses LocalPartitioner, the base table a real one), so we cannot read from the index
// table with executeInternal.
private void assertIndexRowTtl(ColumnFamilyStore indexCfs, int indexedValue, int ttl) throws Throwable {
    DecoratedKey indexKey = indexCfs.decorateKey(ByteBufferUtil.bytes(indexedValue));
    ClusteringIndexFilter filter = new ClusteringIndexSliceFilter(Slices.with(indexCfs.metadata().comparator, Slice.ALL), false);
    SinglePartitionReadCommand command = SinglePartitionReadCommand.create(indexCfs.metadata(), FBUtilities.nowInSeconds(), indexKey, ColumnFilter.all(indexCfs.metadata()), filter);
    try (ReadExecutionController executionController = command.executionController();
         UnfilteredRowIterator iter = command.queryMemtableAndDisk(indexCfs, executionController)) {
        while (iter.hasNext()) {
            Unfiltered unfiltered = iter.next();
            assert (unfiltered.isRow());
            Row indexRow = (Row) unfiltered;
            assertEquals(ttl, indexRow.primaryKeyLivenessInfo().ttl());
        }
    }
}
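A hedged usage sketch of the helper above; the indexed value, TTL, and the way the index's backing ColumnFamilyStore is obtained are assumptions about the surrounding test and are not shown in the excerpt.

// Illustrative only: after inserting a row whose indexed column value is 42 with TTL 60
// and flushing, the corresponding index row should carry the same TTL.
// 'indexCfs' is assumed to be the backing ColumnFamilyStore of the secondary index.
assertIndexRowTtl(indexCfs, 42, 60);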