Use of org.apache.cassandra.db.ReadExecutionController in project cassandra by Apache.
From class ViewBuilderTask, method buildKey:
@SuppressWarnings("resource")
private void buildKey(DecoratedKey key)
{
    ReadQuery selectQuery = view.getReadQuery();
    if (!selectQuery.selectsKey(key))
    {
        logger.trace("Skipping {}, view query filters", key);
        return;
    }

    int nowInSec = FBUtilities.nowInSeconds();
    SinglePartitionReadCommand command = view.getSelectStatement().internalReadForView(key, nowInSec);

    // We're rebuilding everything from what's on disk, so we read everything, consider that as new updates
    // and pretend that there is nothing pre-existing.
    UnfilteredRowIterator empty = UnfilteredRowIterators.noRowsIterator(baseCfs.metadata(), key, Rows.EMPTY_STATIC_ROW, DeletionTime.LIVE, false);

    try (ReadExecutionController orderGroup = command.executionController();
         UnfilteredRowIterator data = UnfilteredPartitionIterators.getOnlyElement(command.executeLocally(orderGroup), command))
    {
        Iterator<Collection<Mutation>> mutations = baseCfs.keyspace.viewManager.forTable(baseCfs.metadata.id)
                                                                               .generateViewUpdates(Collections.singleton(view), data, empty, nowInSec, true);
        AtomicLong noBase = new AtomicLong(Long.MAX_VALUE);
        mutations.forEachRemaining(m -> StorageProxy.mutateMV(key.getKey(), m, true, noBase, nanoTime()));
    }
}
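The snippet relies on the fact that, for a SinglePartitionReadCommand, executeLocally yields exactly one partition, which is why UnfilteredPartitionIterators.getOnlyElement is safe here. Below is a minimal sketch of that controller lifecycle, using only the calls shown above; the helper name and the processing loop are hypothetical, and a running Cassandra environment (with imports as in ViewBuilderTask) is assumed:

// Hypothetical helper illustrating the controller lifecycle from buildKey:
// the ReadExecutionController and the iterator it guards are opened and
// closed together via try-with-resources.
private static void consumePartition(SinglePartitionReadCommand command)
{
    try (ReadExecutionController controller = command.executionController();
         UnfilteredRowIterator partition = UnfilteredPartitionIterators.getOnlyElement(command.executeLocally(controller), command))
    {
        while (partition.hasNext())
            partition.next(); // each element is a row or a range tombstone marker
    }
}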
Use of org.apache.cassandra.db.ReadExecutionController in project cassandra by Apache.
From class ThrottledUnfilteredIteratorTest, method testThrottledIteratorWithRangeDeletions:
@Test
public void testThrottledIteratorWithRangeDeletions() throws Exception
{
    Keyspace keyspace = Keyspace.open(KSNAME);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CFNAME);

    // Inserting data
    String key = "k1";
    UpdateBuilder builder;

    builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(0);
    for (int i = 0; i < 40; i += 2)
        builder.newRow(i).add("val", i);
    builder.applyUnsafe();

    new RowUpdateBuilder(cfs.metadata(), 1, key).addRangeTombstone(10, 22).build().applyUnsafe();

    cfs.forceBlockingFlush();

    builder = UpdateBuilder.create(cfs.metadata(), key).withTimestamp(2);
    for (int i = 1; i < 40; i += 2)
        builder.newRow(i).add("val", i);
    builder.applyUnsafe();

    new RowUpdateBuilder(cfs.metadata(), 3, key).addRangeTombstone(19, 27).build().applyUnsafe();

    // We don't flush, so that we test with a range tombstone both in the memtable and in an sstable

    // Queries by name
    int[] live = new int[]{ 4, 9, 11, 17, 28 };
    int[] dead = new int[]{ 12, 19, 21, 24, 27 };

    AbstractReadCommandBuilder.PartitionRangeBuilder cmdBuilder = Util.cmd(cfs);
    ReadCommand cmd = cmdBuilder.build();

    for (int batchSize = 2; batchSize <= 40; batchSize++)
    {
        List<UnfilteredRowIterator> unfilteredRowIterators = new LinkedList<>();

        try (ReadExecutionController executionController = cmd.executionController();
             UnfilteredPartitionIterator iterator = cmd.executeLocally(executionController))
        {
            assertTrue(iterator.hasNext());
            Iterator<UnfilteredRowIterator> throttled = ThrottledUnfilteredIterator.throttle(iterator, batchSize);
            while (throttled.hasNext())
            {
                UnfilteredRowIterator next = throttled.next();
                ImmutableBTreePartition materializedPartition = ImmutableBTreePartition.create(next);
                int unfilteredCount = Iterators.size(materializedPartition.unfilteredIterator());
                System.out.println("batchsize " + batchSize + " unfilteredCount " + unfilteredCount + " materializedPartition " + materializedPartition);

                if (throttled.hasNext())
                {
                    if (unfilteredCount != batchSize)
                    {
                        // when there is an extra unfiltered, it must be a close-bound marker
                        assertEquals(batchSize + 1, unfilteredCount);
                        Unfiltered last = Iterators.getLast(materializedPartition.unfilteredIterator());
                        assertTrue(last.isRangeTombstoneMarker());
                        RangeTombstoneMarker marker = (RangeTombstoneMarker) last;
                        assertFalse(marker.isBoundary());
                        assertTrue(marker.isClose(false));
                    }
                }
                else
                {
                    // only the last batch can be smaller than batchSize
                    assertTrue(unfilteredCount <= batchSize + 1);
                }
                unfilteredRowIterators.add(materializedPartition.unfilteredIterator());
            }
            assertFalse(iterator.hasNext());
        }

        // Verify throttled data after merge
        Partition partition = ImmutableBTreePartition.create(UnfilteredRowIterators.merge(unfilteredRowIterators));
        int nowInSec = FBUtilities.nowInSeconds();
        for (int i : live)
            assertTrue("Row " + i + " should be live",
                       partition.getRow(Clustering.make(ByteBufferUtil.bytes(i))).hasLiveData(nowInSec, cfs.metadata().enforceStrictLiveness()));
        for (int i : dead)
            assertFalse("Row " + i + " shouldn't be live",
                        partition.getRow(Clustering.make(ByteBufferUtil.bytes(i))).hasLiveData(nowInSec, cfs.metadata().enforceStrictLiveness()));
    }
}
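Stripped of the assertions, the throttling pattern the test exercises is simply: open a controller, execute locally, wrap the partition iterator with ThrottledUnfilteredIterator.throttle, and materialize each batch before the controller closes. A hedged sketch of that pattern follows; the helper name is hypothetical, imports are as in the test above, and all API calls appear in the test itself:

// Hypothetical helper distilling the pattern under test: split a local read
// into materialized batches of at most batchSize unfiltereds (plus, at most,
// one extra close-bound marker, as the assertions above verify).
private static List<ImmutableBTreePartition> readInBatches(ReadCommand cmd, int batchSize)
{
    List<ImmutableBTreePartition> batches = new ArrayList<>();
    try (ReadExecutionController controller = cmd.executionController();
         UnfilteredPartitionIterator partitions = cmd.executeLocally(controller))
    {
        Iterator<UnfilteredRowIterator> throttled = ThrottledUnfilteredIterator.throttle(partitions, batchSize);
        while (throttled.hasNext())
            batches.add(ImmutableBTreePartition.create(throttled.next()));
    }
    return batches;
}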
Use of org.apache.cassandra.db.ReadExecutionController in project cassandra by Apache.
From class CompactionsTest, method testRangeTombstones:
@Test
public void testRangeTombstones()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard2");
    cfs.clearUnsafe();

    // disable compaction while flushing
    cfs.disableAutoCompaction();

    final TableMetadata table = cfs.metadata();
    Directories dir = cfs.getDirectories();

    ArrayList<DecoratedKey> keys = new ArrayList<DecoratedKey>();
    for (int i = 0; i < 4; i++)
    {
        keys.add(Util.dk(Integer.toString(i)));
    }

    int[] dks = { 0, 1, 3 };
    writeSSTableWithRangeTombstoneMaskingOneColumn(cfs, table, dks);

    int[] dkays = { 0, 1, 2, 3 };
    writeSSTableWithRangeTombstoneMaskingOneColumn(cfs, table, dkays);

    Collection<SSTableReader> toCompact = cfs.getLiveSSTables();
    assert toCompact.size() == 2;

    Util.compact(cfs, toCompact);
    assertEquals(1, cfs.getLiveSSTables().size());

    // Now assert we do have the 4 keys
    assertEquals(4, Util.getAll(Util.cmd(cfs).build()).size());

    ArrayList<DecoratedKey> k = new ArrayList<>();
    for (FilteredPartition p : Util.getAll(Util.cmd(cfs).build()))
    {
        k.add(p.partitionKey());
        final SinglePartitionReadCommand command = SinglePartitionReadCommand.create(cfs.metadata(),
                                                                                     FBUtilities.nowInSeconds(),
                                                                                     ColumnFilter.all(cfs.metadata()),
                                                                                     RowFilter.NONE,
                                                                                     DataLimits.NONE,
                                                                                     p.partitionKey(),
                                                                                     new ClusteringIndexSliceFilter(Slices.ALL, false));
        try (ReadExecutionController executionController = command.executionController();
             PartitionIterator iterator = command.executeInternal(executionController))
        {
            try (RowIterator rowIterator = iterator.next())
            {
                Row row = rowIterator.next();
                Cell<?> cell = row.getCell(cfs.metadata().getColumn(new ColumnIdentifier("val", false)));
                assertEquals(ByteBufferUtil.bytes("a"), cell.buffer());
                assertEquals(3, cell.timestamp());
                ValueAccessors.assertDataNotEquals(ByteBufferUtil.bytes("01"), row.clustering().getRawValues()[0]);
                ValueAccessors.assertDataEquals(ByteBufferUtil.bytes("02"), row.clustering().getRawValues()[0]);
            }
        }
    }

    for (SSTableReader sstable : cfs.getLiveSSTables())
    {
        StatsMetadata stats = sstable.getSSTableMetadata();
        assertEquals(ByteBufferUtil.bytes("0"), stats.minClusteringValues.get(0));
        assertEquals(ByteBufferUtil.bytes("b"), stats.maxClusteringValues.get(0));
    }

    assertEquals(keys, k);
}
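Note that this test uses executeInternal rather than executeLocally: executeInternal returns a PartitionIterator of filtered rows (deleted and expired data already resolved away), whereas executeLocally returns an UnfilteredPartitionIterator that still carries range tombstone markers. A sketch of the same read reduced to counting rows follows; the helper is hypothetical, but every API call in it appears in the test above:

// Hypothetical helper: count the live rows of one partition using the same
// command construction and executeInternal call as the test above.
private static int countRows(ColumnFamilyStore cfs, DecoratedKey key)
{
    SinglePartitionReadCommand command = SinglePartitionReadCommand.create(cfs.metadata(),
                                                                           FBUtilities.nowInSeconds(),
                                                                           ColumnFilter.all(cfs.metadata()),
                                                                           RowFilter.NONE,
                                                                           DataLimits.NONE,
                                                                           key,
                                                                           new ClusteringIndexSliceFilter(Slices.ALL, false));
    int count = 0;
    try (ReadExecutionController controller = command.executionController();
         PartitionIterator partitions = command.executeInternal(controller))
    {
        while (partitions.hasNext())
        {
            try (RowIterator rows = partitions.next())
            {
                while (rows.hasNext())
                {
                    rows.next();
                    count++;
                }
            }
        }
    }
    return count;
}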
Use of org.apache.cassandra.db.ReadExecutionController in project cassandra by Apache.
From class Ballots, method latestBallotFromBaseTable:
public static long latestBallotFromBaseTable(DecoratedKey key, TableMetadata metadata)
{
    SinglePartitionReadCommand cmd = SinglePartitionReadCommand.create(metadata, 0, key, Slice.ALL);
    ImmutableBTreePartition partition;
    try (ReadExecutionController controller = cmd.executionController();
         UnfilteredPartitionIterator partitions = cmd.executeLocally(controller))
    {
        if (!partitions.hasNext())
            return 0L;

        try (UnfilteredRowIterator rows = partitions.next())
        {
            partition = ImmutableBTreePartition.create(rows);
        }
    }
    return latestBallot(partition);
}
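The design choice worth noting here is that the partition is copied into an ImmutableBTreePartition inside the try-with-resources: the row data streamed by executeLocally is only guaranteed to be valid while the ReadExecutionController is open, so anything that must outlive the read is materialized first. Restated as a reusable sketch (helper name hypothetical, same calls as above):

// Hypothetical helper generalizing latestBallotFromBaseTable: read one
// partition locally and detach it from the controller by copying it into an
// ImmutableBTreePartition before the try-with-resources closes.
private static ImmutableBTreePartition readAndDetach(SinglePartitionReadCommand cmd)
{
    try (ReadExecutionController controller = cmd.executionController();
         UnfilteredPartitionIterator partitions = cmd.executeLocally(controller))
    {
        if (!partitions.hasNext())
            return null; // no data for this key

        try (UnfilteredRowIterator rows = partitions.next())
        {
            return ImmutableBTreePartition.create(rows);
        }
    }
}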