Example usage of org.apache.cassandra.db.RangeTombstone in the Apache Cassandra project:
class CompactionsCQLTest, method testIndexedReaderRT.
@Test
public void testIndexedReaderRT() throws Throwable {
    // Write enough data that reads go through an IndexedReader, then verify the read
    // fails on a corrupt range tombstone (one with a negative local deletion time).
    DatabaseDescriptor.setCorruptedTombstoneStrategy(Config.CorruptedTombstoneStrategy.exception);
    // Renamed from "maxSizePreKiB": this is the previous global column index size,
    // saved so it can be restored after the test.
    final int previousColumnIndexSizeKiB = DatabaseDescriptor.getColumnIndexSizeInKiB();
    // Shrink the column index size so the wide partition written below gets indexed.
    DatabaseDescriptor.setColumnIndexSize(1024);
    try {
        prepareWide();
        // LDTs are set internally, never by users, so a negative value marks corruption.
        RangeTombstone rt = new RangeTombstone(Slice.ALL, new DeletionTime(System.currentTimeMillis(), -1));
        RowUpdateBuilder rub = new RowUpdateBuilder(getCurrentColumnFamilyStore().metadata(), System.currentTimeMillis() * 1000, 22).clustering(33).addRangeTombstone(rt);
        rub.build().apply();
        getCurrentColumnFamilyStore().forceBlockingFlush();
        readAndValidate(true);
        readAndValidate(false);
    } finally {
        // Restore the global setting even when validation throws, so a failure here
        // cannot leak a tiny column index size into subsequently-run tests.
        DatabaseDescriptor.setColumnIndexSize(previousColumnIndexSizeKiB);
    }
}
Example usage of org.apache.cassandra.db.RangeTombstone in the Apache Cassandra project:
class CompactionsCQLTest, method testCompactionInvalidRTs.
@Test
public void testCompactionInvalidRTs() throws Throwable {
    // These tests require the 'exception' strategy; set it explicitly so a changed
    // default in test/conf/cassandra.yaml cannot silently break them.
    DatabaseDescriptor.setCorruptedTombstoneStrategy(Config.CorruptedTombstoneStrategy.exception);
    prepare();
    // A negative local deletion time is invalid: LDTs are internal and never user-supplied.
    DeletionTime invalidDeletionTime = new DeletionTime(System.currentTimeMillis(), -1);
    RangeTombstone corruptTombstone = new RangeTombstone(Slice.ALL, invalidDeletionTime);
    new RowUpdateBuilder(getCurrentColumnFamilyStore().metadata(), System.currentTimeMillis() * 1000, 22)
        .clustering(33)
        .addRangeTombstone(corruptTombstone)
        .build()
        .apply();
    getCurrentColumnFamilyStore().forceBlockingFlush();
    // Both compaction and reads (indexed and non-indexed) must reject the corrupt tombstone.
    compactAndValidate();
    readAndValidate(true);
    readAndValidate(false);
}
Example usage of org.apache.cassandra.db.RangeTombstone in the Apache Cassandra project:
class DataResolverTest, method withExclusiveStartIf.
// Returns a copy of the given range tombstone whose start bound is exclusive if the
// condition holds; otherwise returns the tombstone unchanged.
private static RangeTombstone withExclusiveStartIf(RangeTombstone rt, boolean condition) {
    if (!condition)
        return rt;
    Slice slice = rt.deletedSlice();
    // Rebuild the slice with an exclusive start, keeping the end bound and deletion time.
    ClusteringBound<?> newStart = ClusteringBound.create(Kind.EXCL_START_BOUND, slice.start());
    // The original ended with "condition ? new RangeTombstone(...) : rt", but condition
    // is always true past the guard above — the ternary was dead code.
    return new RangeTombstone(Slice.make(newStart, slice.end()), rt.deletionTime());
}
Example usage of org.apache.cassandra.db.RangeTombstone in the Apache Cassandra project:
class DataResolverTest, method resolveRangeTombstonesOnBoundary.
/*
 * We want responses to merge on tombstone boundary. So we'll merge 2 "streams":
 * 1: [1, 2)(3, 4](5, 6] 2
 * 2: [2, 3][4, 5) 1
 * which tests all combinations of open/close boundaries (open/close, close/open, open/open, close/close).
 *
 * Note that, because DataResolver returns a "filtered" iterator, it should resolve into an empty iterator.
 * However, what should be sent to each source depends on the exact timestamps of each tombstone, and we
 * test a few combinations.
 */
private void resolveRangeTombstonesOnBoundary(long timestamp1, long timestamp2) {
// Two replicas, resolved at consistency level ALL.
EndpointsForRange replicas = makeReplicas(2);
DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
InetAddressAndPort peer1 = replicas.get(0).endpoint();
InetAddressAndPort peer2 = replicas.get(1).endpoint();
// 1st "stream": three tombstones [1, 2), (3, 4], (5, 6], all at timestamp1.
RangeTombstone one_two = tombstone("1", true, "2", false, timestamp1, nowInSec);
RangeTombstone three_four = tombstone("3", false, "4", true, timestamp1, nowInSec);
RangeTombstone five_six = tombstone("5", false, "6", true, timestamp1, nowInSec);
UnfilteredPartitionIterator iter1 = iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).addRangeTombstone(one_two).addRangeTombstone(three_four).addRangeTombstone(five_six).buildUpdate());
// 2nd "stream": two tombstones [2, 3], [4, 5), both at timestamp2.
RangeTombstone two_three = tombstone("2", true, "3", true, timestamp2, nowInSec);
RangeTombstone four_five = tombstone("4", true, "5", false, timestamp2, nowInSec);
UnfilteredPartitionIterator iter2 = iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).addRangeTombstone(two_three).addRangeTombstone(four_five).buildUpdate());
// Feed both replica responses into the resolver, then resolve and drain.
resolver.preprocess(response(command, peer1, iter1));
resolver.preprocess(response(command, peer2, iter2));
// No results, we've only reconciled tombstones.
try (PartitionIterator data = resolver.resolve()) {
assertFalse(data.hasNext());
}
// Resolution must have produced one repair mutation for each of the two peers.
assertEquals(2, readRepair.sent.size());
Mutation msg1 = readRepair.getForEndpoint(peer1);
assertRepairMetadata(msg1);
assertRepairContainsNoColumns(msg1);
Mutation msg2 = readRepair.getForEndpoint(peer2);
assertRepairMetadata(msg2);
assertRepairContainsNoColumns(msg2);
// Both streams are mostly complementary, so they will roughly get the ranges of the other stream. One subtlety is
// around the value "4" however, as it's included by both streams.
// So for a given stream, unless the other stream has a strictly higher timestamp, the value 4 will be excluded
// from whatever range it receives as repair since the stream already covers it.
// Message to peer1 contains peer2 ranges
assertRepairContainsDeletions(msg1, null, two_three, withExclusiveStartIf(four_five, timestamp1 >= timestamp2));
// Message to peer2 contains peer1 ranges
assertRepairContainsDeletions(msg2, null, one_two, withExclusiveEndIf(three_four, timestamp2 >= timestamp1), five_six);
}
Example usage of org.apache.cassandra.db.RangeTombstone in the Apache Cassandra project:
class DataResolverTest, method withExclusiveEndIf.
// Returns a copy of the given range tombstone whose end bound is exclusive if the
// condition holds; otherwise returns the tombstone unchanged.
private static RangeTombstone withExclusiveEndIf(RangeTombstone rt, boolean condition) {
    if (!condition)
        return rt;
    Slice slice = rt.deletedSlice();
    // Rebuild the slice with an exclusive end, keeping the start bound and deletion time.
    ClusteringBound<?> newEnd = ClusteringBound.create(Kind.EXCL_END_BOUND, slice.end());
    // The original ended with "condition ? new RangeTombstone(...) : rt", but condition
    // is always true past the guard above — the ternary was dead code.
    return new RangeTombstone(Slice.make(slice.start(), newEnd), rt.deletionTime());
}
Aggregations