Use of org.apache.cassandra.db.RangeTombstone in project cassandra by apache.
From the class DataResolverTest, the method testResolveDisjointMultipleRowsWithRangeTombstones:
@Test
public void testResolveDisjointMultipleRowsWithRangeTombstones() {
    EndpointsForRange replicas = makeReplicas(4);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
    RangeTombstone tombstone1 = tombstone("1", "11", 1, nowInSec);
    RangeTombstone tombstone2 = tombstone("3", "31", 1, nowInSec);
    PartitionUpdate update = new RowUpdateBuilder(cfm, nowInSec, 1L, dk).addRangeTombstone(tombstone1)
                                                                        .addRangeTombstone(tombstone2)
                                                                        .buildUpdate();
    // peer1 has both range tombstones and nothing else
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    UnfilteredPartitionIterator iter1 = iter(update);
    resolver.preprocess(response(command, peer1, iter1));
    // peer2 has a row that is not covered by any range tombstone
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    UnfilteredPartitionIterator iter2 = iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("0").add("c1", "v0").buildUpdate());
    resolver.preprocess(response(command, peer2, iter2));
    // peer3 has a row that is covered by a range tombstone
    InetAddressAndPort peer3 = replicas.get(2).endpoint();
    UnfilteredPartitionIterator iter3 = iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("10").add("c2", "v1").buildUpdate());
    resolver.preprocess(response(command, peer3, iter3));
    // peer4 has a row in a range covered by a range tombstone, but the row is newer
    InetAddressAndPort peer4 = replicas.get(3).endpoint();
    UnfilteredPartitionIterator iter4 = iter(new RowUpdateBuilder(cfm, nowInSec, 2L, dk).clustering("3").add("one", "A").buildUpdate());
    resolver.preprocess(response(command, peer4, iter4));
    try (PartitionIterator data = resolver.resolve()) {
        try (RowIterator rows = data.next()) {
            Row row = rows.next();
            assertClustering(cfm, row, "0");
            assertColumns(row, "c1");
            assertColumn(cfm, row, "c1", "v0", 0);
            row = rows.next();
            assertClustering(cfm, row, "3");
            assertColumns(row, "one");
            assertColumn(cfm, row, "one", "A", 2);
            assertFalse(rows.hasNext());
        }
    }
    assertEquals(4, readRepair.sent.size());
    // peer1 needs the rows from peers 2 and 4
    Mutation mutation = readRepair.getForEndpoint(peer1);
    assertRepairMetadata(mutation);
    assertRepairContainsNoDeletions(mutation);
    assertRepairContainsColumn(mutation, "0", "c1", "v0", 0);
    assertRepairContainsColumn(mutation, "3", "one", "A", 2);
    // peer2 needs the row from peer4 and the range tombstones
    mutation = readRepair.getForEndpoint(peer2);
    assertRepairMetadata(mutation);
    assertRepairContainsDeletions(mutation, null, tombstone1, tombstone2);
    assertRepairContainsColumn(mutation, "3", "one", "A", 2);
    // peer3 needs both rows and the range tombstones
    mutation = readRepair.getForEndpoint(peer3);
    assertRepairMetadata(mutation);
    assertRepairContainsDeletions(mutation, null, tombstone1, tombstone2);
    assertRepairContainsColumn(mutation, "0", "c1", "v0", 0);
    assertRepairContainsColumn(mutation, "3", "one", "A", 2);
    // peer4 needs the row from peer2 and the range tombstones
    mutation = readRepair.getForEndpoint(peer4);
    assertRepairMetadata(mutation);
    assertRepairContainsDeletions(mutation, null, tombstone1, tombstone2);
    assertRepairContainsColumn(mutation, "0", "c1", "v0", 0);
}
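The tombstone(...) helper called above is not part of this excerpt. A minimal sketch of what such a helper might look like, assuming ascii clustering values and the BufferClusteringBound factory methods from org.apache.cassandra.db; the real test's implementation may differ:

// Hypothetical reconstruction of the test helper: builds a RangeTombstone over a
// clustering range, with configurable bound inclusiveness.
private static RangeTombstone tombstone(String start, String end, long markedForDeleteAt, int localDeletionTime) {
    return tombstone(start, true, end, true, markedForDeleteAt, localDeletionTime);
}

private static RangeTombstone tombstone(String start, boolean inclusiveStart,
                                        String end, boolean inclusiveEnd,
                                        long markedForDeleteAt, int localDeletionTime) {
    ClusteringBound<?> startBound = inclusiveStart
                                    ? BufferClusteringBound.inclusiveStartOf(ByteBufferUtil.bytes(start))
                                    : BufferClusteringBound.exclusiveStartOf(ByteBufferUtil.bytes(start));
    ClusteringBound<?> endBound = inclusiveEnd
                                  ? BufferClusteringBound.inclusiveEndOf(ByteBufferUtil.bytes(end))
                                  : BufferClusteringBound.exclusiveEndOf(ByteBufferUtil.bytes(end));
    return new RangeTombstone(Slice.make(startBound, endBound), new DeletionTime(markedForDeleteAt, localDeletionTime));
}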
Use of org.apache.cassandra.db.RangeTombstone in project cassandra by apache.
From the class DataResolverTest, the method testRepairRangeTombstoneWithPartitionDeletion2:
/**
 * Additional test for CASSANDRA-13719: tests the case where a partition deletion doesn't shadow a range tombstone.
 */
@Test
public void testRepairRangeTombstoneWithPartitionDeletion2() {
    EndpointsForRange replicas = makeReplicas(2);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    // 1st "stream": a partition deletion and a range tombstone
    RangeTombstone rt1 = tombstone("0", true, "9", true, 11, nowInSec);
    PartitionUpdate upd1 = new RowUpdateBuilder(cfm, nowInSec, 1L, dk).addRangeTombstone(rt1).buildUpdate();
    ((MutableDeletionInfo) upd1.deletionInfo()).add(new DeletionTime(10, nowInSec));
    UnfilteredPartitionIterator iter1 = iter(upd1);
    // 2nd "stream": two range tombstones that are covered by the 1st stream's range tombstone
    RangeTombstone rt2 = tombstone("2", true, "3", true, 11, nowInSec);
    RangeTombstone rt3 = tombstone("4", true, "5", true, 10, nowInSec);
    UnfilteredPartitionIterator iter2 = iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).addRangeTombstone(rt2).addRangeTombstone(rt3).buildUpdate());
    resolver.preprocess(response(command, peer1, iter1));
    resolver.preprocess(response(command, peer2, iter2));
    // No results: we've only reconciled tombstones.
    try (PartitionIterator data = resolver.resolve()) {
        assertFalse(data.hasNext());
    }
    // Only the 2nd stream should get repaired.
    assertEquals(1, readRepair.sent.size());
    Mutation mutation = readRepair.getForEndpoint(peer2);
    assertRepairMetadata(mutation);
    assertRepairContainsNoColumns(mutation);
    // The 2nd stream should get the partition deletion, as well as the parts of the 1st stream's range tombstone that it misses.
    assertRepairContainsDeletions(mutation, new DeletionTime(10, nowInSec),
                                  tombstone("0", true, "2", false, 11, nowInSec),
                                  tombstone("3", false, "9", true, 11, nowInSec));
}
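The assertRepairContainsDeletions(...) helper is likewise not shown here. A plausible sketch of the checks it performs, assuming DeletionInfo's getPartitionDeletion() and rangeIterator(boolean) accessors; the exact body is this excerpt's assumption, not necessarily the test's real code:

// Hypothetical sketch: verifies the repair mutation's partition-level deletion and its
// range tombstones, compared in forward clustering order.
private void assertRepairContainsDeletions(Mutation mutation, DeletionTime partitionDeletion, RangeTombstone... rts) {
    PartitionUpdate update = mutation.getPartitionUpdates().iterator().next();
    DeletionInfo deletionInfo = update.deletionInfo();
    if (partitionDeletion != null)
        assertEquals(partitionDeletion, deletionInfo.getPartitionDeletion());
    else
        assertTrue(deletionInfo.getPartitionDeletion().isLive());
    Iterator<RangeTombstone> ranges = deletionInfo.rangeIterator(false); // false = forward order
    for (RangeTombstone expected : rts) {
        assertTrue(ranges.hasNext());
        assertEquals(expected, ranges.next());
    }
    assertFalse(ranges.hasNext());
}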
Use of org.apache.cassandra.db.RangeTombstone in project cassandra by apache.
From the class CompactionsTest, the method writeSSTableWithRangeTombstoneMaskingOneColumn:
public static void writeSSTableWithRangeTombstoneMaskingOneColumn(ColumnFamilyStore cfs, TableMetadata table, int[] dks) {
    for (int dk : dks) {
        RowUpdateBuilder deletedRowUpdateBuilder = new RowUpdateBuilder(table, 1, Util.dk(Integer.toString(dk)));
        // The range tombstone covers this cell (tombstone timestamp 2 > cell timestamp 1)
        deletedRowUpdateBuilder.clustering("01").add("val", "a");
        Clustering<?> startClustering = Clustering.make(ByteBufferUtil.bytes("0"));
        Clustering<?> endClustering = Clustering.make(ByteBufferUtil.bytes("b"));
        deletedRowUpdateBuilder.addRangeTombstone(new RangeTombstone(Slice.make(startClustering, endClustering),
                                                                     new DeletionTime(2, (int) (System.currentTimeMillis() / 1000))));
        deletedRowUpdateBuilder.build().applyUnsafe();
        RowUpdateBuilder notYetDeletedRowUpdateBuilder = new RowUpdateBuilder(table, 3, Util.dk(Integer.toString(dk)));
        // The range tombstone doesn't cover this cell (cell timestamp 3 > tombstone timestamp 2)
        notYetDeletedRowUpdateBuilder.clustering("02").add("val", "a");
        notYetDeletedRowUpdateBuilder.build().applyUnsafe();
    }
    cfs.forceBlockingFlush();
}
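A typical caller might drive this method as follows; the keyspace and table names and the compaction call are illustrative, not taken from CompactionsTest:

// Hypothetical usage: write the masked and unmasked rows, then force a major compaction
// so the range tombstone can drop the shadowed cell.
ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1"); // illustrative names
writeSSTableWithRangeTombstoneMaskingOneColumn(cfs, cfs.metadata(), new int[]{ 1, 2, 3 });
CompactionManager.instance.performMaximal(cfs, false);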
Use of org.apache.cassandra.db.RangeTombstone in project cassandra by apache.
From the class RowIteratorMergeListener, the method closeOpenMarker:
private void closeOpenMarker(int i, ClusteringBound<?> close) {
    ClusteringBound<?> open = markerToRepair[i];
    // Slice.make expects its bounds in forward clustering order, so swap them when iterating in reverse.
    RangeTombstone rt = new RangeTombstone(Slice.make(isReversed ? close : open, isReversed ? open : close), currentDeletion());
    applyToPartition(i, p -> p.add(rt));
    markerToRepair[i] = null;
}
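For context on the bound swap: Slice.make(start, end) requires its bounds in forward clustering order, but when the merge is reversed the listener encounters the deletion's end bound first. A minimal illustration, assuming ascii clustering values and the BufferClusteringBound factories; the values are hypothetical:

// When iterating in reverse, the marker that "opened" the deletion is the slice's END
// bound in forward order, so the bounds must be swapped before building the Slice.
boolean isReversed = true;
ClusteringBound<?> open = BufferClusteringBound.inclusiveEndOf(ByteBufferUtil.bytes("d"));    // seen first when reversed
ClusteringBound<?> close = BufferClusteringBound.inclusiveStartOf(ByteBufferUtil.bytes("a")); // seen last when reversed
Slice slice = Slice.make(isReversed ? close : open, isReversed ? open : close); // start bound always first
RangeTombstone rt = new RangeTombstone(slice, new DeletionTime(1L, FBUtilities.nowInSeconds()));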
Use of org.apache.cassandra.db.RangeTombstone in project cassandra by apache.
From the class SSTableGenerator, the method delete:
Mutation delete(long lts, long pd, Query query) {
    Object[] partitionKey = schema.inflatePartitionKey(pd);
    WhereClause.Builder builder = new WhereClause.Builder();
    List<ColumnIdentifier> variableNames = new ArrayList<>();
    List<ByteBuffer> values = new ArrayList<>();
    // Bind one EQ relation per partition key column.
    for (int i = 0; i < partitionKey.length; i++) {
        String name = schema.partitionKeys.get(i).name;
        ColumnMetadata columnDef = metadata.getColumn(ByteBufferUtil.bytes(name));
        variableNames.add(columnDef.name);
        values.add(ByteBufferUtil.objectToBytes(partitionKey[i]));
        builder.add(new SingleColumnRelation(ColumnIdentifier.getInterned(name, true),
                                             toOperator(Relation.RelationKind.EQ),
                                             new AbstractMarker.Raw(values.size() - 1)));
    }
    // Bind the clustering relations taken from the query.
    for (Relation relation : query.relations) {
        String name = relation.column();
        ColumnMetadata columnDef = metadata.getColumn(ByteBufferUtil.bytes(relation.column()));
        variableNames.add(columnDef.name);
        values.add(ByteBufferUtil.objectToBytes(relation.value()));
        builder.add(new SingleColumnRelation(ColumnIdentifier.getInterned(name, false),
                                             toOperator(relation.kind),
                                             new AbstractMarker.Raw(values.size() - 1)));
    }
    StatementRestrictions restrictions = new StatementRestrictions(StatementType.DELETE, metadata, builder.build(),
                                                                   new VariableSpecifications(variableNames),
                                                                   false, false, false, false);
    QueryOptions options = QueryOptions.forInternalCalls(ConsistencyLevel.QUORUM, values);
    SortedSet<ClusteringBound<?>> startBounds = restrictions.getClusteringColumnsBounds(Bound.START, options);
    SortedSet<ClusteringBound<?>> endBounds = restrictions.getClusteringColumnsBounds(Bound.END, options);
    Slices slices = DeleteStatement.toSlices(metadata, startBounds, endBounds);
    assert slices.size() == 1;
    int deletionTime = FBUtilities.nowInSeconds();
    long rts = clock.rts(lts);
    return new RowUpdateBuilder(metadata, deletionTime, rts, metadata.params.defaultTimeToLive, serializePartitionKey(store, partitionKey))
           .noRowMarker()
           .addRangeTombstone(new RangeTombstone(slices.get(0), new DeletionTime(rts, deletionTime)))
           .build();
}
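When the clustering bounds are already known, the same kind of range-tombstone mutation can be built without the CQL restriction machinery. A minimal sketch, reusing metadata and rts from the method above and assuming a partition key dk and inclusive ascii bounds; the literal values are illustrative:

// Hypothetical simpler equivalent: build the slice directly from known bounds instead
// of deriving it from StatementRestrictions.
int nowInSec = FBUtilities.nowInSeconds();
ClusteringBound<?> start = BufferClusteringBound.inclusiveStartOf(ByteBufferUtil.bytes("a")); // illustrative bound
ClusteringBound<?> end = BufferClusteringBound.inclusiveEndOf(ByteBufferUtil.bytes("z"));     // illustrative bound
Mutation directDelete = new RowUpdateBuilder(metadata, nowInSec, rts, dk)
                        .noRowMarker()
                        .addRangeTombstone(new RangeTombstone(Slice.make(start, end), new DeletionTime(rts, nowInSec)))
                        .build();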