use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache.
the class DataResolverTest method trackMismatchingRepairedDigestsWithAllConclusive.
@Test
public void trackMismatchingRepairedDigestsWithAllConclusive() {
    // Two replicas return conclusive but *different* repaired-data digests;
    // with tracking enabled at CL.ALL the verifier must still be invoked.
    EndpointsForRange replicas = makeReplicas(2);
    InetAddressAndPort firstPeer = replicas.get(0).endpoint();
    InetAddressAndPort secondPeer = replicas.get(1).endpoint();

    ByteBuffer firstDigest = ByteBufferUtil.bytes("digest1");
    ByteBuffer secondDigest = ByteBufferUtil.bytes("digest2");

    // Register the expected (peer, digest, conclusive) triples up front.
    TestRepairedDataVerifier verifier = new TestRepairedDataVerifier();
    verifier.expectDigest(firstPeer, firstDigest, true);
    verifier.expectDigest(secondPeer, secondDigest, true);

    DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
    resolver.preprocess(response(firstPeer, iter(PartitionUpdate.emptyUpdate(cfm, dk)), firstDigest, true, command));
    resolver.preprocess(response(secondPeer, iter(PartitionUpdate.emptyUpdate(cfm, dk)), secondDigest, true, command));

    resolveAndConsume(resolver);
    // Resolution must have triggered repaired-data verification.
    assertTrue(verifier.verified);
}
use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache.
the class DataResolverTest method testRepairRangeTombstoneWithPartitionDeletion.
/**
* Test for CASSANDRA-13719: tests that having a partition deletion shadow a range tombstone on another source
* doesn't trigger an assertion error.
*/
@Test
public void testRepairRangeTombstoneWithPartitionDeletion() {
    // Regression test for CASSANDRA-13719: a partition deletion on one source
    // shadowing a range tombstone on the other must not trip an assertion.
    EndpointsForRange replicas = makeReplicas(2);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
    InetAddressAndPort firstPeer = replicas.get(0).endpoint();
    InetAddressAndPort secondPeer = replicas.get(1).endpoint();

    // First source: nothing but a partition-level deletion at timestamp 10.
    UnfilteredPartitionIterator partitionDeletionStream = iter(PartitionUpdate.fullPartitionDelete(cfm, dk, 10, nowInSec));

    // Second source: an older (timestamp 5) range tombstone entirely shadowed
    // by the first source's partition deletion.
    RangeTombstone shadowedRt = tombstone("0", true, "10", true, 5, nowInSec);
    UnfilteredPartitionIterator rangeTombstoneStream = iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).addRangeTombstone(shadowedRt).buildUpdate());

    resolver.preprocess(response(command, firstPeer, partitionDeletionStream));
    resolver.preprocess(response(command, secondPeer, rangeTombstoneStream));

    // Only tombstones were reconciled, so the merged result is empty.
    try (PartitionIterator data = resolver.resolve()) {
        assertFalse(data.hasNext());
    }

    // Exactly one repair mutation, sent to the second source, carrying the
    // partition deletion it was missing and no regular columns.
    assertEquals(1, readRepair.sent.size());
    Mutation mutation = readRepair.getForEndpoint(secondPeer);
    assertRepairMetadata(mutation);
    assertRepairContainsNoColumns(mutation);
    assertRepairContainsDeletions(mutation, new DeletionTime(10, nowInSec));
}
use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache.
the class DataResolverTest method testResolveDeleted.
@Test
public void testResolveDeleted() {
    // A row write (timestamp 0) on one replica is superseded by a newer
    // partition deletion (timestamp 1) on the other; the merged view is empty
    // and the stale replica gets the deletion via read repair.
    EndpointsForRange replicas = makeReplicas(2);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());

    InetAddressAndPort stalePeer = replicas.get(0).endpoint();
    InetAddressAndPort deletingPeer = replicas.get(1).endpoint();

    // stalePeer's columns are timestamped before deletingPeer's delete.
    resolver.preprocess(response(command, stalePeer, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1").add("one", "A").buildUpdate())));
    resolver.preprocess(response(command, deletingPeer, fullPartitionDelete(cfm, dk, 1, nowInSec)));

    try (PartitionIterator data = resolver.resolve()) {
        assertFalse(data.hasNext());
    }

    // A single repair mutation must propagate the deletion back to stalePeer,
    // carrying no live columns.
    assertEquals(1, readRepair.sent.size());
    Mutation mutation = readRepair.getForEndpoint(stalePeer);
    assertRepairMetadata(mutation);
    assertRepairContainsDeletions(mutation, new DeletionTime(1, nowInSec));
    assertRepairContainsNoColumns(mutation);
}
use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache.
the class DataResolverTest method noVerificationForSingletonResponse.
@Test
public void noVerificationForSingletonResponse() {
    // A coordinator shouldn't request repaired-data tracking for CL <= 1, but
    // even if it did, a single response must never trigger verification —
    // there is nothing to compare the digest against.
    EndpointsForRange replicas = makeReplicas(2);
    InetAddressAndPort solePeer = replicas.get(0).endpoint();
    ByteBuffer soleDigest = ByteBufferUtil.bytes("digest1");

    TestRepairedDataVerifier verifier = new TestRepairedDataVerifier();
    verifier.expectDigest(solePeer, soleDigest, true);

    DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
    // Only one of the two replicas responds.
    resolver.preprocess(response(solePeer, iter(PartitionUpdate.emptyUpdate(cfm, dk)), soleDigest, true, command));

    resolveAndConsume(resolver);
    // Verification must NOT have been attempted for a singleton response.
    assertFalse(verifier.verified);
}
use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache.
the class DataResolverTest method testRepairRangeTombstoneBoundary.
/**
* Test for CASSANDRA-13237, checking we don't fail (and handle correctly) the case where a RT boundary has the
* same deletion on both sides (which is useless but could be created by legacy code pre-CASSANDRA-13237 and could
* thus still be sent).
*/
private void testRepairRangeTombstoneBoundary(int timestamp1, int timestamp2, int timestamp3) throws UnknownHostException {
    // Parameterized helper: replica 1 covers [0, 9] with a single range
    // tombstone at timestamp1; replica 2 covers the same span split at "5"
    // into two halves at timestamp2 and timestamp3. Checks which half (if
    // any) gets read-repaired on replica 2.
    EndpointsForRange replicas = makeReplicas(2);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
    InetAddressAndPort firstPeer = replicas.get(0).endpoint();
    InetAddressAndPort secondPeer = replicas.get(1).endpoint();

    // 1st "stream": one RT spanning ["0", "9"] at timestamp1.
    RangeTombstone fullSpan = tombstone("0", true, "9", true, timestamp1, nowInSec);
    UnfilteredPartitionIterator firstStream = iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).addRangeTombstone(fullSpan).buildUpdate());

    // 2nd "stream": built marker-by-marker to guarantee the exact boundary
    // we want at "5" (exclusive end of the first half).
    RangeTombstoneBoundMarker openAtZero = marker("0", true, true, timestamp2, nowInSec);
    RangeTombstoneBoundaryMarker boundaryAtFive = boundary("5", false, timestamp2, nowInSec, timestamp3, nowInSec);
    RangeTombstoneBoundMarker closeAtNine = marker("9", false, true, timestamp3, nowInSec);
    UnfilteredPartitionIterator secondStream = iter(dk, openAtZero, boundaryAtFive, closeAtNine);

    resolver.preprocess(response(command, firstPeer, firstStream));
    resolver.preprocess(response(command, secondPeer, secondStream));

    // Repair is needed only if replica 2 disagrees with replica 1 on either half.
    boolean shouldHaveRepair = timestamp1 != timestamp2 || timestamp1 != timestamp3;

    // Only tombstones were reconciled, so the resolved result is empty.
    try (PartitionIterator data = resolver.resolve()) {
        assertFalse(data.hasNext());
    }

    assertEquals(shouldHaveRepair ? 1 : 0, readRepair.sent.size());
    if (!shouldHaveRepair)
        return;

    Mutation mutation = readRepair.getForEndpoint(secondPeer);
    assertRepairMetadata(mutation);
    assertRepairContainsNoColumns(mutation);

    RangeTombstone expected;
    if (timestamp1 != timestamp2)
        // The 1st half ["0", "5") was stale on replica 2 and got repaired.
        expected = tombstone("0", true, "5", false, timestamp1, nowInSec);
    else
        // The 2nd half ["5", "9"] was stale on replica 2 and got repaired.
        expected = tombstone("5", true, "9", true, timestamp1, nowInSec);
    assertRepairContainsDeletions(mutation, null, expected);
}
Aggregations