Use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache: class DataResolverTest, method trackMatchingEmptyDigestsWithNoneConclusive.
@Test
public void trackMatchingEmptyDigestsWithNoneConclusive() {
    ByteBuffer digest1 = ByteBufferUtil.EMPTY_BYTE_BUFFER;
    EndpointsForRange replicas = makeReplicas(2);
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    // both peers are expected to report the same empty repaired-data digest, neither of them conclusive
    TestRepairedDataVerifier verifier = new TestRepairedDataVerifier();
    verifier.expectDigest(peer1, digest1, false);
    verifier.expectDigest(peer2, digest1, false);
    DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
    resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
    resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, false, command));
    resolveAndConsume(resolver);
    assertTrue(verifier.verified);
}
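The makeReplicas helper called above is defined elsewhere in DataResolverTest and is not shown on this page. As a rough orientation only, the following minimal sketch (not the test's actual helper) shows one way an EndpointsForRange of full replicas over a single token range can be put together; the token bounds, loopback addresses, and the method name makeFullReplicas are invented for illustration.

import java.net.UnknownHostException;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.locator.EndpointsForRange;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.Replica;

static EndpointsForRange makeFullReplicas(int count) throws UnknownHostException {
    // hypothetical range; the real helper obtains its range from the test fixture
    Range<Token> range = new Range<>(new Murmur3Partitioner.LongToken(0),
                                     new Murmur3Partitioner.LongToken(100));
    EndpointsForRange.Builder builder = EndpointsForRange.builder(range);
    for (int i = 1; i <= count; i++)
        builder.add(Replica.fullReplica(InetAddressAndPort.getByName("127.0.0." + i), range));
    return builder.build();
}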
Use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache: class DataResolverTest, method responsesFromTransientReplicasAreNotTracked.
@Test
public void responsesFromTransientReplicasAreNotTracked() {
    EndpointsForRange replicas = makeReplicas(2);
    // rebuild the replica set so that the second endpoint is a transient replica
    EndpointsForRange.Builder mutable = replicas.newBuilder(2);
    mutable.add(replicas.get(0));
    mutable.add(Replica.transientReplica(replicas.get(1).endpoint(), replicas.range()));
    replicas = mutable.build();
    TestRepairedDataVerifier verifier = new TestRepairedDataVerifier();
    ByteBuffer digest1 = ByteBufferUtil.bytes("digest1");
    ByteBuffer digest2 = ByteBufferUtil.bytes("digest2");
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    // only the full replica's digest is expected to be tracked
    verifier.expectDigest(peer1, digest1, true);
    DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
    resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest1, true, command));
    resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), digest2, true, command));
    resolveAndConsume(resolver);
    assertTrue(verifier.verified);
}
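Once a mixed collection like the one above is built, full and transient replicas can be distinguished with Replica.isFull() and Replica.isTransient(). The sketch below assumes the same imports as the earlier sketch and that the collection's filter(...) returns a new collection of the matching replicas; addresses and range are again made up.

static void fullVersusTransient() throws UnknownHostException {
    Range<Token> range = new Range<>(new Murmur3Partitioner.LongToken(0),
                                     new Murmur3Partitioner.LongToken(100));
    Replica full = Replica.fullReplica(InetAddressAndPort.getByName("127.0.0.1"), range);
    Replica trans = Replica.transientReplica(InetAddressAndPort.getByName("127.0.0.2"), range);
    EndpointsForRange mixed = EndpointsForRange.of(full, trans);
    // the transient endpoint is the one whose digest the test above expects to be ignored
    EndpointsForRange fullOnly = mixed.filter(Replica::isFull);
    assert fullOnly.size() == 1 && fullOnly.get(0).isFull();
}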
Use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache: class BootstrapTransientTest, method endpoints.
public EndpointsForRange endpoints(Replica... replicas) {
    assert replicas.length > 0;
    Range<Token> range = replicas[0].range();
    EndpointsForRange.Builder builder = EndpointsForRange.builder(range);
    for (Replica r : replicas) {
        assert r.range().equals(range);
        builder.add(r);
    }
    return builder.build();
}
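A hypothetical caller of this helper (the test method name, addresses, and token bounds below are invented for illustration): every replica passed in must cover the same range as the first one, otherwise the per-replica assertion fires.

@Test
public void buildsEndpointsForSharedRange() throws Exception {
    Range<Token> range = new Range<>(new Murmur3Partitioner.LongToken(0),
                                     new Murmur3Partitioner.LongToken(100));
    // both replicas cover the same range, so the helper accepts them
    EndpointsForRange efr = endpoints(
        Replica.fullReplica(InetAddressAndPort.getByName("127.0.0.1"), range),
        Replica.transientReplica(InetAddressAndPort.getByName("127.0.0.2"), range));
    assertEquals(2, efr.size());
}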
Use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache: class DataResolverTest, method testRepairRangeTombstoneWithPartitionDeletion2.
/**
 * Additional test for CASSANDRA-13719: tests the case where a partition deletion doesn't shadow a range tombstone.
 */
@Test
public void testRepairRangeTombstoneWithPartitionDeletion2() {
    EndpointsForRange replicas = makeReplicas(2);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    // 1st "stream": a partition deletion and a range tombstone
    RangeTombstone rt1 = tombstone("0", true, "9", true, 11, nowInSec);
    PartitionUpdate upd1 = new RowUpdateBuilder(cfm, nowInSec, 1L, dk).addRangeTombstone(rt1).buildUpdate();
    ((MutableDeletionInfo) upd1.deletionInfo()).add(new DeletionTime(10, nowInSec));
    UnfilteredPartitionIterator iter1 = iter(upd1);
    // 2nd "stream": a range tombstone that is covered by the other stream rt
    RangeTombstone rt2 = tombstone("2", true, "3", true, 11, nowInSec);
    RangeTombstone rt3 = tombstone("4", true, "5", true, 10, nowInSec);
    UnfilteredPartitionIterator iter2 = iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).addRangeTombstone(rt2).addRangeTombstone(rt3).buildUpdate());
    resolver.preprocess(response(command, peer1, iter1));
    resolver.preprocess(response(command, peer2, iter2));
    // No results, we've only reconciled tombstones.
    try (PartitionIterator data = resolver.resolve()) {
        assertFalse(data.hasNext());
        // 2nd stream should get repaired
    }
    assertEquals(1, readRepair.sent.size());
    Mutation mutation = readRepair.getForEndpoint(peer2);
    assertRepairMetadata(mutation);
    assertRepairContainsNoColumns(mutation);
    // 2nd stream should get both the partition deletion, as well as the part of the 1st stream RT that it misses
    assertRepairContainsDeletions(mutation, new DeletionTime(10, nowInSec), tombstone("0", true, "2", false, 11, nowInSec), tombstone("3", false, "9", true, 11, nowInSec));
}
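To spell out the final assertion: the snippet below merely restates, with the test's own tombstone(...) helper, what peer2 is expected to receive — the partition deletion at timestamp 10 it never saw, plus the parts of peer1's [0, 9]@11 tombstone outside the [2, 3]@11 slice it already has (rt3 at timestamp 10 requires nothing, being shadowed by the newer [0, 9]@11).

// what assertRepairContainsDeletions checks the repair mutation for
DeletionTime missingPartitionDeletion = new DeletionTime(10, nowInSec);
RangeTombstone missingBefore = tombstone("0", true, "2", false, 11, nowInSec); // [0, 2)
RangeTombstone missingAfter = tombstone("3", false, "9", true, 11, nowInSec);  // (3, 9]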
Use of org.apache.cassandra.locator.EndpointsForRange in project cassandra by apache: class BlockingReadRepairTest, method remoteDCTest.
/**
 * For DC-local consistency levels, no-op mutations and responses from remote DCs should not affect the effective blockFor.
 */
@Test
public void remoteDCTest() throws Exception {
    Map<Replica, Mutation> repairs = new HashMap<>();
    repairs.put(replica1, mutation(cell1));
    Replica remote1 = ReplicaUtils.full(InetAddressAndPort.getByName("10.0.0.1"));
    Replica remote2 = ReplicaUtils.full(InetAddressAndPort.getByName("10.0.0.2"));
    repairs.put(remote1, mutation(cell1));
    EndpointsForRange participants = EndpointsForRange.of(replica1, replica2, remote1, remote2);
    ReplicaPlan.ForTokenWrite writePlan = repairPlan(replicaPlan(ks, ConsistencyLevel.LOCAL_QUORUM, participants));
    InstrumentedReadRepairHandler handler = createRepairHandler(repairs, writePlan);
    handler.sendInitialRepairs();
    Assert.assertEquals(2, handler.mutationsSent.size());
    Assert.assertTrue(handler.mutationsSent.containsKey(replica1.endpoint()));
    Assert.assertTrue(handler.mutationsSent.containsKey(remote1.endpoint()));
    Assert.assertEquals(1, handler.waitingOn());
    Assert.assertFalse(getCurrentRepairStatus(handler));
    // an ack from the remote DC does not reduce the number of responses being blocked for
    handler.ack(remote1.endpoint());
    Assert.assertEquals(1, handler.waitingOn());
    Assert.assertFalse(getCurrentRepairStatus(handler));
    // the local replica's ack is what completes the repair
    handler.ack(replica1.endpoint());
    Assert.assertEquals(0, handler.waitingOn());
    Assert.assertTrue(getCurrentRepairStatus(handler));
}
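A rough reading of the waitingOn() assertions as arithmetic, assuming (as the test environment presumably arranges) that replica1 and replica2 sit in the local datacenter while the 10.0.0.x replicas are remote; this is only an illustration of the bookkeeping, not the handler's actual internal computation.

int localReplicas = 2;                                // replica1, replica2
int blockFor = localReplicas / 2 + 1;                 // LOCAL_QUORUM over 2 local replicas = 2
int noopAcks = 1;                                     // replica2 needed no repair, so it counts immediately
int waitingAfterSend = blockFor - noopAcks;           // 1: matches the first waitingOn() assertion
int waitingAfterRemoteAck = waitingAfterSend;         // remote DC acks are ignored, still 1
int waitingAfterLocalAck = waitingAfterRemoteAck - 1; // 0: the repair is now complete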