Example usage of org.apache.cassandra.locator.EndpointsForRange in the Apache Cassandra project: class DataResolverTest, method testResolveDisjointSingleRow.
/**
 * Verifies that two replica responses carrying disjoint columns for the same
 * clustering row are merged into a single row containing both columns, and that
 * each peer is then sent a read-repair mutation containing only the column it
 * was missing.
 */
@Test
public void testResolveDisjointSingleRow() {
    EndpointsForRange replicas = makeReplicas(2);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    // peer1 knows only c1=v1 (timestamp 0)
    resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1").add("c1", "v1").buildUpdate())));
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    // peer2 knows only c2=v2 (timestamp 1)
    resolver.preprocess(response(command, peer2, iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).clustering("1").add("c2", "v2").buildUpdate())));
    try (PartitionIterator data = resolver.resolve()) {
        try (RowIterator rows = Iterators.getOnlyElement(data)) {
            Row row = Iterators.getOnlyElement(rows);
            // the resolved row must be the union of both responses
            assertColumns(row, "c1", "c2");
            assertColumn(cfm, row, "c1", "v1", 0);
            assertColumn(cfm, row, "c2", "v2", 1);
        }
    }
    assertEquals(2, readRepair.sent.size());
    // each peer needs to repair with each other's column; neither response
    // contained deletions, so the repairs must not either (matches the
    // assertions made by the other resolve tests in this class)
    Mutation mutation = readRepair.getForEndpoint(peer1);
    assertRepairMetadata(mutation);
    assertRepairContainsNoDeletions(mutation);
    assertRepairContainsColumn(mutation, "1", "c2", "v2", 1);
    mutation = readRepair.getForEndpoint(peer2);
    assertRepairMetadata(mutation);
    assertRepairContainsNoDeletions(mutation);
    assertRepairContainsColumn(mutation, "1", "c1", "v1", 0);
}
Example usage of org.apache.cassandra.locator.EndpointsForRange in the Apache Cassandra project: class DataResolverTest, method trackMatchingEmptyDigestsWithAllConclusive.
/**
 * Repaired-data tracking: when both replicas return the same empty repaired-data
 * digest and both flag it as conclusive, the verifier must report success.
 */
@Test
public void trackMatchingEmptyDigestsWithAllConclusive() {
    EndpointsForRange replicas = makeReplicas(2);
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    ByteBuffer emptyDigest = ByteBufferUtil.EMPTY_BYTE_BUFFER;
    // both endpoints are expected to report the identical (empty) digest, conclusively
    TestRepairedDataVerifier verifier = new TestRepairedDataVerifier();
    verifier.expectDigest(peer1, emptyDigest, true);
    verifier.expectDigest(peer2, emptyDigest, true);
    DataResolver resolver = resolverWithVerifier(command, plan(replicas, ALL), readRepair, nanoTime(), verifier);
    resolver.preprocess(response(peer1, iter(PartitionUpdate.emptyUpdate(cfm, dk)), emptyDigest, true, command));
    resolver.preprocess(response(peer2, iter(PartitionUpdate.emptyUpdate(cfm, dk)), emptyDigest, true, command));
    resolveAndConsume(resolver);
    assertTrue(verifier.verified);
}
Example usage of org.apache.cassandra.locator.EndpointsForRange in the Apache Cassandra project: class DataResolverTest, helper method makeReplicas.
/**
 * Builds an {@code EndpointsForRange} of {@code num} full replicas on consecutive
 * 127.0.0.x loopback addresses, registering each endpoint with the token metadata
 * and the gossiper, and (re)initialises the test's shared command/readRepair state.
 * Only counts of 2 and 4 are supported, matching the replication factors of the
 * keyspaces prepared by AbstractReadResponseTest.
 */
private EndpointsForRange makeReplicas(int num) {
    StorageService.instance.getTokenMetadata().clearUnsafe();
    // pick the keyspace/table pair whose replication factor matches the requested count
    if (num == 2) {
        ks = AbstractReadResponseTest.ks;
        cfs = AbstractReadResponseTest.cfs;
    } else if (num == 4) {
        ks = AbstractReadResponseTest.ks3;
        cfs = AbstractReadResponseTest.cfs3;
    } else {
        throw new IllegalStateException("This test needs refactoring to cleanly support different replication factors");
    }
    command = Util.cmd(cfs, dk).withNowInSeconds(nowInSec).build();
    readRepair = new TestableReadRepair(command);
    EndpointsForRange.Builder builder = EndpointsForRange.builder(ReplicaUtils.FULL_RANGE, num);
    Token token = Murmur3Partitioner.instance.getMinimumToken();
    try {
        for (int i = 0; i < num; i++) {
            InetAddressAndPort endpoint = InetAddressAndPort.getByAddress(new byte[] { 127, 0, 0, (byte) (i + 1) });
            builder.add(ReplicaUtils.full(endpoint));
            // give each endpoint its own token, strictly increasing from the minimum
            token = token.increaseSlightly();
            StorageService.instance.getTokenMetadata().updateNormalToken(token, endpoint);
            Gossiper.instance.initializeNodeUnsafe(endpoint, UUID.randomUUID(), 1);
        }
    } catch (UnknownHostException e) {
        // addresses are hard-coded loopback bytes, so this cannot happen in practice
        throw new AssertionError(e);
    }
    return builder.build();
}
Example usage of org.apache.cassandra.locator.EndpointsForRange in the Apache Cassandra project: class DataResolverTest, method testResolveNewerSingleRow.
/**
 * Verifies last-write-wins resolution: when two replicas return conflicting
 * values for the same cell, the value with the newer timestamp wins, and only
 * the peer holding the stale value receives a read-repair mutation.
 */
@Test
public void testResolveNewerSingleRow() {
    EndpointsForRange replicas = makeReplicas(2);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
    InetAddressAndPort stalePeer = replicas.get(0).endpoint();
    // stale peer: c1=v1 at timestamp 0
    resolver.preprocess(response(command, stalePeer, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1").add("c1", "v1").buildUpdate()), false));
    InetAddressAndPort freshPeer = replicas.get(1).endpoint();
    // fresh peer: c1=v2 at timestamp 1 — this value should win resolution
    resolver.preprocess(response(command, freshPeer, iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).clustering("1").add("c1", "v2").buildUpdate()), false));
    try (PartitionIterator data = resolver.resolve();
         RowIterator rows = Iterators.getOnlyElement(data)) {
        Row resolved = Iterators.getOnlyElement(rows);
        assertColumns(resolved, "c1");
        assertColumn(cfm, resolved, "c1", "v2", 1);
    }
    // only the stale peer requires repair, with the winning value from the fresh peer
    assertEquals(1, readRepair.sent.size());
    Mutation repair = readRepair.getForEndpoint(stalePeer);
    assertRepairMetadata(repair);
    assertRepairContainsNoDeletions(repair);
    assertRepairContainsColumn(repair, "1", "c1", "v2", 1);
}
Example usage of org.apache.cassandra.locator.EndpointsForRange in the Apache Cassandra project: class DataResolverTest, method testResolveDisjointMultipleRows.
/**
 * Verifies that responses containing different clustering rows for the same
 * partition are merged into a superset holding both rows in clustering order,
 * and that each peer receives a read-repair mutation for the row it lacked.
 */
@Test
public void testResolveDisjointMultipleRows() throws UnknownHostException {
    EndpointsForRange replicas = makeReplicas(2);
    DataResolver resolver = new DataResolver(command, plan(replicas, ALL), readRepair, nanoTime());
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    // peer1 holds only clustering "1" with c1=v1 (timestamp 0)
    resolver.preprocess(response(command, peer1, iter(new RowUpdateBuilder(cfm, nowInSec, 0L, dk).clustering("1").add("c1", "v1").buildUpdate())));
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    // peer2 holds only clustering "2" with c2=v2 (timestamp 1)
    resolver.preprocess(response(command, peer2, iter(new RowUpdateBuilder(cfm, nowInSec, 1L, dk).clustering("2").add("c2", "v2").buildUpdate())));
    try (PartitionIterator data = resolver.resolve();
         RowIterator rows = data.next()) {
        // the resolved superset must contain both rows, in clustering order
        Row first = rows.next();
        assertClustering(cfm, first, "1");
        assertColumns(first, "c1");
        assertColumn(cfm, first, "c1", "v1", 0);
        Row second = rows.next();
        assertClustering(cfm, second, "2");
        assertColumns(second, "c2");
        assertColumn(cfm, second, "c2", "v2", 1);
        // exactly two rows, exactly one partition
        assertFalse(rows.hasNext());
        assertFalse(data.hasNext());
    }
    assertEquals(2, readRepair.sent.size());
    // each peer needs to repair the row from the other
    Mutation repair = readRepair.getForEndpoint(peer1);
    assertRepairMetadata(repair);
    assertRepairContainsNoDeletions(repair);
    assertRepairContainsColumn(repair, "2", "c2", "v2", 1);
    repair = readRepair.getForEndpoint(peer2);
    assertRepairMetadata(repair);
    assertRepairContainsNoDeletions(repair);
    assertRepairContainsColumn(repair, "1", "c1", "v1", 0);
}
Aggregations