Usage example of org.apache.cassandra.locator.EndpointsForToken from the Apache Cassandra project.
Source: class DigestResolverTest, method transientResponseData.
@Test
public void transientResponseData() {
    SinglePartitionReadCommand command = SinglePartitionReadCommand.fullPartitionRead(cfm, nowInSec, dk);
    // Replica set: two full replicas plus one transient replica for the partition's token.
    EndpointsForToken replicas = EndpointsForToken.of(dk.getToken(), full(EP1), full(EP2), trans(EP3));
    DigestResolver<?, ?> resolver = new DigestResolver<>(command, plan(ConsistencyLevel.QUORUM, replicas), 0);

    PartitionUpdate dataFromFull = update(row(1000, 1, 1)).build();
    PartitionUpdate digestFromFull = update(row(1000, 1, 1)).build();
    PartitionUpdate dataFromTransient = update(row(1000, 2, 2)).build();

    // Nothing has been preprocessed yet.
    Assert.assertFalse(resolver.isDataPresent());
    Assert.assertFalse(resolver.hasTransientResponse());

    // A full-replica data response makes data present.
    resolver.preprocess(response(command, EP1, iter(dataFromFull), false));
    Assert.assertTrue(resolver.isDataPresent());

    // Digest from the second full replica, then data from the transient replica.
    resolver.preprocess(response(command, EP2, iter(digestFromFull), true));
    resolver.preprocess(response(command, EP3, iter(dataFromTransient), false));
    Assert.assertTrue(resolver.hasTransientResponse());

    // Resolved data should be the union of the full and transient rows.
    assertPartitionsEqual(filter(iter(dk, row(1000, 1, 1), row(1000, 2, 2))), resolver.getData());
}
Usage example of org.apache.cassandra.locator.EndpointsForToken from the Apache Cassandra project.
Source: class DigestResolverTest, method multiThreadedNoRepairNeededReadCallback.
/**
 * This test makes a time-boxed effort to reproduce the issue found in CASSANDRA-16807:
 * two threads deliver matching digest responses concurrently through the same
 * {@link ReadCallback}, and the resolver must still observe consistent, matching data.
 */
@Test
public void multiThreadedNoRepairNeededReadCallback() {
    SinglePartitionReadCommand command = SinglePartitionReadCommand.fullPartitionRead(cfm, nowInSec, dk);
    EndpointsForToken targetReplicas = EndpointsForToken.of(dk.getToken(), full(EP1), full(EP2));
    PartitionUpdate response = update(row(1000, 4, 4), row(1000, 5, 5)).build();
    ReplicaPlan.SharedForTokenRead plan = plan(ConsistencyLevel.ONE, targetReplicas);
    ExecutorService pool = Executors.newFixedThreadPool(2);
    // Time-box the reproduction attempt to two minutes.
    long endTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(2);
    try {
        while (System.nanoTime() < endTime) {
            final long startNanos = System.nanoTime();
            final DigestResolver<EndpointsForToken, ReplicaPlan.ForTokenRead> resolver = new DigestResolver<>(command, plan, startNanos);
            final ReadCallback<EndpointsForToken, ReplicaPlan.ForTokenRead> callback = new ReadCallback<>(resolver, command, plan, startNanos);
            // Latch makes both workers release their responses as close to
            // simultaneously as possible, maximizing the chance of a race.
            final CountDownLatch startlatch = new CountDownLatch(2);
            pool.execute(() -> {
                startlatch.countDown();
                waitForLatch(startlatch);
                callback.onResponse(response(command, EP1, iter(response), true));
            });
            pool.execute(() -> {
                startlatch.countDown();
                waitForLatch(startlatch);
                callback.onResponse(response(command, EP2, iter(response), true));
            });
            callback.awaitResults();
            Assert.assertTrue(resolver.isDataPresent());
            Assert.assertTrue(resolver.responsesMatch());
        }
    } finally {
        // Shut down and wait for in-flight tasks so no worker is still invoking
        // the callback after this test method returns (avoids racy teardown).
        pool.shutdown();
        try {
            if (!pool.awaitTermination(1, TimeUnit.MINUTES))
                pool.shutdownNow();
        } catch (InterruptedException e) {
            pool.shutdownNow();
            // Restore the interrupt status for the caller.
            Thread.currentThread().interrupt();
        }
    }
}
Usage example of org.apache.cassandra.locator.EndpointsForToken from the Apache Cassandra project.
Source: class DigestResolverTest, method transientResponse.
/**
 * Transient responses shouldn't be classified as the single dataResponse
 */
@Test
public void transientResponse() {
    SinglePartitionReadCommand command = SinglePartitionReadCommand.fullPartitionRead(cfm, nowInSec, dk);
    // One full replica and one transient replica for this token.
    EndpointsForToken replicas = EndpointsForToken.of(dk.getToken(), full(EP1), trans(EP2));
    DigestResolver<?, ?> resolver = new DigestResolver<>(command, plan(ConsistencyLevel.QUORUM, replicas), 0);

    PartitionUpdate transientUpdate = update(row(1000, 5, 5)).build();

    // Fresh resolver: no data, no transient responses.
    Assert.assertFalse(resolver.isDataPresent());
    Assert.assertFalse(resolver.hasTransientResponse());

    // Feed in a data response from the transient replica only: it must be
    // recorded as transient, not counted as the resolver's data response.
    resolver.preprocess(response(command, EP2, iter(transientUpdate), false));
    Assert.assertFalse(resolver.isDataPresent());
    Assert.assertTrue(resolver.hasTransientResponse());
}
Usage example of org.apache.cassandra.locator.EndpointsForToken from the Apache Cassandra project.
Source: class AbstractReadRepairTest, method repairPlan.
// Builds a write plan for read repair: targets the read plan's replicas at the
// range's left token, with no pending replicas, accepting every live replica.
static ReplicaPlan.ForTokenWrite repairPlan(ReplicaPlan.ForRangeRead readPlan, EndpointsForRange liveAndDown) {
    Token repairToken = readPlan.range().left.getToken();
    EndpointsForToken noPending = EndpointsForToken.empty(repairToken);
    return ReplicaPlans.forWrite(readPlan.keyspace(),
                                 ConsistencyLevel.TWO,
                                 liveAndDown.forToken(repairToken),
                                 noPending,
                                 r -> true,
                                 ReplicaPlans.writeReadRepair(readPlan));
}
Usage example of org.apache.cassandra.locator.EndpointsForToken from the Apache Cassandra project.
Source: class StorageProxy, method findSuitableReplica.
/**
 * Find a suitable replica as leader for counter update.
 * For now, we pick a random replica in the local DC (or ask the snitch if
 * there is no replica alive in the local DC).
 * TODO: if we track the latency of the counter writes (which makes sense
 * contrarily to standard writes since there is a read involved), we could
 * trust the dynamic snitch entirely, which may be a better solution. It
 * is unclear we want to mix those latencies with read latencies, so this
 * may be a bit involved.
 */
private static Replica findSuitableReplica(String keyspaceName, DecoratedKey key, String localDataCenter, ConsistencyLevel cl) throws UnavailableException {
    Keyspace keyspace = Keyspace.open(keyspaceName);
    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
    AbstractReplicationStrategy strategy = keyspace.getReplicationStrategy();

    EndpointsForToken candidates = strategy.getNaturalReplicasForToken(key);
    // CASSANDRA-13043: drop endpoints that are not yet accepting clients (e.g. still bootstrapping)
    candidates = candidates.filter(r -> StorageService.instance.isRpcReady(r.endpoint()));

    // TODO have a way to compute the consistency level
    if (candidates.isEmpty())
        throw UnavailableException.create(cl, cl.blockFor(strategy), 0);

    // Partition out the replicas that live in our own datacenter.
    List<Replica> localDcReplicas = new ArrayList<>(candidates.size());
    for (Replica candidate : candidates) {
        if (snitch.getDatacenter(candidate).equals(localDataCenter))
            localDcReplicas.add(candidate);
    }

    if (localDcReplicas.isEmpty()) {
        // If the consistency required is local then we should not involve other DCs
        if (cl.isDatacenterLocal())
            throw UnavailableException.create(cl, cl.blockFor(strategy), 0);
        // No endpoint in local DC; fall back to the closest endpoint per the snitch.
        candidates = snitch.sortedByProximity(FBUtilities.getBroadcastAddressAndPort(), candidates);
        return candidates.get(0);
    }

    // Pick uniformly at random among the local-DC replicas.
    return localDcReplicas.get(ThreadLocalRandom.current().nextInt(localDcReplicas.size()));
}
Aggregations