Example usage of org.apache.cassandra.locator.Replica in the Apache Cassandra project: the makeRequests method of the AbstractReadExecutor class.
/**
 * Sends the given read command to every replica in {@code replicas}, deferring a
 * read against the local node (if present) until after all remote requests have
 * been dispatched.
 *
 * @param readCommand the read to execute; must be acceptable for any transient
 *                    replica in the iterable (checked via assertion)
 * @param replicas    the replicas to contact
 */
private void makeRequests(ReadCommand readCommand, Iterable<Replica> replicas)
{
    Message<ReadCommand> message = null;
    boolean readLocally = false;

    for (Replica replica : replicas)
    {
        // Transient replicas may only serve commands that accept transient data.
        assert replica.isFull() || readCommand.acceptsTransient();

        if (replica.isSelf())
        {
            readLocally = true;
            continue;
        }

        InetAddressAndPort endpoint = replica.endpoint();
        if (traceState != null)
            traceState.trace("reading {} from {}", readCommand.isDigestQuery() ? "digest" : "data", endpoint);

        // Build the outgoing message lazily, once, and reuse it for every remote replica.
        if (message == null)
            message = readCommand.createMessage(false);
        MessagingService.instance().sendWithCallback(message, endpoint, handler);
    }

    // We delay the local (potentially blocking) read till the end to avoid stalling remote requests.
    if (readLocally)
    {
        logger.trace("reading {} locally", readCommand.isDigestQuery() ? "digest" : "data");
        Stage.READ.maybeExecuteImmediately(new LocalReadRunnable(readCommand, handler));
    }
}
Example usage of org.apache.cassandra.locator.Replica in the Apache Cassandra project: the getData method of the DigestResolver class.
/**
 * Returns the resolved data for this digest read. In the common case the single
 * full-replica data response is returned directly; when a transient replica also
 * returned data, its rows are reconciled with the full response first.
 *
 * @return a filtered partition iterator over the resolved result
 */
public PartitionIterator getData()
{
    Collection<Message<ReadResponse>> responses = this.responses.snapshot();

    if (hasTransientResponse(responses))
    {
        // This path can be triggered only if we've got responses from full replicas and they match, but
        // transient replica response still contains data, which needs to be reconciled.
        DataResolver<E, P> resolver = new DataResolver<>(command, replicaPlan, NoopReadRepair.instance, queryStartNanoTime);
        resolver.preprocess(dataResponse);
        // Reconcile with transient replicas
        for (Message<ReadResponse> response : responses)
        {
            if (replicaPlan().lookup(response.from()).isTransient())
                resolver.preprocess(response);
        }
        return resolver.resolve();
    }

    return UnfilteredPartitionIterators.filter(dataResponse.payload.makeIterator(command), command.nowInSec());
}
Example usage of org.apache.cassandra.locator.Replica in the Apache Cassandra project: the preprocess method of the DigestResolver class.
/**
 * Records the message with the superclass, and additionally remembers the first
 * non-digest (data) response that came from a full replica in {@code dataResponse}.
 */
@Override
public void preprocess(Message<ReadResponse> message)
{
    super.preprocess(message);

    boolean isDataFromFullReplica = !message.payload.isDigestResponse()
                                    && replicaPlan().lookup(message.from()).isFull();
    // Keep only the first qualifying response; later ones are ignored.
    if (dataResponse == null && isDataFromFullReplica)
        dataResponse = message;
}
Example usage of org.apache.cassandra.locator.Replica in the Apache Cassandra project: the testCleanup method of the CleanupTransientTest class.
/**
 * Verifies that cleanup removes transiently replicated data only once the sstable
 * is marked repaired: an unrepaired sstable is untouched by cleanup, while a
 * repaired one retains exactly the partitions in the fully replicated range.
 */
@Test
public void testCleanup() throws Exception
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);

    // insert data and verify we get it back w/ range query
    fillCF(cfs, "val", LOOPS);

    // record max timestamps of the sstables pre-cleanup
    List<Long> expectedMaxTimestamps = getMaxTimestampList(cfs);
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).build()).size());

    // with two tokens RF=2/1 and the sstable not repaired this should do nothing
    CompactionManager.instance.performCleanup(cfs, 2);

    // ensure max timestamp of the sstables are retained post-cleanup
    // (assertEquals rather than a bare assert: it runs regardless of -ea and
    // reports the differing values on failure, matching the rest of this test)
    assertEquals(expectedMaxTimestamps, getMaxTimestampList(cfs));

    // check data is still there
    assertEquals(LOOPS, Util.getAll(Util.cmd(cfs).build()).size());

    // Get an exact count of how many partitions are in the fully replicated range and should
    // be retained
    int fullCount = 0;
    RangesAtEndpoint localRanges = StorageService.instance.getLocalReplicas(keyspace.getName()).filter(Replica::isFull);
    for (FilteredPartition partition : Util.getAll(Util.cmd(cfs).build()))
    {
        Token token = partition.partitionKey().getToken();
        for (Replica r : localRanges)
        {
            if (r.range().contains(token))
                fullCount++;
        }
    }

    // Mark the sstable repaired so cleanup is allowed to drop transient data.
    SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
    sstable.descriptor.getMetadataSerializer().mutateRepairMetadata(sstable.descriptor, 1, null, false);
    sstable.reloadSSTableMetadata();

    // This should remove approximately 50% of the data, specifically whatever was transiently replicated
    CompactionManager.instance.performCleanup(cfs, 2);

    // ensure max timestamp of the sstables are retained post-cleanup
    assertEquals(expectedMaxTimestamps, getMaxTimestampList(cfs));

    // check less data is there, all transient data should be gone since the table was repaired
    assertEquals(fullCount, Util.getAll(Util.cmd(cfs).build()).size());
}
Example usage of org.apache.cassandra.locator.Replica in the Apache Cassandra project: the testHyphenDatacenters method of the CreateTest class.
/**
 * Tests CASSANDRA-4278: a datacenter name containing a hyphen ("us-east-1") must be
 * accepted by NetworkTopologyStrategy replication options.
 */
@Test
public void testHyphenDatacenters() throws Throwable
{
    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
    try
    {
        // Register an EndpointSnitch which returns fixed values for test.
        DatabaseDescriptor.setEndpointSnitch(new AbstractEndpointSnitch()
        {
            @Override
            public String getRack(InetAddressAndPort endpoint)
            {
                return RACK1;
            }

            @Override
            public String getDatacenter(InetAddressAndPort endpoint)
            {
                return "us-east-1";
            }

            @Override
            public int compareEndpoints(InetAddressAndPort target, Replica a1, Replica a2)
            {
                return 0;
            }
        });

        // this forces the dc above to be added to the list of known datacenters (fixes static init problem
        // with this group of tests), ok to remove at some point if doing so doesn't break the test
        StorageService.instance.getTokenMetadata().updateHostId(UUID.randomUUID(), InetAddressAndPort.getByName("127.0.0.255"));
        execute("CREATE KEYSPACE Foo WITH replication = { 'class' : 'NetworkTopologyStrategy', 'us-east-1' : 1 };");
    }
    finally
    {
        // Restore the previous EndpointSnitch and drop the keyspace even if the test
        // fails, so later tests don't inherit the fixed-DC snitch or the Foo keyspace.
        DatabaseDescriptor.setEndpointSnitch(snitch);
        execute("DROP KEYSPACE IF EXISTS Foo");
    }
}
Aggregations