use of org.apache.cassandra.locator.InetAddressAndPort in project cassandra by apache.
the class StorageProxy method appliesLocally.
public boolean appliesLocally(Mutation mutation) {
    String keyspaceName = mutation.getKeyspaceName();
    Token token = mutation.key().getToken();
    InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
    return ReplicaLayout.forTokenWriteLiveAndDown(Keyspace.open(keyspaceName), token).all().endpoints().contains(local);
}
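For context, a minimal standalone sketch of how an InetAddressAndPort is obtained and compared against the node's own broadcast address, which is the same identity check appliesLocally performs above. The host and port literals are illustrative, and the FBUtilities call assumes an initialized DatabaseDescriptor on a running node.
import java.net.UnknownHostException;

import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.utils.FBUtilities;

public final class LocalEndpointCheck {
    public static void main(String[] args) throws UnknownHostException {
        // Parse "host:port" into an InetAddressAndPort; a bare host falls back to the default port.
        InetAddressAndPort peer = InetAddressAndPort.getByName("127.0.0.1:7000");
        // The broadcast address-and-port is this node's identity in the ring.
        InetAddressAndPort local = FBUtilities.getBroadcastAddressAndPort();
        // Equality covers both the address and the port, which is what the contains(local) check above relies on.
        System.out.println(peer.equals(local) ? "endpoint is this node" : "endpoint is a remote node");
    }
}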
use of org.apache.cassandra.locator.InetAddressAndPort in project cassandra by apache.
the class RangeRelocator method calculateToFromStreams.
public void calculateToFromStreams() {
    logger.debug("Current tmd: {}, Updated tmd: {}", tokenMetaClone, tokenMetaCloneAllSettled);
    for (String keyspace : keyspaceNames) {
        // replication strategy of the current keyspace
        AbstractReplicationStrategy strategy = Keyspace.open(keyspace).getReplicationStrategy();
        logger.info("Calculating ranges to stream and request for keyspace {}", keyspace);
        // From what I have seen we only ever call this with a single token from StorageService.move(Token)
        for (Token newToken : tokens) {
            Collection<Token> currentTokens = tokenMetaClone.getTokens(localAddress);
            if (currentTokens.size() > 1 || currentTokens.isEmpty()) {
                throw new AssertionError("Unexpected current tokens: " + currentTokens);
            }
            // calculated parts of the ranges to request/stream from/to nodes in the ring
            Pair<RangesAtEndpoint, RangesAtEndpoint> streamAndFetchOwnRanges;
            // when this node is the only member of its datacenter there is nothing to stream or fetch,
            // so it's easier to just identify this case up front.
            if (tokenMetaClone.getTopology().getDatacenterEndpoints().get(DatabaseDescriptor.getEndpointSnitch().getLocalDatacenter()).size() > 1) {
                // getting collection of the currently used ranges by this keyspace
                RangesAtEndpoint currentReplicas = strategy.getAddressReplicas(localAddress);
                // collection of ranges which this node will serve after move to the new token
                RangesAtEndpoint updatedReplicas = strategy.getPendingAddressRanges(tokenMetaClone, newToken, localAddress);
                streamAndFetchOwnRanges = calculateStreamAndFetchRanges(currentReplicas, updatedReplicas);
            } else {
                streamAndFetchOwnRanges = Pair.create(RangesAtEndpoint.empty(localAddress), RangesAtEndpoint.empty(localAddress));
            }
            RangesByEndpoint rangesToStream = calculateRangesToStreamWithEndpoints(streamAndFetchOwnRanges.left, strategy, tokenMetaClone, tokenMetaCloneAllSettled);
            logger.info("Endpoint ranges to stream to " + rangesToStream);
            // stream ranges
            for (InetAddressAndPort address : rangesToStream.keySet()) {
                logger.debug("Will stream range {} of keyspace {} to endpoint {}", rangesToStream.get(address), keyspace, address);
                RangesAtEndpoint ranges = rangesToStream.get(address);
                streamPlan.transferRanges(address, keyspace, ranges);
            }
            Multimap<InetAddressAndPort, RangeStreamer.FetchReplica> rangesToFetch = calculateRangesToFetchWithPreferredEndpoints(streamAndFetchOwnRanges.right, strategy, keyspace, tokenMetaClone, tokenMetaCloneAllSettled);
            // stream requests
            rangesToFetch.asMap().forEach((address, sourceAndOurReplicas) -> {
                RangesAtEndpoint full = sourceAndOurReplicas.stream()
                                                            .filter(pair -> pair.remote.isFull())
                                                            .map(pair -> pair.local)
                                                            .collect(RangesAtEndpoint.collector(localAddress));
                RangesAtEndpoint trans = sourceAndOurReplicas.stream()
                                                             .filter(pair -> pair.remote.isTransient())
                                                             .map(pair -> pair.local)
                                                             .collect(RangesAtEndpoint.collector(localAddress));
                logger.debug("Will request range {} of keyspace {} from endpoint {}", rangesToFetch.get(address), keyspace, address);
                streamPlan.requestRanges(address, keyspace, full, trans);
            });
            logger.debug("Keyspace {}: work map {}.", keyspace, rangesToFetch);
        }
    }
}
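As a small illustration of the full/transient split performed in the fetch lambda above, the sketch below builds a couple of Replica objects by hand and collects each partition into a RangesAtEndpoint keyed by one endpoint. The endpoint, tokens, and ranges are made up for the example; only the collector pattern mirrors the code above.
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.List;

import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.RangesAtEndpoint;
import org.apache.cassandra.locator.Replica;

public final class FullTransientSplit {
    public static void main(String[] args) throws UnknownHostException {
        InetAddressAndPort local = InetAddressAndPort.getByName("127.0.0.1:7000");
        Range<Token> r1 = new Range<>(new LongToken(0), new LongToken(100));
        Range<Token> r2 = new Range<>(new LongToken(100), new LongToken(200));
        List<Replica> replicas = Arrays.asList(Replica.fullReplica(local, r1), Replica.transientReplica(local, r2));

        // Same pattern as the fetch lambda: partition replicas by full/transient and
        // collect each partition into a RangesAtEndpoint owned by the local endpoint.
        RangesAtEndpoint full = replicas.stream().filter(Replica::isFull).collect(RangesAtEndpoint.collector(local));
        RangesAtEndpoint trans = replicas.stream().filter(Replica::isTransient).collect(RangesAtEndpoint.collector(local));
        System.out.println("full: " + full + ", transient: " + trans);
    }
}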
use of org.apache.cassandra.locator.InetAddressAndPort in project cassandra by apache.
the class StorageProxy method shouldHint.
/**
 * Determines whether a hint should be stored or not.
 * It rejects early if any of the following conditions is met:
 * - Hints are disabled entirely or for the datacenter the replica belongs to
 * - The replica is transient or is the local node
 * - The replica is no longer part of the ring
 * - The hint window has expired
 * - The hints have reached the size limit for the node
 * Otherwise, the hint is permitted.
 *
 * @param replica the replica for the hint
 * @param tryEnablePersistentWindow if true, consider hint_window_persistent_enabled; otherwise ignore it
 * @return true to permit or false to reject the hint
 */
public static boolean shouldHint(Replica replica, boolean tryEnablePersistentWindow) {
    if (!DatabaseDescriptor.hintedHandoffEnabled() || replica.isTransient() || replica.isSelf())
        return false;
    Set<String> disabledDCs = DatabaseDescriptor.hintedHandoffDisabledDCs();
    if (!disabledDCs.isEmpty()) {
        final String dc = DatabaseDescriptor.getEndpointSnitch().getDatacenter(replica);
        if (disabledDCs.contains(dc)) {
            Tracing.trace("Not hinting {} since its data center {} has been disabled {}", replica, dc, disabledDCs);
            return false;
        }
    }
    InetAddressAndPort endpoint = replica.endpoint();
    int maxHintWindow = DatabaseDescriptor.getMaxHintWindow();
    long endpointDowntime = Gossiper.instance.getEndpointDowntime(endpoint);
    boolean hintWindowExpired = endpointDowntime > maxHintWindow;
    UUID hostIdForEndpoint = StorageService.instance.getHostIdForEndpoint(endpoint);
    if (hostIdForEndpoint == null) {
        Tracing.trace("Discarding hint for endpoint not part of ring: {}", endpoint);
        return false;
    }
    // if persisting hints window, hintWindowExpired might be updated according to the timestamp of the earliest hint
    if (tryEnablePersistentWindow && !hintWindowExpired && DatabaseDescriptor.hintWindowPersistentEnabled()) {
        long earliestHint = HintsService.instance.getEarliestHintForHost(hostIdForEndpoint);
        hintWindowExpired = Clock.Global.currentTimeMillis() - maxHintWindow > earliestHint;
        if (hintWindowExpired)
            Tracing.trace("Not hinting {} for which there is the earliest hint stored at {}", replica, earliestHint);
    }
    if (hintWindowExpired) {
        HintsService.instance.metrics.incrPastWindow(endpoint);
        Tracing.trace("Not hinting {} which has been down {} ms", endpoint, endpointDowntime);
        return false;
    }
    long maxHintsSize = DatabaseDescriptor.getMaxHintsSizePerHost();
    long actualTotalHintsSize = HintsService.instance.getTotalHintsSize(hostIdForEndpoint);
    boolean hasHintsReachedMaxSize = maxHintsSize > 0 && actualTotalHintsSize > maxHintsSize;
    if (hasHintsReachedMaxSize) {
        Tracing.trace("Not hinting {} which has reached to the max hints size {} bytes on disk. The actual hints size on disk: {}", endpoint, maxHintsSize, actualTotalHintsSize);
        return false;
    }
    return true;
}
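A hedged sketch of how a caller might consult shouldHint for a remote endpoint. The host string, the token range, and the ad-hoc Replica construction are illustrative only and assume a running node with gossip state available; shouldHint itself never inspects the range, so an arbitrary one is used.
import java.net.UnknownHostException;

import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.Replica;
import org.apache.cassandra.service.StorageProxy;

public final class HintDecisionExample {
    static boolean wouldHint(String hostAndPort) throws UnknownHostException {
        InetAddressAndPort endpoint = InetAddressAndPort.getByName(hostAndPort);
        // A full replica over an arbitrary range; the endpoint and full/transient status are what matter here.
        Range<Token> range = new Range<>(new LongToken(0), new LongToken(100));
        Replica replica = Replica.fullReplica(endpoint, range);
        // true: also consult hint_window_persistent_enabled and the earliest stored hint for the host.
        return StorageProxy.shouldHint(replica, true);
    }
}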
use of org.apache.cassandra.locator.InetAddressAndPort in project cassandra by apache.
the class ActiveRepairService method getNeighbors.
/**
 * Return all of the neighbors with whom we share the provided range.
 *
 * @param keyspaceName keyspace to repair
 * @param keyspaceLocalRanges local ranges for the given keyspace
 * @param toRepair range to repair
 * @param dataCenters the data centers to involve in the repair
 * @param hosts the hosts to involve in the repair
 *
 * @return neighbors with whom we share the provided range
 */
public static EndpointsForRange getNeighbors(String keyspaceName, Iterable<Range<Token>> keyspaceLocalRanges, Range<Token> toRepair, Collection<String> dataCenters, Collection<String> hosts) {
    StorageService ss = StorageService.instance;
    EndpointsByRange replicaSets = ss.getRangeToAddressMap(keyspaceName);
    Range<Token> rangeSuperSet = null;
    for (Range<Token> range : keyspaceLocalRanges) {
        if (range.contains(toRepair)) {
            rangeSuperSet = range;
            break;
        } else if (range.intersects(toRepair)) {
            throw new IllegalArgumentException(String.format("Requested range %s intersects a local range (%s) " +
                                                             "but is not fully contained in one; this would lead to " +
                                                             "imprecise repair. keyspace: %s", toRepair.toString(), range.toString(), keyspaceName));
        }
    }
    if (rangeSuperSet == null || !replicaSets.containsKey(rangeSuperSet))
        return EndpointsForRange.empty(toRepair);
    EndpointsForRange neighbors = replicaSets.get(rangeSuperSet).withoutSelf();
    if (dataCenters != null && !dataCenters.isEmpty()) {
        TokenMetadata.Topology topology = ss.getTokenMetadata().cloneOnlyTokenMap().getTopology();
        Multimap<String, InetAddressAndPort> dcEndpointsMap = topology.getDatacenterEndpoints();
        Iterable<InetAddressAndPort> dcEndpoints = concat(transform(dataCenters, dcEndpointsMap::get));
        return neighbors.select(dcEndpoints, true);
    } else if (hosts != null && !hosts.isEmpty()) {
        Set<InetAddressAndPort> specifiedHost = new HashSet<>();
        for (final String host : hosts) {
            try {
                final InetAddressAndPort endpoint = InetAddressAndPort.getByName(host.trim());
                if (endpoint.equals(FBUtilities.getBroadcastAddressAndPort()) || neighbors.endpoints().contains(endpoint))
                    specifiedHost.add(endpoint);
            } catch (UnknownHostException e) {
                throw new IllegalArgumentException("Unknown host specified " + host, e);
            }
        }
        if (!specifiedHost.contains(FBUtilities.getBroadcastAddressAndPort()))
            throw new IllegalArgumentException("The current host must be part of the repair");
        if (specifiedHost.size() <= 1) {
            String msg = "Specified hosts %s do not share range %s needed for repair. Either restrict repair ranges " +
                         "with -st/-et options, or specify one of the neighbors that share this range with " +
                         "this node: %s.";
            throw new IllegalArgumentException(String.format(msg, hosts, toRepair, neighbors));
        }
        specifiedHost.remove(FBUtilities.getBroadcastAddressAndPort());
        return neighbors.keep(specifiedHost);
    }
    return neighbors;
}
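A hedged sketch of invoking getNeighbors for each local range without any datacenter or host restriction. Assembling keyspaceLocalRanges from StorageService.instance.getLocalReplicas(keyspace).ranges() is an assumption made for this example and presumes a node that has joined the ring.
import java.util.Collection;
import java.util.Collections;

import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.locator.EndpointsForRange;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.service.ActiveRepairService;
import org.apache.cassandra.service.StorageService;

public final class NeighborLookupExample {
    static void printNeighbors(String keyspace) {
        // Local ranges for the keyspace (assumption: this mirrors how the repair path builds keyspaceLocalRanges).
        Collection<Range<Token>> localRanges = StorageService.instance.getLocalReplicas(keyspace).ranges();
        for (Range<Token> range : localRanges) {
            // Empty restrictions: every replica sharing the range, minus this node, is returned.
            EndpointsForRange neighbors = ActiveRepairService.getNeighbors(keyspace, localRanges, range,
                                                                           Collections.emptyList(), Collections.emptyList());
            for (InetAddressAndPort endpoint : neighbors.endpoints())
                System.out.println(range + " -> " + endpoint);
        }
    }
}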
use of org.apache.cassandra.locator.InetAddressAndPort in project cassandra by apache.
the class ActiveRepairService method cleanUp.
/**
 * Send Verb.CLEANUP_MSG to the given endpoints. This results in removing the parent session object from each
 * endpoint's cache.
 * This method does not throw an exception in case of a messaging failure.
 */
public void cleanUp(UUID parentRepairSession, Set<InetAddressAndPort> endpoints) {
    for (InetAddressAndPort endpoint : endpoints) {
        try {
            if (FailureDetector.instance.isAlive(endpoint)) {
                CleanupMessage message = new CleanupMessage(parentRepairSession);
                Message<CleanupMessage> msg = Message.out(Verb.CLEANUP_MSG, message);
                RequestCallback loggingCallback = new RequestCallback() {
                    @Override
                    public void onResponse(Message msg) {
                        logger.trace("Successfully cleaned up {} parent repair session on {}.", parentRepairSession, endpoint);
                    }

                    @Override
                    public void onFailure(InetAddressAndPort from, RequestFailureReason failureReason) {
                        logger.debug("Failed to clean up parent repair session {} on {}. The uncleaned sessions will " +
                                     "be removed on a node restart. This should not be a problem unless you see thousands " +
                                     "of messages like this.", parentRepairSession, endpoint);
                    }
                };
                MessagingService.instance().sendWithCallback(msg, endpoint, loggingCallback);
            }
        } catch (Exception exc) {
            logger.warn("Failed to send a clean up message to {}", endpoint, exc);
        }
    }
}
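Finally, a hedged sketch of triggering the cleanup fan-out above from a coordinator. The host strings are placeholders, and passing an arbitrary UUID is for illustration only (real parent repair sessions are created by the repair coordinator); the call assumes a running node with messaging and failure-detector state.
import java.net.UnknownHostException;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;

import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.service.ActiveRepairService;

public final class RepairCleanupExample {
    static void cleanUpSession(UUID parentRepairSession, String... hosts) throws UnknownHostException {
        Set<InetAddressAndPort> endpoints = new HashSet<>();
        for (String host : hosts)
            endpoints.add(InetAddressAndPort.getByName(host));
        // Fire-and-forget: cleanUp logs messaging failures but never throws on them.
        ActiveRepairService.instance.cleanUp(parentRepairSession, endpoints);
    }
}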