use of org.apache.cassandra.locator.EndpointsByRange in project cassandra by apache.
the class ActiveRepairService method getNeighbors.
/**
 * Return all of the neighbors with whom we share the provided range.
 *
 * @param keyspaceName keyspace to repair
 * @param keyspaceLocalRanges local ranges for the given keyspaceName
 * @param toRepair token range to repair
 * @param dataCenters the data centers to involve in the repair
 * @param hosts the hosts to involve in the repair
 *
 * @return neighbors with whom we share the provided range
 */
public static EndpointsForRange getNeighbors(String keyspaceName, Iterable<Range<Token>> keyspaceLocalRanges,
                                             Range<Token> toRepair, Collection<String> dataCenters,
                                             Collection<String> hosts) {
    StorageService ss = StorageService.instance;
    EndpointsByRange replicaSets = ss.getRangeToAddressMap(keyspaceName);
    Range<Token> rangeSuperSet = null;
    for (Range<Token> range : keyspaceLocalRanges) {
        if (range.contains(toRepair)) {
            rangeSuperSet = range;
            break;
        } else if (range.intersects(toRepair)) {
            throw new IllegalArgumentException(String.format("Requested range %s intersects a local range (%s) " +
                                                             "but is not fully contained in one; this would lead to " +
                                                             "imprecise repair. keyspace: %s",
                                                             toRepair.toString(), range.toString(), keyspaceName));
        }
    }
    if (rangeSuperSet == null || !replicaSets.containsKey(rangeSuperSet))
        return EndpointsForRange.empty(toRepair);

    EndpointsForRange neighbors = replicaSets.get(rangeSuperSet).withoutSelf();
    if (dataCenters != null && !dataCenters.isEmpty()) {
        TokenMetadata.Topology topology = ss.getTokenMetadata().cloneOnlyTokenMap().getTopology();
        Multimap<String, InetAddressAndPort> dcEndpointsMap = topology.getDatacenterEndpoints();
        Iterable<InetAddressAndPort> dcEndpoints = concat(transform(dataCenters, dcEndpointsMap::get));
        return neighbors.select(dcEndpoints, true);
    } else if (hosts != null && !hosts.isEmpty()) {
        Set<InetAddressAndPort> specifiedHost = new HashSet<>();
        for (final String host : hosts) {
            try {
                final InetAddressAndPort endpoint = InetAddressAndPort.getByName(host.trim());
                if (endpoint.equals(FBUtilities.getBroadcastAddressAndPort()) || neighbors.endpoints().contains(endpoint))
                    specifiedHost.add(endpoint);
            } catch (UnknownHostException e) {
                throw new IllegalArgumentException("Unknown host specified " + host, e);
            }
        }
        if (!specifiedHost.contains(FBUtilities.getBroadcastAddressAndPort()))
            throw new IllegalArgumentException("The current host must be part of the repair");
        if (specifiedHost.size() <= 1) {
            String msg = "Specified hosts %s do not share range %s needed for repair. Either restrict repair ranges " +
                         "with -st/-et options, or specify one of the neighbors that share this range with " +
                         "this node: %s.";
            throw new IllegalArgumentException(String.format(msg, hosts, toRepair, neighbors));
        }
        specifiedHost.remove(FBUtilities.getBroadcastAddressAndPort());
        return neighbors.keep(specifiedHost);
    }
    return neighbors;
}
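To make the call shape concrete, here is a minimal, hypothetical caller sketch (not taken from ActiveRepairService): it resolves the neighbor set for each local range of a keyspace "ks1", passing null for both filter arguments so neither datacenters nor hosts restrict the result.

// Hypothetical caller sketch; keyspaceLocalRanges is assumed to have been obtained
// from StorageService for this keyspace, and "ks1" is an invented keyspace name.
for (Range<Token> toRepair : keyspaceLocalRanges) {
    EndpointsForRange neighbors = ActiveRepairService.getNeighbors("ks1", keyspaceLocalRanges,
                                                                   toRepair,
                                                                   null,  // no datacenter restriction
                                                                   null); // no host restriction
    if (neighbors.isEmpty())
        continue; // no other replica shares this range, so there is nothing to repair
    logger.info("Repairing {} with {}", toRepair, neighbors);
}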
use of org.apache.cassandra.locator.EndpointsByRange in project cassandra by apache.
the class RangeStreamer method getOptimizedWorkMap.
/**
* Optimized version that also outputs the final work map
*/
private static Multimap<InetAddressAndPort, FetchReplica> getOptimizedWorkMap(EndpointsByReplica rangesWithSources,
                                                                              Collection<SourceFilter> sourceFilters,
                                                                              String keyspace) {
    // For now we aren't going to use the optimized range fetch map with transient replication,
    // to shrink the surface area to test and avoid introducing bugs.
    // In the future it's possible we could run it twice: once for full ranges with only full replicas,
    // and once for transient ranges with all replicas, then merge the results.
    EndpointsByRange.Builder unwrapped = new EndpointsByRange.Builder();
    for (Map.Entry<Replica, Replica> entry : rangesWithSources.flattenEntries()) {
        Replicas.temporaryAssertFull(entry.getValue());
        unwrapped.put(entry.getKey().range(), entry.getValue());
    }
    EndpointsByRange unwrappedView = unwrapped.build();
    RangeFetchMapCalculator calculator = new RangeFetchMapCalculator(unwrappedView, sourceFilters, keyspace);
    Multimap<InetAddressAndPort, Range<Token>> rangeFetchMapMap = calculator.getRangeFetchMap();
    logger.info("Output from RangeFetchMapCalculator for keyspace {}", keyspace);
    validateRangeFetchMap(unwrappedView, rangeFetchMapMap, keyspace);

    // Need to rewrap as Replicas
    Multimap<InetAddressAndPort, FetchReplica> wrapped = HashMultimap.create();
    for (Map.Entry<InetAddressAndPort, Range<Token>> entry : rangeFetchMapMap.entries()) {
        Replica toFetch = null;
        for (Replica r : rangesWithSources.keySet()) {
            if (r.range().equals(entry.getValue())) {
                if (toFetch != null)
                    throw new AssertionError(String.format("There shouldn't be multiple replicas for range %s, replica %s and %s here", r.range(), r, toFetch));
                toFetch = r;
            }
        }
        if (toFetch == null)
            throw new AssertionError("Shouldn't be possible for the Replica we fetch to be null here");
        // Committing the cardinal sin of synthesizing a Replica, but it's ok because we assert earlier that all of
        // them are full and the optimized range fetch map doesn't support transient replication yet.
        wrapped.put(entry.getKey(), new FetchReplica(toFetch, fullReplica(entry.getKey(), entry.getValue())));
    }
    return wrapped;
}
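For orientation, a small hedged sketch of the structure this method consumes: an EndpointsByRange built by hand with the same Builder/put/build calls used above, then handed to RangeFetchMapCalculator as the test at the end of this page does. The tokens, addresses, and keyspace name are invented for illustration.

// Illustrative only: one token range replicated on two full replicas.
// Replica.fullReplica(...) is the same factory used (statically imported) above.
Range<Token> range = new Range<>(new Murmur3Partitioner.LongToken(1),
                                 new Murmur3Partitioner.LongToken(100));
EndpointsByRange.Builder builder = new EndpointsByRange.Builder();
builder.put(range, Replica.fullReplica(InetAddressAndPort.getByName("127.0.0.1"), range));
builder.put(range, Replica.fullReplica(InetAddressAndPort.getByName("127.0.0.2"), range));
EndpointsByRange byRange = builder.build();
// The calculator then assigns each range to a single source endpoint.
RangeFetchMapCalculator calculator = new RangeFetchMapCalculator(byRange, Collections.emptyList(), "ks1");
Multimap<InetAddressAndPort, Range<Token>> fetchMap = calculator.getRangeFetchMap();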
use of org.apache.cassandra.locator.EndpointsByRange in project cassandra by apache.
the class RangeStreamer method calculateRangesToFetchWithPreferredEndpoints.
/**
* Get a map of all ranges and the source that will be cleaned up once this bootstrapped node is added for the given ranges.
* For each range, the list should only contain a single source. This allows us to consistently migrate data without violating
* consistency.
*/
public static EndpointsByReplica calculateRangesToFetchWithPreferredEndpoints(BiFunction<InetAddressAndPort, EndpointsForRange, EndpointsForRange> snitchGetSortedListByProximity,
                                                                              AbstractReplicationStrategy strat,
                                                                              ReplicaCollection<?> fetchRanges,
                                                                              boolean useStrictConsistency,
                                                                              TokenMetadata tmdBefore,
                                                                              TokenMetadata tmdAfter,
                                                                              String keyspace,
                                                                              Collection<SourceFilter> sourceFilters) {
    EndpointsByRange rangeAddresses = strat.getRangeAddresses(tmdBefore);
    InetAddressAndPort localAddress = FBUtilities.getBroadcastAddressAndPort();
    logger.debug("Keyspace: {}", keyspace);
    logger.debug("To fetch RN: {}", fetchRanges);
    logger.debug("Fetch ranges: {}", rangeAddresses);
    Predicate<Replica> testSourceFilters = and(sourceFilters);
    Function<EndpointsForRange, EndpointsForRange> sorted = endpoints -> snitchGetSortedListByProximity.apply(localAddress, endpoints);

    // This list of replicas is just candidates. With strict consistency it's going to be a narrow list.
    EndpointsByReplica.Builder rangesToFetchWithPreferredEndpoints = new EndpointsByReplica.Builder();
    for (Replica toFetch : fetchRanges) {
        // A replica is sufficient if it can provide the data we need.
        // With strict consistency and transient replication we may end up with multiple types,
        // so this predicate isn't used with strict consistency.
        Predicate<Replica> isSufficient = r -> toFetch.isTransient() || r.isFull();
        logger.debug("To fetch {}", toFetch);
        for (Range<Token> range : rangeAddresses.keySet()) {
            if (!range.contains(toFetch.range()))
                continue;
            final EndpointsForRange oldEndpoints = sorted.apply(rangeAddresses.get(range));
            // Ultimately we populate this with whatever is going to be fetched from to satisfy toFetch.
            // It could be multiple endpoints, and we must fetch from all of them if they are there.
            // With transient replication and strict consistency this is to get the full data from a full replica
            // and the transient data from the transient replica that is losing the range.
            EndpointsForRange sources;
            // Due to CASSANDRA-5953 we can have a higher RF than we have endpoints.
            // So we need to be careful to only be strict when endpoints == RF.
            boolean isStrictConsistencyApplicable = useStrictConsistency && (oldEndpoints.size() == strat.getReplicationFactor().allReplicas);
            if (isStrictConsistencyApplicable) {
                EndpointsForRange strictEndpoints;
                // Start with two sets: who replicates the range before, and who replicates it after
                EndpointsForRange newEndpoints = strat.calculateNaturalReplicas(toFetch.range().right, tmdAfter);
                logger.debug("Old endpoints {}", oldEndpoints);
                logger.debug("New endpoints {}", newEndpoints);
                // Remove new endpoints from old endpoints based on address
                strictEndpoints = oldEndpoints.without(newEndpoints.endpoints());
                if (strictEndpoints.size() > 1)
                    throw new AssertionError("Expected <= 1 endpoint but found " + strictEndpoints);
                // These replicas are required for strict consistency, so it's an error if the
                // source filters removed any of them.
                if (!all(strictEndpoints, testSourceFilters))
                    throw new IllegalStateException("Necessary replicas for strict consistency were removed by source filters: " + buildErrorMessage(sourceFilters, strictEndpoints));
                // An empty set is only legal when transitioning from transient to full.
                if (strictEndpoints.isEmpty() && toFetch.isTransient())
                    throw new AssertionError("If there are no endpoints to fetch from then we must be transitioning from transient to full for range " + toFetch);
                if (!any(strictEndpoints, isSufficient)) {
                    // need an additional replica; include all our filters, to ensure we include a matching node
                    Optional<Replica> fullReplica = Iterables.<Replica>tryFind(oldEndpoints, and(isSufficient, testSourceFilters)).toJavaUtil();
                    if (fullReplica.isPresent())
                        strictEndpoints = Endpoints.concat(strictEndpoints, EndpointsForRange.of(fullReplica.get()));
                    else
                        throw new IllegalStateException("Couldn't find any matching sufficient replica out of " + buildErrorMessage(sourceFilters, oldEndpoints));
                }
                sources = strictEndpoints;
            } else {
                // Without strict consistency we have given up on correctness, so there is no point in fetching from
                // a random full + transient replica pair, since that is also likely to lose data.
                // Also apply the testSourceFilters that were given to us, so we can safely select a single source.
                sources = sorted.apply(oldEndpoints.filter(and(isSufficient, testSourceFilters)));
                // Limit it to just the first possible source; we don't need more than one, and downstream
                // will fetch from every source we supply.
                sources = sources.size() > 0 ? sources.subList(0, 1) : sources;
            }
            // storing range and preferred endpoint set
            rangesToFetchWithPreferredEndpoints.putAll(toFetch, sources, Conflict.NONE);
            logger.debug("Endpoints to fetch for {} are {}", toFetch, sources);
        }
        EndpointsForRange addressList = rangesToFetchWithPreferredEndpoints.getIfPresent(toFetch);
        if (addressList == null)
            throw new IllegalStateException("Failed to find endpoints to fetch " + toFetch);
        /*
         * When we move forwards (shrink our bucket) we are the one losing a range, and no one else loses
         * from that action (we also don't gain). When we move backwards there are two nodes losing a range: one is
         * a full replica and the other is a transient replica. So we need to fetch from two places in that case for
         * the full range we gain. For a transient range we only need to fetch from one.
         */
        if (useStrictConsistency && addressList.size() > 1 && (addressList.filter(Replica::isFull).size() > 1 || addressList.filter(Replica::isTransient).size() > 1))
            throw new IllegalStateException(String.format("Multiple strict sources found for %s, sources: %s", toFetch, addressList));
        // We must have enough sources to fetch from
        if (!any(addressList, isSufficient)) {
            if (strat.getReplicationFactor().allReplicas == 1) {
                if (useStrictConsistency) {
                    logger.warn("A node required to move the data consistently is down");
                    throw new IllegalStateException("Unable to find sufficient sources for streaming range " + toFetch + " in keyspace " + keyspace + " with RF=1. " +
                                                    "Ensure this keyspace contains replicas in the source datacenter.");
                } else
                    logger.warn("Unable to find sufficient sources for streaming range {} in keyspace {} with RF=1. " +
                                "Keyspace might be missing data.", toFetch, keyspace);
            } else {
                if (useStrictConsistency)
                    logger.warn("A node required to move the data consistently is down");
                throw new IllegalStateException("Unable to find sufficient sources for streaming range " + toFetch + " in keyspace " + keyspace);
            }
        }
    }
    return rangesToFetchWithPreferredEndpoints.build();
}
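A hedged invocation sketch, assuming strat, fetchRanges, tmdBefore, and tmdAfter are supplied by the surrounding bootstrap/move machinery; an identity lambda stands in for the snitch's proximity sort, and no source filters are applied.

// Illustrative invocation only; strat, fetchRanges, tmdBefore and tmdAfter are assumed
// to come from the bootstrap path, and "ks1" is an invented keyspace name.
BiFunction<InetAddressAndPort, EndpointsForRange, EndpointsForRange> noSort = (self, endpoints) -> endpoints;
EndpointsByReplica toFetch = RangeStreamer.calculateRangesToFetchWithPreferredEndpoints(noSort, strat, fetchRanges,
                                                                                        true, // strict consistency
                                                                                        tmdBefore, tmdAfter,
                                                                                        "ks1", Collections.emptyList());
for (Map.Entry<Replica, Replica> e : toFetch.flattenEntries())
    logger.debug("Will fetch {} from {}", e.getKey(), e.getValue());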
use of org.apache.cassandra.locator.EndpointsByRange in project cassandra by apache.
the class RangeFetchMapCalculatorTest method testTrivialRangeLocalHostStreaming.
@Test
public void testTrivialRangeLocalHostStreaming() throws UnknownHostException {
    // trivial ranges should not try to stream from localhost
    EndpointsByRange.Builder rangesWithSources = new EndpointsByRange.Builder();
    addTrivialRangeAndSources(rangesWithSources, 21, 30, "127.0.0.2", "127.0.0.1");
    addTrivialRangeAndSources(rangesWithSources, 31, 40, "127.0.0.1", "127.0.0.2");
    EndpointsByRange ebr = rangesWithSources.build();
    RangeFetchMapCalculator calculator = new RangeFetchMapCalculator(ebr, Collections.emptyList(), "Test");
    RangeStreamer.validateRangeFetchMap(ebr, calculator.getRangeFetchMap(), "Test");
}
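addTrivialRangeAndSources is a private helper defined elsewhere in RangeFetchMapCalculatorTest. A plausible reconstruction (an assumption; the real helper may differ, e.g. in how it builds the trivially small range) would register each host as a full replica of the given token range:

// Hypothetical reconstruction of the test helper; not the actual test code.
private static void addTrivialRangeAndSources(EndpointsByRange.Builder builder, int left, int right, String... hosts) throws UnknownHostException {
    Range<Token> range = new Range<>(new Murmur3Partitioner.LongToken(left), new Murmur3Partitioner.LongToken(right));
    for (String host : hosts)
        builder.put(range, Replica.fullReplica(InetAddressAndPort.getByName(host), range));
}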