Use of org.apache.cassandra.dht.Token in project eiger by wlloyd.
In the class NetworkTopologyStrategy, the method calculateNaturalEndpoints:
public List<InetAddress> calculateNaturalEndpoints(Token searchToken, TokenMetadata tokenMetadata) {
    List<InetAddress> endpoints = new ArrayList<InetAddress>(getReplicationFactor());
    for (Entry<String, Integer> dcEntry : datacenters.entrySet()) {
        String dcName = dcEntry.getKey();
        int dcReplicas = dcEntry.getValue();
        // collect endpoints in this DC
        TokenMetadata dcTokens = new TokenMetadata();
        for (Entry<Token, InetAddress> tokenEntry : tokenMetadata.entrySet()) {
            if (snitch.getDatacenter(tokenEntry.getValue()).equals(dcName))
                dcTokens.updateNormalToken(tokenEntry.getKey(), tokenEntry.getValue());
        }
        List<InetAddress> dcEndpoints = new ArrayList<InetAddress>(dcReplicas);
        Set<String> racks = new HashSet<String>();
        // first pass: only collect replicas on unique racks
        for (Iterator<Token> iter = TokenMetadata.ringIterator(dcTokens.sortedTokens(), searchToken, false); dcEndpoints.size() < dcReplicas && iter.hasNext(); ) {
            Token token = iter.next();
            InetAddress endpoint = dcTokens.getEndpoint(token);
            String rack = snitch.getRack(endpoint);
            if (!racks.contains(rack)) {
                dcEndpoints.add(endpoint);
                racks.add(rack);
            }
        }
        // second pass: if replica count has not been achieved from unique racks, add nodes from duplicate racks
        for (Iterator<Token> iter = TokenMetadata.ringIterator(dcTokens.sortedTokens(), searchToken, false); dcEndpoints.size() < dcReplicas && iter.hasNext(); ) {
            Token token = iter.next();
            InetAddress endpoint = dcTokens.getEndpoint(token);
            if (!dcEndpoints.contains(endpoint))
                dcEndpoints.add(endpoint);
        }
        if (logger.isDebugEnabled())
            logger.debug("{} endpoints in datacenter {} for token {} ", new Object[] { StringUtils.join(dcEndpoints, ","), dcName, searchToken });
        endpoints.addAll(dcEndpoints);
    }
    return endpoints;
}
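For orientation, a minimal sketch of how this method is typically reached. The keyspace name, datacenter names, replica counts, and the key variable below are illustrative assumptions, and the constructor signature is taken from contemporaneous Cassandra rather than confirmed against eiger:

// Sketch only: hypothetical replication options naming two datacenters.
Map<String, String> options = new HashMap<String, String>();
options.put("DC1", "3"); // three replicas in DC1
options.put("DC2", "2"); // two replicas in DC2
NetworkTopologyStrategy strategy = new NetworkTopologyStrategy(
        "Keyspace1", tokenMetadata, DatabaseDescriptor.getEndpointSnitch(), options);
Token searchToken = StorageService.getPartitioner().getToken(key); // key: ByteBuffer row key
List<InetAddress> replicas = strategy.calculateNaturalEndpoints(searchToken, tokenMetadata);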
Use of org.apache.cassandra.dht.Token in project eiger by wlloyd.
In the class TokenMetadata, the method getSuccessor:
public Token getSuccessor(Token token) {
    List tokens = sortedTokens();
    int index = Collections.binarySearch(tokens, token);
    assert index >= 0 : token + " not found in " + StringUtils.join(tokenToEndpointMap.keySet(), ", ");
    // wrap around: the successor of the last token is the first token on the ring
    return (Token) ((index == (tokens.size() - 1)) ? tokens.get(0) : tokens.get(index + 1));
}
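A hedged illustration of the wrap-around behavior; the token values and addresses below are made up for the example:

// Hypothetical three-node ring with tokens 10, 20, 30 (RandomPartitioner-style tokens).
TokenMetadata tm = new TokenMetadata();
tm.updateNormalToken(new BigIntegerToken("10"), InetAddress.getByName("127.0.0.1"));
tm.updateNormalToken(new BigIntegerToken("20"), InetAddress.getByName("127.0.0.2"));
tm.updateNormalToken(new BigIntegerToken("30"), InetAddress.getByName("127.0.0.3"));
// The successor of the highest token wraps to the lowest token on the ring.
assert tm.getSuccessor(new BigIntegerToken("30")).equals(new BigIntegerToken("10"));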
Use of org.apache.cassandra.dht.Token in project eiger by wlloyd.
In the class TokenMetadata, the method toString:
public String toString() {
    StringBuilder sb = new StringBuilder();
    lock.readLock().lock();
    try {
        Set<InetAddress> eps = tokenToEndpointMap.inverse().keySet();
        if (!eps.isEmpty()) {
            sb.append("Normal Tokens:");
            sb.append(System.getProperty("line.separator"));
            for (InetAddress ep : eps) {
                sb.append(ep);
                sb.append(":");
                sb.append(tokenToEndpointMap.inverse().get(ep));
                sb.append(System.getProperty("line.separator"));
            }
        }
        if (!bootstrapTokens.isEmpty()) {
            sb.append("Bootstrapping Tokens:");
            sb.append(System.getProperty("line.separator"));
            for (Map.Entry<Token, InetAddress> entry : bootstrapTokens.entrySet()) {
                sb.append(entry.getValue() + ":" + entry.getKey());
                sb.append(System.getProperty("line.separator"));
            }
        }
        if (!leavingEndpoints.isEmpty()) {
            sb.append("Leaving Endpoints:");
            sb.append(System.getProperty("line.separator"));
            for (InetAddress ep : leavingEndpoints) {
                sb.append(ep);
                sb.append(System.getProperty("line.separator"));
            }
        }
        if (!pendingRanges.isEmpty()) {
            sb.append("Pending Ranges:");
            sb.append(System.getProperty("line.separator"));
            sb.append(printPendingRanges());
        }
    } finally {
        lock.readLock().unlock();
    }
    return sb.toString();
}
Use of org.apache.cassandra.dht.Token in project eiger by wlloyd.
In the class TokenMetadata, the method getPredecessor:
public Token getPredecessor(Token token) {
    List tokens = sortedTokens();
    int index = Collections.binarySearch(tokens, token);
    assert index >= 0 : token + " not found in " + StringUtils.join(tokenToEndpointMap.keySet(), ", ");
    // wrap around: the predecessor of the first token is the last token on the ring
    return (Token) (index == 0 ? tokens.get(tokens.size() - 1) : tokens.get(index - 1));
}
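This is the mirror image of getSuccessor above; with the same hypothetical three-node ring, the predecessor of the lowest token wraps to the highest:

// Continuing the sketch from getSuccessor: tokens 10, 20, 30 on the ring.
assert tm.getPredecessor(new BigIntegerToken("10")).equals(new BigIntegerToken("30"));
assert tm.getPredecessor(new BigIntegerToken("20")).equals(new BigIntegerToken("10"));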
Use of org.apache.cassandra.dht.Token in project eiger by wlloyd.
In the class ThriftValidation, the method validateKeyRange:
public static void validateKeyRange(CFMetaData metadata, ByteBuffer superColumn, KeyRange range) throws InvalidRequestException {
    if ((range.start_key == null) != (range.end_key == null)) {
        throw new InvalidRequestException("start key and end key must either both be non-null, or both be null");
    }
    if ((range.start_token == null) != (range.end_token == null)) {
        throw new InvalidRequestException("start token and end token must either both be non-null, or both be null");
    }
    if ((range.start_key == null) == (range.start_token == null)) {
        throw new InvalidRequestException("exactly one of {start key, end key} or {start token, end token} must be specified");
    }
    if (range.start_key != null) {
        IPartitioner p = StorageService.getPartitioner();
        Token startToken = p.getToken(range.start_key);
        Token endToken = p.getToken(range.end_key);
        if (startToken.compareTo(endToken) > 0 && !endToken.isMinimum(p)) {
            if (p instanceof RandomPartitioner)
                throw new InvalidRequestException("start key's md5 sorts after end key's md5. this is not allowed; you probably should not specify end key at all, under RandomPartitioner");
            else
                throw new InvalidRequestException("start key must sort before (or equal to) finish key in your partitioner!");
        }
    }
    validateFilterClauses(metadata, range.row_filter);
    if (!isEmpty(range.row_filter) && superColumn != null) {
        throw new InvalidRequestException("super columns are not yet supported for indexing");
    }
    if (!isEmpty(range.row_filter) && range.start_key == null) {
        // See KeySearcher.search()
        throw new InvalidRequestException("filtered queries must use concrete keys rather than tokens");
    }
    if (range.count <= 0) {
        throw new InvalidRequestException("maxRows must be positive");
    }
}
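A hedged sketch of a request that passes these checks, assuming an order-preserving partitioner (under RandomPartitioner the md5 tokens of two arbitrary keys may sort in either order) and a previously validated CFMetaData named metadata, which is assumed here:

// Key-bounded range: both keys set, both tokens left null, positive count.
KeyRange range = new KeyRange();
range.setStart_key(ByteBufferUtil.bytes("alice"));
range.setEnd_key(ByteBufferUtil.bytes("bob"));
range.setCount(100);
ThriftValidation.validateKeyRange(metadata, null, range); // null: no super column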