use of org.apache.cassandra.locator.TokenMetadata in project eiger by wlloyd.
the class TokenMetadataTest method beforeClass.
@BeforeClass
public static void beforeClass() throws Throwable {
    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    tmd.updateNormalToken(token(ONE), InetAddress.getByName("127.0.0.1"));
    tmd.updateNormalToken(token(SIX), InetAddress.getByName("127.0.0.6"));
    RING = tmd.sortedTokens();
}
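This fixture registers two endpoints and caches the sorted ring for the tests that follow. As a rough illustration of the operation a sorted token list enables, here is a minimal, self-contained sketch of ring-successor lookup, the primitive that TokenMetadata.ringIterator is built on (plain integers stand in for tokens; successor is a hypothetical helper, not part of the project):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class RingSketch {
    // Hypothetical helper: returns the first token >= t, wrapping to the start of the ring.
    static int successor(List<Integer> sortedTokens, int t) {
        int i = Collections.binarySearch(sortedTokens, t);
        if (i < 0)
            i = -i - 1; // not found: binarySearch encodes the insertion point
        return sortedTokens.get(i == sortedTokens.size() ? 0 : i);
    }

    public static void main(String[] args) {
        List<Integer> ring = Arrays.asList(1, 6); // loosely mirroring ONE and SIX above
        System.out.println(successor(ring, 3)); // 6
        System.out.println(successor(ring, 7)); // 1 (wraps around the ring)
    }
}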
use of org.apache.cassandra.locator.TokenMetadata in project eiger by wlloyd.
the class CleanupTest method testCleanupWithIndexes.
@Test
public void testCleanupWithIndexes() throws IOException, ExecutionException, InterruptedException {
    Table table = Table.open(TABLE1);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(CF1);
    assertEquals(cfs.indexManager.getIndexedColumns().iterator().next(), COLUMN);

    List<Row> rows;

    // insert data and verify we get it back w/ range query
    fillCF(cfs, LOOPS);
    rows = Util.getRangeSlice(cfs);
    assertEquals(LOOPS, rows.size());

    SecondaryIndex index = cfs.indexManager.getIndexForColumn(COLUMN);
    long start = System.currentTimeMillis();
    while (!index.isIndexBuilt(COLUMN) && System.currentTimeMillis() < start + 10000)
        Thread.sleep(10);

    // verify we get it back w/ index query too
    IndexExpression expr = new IndexExpression(COLUMN, IndexOperator.EQ, VALUE);
    List<IndexExpression> clause = Arrays.asList(expr);
    IFilter filter = new IdentityQueryFilter();
    IPartitioner p = StorageService.getPartitioner();
    Range<RowPosition> range = Util.range("", "");
    rows = table.getColumnFamilyStore(CF1).search(clause, range, Integer.MAX_VALUE, filter);
    assertEquals(LOOPS, rows.size());

    // We don't allow cleanup when the local host has no range, to avoid wiping out all data
    // when a node has not yet joined the ring. So to make sure cleanup erases everything here,
    // we give the local host the tiniest possible range.
    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    byte[] tk1 = new byte[1], tk2 = new byte[1];
    tk1[0] = 2;
    tk2[0] = 1;
    tmd.updateNormalToken(new BytesToken(tk1), InetAddress.getByName("127.0.0.1"));
    tmd.updateNormalToken(new BytesToken(tk2), InetAddress.getByName("127.0.0.2"));

    CompactionManager.instance.performCleanup(cfs, new NodeId.OneShotRenewer());

    // row data should be gone
    rows = Util.getRangeSlice(cfs);
    assertEquals(0, rows.size());

    // not only should it be gone, but there should be no data on disk, not even tombstones
    assert cfs.getSSTables().isEmpty();

    // 2ary indexes should return no results, too (although tombstones won't be gone until compacted)
    rows = cfs.search(clause, range, Integer.MAX_VALUE, filter);
    assertEquals(0, rows.size());
}
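The token trick in the middle of this test is worth unpacking: with byte token 0x02 for the local node and 0x01 for the peer, the local node owns only the interval (0x01, 0x02], so cleanup is entitled to drop effectively every row. Below is a minimal sketch of that ownership test, assuming Cassandra's usual "(predecessor, token]" range convention and unsigned lexicographic ordering for byte tokens; the compare and owns helpers are hypothetical, written only for this illustration:

public class TinyRangeSketch {
    // Unsigned lexicographic comparison, as used for byte-ordered tokens.
    static int compare(byte[] a, byte[] b) {
        for (int i = 0; i < Math.min(a.length, b.length); i++) {
            int d = (a[i] & 0xff) - (b[i] & 0xff);
            if (d != 0)
                return d;
        }
        return a.length - b.length;
    }

    // True when key falls in the half-open ring interval (left, right].
    static boolean owns(byte[] left, byte[] right, byte[] key) {
        return compare(key, left) > 0 && compare(key, right) <= 0;
    }

    public static void main(String[] args) {
        byte[] peer = { 1 }, local = { 2 };
        System.out.println(owns(peer, local, "key1".getBytes())); // false: well above 0x02
        System.out.println(owns(peer, local, new byte[] { 1, 0 })); // true: one of the few keys kept
    }
}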
use of org.apache.cassandra.locator.TokenMetadata in project eiger by wlloyd.
the class StorageProxy method getRestrictedRanges.
/**
 * Compute all ranges we're going to query, in sorted order. Nodes can be replica destinations for many ranges,
 * so we need to restrict each scan to the specific range we want, or else we'd get duplicate results.
 */
static <T extends RingPosition> List<AbstractBounds<T>> getRestrictedRanges(final AbstractBounds<T> queryRange) {
    // special case for bounds containing exactly 1 (non-minimum) token
    if (queryRange instanceof Bounds && queryRange.left.equals(queryRange.right) && !queryRange.left.isMinimum(StorageService.getPartitioner())) {
        if (logger.isDebugEnabled())
            logger.debug("restricted single token match for query {}", queryRange);
        return Collections.singletonList(queryRange);
    }

    TokenMetadata tokenMetadata = StorageService.instance.getTokenMetadata();
    List<AbstractBounds<T>> ranges = new ArrayList<AbstractBounds<T>>();

    // divide the queryRange into pieces delimited by the ring and minimum tokens
    Iterator<Token> ringIter = TokenMetadata.ringIterator(tokenMetadata.sortedTokens(), queryRange.left.getToken(), true);
    AbstractBounds<T> remainder = queryRange;
    while (ringIter.hasNext()) {
        Token token = ringIter.next();
        /*
         * remainder can be a range/bounds of tokens _or_ keys, and we want to split it with a token:
         *   - if remainder is tokens, then we'll just split using the provided token.
         *   - if remainder is keys, we want to split using token.upperBoundKey. For instance, if remainder
         *     is [DK(10, 'foo'), DK(20, 'bar')], and we have 3 nodes with tokens 0, 15, 30, we want to
         *     split remainder into A=[DK(10, 'foo'), 15] and B=(15, DK(20, 'bar')]. But since we can't mix
         *     tokens and keys in the same range, we use 15.upperBoundKey() to have A include all
         *     keys having 15 as token and B include none of those (since that is what our node owns).
         * asSplitValue() abstracts that choice.
         */
        T splitValue = (T) token.asSplitValue(queryRange.left.getClass());
        if (remainder == null || !(remainder.left.equals(splitValue) || remainder.contains(splitValue)))
            // no more splits
            break;
        Pair<AbstractBounds<T>, AbstractBounds<T>> splits = remainder.split(splitValue);
        if (splits.left != null)
            ranges.add(splits.left);
        remainder = splits.right;
    }
    if (remainder != null)
        ranges.add(remainder);
    if (logger.isDebugEnabled())
        logger.debug("restricted ranges for query {} are {}", queryRange, ranges);
    return ranges;
}
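To make the splitting loop concrete, here is a minimal, self-contained analogue of how a query interval gets carved into per-replica pieces. It uses plain integers in place of tokens and keys, assumes a non-wrapping half-open interval, and the restrict helper is a name invented for this sketch:

import java.util.ArrayList;
import java.util.List;

public class SplitSketch {
    // Splits the half-open interval (left, right] at every ring token strictly inside it.
    static List<int[]> restrict(int left, int right, int[] sortedRing) {
        List<int[]> pieces = new ArrayList<>();
        int lo = left;
        for (int token : sortedRing) {
            if (token <= lo)
                continue; // token is before the remainder; nothing to split
            if (token >= right)
                break;    // token is past the remainder; no more splits
            pieces.add(new int[] { lo, token });
            lo = token;   // the right half becomes the new remainder
        }
        pieces.add(new int[] { lo, right });
        return pieces;
    }

    public static void main(String[] args) {
        // Ring tokens 0, 15, 30; query (10, 20] splits into (10, 15] and (15, 20],
        // mirroring the DK(10, 'foo')..DK(20, 'bar') example in the comment above.
        for (int[] p : restrict(10, 20, new int[] { 0, 15, 30 }))
            System.out.println("(" + p[0] + ", " + p[1] + "]");
    }
}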
use of org.apache.cassandra.locator.TokenMetadata in project eiger by wlloyd.
the class StorageService method calculatePendingRanges.
// public & static for testing purposes
public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String table) {
    TokenMetadata tm = StorageService.instance.getTokenMetadata();
    Multimap<Range<Token>, InetAddress> pendingRanges = HashMultimap.create();
    Map<Token, InetAddress> bootstrapTokens = tm.getBootstrapTokens();
    Set<InetAddress> leavingEndpoints = tm.getLeavingEndpoints();

    if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && tm.getMovingEndpoints().isEmpty()) {
        if (logger_.isDebugEnabled())
            logger_.debug("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", table);
        tm.setPendingRanges(table, pendingRanges);
        return;
    }

    Multimap<InetAddress, Range<Token>> addressRanges = strategy.getAddressRanges();

    // Copy of metadata reflecting the situation after all leave operations are finished.
    TokenMetadata allLeftMetadata = tm.cloneAfterAllLeft();

    // get all ranges that will be affected by leaving nodes
    Set<Range<Token>> affectedRanges = new HashSet<Range<Token>>();
    for (InetAddress endpoint : leavingEndpoints)
        affectedRanges.addAll(addressRanges.get(endpoint));

    // for each of those ranges, find which new endpoints will be responsible for the range when
    // all leaving nodes are gone.
    for (Range<Token> range : affectedRanges) {
        Set<InetAddress> currentEndpoints = ImmutableSet.copyOf(strategy.calculateNaturalEndpoints(range.right, tm));
        Set<InetAddress> newEndpoints = ImmutableSet.copyOf(strategy.calculateNaturalEndpoints(range.right, allLeftMetadata));
        pendingRanges.putAll(range, Sets.difference(newEndpoints, currentEndpoints));
    }

    // For each of the bootstrapping nodes, simply add and remove them one by one to
    // allLeftMetadata and check in between what their ranges would be.
    for (Map.Entry<Token, InetAddress> entry : bootstrapTokens.entrySet()) {
        InetAddress endpoint = entry.getValue();
        allLeftMetadata.updateNormalToken(entry.getKey(), endpoint);
        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint))
            pendingRanges.put(range, endpoint);
        allLeftMetadata.removeEndpoint(endpoint);
    }

    // For each of the moving nodes, simply add and remove them one by one to
    // allLeftMetadata and check in between what their ranges would be.
    for (Pair<Token, InetAddress> moving : tm.getMovingEndpoints()) {
        // address of the moving node
        InetAddress endpoint = moving.right;
        // moving.left is the new token of the endpoint
        allLeftMetadata.updateNormalToken(moving.left, endpoint);
        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) {
            pendingRanges.put(range, endpoint);
        }
        allLeftMetadata.removeEndpoint(endpoint);
    }

    tm.setPendingRanges(table, pendingRanges);

    if (logger_.isDebugEnabled())
        logger_.debug("Pending ranges:\n" + (pendingRanges.isEmpty() ? "<empty>" : tm.printPendingRanges()));
}
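The leave-handling loop boils down to a set difference: the endpoints that would replicate a range after the leavers are gone, minus those that already do, are the pending ones. A toy illustration of that core step, using the same Guava calls as the method above but with plain strings standing in for ranges and endpoint addresses (all names here are illustrative only):

import java.util.Set;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;

public class PendingRangesSketch {
    public static void main(String[] args) {
        // Range r is replicated on A, B, C today; once C finishes leaving, the
        // strategy would pick A, B, D - so D must receive r while C departs.
        Set<String> currentEndpoints = ImmutableSet.of("A", "B", "C");
        Set<String> newEndpoints = ImmutableSet.of("A", "B", "D");

        Multimap<String, String> pendingRanges = HashMultimap.create();
        pendingRanges.putAll("r", Sets.difference(newEndpoints, currentEndpoints));

        System.out.println(pendingRanges); // {r=[D]}
    }
}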
use of org.apache.cassandra.locator.TokenMetadata in project cassandra by apache.
the class RangeStreamer method getAllRangesWithStrictSourcesFor.
/**
 * Get a map of all ranges and the source that will be cleaned up once this bootstrapped node is added for the given ranges.
 * For each range, the list should only contain a single source. This allows us to consistently migrate data without violating
 * consistency.
 *
 * @throws java.lang.IllegalStateException when there is no source to stream data from, or more than one source is found.
 */
private Multimap<Range<Token>, InetAddress> getAllRangesWithStrictSourcesFor(String keyspace, Collection<Range<Token>> desiredRanges) {
    assert tokens != null;
    AbstractReplicationStrategy strat = Keyspace.open(keyspace).getReplicationStrategy();

    // Active ranges
    TokenMetadata metadataClone = metadata.cloneOnlyTokenMap();
    Multimap<Range<Token>, InetAddress> addressRanges = strat.getRangeAddresses(metadataClone);

    // Pending ranges
    metadataClone.updateNormalTokens(tokens, address);
    Multimap<Range<Token>, InetAddress> pendingRangeAddresses = strat.getRangeAddresses(metadataClone);

    // Collects the source that will have its range moved to the new node
    Multimap<Range<Token>, InetAddress> rangeSources = ArrayListMultimap.create();

    for (Range<Token> desiredRange : desiredRanges) {
        for (Map.Entry<Range<Token>, Collection<InetAddress>> preEntry : addressRanges.asMap().entrySet()) {
            if (preEntry.getKey().contains(desiredRange)) {
                Set<InetAddress> oldEndpoints = Sets.newHashSet(preEntry.getValue());
                Set<InetAddress> newEndpoints = Sets.newHashSet(pendingRangeAddresses.get(desiredRange));

                // Due to CASSANDRA-5953 we can have a higher RF than we have endpoints.
                // So we need to be careful to only be strict when endpoints == RF
                if (oldEndpoints.size() == strat.getReplicationFactor()) {
                    oldEndpoints.removeAll(newEndpoints);
                    assert oldEndpoints.size() == 1 : "Expected 1 endpoint but found " + oldEndpoints.size();
                }

                rangeSources.put(desiredRange, oldEndpoints.iterator().next());
            }
        }

        // Validate
        Collection<InetAddress> addressList = rangeSources.get(desiredRange);
        if (addressList == null || addressList.isEmpty())
            throw new IllegalStateException("No sources found for " + desiredRange);
        if (addressList.size() > 1)
            throw new IllegalStateException("Multiple endpoints found for " + desiredRange);

        InetAddress sourceIp = addressList.iterator().next();
        EndpointState sourceState = Gossiper.instance.getEndpointStateForEndpoint(sourceIp);
        if (Gossiper.instance.isEnabled() && (sourceState == null || !sourceState.isAlive()))
            throw new RuntimeException("A node required to move the data consistently is down (" + sourceIp + "). " +
                                       "If you wish to move the data from a potentially inconsistent replica, restart the node with -Dcassandra.consistent.rangemovement=false");
    }
    return rangeSources;
}
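The strict-source rule above can be read as: when a range is fully replicated (old endpoints == RF), removing the endpoints that will still replicate it after bootstrap must leave exactly one node, and that node is the stream source. A small sketch of that selection under those assumptions, with string endpoints and an invented strictSource helper rather than the real RangeStreamer API:

import java.util.HashSet;
import java.util.Set;

public class StrictSourceSketch {
    // Hypothetical helper: picks the single replica that loses the range to the new node.
    static String strictSource(Set<String> oldEndpoints, Set<String> newEndpoints, int rf) {
        Set<String> candidates = new HashSet<>(oldEndpoints);
        if (candidates.size() == rf) {
            candidates.removeAll(newEndpoints);
            if (candidates.size() != 1)
                throw new IllegalStateException("Expected 1 endpoint but found " + candidates.size());
        }
        return candidates.iterator().next();
    }

    public static void main(String[] args) {
        // RF = 3: the bootstrapping node takes over from C, so C is the one consistent source.
        Set<String> oldEps = Set.of("A", "B", "C");
        Set<String> newEps = Set.of("A", "B", "new-node");
        System.out.println(strictSource(oldEps, newEps, 3)); // C
    }
}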