
Example 21 with TokenMetadata

use of org.apache.cassandra.locator.TokenMetadata in project eiger by wlloyd.

the class StorageProxy method getRestrictedRanges.

/**
     * Compute all ranges we're going to query, in sorted order. Nodes can be replica destinations for many ranges,
     * so we need to restrict each scan to the specific range we want, or else we'd get duplicate results.
     */
static <T extends RingPosition> List<AbstractBounds<T>> getRestrictedRanges(final AbstractBounds<T> queryRange) {
    // special case for bounds containing exactly 1 (non-minimum) token
    if (queryRange instanceof Bounds && queryRange.left.equals(queryRange.right) && !queryRange.left.isMinimum(StorageService.getPartitioner())) {
        if (logger.isDebugEnabled())
            logger.debug("restricted single token match for query {}", queryRange);
        return Collections.singletonList(queryRange);
    }
    TokenMetadata tokenMetadata = StorageService.instance.getTokenMetadata();
    List<AbstractBounds<T>> ranges = new ArrayList<AbstractBounds<T>>();
    // divide the queryRange into pieces delimited by the ring and minimum tokens
    Iterator<Token> ringIter = TokenMetadata.ringIterator(tokenMetadata.sortedTokens(), queryRange.left.getToken(), true);
    AbstractBounds<T> remainder = queryRange;
    while (ringIter.hasNext()) {
        Token token = ringIter.next();
        /*
             * remainder can be a range/bounds of token _or_ keys and we want to split it with a token:
             *   - if remainder is tokens, then we'll just split using the provided token.
             *   - if remainder is keys, we want to split using token.upperBoundKey. For instance, if remainder
             *     is [DK(10, 'foo'), DK(20, 'bar')], and we have 3 nodes with tokens 0, 15, 30. We want to
             *     split remainder to A=[DK(10, 'foo'), 15] and B=(15, DK(20, 'bar')]. But since we can't mix
             *     tokens and keys at the same time in a range, we use 15.upperBoundKey() to have A include all
             *     keys having 15 as token and B include none of those (since that is what our node owns).
             * asSplitValue() abstracts that choice.
             */
        T splitValue = (T) token.asSplitValue(queryRange.left.getClass());
        if (remainder == null || !(remainder.left.equals(splitValue) || remainder.contains(splitValue)))
            // no more splits
            break;
        Pair<AbstractBounds<T>, AbstractBounds<T>> splits = remainder.split(splitValue);
        if (splits.left != null)
            ranges.add(splits.left);
        remainder = splits.right;
    }
    if (remainder != null)
        ranges.add(remainder);
    if (logger.isDebugEnabled())
        logger.debug("restricted ranges for query {} are {}", queryRange, ranges);
    return ranges;
}
Also used : TokenMetadata(org.apache.cassandra.locator.TokenMetadata)
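
The ring-walk above is easier to see with concrete numbers. Below is a minimal, self-contained sketch of the same idea, assuming plain long tokens and a non-wrapping (left, right] query range; RingSplitSketch and Piece are invented for illustration and are not part of the Cassandra API.

import java.util.ArrayList;
import java.util.List;

// Hypothetical, simplified illustration of getRestrictedRanges: split a non-wrapping
// (left, right] query range at every ring token that falls strictly inside it, so each
// piece maps onto a single replica set. Long tokens stand in for Cassandra's Token type.
public class RingSplitSketch {

    // One (left, right] piece of the query range.
    record Piece(long left, long right) {}

    static List<Piece> restrict(long[] sortedRingTokens, long left, long right) {
        List<Piece> pieces = new ArrayList<>();
        long cursor = left;
        for (long token : sortedRingTokens) {
            // only tokens strictly inside the remaining (cursor, right] range split it
            if (token > cursor && token < right) {
                pieces.add(new Piece(cursor, token));
                cursor = token;
            }
        }
        pieces.add(new Piece(cursor, right)); // whatever is left of the query range
        return pieces;
    }

    public static void main(String[] args) {
        // ring tokens 0, 15, 30 as in the comment above; query (10, 20]
        System.out.println(restrict(new long[] {0, 15, 30}, 10, 20));
        // prints [Piece[left=10, right=15], Piece[left=15, right=20]]
    }
}

Running it with ring tokens 0, 15, 30 and the query (10, 20] yields the two pieces (10, 15] and (15, 20], mirroring the DK(10, 'foo') / DK(20, 'bar') example in the comment above.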

Example 22 with TokenMetadata

use of org.apache.cassandra.locator.TokenMetadata in project eiger by wlloyd.

the class StorageService method calculatePendingRanges.

// public & static for testing purposes
public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String table) {
    TokenMetadata tm = StorageService.instance.getTokenMetadata();
    Multimap<Range<Token>, InetAddress> pendingRanges = HashMultimap.create();
    Map<Token, InetAddress> bootstrapTokens = tm.getBootstrapTokens();
    Set<InetAddress> leavingEndpoints = tm.getLeavingEndpoints();
    if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && tm.getMovingEndpoints().isEmpty()) {
        if (logger_.isDebugEnabled())
            logger_.debug("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", table);
        tm.setPendingRanges(table, pendingRanges);
        return;
    }
    Multimap<InetAddress, Range<Token>> addressRanges = strategy.getAddressRanges();
    // Copy of metadata reflecting the situation after all leave operations are finished.
    TokenMetadata allLeftMetadata = tm.cloneAfterAllLeft();
    // get all ranges that will be affected by leaving nodes
    Set<Range<Token>> affectedRanges = new HashSet<Range<Token>>();
    for (InetAddress endpoint : leavingEndpoints) affectedRanges.addAll(addressRanges.get(endpoint));
    // for each of those ranges, find which new nodes will be responsible for the range when all leaving nodes are gone.
    for (Range<Token> range : affectedRanges) {
        Set<InetAddress> currentEndpoints = ImmutableSet.copyOf(strategy.calculateNaturalEndpoints(range.right, tm));
        Set<InetAddress> newEndpoints = ImmutableSet.copyOf(strategy.calculateNaturalEndpoints(range.right, allLeftMetadata));
        pendingRanges.putAll(range, Sets.difference(newEndpoints, currentEndpoints));
    }
    // For each of the bootstrapping nodes, simply add and remove them one by one to allLeftMetadata and check in between what their ranges would be.
    for (Map.Entry<Token, InetAddress> entry : bootstrapTokens.entrySet()) {
        InetAddress endpoint = entry.getValue();
        allLeftMetadata.updateNormalToken(entry.getKey(), endpoint);
        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) pendingRanges.put(range, endpoint);
        allLeftMetadata.removeEndpoint(endpoint);
    }
    // For each of the moving nodes, do the same as for bootstrapping: simply add and remove them one by one to allLeftMetadata and check in between what their ranges would be.
    for (Pair<Token, InetAddress> moving : tm.getMovingEndpoints()) {
        // address of the moving node
        InetAddress endpoint = moving.right;
        //  moving.left is a new token of the endpoint
        allLeftMetadata.updateNormalToken(moving.left, endpoint);
        for (Range<Token> range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) {
            pendingRanges.put(range, endpoint);
        }
        allLeftMetadata.removeEndpoint(endpoint);
    }
    tm.setPendingRanges(table, pendingRanges);
    if (logger_.isDebugEnabled())
        logger_.debug("Pending ranges:\n" + (pendingRanges.isEmpty() ? "<empty>" : tm.printPendingRanges()));
}
Also used : TokenMetadata(org.apache.cassandra.locator.TokenMetadata) InetAddress(java.net.InetAddress)
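
The heart of the leave handling above is one set difference per affected range: any endpoint that replicates the range once the leavers are gone, but does not replicate it today, gets the range as pending. Here is a minimal sketch of that single step, using plain java.util sets instead of Guava's Sets.difference; the addresses and class name are made up for illustration.

import java.util.HashSet;
import java.util.Set;

// Hypothetical illustration of the per-range step in calculatePendingRanges:
// pending = (endpoints after all leavers are gone) minus (current endpoints).
public class PendingRangeSketch {
    public static void main(String[] args) {
        Set<String> currentEndpoints = Set.of("10.0.0.1", "10.0.0.2", "10.0.0.3");
        // 10.0.0.2 is leaving, so 10.0.0.4 picks up the range in the post-leave metadata
        Set<String> futureEndpoints = Set.of("10.0.0.1", "10.0.0.3", "10.0.0.4");

        Set<String> pending = new HashSet<>(futureEndpoints);
        pending.removeAll(currentEndpoints);   // same effect as Sets.difference(future, current)

        System.out.println(pending);           // [10.0.0.4] -> gets the range as pending
    }
}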

Example 23 with TokenMetadata

use of org.apache.cassandra.locator.TokenMetadata in project eiger by wlloyd.

the class CleanupTest method testCleanupWithIndexes.

@Test
public void testCleanupWithIndexes() throws IOException, ExecutionException, InterruptedException {
    Table table = Table.open(TABLE1);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(CF1);
    assertEquals(cfs.indexManager.getIndexedColumns().iterator().next(), COLUMN);
    List<Row> rows;
    // insert data and verify we get it back w/ range query
    fillCF(cfs, LOOPS);
    rows = Util.getRangeSlice(cfs);
    assertEquals(LOOPS, rows.size());
    SecondaryIndex index = cfs.indexManager.getIndexForColumn(COLUMN);
    long start = System.currentTimeMillis();
    while (!index.isIndexBuilt(COLUMN) && System.currentTimeMillis() < start + 10000) Thread.sleep(10);
    // verify we get it back w/ index query too
    IndexExpression expr = new IndexExpression(COLUMN, IndexOperator.EQ, VALUE);
    List<IndexExpression> clause = Arrays.asList(expr);
    IFilter filter = new IdentityQueryFilter();
    IPartitioner p = StorageService.getPartitioner();
    Range<RowPosition> range = Util.range("", "");
    rows = table.getColumnFamilyStore(CF1).search(clause, range, Integer.MAX_VALUE, filter);
    assertEquals(LOOPS, rows.size());
    // we don't allow cleanup when the local host has no range, to avoid wiping out all data when a node has not yet joined the ring.
    // So to make sure cleanup erases everything here, we give the localhost the tiniest possible range.
    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    byte[] tk1 = new byte[1], tk2 = new byte[1];
    tk1[0] = 2;
    tk2[0] = 1;
    tmd.updateNormalToken(new BytesToken(tk1), InetAddress.getByName("127.0.0.1"));
    tmd.updateNormalToken(new BytesToken(tk2), InetAddress.getByName("127.0.0.2"));
    CompactionManager.instance.performCleanup(cfs, new NodeId.OneShotRenewer());
    // row data should be gone
    rows = Util.getRangeSlice(cfs);
    assertEquals(0, rows.size());
    // not only should it be gone but there should be no data on disk, not even tombstones
    assert cfs.getSSTables().isEmpty();
    // 2ary indexes should result in no results, too (although tombstones won't be gone until compacted)
    rows = cfs.search(clause, range, Integer.MAX_VALUE, filter);
    assertEquals(0, rows.size());
}
Also used : IndexExpression(org.apache.cassandra.thrift.IndexExpression) TokenMetadata(org.apache.cassandra.locator.TokenMetadata) IdentityQueryFilter(org.apache.cassandra.db.columniterator.IdentityQueryFilter) SecondaryIndex(org.apache.cassandra.db.index.SecondaryIndex) IFilter(org.apache.cassandra.db.filter.IFilter) BytesToken(org.apache.cassandra.dht.BytesToken) NodeId(org.apache.cassandra.utils.NodeId) IPartitioner(org.apache.cassandra.dht.IPartitioner) Test(org.junit.Test)
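
The token juggling at the end of the test is what gives the local node the "tiniest possible range": with byte-ordered tokens 0x01 assigned to 127.0.0.2 and 0x02 assigned to 127.0.0.1, the local node owns only (0x01, 0x02], so essentially every test row falls outside its range and is removed by cleanup. Below is a rough sketch of that successor-on-the-ring lookup, using single-byte tokens as ints; it is an illustration only, not the real partitioner code.

import java.util.TreeMap;

// Hypothetical sketch of ring ownership with two byte-ordered tokens:
// token 0x01 -> 127.0.0.2, token 0x02 -> 127.0.0.1 (as in the test above).
// A key is owned by the first token >= the key, wrapping around to the smallest token.
public class TinyRangeSketch {
    public static void main(String[] args) {
        TreeMap<Integer, String> ring = new TreeMap<>();
        ring.put(0x01, "127.0.0.2");
        ring.put(0x02, "127.0.0.1");

        for (int key : new int[] {0x00, 0x01, 0x02, 0x7f}) {
            Integer token = ring.ceilingKey(key);            // first token >= key
            if (token == null) token = ring.firstKey();      // wrap around the ring
            System.out.printf("key 0x%02x -> %s%n", key, ring.get(token));
        }
        // Only key 0x02 lands on 127.0.0.1; everything else belongs to 127.0.0.2,
        // so cleanup on 127.0.0.1 drops essentially all of the test data.
    }
}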

Example 24 with TokenMetadata

use of org.apache.cassandra.locator.TokenMetadata in project eiger by wlloyd.

the class BootStrapperTest method generateFakeEndpoints.

private void generateFakeEndpoints(int numOldNodes) throws UnknownHostException {
    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    tmd.clearUnsafe();
    IPartitioner<?> p = StorageService.getPartitioner();
    for (int i = 1; i <= numOldNodes; i++) {
        // leave .1 for myEndpoint
        tmd.updateNormalToken(p.getRandomToken(), InetAddress.getByName("127.0.0." + (i + 1)));
    }
}
Also used : TokenMetadata(org.apache.cassandra.locator.TokenMetadata)

Example 25 with TokenMetadata

use of org.apache.cassandra.locator.TokenMetadata in project eiger by wlloyd.

the class BootStrapperTest method testSourceTargetComputation.

private void testSourceTargetComputation(String table, int numOldNodes, int replicationFactor) throws UnknownHostException {
    StorageService ss = StorageService.instance;
    generateFakeEndpoints(numOldNodes);
    Token myToken = StorageService.getPartitioner().getRandomToken();
    InetAddress myEndpoint = InetAddress.getByName("127.0.0.1");
    TokenMetadata tmd = ss.getTokenMetadata();
    assertEquals(numOldNodes, tmd.sortedTokens().size());
    BootStrapper b = new BootStrapper(myEndpoint, myToken, tmd);
    Multimap<Range<Token>, InetAddress> res = b.getRangesWithSources(table);
    int transferCount = 0;
    for (Map.Entry<Range<Token>, Collection<InetAddress>> e : res.asMap().entrySet()) {
        assert e.getValue() != null && e.getValue().size() > 0 : StringUtils.join(e.getValue(), ", ");
        transferCount++;
    }
    assertEquals(replicationFactor, transferCount);
    IFailureDetector mockFailureDetector = new IFailureDetector() {

        public boolean isAlive(InetAddress ep) {
            return true;
        }

        public void interpret(InetAddress ep) {
            throw new UnsupportedOperationException();
        }

        public void report(InetAddress ep) {
            throw new UnsupportedOperationException();
        }

        public void registerFailureDetectionEventListener(IFailureDetectionEventListener listener) {
            throw new UnsupportedOperationException();
        }

        public void unregisterFailureDetectionEventListener(IFailureDetectionEventListener listener) {
            throw new UnsupportedOperationException();
        }

        public void remove(InetAddress ep) {
            throw new UnsupportedOperationException();
        }

        public void clear(InetAddress ep) {
            throw new UnsupportedOperationException();
        }
    };
    Multimap<InetAddress, Range<Token>> temp = BootStrapper.getWorkMap(res, mockFailureDetector);
    // there isn't any point in testing these collections for any specific size: when a random partitioner is used, they will vary.
    assert temp.keySet().size() > 0;
    assert temp.asMap().values().iterator().next().size() > 0;
    assert !temp.keySet().iterator().next().equals(myEndpoint);
}
Also used : TokenMetadata(org.apache.cassandra.locator.TokenMetadata) StorageService(org.apache.cassandra.service.StorageService) IFailureDetector(org.apache.cassandra.gms.IFailureDetector) Collection(java.util.Collection) IFailureDetectionEventListener(org.apache.cassandra.gms.IFailureDetectionEventListener) InetAddress(java.net.InetAddress) HashMap(java.util.HashMap) Map(java.util.Map)
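
Roughly speaking, BootStrapper.getWorkMap inverts the range-to-sources map while consulting the failure detector, so each range ends up assigned to a live source and the result is grouped by source; that is why the test only asserts that the work map is non-empty and that the chosen source is not the bootstrapping endpoint itself. Below is a minimal sketch of that shape, with plain java.util maps and a predicate standing in for the failure detector; WorkMapSketch and the addresses are invented for illustration and do not reproduce the real BootStrapper logic.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;

// Hypothetical sketch of the getWorkMap idea: for each range, pick the first source
// the failure detector considers alive, and group the chosen ranges by source.
public class WorkMapSketch {
    public static void main(String[] args) {
        Map<String, List<String>> rangesWithSources = Map.of(
                "(0,15]",  List.of("127.0.0.2", "127.0.0.3"),
                "(15,30]", List.of("127.0.0.3", "127.0.0.4"));

        Predicate<String> isAlive = ep -> !ep.equals("127.0.0.3"); // pretend .3 is down

        Map<String, List<String>> workMap = new HashMap<>();
        rangesWithSources.forEach((range, sources) ->
                sources.stream().filter(isAlive).findFirst().ifPresent(source ->
                        workMap.computeIfAbsent(source, s -> new ArrayList<>()).add(range)));

        System.out.println(workMap); // e.g. {127.0.0.2=[(0,15]], 127.0.0.4=[(15,30]]}
    }
}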

Aggregations

TokenMetadata (org.apache.cassandra.locator.TokenMetadata) 72
InetAddress (java.net.InetAddress) 50
Test (org.junit.Test) 50
Token (org.apache.cassandra.dht.Token) 27
VersionedValue (org.apache.cassandra.gms.VersionedValue) 22
Range (org.apache.cassandra.dht.Range) 16
AbstractReplicationStrategy (org.apache.cassandra.locator.AbstractReplicationStrategy) 14
BigIntegerToken (org.apache.cassandra.dht.RandomPartitioner.BigIntegerToken) 13
StringToken (org.apache.cassandra.dht.OrderPreservingPartitioner.StringToken) 12
KeyspaceMetadata (org.apache.cassandra.schema.KeyspaceMetadata) 12
IPartitioner (org.apache.cassandra.dht.IPartitioner) 10
LongToken (org.apache.cassandra.dht.Murmur3Partitioner.LongToken) 7
HashMap (java.util.HashMap) 5
HashMultimap (com.google.common.collect.HashMultimap) 4
Multimap (com.google.common.collect.Multimap) 4
BytesToken (org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken) 4
NetworkTopologyStrategy (org.apache.cassandra.locator.NetworkTopologyStrategy) 4
StorageService (org.apache.cassandra.service.StorageService) 4
UUID (java.util.UUID) 3
Before (org.junit.Before) 3