Search in sources :

Example 6 with RandomPartitioner

use of org.apache.cassandra.dht.RandomPartitioner in project eiger by wlloyd.

From the class QueryProcessor, the method multiRangeSlice:

/**
 * Executes a CQL SELECT over a contiguous range of keys by issuing a
 * range-slice read through StorageProxy.
 *
 * @param metadata  column family metadata for the table being queried
 * @param select    the parsed SELECT statement (key bounds, predicates, limit)
 * @param variables bound values for a prepared statement
 * @return the matching rows, trimmed to the statement's record limit
 * @throws InvalidRequestException if the start key sorts after the finish key,
 *                                 or the slice predicate is invalid
 * @throws UnavailableException    if insufficient replicas were reachable
 * @throws TimedOutException       if replicas did not answer in time
 */
private static List<org.apache.cassandra.db.Row> multiRangeSlice(CFMetaData metadata, SelectStatement select, List<String> variables) throws TimedOutException, UnavailableException, InvalidRequestException {
    IPartitioner<?> partitioner = StorageService.getPartitioner();
    AbstractType<?> keyType = Schema.instance.getCFMetaData(metadata.ksName, select.getColumnFamily()).getKeyValidator();
    // Decode the optional start/finish key bounds from the statement.
    ByteBuffer startKeyBytes = null;
    if (select.getKeyStart() != null)
        startKeyBytes = select.getKeyStart().getByteBuffer(keyType, variables);
    ByteBuffer finishKeyBytes = null;
    if (select.getKeyFinish() != null)
        finishKeyBytes = select.getKeyFinish().getByteBuffer(keyType, variables);
    RowPosition startKey = RowPosition.forKey(startKeyBytes, partitioner);
    RowPosition finishKey = RowPosition.forKey(finishKeyBytes, partitioner);
    // Reject ranges that cannot match anything, unless the finish bound is the
    // minimum position (i.e. the range is effectively unbounded on the right).
    if (startKey.compareTo(finishKey) > 0 && !finishKey.isMinimum(partitioner)) {
        String reason = (partitioner instanceof RandomPartitioner)
                ? "Start key sorts after end key. This is not allowed; you probably should not specify end key at all, under RandomPartitioner"
                : "Start key must sort before (or equal to) finish key in your partitioner!";
        throw new InvalidRequestException(reason);
    }
    AbstractBounds<RowPosition> bounds = new Bounds<RowPosition>(startKey, finishKey);
    // XXX: Our use of Thrift structs internally makes me Sad. :(
    SlicePredicate thriftSlicePredicate = slicePredicateFromSelect(select, metadata, variables);
    validateSlicePredicate(metadata, thriftSlicePredicate);
    // Translate each column relation into a secondary-index expression; both
    // sides are encoded according to the comparator/validator of the column.
    List<IndexExpression> expressions = new ArrayList<IndexExpression>();
    for (Relation relation : select.getColumnRelations()) {
        ByteBuffer entity = relation.getEntity().getByteBuffer(metadata.comparator, variables);
        ByteBuffer value = relation.getValue().getByteBuffer(select.getValueValidator(metadata.ksName, entity), variables);
        expressions.add(new IndexExpression(entity, IndexOperator.valueOf(relation.operator().toString()), value));
    }
    // Fetch one extra row when a start key is given: the start row itself may
    // be trimmed below if the start relation is exclusive.
    int limit = select.getNumRecords();
    if (select.isKeyRange() && select.getKeyStart() != null)
        limit = limit + 1;
    List<org.apache.cassandra.db.Row> rows;
    try {
        rows = StorageProxy.getRangeSlice(new RangeSliceCommand(metadata.ksName, select.getColumnFamily(), null, thriftSlicePredicate, bounds, expressions, limit), select.getConsistencyLevel());
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (org.apache.cassandra.thrift.UnavailableException e) {
        throw new UnavailableException();
    } catch (TimeoutException e) {
        throw new TimedOutException();
    }
    // Drop the first row when the start bound is exclusive ("greater than").
    if (select.getKeyStart() != null && !select.includeStartKey() && !rows.isEmpty() && rows.get(0).key.key.equals(startKeyBytes))
        rows.remove(0);
    // Drop the last row when the finish bound is exclusive ("less than").
    if (select.getKeyFinish() != null && !select.includeFinishKey() && !rows.isEmpty()) {
        int last = rows.size() - 1;
        if (rows.get(last).key.key.equals(finishKeyBytes))
            rows.remove(last);
    }
    // Trim to the requested record count (the +1 over-fetch may remain).
    return rows.subList(0, Math.min(select.getNumRecords(), rows.size()));
}
Also used : RandomPartitioner(org.apache.cassandra.dht.RandomPartitioner) org.apache.cassandra.thrift(org.apache.cassandra.thrift) TimeoutException(java.util.concurrent.TimeoutException) AbstractBounds(org.apache.cassandra.dht.AbstractBounds) Bounds(org.apache.cassandra.dht.Bounds) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer)

Example 7 with RandomPartitioner

use of org.apache.cassandra.dht.RandomPartitioner in project eiger by wlloyd.

From the class RemoveTest, the method setup:

/**
 * Per-test fixture: builds a fresh 6-node ring under a RandomPartitioner,
 * then detaches the 6th node/token so it can serve as the removal target.
 * NOTE(review): assumes a companion teardown restores {@code oldPartitioner}
 * via {@code ss.setPartitionerUnsafe} — confirm against the rest of the class.
 */
@Before
public void setup() throws IOException, ConfigurationException {
    // Reset token metadata so each test starts from a clean ring.
    tmd.clearUnsafe();
    IPartitioner partitioner = new RandomPartitioner();
    // Remember the previous partitioner so the test can restore it later.
    oldPartitioner = ss.setPartitionerUnsafe(partitioner);
    // Create a ring of 6 nodes; after the removal candidate is detached below,
    // 5 active members remain.
    Util.createInitialRing(ss, partitioner, endpointTokens, keyTokens, hosts, 6);
    // Start messaging before gossip so gossip messages can be delivered.
    MessagingService.instance().listen(FBUtilities.getBroadcastAddress());
    Gossiper.instance.start(1);
    for (int i = 0; i < 6; i++) {
        Gossiper.instance.initializeNodeUnsafe(hosts.get(i), 1);
    }
    // Detach the last host/token; they become the removal candidate the tests
    // operate on, leaving a 5-node ring.
    removalhost = hosts.get(5);
    hosts.remove(removalhost);
    removaltoken = endpointTokens.get(5);
    endpointTokens.remove(removaltoken);
}
Also used : RandomPartitioner(org.apache.cassandra.dht.RandomPartitioner) IPartitioner(org.apache.cassandra.dht.IPartitioner) Before(org.junit.Before)

Example 8 with RandomPartitioner

use of org.apache.cassandra.dht.RandomPartitioner in project eiger by wlloyd.

From the class SerializationsTest, the method testTreeResponseWrite:

/**
 * Writes serialized tree-response fixtures to "service.TreeResponse.bin":
 * two Validators (one without a Merkle tree, one with a populated tree),
 * each serialized both bare and wrapped in a verb message.
 */
private void testTreeResponseWrite() throws IOException {
    // Validator in its initial state (no Merkle tree attached).
    AntiEntropyService.Validator v0 = new AntiEntropyService.Validator(Statics.req);
    IPartitioner part = new RandomPartitioner();
    MerkleTree mt = new MerkleTree(part, FULL_RANGE, MerkleTree.RECOMMENDED_DEPTH, Integer.MAX_VALUE);
    // NOTE(review): 'tokens' is filled but never read afterwards — presumably
    // kept only to mirror sibling tests; confirm before removing.
    List<Token> tokens = new ArrayList<Token>();
    // Split the tree at 10 random tokens so the serialized form is non-trivial.
    for (int i = 0; i < 10; i++) {
        Token t = part.getRandomToken();
        tokens.add(t);
        mt.split(t);
    }
    // Validator carrying the populated tree.
    AntiEntropyService.Validator v1 = new AntiEntropyService.Validator(Statics.req, mt);
    DataOutputStream out = getOutput("service.TreeResponse.bin");
    // NOTE(review): 'out' leaks if a serialize call throws — consider
    // try/finally or try-with-resources if the file's style permits.
    AntiEntropyService.TreeResponseVerbHandler.SERIALIZER.serialize(v0, out, getVersion());
    AntiEntropyService.TreeResponseVerbHandler.SERIALIZER.serialize(v1, out, getVersion());
    messageSerializer.serialize(AntiEntropyService.TreeResponseVerbHandler.makeVerb(FBUtilities.getBroadcastAddress(), v0), out, getVersion());
    messageSerializer.serialize(AntiEntropyService.TreeResponseVerbHandler.makeVerb(FBUtilities.getBroadcastAddress(), v1), out, getVersion());
    out.close();
}
Also used : RandomPartitioner(org.apache.cassandra.dht.RandomPartitioner) MerkleTree(org.apache.cassandra.utils.MerkleTree) DataOutputStream(java.io.DataOutputStream) ArrayList(java.util.ArrayList) Token(org.apache.cassandra.dht.Token) IPartitioner(org.apache.cassandra.dht.IPartitioner)

Aggregations

RandomPartitioner (org.apache.cassandra.dht.RandomPartitioner)8 IPartitioner (org.apache.cassandra.dht.IPartitioner)6 Token (org.apache.cassandra.dht.Token)4 Before (org.junit.Before)3 InetAddress (java.net.InetAddress)2 ArrayList (java.util.ArrayList)2 Test (org.junit.Test)2 ByteArrayInputStream (java.io.ByteArrayInputStream)1 DataInputStream (java.io.DataInputStream)1 DataOutputStream (java.io.DataOutputStream)1 IOException (java.io.IOException)1 BigInteger (java.math.BigInteger)1 ByteBuffer (java.nio.ByteBuffer)1 UUID (java.util.UUID)1 TimeoutException (java.util.concurrent.TimeoutException)1 AbstractBounds (org.apache.cassandra.dht.AbstractBounds)1 Bounds (org.apache.cassandra.dht.Bounds)1 VersionedValue (org.apache.cassandra.gms.VersionedValue)1 DataOutputBuffer (org.apache.cassandra.io.util.DataOutputBuffer)1 TokenMetadata (org.apache.cassandra.locator.TokenMetadata)1