Use of org.apache.cassandra.dht.RandomPartitioner in project eiger by wlloyd.
The class QueryProcessor, method multiRangeSlice.
private static List<org.apache.cassandra.db.Row> multiRangeSlice(CFMetaData metadata, SelectStatement select, List<String> variables)
throws TimedOutException, UnavailableException, InvalidRequestException
{
    List<org.apache.cassandra.db.Row> rows;
    IPartitioner<?> p = StorageService.getPartitioner();
    AbstractType<?> keyType = Schema.instance.getCFMetaData(metadata.ksName, select.getColumnFamily()).getKeyValidator();

    ByteBuffer startKeyBytes = (select.getKeyStart() != null)
                             ? select.getKeyStart().getByteBuffer(keyType, variables)
                             : null;
    ByteBuffer finishKeyBytes = (select.getKeyFinish() != null)
                              ? select.getKeyFinish().getByteBuffer(keyType, variables)
                              : null;

    RowPosition startKey = RowPosition.forKey(startKeyBytes, p);
    RowPosition finishKey = RowPosition.forKey(finishKeyBytes, p);
    if (startKey.compareTo(finishKey) > 0 && !finishKey.isMinimum(p))
    {
        if (p instanceof RandomPartitioner)
            throw new InvalidRequestException("Start key sorts after end key. This is not allowed; you probably should not specify end key at all, under RandomPartitioner");
        else
            throw new InvalidRequestException("Start key must sort before (or equal to) finish key in your partitioner!");
    }
    AbstractBounds<RowPosition> bounds = new Bounds<RowPosition>(startKey, finishKey);

    // XXX: Our use of Thrift structs internally makes me Sad. :(
    SlicePredicate thriftSlicePredicate = slicePredicateFromSelect(select, metadata, variables);
    validateSlicePredicate(metadata, thriftSlicePredicate);

    List<IndexExpression> expressions = new ArrayList<IndexExpression>();
    for (Relation columnRelation : select.getColumnRelations())
    {
        // Left and right side of relational expression encoded according to comparator/validator.
        ByteBuffer entity = columnRelation.getEntity().getByteBuffer(metadata.comparator, variables);
        ByteBuffer value = columnRelation.getValue().getByteBuffer(select.getValueValidator(metadata.ksName, entity), variables);
        expressions.add(new IndexExpression(entity, IndexOperator.valueOf(columnRelation.operator().toString()), value));
    }

    int limit = select.isKeyRange() && select.getKeyStart() != null
              ? select.getNumRecords() + 1
              : select.getNumRecords();

    try
    {
        rows = StorageProxy.getRangeSlice(new RangeSliceCommand(metadata.ksName,
                                                                select.getColumnFamily(),
                                                                null,
                                                                thriftSlicePredicate,
                                                                bounds,
                                                                expressions,
                                                                limit),
                                          select.getConsistencyLevel());
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    catch (org.apache.cassandra.thrift.UnavailableException e)
    {
        throw new UnavailableException();
    }
    catch (TimeoutException e)
    {
        throw new TimedOutException();
    }

    // if start key was set and relation was "greater than"
    if (select.getKeyStart() != null && !select.includeStartKey() && !rows.isEmpty())
    {
        if (rows.get(0).key.key.equals(startKeyBytes))
            rows.remove(0);
    }

    // if finish key was set and relation was "less than"
    if (select.getKeyFinish() != null && !select.includeFinishKey() && !rows.isEmpty())
    {
        int lastIndex = rows.size() - 1;
        if (rows.get(lastIndex).key.key.equals(finishKeyBytes))
            rows.remove(lastIndex);
    }

    return rows.subList(0, select.getNumRecords() < rows.size() ? select.getNumRecords() : rows.size());
}
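Why the guard matters: RandomPartitioner places rows by the MD5 token of the key rather than by the raw key bytes, so a lexically smaller start key can still sort after the finish key, which is why the method rejects that case outright. Note also that when a start key is set, the method fetches numRecords + 1 rows so that trimming a non-inclusive start row still leaves a full page of results. A minimal sketch of the ordering behavior the guard rejects (the key strings here are illustrative, not from eiger):

import org.apache.cassandra.db.RowPosition;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.RandomPartitioner;
import org.apache.cassandra.utils.ByteBufferUtil;

public class KeyOrderSketch
{
    public static void main(String[] args)
    {
        IPartitioner<?> p = new RandomPartitioner();
        RowPosition start = RowPosition.forKey(ByteBufferUtil.bytes("apple"), p);
        RowPosition finish = RowPosition.forKey(ByteBufferUtil.bytes("banana"), p);

        // Positions compare by MD5 token, so this can print a positive number
        // even though "apple" precedes "banana" byte-wise; that is exactly the
        // condition multiRangeSlice rejects with InvalidRequestException.
        System.out.println(start.compareTo(finish));
    }
}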
Use of org.apache.cassandra.dht.RandomPartitioner in project eiger by wlloyd.
The class RemoveTest, method setup.
@Before
public void setup() throws IOException, ConfigurationException
{
    tmd.clearUnsafe();
    IPartitioner partitioner = new RandomPartitioner();
    oldPartitioner = ss.setPartitionerUnsafe(partitioner);

    // create a ring of 6 nodes; the sixth is split off below as the removal target
    Util.createInitialRing(ss, partitioner, endpointTokens, keyTokens, hosts, 6);

    MessagingService.instance().listen(FBUtilities.getBroadcastAddress());
    Gossiper.instance.start(1);
    for (int i = 0; i < 6; i++)
    {
        Gossiper.instance.initializeNodeUnsafe(hosts.get(i), 1);
    }

    removalhost = hosts.get(5);
    hosts.remove(removalhost);
    removaltoken = endpointTokens.get(5);
    endpointTokens.remove(removaltoken);
}
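The setup asks createInitialRing for six nodes, then sets the sixth host and its token aside, leaving the five-node ring the removal test actually exercises. With RandomPartitioner the ring is keyed by BigIntegerToken; a hedged sketch of the kind of evenly spaced token layout a test ring might use (this helper is illustrative, not eiger's Util.createInitialRing):

import java.util.ArrayList;
import java.util.List;

import org.apache.cassandra.dht.BigIntegerToken;
import org.apache.cassandra.dht.Token;

public class RingSketch
{
    // Illustrative only: evenly spaced BigIntegerTokens, the token type
    // RandomPartitioner places on the ring.
    public static List<Token> evenTokens(int howMany)
    {
        List<Token> tokens = new ArrayList<Token>();
        for (int i = 0; i < howMany; i++)
            tokens.add(new BigIntegerToken(String.valueOf(10 * i)));
        return tokens;
    }
}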
Use of org.apache.cassandra.dht.RandomPartitioner in project eiger by wlloyd.
The class SerializationsTest, method testTreeResponseWrite.
private void testTreeResponseWrite() throws IOException
{
    AntiEntropyService.Validator v0 = new AntiEntropyService.Validator(Statics.req);
    IPartitioner part = new RandomPartitioner();
    MerkleTree mt = new MerkleTree(part, FULL_RANGE, MerkleTree.RECOMMENDED_DEPTH, Integer.MAX_VALUE);
    List<Token> tokens = new ArrayList<Token>();
    for (int i = 0; i < 10; i++)
    {
        Token t = part.getRandomToken();
        tokens.add(t);
        mt.split(t);
    }
    AntiEntropyService.Validator v1 = new AntiEntropyService.Validator(Statics.req, mt);

    DataOutputStream out = getOutput("service.TreeResponse.bin");
    AntiEntropyService.TreeResponseVerbHandler.SERIALIZER.serialize(v0, out, getVersion());
    AntiEntropyService.TreeResponseVerbHandler.SERIALIZER.serialize(v1, out, getVersion());
    messageSerializer.serialize(AntiEntropyService.TreeResponseVerbHandler.makeVerb(FBUtilities.getBroadcastAddress(), v0), out, getVersion());
    messageSerializer.serialize(AntiEntropyService.TreeResponseVerbHandler.makeVerb(FBUtilities.getBroadcastAddress(), v1), out, getVersion());
    out.close();
}
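The test serializes two Validator shapes: v0 with a fresh tree and v1 whose MerkleTree has been subdivided at ten random tokens, so both the trivial and the split tree pass through the serializer. A hedged sketch of the tree construction on its own, assuming FULL_RANGE is the min-token-to-min-token range covering the whole ring:

import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.RandomPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.utils.MerkleTree;

public class MerkleSketch
{
    public static void main(String[] args)
    {
        IPartitioner part = new RandomPartitioner();
        // (min, min] wraps all the way around the ring, mirroring the
        // FULL_RANGE constant assumed by the test above.
        Range<Token> fullRange = new Range<Token>(part.getMinimumToken(), part.getMinimumToken());
        MerkleTree mt = new MerkleTree(part, fullRange, MerkleTree.RECOMMENDED_DEPTH, Integer.MAX_VALUE);
        for (int i = 0; i < 10; i++)
        {
            // split subdivides the leaf range containing the token, refining
            // the tree's resolution around the sampled point.
            boolean subdivided = mt.split(part.getRandomToken());
            System.out.println("split #" + i + " succeeded: " + subdivided);
        }
    }
}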