Use of org.apache.cassandra.dht.AbstractBounds in project cassandra by apache.
The class ReplicaPlanIterator, method getRestrictedRanges.
/**
 * Compute all ranges we're going to query, in sorted order. Nodes can be replica destinations for many ranges,
 * so we need to restrict each scan to the specific range we want, or else we'd get duplicate results.
 */
private static List<AbstractBounds<PartitionPosition>> getRestrictedRanges(final AbstractBounds<PartitionPosition> queryRange) {
    // special case for bounds containing exactly 1 (non-minimum) token
    if (queryRange instanceof Bounds && queryRange.left.equals(queryRange.right) && !queryRange.left.isMinimum()) {
        return Collections.singletonList(queryRange);
    }
    TokenMetadata tokenMetadata = StorageService.instance.getTokenMetadata();
    List<AbstractBounds<PartitionPosition>> ranges = new ArrayList<>();
    // divide the queryRange into pieces delimited by the ring and minimum tokens
    Iterator<Token> ringIter = TokenMetadata.ringIterator(tokenMetadata.sortedTokens(), queryRange.left.getToken(), true);
    AbstractBounds<PartitionPosition> remainder = queryRange;
    while (ringIter.hasNext()) {
        /*
         * remainder is a range/bounds of partition positions and we want to split it with a token. We want to split
         * using the key returned by token.maxKeyBound. For instance, if remainder is [DK(10, 'foo'), DK(20, 'bar')],
         * and we have 3 nodes with tokens 0, 15, 30, we want to split remainder into A=[DK(10, 'foo'), 15] and
         * B=(15, DK(20, 'bar')]. But since we can't mix tokens and keys in the same range, we use
         * 15.maxKeyBound() so that A includes all keys having 15 as their token and B includes none of those (since that is
         * what our node owns).
         */
        Token upperBoundToken = ringIter.next();
        PartitionPosition upperBound = upperBoundToken.maxKeyBound();
        if (!remainder.left.equals(upperBound) && !remainder.contains(upperBound))
            // no more splits
            break;
        Pair<AbstractBounds<PartitionPosition>, AbstractBounds<PartitionPosition>> splits = remainder.split(upperBound);
        if (splits == null)
            continue;
        ranges.add(splits.left);
        remainder = splits.right;
    }
    ranges.add(remainder);
    return ranges;
}
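
The splitting trick described in the comment above can be checked in isolation. Below is a minimal standalone sketch, not taken from the Cassandra sources: it assumes Murmur3 long tokens and mirrors the comment's 10/15/20 example, showing that a split at maxKeyBound() keeps every key owning the split token in the left piece.

import org.apache.cassandra.db.PartitionPosition;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.utils.Pair;

public class SplitSketch {
    public static void main(String[] args) {
        // the query range: all keys with tokens in [10, 20]
        AbstractBounds<PartitionPosition> remainder =
                new Bounds<>(new LongToken(10).minKeyBound(), new LongToken(20).maxKeyBound());
        // a ring token at 15 splits the range via its max key bound
        Pair<AbstractBounds<PartitionPosition>, AbstractBounds<PartitionPosition>> splits =
                remainder.split(new LongToken(15).maxKeyBound());
        System.out.println(splits.left);  // [min(10), max(15)] -- keys with token 15 included
        System.out.println(splits.right); // (max(15), max(20)] -- keys with token 15 excluded
    }
}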
Use of org.apache.cassandra.dht.AbstractBounds in project cassandra by apache.
The class SSTableExport, method main.
/**
 * Given arguments specifying an SSTable, and optionally an output file, export the contents of the SSTable to JSON.
 *
 * @param args command line arguments
 * @throws ConfigurationException on configuration failure (wrong params given)
 */
@SuppressWarnings("resource")
public static void main(String[] args) throws ConfigurationException {
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e1) {
        System.err.println(e1.getMessage());
        printUsage();
        System.exit(1);
    }
    String[] keys = cmd.getOptionValues(KEY_OPTION);
    String[] excludeValues = cmd.getOptionValues(EXCLUDE_KEY_OPTION);
    HashSet<String> excludes = new HashSet<>(Arrays.asList(excludeValues == null ? new String[0] : excludeValues));
    if (cmd.getArgs().length != 1) {
        String msg = "You must supply exactly one sstable";
        if (cmd.getArgs().length == 0 && (keys != null && keys.length > 0 || !excludes.isEmpty()))
            msg += ", which should be before the -k/-x options so it's not interpreted as a partition key.";
        System.err.println(msg);
        printUsage();
        System.exit(1);
    }
    String ssTableFileName = new File(cmd.getArgs()[0]).absolutePath();
    if (!new File(ssTableFileName).exists()) {
        System.err.println("Cannot find file " + ssTableFileName);
        System.exit(1);
    }
    Descriptor desc = Descriptor.fromFilename(ssTableFileName);
    try {
        TableMetadata metadata = Util.metadataFromSSTable(desc);
        if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
            try (KeyIterator iter = new KeyIterator(desc, metadata)) {
                JsonTransformer.keysToJson(null, Util.iterToStream(iter), cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        } else {
            SSTableReader sstable = SSTableReader.openNoValidation(desc, TableMetadataRef.forOfflineTools(metadata));
            IPartitioner partitioner = sstable.getPartitioner();
            final ISSTableScanner currentScanner;
            if ((keys != null) && (keys.length > 0)) {
                // restrict the scan to the requested partitions: one single-token Bounds per key
                List<AbstractBounds<PartitionPosition>> bounds = Arrays.stream(keys)
                        .filter(key -> !excludes.contains(key))
                        .map(metadata.partitionKeyType::fromString)
                        .map(partitioner::decorateKey)
                        .sorted()
                        .map(DecoratedKey::getToken)
                        .map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound()))
                        .collect(Collectors.toList());
                currentScanner = sstable.getScanner(bounds.iterator());
            } else {
                currentScanner = sstable.getScanner();
            }
            Stream<UnfilteredRowIterator> partitions = Util.iterToStream(currentScanner)
                    .filter(i -> excludes.isEmpty() || !excludes.contains(metadata.partitionKeyType.getString(i.partitionKey().getKey())));
            if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
                AtomicLong position = new AtomicLong();
                partitions.forEach(partition -> {
                    position.set(currentScanner.getCurrentPosition());
                    if (!partition.partitionLevelDeletion().isLive()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.partitionLevelDeletion());
                    }
                    if (!partition.staticRow().isEmpty()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.staticRow().toString(metadata, true));
                    }
                    partition.forEachRemaining(row -> {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + row.toString(metadata, false, true));
                        position.set(currentScanner.getCurrentPosition());
                    });
                });
            } else if (cmd.hasOption(PARTITION_JSON_LINES)) {
                JsonTransformer.toJsonLines(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            } else {
                JsonTransformer.toJson(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        }
    } catch (IOException e) {
        e.printStackTrace(System.err);
    }
    System.exit(0);
}
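
The KEY_OPTION branch above is where AbstractBounds does the real work in this tool: each requested key is decorated into a token and wrapped in a single-partition Bounds, so the scanner reads only the matching partitions instead of the whole SSTable. A standalone sketch of that mapping (illustrative only: it assumes the Murmur3 partitioner and a UTF8-typed partition key, and "alice" is a made-up key):

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.PartitionPosition;
import org.apache.cassandra.db.marshal.UTF8Type;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.dht.Token;

public class KeyBoundsSketch {
    public static void main(String[] args) {
        // parse and decorate the key, as the tool does for each -k value
        DecoratedKey dk = Murmur3Partitioner.instance.decorateKey(UTF8Type.instance.fromString("alice"));
        Token t = dk.getToken();
        // [t.minKeyBound(), t.maxKeyBound()] brackets every key hashing to t and nothing else
        Bounds<PartitionPosition> single = new Bounds<>(t.minKeyBound(), t.maxKeyBound());
        System.out.println(single);
    }
}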
Use of org.apache.cassandra.dht.AbstractBounds in project eiger by wlloyd.
The class QueryProcessor, method multiRangeSlice.
private static List<org.apache.cassandra.db.Row> multiRangeSlice(CFMetaData metadata, SelectStatement select, List<String> variables) throws TimedOutException, UnavailableException, InvalidRequestException {
    List<org.apache.cassandra.db.Row> rows;
    IPartitioner<?> p = StorageService.getPartitioner();
    AbstractType<?> keyType = Schema.instance.getCFMetaData(metadata.ksName, select.getColumnFamily()).getKeyValidator();
    ByteBuffer startKeyBytes = (select.getKeyStart() != null) ? select.getKeyStart().getByteBuffer(keyType, variables) : null;
    ByteBuffer finishKeyBytes = (select.getKeyFinish() != null) ? select.getKeyFinish().getByteBuffer(keyType, variables) : null;
    RowPosition startKey = RowPosition.forKey(startKeyBytes, p), finishKey = RowPosition.forKey(finishKeyBytes, p);
    if (startKey.compareTo(finishKey) > 0 && !finishKey.isMinimum(p)) {
        if (p instanceof RandomPartitioner)
            throw new InvalidRequestException("Start key sorts after end key. This is not allowed; you probably should not specify end key at all, under RandomPartitioner");
        else
            throw new InvalidRequestException("Start key must sort before (or equal to) finish key in your partitioner!");
    }
    AbstractBounds<RowPosition> bounds = new Bounds<RowPosition>(startKey, finishKey);
    // XXX: Our use of Thrift structs internally makes me Sad. :(
    SlicePredicate thriftSlicePredicate = slicePredicateFromSelect(select, metadata, variables);
    validateSlicePredicate(metadata, thriftSlicePredicate);
    List<IndexExpression> expressions = new ArrayList<IndexExpression>();
    for (Relation columnRelation : select.getColumnRelations()) {
        // Left and right side of relational expression encoded according to comparator/validator.
        ByteBuffer entity = columnRelation.getEntity().getByteBuffer(metadata.comparator, variables);
        ByteBuffer value = columnRelation.getValue().getByteBuffer(select.getValueValidator(metadata.ksName, entity), variables);
        expressions.add(new IndexExpression(entity, IndexOperator.valueOf(columnRelation.operator().toString()), value));
    }
    // fetch one extra row so an exclusive start key can be dropped below without shorting the result
    int limit = select.isKeyRange() && select.getKeyStart() != null ? select.getNumRecords() + 1 : select.getNumRecords();
    try {
        rows = StorageProxy.getRangeSlice(new RangeSliceCommand(metadata.ksName, select.getColumnFamily(), null, thriftSlicePredicate, bounds, expressions, limit), select.getConsistencyLevel());
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (org.apache.cassandra.thrift.UnavailableException e) {
        throw new UnavailableException();
    } catch (TimeoutException e) {
        throw new TimedOutException();
    }
    // if the start key was set and the relation was "greater than", drop the inclusive boundary row
    if (select.getKeyStart() != null && !select.includeStartKey() && !rows.isEmpty()) {
        if (rows.get(0).key.key.equals(startKeyBytes))
            rows.remove(0);
    }
    // if the finish key was set and the relation was "less than", drop the inclusive boundary row
    if (select.getKeyFinish() != null && !select.includeFinishKey() && !rows.isEmpty()) {
        int lastIndex = rows.size() - 1;
        if (rows.get(lastIndex).key.key.equals(finishKeyBytes))
            rows.remove(lastIndex);
    }
    return rows.subList(0, Math.min(select.getNumRecords(), rows.size()));
}
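
The row-trimming at the end is needed because Bounds is inclusive at both endpoints, so an exclusive start or finish relation cannot be expressed in the range itself; the slice is issued inclusively and boundary rows are dropped afterwards. A small sketch of that inclusivity difference (using the current Cassandra dht classes for brevity rather than eiger's older generic API):

import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;

public class InclusivitySketch {
    public static void main(String[] args) {
        // Bounds is [left, right]; Range is (left, right]
        Bounds<Token> bounds = new Bounds<>(new LongToken(10), new LongToken(20));
        Range<Token> range = new Range<>(new LongToken(10), new LongToken(20));
        System.out.println(bounds.contains(new LongToken(10))); // true  -- left end inclusive
        System.out.println(range.contains(new LongToken(10)));  // false -- left end exclusive
    }
}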