
Example 1 with AbstractBounds

Use of org.apache.cassandra.dht.AbstractBounds in project cassandra by apache.

The class ReplicaPlanIterator, method getRestrictedRanges:

/**
 * Compute all ranges we're going to query, in sorted order. Nodes can be replica destinations for many ranges,
 * so we need to restrict each scan to the specific range we want, or else we'd get duplicate results.
 */
private static List<AbstractBounds<PartitionPosition>> getRestrictedRanges(final AbstractBounds<PartitionPosition> queryRange) {
    // special case for bounds containing exactly 1 (non-minimum) token
    if (queryRange instanceof Bounds && queryRange.left.equals(queryRange.right) && !queryRange.left.isMinimum()) {
        return Collections.singletonList(queryRange);
    }
    TokenMetadata tokenMetadata = StorageService.instance.getTokenMetadata();
    List<AbstractBounds<PartitionPosition>> ranges = new ArrayList<>();
    // divide the queryRange into pieces delimited by the ring and minimum tokens
    Iterator<Token> ringIter = TokenMetadata.ringIterator(tokenMetadata.sortedTokens(), queryRange.left.getToken(), true);
    AbstractBounds<PartitionPosition> remainder = queryRange;
    while (ringIter.hasNext()) {
        /*
         * remainder is a range/bounds of partition positions and we want to split it with a token. We want to split
         * using the key returned by token.maxKeyBound. For instance, if remainder is [DK(10, 'foo'), DK(20, 'bar')],
         * and we have 3 nodes with tokens 0, 15, 30, we want to split remainder to A=[DK(10, 'foo'), 15] and
         * B=(15, DK(20, 'bar')]. But since we can't mix tokens and keys at the same time in a range, we use
         * 15.maxKeyBound() to have A include all keys having 15 as token and B include none of those (since that is
         * what our node owns).
         */
        Token upperBoundToken = ringIter.next();
        PartitionPosition upperBound = upperBoundToken.maxKeyBound();
        if (!remainder.left.equals(upperBound) && !remainder.contains(upperBound))
            // no more splits
            break;
        Pair<AbstractBounds<PartitionPosition>, AbstractBounds<PartitionPosition>> splits = remainder.split(upperBound);
        if (splits == null)
            continue;
        ranges.add(splits.left);
        remainder = splits.right;
    }
    ranges.add(remainder);
    return ranges;
}
Also used : AbstractBounds(org.apache.cassandra.dht.AbstractBounds) PartitionPosition(org.apache.cassandra.db.PartitionPosition) Bounds(org.apache.cassandra.dht.Bounds) ArrayList(java.util.ArrayList) Token(org.apache.cassandra.dht.Token) TokenMetadata(org.apache.cassandra.locator.TokenMetadata)
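The splitting described in the inline comment can be seen in isolation with a small standalone sketch. The class name, the Murmur3Partitioner long tokens, and the values 10/15/20 are illustrative assumptions, not part of the original code:

// Minimal sketch (assumptions: Murmur3Partitioner long tokens, illustrative values 10/15/20).
// Splitting [min(10), max(20)] at 15.maxKeyBound() leaves a left piece covering every key
// whose token is <= 15 and a right piece starting strictly after those keys.
import org.apache.cassandra.db.PartitionPosition;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.Murmur3Partitioner.LongToken;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.utils.Pair;

public class SplitAtTokenSketch {
    public static void main(String[] args) {
        Token left = new LongToken(10L);
        Token right = new LongToken(20L);
        Token ringToken = new LongToken(15L);

        AbstractBounds<PartitionPosition> queryRange = new Bounds<>(left.minKeyBound(), right.maxKeyBound());
        Pair<AbstractBounds<PartitionPosition>, AbstractBounds<PartitionPosition>> splits =
                queryRange.split(ringToken.maxKeyBound());

        System.out.println(splits.left);  // bounds up to and including every key with token 15
        System.out.println(splits.right); // range strictly after token 15, up to token 20
    }
}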

Example 2 with AbstractBounds

Use of org.apache.cassandra.dht.AbstractBounds in project cassandra by apache.

The class SSTableExport, method main:

/**
 * Given arguments specifying an SSTable, and optionally an output file, export the contents of the SSTable to JSON.
 *
 * @param args
 *            command line arguments
 * @throws ConfigurationException
 *             on configuration failure (wrong params given)
 */
@SuppressWarnings("resource")
public static void main(String[] args) throws ConfigurationException {
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e1) {
        System.err.println(e1.getMessage());
        printUsage();
        System.exit(1);
    }
    String[] keys = cmd.getOptionValues(KEY_OPTION);
    HashSet<String> excludes = new HashSet<>(Arrays.asList(cmd.getOptionValues(EXCLUDE_KEY_OPTION) == null ? new String[0] : cmd.getOptionValues(EXCLUDE_KEY_OPTION)));
    if (cmd.getArgs().length != 1) {
        String msg = "You must supply exactly one sstable";
        if (cmd.getArgs().length == 0 && (keys != null && keys.length > 0 || !excludes.isEmpty()))
            msg += ", which should be before the -k/-x options so it's not interpreted as a partition key.";
        System.err.println(msg);
        printUsage();
        System.exit(1);
    }
    String ssTableFileName = new File(cmd.getArgs()[0]).absolutePath();
    if (!new File(ssTableFileName).exists()) {
        System.err.println("Cannot find file " + ssTableFileName);
        System.exit(1);
    }
    Descriptor desc = Descriptor.fromFilename(ssTableFileName);
    try {
        TableMetadata metadata = Util.metadataFromSSTable(desc);
        if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
            try (KeyIterator iter = new KeyIterator(desc, metadata)) {
                JsonTransformer.keysToJson(null, Util.iterToStream(iter), cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        } else {
            SSTableReader sstable = SSTableReader.openNoValidation(desc, TableMetadataRef.forOfflineTools(metadata));
            IPartitioner partitioner = sstable.getPartitioner();
            final ISSTableScanner currentScanner;
            if ((keys != null) && (keys.length > 0)) {
                // Build [minKeyBound, maxKeyBound] bounds around each requested key's token,
                // skipping any key that was also passed to -x.
                List<AbstractBounds<PartitionPosition>> bounds = Arrays.stream(keys)
                        .filter(key -> !excludes.contains(key))
                        .map(metadata.partitionKeyType::fromString)
                        .map(partitioner::decorateKey)
                        .sorted()
                        .map(DecoratedKey::getToken)
                        .map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound()))
                        .collect(Collectors.toList());
                currentScanner = sstable.getScanner(bounds.iterator());
            } else {
                currentScanner = sstable.getScanner();
            }
            // Skip any partition whose key was explicitly excluded on the command line.
            Stream<UnfilteredRowIterator> partitions = Util.iterToStream(currentScanner)
                    .filter(i -> excludes.isEmpty()
                                 || !excludes.contains(metadata.partitionKeyType.getString(i.partitionKey().getKey())));
            if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
                AtomicLong position = new AtomicLong();
                partitions.forEach(partition -> {
                    position.set(currentScanner.getCurrentPosition());
                    if (!partition.partitionLevelDeletion().isLive()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.partitionLevelDeletion());
                    }
                    if (!partition.staticRow().isEmpty()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.staticRow().toString(metadata, true));
                    }
                    partition.forEachRemaining(row -> {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + row.toString(metadata, false, true));
                        position.set(currentScanner.getCurrentPosition());
                    });
                });
            } else if (cmd.hasOption(PARTITION_JSON_LINES)) {
                JsonTransformer.toJsonLines(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            } else {
                JsonTransformer.toJson(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        }
    } catch (IOException e) {
        e.printStackTrace(System.err);
    }
    System.exit(0);
}
Also used : ISSTableScanner(org.apache.cassandra.io.sstable.ISSTableScanner) Arrays(java.util.Arrays) File(org.apache.cassandra.io.util.File) Options(org.apache.commons.cli.Options) HelpFormatter(org.apache.commons.cli.HelpFormatter) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) HashSet(java.util.HashSet) KeyIterator(org.apache.cassandra.io.sstable.KeyIterator) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) AbstractBounds(org.apache.cassandra.dht.AbstractBounds) Descriptor(org.apache.cassandra.io.sstable.Descriptor) CommandLine(org.apache.commons.cli.CommandLine) PosixParser(org.apache.commons.cli.PosixParser) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) Option(org.apache.commons.cli.Option) FBUtilities(org.apache.cassandra.utils.FBUtilities) CommandLineParser(org.apache.commons.cli.CommandLineParser) IOException(java.io.IOException) Bounds(org.apache.cassandra.dht.Bounds) Collectors(java.util.stream.Collectors) AtomicLong(java.util.concurrent.atomic.AtomicLong) List(java.util.List) Stream(java.util.stream.Stream) PartitionPosition(org.apache.cassandra.db.PartitionPosition) IPartitioner(org.apache.cassandra.dht.IPartitioner) ParseException(org.apache.commons.cli.ParseException) TableMetadataRef(org.apache.cassandra.schema.TableMetadataRef) TableMetadata(org.apache.cassandra.schema.TableMetadata)
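For the -k path above, each key string that survives the excludes filter is mapped to a Bounds covering exactly that key's token. The following standalone sketch mirrors that pipeline for a single key; the Murmur3Partitioner and the UTF8 key type are assumptions for illustration, since the real type comes from the SSTable's metadata:

// Minimal sketch (assumptions: Murmur3Partitioner and a UTF8 partition key type).
// Mirrors the Arrays.stream(keys)... pipeline above for a single key.
import java.nio.ByteBuffer;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.PartitionPosition;
import org.apache.cassandra.db.marshal.UTF8Type;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.dht.Token;

public class KeyToBoundsSketch {
    static Bounds<PartitionPosition> boundsForKey(String key, IPartitioner partitioner) {
        ByteBuffer raw = UTF8Type.instance.fromString(key);    // parse the key with its declared type
        DecoratedKey decorated = partitioner.decorateKey(raw); // attach the partitioner's token
        Token token = decorated.getToken();
        // Cover the whole token so the scanner returns every row of that partition.
        return new Bounds<>(token.minKeyBound(), token.maxKeyBound());
    }

    public static void main(String[] args) {
        System.out.println(boundsForKey("some-key", Murmur3Partitioner.instance));
    }
}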

Example 3 with AbstractBounds

Use of org.apache.cassandra.dht.AbstractBounds in project eiger by wlloyd.

The class QueryProcessor, method multiRangeSlice:

private static List<org.apache.cassandra.db.Row> multiRangeSlice(CFMetaData metadata, SelectStatement select, List<String> variables) throws TimedOutException, UnavailableException, InvalidRequestException {
    List<org.apache.cassandra.db.Row> rows;
    IPartitioner<?> p = StorageService.getPartitioner();
    AbstractType<?> keyType = Schema.instance.getCFMetaData(metadata.ksName, select.getColumnFamily()).getKeyValidator();
    ByteBuffer startKeyBytes = (select.getKeyStart() != null) ? select.getKeyStart().getByteBuffer(keyType, variables) : null;
    ByteBuffer finishKeyBytes = (select.getKeyFinish() != null) ? select.getKeyFinish().getByteBuffer(keyType, variables) : null;
    RowPosition startKey = RowPosition.forKey(startKeyBytes, p), finishKey = RowPosition.forKey(finishKeyBytes, p);
    if (startKey.compareTo(finishKey) > 0 && !finishKey.isMinimum(p)) {
        if (p instanceof RandomPartitioner)
            throw new InvalidRequestException("Start key sorts after end key. This is not allowed; you probably should not specify end key at all, under RandomPartitioner");
        else
            throw new InvalidRequestException("Start key must sort before (or equal to) finish key in your partitioner!");
    }
    AbstractBounds<RowPosition> bounds = new Bounds<RowPosition>(startKey, finishKey);
    // XXX: Our use of Thrift structs internally makes me Sad. :(
    SlicePredicate thriftSlicePredicate = slicePredicateFromSelect(select, metadata, variables);
    validateSlicePredicate(metadata, thriftSlicePredicate);
    List<IndexExpression> expressions = new ArrayList<IndexExpression>();
    for (Relation columnRelation : select.getColumnRelations()) {
        // Left and right side of relational expression encoded according to comparator/validator.
        ByteBuffer entity = columnRelation.getEntity().getByteBuffer(metadata.comparator, variables);
        ByteBuffer value = columnRelation.getValue().getByteBuffer(select.getValueValidator(metadata.ksName, entity), variables);
        expressions.add(new IndexExpression(entity, IndexOperator.valueOf(columnRelation.operator().toString()), value));
    }
    int limit = select.isKeyRange() && select.getKeyStart() != null ? select.getNumRecords() + 1 : select.getNumRecords();
    try {
        rows = StorageProxy.getRangeSlice(new RangeSliceCommand(metadata.ksName, select.getColumnFamily(), null, thriftSlicePredicate, bounds, expressions, limit), select.getConsistencyLevel());
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (org.apache.cassandra.thrift.UnavailableException e) {
        throw new UnavailableException();
    } catch (TimeoutException e) {
        throw new TimedOutException();
    }
    // if start key was set and relation was "greater than"
    if (select.getKeyStart() != null && !select.includeStartKey() && !rows.isEmpty()) {
        if (rows.get(0).key.key.equals(startKeyBytes))
            rows.remove(0);
    }
    // if finish key was set and relation was "less than"
    if (select.getKeyFinish() != null && !select.includeFinishKey() && !rows.isEmpty()) {
        int lastIndex = rows.size() - 1;
        if (rows.get(lastIndex).key.key.equals(finishKeyBytes))
            rows.remove(lastIndex);
    }
    return rows.subList(0, select.getNumRecords() < rows.size() ? select.getNumRecords() : rows.size());
}
Also used : RandomPartitioner(org.apache.cassandra.dht.RandomPartitioner) org.apache.cassandra.thrift(org.apache.cassandra.thrift) TimeoutException(java.util.concurrent.TimeoutException) AbstractBounds(org.apache.cassandra.dht.AbstractBounds) Bounds(org.apache.cassandra.dht.Bounds) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer)
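The last two if blocks drop the start or finish row when the bound was exclusive, which is also why the limit is bumped to getNumRecords() + 1 before the fetch. Below is a dependency-free sketch of that trimming step, with plain ByteBuffers standing in for the row keys and a hypothetical helper name:

// Minimal sketch of the exclusive-bound trimming above, using plain ByteBuffers
// instead of org.apache.cassandra.db.Row (the class and method names are hypothetical).
import java.nio.ByteBuffer;
import java.util.List;

public class TrimBoundsSketch {
    static void trimExclusiveBounds(List<ByteBuffer> rowKeys,
                                    ByteBuffer startKey, boolean includeStart,
                                    ByteBuffer finishKey, boolean includeFinish) {
        // Drop the first row if the range was "greater than" the start key.
        if (startKey != null && !includeStart && !rowKeys.isEmpty() && rowKeys.get(0).equals(startKey))
            rowKeys.remove(0);
        // Drop the last row if the range was "less than" the finish key.
        if (finishKey != null && !includeFinish && !rowKeys.isEmpty()
                && rowKeys.get(rowKeys.size() - 1).equals(finishKey))
            rowKeys.remove(rowKeys.size() - 1);
    }
}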

Aggregations

AbstractBounds (org.apache.cassandra.dht.AbstractBounds): 3
Bounds (org.apache.cassandra.dht.Bounds): 3
IOException (java.io.IOException): 2
PartitionPosition (org.apache.cassandra.db.PartitionPosition): 2
ByteBuffer (java.nio.ByteBuffer): 1
ArrayList (java.util.ArrayList): 1
Arrays (java.util.Arrays): 1
HashSet (java.util.HashSet): 1
List (java.util.List): 1
TimeoutException (java.util.concurrent.TimeoutException): 1
AtomicLong (java.util.concurrent.atomic.AtomicLong): 1
Collectors (java.util.stream.Collectors): 1
Stream (java.util.stream.Stream): 1
DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor): 1
DecoratedKey (org.apache.cassandra.db.DecoratedKey): 1
UnfilteredRowIterator (org.apache.cassandra.db.rows.UnfilteredRowIterator): 1
IPartitioner (org.apache.cassandra.dht.IPartitioner): 1
RandomPartitioner (org.apache.cassandra.dht.RandomPartitioner): 1
Token (org.apache.cassandra.dht.Token): 1
ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException): 1