Search in sources :

Example 71 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

In the class SSTableExport, the method main:

/**
     * Given arguments specifying an SSTable, and optionally an output file, export the contents of the SSTable to JSON.
     *
     * @param args
     *            command lines arguments
     * @throws ConfigurationException
     *             on configuration failure (wrong params given)
     */
public static void main(String[] args) throws ConfigurationException {
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e1) {
        System.err.println(e1.getMessage());
        printUsage();
        System.exit(1);
    }
    if (cmd.getArgs().length != 1) {
        System.err.println("You must supply exactly one sstable");
        printUsage();
        System.exit(1);
    }
    String[] keys = cmd.getOptionValues(KEY_OPTION);
    HashSet<String> excludes = new HashSet<>(Arrays.asList(cmd.getOptionValues(EXCLUDE_KEY_OPTION) == null ? new String[0] : cmd.getOptionValues(EXCLUDE_KEY_OPTION)));
    String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath();
    if (!new File(ssTableFileName).exists()) {
        System.err.println("Cannot find file " + ssTableFileName);
        System.exit(1);
    }
    Descriptor desc = Descriptor.fromFilename(ssTableFileName);
    try {
        TableMetadata metadata = metadataFromSSTable(desc);
        if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
            try (KeyIterator iter = new KeyIterator(desc, metadata)) {
                JsonTransformer.keysToJson(null, iterToStream(iter), cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        } else {
            SSTableReader sstable = SSTableReader.openNoValidation(desc, TableMetadataRef.forOfflineTools(metadata));
            IPartitioner partitioner = sstable.getPartitioner();
            final ISSTableScanner currentScanner;
            if ((keys != null) && (keys.length > 0)) {
                List<AbstractBounds<PartitionPosition>> bounds = Arrays.stream(keys).filter(key -> !excludes.contains(key)).map(metadata.partitionKeyType::fromString).map(partitioner::decorateKey).sorted().map(DecoratedKey::getToken).map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound())).collect(Collectors.toList());
                currentScanner = sstable.getScanner(bounds.iterator());
            } else {
                currentScanner = sstable.getScanner();
            }
            Stream<UnfilteredRowIterator> partitions = iterToStream(currentScanner).filter(i -> excludes.isEmpty() || !excludes.contains(metadata.partitionKeyType.getString(i.partitionKey().getKey())));
            if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
                AtomicLong position = new AtomicLong();
                partitions.forEach(partition -> {
                    position.set(currentScanner.getCurrentPosition());
                    if (!partition.partitionLevelDeletion().isLive()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.partitionLevelDeletion());
                    }
                    if (!partition.staticRow().isEmpty()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.staticRow().toString(metadata, true));
                    }
                    partition.forEachRemaining(row -> {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + row.toString(metadata, false, true));
                        position.set(currentScanner.getCurrentPosition());
                    });
                });
            } else {
                JsonTransformer.toJson(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        }
    } catch (IOException e) {
        // throwing exception outside main with broken pipe causes windows cmd to hang
        e.printStackTrace(System.err);
    }
    System.exit(0);
}
Also used : ISSTableScanner(org.apache.cassandra.io.sstable.ISSTableScanner) java.util(java.util) org.apache.commons.cli(org.apache.commons.cli) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) KeyIterator(org.apache.cassandra.io.sstable.KeyIterator) UTF8Type(org.apache.cassandra.db.marshal.UTF8Type) DecoratedKey(org.apache.cassandra.db.DecoratedKey) ConfigurationException(org.apache.cassandra.exceptions.ConfigurationException) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) MetadataComponent(org.apache.cassandra.io.sstable.metadata.MetadataComponent) Descriptor(org.apache.cassandra.io.sstable.Descriptor) StreamSupport(java.util.stream.StreamSupport) DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) SerializationHeader(org.apache.cassandra.db.SerializationHeader) FBUtilities(org.apache.cassandra.utils.FBUtilities) MetadataType(org.apache.cassandra.io.sstable.metadata.MetadataType) ISSTableScanner(org.apache.cassandra.io.sstable.ISSTableScanner) IOException(java.io.IOException) org.apache.cassandra.dht(org.apache.cassandra.dht) Collectors(java.util.stream.Collectors) File(java.io.File) AtomicLong(java.util.concurrent.atomic.AtomicLong) Stream(java.util.stream.Stream) PartitionPosition(org.apache.cassandra.db.PartitionPosition) ColumnIdentifier(org.apache.cassandra.cql3.ColumnIdentifier) TableMetadataRef(org.apache.cassandra.schema.TableMetadataRef) TableMetadata(org.apache.cassandra.schema.TableMetadata) UnfilteredRowIterator(org.apache.cassandra.db.rows.UnfilteredRowIterator) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) TableMetadata(org.apache.cassandra.schema.TableMetadata) KeyIterator(org.apache.cassandra.io.sstable.KeyIterator) DecoratedKey(org.apache.cassandra.db.DecoratedKey) IOException(java.io.IOException) AtomicLong(java.util.concurrent.atomic.AtomicLong) Descriptor(org.apache.cassandra.io.sstable.Descriptor) 
DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) File(java.io.File)

Example 72 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

In the class StandaloneSSTableUtil, the method main:

public static void main(String[] args) {
    Options options = Options.parseArgs(args);
    try {
        // load keyspace descriptions.
        Util.initDatabaseDescriptor();
        Schema.instance.loadFromDisk(false);
        TableMetadata metadata = Schema.instance.getTableMetadata(options.keyspaceName, options.cfName);
        if (metadata == null)
            throw new IllegalArgumentException(String.format("Unknown keyspace/table %s.%s", options.keyspaceName, options.cfName));
        OutputHandler handler = new OutputHandler.SystemOutput(options.verbose, options.debug);
        if (options.cleanup) {
            handler.output("Cleanuping up...");
            LifecycleTransaction.removeUnfinishedLeftovers(metadata);
        } else {
            handler.output("Listing files...");
            listFiles(options, metadata, handler);
        }
        System.exit(0);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        if (options.debug)
            e.printStackTrace(System.err);
        System.exit(1);
    }
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) CmdLineOptions(org.apache.cassandra.tools.BulkLoader.CmdLineOptions) OutputHandler(org.apache.cassandra.utils.OutputHandler) IOException(java.io.IOException)

Example 73 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

In the class StorageService, the method getNaturalEndpoints:

/**
     * This method returns the N endpoints that are responsible for storing the
     * specified key i.e for replication.
     *
     * @param keyspaceName keyspace name also known as keyspace
     * @param cf Column family name
     * @param key key for which we need to find the endpoint
     * @return the endpoint responsible for this key
     */
public List<InetAddress> getNaturalEndpoints(String keyspaceName, String cf, String key) {
    KeyspaceMetadata ksMetaData = Schema.instance.getKeyspaceMetadata(keyspaceName);
    if (ksMetaData == null)
        throw new IllegalArgumentException("Unknown keyspace '" + keyspaceName + "'");
    TableMetadata metadata = ksMetaData.getTableOrViewNullable(cf);
    if (metadata == null)
        throw new IllegalArgumentException("Unknown table '" + cf + "' in keyspace '" + keyspaceName + "'");
    return getNaturalEndpoints(keyspaceName, tokenMetadata.partitioner.getToken(metadata.partitionKeyType.fromString(key)));
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) KeyspaceMetadata(org.apache.cassandra.schema.KeyspaceMetadata)

Example 74 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

In the class ByteOrderedPartitioner, the method describeOwnership:

public Map<Token, Float> describeOwnership(List<Token> sortedTokens) {
    // allTokens will contain the count and be returned, sorted_ranges is shorthand for token<->token math.
    Map<Token, Float> allTokens = new HashMap<Token, Float>();
    List<Range<Token>> sortedRanges = new ArrayList<Range<Token>>(sortedTokens.size());
    // this initializes the counts to 0 and calcs the ranges in order.
    Token lastToken = sortedTokens.get(sortedTokens.size() - 1);
    for (Token node : sortedTokens) {
        allTokens.put(node, new Float(0.0));
        sortedRanges.add(new Range<Token>(lastToken, node));
        lastToken = node;
    }
    for (String ks : Schema.instance.getKeyspaces()) {
        for (TableMetadata cfmd : Schema.instance.getTablesAndViews(ks)) {
            for (Range<Token> r : sortedRanges) {
                // Looping over every KS:CF:Range, get the splits size and add it to the count
                allTokens.put(r.right, allTokens.get(r.right) + StorageService.instance.getSplits(ks, cfmd.name, r, 1).size());
            }
        }
    }
    // Sum every count up and divide count/total for the fractional ownership.
    Float total = new Float(0.0);
    for (Float f : allTokens.values()) total += f;
    for (Map.Entry<Token, Float> row : allTokens.entrySet()) allTokens.put(row.getKey(), row.getValue() / total);
    return allTokens;
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashMap(java.util.HashMap) Map(java.util.Map)

Example 75 with TableMetadata

use of org.apache.cassandra.schema.TableMetadata in project cassandra by apache.

In the class AbstractBTreePartition, the method build:

protected static Holder build(UnfilteredRowIterator iterator, int initialRowCapacity, boolean ordered) {
    TableMetadata metadata = iterator.metadata();
    RegularAndStaticColumns columns = iterator.columns();
    boolean reversed = iterator.isReverseOrder();
    BTree.Builder<Row> builder = BTree.builder(metadata.comparator, initialRowCapacity);
    builder.auto(!ordered);
    MutableDeletionInfo.Builder deletionBuilder = MutableDeletionInfo.builder(iterator.partitionLevelDeletion(), metadata.comparator, reversed);
    while (iterator.hasNext()) {
        Unfiltered unfiltered = iterator.next();
        if (unfiltered.kind() == Unfiltered.Kind.ROW)
            builder.add((Row) unfiltered);
        else
            deletionBuilder.add((RangeTombstoneMarker) unfiltered);
    }
    if (reversed)
        builder.reverse();
    return new Holder(columns, builder.build(), deletionBuilder.build(), iterator.staticRow(), iterator.stats());
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) BTree(org.apache.cassandra.utils.btree.BTree)

Aggregations

TableMetadata (org.apache.cassandra.schema.TableMetadata)129 Test (org.junit.Test)63 ByteBuffer (java.nio.ByteBuffer)29 ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata)17 RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder)13 File (java.io.File)10 PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate)10 Mutation (org.apache.cassandra.db.Mutation)8 InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException)8 KeyspaceMetadata (org.apache.cassandra.schema.KeyspaceMetadata)8 Descriptor (org.apache.cassandra.io.sstable.Descriptor)7 IndexMetadata (org.apache.cassandra.schema.IndexMetadata)6 IOException (java.io.IOException)5 DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor)5 IndexTarget (org.apache.cassandra.cql3.statements.IndexTarget)5 ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)5 ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier)4 UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet)4 AbstractType (org.apache.cassandra.db.marshal.AbstractType)4 ConfigurationException (org.apache.cassandra.exceptions.ConfigurationException)4