Search in sources:

Example 1 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

In class ColumnFamilyStore, method createColumnFamilyStore:

/**
 * Creates a {@code ColumnFamilyStore} for the given table, seeding its sstable
 * generation counter from the files already on disk. This is only directly used
 * by offline tools.
 *
 * @param keyspace            keyspace owning the new store
 * @param columnFamily        name of the table
 * @param metadata            schema metadata reference for the table
 * @param directories         data directories to scan (backups included)
 * @param loadSSTables        whether existing sstables should be opened
 * @param registerBookkeeping whether to register metrics/bookkeeping hooks
 * @param offline             whether the store runs in offline-tool mode
 * @return the newly constructed store
 * @throws RuntimeException if an sstable on disk has an incompatible format version
 */
public static synchronized ColumnFamilyStore createColumnFamilyStore(Keyspace keyspace, String columnFamily, TableMetadataRef metadata, Directories directories, boolean loadSSTables, boolean registerBookkeeping, boolean offline) {
    // Scan all sstables (including backups) to find the max generation number,
    // to prevent generation conflicts with files already on disk.
    Directories.SSTableLister lister = directories.sstableLister(Directories.OnTxnErr.IGNORE).includeBackups(true);
    List<Integer> generations = new ArrayList<Integer>();
    for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet()) {
        Descriptor desc = entry.getKey();
        generations.add(desc.generation);
        // Refuse to proceed if any on-disk sstable cannot be read by the current version.
        if (!desc.isCompatible())
            throw new RuntimeException(String.format("Incompatible SSTable found. Current version %s is unable to read file: %s. Please run upgradesstables.", desc.getFormat().getLatestVersion(), desc));
    }
    // Collections.max is O(n); the previous code sorted the whole list (O(n log n))
    // just to read its last element.
    int value = generations.isEmpty() ? 0 : Collections.max(generations);
    return new ColumnFamilyStore(keyspace, columnFamily, value, metadata, directories, loadSSTables, registerBookkeeping, offline);
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Descriptor(org.apache.cassandra.io.sstable.Descriptor)

Example 2 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

In class CompactionManager, method forceUserDefinedCompaction:

/**
 * Submits a user-defined compaction for each sstable data file named in the
 * comma-separated {@code dataFiles} list, grouped by the table that owns it.
 * Files whose table no longer exists in the schema are skipped with a warning.
 */
public void forceUserDefinedCompaction(String dataFiles) {
    Multimap<ColumnFamilyStore, Descriptor> byStore = ArrayListMultimap.create();
    for (String rawName : dataFiles.split(",")) {
        String trimmed = rawName.trim();
        // Extract keyspace and table name from the sstable file name.
        Descriptor parsed = Descriptor.fromFilename(trimmed);
        if (Schema.instance.getTableMetadataRef(parsed) == null) {
            logger.warn("Schema does not exist for file {}. Skipping.", rawName);
            continue;
        }
        // Group the descriptors by keyspace/table so each store gets one submission.
        ColumnFamilyStore store = Keyspace.open(parsed.ksname).getColumnFamilyStore(parsed.cfname);
        byStore.put(store, store.getDirectories().find(new File(trimmed).getName()));
    }
    List<Future<?>> pending = new ArrayList<>();
    int nowInSec = FBUtilities.nowInSeconds();
    for (ColumnFamilyStore store : byStore.keySet())
        pending.add(submitUserDefined(store, byStore.get(store), getDefaultGcBefore(store, nowInSec)));
    FBUtilities.waitOnFutures(pending);
}
Also used : DatabaseDescriptor(org.apache.cassandra.config.DatabaseDescriptor) Descriptor(org.apache.cassandra.io.sstable.Descriptor) File(java.io.File)

Example 3 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

In class RangeAwareSSTableWriter, method maybeSwitchWriter:

/**
 * Advances {@code currentIndex} until the current boundary covers {@code key},
 * and if the index moved, retires the active writer and opens a new one on the
 * disk location for the new range. No-op when no boundaries are configured.
 */
private void maybeSwitchWriter(DecoratedKey key) {
    if (boundaries == null)
        return;
    boolean advanced = false;
    // Walk forward to the first boundary that is >= this key (keys arrive in order).
    while (currentIndex < 0 || key.compareTo(boundaries.get(currentIndex)) > 0) {
        currentIndex++;
        advanced = true;
    }
    if (!advanced)
        return;
    // Retire the writer for the previous range before opening one on the new disk.
    if (currentWriter != null)
        finishedWriters.add(currentWriter);
    Descriptor newDescriptor = cfs.newSSTableDescriptor(cfs.getDirectories().getLocationForDisk(directories[currentIndex]), format);
    currentWriter = cfs.createSSTableMultiWriter(newDescriptor, estimatedKeys, repairedAt, pendingRepair, sstableLevel, header, txn);
}
Also used : Descriptor(org.apache.cassandra.io.sstable.Descriptor)

Example 4 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

In class SSTableExpiredBlockers, method main:

/**
 * Offline tool entry point: opens every non-temporary sstable of the given
 * keyspace/table and reports which sstables block expired sstables from being
 * dropped, based on the table's gc_grace_seconds.
 */
public static void main(String[] args) {
    PrintStream out = System.out;
    if (args.length < 2) {
        out.println("Usage: sstableexpiredblockers <keyspace> <table>");
        System.exit(1);
    }
    Util.initDatabaseDescriptor();
    // Keyspace and table are the last two arguments.
    String keyspace = args[args.length - 2];
    String columnfamily = args[args.length - 1];
    Schema.instance.loadFromDisk(false);
    TableMetadata metadata = Schema.instance.validateTable(keyspace, columnfamily);
    Keyspace ks = Keyspace.openWithoutSSTables(keyspace);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(columnfamily);
    Directories.SSTableLister lister = cfs.getDirectories().sstableLister(Directories.OnTxnErr.THROW).skipTemporary(true);
    Set<SSTableReader> sstables = new HashSet<>();
    // Only the descriptors are needed here; the component sets are ignored.
    for (Descriptor descriptor : lister.list().keySet()) {
        if (descriptor == null)
            continue;
        try {
            sstables.add(SSTableReader.open(descriptor));
        } catch (Throwable t) {
            // Best-effort: report unreadable sstables and keep going.
            out.println("Couldn't open sstable: " + descriptor.filenameFor(Component.DATA) + " (" + t.getMessage() + ")");
        }
    }
    if (sstables.isEmpty()) {
        out.println("No sstables for " + keyspace + "." + columnfamily);
        System.exit(1);
    }
    int gcBefore = (int) (System.currentTimeMillis() / 1000) - metadata.params.gcGraceSeconds;
    Multimap<SSTableReader, SSTableReader> blockers = checkForExpiredSSTableBlockers(sstables, gcBefore);
    for (SSTableReader blocker : blockers.keySet()) {
        out.println(String.format("%s blocks %d expired sstables from getting dropped: %s%n", formatForExpiryTracing(Collections.singleton(blocker)), blockers.get(blocker).size(), formatForExpiryTracing(blockers.get(blocker))));
    }
    System.exit(0);
}
Also used : TableMetadata(org.apache.cassandra.schema.TableMetadata) PrintStream(java.io.PrintStream) Set(java.util.Set) HashSet(java.util.HashSet) Directories(org.apache.cassandra.db.Directories) SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Descriptor(org.apache.cassandra.io.sstable.Descriptor) Map(java.util.Map) HashSet(java.util.HashSet)

Example 5 with Descriptor

use of org.apache.cassandra.io.sstable.Descriptor in project cassandra by apache.

In class SSTableLevelResetter, method main:

/**
     * Offline tool entry point: rewrites the STATS metadata of every sstable of
     * the given table so its compaction level becomes 0. Must be run while
     * Cassandra is stopped.
     *
     * @param args expected form: {@code --really-reset <keyspace> <table>}
     */
public static void main(String[] args) {
    PrintStream out = System.out;
    if (args.length == 0) {
        out.println("This command should be run with Cassandra stopped!");
        out.println("Usage: sstablelevelreset <keyspace> <table>");
        System.exit(1);
    }
    // Require the explicit --really-reset flag as a safety interlock.
    if (!args[0].equals("--really-reset") || args.length != 3) {
        out.println("This command should be run with Cassandra stopped, otherwise you will get very strange behavior");
        out.println("Verify that Cassandra is not running and then execute the command like this:");
        out.println("Usage: sstablelevelreset --really-reset <keyspace> <table>");
        System.exit(1);
    }
    Util.initDatabaseDescriptor();
    // Any failure below must still end in an explicit System.exit, so wrap everything.
    try {
        // Load keyspace descriptions from disk.
        Schema.instance.loadFromDisk(false);
        String keyspaceName = args[1];
        String tableName = args[2];
        // Validate that the table exists before touching any files.
        if (Schema.instance.getTableMetadataRef(keyspaceName, tableName) == null) {
            System.err.println("ColumnFamily not found: " + keyspaceName + "/" + tableName);
            System.exit(1);
        }
        Keyspace keyspace = Keyspace.openWithoutSSTables(keyspaceName);
        ColumnFamilyStore store = keyspace.getColumnFamilyStore(tableName);
        boolean sawSSTable = false;
        for (Map.Entry<Descriptor, Set<Component>> entry : store.getDirectories().sstableLister(Directories.OnTxnErr.THROW).list().entrySet()) {
            // Only sstables with a STATS component carry a level to reset.
            if (!entry.getValue().contains(Component.STATS))
                continue;
            sawSSTable = true;
            Descriptor desc = entry.getKey();
            StatsMetadata stats = (StatsMetadata) desc.getMetadataSerializer().deserialize(desc, MetadataType.STATS);
            if (stats.sstableLevel > 0) {
                out.println("Changing level from " + stats.sstableLevel + " to 0 on " + desc.filenameFor(Component.DATA));
                desc.getMetadataSerializer().mutateLevel(desc, 0);
            } else {
                out.println("Skipped " + desc.filenameFor(Component.DATA) + " since it is already on level 0");
            }
        }
        if (!sawSSTable) {
            out.println("Found no sstables, did you give the correct keyspace/table?");
        }
    } catch (Throwable t) {
        JVMStabilityInspector.inspectThrowable(t);
        t.printStackTrace();
        System.exit(1);
    }
    System.exit(0);
}
Also used : StatsMetadata(org.apache.cassandra.io.sstable.metadata.StatsMetadata) PrintStream(java.io.PrintStream) Set(java.util.Set) Keyspace(org.apache.cassandra.db.Keyspace) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) Descriptor(org.apache.cassandra.io.sstable.Descriptor) Map(java.util.Map)

Aggregations

Descriptor (org.apache.cassandra.io.sstable.Descriptor)38 File (java.io.File)24 DatabaseDescriptor (org.apache.cassandra.config.DatabaseDescriptor)15 Test (org.junit.Test)12 ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore)9 Component (org.apache.cassandra.io.sstable.Component)8 SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader)8 TableMetadata (org.apache.cassandra.schema.TableMetadata)6 Pair (org.apache.cassandra.utils.Pair)6 IOException (java.io.IOException)5 Map (java.util.Map)4 Directories (org.apache.cassandra.db.Directories)4 Keyspace (org.apache.cassandra.db.Keyspace)4 PrintStream (java.io.PrintStream)3 ByteBuffer (java.nio.ByteBuffer)3 Set (java.util.Set)3 DecoratedKey (org.apache.cassandra.db.DecoratedKey)3 SerializationHeader (org.apache.cassandra.db.SerializationHeader)3 NonBlockingHashMap (org.cliffc.high_scale_lib.NonBlockingHashMap)3 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)2