Example 16 with QueryFilter

use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

the class DefsTable method loadFromStorage.

/** loads a version of keyspace definitions from storage */
public static synchronized Collection<KSMetaData> loadFromStorage(UUID version) throws IOException {
    DecoratedKey vkey = StorageService.getPartitioner().decorateKey(Migration.toUTF8Bytes(version));
    Table defs = Table.open(Table.SYSTEM_TABLE);
    ColumnFamilyStore cfStore = defs.getColumnFamilyStore(Migration.SCHEMA_CF);
    QueryFilter filter = QueryFilter.getIdentityFilter(vkey, new QueryPath(Migration.SCHEMA_CF));
    ColumnFamily cf = cfStore.getColumnFamily(filter);
    IColumn avroschema = cf.getColumn(DEFINITION_SCHEMA_COLUMN_NAME);
    if (avroschema == null)
        // TODO: more polite way to handle this?
        throw new RuntimeException("Cannot read system table! Are you upgrading a pre-release version?");
    ByteBuffer value = avroschema.value();
    Schema schema = Schema.parse(ByteBufferUtil.string(value));
    // deserialize keyspaces using schema
    Collection<KSMetaData> keyspaces = new ArrayList<KSMetaData>();
    for (IColumn column : cf.getSortedColumns()) {
        if (column.name().equals(DEFINITION_SCHEMA_COLUMN_NAME))
            continue;
        org.apache.cassandra.db.migration.avro.KsDef ks = SerDeUtils.deserialize(schema, column.value(), new org.apache.cassandra.db.migration.avro.KsDef());
        keyspaces.add(KSMetaData.fromAvro(ks));
    }
    return keyspaces;
}
Also used : Schema(org.apache.avro.Schema) ByteBuffer(java.nio.ByteBuffer) QueryPath(org.apache.cassandra.db.filter.QueryPath) QueryFilter(org.apache.cassandra.db.filter.QueryFilter) KSMetaData(org.apache.cassandra.config.KSMetaData)
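
For orientation, a caller might use loadFromStorage along the lines of the sketch below. dumpKeyspaceDefinitions is a hypothetical helper, not part of eiger, and it assumes the classes shown above are on the classpath.

// Hypothetical caller sketch: load the keyspace definitions stored under a given
// schema version and print their names (KSMetaData.name is a public field).
static void dumpKeyspaceDefinitions(UUID schemaVersion) throws IOException {
    for (KSMetaData ksm : DefsTable.loadFromStorage(schemaVersion))
        System.out.println("keyspace: " + ksm.name);
}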

Example 17 with QueryFilter

use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

the class CollationController method collectTimeOrderedData.

/**
     * Collects data in order of recency, using the sstable maxtimestamp data.
     * Once we have data for all requested columns that is newer than the newest remaining maxtimestamp,
     * we stop.
     */
private ColumnFamily collectTimeOrderedData() {
    logger.debug("collectTimeOrderedData");
    ISortedColumns.Factory factory = mutableColumns ? AtomicSortedColumns.factory() : TreeMapBackedSortedColumns.factory();
    ColumnFamily container = ColumnFamily.create(cfs.metadata, factory, filter.filter.isReversed());
    List<IColumnIterator> iterators = new ArrayList<IColumnIterator>();
    ColumnFamilyStore.ViewFragment view = cfs.markReferenced(filter.key);
    try {
        for (Memtable memtable : view.memtables) {
            IColumnIterator iter = filter.getMemtableColumnIterator(memtable);
            if (iter != null) {
                iterators.add(iter);
                container.delete(iter.getColumnFamily());
                while (iter.hasNext()) container.addColumn(iter.next());
            }
        }
        // avoid changing the filter columns of the original filter
        // (reduceNameFilter removes columns that are known to be irrelevant)
        TreeSet<ByteBuffer> filterColumns = new TreeSet<ByteBuffer>(((NamesQueryFilter) filter.filter).columns);
        QueryFilter reducedFilter = new QueryFilter(filter.key, filter.path, new NamesQueryFilter(filterColumns));
        /* add the SSTables on disk */
        Collections.sort(view.sstables, SSTable.maxTimestampComparator);
        // read sorted sstables
        for (SSTableReader sstable : view.sstables) {
            long currentMaxTs = sstable.getMaxTimestamp();
            reduceNameFilter(reducedFilter, container, currentMaxTs);
            if (((NamesQueryFilter) reducedFilter.filter).columns.isEmpty())
                break;
            IColumnIterator iter = reducedFilter.getSSTableColumnIterator(sstable);
            iterators.add(iter);
            if (iter.getColumnFamily() != null) {
                container.delete(iter.getColumnFamily());
                sstablesIterated++;
                while (iter.hasNext()) container.addColumn(iter.next());
            }
        }
        // and "there used to be data, but it's gone now" (we should cache the empty CF so we don't need to rebuild that slower)
        if (iterators.isEmpty())
            return null;
        // do a final collate.  toCollate is boilerplate required to provide a CloseableIterator
        final ColumnFamily c2 = container;
        CloseableIterator<IColumn> toCollate = new SimpleAbstractColumnIterator() {

            final Iterator<IColumn> iter = c2.iterator();

            protected IColumn computeNext() {
                return iter.hasNext() ? iter.next() : endOfData();
            }

            public ColumnFamily getColumnFamily() {
                return c2;
            }

            public DecoratedKey getKey() {
                return filter.key;
            }
        };
        ColumnFamily returnCF = container.cloneMeShallow();
        filter.collateColumns(returnCF, Collections.singletonList(toCollate), gcBefore);
        // "hoist up" the requested data into a more recent sstable
        if (sstablesIterated > cfs.getMinimumCompactionThreshold() && !cfs.isCompactionDisabled() && cfs.getCompactionStrategy() instanceof SizeTieredCompactionStrategy) {
            RowMutation rm = new RowMutation(cfs.table.name, new Row(filter.key, returnCF.cloneMe()));
            try {
                // skipping commitlog and index updates is fine since we're just de-fragmenting existing data
                Table.open(rm.getTable()).apply(rm, false, false);
            } catch (IOException e) {
                // log and allow the result to be returned
                logger.error("Error re-writing read results", e);
            }
        }
        // Caller is responsible for final removeDeletedCF.  This is important for cacheRow to work correctly:
        return returnCF;
    } finally {
        for (IColumnIterator iter : iterators) FileUtils.closeQuietly(iter);
        SSTableReader.releaseReferences(view.sstables);
    }
}
Also used : IColumnIterator(org.apache.cassandra.db.columniterator.IColumnIterator) SimpleAbstractColumnIterator(org.apache.cassandra.db.columniterator.SimpleAbstractColumnIterator) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) NamesQueryFilter(org.apache.cassandra.db.filter.NamesQueryFilter) QueryFilter(org.apache.cassandra.db.filter.QueryFilter) SSTableReader(org.apache.cassandra.io.sstable.SSTableReader) CloseableIterator(org.apache.cassandra.utils.CloseableIterator) SizeTieredCompactionStrategy(org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy)
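
The core trick above is working on a copy of the requested column names so reduceNameFilter can shrink the set as columns are satisfied, letting the sstable loop stop early once nothing is left. A minimal stand-alone sketch of that copy-then-narrow pattern, with a made-up row key, column family name, and column names (the real code copies the caller's set, which also carries the correct column comparator):

// Illustrative only: build a mutable names filter, then drop names as they are satisfied.
DecoratedKey<?> key = StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes("row1"));
SortedSet<ByteBuffer> requested = new TreeSet<ByteBuffer>();
requested.add(ByteBufferUtil.bytes("a"));
requested.add(ByteBufferUtil.bytes("b"));
QueryFilter narrowed = new QueryFilter(key, new QueryPath("Standard1"), new NamesQueryFilter(requested));
// after reading a newer sstable that already contained "a":
requested.remove(ByteBufferUtil.bytes("a"));
// once 'requested' is empty, older sstables need not be touched at all.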

Example 18 with QueryFilter

use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

the class Migration method getLocalMigrations.

/** load serialized migrations. */
public static Collection<IColumn> getLocalMigrations(UUID start, UUID end) {
    DecoratedKey<?> dkey = StorageService.getPartitioner().decorateKey(MIGRATIONS_KEY);
    Table defs = Table.open(Table.SYSTEM_TABLE);
    ColumnFamilyStore cfStore = defs.getColumnFamilyStore(Migration.MIGRATIONS_CF);
    QueryFilter filter = QueryFilter.getSliceFilter(dkey, new QueryPath(MIGRATIONS_CF), ByteBuffer.wrap(UUIDGen.decompose(start)), ByteBuffer.wrap(UUIDGen.decompose(end)), false, 100);
    ColumnFamily cf = cfStore.getColumnFamily(filter);
    return cf.getSortedColumns();
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) QueryFilter(org.apache.cassandra.db.filter.QueryFilter)
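
getSliceFilter takes start and finish column bounds, a reversed flag, and a column count. As a hedged variation (not taken from eiger), an ByteBufferUtil.EMPTY_BYTE_BUFFER bound leaves that end of the slice open, and reversing the slice reads from the end of the row, which for time-ordered UUID columns would yield the newest migrations first:

// Hedged variation: fetch up to 10 migration columns starting from the end of the row.
DecoratedKey<?> dkey = StorageService.getPartitioner().decorateKey(MIGRATIONS_KEY);
QueryFilter newestFirst = QueryFilter.getSliceFilter(dkey,
        new QueryPath(MIGRATIONS_CF),
        ByteBufferUtil.EMPTY_BYTE_BUFFER, // no lower bound
        ByteBufferUtil.EMPTY_BYTE_BUFFER, // no upper bound
        true,                             // reversed
        10);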

Example 19 with QueryFilter

use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

the class SystemTable method incrementAndGetGeneration.

public static int incrementAndGetGeneration() throws IOException {
    Table table = Table.open(Table.SYSTEM_TABLE);
    QueryFilter filter = QueryFilter.getNamesFilter(decorate(LOCATION_KEY), new QueryPath(STATUS_CF), GENERATION);
    ColumnFamily cf = table.getColumnFamilyStore(STATUS_CF).getColumnFamily(filter);
    int generation;
    if (cf == null) {
        // seconds-since-epoch isn't a foolproof new generation
        // (where foolproof is "guaranteed to be larger than the last one seen at this ip address"),
        // but it's as close as sanely possible
        generation = (int) (System.currentTimeMillis() / 1000);
    } else {
        // Other nodes will ignore gossip messages about a node that has a lower generation than previously seen.
        final int storedGeneration = ByteBufferUtil.toInt(cf.getColumn(GENERATION).value()) + 1;
        final int now = (int) (System.currentTimeMillis() / 1000);
        if (storedGeneration >= now) {
            logger.warn("Using stored Gossip Generation {} as it is greater than current system time {}.  See CASSANDRA-3654 if you experience problems", storedGeneration, now);
            generation = storedGeneration;
        } else {
            generation = now;
        }
    }
    RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, LOCATION_KEY);
    cf = ColumnFamily.create(Table.SYSTEM_TABLE, SystemTable.STATUS_CF);
    cf.addColumn(new Column(GENERATION, ByteBufferUtil.bytes(generation), LamportClock.getVersion()));
    rm.add(cf);
    rm.apply();
    forceBlockingFlush(STATUS_CF);
    return generation;
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) QueryFilter(org.apache.cassandra.db.filter.QueryFilter)
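
The generation chosen above is effectively max(storedGeneration + 1, seconds-since-epoch); the hypothetical helper below (not in eiger) just isolates that decision.

// Hypothetical extraction of the branch above.
static int chooseGeneration(int storedPlusOne, int nowSeconds) {
    return storedPlusOne >= nowSeconds ? storedPlusOne : nowSeconds;
}
// e.g. chooseGeneration(1700000005, 1700000000) == 1700000005  (stored clock ran ahead)
//      chooseGeneration(1600000000, 1700000000) == 1700000000  (normal case)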

Example 20 with QueryFilter

use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

the class SystemTable method getOldLocalNodeIds.

public static List<NodeId.NodeIdRecord> getOldLocalNodeIds() {
    List<NodeId.NodeIdRecord> l = new ArrayList<NodeId.NodeIdRecord>();
    Table table = Table.open(Table.SYSTEM_TABLE);
    QueryFilter filter = QueryFilter.getIdentityFilter(decorate(ALL_LOCAL_NODE_ID_KEY), new QueryPath(NODE_ID_CF));
    ColumnFamily cf = table.getColumnFamilyStore(NODE_ID_CF).getColumnFamily(filter);
    NodeId previous = null;
    for (IColumn c : cf) {
        if (previous != null)
            l.add(new NodeId.NodeIdRecord(previous, c.timestamp()));
        // this will ignore the last column on purpose since it is the
        // current local node id
        previous = NodeId.wrap(c.name());
    }
    return l;
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) QueryFilter(org.apache.cassandra.db.filter.QueryFilter) NodeId(org.apache.cassandra.utils.NodeId)
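
Taken together, Examples 16 to 20 cover the three QueryFilter factory shapes used throughout eiger. A compact, hedged recap with placeholder key, column family, and column name:

DecoratedKey<?> key = StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes("row1"));
QueryPath path = new QueryPath("Standard1"); // placeholder column family
QueryFilter wholeRow  = QueryFilter.getIdentityFilter(key, path);                                 // Examples 16 and 20
QueryFilter someNames = QueryFilter.getNamesFilter(key, path, ByteBufferUtil.bytes("col"));       // Example 19
QueryFilter aSlice    = QueryFilter.getSliceFilter(key, path,
        ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 100);          // Example 18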

Aggregations

QueryFilter (org.apache.cassandra.db.filter.QueryFilter): 26
QueryPath (org.apache.cassandra.db.filter.QueryPath): 22
ByteBuffer (java.nio.ByteBuffer): 7
SSTableReader (org.apache.cassandra.io.sstable.SSTableReader): 7
ColumnFamily (org.apache.cassandra.db.ColumnFamily): 6
File (java.io.File): 5
IColumnIterator (org.apache.cassandra.db.columniterator.IColumnIterator): 5
SSTableUtils.tempSSTableFile (org.apache.cassandra.io.sstable.SSTableUtils.tempSSTableFile): 5
Test (org.junit.Test): 5
IColumn (org.apache.cassandra.db.IColumn): 4
IOException (java.io.IOException): 3
UnknownHostException (java.net.UnknownHostException): 3
CloseableIterator (org.apache.cassandra.utils.CloseableIterator): 3
ArrayList (java.util.ArrayList): 2
ConfigurationException (org.apache.cassandra.config.ConfigurationException): 2
DeletedColumn (org.apache.cassandra.db.DeletedColumn): 2
ExpiringColumn (org.apache.cassandra.db.ExpiringColumn): 2
NamesQueryFilter (org.apache.cassandra.db.filter.NamesQueryFilter): 2
SliceQueryFilter (org.apache.cassandra.db.filter.SliceQueryFilter): 2
Closeable (java.io.Closeable): 1