Example 1 with QueryPath

Use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd.

In the class ColumnFamilyStore, the filter method:

public List<Row> filter(AbstractScanIterator rowIterator, ExtendedFilter filter) {
    List<Row> rows = new ArrayList<Row>();
    int columnsCount = 0;
    try {
        while (rowIterator.hasNext() && rows.size() < filter.maxRows() && columnsCount < filter.maxColumns()) {
            // get the raw columns requested, and additional columns for the expressions if necessary
            Row rawRow = rowIterator.next();
            ColumnFamily data = rawRow.cf;
            // fetch any additional columns the filter expressions need beyond what the initial read returned
            IFilter extraFilter = filter.getExtraFilter(data);
            if (extraFilter != null) {
                QueryPath path = new QueryPath(columnFamily);
                ColumnFamily cf = filter.cfs.getColumnFamily(new QueryFilter(rawRow.key, path, extraFilter));
                if (cf != null)
                    data.addAll(cf, HeapAllocator.instance);
            }
            if (!filter.isSatisfiedBy(data))
                continue;
            logger.debug("{} satisfies all filter expressions", data);
            // cut the resultset back to what was requested, if necessary
            data = filter.prune(data);
            rows.add(new Row(rawRow.key, data));
            if (data != null)
                columnsCount += data.getLiveColumnCount();
            // Update the underlying filter to avoid querying more columns per slice than necessary
            filter.updateColumnsLimit(columnsCount);
        }
        return rows;
    } finally {
        try {
            rowIterator.close();
        } catch (IOException e) {
            throw new IOError(e);
        }
    }
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) QueryFilter(org.apache.cassandra.db.filter.QueryFilter) IFilter(org.apache.cassandra.db.filter.IFilter)
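
Taken together, the examples on this page build QueryPath in three shapes: the column family alone, the column family plus a super column, and a fully qualified path down to a single column. A minimal sketch of those three constructor forms as they appear in the snippets below (the column-family and column names here are placeholders, not values from eiger):

void queryPathExamples() {
    // path covering a whole column family, as in the extra-filter read above
    QueryPath cfPath = new QueryPath("Standard2");
    // path scoped to a super column; the null column name means "all subcolumns" (see Example 2)
    QueryPath superPath = new QueryPath("Super1", ByteBufferUtil.bytes("sc1"), null);
    // fully qualified path naming one column, as in Example 5's RowMutation.add
    QueryPath columnPath = new QueryPath("Standard2", null, ByteBufferUtil.bytes("0"));
}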

Example 2 with QueryPath

Use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd.

In the class ColumnFamilyStore, the getSequentialIterator method:

/**
 * Iterate over a range of rows and columns from memtables/sstables.
 *
 * @param superColumn optional SuperColumn to slice subcolumns of; null to slice top-level columns
 * @param range Either a Bounds, which includes start key, or a Range, which does not.
 * @param columnFilter description of the columns we're interested in for each row
 */
public AbstractScanIterator getSequentialIterator(ByteBuffer superColumn, final AbstractBounds<RowPosition> range, IFilter columnFilter) {
    assert range instanceof Bounds || !((Range) range).isWrapAround() || range.right.isMinimum() : range;
    final RowPosition startWith = range.left;
    final RowPosition stopAt = range.right;
    QueryFilter filter = new QueryFilter(null, new QueryPath(columnFamily, superColumn, null), columnFilter);
    List<Row> rows;
    final ViewFragment view = markReferenced(startWith, stopAt);
    try {
        final CloseableIterator<Row> iterator = RowIteratorFactory.getIterator(view.memtables, view.sstables, startWith, stopAt, filter, this);
        final int gcBefore = (int) (System.currentTimeMillis() / 1000) - metadata.getGcGraceSeconds();
        return new AbstractScanIterator() {

            boolean first = true;

            protected Row computeNext() {
                // pull a row out of the iterator
                if (!iterator.hasNext())
                    return endOfData();
                Row current = iterator.next();
                DecoratedKey key = current.key;
                if (!stopAt.isMinimum() && stopAt.compareTo(key) < 0)
                    return endOfData();
                // skip first one
                if (range instanceof Bounds || !first || !key.equals(startWith)) {
                    if (logger.isDebugEnabled())
                        logger.debug("scanned " + key);
                    // when supercolumns are collated we don't check their subcolumns for relevance,
                    // so we need to do a second prune post facto here.
                    return current.cf != null && current.cf.isSuper() ? new Row(current.key, removeDeleted(current.cf, gcBefore)) : current;
                }
                first = false;
                return computeNext();
            }

            public void close() throws IOException {
                SSTableReader.releaseReferences(view.sstables);
                try {
                    iterator.close();
                } catch (IOException e) {
                    throw new IOError(e);
                }
            }
        };
    } catch (RuntimeException e) {
        // in case getIterator() throws; otherwise the iterator's close method releases the references
        SSTableReader.releaseReferences(view.sstables);
        throw e;
    }
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) QueryFilter(org.apache.cassandra.db.filter.QueryFilter)
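
Callers of getSequentialIterator are responsible for draining and then closing the returned AbstractScanIterator, since close() is what releases the sstable references taken by markReferenced (Example 1's filter method follows this pattern). A hedged sketch of that calling convention, with the construction of the range and the column filter omitted because their builders are not shown on this page:

void scanRange(ColumnFamilyStore cfs, ByteBuffer superColumn, AbstractBounds<RowPosition> range, IFilter columnFilter) throws IOException {
    AbstractScanIterator rows = cfs.getSequentialIterator(superColumn, range, columnFilter);
    try {
        while (rows.hasNext()) {
            Row row = rows.next();
            // row.key and row.cf carry the decorated key and the sliced columns for this row
        }
    } finally {
        // releases the references to view.sstables taken inside getSequentialIterator
        rows.close();
    }
}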

Example 3 with QueryPath

Use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd.

In the class HintedHandOffManager, the deliverHintsToEndpointInternal method:

private void deliverHintsToEndpointInternal(InetAddress endpoint) throws IOException, DigestMismatchException, InvalidRequestException, InterruptedException {
    ColumnFamilyStore hintStore = Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(HINTS_CF);
    if (hintStore.isEmpty())
        // nothing to do, don't confuse users by logging a no-op handoff
        return;
    logger_.debug("Checking remote({}) schema before delivering hints", endpoint);
    int waited;
    try {
        waited = waitForSchemaAgreement(endpoint);
    } catch (TimeoutException e) {
        return;
    }
    // sleep a random amount to stagger hint delivery across replicas
    // (if we had to wait, then gossiper randomness took care of that for us already)
    if (waited == 0) {
        // use a 'rounded' sleep interval because of a strange bug with windows: CASSANDRA-3375
        int sleep = FBUtilities.threadLocalRandom().nextInt(2000) * 30;
        logger_.debug("Sleeping {}ms to stagger hint delivery", sleep);
        Thread.sleep(sleep);
    }
    if (!FailureDetector.instance.isAlive(endpoint)) {
        logger_.info("Endpoint {} died before hint delivery, aborting", endpoint);
        return;
    }
    // 1. Get the key of the endpoint we need to handoff
    // 2. For each column, deserialize the mutation and send it to the endpoint
    // 3. Delete the subcolumn if the write was successful
    // 4. Force a flush
    // 5. Do major compaction to clean up all deletes etc.
    // find the hints for the node using its token.
    Token<?> token = StorageService.instance.getTokenMetadata().getToken(endpoint);
    logger_.info("Started hinted handoff for token: {} with IP: {}", token, endpoint);
    ByteBuffer tokenBytes = StorageService.getPartitioner().getTokenFactory().toByteArray(token);
    DecoratedKey<?> epkey = StorageService.getPartitioner().decorateKey(tokenBytes);
    int rowsReplayed = 0;
    ByteBuffer startColumn = ByteBufferUtil.EMPTY_BYTE_BUFFER;
    int pageSize = PAGE_SIZE;
    // read less columns (mutations) per page if they are very large
    if (hintStore.getMeanColumns() > 0) {
        int averageColumnSize = (int) (hintStore.getMeanRowSize() / hintStore.getMeanColumns());
        pageSize = Math.min(PAGE_SIZE, DatabaseDescriptor.getInMemoryCompactionLimit() / averageColumnSize);
        // page size of 1 does not allow actual paging b/c of >= behavior on startColumn
        pageSize = Math.max(2, pageSize);
        logger_.debug("average hinted-row column size is {}; using pageSize of {}", averageColumnSize, pageSize);
    }
    delivery: while (true) {
        QueryFilter filter = QueryFilter.getSliceFilter(epkey, new QueryPath(HINTS_CF), startColumn, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, pageSize);
        ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), Integer.MAX_VALUE);
        if (pagingFinished(hintsPage, startColumn))
            break;
        page: for (IColumn hint : hintsPage.getSortedColumns()) {
            startColumn = hint.name();
            for (IColumn subColumn : hint.getSubColumns()) {
                // both 0.8 and 1.0 column names are UTF8 strings, so this check is safe
                if (ByteBufferUtil.string(subColumn.name()).contains(SEPARATOR_08)) {
                    logger_.debug("0.8-style hint found.  This should have been taken care of by purgeIncompatibleHints");
                    deleteHint(tokenBytes, hint.name(), hint.maxTimestamp());
                    continue page;
                }
            }
            IColumn versionColumn = hint.getSubColumn(ByteBufferUtil.bytes("version"));
            IColumn tableColumn = hint.getSubColumn(ByteBufferUtil.bytes("table"));
            IColumn keyColumn = hint.getSubColumn(ByteBufferUtil.bytes("key"));
            IColumn mutationColumn = hint.getSubColumn(ByteBufferUtil.bytes("mutation"));
            assert versionColumn != null;
            assert tableColumn != null;
            assert keyColumn != null;
            assert mutationColumn != null;
            DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(mutationColumn.value()));
            RowMutation rm = RowMutation.serializer().deserialize(in, ByteBufferUtil.toInt(versionColumn.value()));
            try {
                sendMutation(endpoint, rm);
                deleteHint(tokenBytes, hint.name(), hint.maxTimestamp());
                rowsReplayed++;
            } catch (TimeoutException e) {
                logger_.info(String.format("Timed out replaying hints to %s; aborting further deliveries", endpoint));
                break delivery;
            }
        }
    }
    if (rowsReplayed > 0) {
        try {
            hintStore.forceBlockingFlush();
            CompactionManager.instance.submitMaximal(hintStore, Integer.MAX_VALUE).get();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    logger_.info(String.format("Finished hinted handoff of %s rows to endpoint %s", rowsReplayed, endpoint));
}
Also used : DataInputStream(java.io.DataInputStream) ByteBuffer(java.nio.ByteBuffer) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) QueryPath(org.apache.cassandra.db.filter.QueryPath) NamesQueryFilter(org.apache.cassandra.db.filter.NamesQueryFilter) QueryFilter(org.apache.cassandra.db.filter.QueryFilter)
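
The delivery loop above is the usual wide-row paging pattern: build a slice filter that starts at the last column seen, read at most pageSize columns, advance startColumn, and stop once a page comes back empty. A hedged sketch of a single page read using the same calls (the store, key, and column-family name are stand-ins for hintStore, epkey, and HINTS_CF):

ColumnFamily readPage(ColumnFamilyStore store, DecoratedKey key, String cfName, ByteBuffer startColumn, int pageSize) {
    // slice from startColumn to the end of the row, ascending, returning at most pageSize columns
    QueryFilter filter = QueryFilter.getSliceFilter(key, new QueryPath(cfName), startColumn, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, pageSize);
    return store.getColumnFamily(filter);
}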

Example 4 with QueryPath

Use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd.

In the class HintedHandOffManager, the deleteHintsForEndpoint method:

public void deleteHintsForEndpoint(final InetAddress endpoint) {
    if (!StorageService.instance.getTokenMetadata().isMember(endpoint))
        return;
    Token<?> token = StorageService.instance.getTokenMetadata().getToken(endpoint);
    ByteBuffer tokenBytes = StorageService.getPartitioner().getTokenFactory().toByteArray(token);
    final ColumnFamilyStore hintStore = Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(HINTS_CF);
    final RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, tokenBytes);
    rm.delete(new QueryPath(HINTS_CF), System.currentTimeMillis());
    // execute asynchronously to avoid blocking caller (which may be processing gossip)
    Runnable runnable = new Runnable() {

        public void run() {
            try {
                logger_.info("Deleting any stored hints for " + endpoint);
                rm.apply();
                hintStore.forceBlockingFlush();
                CompactionManager.instance.submitMaximal(hintStore, Integer.MAX_VALUE);
            } catch (Exception e) {
                logger_.warn("Could not delete hints for " + endpoint + ": " + e);
            }
        }
    };
    StorageService.optionalTasks.execute(runnable);
}
Also used : QueryPath(org.apache.cassandra.db.filter.QueryPath) WrappedRunnable(org.apache.cassandra.utils.WrappedRunnable) ByteBuffer(java.nio.ByteBuffer) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException)
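
Here the QueryPath names only the column family, so the delete tombstones the endpoint's entire hints row. The same RowMutation.delete call also takes a fully qualified path when only a single column should be removed; a minimal sketch, with the keyspace, column-family, and column names as placeholders:

void deleteOneColumn(ByteBuffer key) throws IOException {
    RowMutation rm = new RowMutation("Keyspace1", key);
    // a path down to one column limits the tombstone to that column instead of the whole row
    rm.delete(new QueryPath("Standard2", null, ByteBufferUtil.bytes("0")), System.currentTimeMillis());
    rm.apply();
}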

Example 5 with QueryPath

Use of org.apache.cassandra.db.filter.QueryPath in project eiger by wlloyd.

In the class SSTableReaderTest, the testGetPositionsForRanges method:

@Test
public void testGetPositionsForRanges() throws IOException, ExecutionException, InterruptedException {
    Table table = Table.open("Keyspace1");
    ColumnFamilyStore store = table.getColumnFamilyStore("Standard2");
    // insert data and compact to a single sstable
    CompactionManager.instance.disableAutoCompaction();
    for (int j = 0; j < 10; j++) {
        ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
        RowMutation rm = new RowMutation("Keyspace1", key);
        rm.add(new QueryPath("Standard2", null, ByteBufferUtil.bytes("0")), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
        rm.apply();
    }
    store.forceBlockingFlush();
    CompactionManager.instance.performMaximal(store);
    List<Range<Token>> ranges = new ArrayList<Range<Token>>();
    // 1 key
    ranges.add(new Range<Token>(t(0), t(1)));
    // 2 keys
    ranges.add(new Range<Token>(t(2), t(4)));
    // wrapping range from key to end
    ranges.add(new Range<Token>(t(6), StorageService.getPartitioner().getMinimumToken()));
    // empty range (should be ignored)
    ranges.add(new Range<Token>(t(9), t(91)));
    // confirm that positions increase continuously
    SSTableReader sstable = store.getSSTables().iterator().next();
    long previous = -1;
    for (Pair<Long, Long> section : sstable.getPositionsForRanges(ranges)) {
        assert previous <= section.left : previous + " ! < " + section.left;
        assert section.left < section.right : section.left + " ! < " + section.right;
        previous = section.right;
    }
}
Also used : ArrayList(java.util.ArrayList) Token(org.apache.cassandra.dht.Token) Range(org.apache.cassandra.dht.Range) ByteBuffer(java.nio.ByteBuffer) QueryPath(org.apache.cassandra.db.filter.QueryPath) Test(org.junit.Test)

Aggregations

QueryPath (org.apache.cassandra.db.filter.QueryPath) 127
Test (org.junit.Test) 67
ByteBuffer (java.nio.ByteBuffer) 40
QueryFilter (org.apache.cassandra.db.filter.QueryFilter) 22
ColumnFamily (org.apache.cassandra.db.ColumnFamily) 14
RowMutation (org.apache.cassandra.db.RowMutation) 12
File (java.io.File) 10
SSTableReader (org.apache.cassandra.io.sstable.SSTableReader) 10
IOException (java.io.IOException) 8
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore) 8
DecoratedKey (org.apache.cassandra.db.DecoratedKey) 8
Table (org.apache.cassandra.db.Table) 8
SSTableUtils.tempSSTableFile (org.apache.cassandra.io.sstable.SSTableUtils.tempSSTableFile) 8
WrappedRunnable (org.apache.cassandra.utils.WrappedRunnable) 8
ArrayList (java.util.ArrayList) 5
IColumn (org.apache.cassandra.db.IColumn) 5
PrintStream (java.io.PrintStream) 4
UnknownHostException (java.net.UnknownHostException) 4
HashSet (java.util.HashSet) 4
DropColumnFamily (org.apache.cassandra.db.migration.DropColumnFamily) 4