Search in sources:

Example 1 with QueryFilter

Use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

From the class ColumnFamilyStore, the method filter.

public List<Row> filter(AbstractScanIterator rowIterator, ExtendedFilter filter) {
    List<Row> rows = new ArrayList<Row>();
    int columnsCount = 0;
    try {
        while (rowIterator.hasNext() && rows.size() < filter.maxRows() && columnsCount < filter.maxColumns()) {
            // get the raw columns requested, and additional columns for the expressions if necessary
            Row rawRow = rowIterator.next();
            ColumnFamily data = rawRow.cf;
            // fetch any additional columns the filter expressions need beyond what the scan returned
            IFilter extraFilter = filter.getExtraFilter(data);
            if (extraFilter != null) {
                QueryPath path = new QueryPath(columnFamily);
                ColumnFamily cf = filter.cfs.getColumnFamily(new QueryFilter(rawRow.key, path, extraFilter));
                if (cf != null)
                    data.addAll(cf, HeapAllocator.instance);
            }
            if (!filter.isSatisfiedBy(data))
                continue;
            logger.debug("{} satisfies all filter expressions", data);
            // cut the resultset back to what was requested, if necessary
            data = filter.prune(data);
            rows.add(new Row(rawRow.key, data));
            if (data != null)
                columnsCount += data.getLiveColumnCount();
            // Update the underlying filter to avoid querying more columns per slice than necessary
            filter.updateColumnsLimit(columnsCount);
        }
        return rows;
    } finally {
        try {
            rowIterator.close();
        } catch (IOException e) {
            throw new IOError(e);
        }
    }
}
Also used: QueryPath(org.apache.cassandra.db.filter.QueryPath) QueryFilter(org.apache.cassandra.db.filter.QueryFilter) IFilter(org.apache.cassandra.db.filter.IFilter)
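
The extra-filter branch above issues a second, targeted read through a QueryFilter when the scanned row lacks the columns its filter expressions need. Below is a minimal sketch of that kind of single-row read, assuming the Cassandra 1.0-era API that eiger uses; the class name ExtraFetchSketch and the keyspace/column-family/key arguments are hypothetical, and an identity filter stands in for the extra filter that getExtraFilter builds.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.service.StorageService;

public class ExtraFetchSketch {
    /** Read every live column of one row, as filter() does when an extra filter is needed. */
    public static ColumnFamily fetchWholeRow(String keyspace, String cfName, ByteBuffer key) throws IOException {
        ColumnFamilyStore cfs = Table.open(keyspace).getColumnFamilyStore(cfName);
        DecoratedKey<?> dk = StorageService.getPartitioner().decorateKey(key);
        // an identity filter asks for all columns of the row
        QueryFilter qf = QueryFilter.getIdentityFilter(dk, new QueryPath(cfName));
        // returns null if the row does not exist
        return cfs.getColumnFamily(qf);
    }
}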

Example 2 with QueryFilter

Use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

From the class ColumnFamilyStore, the method getSequentialIterator.

/**
 * Iterate over a range of rows and columns from memtables/sstables.
 *
 * @param superColumn optional SuperColumn to slice subcolumns of; null to slice top-level columns
 * @param range Either a Bounds, which includes start key, or a Range, which does not.
 * @param columnFilter description of the columns we're interested in for each row
 */
public AbstractScanIterator getSequentialIterator(ByteBuffer superColumn, final AbstractBounds<RowPosition> range, IFilter columnFilter) {
    assert range instanceof Bounds || !((Range) range).isWrapAround() || range.right.isMinimum() : range;
    final RowPosition startWith = range.left;
    final RowPosition stopAt = range.right;
    QueryFilter filter = new QueryFilter(null, new QueryPath(columnFamily, superColumn, null), columnFilter);
    List<Row> rows;
    final ViewFragment view = markReferenced(startWith, stopAt);
    try {
        final CloseableIterator<Row> iterator = RowIteratorFactory.getIterator(view.memtables, view.sstables, startWith, stopAt, filter, this);
        final int gcBefore = (int) (System.currentTimeMillis() / 1000) - metadata.getGcGraceSeconds();
        return new AbstractScanIterator() {

            boolean first = true;

            protected Row computeNext() {
                // pull a row out of the iterator
                if (!iterator.hasNext())
                    return endOfData();
                Row current = iterator.next();
                DecoratedKey key = current.key;
                if (!stopAt.isMinimum() && stopAt.compareTo(key) < 0)
                    return endOfData();
                // skip first one
                if (range instanceof Bounds || !first || !key.equals(startWith)) {
                    if (logger.isDebugEnabled())
                        logger.debug("scanned " + key);
                    // this is necessary because when we collate supercolumns together, we don't check
                    // their subcolumns for relevance, so we need to do a second prune post facto here.
                    return current.cf != null && current.cf.isSuper() ? new Row(current.key, removeDeleted(current.cf, gcBefore)) : current;
                }
                first = false;
                return computeNext();
            }

            public void close() throws IOException {
                SSTableReader.releaseReferences(view.sstables);
                try {
                    iterator.close();
                } catch (IOException e) {
                    throw new IOError(e);
                }
            }
        };
    } catch (RuntimeException e) {
        // in case getIterator() throws; otherwise the iterator's close method releases the references.
        SSTableReader.releaseReferences(view.sstables);
        throw e;
    }
}
Also used: QueryPath(org.apache.cassandra.db.filter.QueryPath) QueryFilter(org.apache.cassandra.db.filter.QueryFilter)
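
Callers of getSequentialIterator must close the returned iterator, since close() is what releases the sstable references taken by markReferenced. A minimal consumption sketch follows; it assumes AbstractScanIterator is the nested class of ColumnFamilyStore as in contemporary Cassandra, the import paths are assumptions, and the row-counting logic is purely illustrative.

import java.io.IOException;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.ColumnFamilyStore.AbstractScanIterator;
import org.apache.cassandra.db.Row;
import org.apache.cassandra.db.RowPosition;
import org.apache.cassandra.db.filter.IFilter;
import org.apache.cassandra.dht.AbstractBounds;

public class ScanSketch {
    /** Count the non-empty rows in a range; the try/finally mirrors filter() above. */
    public static int countRows(ColumnFamilyStore cfs,
                                AbstractBounds<RowPosition> range,
                                IFilter columnFilter) throws IOException {
        // a null superColumn slices top-level columns (see the javadoc above)
        AbstractScanIterator iter = cfs.getSequentialIterator(null, range, columnFilter);
        int rows = 0;
        try {
            while (iter.hasNext()) {
                Row row = iter.next();
                if (row.cf != null)
                    rows++;
            }
        } finally {
            iter.close(); // releases the sstable references taken by markReferenced()
        }
        return rows;
    }
}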

Example 3 with QueryFilter

Use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

From the class HintedHandOffManager, the method deliverHintsToEndpointInternal.

private void deliverHintsToEndpointInternal(InetAddress endpoint) throws IOException, DigestMismatchException, InvalidRequestException, InterruptedException {
    ColumnFamilyStore hintStore = Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(HINTS_CF);
    if (hintStore.isEmpty())
        // nothing to do, don't confuse users by logging a no-op handoff
        return;
    logger_.debug("Checking remote({}) schema before delivering hints", endpoint);
    int waited;
    try {
        waited = waitForSchemaAgreement(endpoint);
    } catch (TimeoutException e) {
        return;
    }
    // sleep a random amount to stagger hint delivery
    // (if we had to wait, then gossiper randomness took care of that for us already.)
    if (waited == 0) {
        // use a 'rounded' sleep interval because of a strange bug with windows: CASSANDRA-3375
        int sleep = FBUtilities.threadLocalRandom().nextInt(2000) * 30;
        logger_.debug("Sleeping {}ms to stagger hint delivery", sleep);
        Thread.sleep(sleep);
    }
    if (!FailureDetector.instance.isAlive(endpoint)) {
        logger_.info("Endpoint {} died before hint delivery, aborting", endpoint);
        return;
    }
    // 1. Get the key of the endpoint we need to handoff
    // 2. For each column, deserialize the mutation and send it to the endpoint
    // 3. Delete the subcolumn if the write was successful
    // 4. Force a flush
    // 5. Do major compaction to clean up all deletes etc.
    // find the hints for the node using its token.
    Token<?> token = StorageService.instance.getTokenMetadata().getToken(endpoint);
    logger_.info("Started hinted handoff for token: {} with IP: {}", token, endpoint);
    ByteBuffer tokenBytes = StorageService.getPartitioner().getTokenFactory().toByteArray(token);
    DecoratedKey<?> epkey = StorageService.getPartitioner().decorateKey(tokenBytes);
    int rowsReplayed = 0;
    ByteBuffer startColumn = ByteBufferUtil.EMPTY_BYTE_BUFFER;
    int pageSize = PAGE_SIZE;
    // read less columns (mutations) per page if they are very large
    if (hintStore.getMeanColumns() > 0) {
        int averageColumnSize = (int) (hintStore.getMeanRowSize() / hintStore.getMeanColumns());
        pageSize = Math.min(PAGE_SIZE, DatabaseDescriptor.getInMemoryCompactionLimit() / averageColumnSize);
        // page size of 1 does not allow actual paging b/c of >= behavior on startColumn
        pageSize = Math.max(2, pageSize);
        logger_.debug("average hinted-row column size is {}; using pageSize of {}", averageColumnSize, pageSize);
    }
    delivery: while (true) {
        QueryFilter filter = QueryFilter.getSliceFilter(epkey, new QueryPath(HINTS_CF), startColumn, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, pageSize);
        ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), Integer.MAX_VALUE);
        if (pagingFinished(hintsPage, startColumn))
            break;
        page: for (IColumn hint : hintsPage.getSortedColumns()) {
            startColumn = hint.name();
            for (IColumn subColumn : hint.getSubColumns()) {
                // both 0.8 and 1.0 column names are UTF8 strings, so this check is safe
                if (ByteBufferUtil.string(subColumn.name()).contains(SEPARATOR_08)) {
                    logger_.debug("0.8-style hint found.  This should have been taken care of by purgeIncompatibleHints");
                    deleteHint(tokenBytes, hint.name(), hint.maxTimestamp());
                    continue page;
                }
            }
            IColumn versionColumn = hint.getSubColumn(ByteBufferUtil.bytes("version"));
            IColumn tableColumn = hint.getSubColumn(ByteBufferUtil.bytes("table"));
            IColumn keyColumn = hint.getSubColumn(ByteBufferUtil.bytes("key"));
            IColumn mutationColumn = hint.getSubColumn(ByteBufferUtil.bytes("mutation"));
            assert versionColumn != null;
            assert tableColumn != null;
            assert keyColumn != null;
            assert mutationColumn != null;
            DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(mutationColumn.value()));
            RowMutation rm = RowMutation.serializer().deserialize(in, ByteBufferUtil.toInt(versionColumn.value()));
            try {
                sendMutation(endpoint, rm);
                deleteHint(tokenBytes, hint.name(), hint.maxTimestamp());
                rowsReplayed++;
            } catch (TimeoutException e) {
                logger_.info(String.format("Timed out replaying hints to %s; aborting further deliveries", endpoint));
                break delivery;
            }
        }
    }
    if (rowsReplayed > 0) {
        try {
            hintStore.forceBlockingFlush();
            CompactionManager.instance.submitMaximal(hintStore, Integer.MAX_VALUE).get();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    logger_.info(String.format("Finished hinted handoff of %s rows to endpoint %s", rowsReplayed, endpoint));
}
Also used: DataInputStream(java.io.DataInputStream) ByteBuffer(java.nio.ByteBuffer) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) QueryPath(org.apache.cassandra.db.filter.QueryPath) NamesQueryFilter(org.apache.cassandra.db.filter.NamesQueryFilter) QueryFilter(org.apache.cassandra.db.filter.QueryFilter)
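
The delivery loop pages through the hint row with successive slice filters, restarting each slice at the last column name seen. Because that start column is inclusive (the ">= behavior" noted in the code), a page size of 1 can never make progress, which is why pageSize is clamped to at least 2. Here is a minimal sketch of the same paging pattern, built only from the calls shown above; the class name PagingSketch and the processing placeholder are hypothetical.

import java.nio.ByteBuffer;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.utils.ByteBufferUtil;

public class PagingSketch {
    /** Visit every live column of one wide row, pageSize columns at a time. */
    public static void visitAllColumns(ColumnFamilyStore cfs, DecoratedKey<?> key,
                                       String cfName, int pageSize) {
        assert pageSize >= 2 : "a pageSize of 1 cannot advance past the inclusive start column";
        ByteBuffer startColumn = ByteBufferUtil.EMPTY_BYTE_BUFFER;
        while (true) {
            // slice from the last column seen (inclusive) to the end of the row
            QueryFilter filter = QueryFilter.getSliceFilter(key, new QueryPath(cfName),
                    startColumn, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, pageSize);
            ColumnFamily page = ColumnFamilyStore.removeDeleted(cfs.getColumnFamily(filter),
                    Integer.MAX_VALUE);
            if (page == null || page.getSortedColumns().isEmpty())
                break;
            boolean progressed = false;
            for (IColumn column : page.getSortedColumns()) {
                if (column.name().equals(startColumn))
                    continue; // the inclusive start column repeats from the previous page
                startColumn = column.name();
                progressed = true;
                // ... process the column here ...
            }
            if (!progressed)
                break; // the page held only the start column: we are done
        }
    }
}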

Example 4 with QueryFilter

Use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

From the class SSTableImportTest, the method testImportSimpleCf.

@Test
public void testImportSimpleCf() throws IOException, URISyntaxException {
    // Import JSON to temp SSTable file
    String jsonUrl = resourcePath("SimpleCF.json");
    File tempSS = tempSSTableFile("Keyspace1", "Standard1");
    SSTableImport.importJson(jsonUrl, "Keyspace1", "Standard1", tempSS.getPath());
    // Verify results
    SSTableReader reader = SSTableReader.open(Descriptor.fromFilename(tempSS.getPath()));
    QueryFilter qf = QueryFilter.getIdentityFilter(Util.dk("rowA"), new QueryPath("Standard1"));
    IColumnIterator iter = qf.getSSTableColumnIterator(reader);
    ColumnFamily cf = iter.getColumnFamily();
    while (iter.hasNext()) cf.addColumn(iter.next());
    assert cf.getColumn(ByteBufferUtil.bytes("colAA")).value().equals(hexToBytes("76616c4141"));
    assert !(cf.getColumn(ByteBufferUtil.bytes("colAA")) instanceof DeletedColumn);
    IColumn expCol = cf.getColumn(ByteBufferUtil.bytes("colAC"));
    assert expCol.value().equals(hexToBytes("76616c4143"));
    assert expCol instanceof ExpiringColumn;
    assert ((ExpiringColumn) expCol).getTimeToLive() == 42 && expCol.getLocalDeletionTime() == 2000000000;
}
Also used: QueryPath(org.apache.cassandra.db.filter.QueryPath) SSTableReader(org.apache.cassandra.io.sstable.SSTableReader) QueryFilter(org.apache.cassandra.db.filter.QueryFilter) IColumn(org.apache.cassandra.db.IColumn) ExpiringColumn(org.apache.cassandra.db.ExpiringColumn) IColumnIterator(org.apache.cassandra.db.columniterator.IColumnIterator) DeletedColumn(org.apache.cassandra.db.DeletedColumn) File(java.io.File) SSTableUtils.tempSSTableFile(org.apache.cassandra.io.sstable.SSTableUtils.tempSSTableFile) ColumnFamily(org.apache.cassandra.db.ColumnFamily) Test(org.junit.Test)
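
The read-back half of this test is a reusable pattern: an identity filter plus getSSTableColumnIterator materializes one row straight from an sstable file, bypassing the memtable path. A minimal sketch factored out of the test, using only the calls shown above (the class name and the sstable-path argument are hypothetical):

import java.io.IOException;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.columniterator.IColumnIterator;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.io.sstable.Descriptor;
import org.apache.cassandra.io.sstable.SSTableReader;

public class SSTableRowSketch {
    /** Materialize one row directly from an sstable file, as the test above does. */
    public static ColumnFamily readRow(String sstablePath, String cfName, DecoratedKey<?> key)
            throws IOException {
        SSTableReader reader = SSTableReader.open(Descriptor.fromFilename(sstablePath));
        QueryFilter qf = QueryFilter.getIdentityFilter(key, new QueryPath(cfName));
        IColumnIterator iter = qf.getSSTableColumnIterator(reader);
        ColumnFamily cf = iter.getColumnFamily(); // row-level metadata, no columns yet
        while (iter.hasNext())
            cf.addColumn(iter.next()); // pull each column into the container
        return cf;
    }
}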

Example 5 with QueryFilter

Use of org.apache.cassandra.db.filter.QueryFilter in project eiger by wlloyd.

From the class SystemTable, the method checkHealth.

/**
 * One of three things will happen if you try to read the system table:
 * 1. files are present and you can read them: great
 * 2. no files are there: great (new node is assumed)
 * 3. files are present but you can't read them: bad
 * @throws ConfigurationException
 */
public static void checkHealth() throws ConfigurationException, IOException {
    Table table = null;
    try {
        table = Table.open(Table.SYSTEM_TABLE);
    } catch (AssertionError err) {
        // this happens when a user switches from OPP to RP.
        ConfigurationException ex = new ConfigurationException("Could not read system table!");
        ex.initCause(err);
        throw ex;
    }
    SortedSet<ByteBuffer> cols = new TreeSet<ByteBuffer>(BytesType.instance);
    cols.add(CLUSTERNAME);
    QueryFilter filter = QueryFilter.getNamesFilter(decorate(LOCATION_KEY), new QueryPath(STATUS_CF), cols);
    ColumnFamily cf = table.getColumnFamilyStore(STATUS_CF).getColumnFamily(filter);
    if (cf == null) {
        // this is a brand new node
        ColumnFamilyStore cfs = table.getColumnFamilyStore(STATUS_CF);
        if (!cfs.getSSTables().isEmpty())
            throw new ConfigurationException("Found system table files, but they couldn't be loaded!");
        // no system files.  this is a new node.
        RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, LOCATION_KEY);
        cf = ColumnFamily.create(Table.SYSTEM_TABLE, SystemTable.STATUS_CF);
        cf.addColumn(new Column(CLUSTERNAME, ByteBufferUtil.bytes(DatabaseDescriptor.getClusterName()), LamportClock.getVersion()));
        rm.add(cf);
        rm.apply();
        return;
    }
    IColumn clusterCol = cf.getColumn(CLUSTERNAME);
    assert clusterCol != null;
    String savedClusterName = ByteBufferUtil.string(clusterCol.value());
    if (!DatabaseDescriptor.getClusterName().equals(savedClusterName))
        throw new ConfigurationException("Saved cluster name " + savedClusterName + " != configured name " + DatabaseDescriptor.getClusterName());
}
Also used: ByteBuffer(java.nio.ByteBuffer) QueryPath(org.apache.cassandra.db.filter.QueryPath) QueryFilter(org.apache.cassandra.db.filter.QueryFilter) ConfigurationException(org.apache.cassandra.config.ConfigurationException)
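
Of the QueryFilter factory methods in these examples, getNamesFilter is the most targeted, fetching only the named columns. A minimal sketch of the same single-column read, assuming a BytesType column comparator as STATUS_CF has; the class name and all arguments are hypothetical parameters:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.BytesType;
import org.apache.cassandra.service.StorageService;

public class NamesFilterSketch {
    /** Fetch a single named column, or null if the row or column is absent. */
    public static IColumn readColumn(String keyspace, String cfName,
                                     ByteBuffer key, ByteBuffer columnName) throws IOException {
        // the TreeSet comparator must match the column family's column comparator
        SortedSet<ByteBuffer> cols = new TreeSet<ByteBuffer>(BytesType.instance);
        cols.add(columnName);
        DecoratedKey<?> dk = StorageService.getPartitioner().decorateKey(key);
        QueryFilter filter = QueryFilter.getNamesFilter(dk, new QueryPath(cfName), cols);
        ColumnFamily cf = Table.open(keyspace).getColumnFamilyStore(cfName).getColumnFamily(filter);
        return cf == null ? null : cf.getColumn(columnName);
    }
}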

Aggregations

QueryFilter (org.apache.cassandra.db.filter.QueryFilter): 26 uses
QueryPath (org.apache.cassandra.db.filter.QueryPath): 22 uses
ByteBuffer (java.nio.ByteBuffer): 7 uses
SSTableReader (org.apache.cassandra.io.sstable.SSTableReader): 7 uses
ColumnFamily (org.apache.cassandra.db.ColumnFamily): 6 uses
File (java.io.File): 5 uses
IColumnIterator (org.apache.cassandra.db.columniterator.IColumnIterator): 5 uses
SSTableUtils.tempSSTableFile (org.apache.cassandra.io.sstable.SSTableUtils.tempSSTableFile): 5 uses
Test (org.junit.Test): 5 uses
IColumn (org.apache.cassandra.db.IColumn): 4 uses
IOException (java.io.IOException): 3 uses
UnknownHostException (java.net.UnknownHostException): 3 uses
CloseableIterator (org.apache.cassandra.utils.CloseableIterator): 3 uses
ArrayList (java.util.ArrayList): 2 uses
ConfigurationException (org.apache.cassandra.config.ConfigurationException): 2 uses
DeletedColumn (org.apache.cassandra.db.DeletedColumn): 2 uses
ExpiringColumn (org.apache.cassandra.db.ExpiringColumn): 2 uses
NamesQueryFilter (org.apache.cassandra.db.filter.NamesQueryFilter): 2 uses
SliceQueryFilter (org.apache.cassandra.db.filter.SliceQueryFilter): 2 uses
Closeable (java.io.Closeable): 1 use