Search in sources:

Example 1 with ColumnParent

Use of org.apache.cassandra.thrift.ColumnParent in project scale7-pelops by s7.

In the class Selector, the method newColumnParent:

private static ColumnParent newColumnParent(String columnFamily, Bytes superColName) {
    ColumnParent parent = new ColumnParent(columnFamily);
    parent.setSuper_column(nullSafeGet(superColName));
    return parent;
}
Also used: ColumnParent (org.apache.cassandra.thrift.ColumnParent)
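
A ColumnParent built this way just names the column family and, optionally, the super column inside it. Below is a minimal sketch using the raw Thrift types directly; the "Products" and "ByCategory" names are invented for illustration.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.cassandra.thrift.ColumnParent;

// Sketch only: "Products" and "ByCategory" are invented names.
static ColumnParent exampleParent() {
    // With no super column set, the parent addresses the whole column family.
    ColumnParent parent = new ColumnParent("Products");
    // Setting the super column narrows it to the sub-columns of that one super column.
    parent.setSuper_column(ByteBuffer.wrap("ByCategory".getBytes(StandardCharsets.UTF_8)));
    return parent;
}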

Example 2 with ColumnParent

Use of org.apache.cassandra.thrift.ColumnParent in project scale7-pelops by s7.

In the class Selector, the method getSuperColumnsFromRowsUtf8Keys:

/**
 * Retrieve super columns from a set of rows.
 * @param columnFamily  The column family containing the rows
 * @param rowKeys       The keys of the rows containing the super columns
 * @param colPredicate  The super column selector predicate
 * @param cLevel        The Cassandra consistency level with which to perform the operation
 * @return              A map from row keys to the matching lists of super columns
 * @throws PelopsException if an error occurs
 */
public LinkedHashMap<String, List<SuperColumn>> getSuperColumnsFromRowsUtf8Keys(String columnFamily, List<String> rowKeys, SlicePredicate colPredicate, ConsistencyLevel cLevel) throws PelopsException {
    ColumnParent columnParent = newColumnParent(columnFamily);
    List<ByteBuffer> keys = Bytes.transformUTF8ToList(validateRowKeysUtf8(rowKeys));
    return transformUtf8(getColumnOrSuperColumnsFromRows(columnParent, keys, colPredicate, cLevel), rowKeys, keys, SUPER_COLUMN);
}
Also used: ColumnParent (org.apache.cassandra.thrift.ColumnParent), ByteBuffer (java.nio.ByteBuffer), Bytes.fromByteBuffer (org.scale7.cassandra.pelops.Bytes.fromByteBuffer)
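
Calling this method from application code might look roughly like the sketch below. The pool name, column family, and row keys are invented, and the sketch assumes the usual Pelops entry points Pelops.createSelector and Selector.newColumnsPredicateAll for obtaining a Selector and an "all columns" predicate.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.SuperColumn;
import org.scale7.cassandra.pelops.Pelops;
import org.scale7.cassandra.pelops.Selector;

// Sketch only: pool name, column family and row keys are invented.
static LinkedHashMap<String, List<SuperColumn>> fetchTimelines() {
    Selector selector = Pelops.createSelector("myPool");
    return selector.getSuperColumnsFromRowsUtf8Keys(
            // column family whose rows hold super columns
            "UserTimelines",
            // UTF-8 row keys, one per user
            Arrays.asList("alice", "bob"),
            // every super column in each row, capped at 100
            Selector.newColumnsPredicateAll(false, 100),
            ConsistencyLevel.ONE);
}

The LinkedHashMap return type suggests the results come back keyed in the same order the row keys were requested.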

Example 3 with ColumnParent

Use of org.apache.cassandra.thrift.ColumnParent in project scale7-pelops by s7.

In the class Selector, the method getSuperColumnsFromRows:

/**
 * Retrieve super columns from a set of rows.
 * @param columnFamily  The column family containing the rows
 * @param rowKeys       The keys of the rows containing the super columns
 * @param colPredicate  The super column selector predicate
 * @param cLevel        The Cassandra consistency level with which to perform the operation
 * @return              A map from row keys to the matching lists of super columns
 * @throws PelopsException if an error occurs
 */
public LinkedHashMap<Bytes, List<SuperColumn>> getSuperColumnsFromRows(String columnFamily, List<Bytes> rowKeys, SlicePredicate colPredicate, ConsistencyLevel cLevel) throws PelopsException {
    ColumnParent columnParent = newColumnParent(columnFamily);
    List<ByteBuffer> keys = Bytes.transformBytesToList(validateRowKeys(rowKeys));
    return transform(getColumnOrSuperColumnsFromRows(columnParent, keys, colPredicate, cLevel), rowKeys, SUPER_COLUMN);
}
Also used: ColumnParent (org.apache.cassandra.thrift.ColumnParent), ByteBuffer (java.nio.ByteBuffer), Bytes.fromByteBuffer (org.scale7.cassandra.pelops.Bytes.fromByteBuffer)
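
The only difference from the UTF-8 variant above is that the row keys are passed, and the result map is keyed, as Pelops Bytes values, which suits keys that are not text. A hedged sketch under the same invented pool and column family names:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.SuperColumn;
import org.scale7.cassandra.pelops.Bytes;
import org.scale7.cassandra.pelops.Pelops;
import org.scale7.cassandra.pelops.Selector;

// Sketch only: pool name, column family and row keys are invented.
static LinkedHashMap<Bytes, List<SuperColumn>> fetchByBinaryKeys() {
    Selector selector = Pelops.createSelector("myPool");
    // Any byte sequence works as a key; UTF-8 strings are used here only for readability.
    List<Bytes> keys = Arrays.asList(Bytes.fromUTF8("alice"), Bytes.fromUTF8("bob"));
    return selector.getSuperColumnsFromRows(
            "UserTimelines",
            keys,
            Selector.newColumnsPredicateAll(false, 100),
            ConsistencyLevel.ONE);
}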

Example 4 with ColumnParent

Use of org.apache.cassandra.thrift.ColumnParent in project eiger by wlloyd.

In the class KeyCacheTest, the method testKeyCache:

@Test
public void testKeyCache() throws IOException, ExecutionException, InterruptedException {
    CompactionManager.instance.disableAutoCompaction();
    Table table = Table.open(TABLE1);
    ColumnFamilyStore cfs = table.getColumnFamilyStore(COLUMN_FAMILY1);
    // just to make sure that everything is clean
    CacheService.instance.invalidateKeyCache();
    // KeyCache should start at size 0 if we're caching X% of zero data.
    int keyCacheSize = CacheService.instance.keyCache.size();
    assert keyCacheSize == 0 : keyCacheSize;
    DecoratedKey key1 = Util.dk("key1");
    DecoratedKey key2 = Util.dk("key2");
    RowMutation rm;
    // inserts
    rm = new RowMutation(TABLE1, key1.key);
    rm.add(new QueryPath(COLUMN_FAMILY1, null, ByteBufferUtil.bytes("1")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    rm.apply();
    rm = new RowMutation(TABLE1, key2.key);
    rm.add(new QueryPath(COLUMN_FAMILY1, null, ByteBufferUtil.bytes("2")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    rm.apply();
    // to make sure we have SSTable
    cfs.forceBlockingFlush();
    // reads to cache key position
    cfs.getColumnFamily(QueryFilter.getSliceFilter(key1, new QueryPath(new ColumnParent(COLUMN_FAMILY1)), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 10));
    cfs.getColumnFamily(QueryFilter.getSliceFilter(key2, new QueryPath(new ColumnParent(COLUMN_FAMILY1)), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 10));
    assert CacheService.instance.keyCache.size() == 2;
    Util.compactAll(cfs).get();
    keyCacheSize = CacheService.instance.keyCache.size();
    // new SSTables, if we had 2 keys in cache previously it should become 4
    assert keyCacheSize == 4 : keyCacheSize;
    // re-read same keys to verify that key cache didn't grow further
    cfs.getColumnFamily(QueryFilter.getSliceFilter(key1, new QueryPath(new ColumnParent(COLUMN_FAMILY1)), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 10));
    cfs.getColumnFamily(QueryFilter.getSliceFilter(key2, new QueryPath(new ColumnParent(COLUMN_FAMILY1)), ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 10));
    assert CacheService.instance.keyCache.size() == 4;
}
Also used: QueryPath (org.apache.cassandra.db.filter.QueryPath), ColumnParent (org.apache.cassandra.thrift.ColumnParent), Test (org.junit.Test)
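
The test above drives ColumnParent through Cassandra's internal QueryFilter/QueryPath read path; on the client side, the same Thrift struct is what a raw get_slice call takes to name its target column family. A minimal sketch against the Thrift interface, with the host, keyspace, column family, and row key all invented for illustration:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.List;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;

// Sketch only: keyspace, column family, host and key are invented.
static List<ColumnOrSuperColumn> sliceRow() throws Exception {
    TFramedTransport transport = new TFramedTransport(new TSocket("localhost", 9160));
    Cassandra.Client client = new Cassandra.Client(new TBinaryProtocol(transport));
    transport.open();
    client.set_keyspace("Keyspace1");

    // The ColumnParent scopes the read to one column family.
    ColumnParent parent = new ColumnParent("Standard1");
    // Empty start/finish plus a count gives "the first 10 columns of the row".
    SlicePredicate predicate = new SlicePredicate().setSlice_range(
            new SliceRange(ByteBuffer.allocate(0), ByteBuffer.allocate(0), false, 10));

    ByteBuffer key = ByteBuffer.wrap("key1".getBytes(StandardCharsets.UTF_8));
    try {
        return client.get_slice(key, parent, predicate, ConsistencyLevel.ONE);
    } finally {
        transport.close();
    }
}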

Aggregations

ColumnParent (org.apache.cassandra.thrift.ColumnParent): 4 uses
ByteBuffer (java.nio.ByteBuffer): 2 uses
Bytes.fromByteBuffer (org.scale7.cassandra.pelops.Bytes.fromByteBuffer): 2 uses
QueryPath (org.apache.cassandra.db.filter.QueryPath): 1 use
Test (org.junit.Test): 1 use