Example 1 with CachedPartition

Use of org.apache.cassandra.db.partitions.CachedPartition in project cassandra by apache.

From class RowCacheTest, method testRowCacheRange. The test verifies that only the first 100 rows of a partition are kept in the row cache and that reads past that boundary are counted as out-of-range hits.

@Test
public void testRowCacheRange() {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    String cf = "CachedIntCF";
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(cf);
    long startRowCacheHits = cachedStore.metric.rowCacheHit.getCount();
    long startRowCacheOutOfRange = cachedStore.metric.rowCacheHitOutOfRange.getCount();
    // empty the row cache
    CacheService.instance.invalidateRowCache();
    // set global row cache size to 1 MiB
    CacheService.instance.setRowCacheCapacityInMB(1);
    ByteBuffer key = ByteBufferUtil.bytes("rowcachekey");
    DecoratedKey dk = cachedStore.decorateKey(key);
    RowCacheKey rck = new RowCacheKey(cachedStore.metadata(), dk);
    String[] values = new String[200];
    for (int i = 0; i < 200; i++) {
        RowUpdateBuilder rub = new RowUpdateBuilder(cachedStore.metadata(), System.currentTimeMillis(), key);
        rub.clustering(String.valueOf(i));
        values[i] = "val" + i;
        rub.add("val", ByteBufferUtil.bytes(values[i]));
        rub.build().applyUnsafe();
    }
    Arrays.sort(values);
    // populate the row cache; the first read should not produce a row cache hit
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(10).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    // query again with the same limit of 10; 10 is less than the 100 rows we cache, so this should be a hit, and in range
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(10).build());
    assertEquals(++startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());
    // get a slice from "210" (inclusive) to "215" (exclusive); clusterings are strings and sort lexically,
    // so this range lies beyond the 100 cached rows: no hit, and the out-of-range counter grows
    Util.getAll(Util.cmd(cachedStore, dk).fromIncl(String.valueOf(210)).toExcl(String.valueOf(215)).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(++startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());
    // get a slice with limit 101, one more than the 100 cached rows; the cache cannot satisfy it, so it counts as out of range
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(101).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(++startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());
    CacheService.instance.invalidateRowCache();
    // repopulate the row cache with a limit greater than the rows-to-cache setting; the first 100 rows should still be cached
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(105).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    // validate the contents of the cache
    CachedPartition cachedCf = (CachedPartition) CacheService.instance.rowCache.get(rck);
    assertEquals(100, cachedCf.rowCount());
    int i = 0;
    for (Unfiltered unfiltered : Util.once(cachedCf.unfilteredIterator(ColumnFilter.selection(cachedCf.columns()), Slices.ALL, false))) {
        Row r = (Row) unfiltered;
        ValueAccessors.assertDataEquals(r.clustering().get(0), ByteBufferUtil.bytes(values[i].substring(3)));
        for (ColumnData c : r) {
            assertEquals(ByteBufferUtil.bytes(values[i]), ((Cell<?>) c).buffer());
        }
        i++;
    }
    cachedStore.truncateBlocking();
}
Also used: CachedPartition (org.apache.cassandra.db.partitions.CachedPartition), ByteBuffer (java.nio.ByteBuffer), RowCacheKey (org.apache.cassandra.cache.RowCacheKey), Test (org.junit.Test)
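
The hit-accounting idiom this test repeats can be distilled to a few lines. A minimal sketch reusing only names from the example above (cachedStore, dk, and the Util helpers), assuming the same fixtures are in scope:

// Start from an empty cache so the counts are deterministic.
CacheService.instance.invalidateRowCache();
long hitsBefore = cachedStore.metric.rowCacheHit.getCount();
// The first read populates the row cache: no hit is recorded.
Util.getAll(Util.cmd(cachedStore, dk).withLimit(10).build());
assertEquals(hitsBefore, cachedStore.metric.rowCacheHit.getCount());
// An identical second read is served from the cache: exactly one hit.
Util.getAll(Util.cmd(cachedStore, dk).withLimit(10).build());
assertEquals(hitsBefore + 1, cachedStore.metric.rowCacheHit.getCount());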

Example 2 with CachedPartition

Use of org.apache.cassandra.db.partitions.CachedPartition in project cassandra by apache.

From class RowCacheTest, method testRowCache. The test fills the row cache by reading keys one at a time, verifies each cached cell, and finally evicts entries explicitly.

@Test
public void testRowCache() throws Exception {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(CF_CACHED);
    // empty the row cache
    CacheService.instance.invalidateRowCache();
    // set global row cache size to 1 MiB
    CacheService.instance.setRowCacheCapacityInMB(1);
    // insert 100 rows into the cached column family
    SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHED, 0, 100);
    // now read the rows one by one, checking that the row cache grows
    for (int i = 0; i < 100; i++) {
        DecoratedKey key = Util.dk("key" + i);
        Util.getAll(Util.cmd(cachedStore, key).build());
        assert CacheService.instance.rowCache.size() == i + 1;
        // current key should be stored in the cache
        assert cachedStore.containsCachedParition(key);
        // check that the cell reads back correctly from the cache
        CachedPartition cp = cachedStore.getRawCachedPartition(key);
        try (UnfilteredRowIterator ai = cp.unfilteredIterator(ColumnFilter.selection(cp.columns()), Slices.ALL, false)) {
            assert ai.hasNext();
            Row r = (Row) ai.next();
            assertFalse(ai.hasNext());
            Iterator<Cell<?>> ci = r.cells().iterator();
            assert (ci.hasNext());
            Cell<?> cell = ci.next();
            assert cell.column().name.bytes.equals(ByteBufferUtil.bytes("val"));
            assert cell.buffer().equals(ByteBufferUtil.bytes("val" + i));
        }
    }
    // insert 10 more keys
    SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHED, 100, 10);
    for (int i = 100; i < 110; i++) {
        DecoratedKey key = Util.dk("key" + i);
        Util.getAll(Util.cmd(cachedStore, key).build());
        // cache should be populated with the latest rows read (old ones should be popped)
        assert cachedStore.containsCachedParition(key);
        // check that the cell reads back correctly from the cache
        CachedPartition cp = cachedStore.getRawCachedPartition(key);
        try (UnfilteredRowIterator ai = cp.unfilteredIterator(ColumnFilter.selection(cp.columns()), Slices.ALL, false)) {
            assert ai.hasNext();
            Row r = (Row) ai.next();
            assertFalse(ai.hasNext());
            Iterator<Cell<?>> ci = r.cells().iterator();
            assert (ci.hasNext());
            Cell<?> cell = ci.next();
            assert cell.column().name.bytes.equals(ByteBufferUtil.bytes("val"));
            assert cell.buffer().equals(ByteBufferUtil.bytes("val" + i));
        }
    }
    // clear 100 rows from the cache
    int keysLeft = 109;
    for (int i = 109; i >= 10; i--) {
        cachedStore.invalidateCachedPartition(Util.dk("key" + i));
        assert CacheService.instance.rowCache.size() == keysLeft;
        keysLeft--;
    }
    CacheService.instance.setRowCacheCapacityInMB(0);
}
Also used: CachedPartition (org.apache.cassandra.db.partitions.CachedPartition), Test (org.junit.Test)
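
The cell read-back pattern that both loops above share reduces to a few lines. A sketch under the same assumptions as the test (single-row partitions whose only regular column is "val", and the same fixtures in scope):

// Fetch the raw cached partition for the first key and verify its "val" cell.
CachedPartition cp = cachedStore.getRawCachedPartition(Util.dk("key0"));
try (UnfilteredRowIterator ai = cp.unfilteredIterator(ColumnFilter.selection(cp.columns()), Slices.ALL, false)) {
    Row r = (Row) ai.next();
    Cell<?> cell = r.cells().iterator().next();
    assertEquals(ByteBufferUtil.bytes("val0"), cell.buffer());
}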

Example 3 with CachedPartition

Use of org.apache.cassandra.db.partitions.CachedPartition in project cassandra by apache.

From class RowCacheTest, method testRoundTrip. The test writes a single row, populates the cache with one read, and verifies that a second read is served from the cache with the value intact.

@Test
public void testRoundTrip() throws Exception {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    String cf = "CachedIntCF";
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(cf);
    long startRowCacheHits = cachedStore.metric.rowCacheHit.getCount();
    long startRowCacheOutOfRange = cachedStore.metric.rowCacheHitOutOfRange.getCount();
    // empty the row cache
    CacheService.instance.invalidateRowCache();
    // set global row cache size to 1 MiB
    CacheService.instance.setRowCacheCapacityInMB(1);
    ByteBuffer key = ByteBufferUtil.bytes("rowcachekey");
    DecoratedKey dk = cachedStore.decorateKey(key);
    RowCacheKey rck = new RowCacheKey(cachedStore.metadata(), dk);
    RowUpdateBuilder rub = new RowUpdateBuilder(cachedStore.metadata(), System.currentTimeMillis(), key);
    rub.clustering(String.valueOf(0));
    rub.add("val", ByteBufferUtil.bytes("val" + 0));
    rub.build().applyUnsafe();
    // populate the row cache; the first read should not produce a row cache hit
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(1).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    // query again with the same limit of 1; this time it should be a row cache hit, in range
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(1).build());
    assertEquals(++startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());
    CachedPartition cachedCf = (CachedPartition) CacheService.instance.rowCache.get(rck);
    assertEquals(1, cachedCf.rowCount());
    for (Unfiltered unfiltered : Util.once(cachedCf.unfilteredIterator(ColumnFilter.selection(cachedCf.columns()), Slices.ALL, false))) {
        Row r = (Row) unfiltered;
        for (ColumnData c : r) {
            assertEquals(ByteBufferUtil.bytes("val" + 0), ((Cell<?>) c).buffer());
        }
    }
    cachedStore.truncateBlocking();
}
Also used: CachedPartition (org.apache.cassandra.db.partitions.CachedPartition), RowCacheKey (org.apache.cassandra.cache.RowCacheKey), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
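
In isolation, the direct cache lookup this example performs looks like this (a sketch using only names already present in the test; nothing new is assumed):

// A RowCacheKey pairs the table metadata with the decorated partition key;
// the global row cache can then be consulted directly, bypassing the read path.
RowCacheKey cacheKey = new RowCacheKey(cachedStore.metadata(), cachedStore.decorateKey(ByteBufferUtil.bytes("rowcachekey")));
CachedPartition partition = (CachedPartition) CacheService.instance.rowCache.get(cacheKey);
assertEquals(1, partition.rowCount());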

Example 4 with CachedPartition

Use of org.apache.cassandra.db.partitions.CachedPartition in project cassandra by apache.

From class RowCacheTest, method testRowCacheNoClustering. The test repeats the basic caching checks against a column family with no clustering columns.

@Test
public void testRowCacheNoClustering() throws Exception {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(CF_CACHEDNOCLUSTER);
    // empty the row cache
    CacheService.instance.invalidateRowCache();
    // set global row cache size to 1 MiB
    CacheService.instance.setRowCacheCapacityInMB(1);
    // insert 100 rows into the column family
    SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHEDNOCLUSTER, 0, 100);
    // now read the rows one by one, checking that the row cache grows
    for (int i = 0; i < 100; i++) {
        DecoratedKey key = Util.dk("key" + i);
        Util.getAll(Util.cmd(cachedStore, key).build());
        assertEquals(i + 1, CacheService.instance.rowCache.size());
        // current key should be stored in the cache
        assert (cachedStore.containsCachedParition(key));
    }
    // insert 10 more keys
    SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHEDNOCLUSTER, 100, 10);
    for (int i = 100; i < 110; i++) {
        DecoratedKey key = Util.dk("key" + i);
        Util.getAll(Util.cmd(cachedStore, key).build());
        // cache should be populated with the latest rows read (old ones should be popped)
        assert cachedStore.containsCachedParition(key);
        // check that the cell reads back correctly from the cache
        CachedPartition cp = cachedStore.getRawCachedPartition(key);
        try (UnfilteredRowIterator ai = cp.unfilteredIterator(ColumnFilter.selection(cp.columns()), Slices.ALL, false)) {
            assert ai.hasNext();
            Row r = (Row) ai.next();
            assertFalse(ai.hasNext());
            Iterator<Cell<?>> ci = r.cells().iterator();
            assert (ci.hasNext());
            Cell<?> cell = ci.next();
            assert cell.column().name.bytes.equals(ByteBufferUtil.bytes("val"));
            assert cell.buffer().equals(ByteBufferUtil.bytes("val" + i));
        }
    }
    // clear 100 rows from the cache
    int keysLeft = 109;
    for (int i = 109; i >= 10; i--) {
        cachedStore.invalidateCachedPartition(Util.dk("key" + i));
        assert CacheService.instance.rowCache.size() == keysLeft;
        keysLeft--;
    }
    CacheService.instance.setRowCacheCapacityInMB(0);
}
Also used: CachedPartition (org.apache.cassandra.db.partitions.CachedPartition), Test (org.junit.Test)
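
The explicit-eviction pattern that closes Examples 2 and 4 can be sketched on its own (same fixtures assumed; "key109" is one of the keys inserted above):

// Invalidate one cached partition and verify the global cache shrinks by one.
long sizeBefore = CacheService.instance.rowCache.size();
cachedStore.invalidateCachedPartition(Util.dk("key109"));
assertEquals(sizeBefore - 1, CacheService.instance.rowCache.size());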

Aggregations

CachedPartition (org.apache.cassandra.db.partitions.CachedPartition): 4 uses
Test (org.junit.Test): 4 uses
ByteBuffer (java.nio.ByteBuffer): 2 uses
RowCacheKey (org.apache.cassandra.cache.RowCacheKey): 2 uses