
Example 1 with RowCacheKey

Use of org.apache.cassandra.cache.RowCacheKey in project cassandra by apache.

From the class RowCacheTest, method testRowCacheRange.

@Test
public void testRowCacheRange() {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    String cf = "CachedIntCF";
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(cf);
    long startRowCacheHits = cachedStore.metric.rowCacheHit.getCount();
    long startRowCacheOutOfRange = cachedStore.metric.rowCacheHitOutOfRange.getCount();
    // empty the row cache
    CacheService.instance.invalidateRowCache();
    // set global row cache size to 1 MiB
    CacheService.instance.setRowCacheCapacityInMB(1);
    ByteBuffer key = ByteBufferUtil.bytes("rowcachekey");
    DecoratedKey dk = cachedStore.decorateKey(key);
    RowCacheKey rck = new RowCacheKey(cachedStore.metadata(), dk);
    String[] values = new String[200];
    for (int i = 0; i < 200; i++) {
        RowUpdateBuilder rub = new RowUpdateBuilder(cachedStore.metadata(), System.currentTimeMillis(), key);
        rub.clustering(String.valueOf(i));
        values[i] = "val" + i;
        rub.add("val", ByteBufferUtil.bytes(values[i]));
        rub.build().applyUnsafe();
    }
    Arrays.sort(values);
    // populate the row cache; this first read should not produce a cache hit
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(10).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    // do another query with limit 10, which is < the 100 rows we cache; we should get a hit, and it should be in range
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(10).build());
    assertEquals(++startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());
    // get a slice from 210 to 215; that range lies outside the cached head of the partition, so there is no regular hit and the access is counted as out of range
    Util.getAll(Util.cmd(cachedStore, dk).fromIncl(String.valueOf(210)).toExcl(String.valueOf(215)).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(++startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());
    // do a query with limit 101, which is > the 100 rows we cache; the hit should be out of range
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(101).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(++startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());
    CacheService.instance.invalidateRowCache();
    // repopulate the row cache with a limit (105) greater than the 100 rows we cache; the cache should still be populated
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(105).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    // validate the contents of the cache
    CachedPartition cachedCf = (CachedPartition) CacheService.instance.rowCache.get(rck);
    assertEquals(100, cachedCf.rowCount());
    int i = 0;
    for (Unfiltered unfiltered : Util.once(cachedCf.unfilteredIterator(ColumnFilter.selection(cachedCf.columns()), Slices.ALL, false))) {
        Row r = (Row) unfiltered;
        ValueAccessors.assertDataEquals(r.clustering().get(0), ByteBufferUtil.bytes(values[i].substring(3)));
        for (ColumnData c : r) {
            assertEquals(((Cell<?>) c).buffer(), ByteBufferUtil.bytes(values[i]));
        }
        i++;
    }
    cachedStore.truncateBlocking();
}
Also used : CachedPartition(org.apache.cassandra.db.partitions.CachedPartition) ByteBuffer(java.nio.ByteBuffer) RowCacheKey(org.apache.cassandra.cache.RowCacheKey) Test(org.junit.Test)
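
The test above boils down to a small probe pattern. A minimal sketch, assuming cachedStore is an open ColumnFamilyStore for a row-cached table (the key string is a placeholder); it uses only calls that appear in the example:

// Build a RowCacheKey from table metadata and a decorated partition key,
// then probe the global row cache directly.
DecoratedKey dk = cachedStore.decorateKey(ByteBufferUtil.bytes("somekey"));
RowCacheKey rck = new RowCacheKey(cachedStore.metadata(), dk);
// rowCache.get() returns an IRowCacheEntry; a fully cached partition is a
// CachedPartition (a RowCacheSentinel instead marks a population in flight).
IRowCacheEntry entry = CacheService.instance.rowCache.get(rck);
if (entry instanceof CachedPartition)
    System.out.println("cached rows: " + ((CachedPartition) entry).rowCount());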

Example 2 with RowCacheKey

Use of org.apache.cassandra.cache.RowCacheKey in project cassandra by apache.

From the class ImportTest, method testImportInvalidateCache.

@Test
public void testImportInvalidateCache() throws Throwable {
    createTable("create table %s (id int primary key, d int) WITH caching = { 'keys': 'NONE', 'rows_per_partition': 'ALL' }");
    for (int i = 0; i < 10; i++) execute("insert into %s (id, d) values (?, ?)", i, i);
    getCurrentColumnFamilyStore().forceBlockingFlush();
    CacheService.instance.setRowCacheCapacityInMB(1);
    Set<RowCacheKey> keysToInvalidate = new HashSet<>();
    // populate the row cache with keys from the sstable we are about to remove
    for (int i = 0; i < 10; i++) {
        execute("SELECT * FROM %s WHERE id = ?", i);
    }
    Iterator<RowCacheKey> it = CacheService.instance.rowCache.keyIterator();
    while (it.hasNext()) {
        keysToInvalidate.add(it.next());
    }
    SSTableReader sstableToImport = getCurrentColumnFamilyStore().getLiveSSTables().iterator().next();
    getCurrentColumnFamilyStore().clearUnsafe();
    for (int i = 10; i < 20; i++) execute("insert into %s (id, d) values (?, ?)", i, i);
    getCurrentColumnFamilyStore().forceBlockingFlush();
    Set<RowCacheKey> allCachedKeys = new HashSet<>();
    // populate the row cache with keys from the sstables we are keeping
    for (int i = 10; i < 20; i++) {
        execute("SELECT * FROM %s WHERE id = ?", i);
    }
    it = CacheService.instance.rowCache.keyIterator();
    while (it.hasNext()) {
        allCachedKeys.add(it.next());
    }
    assertEquals(20, CacheService.instance.rowCache.size());
    File backupdir = moveToBackupDir(Collections.singleton(sstableToImport));
    // make sure we don't wipe caches with invalidateCaches = false:
    Set<SSTableReader> beforeFirstImport = getCurrentColumnFamilyStore().getLiveSSTables();
    SSTableImporter.Options options = SSTableImporter.Options.options(backupdir.toString()).verifySSTables(true).verifyTokens(true).build();
    SSTableImporter importer = new SSTableImporter(getCurrentColumnFamilyStore());
    importer.importNewSSTables(options);
    assertEquals(20, CacheService.instance.rowCache.size());
    Set<SSTableReader> toMove = Sets.difference(getCurrentColumnFamilyStore().getLiveSSTables(), beforeFirstImport);
    getCurrentColumnFamilyStore().clearUnsafe();
    // move away the sstable we just imported again:
    backupdir = moveToBackupDir(toMove);
    beforeFirstImport.forEach(s -> s.selfRef().release());
    options = SSTableImporter.Options.options(backupdir.toString()).verifySSTables(true).verifyTokens(true).invalidateCaches(true).build();
    importer.importNewSSTables(options);
    assertEquals(10, CacheService.instance.rowCache.size());
    it = CacheService.instance.rowCache.keyIterator();
    while (it.hasNext()) {
        // make sure the keys from the sstable we are importing are invalidated and that the other one is still there
        RowCacheKey rck = it.next();
        assertTrue(allCachedKeys.contains(rck));
        assertFalse(keysToInvalidate.contains(rck));
    }
}
Also used : SSTableReader(org.apache.cassandra.io.sstable.format.SSTableReader) RowCacheKey(org.apache.cassandra.cache.RowCacheKey) File(org.apache.cassandra.io.util.File) HashSet(java.util.HashSet) Test(org.junit.Test)
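
The snapshot-the-cache step appears twice in this test and can be factored into a helper. A hedged sketch (the helper name is illustrative, not from the Cassandra codebase), built on the keyIterator() call shown above:

// Copy the current row cache keys into a set for later comparison.
private static Set<RowCacheKey> snapshotRowCacheKeys() {
    Set<RowCacheKey> keys = new HashSet<>();
    Iterator<RowCacheKey> it = CacheService.instance.rowCache.keyIterator();
    while (it.hasNext())
        keys.add(it.next());
    return keys;
}

With such a helper, the two while loops above reduce to keysToInvalidate = snapshotRowCacheKeys() and allCachedKeys = snapshotRowCacheKeys().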

Example 3 with RowCacheKey

Use of org.apache.cassandra.cache.RowCacheKey in project eiger by wlloyd.

From the class StatusLogger, method log.

public static void log() {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // everything from o.a.c.concurrent
    logger.info(String.format("%-25s%10s%10s%10s", "Pool Name", "Active", "Pending", "Blocked"));
    Set<ObjectName> request, internal;
    try {
        request = server.queryNames(new ObjectName("org.apache.cassandra.request:type=*"), null);
        internal = server.queryNames(new ObjectName("org.apache.cassandra.internal:type=*"), null);
    } catch (MalformedObjectNameException e) {
        throw new RuntimeException(e);
    }
    for (ObjectName objectName : Iterables.concat(request, internal)) {
        String poolName = objectName.getKeyProperty("type");
        JMXEnabledThreadPoolExecutorMBean threadPoolProxy = JMX.newMBeanProxy(server, objectName, JMXEnabledThreadPoolExecutorMBean.class);
        logger.info(String.format("%-25s%10s%10s%10s", poolName, threadPoolProxy.getActiveCount(), threadPoolProxy.getPendingTasks(), threadPoolProxy.getCurrentlyBlockedTasks()));
    }
    // one offs
    logger.info(String.format("%-25s%10s%10s", "CompactionManager", "n/a", CompactionManager.instance.getPendingTasks()));
    int pendingCommands = 0;
    for (int n : MessagingService.instance().getCommandPendingTasks().values()) {
        pendingCommands += n;
    }
    int pendingResponses = 0;
    for (int n : MessagingService.instance().getResponsePendingTasks().values()) {
        pendingResponses += n;
    }
    logger.info(String.format("%-25s%10s%10s", "MessagingService", "n/a", pendingCommands + "," + pendingResponses));
    // Global key/row cache information
    AutoSavingCache<KeyCacheKey, Long> keyCache = CacheService.instance.keyCache;
    AutoSavingCache<RowCacheKey, ColumnFamily> rowCache = CacheService.instance.rowCache;
    int keyCacheKeysToSave = DatabaseDescriptor.getKeyCacheKeysToSave();
    int rowCacheKeysToSave = DatabaseDescriptor.getRowCacheKeysToSave();
    logger.info(String.format("%-25s%10s%25s%25s%65s", "Cache Type", "Size", "Capacity", "KeysToSave", "Provider"));
    logger.info(String.format("%-25s%10s%25s%25s%65s", "KeyCache", keyCache.weightedSize(), keyCache.getCapacity(), keyCacheKeysToSave == Integer.MAX_VALUE ? "all" : keyCacheKeysToSave, ""));
    logger.info(String.format("%-25s%10s%25s%25s%65s", "RowCache", rowCache.weightedSize(), rowCache.getCapacity(), rowCacheKeysToSave == Integer.MAX_VALUE ? "all" : rowCacheKeysToSave, DatabaseDescriptor.getRowCacheProvider().getClass().getName()));
    // per-CF stats
    logger.info(String.format("%-25s%20s", "ColumnFamily", "Memtable ops,data"));
    for (ColumnFamilyStore cfs : ColumnFamilyStore.all()) {
        logger.info(String.format("%-25s%20s", cfs.table.name + "." + cfs.columnFamily, cfs.getMemtableColumnsCount() + "," + cfs.getMemtableDataSize()));
    }
}
Also used : MalformedObjectNameException(javax.management.MalformedObjectNameException) JMXEnabledThreadPoolExecutorMBean(org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutorMBean) KeyCacheKey(org.apache.cassandra.cache.KeyCacheKey) ObjectName(javax.management.ObjectName) ColumnFamily(org.apache.cassandra.db.ColumnFamily) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) RowCacheKey(org.apache.cassandra.cache.RowCacheKey) MBeanServer(javax.management.MBeanServer)
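
StatusLogger runs inside the Cassandra process, so it reads the platform MBeanServer directly. The same pool names can also be queried remotely with the standard javax.management.remote client API; a sketch, assuming Cassandra's default JMX port 7199 with authentication disabled:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public static void listRequestPools() throws Exception {
    JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
        MBeanServerConnection conn = connector.getMBeanServerConnection();
        // same ObjectName pattern StatusLogger queries in-process above
        for (ObjectName name : conn.queryNames(new ObjectName("org.apache.cassandra.request:type=*"), null))
            System.out.println(name.getKeyProperty("type"));
    }
}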

Example 4 with RowCacheKey

Use of org.apache.cassandra.cache.RowCacheKey in project cassandra by apache.

From the class RowCacheTest, method getBounds.

private ArrayList<Bounds<Token>> getBounds(int nElements) {
    ColumnFamilyStore store = Keyspace.open(KEYSPACE_CACHED).getColumnFamilyStore(CF_CACHED);
    TreeSet<DecoratedKey> orderedKeys = new TreeSet<>();
    for (Iterator<RowCacheKey> it = CacheService.instance.rowCache.keyIterator(); it.hasNext(); )
        orderedKeys.add(store.decorateKey(ByteBuffer.wrap(it.next().key)));
    ArrayList<Bounds<Token>> boundsToInvalidate = new ArrayList<>();
    Iterator<DecoratedKey> iterator = orderedKeys.iterator();
    // each iteration consumes nElements keys: the first and last become the bounds
    // endpoints (assumes the cached key count is a multiple of nElements)
    while (iterator.hasNext()) {
        Token startRange = iterator.next().getToken();
        for (int i = 0; i < nElements - 2; i++)
            iterator.next();
        Token endRange = iterator.next().getToken();
        boundsToInvalidate.add(new Bounds<>(startRange, endRange));
    }
    return boundsToInvalidate;
}
Also used : Bounds(org.apache.cassandra.dht.Bounds) ArrayList(java.util.ArrayList) BytesToken(org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken) Token(org.apache.cassandra.dht.Token) TreeSet(java.util.TreeSet) RowCacheKey(org.apache.cassandra.cache.RowCacheKey)
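
The bounds built here are plain token ranges over the cached keys. A sketch of how membership against such a bounds could be checked; the helper is illustrative (not part of the test), but Bounds.contains and the decorate-and-wrap steps mirror the code above:

// Count how many currently cached keys fall inside the given token bounds.
private static int countCachedKeysIn(ColumnFamilyStore store, Bounds<Token> bounds) {
    int count = 0;
    for (Iterator<RowCacheKey> it = CacheService.instance.rowCache.keyIterator(); it.hasNext(); ) {
        DecoratedKey dk = store.decorateKey(ByteBuffer.wrap(it.next().key));
        if (bounds.contains(dk.getToken()))
            count++;
    }
    return count;
}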

Example 5 with RowCacheKey

Use of org.apache.cassandra.cache.RowCacheKey in project cassandra by apache.

From the class RowCacheTest, method testRoundTrip.

@Test
public void testRoundTrip() throws Exception {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    String cf = "CachedIntCF";
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(cf);
    long startRowCacheHits = cachedStore.metric.rowCacheHit.getCount();
    long startRowCacheOutOfRange = cachedStore.metric.rowCacheHitOutOfRange.getCount();
    // empty the row cache
    CacheService.instance.invalidateRowCache();
    // set global row cache size to 1 MiB
    CacheService.instance.setRowCacheCapacityInMB(1);
    ByteBuffer key = ByteBufferUtil.bytes("rowcachekey");
    DecoratedKey dk = cachedStore.decorateKey(key);
    RowCacheKey rck = new RowCacheKey(cachedStore.metadata(), dk);
    RowUpdateBuilder rub = new RowUpdateBuilder(cachedStore.metadata(), System.currentTimeMillis(), key);
    rub.clustering(String.valueOf(0));
    rub.add("val", ByteBufferUtil.bytes("val" + 0));
    rub.build().applyUnsafe();
    // populate the row cache; this first read should not produce a cache hit
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(1).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    // do another query with limit 1; we should get a hit, and it should be in range
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(1).build());
    assertEquals(++startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());
    CachedPartition cachedCf = (CachedPartition) CacheService.instance.rowCache.get(rck);
    assertEquals(1, cachedCf.rowCount());
    for (Unfiltered unfiltered : Util.once(cachedCf.unfilteredIterator(ColumnFilter.selection(cachedCf.columns()), Slices.ALL, false))) {
        Row r = (Row) unfiltered;
        for (ColumnData c : r) {
            assertEquals(((Cell<?>) c).buffer(), ByteBufferUtil.bytes("val" + 0));
        }
    }
    cachedStore.truncateBlocking();
}
Also used : CachedPartition(org.apache.cassandra.db.partitions.CachedPartition) RowCacheKey(org.apache.cassandra.cache.RowCacheKey) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
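
A natural extension of the round trip is checking that invalidation removes the entry again. A brief sketch using only calls that appear in these examples plus JUnit's assertNull:

// After the round trip, invalidating the row cache should drop the entry.
CacheService.instance.invalidateRowCache();
assertNull(CacheService.instance.rowCache.get(rck));
assertEquals(0, CacheService.instance.rowCache.size());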

Aggregations

RowCacheKey (org.apache.cassandra.cache.RowCacheKey) 6
Test (org.junit.Test) 3
ByteBuffer (java.nio.ByteBuffer) 2
CachedPartition (org.apache.cassandra.db.partitions.CachedPartition) 2
ArrayList (java.util.ArrayList) 1
HashSet (java.util.HashSet) 1
TreeSet (java.util.TreeSet) 1
MBeanServer (javax.management.MBeanServer) 1
MalformedObjectNameException (javax.management.MalformedObjectNameException) 1
ObjectName (javax.management.ObjectName) 1
IRowCacheEntry (org.apache.cassandra.cache.IRowCacheEntry) 1
KeyCacheKey (org.apache.cassandra.cache.KeyCacheKey) 1
RowCacheSentinel (org.apache.cassandra.cache.RowCacheSentinel) 1
JMXEnabledThreadPoolExecutorMBean (org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutorMBean) 1
ColumnFamily (org.apache.cassandra.db.ColumnFamily) 1
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore) 1
Bounds (org.apache.cassandra.dht.Bounds) 1
BytesToken (org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken) 1
Token (org.apache.cassandra.dht.Token) 1
SSTableReader (org.apache.cassandra.io.sstable.format.SSTableReader) 1