
Example 26 with LRUStatistics

Use of org.apache.geode.internal.cache.lru.LRUStatistics in project geode by apache.

The class DiskRegionDUnitTest, method testCacheEvents.

/**
   * Tests that cache listeners in an overflow region are invoked and that their events are reasonable.
   */
@Test
public void testCacheEvents() throws Exception {
    final String name = this.getUniqueName();
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
    File d = new File("DiskRegions" + OSProcess.getId());
    d.mkdirs();
    TestCacheListener listener = new TestCacheListener() {

        public void afterCreate2(EntryEvent event) {
        }
    };
    factory.addCacheListener(listener);
    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
    dsf.setDiskDirs(new File[] { d });
    factory.setDiskSynchronous(true);
    DiskStore ds = dsf.create(name);
    factory.setDiskStoreName(ds.getName());
    Region region = createRegion(name, factory.create());
    // DiskRegion dr = ((LocalRegion) region).getDiskRegion();
    // DiskRegionStats diskStats = dr.getStats();
    LRUStatistics lruStats = getLRUStats(region);
    int total;
    for (total = 0; lruStats.getEvictions() < 20; total++) {
        region.put(new Integer(total), String.valueOf(total));
        assertEquals(String.valueOf(total), region.get(new Integer(total)));
    }
    assertTrue(listener.wasInvoked());
    listener = new TestCacheListener() {

        public void close2() {
        }
    };
    region.getAttributesMutator().setCacheListener(listener);
    for (int i = 0; i < total; i++) {
        String value = (String) region.get(new Integer(i));
        assertNotNull(value);
        assertEquals(String.valueOf(i), value);
    }
    assertFalse(listener.wasInvoked());
    listener = new TestCacheListener() {

        public void afterUpdate2(EntryEvent event) {
            Integer key = (Integer) event.getKey();
            assertEquals(null, event.getOldValue());
            assertEquals(false, event.isOldValueAvailable());
            byte[] value = (byte[]) event.getNewValue();
            assertEquals(key.intValue(), value.length);
        }
    };
    region.getAttributesMutator().setCacheListener(listener);
    for (int i = 0; i < 20; i++) {
        region.put(new Integer(i), new byte[i]);
    }
    assertTrue(listener.wasInvoked());
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) EntryEvent(org.apache.geode.cache.EntryEvent) LRUStatistics(org.apache.geode.internal.cache.lru.LRUStatistics) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
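Each of these tests calls a getLRUStats(Region) helper that is defined elsewhere in DiskRegionDUnitTest and does not appear in this listing. A minimal sketch of what such a helper might look like is shown below; the eviction internals have changed across Geode versions, so the method names used here (getEvictionController, getLRUHelper, getStats) are assumptions rather than the project's verbatim code.

// Hypothetical sketch of the getLRUStats(Region) helper the tests rely on.
// The eviction internals differ between Geode versions; the calls below are
// assumptions, not a copy of the actual test utility.
protected LRUStatistics getLRUStats(Region region) {
    LocalRegion localRegion = (LocalRegion) region;
    // Ask the region's LRU eviction controller for its statistics object.
    return localRegion.getEvictionController().getLRUHelper().getStats();
}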

Example 27 with LRUStatistics

Use of org.apache.geode.internal.cache.lru.LRUStatistics in project geode by apache.

The class DiskRegionDUnitTest, method testLRUCCSizeOne.

/**
   * Tests a disk-based region with an {@link LRUCapacityController} with size 1 and an eviction
   * action of "overflow".
   */
@Test
public void testLRUCCSizeOne() throws CacheException {
    int threshold = 1;
    final String name = this.getUniqueName();
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(threshold, EvictionAction.OVERFLOW_TO_DISK));
    factory.setCacheLoader(new CacheLoader() {

        public Object load(LoaderHelper helper) throws CacheLoaderException {
            return "LOADED VALUE";
        }

        public void close() {
        }
    });
    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
    factory.setDiskSynchronous(true);
    File d = new File("DiskRegions" + OSProcess.getId());
    d.mkdirs();
    dsf.setDiskDirs(new File[] { d });
    DiskStore ds = dsf.create(name);
    factory.setDiskStoreName(ds.getName());
    Region region = createRegion(name, factory.create());
    LRUStatistics lruStats = getLRUStats(region);
    assertNotNull(lruStats);
    for (int i = 1; i <= 1; i++) {
        Object key = new Integer(i);
        Object value = String.valueOf(i);
        region.put(key, value);
        assertEquals(1, lruStats.getCounter());
        assertEquals(0, lruStats.getEvictions());
    }
    for (int i = 2; i <= 10; i++) {
        Object key = new Integer(i);
        Object value = String.valueOf(i);
        region.put(key, value);
        assertEquals(1, lruStats.getCounter());
        assertEquals(i - 1, lruStats.getEvictions());
    }
    for (int i = 11; i <= 20; i++) {
        Object key = new Integer(i);
        // Object value = String.valueOf(i);
        // Invoke loader
        region.get(key);
        assertEquals(1, lruStats.getCounter());
        assertEquals(i - 1, lruStats.getEvictions());
    }
}
Also used : DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) LoaderHelper(org.apache.geode.cache.LoaderHelper) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) LRUStatistics(org.apache.geode.internal.cache.lru.LRUStatistics) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) CacheLoader(org.apache.geode.cache.CacheLoader) File(java.io.File) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
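The test above configures the region through the older AttributesFactory idiom used by these DUnit tests. Outside a test, the same size-one overflow region can be built with the public RegionFactory API; below is a minimal sketch assuming a standalone cache, with the disk-store and region names ("overflowStore", "overflowRegion") chosen only for illustration.

// Minimal sketch (public Geode API): a LOCAL region that keeps at most one
// entry in memory and overflows everything else to disk. Names are illustrative.
import java.io.File;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class OverflowRegionExample {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();

        // Disk store that receives the overflowed entries.
        File dir = new File("overflowDiskDir");
        dir.mkdirs();
        cache.createDiskStoreFactory()
            .setDiskDirs(new File[] { dir })
            .create("overflowStore");

        // Entry-count LRU of 1 with overflow-to-disk eviction, written synchronously.
        Region<Integer, String> region =
            cache.<Integer, String>createRegionFactory(RegionShortcut.LOCAL)
                .setEvictionAttributes(
                    EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
                .setDiskStoreName("overflowStore")
                .setDiskSynchronous(true)
                .create("overflowRegion");

        for (int i = 0; i < 10; i++) {
            region.put(i, String.valueOf(i)); // only the latest entry stays in memory
        }
        cache.close();
    }
}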

Example 28 with LRUStatistics

Use of org.apache.geode.internal.cache.lru.LRUStatistics in project geode by apache.

The class DiskRegionDUnitTest, method testOverflowMirror.

/**
   * Tests overflow with mirrored regions. Note that we have to use <code>byte</code> array values
   * in this test. Otherwise, the size of the data in the "puter" VM would be different from the
   * size of the data in the receiver VM, thus causing the two VMs to have different LRU eviction
   * behavior.
   */
@Test
public void testOverflowMirror() throws Exception {
    final String name = this.getUniqueName();
    SerializableRunnable create = new CacheSerializableRunnable("Create region") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.DISTRIBUTED_ACK);
            factory.setEarlyAck(false);
            factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
            factory.setDataPolicy(DataPolicy.REPLICATE);
            File d = new File("DiskRegions" + OSProcess.getId());
            d.mkdirs();
            DiskStoreFactory dsf = getCache().createDiskStoreFactory();
            dsf.setDiskDirs(new File[] { d });
            factory.setDiskSynchronous(true);
            DiskStore ds = dsf.create(name);
            factory.setDiskStoreName(ds.getName());
            createRegion(name, factory.create());
        }
    };
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    vm0.invoke(create);
    vm1.invoke(create);
    vm0.invoke(new CacheSerializableRunnable("Fill Region") {

        public void run2() throws CacheException {
            LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
            // DiskRegion dr = region.getDiskRegion();
            LRUStatistics lruStats = getLRUStats(region);
            for (int i = 0; lruStats.getEvictions() < 10; i++) {
                LogWriterUtils.getLogWriter().info("Put " + i);
                region.put(new Integer(i), new byte[1]);
            }
            assertEquals(10, lruStats.getEvictions());
        }
    });
    vm1.invoke(new CacheSerializableRunnable("Verify overflow") {

        public void run2() throws CacheException {
            LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
            // DiskRegion dr = region.getDiskRegion();
            LRUStatistics lruStats = getLRUStats(region);
            assertEquals(10, lruStats.getEvictions());
            // Faulting in each overflowed entry should evict another entry.
            for (int i = 0; i < 10; i++) {
                region.get(new Integer(i));
                assertEquals("No eviction for " + i, 10 + 1 + i, lruStats.getEvictions());
            }
        }
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) LocalRegion(org.apache.geode.internal.cache.LocalRegion) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) VM(org.apache.geode.test.dunit.VM) LRUStatistics(org.apache.geode.internal.cache.lru.LRUStatistics) File(java.io.File) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
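The Javadoc above points out that the mirrored test uses fixed-size byte[] values so that both VMs compute the same per-entry size and therefore evict in lockstep. When values do vary in size, memory-based LRU with an ObjectSizer (listed in the aggregations at the end of this page) is a common alternative to entry-count LRU; here is a minimal sketch of such a configuration, assuming an existing Cache and a disk store named "overflowStore", both of which are illustrative.

// Minimal sketch: memory-based LRU eviction with a custom ObjectSizer,
// overflowing to disk once the region holds roughly 10 MB of values.
// The sizer below is illustrative and only estimates byte[] payloads.
Region<Integer, byte[]> createMirroredOverflowRegion(Cache cache) {
    ObjectSizer sizer = new ObjectSizer() {
        public int sizeof(Object o) {
            // Exact for byte[] values, a small constant for anything else.
            return (o instanceof byte[]) ? ((byte[]) o).length : 16;
        }
    };
    return cache.<Integer, byte[]>createRegionFactory(RegionShortcut.REPLICATE)
        .setEvictionAttributes(
            EvictionAttributes.createLRUMemoryAttributes(10, sizer, EvictionAction.OVERFLOW_TO_DISK))
        .setDiskStoreName("overflowStore")
        .create("mirroredOverflow");
}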

Example 29 with LRUStatistics

Use of org.apache.geode.internal.cache.lru.LRUStatistics in project geode by apache.

The class DiskRegionDUnitTest, method testDiskRegionOverflow.

//////// Test Methods
/**
   * Tests that data overflows correctly to a disk region
   */
@Test
public void testDiskRegionOverflow() throws Exception {
    final String name = this.getUniqueName();
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
    File d = new File("DiskRegions" + OSProcess.getId());
    d.mkdirs();
    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
    dsf.setDiskDirs(new File[] { d });
    factory.setDiskSynchronous(true);
    DiskStore ds = dsf.create(name);
    factory.setDiskStoreName(ds.getName());
    Region region = createRegion(name, factory.create());
    DiskRegion dr = ((LocalRegion) region).getDiskRegion();
    assertNotNull(dr);
    DiskRegionStats diskStats = dr.getStats();
    LRUStatistics lruStats = getLRUStats(region);
    assertNotNull(diskStats);
    assertNotNull(lruStats);
    flush(region);
    assertEquals(0, diskStats.getWrites());
    assertEquals(0, diskStats.getReads());
    assertEquals(0, lruStats.getEvictions());
    // Put in larger stuff until we start evicting
    int total;
    for (total = 0; lruStats.getEvictions() <= 0; total++) {
        // getLogWriter().info("DEBUG: total " + total + ", evictions " + lruStats.getEvictions());
        int[] array = new int[250];
        array[0] = total;
        region.put(new Integer(total), array);
    }
    flush(region);
    LogWriterUtils.getLogWriter().info("DEBUG: writes=" + diskStats.getWrites() + " reads=" + diskStats.getReads() + " evictions=" + lruStats.getEvictions() + " total=" + total + " numEntriesInVM=" + diskStats.getNumEntriesInVM() + " numOverflows=" + diskStats.getNumOverflowOnDisk());
    assertEquals(1, diskStats.getWrites());
    assertEquals(0, diskStats.getReads());
    assertEquals(1, lruStats.getEvictions());
    assertEquals(1, diskStats.getNumOverflowOnDisk());
    assertEquals(total - 1, diskStats.getNumEntriesInVM());
    Object value = region.get(new Integer(0));
    flush(region);
    assertNotNull(value);
    assertEquals(0, ((int[]) value)[0]);
    LogWriterUtils.getLogWriter().info("DEBUG: writes=" + diskStats.getWrites() + " reads=" + diskStats.getReads() + " evictions=" + lruStats.getEvictions() + " total=" + total + " numEntriesInVM=" + diskStats.getNumEntriesInVM() + " numOverflows=" + diskStats.getNumOverflowOnDisk());
    assertEquals(2, diskStats.getWrites());
    assertEquals(1, diskStats.getReads());
    assertEquals(2, lruStats.getEvictions());
    for (int i = 0; i < total; i++) {
        int[] array = (int[]) region.get(new Integer(i));
        assertNotNull(array);
        assertEquals(i, array[0]);
    }
}
Also used : DiskRegionStats(org.apache.geode.internal.cache.DiskRegionStats) LocalRegion(org.apache.geode.internal.cache.LocalRegion) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LRUStatistics(org.apache.geode.internal.cache.lru.LRUStatistics) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) File(java.io.File) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
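Like getLRUStats, the flush(region) call in this test is a helper defined in the test class rather than a Region method; it presumably forces any queued disk writes to complete so the DiskRegionStats assertions are deterministic. With the public API, a comparable effect is to flush the region's disk store directly; a minimal sketch, assuming the disk store was created under the same name the test uses:

// Minimal sketch: push any asynchronously queued writes for a disk store out
// to its oplog files before inspecting disk statistics. Assumes the store
// name matches the one the region was created with.
DiskStore store = cache.findDiskStore(name);
if (store != null) {
    store.flush(); // blocks until queued writes have been written to disk
}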

Example 30 with LRUStatistics

Use of org.apache.geode.internal.cache.lru.LRUStatistics in project geode by apache.

The class DiskRegionDUnitTest, method testOverflowUpdatedValue.

/**
   * Tests that the updated value gets overflowed
   */
@Test
public void testOverflowUpdatedValue() throws Exception {
    final String name = this.getUniqueName();
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
    File d = new File("DiskRegions" + OSProcess.getId());
    d.mkdirs();
    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
    dsf.setDiskDirs(new File[] { d });
    DiskStore ds = dsf.create(name);
    factory.setDiskStoreName(ds.getName());
    Region region = createRegion(name, factory.create());
    // DiskRegion dr = ((LocalRegion) region).getDiskRegion();
    // DiskRegionStats diskStats = dr.getStats();
    LRUStatistics lruStats = getLRUStats(region);
    // Put in larger stuff until we start evicting
    int total;
    for (total = 0; lruStats.getEvictions() <= 10; total++) {
        int[] array = new int[250];
        array[0] = total;
        region.put(new Integer(total), array);
    }
    // Update a value
    final Object newValue = "NEW VALUE";
    final Object key = new Integer(0);
    region.put(key, newValue);
    assertEquals(newValue, region.get(key));
    // Fault in the remaining entries so that the updated value gets overflowed to disk
    for (int i = 1; i < total; i++) {
        region.get(new Integer(i));
    }
    // Make sure that the updated value got written to disk
    assertEquals(newValue, region.get(key));
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) LRUStatistics(org.apache.geode.internal.cache.lru.LRUStatistics) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Aggregations

LRUStatistics (org.apache.geode.internal.cache.lru.LRUStatistics) 34
LocalRegion (org.apache.geode.internal.cache.LocalRegion) 25
AttributesFactory (org.apache.geode.cache.AttributesFactory) 24
Test (org.junit.Test) 23
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest) 20
Region (org.apache.geode.cache.Region) 18
File (java.io.File) 16
DiskStore (org.apache.geode.cache.DiskStore) 16
DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory) 16
DiskRegion (org.apache.geode.internal.cache.DiskRegion) 16
Cache (org.apache.geode.cache.Cache) 7
DistributedSystem (org.apache.geode.distributed.DistributedSystem) 7
DiskRegionStats (org.apache.geode.internal.cache.DiskRegionStats) 7
Properties (java.util.Properties) 4
Iterator (java.util.Iterator) 3
CacheException (org.apache.geode.cache.CacheException) 3
CacheLoader (org.apache.geode.cache.CacheLoader) 3
CacheLoaderException (org.apache.geode.cache.CacheLoaderException) 3
LoaderHelper (org.apache.geode.cache.LoaderHelper) 3
ObjectSizer (org.apache.geode.cache.util.ObjectSizer) 3