
Example 31 with DiskStore

use of org.apache.geode.cache.DiskStore in project geode by apache.

the class DiskRegionDUnitTest method fillUpOverflowRegion.

/**
   * Tests that an {@link IllegalStateException} is thrown when the region is full of keys and
   * entries.
   */
public void fillUpOverflowRegion() throws Exception {
    final String name = this.getUniqueName();
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
    File d = new File("DiskRegions" + OSProcess.getId());
    d.mkdirs();
    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
    dsf.setDiskDirs(new File[] { d });
    DiskStore ds = dsf.create(name);
    factory.setDiskStoreName(ds.getName());
    Region region = createRegion(name, factory.create());
    for (int i = 0; i < 10000; i++) {
        int[] array = new int[1000];
        array[0] = i;
        try {
            region.put(array, new Integer(i));
        } catch (IllegalStateException ex) {
            String message = ex.getMessage();
            assertTrue(message.indexOf("is full with") != -1);
            return;
        }
    }
    fail("Should have thrown an IllegalStateException");
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory)
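
The DUnit helpers above (getCache, createRegion, getUniqueName) belong to the test framework. As a point of reference, the same overflow-to-disk pattern can be expressed with the public CacheFactory/RegionFactory API; the following is a minimal sketch, not part of the test, and the region, disk-store, and directory names are made up for illustration.

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class OverflowRegionSketch {
    public static void main(String[] args) {
        // Stand-alone cache; mcast-port 0 disables multicast discovery.
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        // Directory for the overflow files (hypothetical name).
        File dir = new File("overflowDiskStore");
        dir.mkdirs();
        // Named disk store that backs the overflow region.
        DiskStore ds = cache.createDiskStoreFactory()
            .setDiskDirs(new File[] { dir })
            .create("overflowDiskStore");
        // Local region that keeps at most 100 entries in memory and
        // overflows the least recently used entries to the disk store.
        Region<Integer, int[]> region = cache.<Integer, int[]>createRegionFactory(RegionShortcut.LOCAL)
            .setEvictionAttributes(
                EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK))
            .setDiskStoreName(ds.getName())
            .create("overflowRegion");
        region.put(1, new int[] { 1 });
        cache.close();
    }
}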

Example 32 with DiskStore

use of org.apache.geode.cache.DiskStore in project geode by apache.

the class DiskRegionDUnitTest method testDistributedInvalidate.

/**
   * Tests that invalidates and updates received from different VMs are handled appropriately by
   * overflow regions.
   */
@Test
public void testDistributedInvalidate() throws Exception {
    final String name = this.getUniqueName();
    SerializableRunnable create = new CacheSerializableRunnable("Create region") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.DISTRIBUTED_ACK);
            factory.setEarlyAck(false);
            factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
            File d = new File("DiskRegions" + OSProcess.getId());
            d.mkdirs();
            DiskStoreFactory dsf = getCache().createDiskStoreFactory();
            dsf.setDiskDirs(new File[] { d });
            DiskStore ds = dsf.create(name);
            factory.setDiskStoreName(ds.getName());
            createRegion(name, factory.create());
        }
    };
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    vm0.invoke(create);
    vm1.invoke(create);
    vm0.invoke(new CacheSerializableRunnable("Fill Region") {

        public void run2() throws CacheException {
            LocalRegion region = (LocalRegion) getRootRegion().getSubregion(name);
            // DiskRegion dr = region.getDiskRegion();
            LRUStatistics lruStats = getLRUStats(region);
            for (int i = 0; lruStats.getEvictions() < 10; i++) {
                LogWriterUtils.getLogWriter().info("Put " + i);
                region.put(new Integer(i), new byte[1]);
            }
            assertEquals(10, lruStats.getEvictions());
        }
    });
    final Object key = new Integer(20);
    vm1.invoke(new CacheSerializableRunnable("Invalidate entry") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            assertNotNull(region.get(key));
            region.invalidate(key);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Verify invalidate") {

        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            WaitCriterion ev = new WaitCriterion() {

                public boolean done() {
                    return region.get(key) == null;
                }

                public String description() {
                    return "value for key remains: " + key;
                }
            };
            Wait.waitForCriterion(ev, 500, 200, true);
        }
    });
    final String newValue = "NEW VALUE";
    vm1.invoke(new CacheSerializableRunnable("Update entry") {

        public void run2() throws CacheException {
            Region region = getRootRegion().getSubregion(name);
            region.put(key, newValue);
        }
    });
    vm0.invoke(new CacheSerializableRunnable("Verify update") {

        public void run2() throws CacheException {
            final Region region = getRootRegion().getSubregion(name);
            WaitCriterion ev = new WaitCriterion() {

                public boolean done() {
                    return newValue.equals(region.get(key));
                }

                public String description() {
                    return "verify update";
                }
            };
            Wait.waitForCriterion(ev, 500, 200, true);
        }
    });
}
Also used : CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) Host(org.apache.geode.test.dunit.Host) LocalRegion(org.apache.geode.internal.cache.LocalRegion) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) VM(org.apache.geode.test.dunit.VM) LRUStatistics(org.apache.geode.internal.cache.lru.LRUStatistics) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) File(java.io.File) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
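
Wait.waitForCriterion and WaitCriterion above come from the DUnit test utilities. Outside that harness, the same "wait until the invalidate is visible" check can be written as a plain polling loop; the sketch below is a stand-in, not the Geode utility, and the timeout and interval values simply mirror the test.

public class PollingWait {
    // Retry the condition until it holds or the timeout elapses.
    public static void waitUntil(java.util.function.BooleanSupplier condition, long timeoutMs,
            long intervalMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError("condition not met within " + timeoutMs + " ms");
            }
            Thread.sleep(intervalMs);
        }
    }
}

Usage mirroring the "Verify invalidate" step: PollingWait.waitUntil(() -> region.get(key) == null, 500, 200);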

Example 33 with DiskStore

use of org.apache.geode.cache.DiskStore in project geode by apache.

the class DiskRegionDUnitTest method testInvalidate.

/**
   * Tests that once an overflowed entry is {@linkplain Region#invalidate invalidated}, its value
   * is gone.
   */
@Test
public void testInvalidate() throws Exception {
    final String name = this.getUniqueName();
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
    File d = new File("DiskRegions" + OSProcess.getId());
    d.mkdirs();
    DiskStoreFactory dsf = getCache().createDiskStoreFactory();
    dsf.setDiskDirs(new File[] { d });
    DiskStore ds = dsf.create(name);
    factory.setDiskStoreName(ds.getName());
    Region region = createRegion(name, factory.create());
    // DiskRegion dr = ((LocalRegion) region).getDiskRegion();
    // DiskRegionStats diskStats = dr.getStats();
    LRUStatistics lruStats = getLRUStats(region);
    // Put in larger stuff until we start evicting
    int total;
    for (total = 0; lruStats.getEvictions() <= 10; total++) {
        int[] array = new int[250];
        array[0] = total;
        region.put(new Integer(total), array);
    }
    region.invalidate(new Integer(0));
    assertNull(region.get(new Integer(0)));
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) LRUStatistics(org.apache.geode.internal.cache.lru.LRUStatistics) DiskRegion(org.apache.geode.internal.cache.DiskRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 34 with DiskStore

use of org.apache.geode.cache.DiskStore in project geode by apache.

the class CacheXml66DUnitTest method testDiskStoreInTemplates.

/**
   * Tests that regions created from named region attributes (templates) use the disk stores
   * configured for those attributes.
   */
@Test
public void testDiskStoreInTemplates() throws Exception {
    File dir = new File("west");
    dir.mkdir();
    dir.deleteOnExit();
    dir = new File("east");
    dir.mkdir();
    dir.deleteOnExit();
    setXmlFile(findFile("ewtest.xml"));
    String regionName_west = "orders/west";
    String regionName_east = "orders/east";
    Cache cache = getCache();
    // verify diskstores
    DiskStore ds = cache.findDiskStore("persistentDiskStore1");
    assertNotNull(ds);
    assertEquals(500, ds.getQueueSize());
    File[] dirs = ds.getDiskDirs();
    assertEquals("west", dirs[0].getPath());
    ds = cache.findDiskStore("persistentDiskStore2");
    assertNotNull(ds);
    assertEquals(500, ds.getQueueSize());
    dirs = ds.getDiskDirs();
    assertEquals("east", dirs[0].getPath());
    // verify templates
    assertNotNull(cache.getRegionAttributes("nack"));
    RegionAttributes attrs = cache.getRegionAttributes("persistent");
    assertEquals(DataPolicy.PERSISTENT_REPLICATE, attrs.getDataPolicy());
    assertEquals(false, attrs.isDiskSynchronous());
    assertEquals("persistentDiskStore1", attrs.getDiskStoreName());
    Region region = cache.getRegion(regionName_west);
    assertNotNull(region);
    attrs = region.getAttributes();
    assertEquals(DataPolicy.PERSISTENT_REPLICATE, attrs.getDataPolicy());
    assertEquals(false, attrs.isDiskSynchronous());
    assertEquals("persistentDiskStore1", attrs.getDiskStoreName());
    region = cache.getRegion(regionName_east);
    assertNotNull(region);
    // Make sure that attributes can be "overridden"
    attrs = region.getAttributes();
    assertEquals(DataPolicy.PERSISTENT_REPLICATE, attrs.getDataPolicy());
    assertEquals(false, attrs.isDiskSynchronous());
    assertEquals("persistentDiskStore2", attrs.getDiskStoreName());
    // bug 41934
    String regionName_datap = "data-p";
    region = cache.getRegion(regionName_datap);
    assertNotNull(region);
    attrs = region.getAttributes();
    PartitionAttributes pa = attrs.getPartitionAttributes();
    assertEquals(1, pa.getRedundantCopies());
    assertEquals(3, pa.getTotalNumBuckets());
    assertEquals(DataPolicy.PERSISTENT_PARTITION, attrs.getDataPolicy());
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) RegionAttributes(org.apache.geode.cache.RegionAttributes) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) FixedPartitionAttributes(org.apache.geode.cache.FixedPartitionAttributes) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) DistributedRegion(org.apache.geode.internal.cache.DistributedRegion) File(java.io.File) Cache(org.apache.geode.cache.Cache) ClientCache(org.apache.geode.cache.client.ClientCache) Test(org.junit.Test)
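
The disk stores checked above are declared in ewtest.xml, which is not reproduced here. For orientation, an equivalent programmatic configuration of the first store and its persistent template can be sketched with the public API; the values (queue size 500, directory "west", asynchronous writes) come from the assertions in the test, while the region name is simplified because "orders/west" denotes a subregion path in the XML.

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class DiskStoreTemplateSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        // Disk store matching the "persistentDiskStore1" assertions:
        // async queue size of 500, files written under ./west.
        File west = new File("west");
        west.mkdirs();
        DiskStore store1 = cache.createDiskStoreFactory()
            .setQueueSize(500)
            .setDiskDirs(new File[] { west })
            .create("persistentDiskStore1");
        // Persistent replicate region writing asynchronously to that store,
        // analogous to the "persistent" template in the XML.
        Region<String, String> westOrders = cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT)
            .setDiskStoreName(store1.getName())
            .setDiskSynchronous(false)
            .create("orders-west");
        westOrders.put("order-1", "shipped");
        cache.close();
    }
}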

Example 35 with DiskStore

use of org.apache.geode.cache.DiskStore in project geode by apache.

the class PartitionedRegionEvictionDUnitTest method testEntryLRUWithOverflowToDisk.

@Test
public void testEntryLRUWithOverflowToDisk() {
    final Host host = Host.getHost(0);
    final VM vm2 = host.getVM(2);
    final VM vm3 = host.getVM(3);
    final String uniqName = getUniqueName();
    final int redundantCopies = 1;
    final int maxBuckets = 8;
    final int maxEntries = 16;
    final String name = uniqName + "-PR";
    final SerializableRunnable create = new SerializableRunnable("Create Entry LRU with Overflow to disk partitioned Region") {

        public void run() {
            try {
                final AttributesFactory factory = new AttributesFactory();
                factory.setOffHeap(isOffHeap());
                factory.setPartitionAttributes(new PartitionAttributesFactory().setRedundantCopies(redundantCopies).setTotalNumBuckets(maxBuckets).create());
                factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(maxEntries, EvictionAction.OVERFLOW_TO_DISK));
                factory.setDiskSynchronous(true);
                DiskStoreFactory dsf = getCache().createDiskStoreFactory();
                final File[] diskDirs = new File[1];
                diskDirs[0] = new File("overflowDir/" + uniqName + "_" + OSProcess.getId());
                diskDirs[0].mkdirs();
                dsf.setDiskDirs(diskDirs);
                DiskStore ds = dsf.create(name);
                factory.setDiskStoreName(ds.getName());
                final PartitionedRegion pr = (PartitionedRegion) createRootRegion(name, factory.create());
                assertNotNull(pr);
            } catch (final CacheException ex) {
                Assert.fail("While creating Partitioned region", ex);
            }
        }
    };
    vm3.invoke(create);
    vm2.invoke(create);
    final int extraEntries = 4;
    final SerializableRunnable createBuckets = new SerializableRunnable("Create Buckets") {

        public void run() {
            final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
            assertNotNull(pr);
            for (int counter = 1; counter <= maxEntries + extraEntries; counter++) {
                pr.put(new Integer(counter), new byte[1 * 1024 * 1024]);
            }
        }
    };
    vm3.invoke(createBuckets);
    final SerializableCallable assertBucketAttributesAndEviction = new SerializableCallable("Assert bucket attributes and eviction") {

        public Object call() throws Exception {
            final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
            assertNotNull(pr);
            // assert over-flow behavior in local buckets and number of
            // entries
            // overflowed
            int entriesEvicted = 0;
            for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i.hasNext(); ) {
                final Map.Entry entry = (Map.Entry) i.next();
                final BucketRegion bucketRegion = (BucketRegion) entry.getValue();
                if (bucketRegion == null) {
                    continue;
                }
                assertTrue(bucketRegion.getAttributes().getEvictionAttributes().getAlgorithm().isLRUEntry());
                assertTrue(bucketRegion.getAttributes().getEvictionAttributes().getAction().isOverflowToDisk());
            }
            entriesEvicted += pr.getDiskRegionStats().getNumOverflowOnDisk();
            return new Integer(entriesEvicted);
        }
    };
    final Integer vm2i = (Integer) vm2.invoke(assertBucketAttributesAndEviction);
    final Integer vm3i = (Integer) vm3.invoke(assertBucketAttributesAndEviction);
    final int totalEvicted = vm2i.intValue() + vm3i.intValue();
    assertEquals(extraEntries * 2, totalEvicted);
}
Also used : CacheException(org.apache.geode.cache.CacheException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) Host(org.apache.geode.test.dunit.Host) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) DiskStore(org.apache.geode.cache.DiskStore) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) Iterator(java.util.Iterator) File(java.io.File) Map(java.util.Map) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
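
For comparison, the same entry-LRU, overflow-to-disk configuration for a partitioned region can be sketched outside the DUnit harness. Bucket count and in-memory entry limit mirror the test; redundancy is dropped to 0 so the sketch runs on a single member, and the disk-store and region names are made up.

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class PartitionedOverflowSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        File overflowDir = new File("overflowDir");
        overflowDir.mkdirs();
        DiskStore ds = cache.createDiskStoreFactory()
            .setDiskDirs(new File[] { overflowDir })
            .create("prOverflowStore");
        // Partitioned region: 8 buckets, at most 16 entries held in memory,
        // least recently used entries overflowed to disk synchronously.
        Region<Integer, byte[]> pr = cache.<Integer, byte[]>createRegionFactory(RegionShortcut.PARTITION)
            .setPartitionAttributes(new PartitionAttributesFactory<Integer, byte[]>()
                .setRedundantCopies(0)
                .setTotalNumBuckets(8)
                .create())
            .setEvictionAttributes(
                EvictionAttributes.createLRUEntryAttributes(16, EvictionAction.OVERFLOW_TO_DISK))
            .setDiskSynchronous(true)
            .setDiskStoreName(ds.getName())
            .create("overflowPR");
        for (int i = 1; i <= 20; i++) {
            pr.put(i, new byte[1024]);
        }
        cache.close();
    }
}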

Aggregations

DiskStore (org.apache.geode.cache.DiskStore): 190 uses
Test (org.junit.Test): 120 uses
AttributesFactory (org.apache.geode.cache.AttributesFactory): 91 uses
DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory): 91 uses
File (java.io.File): 79 uses
Region (org.apache.geode.cache.Region): 71 uses
Cache (org.apache.geode.cache.Cache): 61 uses
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 54 uses
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 46 uses
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 44 uses
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 39 uses
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 32 uses
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 31 uses
VM (org.apache.geode.test.dunit.VM): 28 uses
DiskRegion (org.apache.geode.internal.cache.DiskRegion): 24 uses
Host (org.apache.geode.test.dunit.Host): 23 uses
Expectations (org.jmock.Expectations): 23 uses
InternalCache (org.apache.geode.internal.cache.InternalCache): 21 uses
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 21 uses
IOException (java.io.IOException): 20 uses