Search in sources:

Example 1 with BucketVisitor

use of org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor in project geode by apache.

In the class PartitionedRegionBucketCreationDistributionDUnitTest, the method validateBucketsDistribution:

/**
 * Builds a runnable that, for every partitioned region whose name suffix lies in
 * [startIndexForRegion, endIndexForRegion), asserts that this member manages at
 * least the expected number of buckets and that each locally managed bucket is
 * backed by the matching subregion under the PR root region.
 *
 * @param startIndexForRegion inclusive start of the region-name suffix range
 * @param endIndexForRegion exclusive end of the region-name suffix range
 * @param noBucketsExpectedOnEachNode minimum bucket count expected on this member
 * @return a {@link CacheSerializableRunnable} performing the validation in the target VM
 */
private CacheSerializableRunnable validateBucketsDistribution(final int startIndexForRegion, final int endIndexForRegion, final int noBucketsExpectedOnEachNode) {
    CacheSerializableRunnable validateBucketDist = new CacheSerializableRunnable("validateBucketsDistribution") {

        // Capture the prefix at construction time so the value travels with the
        // serialized runnable into the remote VM.
        String innerPrPrefix = prPrefix;

        public void run2() {
            Cache cache = getCache();
            final Region root = cache.getRegion(PartitionedRegionHelper.PR_ROOT_REGION_NAME);
            assertNotNull("Root region is null", root);
            for (int i = startIndexForRegion; i < endIndexForRegion; i++) {
                final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + innerPrPrefix + i);
                // Build the failure message WITHOUT dereferencing pr: the original
                // called pr.getName() inside the message, which threw an NPE
                // (instead of a useful assertion failure) exactly when pr was null.
                assertNotNull("Region " + innerPrPrefix + i + " can not be null", pr);
                assertNotNull(pr.getDataStore());
                final int localBSize = pr.getDataStore().getBucketsManaged();
                LogWriterUtils.getLogWriter().info("validateBucketsDistribution() - Number of buckets for " + pr.getName() + " : " + localBSize);
                assertTrue("Bucket Distribution for region = " + pr.getFullPath() + " is not correct for member " + pr.getDistributionManager().getId() + " existing size " + localBSize + " smaller than expected " + noBucketsExpectedOnEachNode, localBSize >= noBucketsExpectedOnEachNode);
                // Every bucket managed locally must correspond to a subregion of
                // the PR root region with an identical full path.
                pr.getDataStore().visitBuckets(new BucketVisitor() {

                    public void visit(Integer bucketId, Region r) {
                        Region bucketRegion = root.getSubregion(pr.getBucketName(bucketId.intValue()));
                        assertEquals(bucketRegion.getFullPath(), r.getFullPath());
                    }
                });
            }
        }
    };
    return validateBucketDist;
}
Also used : CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) Region(org.apache.geode.cache.Region) BucketVisitor(org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor) Cache(org.apache.geode.cache.Cache)

Example 2 with BucketVisitor

use of org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor in project geode by apache.

In the class Bug38741DUnitTest, the method testPartitionedRegionAndCopyOnRead:

/**
   * Ensures that a PartitionedRegion does not make more than the expected number of value copies
   * when copy-on-read is set to true. A counting value object tracks how many times it has been
   * serialized; each step asserts the exact count after puts and gets from both the accessor
   * (localMaxMemory=0) and the datastore VM.
   * 
   * @throws Exception if the distributed test infrastructure fails
   */
@Test
public void testPartitionedRegionAndCopyOnRead() throws Exception {
    final Host h = Host.getHost(0);
    final VM accessor = h.getVM(2);
    final VM datastore = h.getVM(3);
    final String rName = getUniqueName();
    final String k1 = "k1";
    // Datastore VM: plain PR with no redundancy, so exactly one bucket copy exists.
    datastore.invoke(new CacheSerializableRunnable("Create PR DataStore") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setPartitionAttributes(new PartitionAttributesFactory().setRedundantCopies(0).create());
            createRootRegion(rName, factory.create());
        }
    });
    // Accessor VM: localMaxMemory=0 means all data lives on the datastore,
    // so every put must serialize the value across the wire.
    accessor.invoke(new CacheSerializableRunnable("Create PR Accessor and put new value") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setPartitionAttributes(new PartitionAttributesFactory().setLocalMaxMemory(0).setRedundantCopies(0).create());
            Region r = createRootRegion(rName, factory.create());
            SerializationCountingValue val = new SerializationCountingValue();
            r.put(k1, val);
            // First put to a bucket will serialize once to determine the size of the value
            // to know how much extra space the new bucket with the new entry will consume
            // and serialize again to send the bytes
            assertEquals(2, val.count.get());
            // A put to an already created bucket should only be serialized once
            val = new SerializationCountingValue();
            r.put(k1, val);
            assertEquals(1, val.count.get());
        }
    });
    datastore.invoke(new CacheSerializableRunnable("assert datastore entry serialization count") {

        public void run2() throws CacheException {
            PartitionedRegion pr = (PartitionedRegion) getRootRegion(rName);
            // Visit the one bucket (since there is only one value in the entire PR)
            // to directly copy the entry bytes and assert the serialization count.
            // All this extra work is to assure the serialization count does not increase
            // (by de-serializing the value stored in the map, which would then have to be
            // re-serialized).
            pr.getDataStore().visitBuckets(new BucketVisitor() {

                public void visit(Integer bucketId, Region r) {
                    BucketRegion br = (BucketRegion) r;
                    try {
                        KeyInfo keyInfo = new KeyInfo(k1, null, bucketId);
                        RawValue rv = br.getSerialized(keyInfo, false, false, null, null, false);
                        Object val = rv.getRawValue();
                        assertTrue(val instanceof CachedDeserializable);
                        CachedDeserializable cd = (CachedDeserializable) val;
                        SerializationCountingValue scv = (SerializationCountingValue) cd.getDeserializedForReading();
                        assertEquals(1, scv.count.get());
                    } catch (IOException fail) {
                        Assert.fail("Unexpected IOException", fail);
                    }
                }
            });
        }
    });
    // Accessor reads: with copy-on-read enabled, the copy is made from the
    // serialized form received from the datastore, so no extra serialization
    // happens on the accessor side.
    accessor.invoke(new CacheSerializableRunnable("assert accessor entry serialization count") {

        public void run2() throws CacheException {
            Region r = getRootRegion(rName);
            SerializationCountingValue v1 = (SerializationCountingValue) r.get(k1);
            // The counter was incremented once to send the data to the datastore
            assertEquals(1, v1.count.get());
            getCache().setCopyOnRead(true);
            // Once to send the data to the datastore, no need to do a serialization
            // when we make copy since it is serialized from datastore to us.
            SerializationCountingValue v2 = (SerializationCountingValue) r.get(k1);
            assertEquals(1, v2.count.get());
            assertTrue(v1 != v2);
        }
    });
    // Datastore reads: the value is local here, so a copy-on-read get must
    // serialize once more to produce the local copy.
    datastore.invoke(new CacheSerializableRunnable("assert value serialization") {

        public void run2() throws CacheException {
            Region r = getRootRegion(rName);
            SerializationCountingValue v1 = (SerializationCountingValue) r.get(k1);
            // Once to send the value from the accessor to the data store
            assertEquals(1, v1.count.get());
            getCache().setCopyOnRead(true);
            // Once to send the value from the accessor to the data store
            // once to make a local copy
            SerializationCountingValue v2 = (SerializationCountingValue) r.get(k1);
            assertEquals(2, v2.count.get());
            assertTrue(v1 != v2);
        }
    });
}
Also used : CachedDeserializable(org.apache.geode.internal.cache.CachedDeserializable) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) BucketVisitor(org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor) IOException(java.io.IOException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) BucketRegion(org.apache.geode.internal.cache.BucketRegion) KeyInfo(org.apache.geode.internal.cache.KeyInfo) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) LocalRegion(org.apache.geode.internal.cache.LocalRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) RawValue(org.apache.geode.internal.cache.BucketRegion.RawValue) ClientServerTest(org.apache.geode.test.junit.categories.ClientServerTest) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 3 with BucketVisitor

use of org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor in project geode by apache.

In the class SerializableMonth, the method partitionedRegionTest:

/**
 * Exercises a partitioned region from four VMs: each VM puts twelve entries (one per
 * month, with a random year/day) into the PR named {@code prName}, recording its keys,
 * then verifies its own keys are retrievable and — where a local data store exists —
 * that each key's routing object hashes to the bucket that actually holds it.
 * vm3 is expected to be an accessor (no local data store).
 *
 * @param prName name of the partitioned region under test, created beforehand in all VMs
 */
public void partitionedRegionTest(final String prName) {
    /*
     * Do put() operations through VM with PR having both Accessor and Datastore
     */
    vm0.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations1") {

        public void run2() throws CacheException {
            Calendar cal = Calendar.getInstance();
            final Region pr = cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            // The region must start empty; verify via size(), isEmpty() and keySet().
            int size = 0;
            size = pr.size();
            assertEquals("Size doesnt return expected value", 0, size);
            assertEquals("isEmpty doesnt return proper state of the PartitionedRegion", true, pr.isEmpty());
            assertEquals(0, pr.keySet().size());
            for (int i = 0; i <= 11; i++) {
                // Random Date key in a VM-specific year range (here < 2100),
                // remembered in listOfKeys1 for later verification by this VM.
                int yr = (new Integer((int) (Math.random() * 2100))).intValue();
                int month = i;
                int date = (new Integer((int) (Math.random() * 30))).intValue();
                cal.set(yr, month, date);
                Object key = cal.getTime();
                listOfKeys1.add(key);
                assertNotNull(pr);
                pr.put(key, Integer.toString(i));
                assertEquals(Integer.toString(i), pr.get(key));
            }
            PartitionedRegion ppr = (PartitionedRegion) pr;
            try {
                ppr.dumpAllBuckets(false);
            } catch (ReplyException re) {
                Assert.fail("dumpAllBuckets", re);
            }
        }
    });
    vm1.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations2") {

        public void run2() throws CacheException {
            Calendar cal = Calendar.getInstance();
            final Region pr = cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            for (int i = 0; i <= 11; i++) {
                // Year range < 2200; keys recorded in listOfKeys2.
                int yr = (new Integer((int) (Math.random() * 2200))).intValue();
                int month = i;
                int date = (new Integer((int) (Math.random() * 30))).intValue();
                cal.set(yr, month, date);
                Object key = cal.getTime();
                listOfKeys2.add(key);
                assertNotNull(pr);
                pr.put(key, Integer.toString(i));
                assertEquals(Integer.toString(i), pr.get(key));
            }
            PartitionedRegion ppr = (PartitionedRegion) pr;
            try {
                ppr.dumpAllBuckets(false);
            } catch (ReplyException re) {
                Assert.fail("dumpAllBuckets", re);
            }
        }
    });
    vm2.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations2") {

        public void run2() throws CacheException {
            Calendar cal = Calendar.getInstance();
            final Region pr = cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            for (int i = 0; i <= 11; i++) {
                // Year range < 2300; keys recorded in listOfKeys3.
                int yr = (new Integer((int) (Math.random() * 2300))).intValue();
                int month = i;
                int date = (new Integer((int) (Math.random() * 30))).intValue();
                cal.set(yr, month, date);
                Object key = cal.getTime();
                listOfKeys3.add(key);
                assertNotNull(pr);
                pr.put(key, Integer.toString(i));
                assertEquals(Integer.toString(i), pr.get(key));
            }
            PartitionedRegion ppr = (PartitionedRegion) pr;
            try {
                ppr.dumpAllBuckets(false);
            } catch (ReplyException re) {
                Assert.fail("dumpAllBuckets", re);
            }
        }
    });
    vm3.invoke(new CacheSerializableRunnable("doPutCreateInvalidateOperations3") {

        public void run2() throws CacheException {
            Calendar cal = Calendar.getInstance();
            final Region pr = cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            for (int i = 0; i <= 11; i++) {
                // Year range < 2400; keys recorded in listOfKeys4.
                int yr = (new Integer((int) (Math.random() * 2400))).intValue();
                int month = i;
                int date = (new Integer((int) (Math.random() * 30))).intValue();
                cal.set(yr, month, date);
                Object key = cal.getTime();
                listOfKeys4.add(key);
                assertNotNull(pr);
                pr.put(key, Integer.toString(i));
                assertEquals(Integer.toString(i), pr.get(key));
            }
            PartitionedRegion ppr = (PartitionedRegion) pr;
            try {
                ppr.dumpAllBuckets(false);
            } catch (ReplyException re) {
                Assert.fail("dumpAllBuckets", re);
            }
        }
    });
    vm0.invoke(new CacheSerializableRunnable("verifyKeysonVM0") {

        public void run2() throws CacheException {
            final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            // Every key this VM put must be findable in the PR.
            Iterator itr = listOfKeys1.iterator();
            while (itr.hasNext()) {
                assertTrue(searchForKey(pr, (Date) itr.next()));
            }
            // For each locally managed bucket, re-derive the routing object of every
            // resident key and check that it hashes to this bucket's id.
            pr.getDataStore().visitBuckets(new BucketVisitor() {

                public void visit(Integer bucketId, Region r) {
                    Set s = pr.getBucketKeys(bucketId.intValue());
                    Iterator it = s.iterator();
                    while (it.hasNext()) {
                        EntryOperation eo = new EntryOperationImpl(pr, null, it.next(), null, null);
                        PartitionResolver rr = pr.getPartitionResolver();
                        Object o = rr.getRoutingObject(eo);
                        Integer i = new Integer(o.hashCode() % totalNumBuckets);
                        assertEquals(bucketId, i);
                    }
                }
            });
        }
    });
    vm1.invoke(new CacheSerializableRunnable("verifyKeysonVM1") {

        public void run2() throws CacheException {
            final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            Iterator itr = listOfKeys2.iterator();
            while (itr.hasNext()) {
                assertTrue(searchForKey(pr, (Date) itr.next()));
            }
            pr.getDataStore().visitBuckets(new BucketVisitor() {

                public void visit(Integer bucketId, Region r) {
                    Set s = pr.getBucketKeys(bucketId.intValue());
                    Iterator it = s.iterator();
                    while (it.hasNext()) {
                        EntryOperation eo = new EntryOperationImpl(pr, null, it.next(), null, null);
                        PartitionResolver rr = pr.getPartitionResolver();
                        Object o = rr.getRoutingObject(eo);
                        Integer i = new Integer(o.hashCode() % totalNumBuckets);
                        assertEquals(bucketId, i);
                    }
                }
            });
        }
    });
    vm2.invoke(new CacheSerializableRunnable("verifyKeysonVM2") {

        public void run2() throws CacheException {
            final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            // NOTE(review): the original re-assigned the iterator immediately after
            // initializing it (a harmless duplicate statement); removed here.
            Iterator itr = listOfKeys3.iterator();
            while (itr.hasNext()) {
                assertTrue(searchForKey(pr, (Date) itr.next()));
            }
            pr.getDataStore().visitBuckets(new BucketVisitor() {

                public void visit(Integer bucketId, Region r) {
                    Set s = pr.getBucketKeys(bucketId.intValue());
                    Iterator it = s.iterator();
                    while (it.hasNext()) {
                        EntryOperation eo = new EntryOperationImpl(pr, null, it.next(), null, null);
                        PartitionResolver rr = pr.getPartitionResolver();
                        Object o = rr.getRoutingObject(eo);
                        Integer i = new Integer(o.hashCode() % totalNumBuckets);
                        assertEquals(bucketId, i);
                    }
                }
            });
        }
    });
    vm3.invoke(new CacheSerializableRunnable("verifyKeysonVM3") {

        public void run2() throws CacheException {
            final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(prName);
            if (pr == null) {
                fail(prName + " not created");
            }
            Iterator itr = listOfKeys4.iterator();
            while (itr.hasNext()) {
                assertTrue(searchForKey(pr, (Date) itr.next()));
            }
            // vm3 is an accessor: it must not own a local data store.
            assertEquals(pr.getDataStore(), null);
        }
    });
}
Also used : BucketVisitor(org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor) ReplyException(org.apache.geode.distributed.internal.ReplyException) EntryOperationImpl(org.apache.geode.internal.cache.EntryOperationImpl) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion)

Aggregations

BucketVisitor (org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor)3 Region (org.apache.geode.cache.Region)2 CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable)2 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)2 IOException (java.io.IOException)1 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1 AttributesFactory (org.apache.geode.cache.AttributesFactory)1 Cache (org.apache.geode.cache.Cache)1 CacheException (org.apache.geode.cache.CacheException)1 PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)1 ReplyException (org.apache.geode.distributed.internal.ReplyException)1 BucketRegion (org.apache.geode.internal.cache.BucketRegion)1 RawValue (org.apache.geode.internal.cache.BucketRegion.RawValue)1 CachedDeserializable (org.apache.geode.internal.cache.CachedDeserializable)1 EntryOperationImpl (org.apache.geode.internal.cache.EntryOperationImpl)1 KeyInfo (org.apache.geode.internal.cache.KeyInfo)1 LocalRegion (org.apache.geode.internal.cache.LocalRegion)1 Host (org.apache.geode.test.dunit.Host)1 VM (org.apache.geode.test.dunit.VM)1 ClientServerTest (org.apache.geode.test.junit.categories.ClientServerTest)1