Search in sources :

Example 11 with Scope

use of org.apache.geode.cache.Scope in project geode by apache.

From the class PartitionedRegionCreationJUnitTest, the method test002PartionedRegionInitialization.

/**
 * Verifies that creating a PartitionedRegion correctly initializes the internal root region.
 * Checks performed:
 * <p>
 * (1) the root region exists (is not null)
 * <p>
 * (2) the root region's scope is DISTRIBUTED_ACK
 * <p>
 * (3) the root region's data policy is REPLICATE
 *
 * @throws RegionExistsException if the helper cannot create the partitioned region
 */
@Test
public void test002PartionedRegionInitialization() throws RegionExistsException {
    String regionName = "testpartionedRegionInitialization";
    PartitionedRegionTestHelper.createPartionedRegion(regionName);
    // The PR machinery keeps its metadata under a well-known internal root region.
    Region rootRegion = PartitionedRegionTestHelper.getExistingRegion(PartitionedRegionHelper.PR_ROOT_REGION_NAME);
    if (rootRegion == null) {
        fail("testpartionedRegionInitialization() - the " + PartitionedRegionHelper.PR_ROOT_REGION_NAME + " do not exists");
    }
    RegionAttributes rootAttributes = rootRegion.getAttributes();
    Scope rootScope = rootAttributes.getScope();
    if (!rootScope.isDistributedAck()) {
        fail("testpartionedRegionInitialization() - the " + PartitionedRegionHelper.PR_ROOT_REGION_NAME + " scope is not distributed_ack");
    }
    assertEquals(DataPolicy.REPLICATE, rootAttributes.getDataPolicy());
    if (logger.fineEnabled()) {
        logger.fine("testpartionedRegionInitialization() Successfully Complete ..  ");
    }
}
Also used : Scope(org.apache.geode.cache.Scope) RegionAttributes(org.apache.geode.cache.RegionAttributes) Region(org.apache.geode.cache.Region) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Example 12 with Scope

use of org.apache.geode.cache.Scope in project geode by apache.

From the class SearchLoadAndWriteProcessor, the method doSearchAndLoad.

/**
 * Resolves a value for {@code event}. For a LOCAL-scope region only the local cache loader is
 * consulted; for distributed scopes the full search/load protocol is delegated to
 * {@code searchAndLoad}. Afterwards the event's operation is rewritten to record how the value
 * was obtained (net search, net load, or local load).
 *
 * NOTE(review): {@code requestInProgress} is not reset in a finally block, so it remains true
 * if {@code searchAndLoad} throws (e.g. TimeoutException) — confirm this is intentional.
 */
void doSearchAndLoad(EntryEventImpl event, TXStateInterface txState, Object localValue) throws CacheLoaderException, TimeoutException {
    this.requestInProgress = true;
    RegionAttributes attributes = region.getAttributes();
    CacheLoader regionLoader = ((AbstractRegion) region).basicGetLoader();
    if (attributes.getScope().isLocal()) {
        // Local scope: never go to the network; a local loader invocation is the only option.
        event.setNewValue(doLocalLoad(regionLoader, false));
    } else {
        searchAndLoad(event, txState, localValue);
    }
    this.requestInProgress = false;
    // Tag the event with an operation that reflects where the value came from, preserving
    // the create/update distinction of the original operation.
    if (this.netSearch) {
        event.setOperation(event.getOperation().isCreate() ? Operation.SEARCH_CREATE : Operation.SEARCH_UPDATE);
    } else if (this.netLoad) {
        event.setOperation(event.getOperation().isCreate() ? Operation.NET_LOAD_CREATE : Operation.NET_LOAD_UPDATE);
    } else if (this.localLoad) {
        event.setOperation(event.getOperation().isCreate() ? Operation.LOCAL_LOAD_CREATE : Operation.LOCAL_LOAD_UPDATE);
    }
}
Also used : Scope(org.apache.geode.cache.Scope) RegionAttributes(org.apache.geode.cache.RegionAttributes) CacheLoader(org.apache.geode.cache.CacheLoader)

Example 13 with Scope

use of org.apache.geode.cache.Scope in project geode by apache.

From the class SearchLoadAndWriteProcessor, the method searchAndLoad.

/**
 * Resolves a value for {@code event}, in order of preference:
 * <ol>
 * <li>If the transactional view says the entry has no value anywhere, just invoke the loader.</li>
 * <li>If the local value is a distributed INVALID token, or the region is replicated, skip the
 * net search and invoke the loader.</li>
 * <li>If the region is not GLOBAL scope and a local cache loader exists, invoke it and return
 * its result.</li>
 * <li>If the region is LOCAL scope (and has no loader), return with no value.</li>
 * <li>Otherwise do a netSearch amongst selected peers; if it returns a blob, use that as the
 * result (deserialized or not per {@code isSerialized}).</li>
 * <li>If the netSearch found nothing, fall back to {@code load(event)}.</li>
 * </ol>
 * The outcome is communicated by mutating {@code event} and the {@code result}/{@code isSerialized}
 * fields of this processor.
 */
private void searchAndLoad(EntryEventImpl event, TXStateInterface txState, Object localValue) throws CacheLoaderException, TimeoutException {
    RegionAttributes attrs = region.getAttributes();
    Scope scope = attrs.getScope();
    DataPolicy dataPolicy = attrs.getDataPolicy();
    if (txState != null) {
        TXEntryState tx = txState.txReadEntry(event.getKeyInfo(), region, false, true);
        if (tx != null) {
            if (tx.noValueInSystem()) {
                // If the tx view has it invalid or destroyed everywhere
                // then don't do a netsearch. We want to see the
                // transactional view.
                load(event);
                return;
            }
        }
    }
    // if mirrored then we can optimize by skipping netsearch in some cases,
    // and if also skip netSearch if we find an INVALID token since we
    // know it was distributed. (Otherwise it would be a LOCAL_INVALID token)
    {
        if (localValue == Token.INVALID || dataPolicy.withReplication()) {
            load(event);
            return;
        }
    }
    Object obj = null;
    if (!scope.isGlobal()) {
        // copy into local var to prevent race condition
        CacheLoader loader = ((AbstractRegion) region).basicGetLoader();
        if (loader != null) {
            // A non-global region with a local loader: use the loader's result directly,
            // publishing it through both the event and this processor's result field.
            obj = doLocalLoad(loader, true);
            Assert.assertTrue(obj != Token.INVALID && obj != Token.LOCAL_INVALID);
            event.setNewValue(obj);
            this.isSerialized = false;
            this.result = obj;
            return;
        }
        // LOCAL scope with no loader: nothing more can be done; leave the result unset.
        if (scope.isLocal()) {
            return;
        }
    }
    // Distributed region without a usable local loader result: ask the peers.
    netSearchForBlob();
    if (this.result != null) {
        // Tokens must never escape as results of a net search.
        Assert.assertTrue(this.result != Token.INVALID && this.result != Token.LOCAL_INVALID);
        if (this.isSerialized) {
            // The peer shipped a serialized blob; hand it over without deserializing here.
            event.setSerializedNewValue((byte[]) this.result);
        } else {
            event.setNewValue(this.result);
        }
        event.setVersionTag(this.versionTag);
        return;
    }
    // netSearch found nothing anywhere; last resort is a plain load.
    load(event);
}
Also used : Scope(org.apache.geode.cache.Scope) RegionAttributes(org.apache.geode.cache.RegionAttributes) CacheLoader(org.apache.geode.cache.CacheLoader) DataPolicy(org.apache.geode.cache.DataPolicy)

Example 14 with Scope

use of org.apache.geode.cache.Scope in project geode by apache.

From the class SearchLoadAndWriteProcessor, the method initialize.

/**
 * Binds this processor to the given region and key, capturing the callback argument for later
 * use. When the region's scope is distributed, also captures the cache distribution advisor,
 * the distribution manager and the search timeout, and registers this processor as a
 * membership listener so peer departures can be observed during a search.
 */
void initialize(LocalRegion theRegion, Object theKey, Object theCallbackArg) {
    this.region = theRegion;
    this.regionName = theRegion.getFullPath();
    this.key = theKey;
    this.aCallbackArgument = theCallbackArg;
    Scope regionScope = theRegion.getAttributes().getScope();
    if (regionScope.isDistributed()) {
        this.advisor = ((CacheDistributionAdvisee) this.region).getCacheDistributionAdvisor();
        this.distributionManager = theRegion.getDistributionManager();
        this.timeout = getSearchTimeout();
        this.advisor.addMembershipListener(this);
    }
}
Also used : Scope(org.apache.geode.cache.Scope) RegionAttributes(org.apache.geode.cache.RegionAttributes)

Example 15 with Scope

use of org.apache.geode.cache.Scope in project geode by apache.

From the class DiskRegionTestImpl, the method testBackupFillValues.

/**
   * Tests fillValues on backup regions.
   *
   * Note: The regions in the following description all have the same unique name. 1) Create backup
   * region in VM0 and add some values so they get backed up 2) Close that region 3) Create
   * non-mirrored distributed region in VM1 and populate with over 1M of data 4) Create a mirrored
   * KEYS region in VM2. This will cause VM2 to have all the keys but no values. 5) Re-create the
   * backup region in VM0 with mirroring KEY_VALUES. This will get the keys from VM2 and the values
   * from VM1 using fillValues. The region should end up with the keys created in step 1, and they
   * should not be faulted into the VM.
   */
public void testBackupFillValues() throws CacheException {
    RegionAttributes attrs = this.rtc.getRegionAttributes();
    // This scenario only makes sense when the region under test persists to disk.
    assertTrue("This test not appropriate for non-backup regions", attrs.getPersistBackup());
    final String name = this.rtc.getUniqueName();
    final String key1 = "KEY1";
    final String key2 = "KEY2";
    final String value1 = "VALUE1";
    final String value2 = "VALUE2";
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    // VM vm2 = host.getVM(2);
    // Step 1: in VM0, create the backup region and seed it so the values end up on disk.
    vm0.invoke(new CacheSerializableRunnable("Create backup Region in VM0") {

        public void run2() throws CacheException {
            Region rgn = DiskRegionTestImpl.this.rtc.createRegion(name);
            rgn.create(key1, value1);
            rgn.create(key2, value2);
            // create entries that will be overwritten by getInitialImage below
            rgn.create(new Integer(0), "TEMP-0");
            rgn.create(new Integer(1), "TEMP-1");
        // no longer to close cache in 6.5, otherwise the 2 vms will splitbrain
        // CacheTestCase.closeCache();
        }
    });
    // Step 2: in VM1, create a non-mirrored distributed region with the same name and fill it
    // with bulk data; this VM later supplies the values during the fillValues exchange.
    vm1.invoke(new CacheSerializableRunnable("Create & Populate non-mirrored in VM1") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            // set scope to be same as test region
            Scope scope = DiskRegionTestImpl.this.rtc.getRegionAttributes().getScope();
            factory.setScope(scope);
            DataPolicy dataPolicy = DiskRegionTestImpl.this.rtc.getRegionAttributes().getDataPolicy();
            factory.setDataPolicy(dataPolicy);
            RegionAttributes attrs2 = factory.create();
            Region rgn = DiskRegionTestImpl.this.rtc.createRegion(name, attrs2);
            // Fill the region with some keys.
            for (int i = 0; i < NUM_ENTRIES; i++) {
                byte[] value = new byte[VALUE_SIZE];
                Arrays.fill(value, (byte) 0xAB);
                rgn.put(new Integer(i), value);
            }
            // just for sanity:
            assertEquals(NUM_ENTRIES + 2, rgn.keySet().size());
        }
    });
    // Step 3: close VM0's cache so the backup region must be recovered from disk in step 4.
    vm0.invoke(new CacheSerializableRunnable("Close Cache in VM0") {

        public void run2() throws CacheException {
            CacheTestCase.closeCache();
        }
    });
    // vm2.invoke(new CacheSerializableRunnable("Create mirrored KEYS region in VM2") {
    // public void run2() throws CacheException {
    // AttributesFactory factory = new AttributesFactory();
    // // set scope to be same as test region
    // Scope scope = DiskRegionTestImpl.this.rtc.getRegionAttributes().getScope();
    // factory.setScope(scope);
    // // set mirror KEYS
    // factory.setMirrorType(MirrorType.KEYS);
    // RegionAttributes attrs2 = factory.create();
    // Region rgn = DiskRegionTestImpl.this.rtc.createRegion(name, attrs2);
    // }
    // });
    // Step 4: re-create the region in VM0 as a persistent replicate; it should recover its own
    // disk entries and pull the bulk entries from VM1, then pass all the checks below both
    // before and after a close/re-create cycle.
    String runnableName = "Re-create backup region in VM0 with mirror " + "KEYS_VALUES and Do Verification";
    vm0.invoke(new CacheSerializableRunnable(runnableName) {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory(DiskRegionTestImpl.this.rtc.getRegionAttributes());
            factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
            RegionAttributes attrs2 = factory.create();
            // DebuggerSupport.waitForJavaDebugger(rtc.getLogWriter(), "About to create region...");
            Region rgn = DiskRegionTestImpl.this.rtc.createRegion(name, attrs2);
            // verify
            assertEquals(NUM_ENTRIES + 2, rgn.keySet().size());
            // RECOVER_VALUES is hard-coded true; the else branch documents the expected
            // behavior if values were NOT faulted in during recovery.
            boolean RECOVER_VALUES = true;
            if (RECOVER_VALUES) {
                assertEquals(value1, rgn.getEntry(key1).getValue());
                assertEquals(value2, rgn.getEntry(key2).getValue());
            } else {
                assertNull(valueInVM(rgn, key1));
                assertNull(valueInVM(rgn, key2));
            }
            // Regardless of in-VM recovery, the values must be present on disk.
            assertEquals(value1, valueOnDisk(rgn, key1));
            assertEquals(value2, valueOnDisk(rgn, key2));
            // The following also verifies TEMP values were overwritten
            for (int i = 0; i < NUM_ENTRIES; i++) {
                Region.Entry entry = rgn.getEntry(new Integer(i));
                assertNotNull("No entry for key " + i, entry);
                byte[] v = (byte[]) entry.getValue();
                assertNotNull("Null value for key " + i, v);
                assertEquals(VALUE_SIZE, v.length);
                // test a byte
                assertEquals((byte) 0xAB, v[i % VALUE_SIZE]);
            }
            // Close and re-create once more: the same assertions must hold after a second
            // recovery, proving the state survived the round trip to disk.
            rgn.close();
            rgn = DiskRegionTestImpl.this.rtc.createRegion(name, attrs2);
            // verify
            assertEquals(NUM_ENTRIES + 2, rgn.keySet().size());
            if (RECOVER_VALUES) {
                assertEquals(value1, rgn.getEntry(key1).getValue());
                assertEquals(value2, rgn.getEntry(key2).getValue());
            } else {
                assertNull(valueInVM(rgn, key1));
                assertNull(valueInVM(rgn, key2));
            }
            assertEquals(value1, valueOnDisk(rgn, key1));
            assertEquals(value2, valueOnDisk(rgn, key2));
            // The following also verifies TEMP values were overwritten
            for (int i = 0; i < NUM_ENTRIES; i++) {
                Region.Entry entry = rgn.getEntry(new Integer(i));
                assertNotNull("No entry for key " + i, entry);
                byte[] v = (byte[]) entry.getValue();
                assertNotNull("Null value for key " + i, v);
                assertEquals(VALUE_SIZE, v.length);
                // test a byte
                assertEquals((byte) 0xAB, v[i % VALUE_SIZE]);
            }
        }

        // Reads the entry's value as held in this VM's memory (bypasses disk fault-in).
        private Object valueInVM(Region rgn, Object key) throws EntryNotFoundException {
            org.apache.geode.internal.cache.LocalRegion lrgn = (org.apache.geode.internal.cache.LocalRegion) rgn;
            return lrgn.getValueInVM(key);
        }

        // Reads the entry's value directly from the region's disk store.
        private Object valueOnDisk(Region rgn, Object key) throws EntryNotFoundException {
            org.apache.geode.internal.cache.LocalRegion lrgn = (org.apache.geode.internal.cache.LocalRegion) rgn;
            return lrgn.getValueOnDisk(key);
        }
    });
}
Also used : RegionAttributes(org.apache.geode.cache.RegionAttributes) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) AttributesFactory(org.apache.geode.cache.AttributesFactory) Scope(org.apache.geode.cache.Scope) VM(org.apache.geode.test.dunit.VM) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) Region(org.apache.geode.cache.Region) DataPolicy(org.apache.geode.cache.DataPolicy)

Aggregations

Scope (org.apache.geode.cache.Scope)16 RegionAttributes (org.apache.geode.cache.RegionAttributes)11 AttributesFactory (org.apache.geode.cache.AttributesFactory)4 Cache (org.apache.geode.cache.Cache)4 CacheLoader (org.apache.geode.cache.CacheLoader)3 DataPolicy (org.apache.geode.cache.DataPolicy)3 PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)3 Region (org.apache.geode.cache.Region)3 IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)3 Test (org.junit.Test)3 HashSet (java.util.HashSet)2 CacheException (org.apache.geode.cache.CacheException)2 FixedPartitionAttributes (org.apache.geode.cache.FixedPartitionAttributes)2 PartitionAttributes (org.apache.geode.cache.PartitionAttributes)2 CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable)2 File (java.io.File)1 Set (java.util.Set)1 CancelCriterion (org.apache.geode.CancelCriterion)1 InternalGemFireException (org.apache.geode.InternalGemFireException)1 CacheEvent (org.apache.geode.cache.CacheEvent)1