Example usage of org.apache.geode.cache.RegionFactory in the Apache Geode project.
From the class DiskRegionDUnitTest, method testPersistentReplicateB4NonPersistent:
@Test
public void testPersistentReplicateB4NonPersistent() {
  Host host = Host.getHost(0);
  VM persistentVm = host.getVM(0);
  VM replicateVm = host.getVM(1);
  VM attributesVm = host.getVM(2);
  final String regionName = getName();

  // First member creates the region as a persistent replicate; concurrency
  // checks must be enabled on it.
  persistentVm.invoke(new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      RegionFactory factory = getCache().createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      Region region = factory.create(regionName);
      assertTrue(region.getAttributes().getConcurrencyChecksEnabled());
      return null;
    }
  });

  // Second member joins with a plain (non-persistent) replicate; it must
  // also end up with concurrency checks enabled.
  replicateVm.invoke(new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      RegionFactory factory = getCache().createRegionFactory(RegionShortcut.REPLICATE);
      Region region = factory.create(regionName);
      assertTrue(region.getAttributes().getConcurrencyChecksEnabled());
      return null;
    }
  });

  // Third member builds the region from raw attributes (DISTRIBUTED_ACK
  // scope) rather than a shortcut; concurrency checks must still be on.
  attributesVm.invoke(new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      Cache cache = getCache();
      AttributesFactory attributes = new AttributesFactory();
      attributes.setScope(Scope.DISTRIBUTED_ACK);
      RegionFactory factory = cache.createRegionFactory(attributes.create());
      Region region = factory.create(regionName);
      assertNotNull(region);
      assertTrue(region.getAttributes().getConcurrencyChecksEnabled());
      return null;
    }
  });
}
Example usage of org.apache.geode.cache.RegionFactory in the Apache Geode project.
From the class DistributionManagerDUnitTest, method testAckSevereAlertThreshold:
/**
 * Tests that a severe-level alert is generated if a member does not respond with an ack quickly
 * enough. vm0 and vm1 create a region and set ack-severe-alert-threshold. vm1 has a cache
 * listener in its region that sleeps when notified, forcing the operation to take longer than
 * ack-wait-threshold + ack-severe-alert-threshold
 */
@Test
public void testAckSevereAlertThreshold() throws Exception {
  // Start from a clean slate so the small thresholds below actually take effect.
  disconnectAllFromDS();
  Host host = Host.getHost(0);
  // VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  // in order to set a small ack-wait-threshold, we have to remove the
  // system property established by the dunit harness
  String oldAckWait = (String) System.getProperties().remove(DistributionConfig.GEMFIRE_PREFIX + ACK_WAIT_THRESHOLD);
  try {
    // Both thresholds are 3 seconds, so a listener that sleeps longer than ~6s
    // should push the put past ack-wait + ack-severe-alert and trigger the alert.
    final Properties props = getDistributedSystemProperties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(ACK_WAIT_THRESHOLD, "3");
    props.setProperty(ACK_SEVERE_ALERT_THRESHOLD, "3");
    props.setProperty(NAME, "putter");
    getSystem(props);
    // This (controller) VM is the "putter": a DISTRIBUTED_ACK replicate so the
    // put below blocks waiting for vm1's ack.
    Region rgn = (new RegionFactory()).setScope(Scope.DISTRIBUTED_ACK).setEarlyAck(false).setDataPolicy(DataPolicy.REPLICATE).create("testRegion");
    vm1.invoke(new SerializableRunnable("Connect to distributed system") {
      public void run() {
        // vm1 is the "sleeper": its cache listener stalls the reply so the
        // putter crosses the severe-alert threshold. The reply-wait warning
        // is expected, so suppress it from failing the test.
        props.setProperty(NAME, "sleeper");
        getSystem(props);
        IgnoredException.addIgnoredException("elapsed while waiting for replies");
        RegionFactory rf = new RegionFactory();
        Region r = rf.setScope(Scope.DISTRIBUTED_ACK).setDataPolicy(DataPolicy.REPLICATE).setEarlyAck(false).addCacheListener(getSleepingListener(false)).create("testRegion");
        myCache = r.getCache();
        try {
          // Registers the listener that records the severe alert into
          // alertReceived (checked under alertGuard below).
          createAlertListener();
        } catch (Exception e) {
          throw new RuntimeException("failed to create alert listener", e);
        }
      }
    });
    // now we have two caches set up. vm1 has a listener that will sleep
    // and cause the severe-alert threshold to be crossed
    // this will hang until vm1 responds
    rgn.put("bomb", "pow!");
    rgn.getCache().close();
    basicGetSystem().disconnect();
    vm1.invoke(new SerializableRunnable("disconnect from ds") {
      public void run() {
        // Tear down vm1's system; the second isConnected() check covers the
        // case where myCache was already closed and the first branch was skipped.
        if (!myCache.isClosed()) {
          if (basicGetSystem().isConnected()) {
            basicGetSystem().disconnect();
          }
          myCache = null;
        }
        if (basicGetSystem().isConnected()) {
          basicGetSystem().disconnect();
        }
        // The alert listener must have fired while the putter was blocked.
        synchronized (alertGuard) {
          assertTrue(alertReceived);
        }
      }
    });
  } finally {
    // Restore the harness-established ack-wait-threshold property, if it existed.
    if (oldAckWait != null) {
      System.setProperty(DistributionConfig.GEMFIRE_PREFIX + ACK_WAIT_THRESHOLD, oldAckWait);
    }
  }
}
Example usage of org.apache.geode.cache.RegionFactory in the Apache Geode project.
From the class PartitionedRegionEquiJoinIntegrationTest, method createColocatedPartitionRegion:
/**
 * Creates a partitioned region that is colocated with the given region.
 *
 * @param regionName name of the new region
 * @param colocatedRegion name of the region to colocate with
 * @return the newly created partitioned region
 */
public Region createColocatedPartitionRegion(String regionName, final String colocatedRegion) {
  PartitionAttributesFactory attributesFactory = new PartitionAttributesFactory();
  attributesFactory.setColocatedWith(colocatedRegion);
  return CacheUtils.getCache()
      .createRegionFactory(RegionShortcut.PARTITION)
      .setPartitionAttributes(attributesFactory.create())
      .create(regionName);
}
Example usage of org.apache.geode.cache.RegionFactory in the Apache Geode project.
From the class PartitionedRegionEquiJoinIntegrationTest, method createPartitionRegion:
/**
 * Creates a partitioned region with default partition attributes.
 *
 * @param regionName name of the new region
 * @return the newly created partitioned region
 */
public Region createPartitionRegion(String regionName) {
  PartitionAttributesFactory attributesFactory = new PartitionAttributesFactory();
  return CacheUtils.getCache()
      .createRegionFactory(RegionShortcut.PARTITION)
      .setPartitionAttributes(attributesFactory.create())
      .create(regionName);
}
Example usage of org.apache.geode.cache.RegionFactory in the Apache Geode project.
From the class InitializeIndexEntryDestroyQueryDUnitTest, method testAsyncIndexInitDuringEntryDestroyAndQuery:
/**
 * Verifies that, while an index is repeatedly created and removed and entries are concurrently
 * destroyed and re-created, queries against the region never return UNDEFINED results.
 */
@Test
public void testAsyncIndexInitDuringEntryDestroyAndQuery() {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  setCacheInVMs(vm0);
  name = "PartionedPortfolios";
  // Create a LOCAL-scope region with asynchronous index maintenance so index
  // updates race with entry operations.
  vm0.invoke(new CacheSerializableRunnable("Create local region with asynchronous index maintenance") {
    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      Region localRegion = null;
      try {
        AttributesFactory attr = new AttributesFactory();
        attr.setValueConstraint(PortfolioData.class);
        attr.setScope(Scope.LOCAL);
        attr.setIndexMaintenanceSynchronous(false);
        RegionFactory regionFactory = cache.createRegionFactory(attr.create());
        localRegion = regionFactory.create(name);
      } catch (IllegalStateException ex) {
        LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
      }
      assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
      assertNotNull("Region ref null", localRegion);
      assertTrue("Region ref claims to be destroyed", !localRegion.isDestroyed());
    }
  });
  final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
  // Putting the data into the PR's created
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
  // Async task 1: repeatedly create and remove the status index to exercise
  // index initialization concurrently with the entry churn below.
  AsyncInvocation asyInvk0 = vm0.invokeAsync(new CacheSerializableRunnable("Create Index with Hook") {
    @Override
    public void run2() throws CacheException {
      for (int i = 0; i < cntDest; i++) {
        // Create Index first to go in hook.
        Cache cache = getCache();
        Index index = null;
        try {
          index = cache.getQueryService().createIndex("statusIndex", "p.status", "/" + name + " p");
        } catch (Exception e1) {
          e1.printStackTrace();
          fail("Index creation failed");
        }
        assertNotNull(index);
        Wait.pause(100);
        getCache().getQueryService().removeIndex(index);
        Wait.pause(100);
      }
    }
  });
  // Async task 2: cycle through the region's keys, destroying and re-putting
  // each entry so index maintenance sees live mutations.
  AsyncInvocation asyInvk1 = vm0.invokeAsync(new CacheSerializableRunnable("Change value in region") {
    @Override
    public void run2() throws CacheException {
      // Do a put in region.
      Region r = getCache().getRegion(name);
      for (int i = 0, j = 0; i < 1000; i++, j++) {
        PortfolioData p = (PortfolioData) r.get(j);
        getCache().getLogger().fine("Going to destroy the value" + p);
        r.destroy(j);
        Wait.pause(100);
        // Put the value back again.
        getCache().getLogger().fine("Putting the value back" + p);
        r.put(j, p);
        // Wrap j back to the first key. BUGFIX: reset to -1 (not 0) so the
        // loop's j++ lands on key 0 again; resetting to 0 skipped key 0 on
        // every cycle after the first.
        if (j == cntDest - 1) {
          j = -1;
        }
      }
    }
  });
  // Foreground: repeatedly query and assert no UNDEFINED elements ever appear.
  vm0.invoke(new CacheSerializableRunnable("Run query on region") {
    @Override
    public void run2() throws CacheException {
      Region r = getCache().getRegion(name);
      Query query = getCache().getQueryService().newQuery("select * from /" + name + " p where p.status = 'active'");
      // Now run the query
      SelectResults results = null;
      for (int i = 0; i < 500; i++) {
        try {
          getCache().getLogger().fine("Querying the region");
          results = (SelectResults) query.execute();
        } catch (Exception e) {
          e.printStackTrace();
          // BUGFIX: if execute() failed, 'results' is null (first iteration)
          // or stale (later iterations); skip validation rather than NPE or
          // re-checking an old result set.
          continue;
        }
        for (Object obj : results) {
          if (obj instanceof Undefined) {
            fail("Found an undefined element" + Arrays.toString(results.toArray()));
          }
        }
      }
    }
  });
  ThreadUtils.join(asyInvk0, 1000 * 1000);
  if (asyInvk0.exceptionOccurred()) {
    Assert.fail("asyInvk0 failed", asyInvk0.getException());
  }
  ThreadUtils.join(asyInvk1, 1000 * 1000);
  if (asyInvk1.exceptionOccurred()) {
    Assert.fail("asyInvk1 failed", asyInvk1.getException());
  }
}
Aggregations