Use of org.apache.geode.cache.util.CacheWriterAdapter in project geode by apache.
The class AttributesFactoryJUnitTest, method testConnectionPool.
/**
 * @since GemFire 5.7
 */
@Test
public void testConnectionPool() {
  CacheLoader cl = new CacheLoader() {
    public Object load(LoaderHelper helper) throws CacheLoaderException {
      return null;
    }

    public void close() {}
  };
  // pool name on its own
  AttributesFactory factory = new AttributesFactory();
  factory.setPoolName("mypool");
  // pool name combined with a cache writer
  factory = new AttributesFactory();
  factory.setCacheWriter(new CacheWriterAdapter());
  factory.setPoolName("mypool");
  // pool name combined with a cache loader
  factory = new AttributesFactory();
  factory.setCacheLoader(cl);
  factory.setPoolName("mypool");
}
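For context, a minimal sketch of creating the client pool that setPoolName("mypool") refers to; the locator host and port here are hypothetical and not taken from the test:

// assumes imports of org.apache.geode.cache.client.Pool and org.apache.geode.cache.client.PoolManager
Pool pool = PoolManager.createFactory()
    .addLocator("localhost", 10334) // hypothetical locator endpoint
    .create("mypool"); // the name later passed to AttributesFactory.setPoolName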
Use of org.apache.geode.cache.util.CacheWriterAdapter in project geode by apache.
The class PartitionedRegionHelper, method getPRRoot.
/**
 * Return a region that is the root for all PartitionedRegion meta data on this Node. The main
 * administrative Regions contained within are <code>allPartitionedRegion</code> (Scope
 * DISTRIBUTED_ACK), <code>bucket2Node</code> (Scope DISTRIBUTED_ACK), and the dataStore regions.
 *
 * @return a GLOBAL scoped root region used for PartitionedRegion administration
 */
public static LocalRegion getPRRoot(final InternalCache cache, boolean createIfAbsent) {
  DistributedRegion root = (DistributedRegion) cache.getRegion(PR_ROOT_REGION_NAME, true);
  if (root == null) {
    if (!createIfAbsent) {
      return null;
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Creating root Partitioned Admin Region {}",
          PartitionedRegionHelper.PR_ROOT_REGION_NAME);
    }
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);
    factory.setDataPolicy(DataPolicy.REPLICATE);
    factory.addCacheListener(new FixedPartitionAttributesListener());
    if (Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "PRDebug")) {
      factory.addCacheListener(new CacheListenerAdapter() {
        @Override
        public void afterCreate(EntryEvent event) {
          if (logger.isDebugEnabled()) {
            logger.debug(
                "Create Event for allPR: key = {} oldVal = {} newVal = {} Op = {} origin = {} isNetSearch = {}",
                event.getKey(), event.getOldValue(), event.getNewValue(), event.getOperation(),
                event.getDistributedMember(), event.getOperation().isNetSearch());
          }
        }

        @Override
        public void afterUpdate(EntryEvent event) {
          if (logger.isDebugEnabled()) {
            logger.debug(
                "Update Event for allPR: key = {} oldVal = {} newVal = {} Op = {} origin = {} isNetSearch = {}",
                event.getKey(), event.getOldValue(), event.getNewValue(), event.getOperation(),
                event.getDistributedMember(), event.getOperation().isNetSearch());
          }
        }

        @Override
        public void afterDestroy(EntryEvent event) {
          if (logger.isDebugEnabled()) {
            logger.debug(
                "Destroy Event for allPR: key = {} oldVal = {} newVal = {} Op = {} origin = {} isNetSearch = {}",
                event.getKey(), event.getOldValue(), event.getNewValue(), event.getOperation(),
                event.getDistributedMember(), event.getOperation().isNetSearch());
          }
        }
      });
      factory.setCacheWriter(new CacheWriterAdapter() {
        @Override
        public void beforeUpdate(EntryEvent event) throws CacheWriterException {
          // the prConfig node list must advance (otherwise meta data becomes out of sync)
          final PartitionRegionConfig newConf = (PartitionRegionConfig) event.getNewValue();
          final PartitionRegionConfig oldConf = (PartitionRegionConfig) event.getOldValue();
          if (newConf != oldConf && !newConf.isGreaterNodeListVersion(oldConf)) {
            throw new CacheWriterException(
                LocalizedStrings.PartitionedRegionHelper_NEW_PARTITIONEDREGIONCONFIG_0_DOES_NOT_HAVE_NEWER_VERSION_THAN_PREVIOUS_1
                    .toLocalizedString(new Object[] {newConf, oldConf}));
          }
        }
      });
    }
    RegionAttributes ra = factory.create();
    // Create anonymous stats holder for Partitioned Region meta data
    final HasCachePerfStats prMetaStatsHolder = new HasCachePerfStats() {
      public CachePerfStats getCachePerfStats() {
        return new CachePerfStats(cache.getDistributedSystem(), "partitionMetaData");
      }
    };
    try {
      root = (DistributedRegion) cache.createVMRegion(PR_ROOT_REGION_NAME, ra,
          new InternalRegionArguments().setIsUsedForPartitionedRegionAdmin(true)
              .setInternalRegion(true).setCachePerfStatsHolder(prMetaStatsHolder));
      root.getDistributionAdvisor().addMembershipListener(new MemberFailureListener());
    } catch (RegionExistsException ignore) {
      // we avoid this beforehand, but we still have to catch it
      root = (DistributedRegion) cache.getRegion(PR_ROOT_REGION_NAME, true);
    } catch (IOException ieo) {
      Assert.assertTrue(false, "IOException creating Partitioned Region root: " + ieo);
    } catch (ClassNotFoundException cne) {
      Assert.assertTrue(false, "ClassNotFoundException creating Partitioned Region root: " + cne);
    }
  }
  Assert.assertTrue(root != null, "Can not obtain internal Partitioned Region configuration root");
  return root;
}
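The writer above illustrates the common veto pattern: a CacheWriterAdapter that rejects a put by throwing CacheWriterException from beforeUpdate, so the region entry is never modified. A minimal standalone sketch of the same idea; the Versioned interface and the version comparison are hypothetical, used only to make the sketch self-contained:

// assumes imports of org.apache.geode.cache.EntryEvent, org.apache.geode.cache.CacheWriterException,
// and org.apache.geode.cache.util.CacheWriterAdapter
interface Versioned {
  long getVersion(); // hypothetical value type carrying a monotonically increasing version
}

class VersionGuardWriter extends CacheWriterAdapter<String, Versioned> {
  @Override
  public void beforeUpdate(EntryEvent<String, Versioned> event) throws CacheWriterException {
    Versioned oldVal = event.getOldValue();
    Versioned newVal = event.getNewValue();
    // throwing here aborts the put before the cached entry changes
    if (oldVal != null && newVal.getVersion() <= oldVal.getVersion()) {
      throw new CacheWriterException(
          "stale update: version " + newVal.getVersion() + " <= " + oldVal.getVersion());
    }
  }
}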
Use of org.apache.geode.cache.util.CacheWriterAdapter in project geode by apache.
The class DiskRegCbkChkJUnitTest, method testAfterCallbacks.
@Test
public void testAfterCallbacks() {
  region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, getDiskRegionProperties(),
      Scope.LOCAL);
  // testing create callbacks
  region.getAttributesMutator().setCacheListener(new CacheListenerAdapter() {
    @Override
    public void afterCreate(EntryEvent event) {
      intoCreateAfterCbk = true;
    }
  });
  region.getAttributesMutator().setCacheWriter(new CacheWriterAdapter() {
    @Override
    public void beforeCreate(EntryEvent event) {
      region.clear();
    }
  });
  region.create("key1", "createValue");
  assertTrue("Create callback not called", intoCreateAfterCbk);
  // testing update callbacks
  region.getAttributesMutator().setCacheListener(new CacheListenerAdapter() {
    @Override
    public void afterUpdate(EntryEvent event) {
      intoUpdateAfterCbk = true;
    }
  });
  region.getAttributesMutator().setCacheWriter(new CacheWriterAdapter() {
    @Override
    public void beforeUpdate(EntryEvent event) {
      region.clear();
    }
  });
  region.create("key2", "createValue");
  region.put("key2", "updateValue");
  assertTrue("Update callback not called", intoUpdateAfterCbk);
  // testing destroy callbacks
  region.getAttributesMutator().setCacheListener(new CacheListenerAdapter() {
    @Override
    public void afterDestroy(EntryEvent event) {
      intoDestroyAfterCbk = true;
    }
  });
  region.getAttributesMutator().setCacheWriter(new CacheWriterAdapter() {
    @Override
    public void beforeDestroy(EntryEvent event) {
      region.clear();
    }
  });
  region.create("key3", "createValue");
  region.destroy("key3");
  assertTrue("Destroy callback not called", intoDestroyAfterCbk);
}
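DiskRegionHelperFactory.getSyncPersistOnlyRegion is a test helper; judging by its name, it yields a LOCAL region that persists synchronously to disk. A rough public-API sketch of such a region (an assumption about the helper's intent, not its actual implementation; the disk directory, store name, and region name are hypothetical):

// assumes imports of java.io.File, org.apache.geode.cache.DiskStore,
// org.apache.geode.cache.Region, and org.apache.geode.cache.RegionShortcut
DiskStore store = cache.createDiskStoreFactory()
    .setDiskDirs(new File[] {new File("diskDir")}) // hypothetical disk directory
    .create("testDiskStore"); // hypothetical store name
Region<String, String> persistedRegion = cache.<String, String>createRegionFactory(
        RegionShortcut.LOCAL_PERSISTENT)
    .setDiskStoreName("testDiskStore")
    .setDiskSynchronous(true) // "sync persist": writes reach disk before the operation returns
    .create("diskRegCbkChk"); // hypothetical region name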
Use of org.apache.geode.cache.util.CacheWriterAdapter in project geode by apache.
The class MapInterfaceJUnitTest, method testBeforeRegionClearCallBack.
@Test
public void testBeforeRegionClearCallBack() {
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "");
  DistributedSystem ds = DistributedSystem.connect(props);
  Cache cache = null;
  Region region = null;
  AttributesFactory factory = null;
  try {
    cache = CacheFactory.create(ds);
    factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    factory.setCacheWriter(new CacheWriterAdapter() {
      @Override
      public void beforeRegionClear(RegionEvent event) throws CacheWriterException {
        synchronized (this) {
          this.notify();
          MapInterfaceJUnitTest.this.hasBeenNotified = true;
        }
      }
    });
    region = cache.createRegion("testingRegion", factory.create());
    DoesClear doesClear = new DoesClear(region);
    new Thread(doesClear).start();
    synchronized (this) {
      if (!this.hasBeenNotified) {
        this.wait(3000);
      }
    }
    if (!this.hasBeenNotified) {
      fail(" beforeRegionClear call back did not come");
    }
  } catch (Exception e) {
    throw new AssertionError(" failed due to ", e);
  }
  for (int i = 0; i < 100; i++) {
    region.put(new Integer(i), new Integer(i));
  }
  assertEquals(new Integer(50), region.get(new Integer(50)));
  region.localClear();
  assertEquals(null, region.get(new Integer(50)));
  region.close();
  factory.setScope(Scope.DISTRIBUTED_ACK);
  factory.setDataPolicy(DataPolicy.REPLICATE);
  try {
    region = cache.createRegion("testingRegion", factory.create());
  } catch (Exception e) {
    throw new AssertionError(" failed in creating region due to ", e);
  }
  boolean exceptionOccurred = false;
  try {
    region.localClear();
  } catch (UnsupportedOperationException e) {
    exceptionOccurred = true;
  }
  if (!exceptionOccurred) {
    fail(" exception did not occur when it was supposed to occur");
  }
  region.close();
  cache.close();
  ds.disconnect();
}
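Note that the wait/notify hand-off above synchronizes on two different monitors (the anonymous writer instance versus the test instance), so in practice it leans on the 3-second timed wait plus the hasBeenNotified flag. A fragment sketching the same hand-off with a CountDownLatch, shown as an alternative rather than as what the test actually does:

// assumes imports of java.util.concurrent.CountDownLatch and java.util.concurrent.TimeUnit
final CountDownLatch cleared = new CountDownLatch(1);
factory.setCacheWriter(new CacheWriterAdapter() {
  @Override
  public void beforeRegionClear(RegionEvent event) throws CacheWriterException {
    cleared.countDown(); // signal the test thread that the callback fired
  }
});
// ... create the region and start the clearing thread as above, then:
boolean callbackCame = cleared.await(3, TimeUnit.SECONDS); // false if the callback never arrived
// (await throws InterruptedException, so the enclosing test method would declare or handle it)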
Use of org.apache.geode.cache.util.CacheWriterAdapter in project geode by apache.
The class EventIDVerificationDUnitTest, method createClientCache.
public static void createClientCache(String host, Integer port1, Integer port2) throws Exception {
  PORT1 = port1.intValue();
  PORT2 = port2.intValue();
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "");
  new EventIDVerificationDUnitTest().createCache(props);
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.DISTRIBUTED_ACK);
  factory.setMirrorType(MirrorType.NONE);
  ClientServerTestCase.configureConnectionPool(factory, host, new int[] {PORT1, PORT2}, true, -1,
      2, null, -1, -1, false, -2);
  CacheWriter writer = new CacheWriterAdapter() {
    @Override
    public void beforeCreate(EntryEvent event) {
      EventID eventId = ((EntryEventImpl) event).getEventId();
      vm0.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      vm1.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      try {
        super.beforeCreate(event);
      } catch (CacheWriterException e) {
        e.printStackTrace();
        fail("Test failed because of exception = " + e);
      }
    }

    @Override
    public void beforeUpdate(EntryEvent event) {
      EventID eventId = ((EntryEventImpl) event).getEventId();
      vm0.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      vm1.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      try {
        super.beforeUpdate(event);
      } catch (CacheWriterException e) {
        e.printStackTrace();
        fail("Test failed because of exception = " + e);
      }
    }

    @Override
    public void beforeDestroy(EntryEvent event) {
      EventID eventId = ((EntryEventImpl) event).getEventId();
      vm0.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      vm1.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      try {
        super.beforeDestroy(event);
      } catch (CacheWriterException e) {
        e.printStackTrace();
        fail("Test failed because of exception = " + e);
      }
    }

    @Override
    public void beforeRegionDestroy(RegionEvent event) {
      EventID eventId = ((RegionEventImpl) event).getEventId();
      vm0.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      vm1.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      try {
        super.beforeRegionDestroy(event);
      } catch (CacheWriterException e) {
        e.printStackTrace();
        fail("Test failed because of exception = " + e);
      }
    }

    @Override
    public void beforeRegionClear(RegionEvent event) {
      EventID eventId = ((RegionEventImpl) event).getEventId();
      vm0.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      vm1.invoke(() -> EventIDVerificationDUnitTest.setEventIDData(eventId));
      try {
        super.beforeRegionClear(event);
      } catch (CacheWriterException e) {
        e.printStackTrace();
        fail("Test failed because of exception = " + e);
      }
    }
  };
  factory.setCacheWriter(writer);
  /*
   * factory.setCacheListener(new CacheListenerAdapter() {
   *   public void afterCreate(EntryEvent event) {
   *     synchronized (this) {
   *       threadId = ((EntryEventImpl) event).getEventId().getThreadID();
   *       membershipId = ((EntryEventImpl) event).getEventId().getMembershipID();
   *     }
   *   }
   *
   *   public void afterUpdate(EntryEvent event) {
   *     synchronized (this) {
   *       verifyEventIDs(event);
   *     }
   *   }
   *
   *   public void afterDestroy(EntryEvent event) {
   *     synchronized (this) {
   *       verifyEventIDs(event);
   *     }
   *   }
   *
   *   public void afterRegionDestroy(RegionEvent event) {
   *     synchronized (this) {
   *       threadId = ((RegionEventImpl) event).getEventId().getThreadID();
   *       membershipId = ((RegionEventImpl) event).getEventId().getMembershipID();
   *     }
   *   }
   * });
   */
  RegionAttributes attrs = factory.create();
  Region r = cache.createRegion(REGION_NAME, attrs);
  r.registerInterest("ALL_KEYS");
}