Use of org.apache.geode.cache.util.CacheListenerAdapter in project geode by apache.
Class Bug34948DUnitTest, method doCreateOtherVm.
private void doCreateOtherVm() {
  VM vm = getOtherVm();
  vm.invoke(new CacheSerializableRunnable("create root") {
    public void run2() throws CacheException {
      getSystem();
      AttributesFactory af = new AttributesFactory();
      af.setScope(Scope.DISTRIBUTED_ACK);
      af.setDataPolicy(DataPolicy.PRELOADED);
      CacheListener cl = new CacheListenerAdapter() {
        public void afterCreate(EntryEvent event) {
          // getLogWriter().info("afterCreate " + event.getKey());
          if (event.getCallbackArgument() != null) {
            lastCallback = event.getCallbackArgument();
          }
        }

        public void afterUpdate(EntryEvent event) {
          // getLogWriter().info("afterUpdate " + event.getKey());
          if (event.getCallbackArgument() != null) {
            lastCallback = event.getCallbackArgument();
          }
        }

        public void afterInvalidate(EntryEvent event) {
          if (event.getCallbackArgument() != null) {
            lastCallback = event.getCallbackArgument();
          }
        }

        public void afterDestroy(EntryEvent event) {
          if (event.getCallbackArgument() != null) {
            lastCallback = event.getCallbackArgument();
          }
        }
      };
      af.setCacheListener(cl);
      createRootRegion("bug34948", af.create());
    }
  });
}
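For readers who want to try the pattern outside the DUnit harness, here is a minimal standalone sketch of the same idea: a CacheListenerAdapter that overrides only the callbacks it cares about and records the callback argument passed with each operation. The class name CallbackArgumentSketch, the loner cache configuration, and the use of an AtomicReference in place of the test's lastCallback field are illustrative assumptions, not part of the Geode test.

import java.util.concurrent.atomic.AtomicReference;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.util.CacheListenerAdapter;

public class CallbackArgumentSketch {

  public static void main(String[] args) {
    // loner member: no locators, no multicast
    Cache cache = new CacheFactory().set("locators", "").set("mcast-port", "0").create();
    final AtomicReference<Object> lastCallback = new AtomicReference<>();
    Region<String, String> region = cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE)
        .addCacheListener(new CacheListenerAdapter<String, String>() {
          @Override
          public void afterCreate(EntryEvent<String, String> event) {
            if (event.getCallbackArgument() != null) {
              lastCallback.set(event.getCallbackArgument());
            }
          }

          @Override
          public void afterUpdate(EntryEvent<String, String> event) {
            if (event.getCallbackArgument() != null) {
              lastCallback.set(event.getCallbackArgument());
            }
          }
        })
        .create("bug34948");
    // the third argument of put is the callback argument delivered to the listener
    region.put("k1", "v1", "my-callback-arg");
    System.out.println("last callback argument seen: " + lastCallback.get());
    cache.close();
  }
}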
Use of org.apache.geode.cache.util.CacheListenerAdapter in project geode by apache.
Class Bug35214DUnitTest, method testNoEntryExpireDuringGII.
/**
 * Make sure entries do not expire during a GII (getInitialImage).
 */
@Test
public void testNoEntryExpireDuringGII() throws Exception {
  initOtherVm();
  AsyncInvocation updater = null;
  try {
    updater = updateOtherVm();
  } catch (VirtualMachineError e) {
    SystemFailure.initiateFailure(e);
    throw e;
  } catch (Throwable e1) {
    Assert.fail("failed due to " + e1, e1);
  }
  System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
  org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 30;
  callbackFailure = false;
  try {
    AttributesFactory af = new AttributesFactory();
    af.setDataPolicy(DataPolicy.REPLICATE);
    af.setScope(Scope.DISTRIBUTED_ACK);
    af.setStatisticsEnabled(true);
    af.setEntryIdleTimeout(new ExpirationAttributes(1, ExpirationAction.INVALIDATE));
    CacheListener cl1 = new CacheListenerAdapter() {
      public void afterRegionCreate(RegionEvent re) {
        afterRegionCreateSeen = true;
      }

      public void afterInvalidate(EntryEvent e) {
        callbackAssertTrue("afterRegionCreate not seen", afterRegionCreateSeen);
        // make sure region is initialized
        callbackAssertTrue("not initialized", ((LocalRegion) e.getRegion()).isInitialized());
        expirationCount++;
        org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
      }
    };
    af.addCacheListener(cl1);
    final Region r1 = createRootRegion("r1", af.create());
    ThreadUtils.join(updater, 60 * 1000);
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return r1.values().size() == 0;
      }

      public String description() {
        return "region never became empty";
      }
    };
    Wait.waitForCriterion(ev, 2 * 1000, 200, true);
    assertEquals(0, r1.values().size());
    assertEquals(ENTRY_COUNT, r1.keySet().size());
  } finally {
    org.apache.geode.internal.cache.InitialImageOperation.slowImageProcessing = 0;
    System.getProperties().remove(LocalRegion.EXPIRY_MS_PROPERTY);
    assertEquals(null, System.getProperty(LocalRegion.EXPIRY_MS_PROPERTY));
  }
  assertFalse("Errors in callbacks; check logs for details", callbackFailure);
}
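The test above depends on DUnit helpers (initOtherVm, callbackAssertTrue, createRootRegion) and internal hooks such as slowImageProcessing. As a rough standalone illustration of the expiration building blocks it exercises, the following sketch configures a one-second idle timeout with statistics enabled and counts invalidations through a CacheListenerAdapter; the region name, sleep duration, and class name are assumptions.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.ExpirationAction;
import org.apache.geode.cache.ExpirationAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.util.CacheListenerAdapter;

public class IdleExpirationSketch {

  public static void main(String[] args) throws InterruptedException {
    Cache cache = new CacheFactory().set("locators", "").set("mcast-port", "0").create();
    final AtomicInteger invalidations = new AtomicInteger();
    Region<String, String> region = cache.<String, String>createRegionFactory(RegionShortcut.REPLICATE)
        .setStatisticsEnabled(true) // statistics are required for idle-timeout expiration
        .setEntryIdleTimeout(new ExpirationAttributes(1, ExpirationAction.INVALIDATE))
        .addCacheListener(new CacheListenerAdapter<String, String>() {
          @Override
          public void afterInvalidate(EntryEvent<String, String> e) {
            invalidations.incrementAndGet();
          }
        })
        .create("r1");
    region.put("key0", "value0");
    Thread.sleep(5_000); // give the one-second idle timeout time to fire
    System.out.println("invalidations seen: " + invalidations.get());
    // INVALIDATE keeps the key but removes the value
    System.out.println("key kept, value gone: "
        + (region.containsKey("key0") && !region.containsValueForKey("key0")));
    cache.close();
  }
}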
Use of org.apache.geode.cache.util.CacheListenerAdapter in project geode by apache.
Class TXExpiryJUnitTest, method generalEntryExpirationTest.
public void generalEntryExpirationTest(final Region<String, String> exprReg,
    ExpirationAttributes exprAtt, boolean useTTL) throws CacheException {
  final LocalRegion lr = (LocalRegion) exprReg;
  final boolean[] wasDestroyed = { false };
  AttributesMutator<String, String> mutator = exprReg.getAttributesMutator();
  final AtomicInteger ac = new AtomicInteger();
  final AtomicInteger au = new AtomicInteger();
  final AtomicInteger ai = new AtomicInteger();
  final AtomicInteger ad = new AtomicInteger();
  if (useTTL) {
    mutator.setEntryTimeToLive(exprAtt);
  } else {
    mutator.setEntryIdleTimeout(exprAtt);
  }
  final CacheListener<String, String> cl = new CacheListenerAdapter<String, String>() {
    public void afterCreate(EntryEvent<String, String> e) {
      ac.incrementAndGet();
    }

    public void afterUpdate(EntryEvent<String, String> e) {
      au.incrementAndGet();
    }

    public void afterInvalidate(EntryEvent<String, String> e) {
      ai.incrementAndGet();
    }

    public void afterDestroy(EntryEvent<String, String> e) {
      ad.incrementAndGet();
      if (e.getKey().equals("key0")) {
        synchronized (wasDestroyed) {
          wasDestroyed[0] = true;
          wasDestroyed.notifyAll();
        }
      }
    }

    public void afterRegionInvalidate(RegionEvent<String, String> event) {
      fail("Unexpected invocation of afterRegionInvalidate");
    }

    public void afterRegionDestroy(RegionEvent<String, String> event) {
      if (!event.getOperation().isClose()) {
        fail("Unexpected invocation of afterRegionDestroy");
      }
    }
  };
  mutator.addCacheListener(cl);
  try {
    ExpiryTask.suspendExpiration();
    // Test to ensure an expiration does not cause a conflict
    for (int i = 0; i < 2; i++) {
      exprReg.put("key" + i, "value" + i);
    }
    this.txMgr.begin();
    exprReg.put("key0", "value");
    waitForEntryExpiration(lr, "key0");
    assertEquals("value", exprReg.getEntry("key0").getValue());
    try {
      ExpiryTask.suspendExpiration();
      this.txMgr.commit();
    } catch (CommitConflictException error) {
      fail("Expiration should not cause commit to fail");
    }
    assertEquals("value", exprReg.getEntry("key0").getValue());
    waitForEntryExpiration(lr, "key0");
    synchronized (wasDestroyed) {
      assertEquals(true, wasDestroyed[0]);
    }
    assertTrue(!exprReg.containsKey("key0"));
    // key1 is the canary for the rest of the entries
    waitForEntryToBeDestroyed(exprReg, "key1");
    // rollback and failed commit test, ensure expiration continues
    for (int j = 0; j < 2; j++) {
      synchronized (wasDestroyed) {
        wasDestroyed[0] = false;
      }
      ExpiryTask.suspendExpiration();
      for (int i = 0; i < 2; i++) {
        exprReg.put("key" + i, "value" + i);
      }
      this.txMgr.begin();
      exprReg.put("key0", "value");
      waitForEntryExpiration(lr, "key0");
      assertEquals("value", exprReg.getEntry("key0").getValue());
      String checkVal;
      ExpiryTask.suspendExpiration();
      if (j == 0) {
        checkVal = "value0";
        this.txMgr.rollback();
      } else {
        checkVal = "conflictVal";
        final TXManagerImpl txMgrImpl = (TXManagerImpl) this.txMgr;
        TXStateProxy tx = txMgrImpl.internalSuspend();
        exprReg.put("key0", checkVal);
        txMgrImpl.internalResume(tx);
        try {
          this.txMgr.commit();
          fail("Expected CommitConflictException!");
        } catch (CommitConflictException expected) {
        }
      }
      waitForEntryExpiration(lr, "key0");
      synchronized (wasDestroyed) {
        assertEquals(true, wasDestroyed[0]);
      }
      assertTrue(!exprReg.containsKey("key0"));
      // key1 is the canary for the rest of the entries
      waitForEntryToBeDestroyed(exprReg, "key1");
    }
  } finally {
    mutator.removeCacheListener(cl);
    ExpiryTask.permitExpiration();
  }
}
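The transaction/expiration interplay above relies on internal classes (TXManagerImpl, TXStateProxy, ExpiryTask). A simplified sketch using only the public transaction API is shown below; the region name and class name are assumptions, and the conflict branch is only hinted at in a comment because the real test injects the conflicting write by suspending the transaction through internal calls.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.CommitConflictException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class TxSketch {

  public static void main(String[] args) {
    Cache cache = new CacheFactory().set("locators", "").set("mcast-port", "0").create();
    Region<String, String> region =
        cache.<String, String>createRegionFactory(RegionShortcut.LOCAL).create("exprReg");
    region.put("key0", "initial");

    CacheTransactionManager txMgr = cache.getCacheTransactionManager();
    txMgr.begin();
    region.put("key0", "txValue");
    // In the JUnit test above, a conflicting out-of-transaction write is injected while the
    // transaction is suspended, which makes commit() throw CommitConflictException.
    try {
      txMgr.commit();
      System.out.println("committed, key0 = " + region.get("key0"));
    } catch (CommitConflictException conflict) {
      System.out.println("commit failed because of a conflicting write: " + conflict.getMessage());
    }
    cache.close();
  }
}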
Use of org.apache.geode.cache.util.CacheListenerAdapter in project geode by apache.
Class CopyJUnitTest, method createCache.
private void createCache(boolean copyOnRead) throws CacheException {
  Properties p = new Properties();
  // loner
  p.setProperty(MCAST_PORT, "0");
  this.cache = CacheFactory.create(DistributedSystem.connect(p));
  this.cache.setCopyOnRead(copyOnRead);
  AttributesFactory af = new AttributesFactory();
  af.setScope(Scope.LOCAL);
  af.setCacheListener(new CacheListenerAdapter() {
    public void afterCreate(EntryEvent event) {
      oldValue = event.getOldValue();
      newValue = event.getNewValue();
    }

    public void afterUpdate(EntryEvent event) {
      oldValue = event.getOldValue();
      newValue = event.getNewValue();
    }

    public void afterInvalidate(EntryEvent event) {
      oldValue = event.getOldValue();
      newValue = event.getNewValue();
    }

    public void afterDestroy(EntryEvent event) {
      oldValue = event.getOldValue();
      newValue = event.getNewValue();
    }

    public void afterRegionInvalidate(RegionEvent event) {
      // ignore
    }

    public void afterRegionDestroy(RegionEvent event) {
      // ignore
    }

    public void close() {
      oldValue = null;
      newValue = null;
    }
  });
  this.region = this.cache.createRegion("CopyJUnitTest", af.create());
}
Use of org.apache.geode.cache.util.CacheListenerAdapter in project geode by apache.
Class PartitionedRegionHelper, method getPRRoot.
/**
 * Return a region that is the root for all PartitionedRegion meta data on this Node. The main
 * administrative Regions contained within are <code>allPartitionedRegion</code> (Scope
 * DISTRIBUTED_ACK), <code>bucket2Node</code> (Scope DISTRIBUTED_ACK), and the dataStore regions.
 *
 * @return a GLOBAL scoped root region used for PartitionedRegion administration
 */
public static LocalRegion getPRRoot(final InternalCache cache, boolean createIfAbsent) {
  DistributedRegion root = (DistributedRegion) cache.getRegion(PR_ROOT_REGION_NAME, true);
  if (root == null) {
    if (!createIfAbsent) {
      return null;
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Creating root Partitioned Admin Region {}", PartitionedRegionHelper.PR_ROOT_REGION_NAME);
    }
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);
    factory.setDataPolicy(DataPolicy.REPLICATE);
    factory.addCacheListener(new FixedPartitionAttributesListener());
    if (Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "PRDebug")) {
      factory.addCacheListener(new CacheListenerAdapter() {
        @Override
        public void afterCreate(EntryEvent event) {
          if (logger.isDebugEnabled()) {
            logger.debug("Create Event for allPR: key = {} oldVal = {} newVal = {} Op = {} origin = {} isNetSearch = {}",
                event.getKey(), event.getOldValue(), event.getNewValue(), event.getOperation(),
                event.getDistributedMember(), event.getOperation().isNetSearch());
          }
        }

        @Override
        public void afterUpdate(EntryEvent event) {
          if (logger.isDebugEnabled()) {
            logger.debug("Update Event for allPR: key = {} oldVal = {} newVal = {} Op = {} origin = {} isNetSearch = {}",
                event.getKey(), event.getOldValue(), event.getNewValue(), event.getOperation(),
                event.getDistributedMember(), event.getOperation().isNetSearch());
          }
        }

        @Override
        public void afterDestroy(EntryEvent event) {
          if (logger.isDebugEnabled()) {
            logger.debug("Destroy Event for allPR: key = {} oldVal = {} newVal = {} Op = {} origin = {} isNetSearch = {}",
                event.getKey(), event.getOldValue(), event.getNewValue(), event.getOperation(),
                event.getDistributedMember(), event.getOperation().isNetSearch());
          }
        }
      });
      factory.setCacheWriter(new CacheWriterAdapter() {
        @Override
        public void beforeUpdate(EntryEvent event) throws CacheWriterException {
          // the prConfig node list must advance (otherwise meta data becomes out of sync)
          final PartitionRegionConfig newConf = (PartitionRegionConfig) event.getNewValue();
          final PartitionRegionConfig oldConf = (PartitionRegionConfig) event.getOldValue();
          if (newConf != oldConf && !newConf.isGreaterNodeListVersion(oldConf)) {
            throw new CacheWriterException(
                LocalizedStrings.PartitionedRegionHelper_NEW_PARTITIONEDREGIONCONFIG_0_DOES_NOT_HAVE_NEWER_VERSION_THAN_PREVIOUS_1
                    .toLocalizedString(new Object[] { newConf, oldConf }));
          }
        }
      });
    }
    RegionAttributes ra = factory.create();
    // Create anonymous stats holder for Partitioned Region meta data
    final HasCachePerfStats prMetaStatsHolder = new HasCachePerfStats() {
      public CachePerfStats getCachePerfStats() {
        return new CachePerfStats(cache.getDistributedSystem(), "partitionMetaData");
      }
    };
    try {
      root = (DistributedRegion) cache.createVMRegion(PR_ROOT_REGION_NAME, ra,
          new InternalRegionArguments().setIsUsedForPartitionedRegionAdmin(true)
              .setInternalRegion(true).setCachePerfStatsHolder(prMetaStatsHolder));
      root.getDistributionAdvisor().addMembershipListener(new MemberFailureListener());
    } catch (RegionExistsException ignore) {
      // we avoid this beforehand, but we still have to catch it
      root = (DistributedRegion) cache.getRegion(PR_ROOT_REGION_NAME, true);
    } catch (IOException ieo) {
      Assert.assertTrue(false, "IOException creating Partitioned Region root: " + ieo);
    } catch (ClassNotFoundException cne) {
      Assert.assertTrue(false, "ClassNotFoundException creating Partitioned Region root: " + cne);
    }
  }
  Assert.assertTrue(root != null, "Can not obtain internal Partitioned Region configuration root");
  return root;
}