use of org.apache.geode.cache.EntryEvent in project geode by apache.
the class TXJUnitTest method testTxEvent.
@Test
public void testTxEvent() throws CacheException {
TransactionId myTxId;
Region<String, String> reg1 = this.region;
this.txMgr.setListener(new TransactionListener() {
@Override
public void afterCommit(TransactionEvent event) {
listenerAfterCommit = 1;
te = event;
}
@Override
public void afterFailedCommit(TransactionEvent event) {
listenerAfterFailedCommit = 1;
te = event;
}
@Override
public void afterRollback(TransactionEvent event) {
listenerAfterRollback = 1;
te = event;
}
@Override
public void close() {
listenerClose = 1;
}
});
// make sure each operation has the correct transaction event
// check create
this.txMgr.begin();
myTxId = this.txMgr.getTransactionId();
reg1.create("key1", "value1");
this.txMgr.rollback();
assertEquals(1, this.te.getEvents().size());
assertEquals(0, this.te.getPutEvents().size());
assertEquals(0, this.te.getInvalidateEvents().size());
assertEquals(0, this.te.getDestroyEvents().size());
{
Cache teCache = this.te.getCache();
assertEquals(teCache, this.cache);
List<EntryEvent<?, ?>> creates = this.te.getCreateEvents();
assertEquals(myTxId, this.te.getTransactionId());
assertEquals(1, creates.size());
for (EntryEvent ev : creates) {
assertEquals(myTxId, ev.getTransactionId());
assertTrue(ev.getRegion() == reg1);
assertEquals("key1", ev.getKey());
assertEquals("value1", ev.getNewValue());
assertEquals(null, ev.getOldValue());
verifyEventProps(ev);
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(!ev.getOperation().isExpiration());
assertTrue(ev.getOperation().isDistributed());
}
}
// check put of existing entry
reg1.create("key1", "value0");
this.txMgr.begin();
myTxId = this.txMgr.getTransactionId();
reg1.put("key1", "value1");
this.txMgr.rollback();
assertEquals(1, this.te.getEvents().size());
assertEquals(0, this.te.getCreateEvents().size());
assertEquals(0, this.te.getInvalidateEvents().size());
assertEquals(0, this.te.getDestroyEvents().size());
{
Cache teCache = this.te.getCache();
assertEquals(teCache, this.cache);
List<EntryEvent<?, ?>> puts = this.te.getPutEvents();
assertEquals(myTxId, this.te.getTransactionId());
assertEquals(1, puts.size());
for (EntryEvent ev : puts) {
assertEquals(myTxId, ev.getTransactionId());
assertTrue(ev.getRegion() == reg1);
assertEquals("key1", ev.getKey());
assertEquals("value1", ev.getNewValue());
assertEquals("value0", ev.getOldValue());
verifyEventProps(ev);
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(!ev.getOperation().isExpiration());
assertTrue(ev.getOperation().isDistributed());
}
}
reg1.localDestroy("key1");
// check put of non-existent entry
this.txMgr.begin();
myTxId = this.txMgr.getTransactionId();
reg1.put("key1", "value0");
this.txMgr.rollback();
assertEquals(1, this.te.getEvents().size());
assertEquals(0, this.te.getPutEvents().size());
assertEquals(0, this.te.getInvalidateEvents().size());
assertEquals(0, this.te.getDestroyEvents().size());
{
Cache teCache = this.te.getCache();
assertEquals(teCache, this.cache);
List<EntryEvent<?, ?>> creates = this.te.getCreateEvents();
assertEquals(myTxId, this.te.getTransactionId());
assertEquals(1, creates.size());
for (EntryEvent ev : creates) {
assertEquals(myTxId, ev.getTransactionId());
assertTrue(ev.getRegion() == reg1);
assertEquals("key1", ev.getKey());
assertEquals("value0", ev.getNewValue());
assertEquals(null, ev.getOldValue());
verifyEventProps(ev);
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(!ev.getOperation().isExpiration());
assertTrue(ev.getOperation().isDistributed());
}
}
// check distributed invalidate of existing entry
reg1.create("key1", "value0");
this.txMgr.begin();
myTxId = this.txMgr.getTransactionId();
reg1.invalidate("key1");
this.txMgr.rollback();
assertEquals(1, this.te.getEvents().size());
assertEquals(0, this.te.getCreateEvents().size());
assertEquals(0, this.te.getPutEvents().size());
assertEquals(0, this.te.getDestroyEvents().size());
{
Cache teCache = this.te.getCache();
assertEquals(teCache, this.cache);
List<EntryEvent<?, ?>> invalidates = this.te.getInvalidateEvents();
assertEquals(myTxId, this.te.getTransactionId());
assertEquals(1, invalidates.size());
for (EntryEvent ev : invalidates) {
assertEquals(myTxId, ev.getTransactionId());
assertTrue(ev.getRegion() == reg1);
assertEquals("key1", ev.getKey());
assertEquals(null, ev.getNewValue());
assertEquals("value0", ev.getOldValue());
verifyEventProps(ev);
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(!ev.getOperation().isExpiration());
assertTrue(ev.getOperation().isDistributed());
}
}
reg1.localDestroy("key1");
// check local invalidate of existing entry
reg1.create("key1", "value0");
this.txMgr.begin();
myTxId = this.txMgr.getTransactionId();
reg1.localInvalidate("key1");
this.txMgr.rollback();
assertEquals(1, this.te.getEvents().size());
assertEquals(0, this.te.getCreateEvents().size());
assertEquals(0, this.te.getPutEvents().size());
assertEquals(0, this.te.getDestroyEvents().size());
{
Cache teCache = this.te.getCache();
assertEquals(teCache, this.cache);
List<EntryEvent<?, ?>> invalidates = this.te.getInvalidateEvents();
assertEquals(myTxId, this.te.getTransactionId());
assertEquals(1, invalidates.size());
for (EntryEvent ev : invalidates) {
assertEquals(myTxId, ev.getTransactionId());
assertTrue(ev.getRegion() == reg1);
assertEquals("key1", ev.getKey());
assertEquals(null, ev.getNewValue());
assertEquals("value0", ev.getOldValue());
verifyEventProps(ev);
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(!ev.getOperation().isExpiration());
if (!isPR())
assertTrue(!ev.getOperation().isDistributed());
}
}
reg1.localDestroy("key1");
// check distributed destroy of existing entry
reg1.create("key1", "value0");
this.txMgr.begin();
myTxId = this.txMgr.getTransactionId();
reg1.destroy("key1");
this.txMgr.rollback();
assertEquals(1, this.te.getEvents().size());
assertEquals(0, this.te.getCreateEvents().size());
assertEquals(0, this.te.getPutEvents().size());
assertEquals(0, this.te.getInvalidateEvents().size());
{
Cache teCache = this.te.getCache();
assertEquals(teCache, this.cache);
List<EntryEvent<?, ?>> destroys = this.te.getDestroyEvents();
assertEquals(myTxId, this.te.getTransactionId());
assertEquals(1, destroys.size());
for (EntryEvent ev : destroys) {
assertEquals(myTxId, ev.getTransactionId());
assertTrue(ev.getRegion() == reg1);
assertEquals("key1", ev.getKey());
assertEquals(null, ev.getNewValue());
assertEquals("value0", ev.getOldValue());
verifyEventProps(ev);
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(!ev.getOperation().isExpiration());
assertTrue(ev.getOperation().isDistributed());
}
}
reg1.localDestroy("key1");
// check local destroy of existing entry
reg1.create("key1", "value0");
this.txMgr.begin();
myTxId = this.txMgr.getTransactionId();
reg1.localDestroy("key1");
this.txMgr.rollback();
assertEquals(1, this.te.getEvents().size());
assertEquals(0, this.te.getCreateEvents().size());
assertEquals(0, this.te.getPutEvents().size());
assertEquals(0, this.te.getInvalidateEvents().size());
{
Cache teCache = this.te.getCache();
assertEquals(teCache, this.cache);
List<EntryEvent<?, ?>> destroys = this.te.getDestroyEvents();
assertEquals(myTxId, this.te.getTransactionId());
assertEquals(1, destroys.size());
for (EntryEvent ev : destroys) {
assertEquals(myTxId, ev.getTransactionId());
assertTrue(ev.getRegion() == reg1);
assertEquals("key1", ev.getKey());
assertEquals(null, ev.getNewValue());
assertEquals("value0", ev.getOldValue());
verifyEventProps(ev);
assertEquals(null, ev.getCallbackArgument());
assertEquals(true, ev.isCallbackArgumentAvailable());
assertTrue(!ev.isOriginRemote());
assertTrue(!ev.getOperation().isExpiration());
if (!isPR())
assertTrue(!ev.getOperation().isDistributed());
}
}
reg1.localDestroy("key1");
}
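The registration-and-inspect pattern this test exercises can be reduced to a few lines. Below is a minimal sketch, assuming an existing Cache and Region and using org.apache.geode.cache.util.TransactionListenerAdapter so only the callback of interest needs overriding; the method, region, key, and value names are illustrative, not taken from the test.
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.TransactionEvent;
import org.apache.geode.cache.util.TransactionListenerAdapter;

static void demoTxListener(Cache cache, Region<String, String> region) {
  CacheTransactionManager txMgr = cache.getCacheTransactionManager();
  txMgr.setListener(new TransactionListenerAdapter() {
    @Override
    public void afterRollback(TransactionEvent event) {
      // A rolled-back transaction still reports the operations it contained.
      System.out.println("rolled back " + event.getEvents().size()
          + " op(s) in tx " + event.getTransactionId());
    }
  });
  txMgr.begin();
  region.put("key1", "value1");
  txMgr.rollback(); // fires afterRollback with one pending create event
}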
use of org.apache.geode.cache.EntryEvent in project geode by apache.
the class DiskRegionJUnitTest method testClearInteractionWithLRUList_Bug37605.
/**
* If an entry that has just been written to disk sees a clear() just before updating the
* LRU list, that cleared entry should not go onto the LRU list
*/
@Test
public void testClearInteractionWithLRUList_Bug37605() throws Exception {
DiskRegionProperties props = new DiskRegionProperties();
props.setOverflow(true);
props.setOverFlowCapacity(1);
props.setDiskDirs(dirs);
props.setRegionName("IGNORE_EXCEPTION_testClearInteractionWithLRUList_Bug37605");
final Region region = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache, props);
final Thread th = new Thread(new Runnable() {
public void run() {
region.clear();
}
});
region.getAttributesMutator().setCacheListener(new CacheListenerAdapter() {
public void afterCreate(EntryEvent event) {
th.start();
}
});
region.create("key1", "value1");
try {
cache.getLogger().info("waiting for clear to finish");
ThreadUtils.join(th, 30 * 1000);
} catch (Exception ie) {
DiskRegionJUnitTest.this.exceptionOccurred = true;
DiskRegionJUnitTest.this.failureCause = ie.toString();
}
assertFalse(this.failureCause, this.exceptionOccurred);
NewLRUClockHand lruList = ((VMLRURegionMap) ((LocalRegion) region).entries)._getLruList();
assertEquals(0, region.size());
lruList.audit();
assertNull("The LRU List should have been empty instead it contained a cleared entry", lruList.getLRUEntry());
}
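DiskRegionHelperFactory and DiskRegionProperties are test-only helpers. A rough public-API equivalent of the region configured here (synchronous persistence with LRU overflow to disk once more than one entry is in memory) might look like the sketch below; this is an approximation under those assumptions, not the helper's actual implementation.
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

static Region<String, String> createOverflowRegion(Cache cache) {
  return cache.<String, String>createRegionFactory(RegionShortcut.LOCAL_PERSISTENT)
      // keep at most one entry in memory; overflow the rest to disk, LRU first
      .setEvictionAttributes(
          EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
      .create("overflowRegion");
}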
use of org.apache.geode.cache.EntryEvent in project geode by apache.
the class EventIDVerificationInP2PDUnitTest method createServerCache.
public static void createServerCache(Integer type) throws Exception {
new EventIDVerificationInP2PDUnitTest().createCache(new Properties());
AttributesFactory factory = new AttributesFactory();
if (type.intValue() == DISTRIBUTED_ACK) {
  factory.setScope(Scope.DISTRIBUTED_ACK);
} else if (type.intValue() == GLOBAL) {
  factory.setScope(Scope.GLOBAL);
} else {
  factory.setScope(Scope.DISTRIBUTED_NO_ACK);
}
factory.setDataPolicy(DataPolicy.REPLICATE);
factory.addCacheListener(new CacheListenerAdapter() {
public void afterCreate(EntryEvent event) {
eventId = ((InternalCacheEvent) event).getEventId();
if (receiver) {
synchronized (EventIDVerificationInP2PDUnitTest.class) {
gotCallback = true;
EventIDVerificationInP2PDUnitTest.class.notify();
}
}
}
public void afterUpdate(EntryEvent event) {
eventId = ((InternalCacheEvent) event).getEventId();
if (receiver) {
synchronized (EventIDVerificationInP2PDUnitTest.class) {
gotCallback = true;
EventIDVerificationInP2PDUnitTest.class.notify();
}
}
}
public void afterDestroy(EntryEvent event) {
eventId = ((InternalCacheEvent) event).getEventId();
if (receiver) {
synchronized (EventIDVerificationInP2PDUnitTest.class) {
gotCallback = true;
EventIDVerificationInP2PDUnitTest.class.notify();
}
}
}
public void afterRegionDestroy(RegionEvent event) {
eventId = ((InternalCacheEvent) event).getEventId();
if (receiver) {
synchronized (EventIDVerificationInP2PDUnitTest.class) {
gotCallback = true;
EventIDVerificationInP2PDUnitTest.class.notify();
}
}
}
public void afterRegionInvalidate(RegionEvent event) {
eventId = ((InternalCacheEvent) event).getEventId();
if (receiver) {
synchronized (EventIDVerificationInP2PDUnitTest.class) {
gotCallback = true;
EventIDVerificationInP2PDUnitTest.class.notify();
}
}
}
});
RegionAttributes attrs = factory.create();
cache.createRegion(REGION_NAME, attrs);
}
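Each callback above only signals; the waiting side of the handshake is not part of this excerpt. A consumer of those notify() calls would be a standard guarded block on the same monitor. The method below is a hypothetical sketch that reuses the test's gotCallback flag; it is not taken from the actual test class.
// Hypothetical waiter for the notify() calls in the listener above.
static boolean waitForCallback(long timeoutMs) throws InterruptedException {
  synchronized (EventIDVerificationInP2PDUnitTest.class) {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!gotCallback) {
      long remaining = deadline - System.currentTimeMillis();
      if (remaining <= 0) {
        return false; // timed out before any listener fired
      }
      EventIDVerificationInP2PDUnitTest.class.wait(remaining);
    }
    gotCallback = false; // consume the signal before the next event
    return true;
  }
}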
use of org.apache.geode.cache.EntryEvent in project geode by apache.
the class CqServiceImpl method processEntryEvent.
private void processEntryEvent(CacheEvent event, Profile localProfile, Profile[] profiles, FilterRoutingInfo frInfo) throws CqException {
final boolean isDebugEnabled = logger.isDebugEnabled();
HashSet<Object> cqUnfilteredEventsSet_newValue = new HashSet<>();
HashSet<Object> cqUnfilteredEventsSet_oldValue = new HashSet<>();
boolean b_cqResults_newValue;
boolean b_cqResults_oldValue;
boolean queryOldValue;
EntryEvent entryEvent = (EntryEvent) event;
Object eventKey = entryEvent.getKey();
boolean isDupEvent = ((EntryEventImpl) event).isPossibleDuplicate();
// The CQ query needs to be applied when the op is update, destroy, or
// invalidate, and also when the op is create and the event is a possible
// duplicate: when a peer resends a duplicate event it marks it as create
// so that the receiving node applies it (see DR.virtualPut()).
boolean opRequiringQueryOnOldValue = (event.getOperation().isUpdate() || event.getOperation().isDestroy() || event.getOperation().isInvalidate() || (event.getOperation().isCreate() && isDupEvent));
HashMap<String, Integer> matchedCqs = new HashMap<>();
long executionStartTime;
for (int i = -1; i < profiles.length; i++) {
CacheProfile cf;
if (i < 0) {
cf = (CacheProfile) localProfile;
if (cf == null)
continue;
} else {
cf = (CacheProfile) profiles[i];
}
FilterProfile pf = cf.filterProfile;
if (pf == null || pf.getCqMap().isEmpty()) {
continue;
}
Map cqs = pf.getCqMap();
if (isDebugEnabled) {
logger.debug("Profile for {} processing {} CQs", cf.peerMemberId, cqs.size());
}
if (cqs.isEmpty()) {
continue;
}
// Get the new value if it has not been retrieved yet.
if (cqUnfilteredEventsSet_newValue.isEmpty() && (event.getOperation().isCreate() || event.getOperation().isUpdate())) {
Object newValue = entryEvent.getNewValue();
if (newValue != null) {
// We have a new value to run the query on
cqUnfilteredEventsSet_newValue.add(newValue);
}
}
HashMap<Long, Integer> cqInfo = new HashMap<>();
Iterator cqIter = cqs.entrySet().iterator();
while (cqIter.hasNext()) {
Map.Entry cqEntry = (Map.Entry) cqIter.next();
ServerCQImpl cQuery = (ServerCQImpl) cqEntry.getValue();
b_cqResults_newValue = false;
b_cqResults_oldValue = false;
queryOldValue = false;
if (cQuery == null) {
continue;
}
String cqName = cQuery.getServerCqName();
Long filterID = cQuery.getFilterID();
if (isDebugEnabled) {
logger.debug("Processing CQ : {} Key: {}", cqName, eventKey);
}
Integer cqEvent = null;
if (matchedCqs.containsKey(cqName)) {
cqEvent = matchedCqs.get(cqName);
if (isDebugEnabled) {
logger.debug("query {} has already been processed and returned {}", cqName, cqEvent);
}
if (cqEvent == null) {
continue;
}
// Update the Cache Results for this CQ.
if (cqEvent.intValue() == MessageType.LOCAL_CREATE || cqEvent.intValue() == MessageType.LOCAL_UPDATE) {
cQuery.addToCqResultKeys(eventKey);
} else if (cqEvent.intValue() == MessageType.LOCAL_DESTROY) {
cQuery.markAsDestroyedInCqResultKeys(eventKey);
}
} else {
boolean error = false;
{
try {
synchronized (cQuery) {
// Apply query on new value.
if (!cqUnfilteredEventsSet_newValue.isEmpty()) {
executionStartTime = this.stats.startCqQueryExecution();
b_cqResults_newValue = evaluateQuery(cQuery, new Object[] { cqUnfilteredEventsSet_newValue });
this.stats.endCqQueryExecution(executionStartTime);
}
}
// Apply query on oldValue.
if (opRequiringQueryOnOldValue) {
// (CQ results may not be cached for a partitioned region, so the old value can still need querying.)
if (cQuery.cqResultKeysInitialized) {
b_cqResults_oldValue = cQuery.isPartOfCqResult(eventKey);
// Also apply if the query was not executed during cq execute
if ((cQuery.isPR || !CqServiceImpl.EXECUTE_QUERY_DURING_INIT) && !b_cqResults_oldValue) {
queryOldValue = true;
}
if (isDebugEnabled && !cQuery.isPR && !b_cqResults_oldValue) {
logger.debug("Event Key not found in the CQ Result Queue. EventKey : {} CQ Name : {}", eventKey, cqName);
}
} else {
queryOldValue = true;
}
if (queryOldValue) {
if (cqUnfilteredEventsSet_oldValue.isEmpty()) {
Object oldValue = entryEvent.getOldValue();
if (oldValue != null) {
cqUnfilteredEventsSet_oldValue.add(oldValue);
}
}
synchronized (cQuery) {
// Apply query on old value.
if (!cqUnfilteredEventsSet_oldValue.isEmpty()) {
executionStartTime = this.stats.startCqQueryExecution();
b_cqResults_oldValue = evaluateQuery(cQuery, new Object[] { cqUnfilteredEventsSet_oldValue });
this.stats.endCqQueryExecution(executionStartTime);
} else {
if (isDebugEnabled) {
logger.debug("old value for event with key {} is null - query execution not performed", eventKey);
}
}
}
}
// Query oldValue
}
} catch (Exception ex) {
// Any exception in running the query should be caught here and
// buried because this code is running in-line with the message
// processing code and we don't want to kill that thread
error = true;
// CHANGE LOG MESSAGE:
logger.info(LocalizedMessage.create(LocalizedStrings.CqService_ERROR_WHILE_PROCESSING_CQ_ON_THE_EVENT_KEY_0_CQNAME_1_ERROR_2, new Object[] { ((EntryEvent) event).getKey(), cQuery.getName(), ex.getLocalizedMessage() }));
}
if (error) {
cqEvent = MESSAGE_TYPE_EXCEPTION;
} else {
if (b_cqResults_newValue) {
if (b_cqResults_oldValue) {
cqEvent = MESSAGE_TYPE_LOCAL_UPDATE;
} else {
cqEvent = MESSAGE_TYPE_LOCAL_CREATE;
}
// If it's a create and caching is enabled, cache the key
// for this CQ.
cQuery.addToCqResultKeys(eventKey);
} else if (b_cqResults_oldValue) {
// Base invalidate operation is treated as destroy.
// When the invalidate comes through, the entry will no longer
// satisfy the query and will need to be deleted.
cqEvent = MESSAGE_TYPE_LOCAL_DESTROY;
// If caching is enabled, mark this event's key as removed
// from the CQ cache.
cQuery.markAsDestroyedInCqResultKeys(eventKey);
}
}
}
// Get the matching CQs if any.
// synchronized (this.matchingCqMap){
String query = cQuery.getQueryString();
Set matchingCqs = (Set) matchingCqMap.get(query);
if (matchingCqs != null) {
Iterator iter = matchingCqs.iterator();
while (iter.hasNext()) {
String matchingCqName = (String) iter.next();
if (!matchingCqName.equals(cqName)) {
matchedCqs.put(matchingCqName, cqEvent);
if (isDebugEnabled) {
logger.debug("Adding CQ into Matching CQ Map: {} Event is: {}", matchingCqName, cqEvent);
}
}
}
}
}
if (cqEvent != null && cQuery.isRunning()) {
if (isDebugEnabled) {
logger.debug("Added event to CQ with client-side name: {} key: {} operation : {}", cQuery.cqName, eventKey, cqEvent);
}
cqInfo.put(filterID, cqEvent);
CqQueryVsdStats stats = cQuery.getVsdStats();
if (stats != null) {
stats.updateStats(cqEvent);
}
}
}
if (cqInfo.size() > 0) {
if (pf.isLocalProfile()) {
if (isDebugEnabled) {
logger.debug("Setting local CQ matches to {}", cqInfo);
}
frInfo.setLocalCqInfo(cqInfo);
} else {
if (isDebugEnabled) {
logger.debug("Setting CQ matches for {} to {}", cf.getDistributedMember(), cqInfo);
}
frInfo.setCqRoutingInfo(cf.getDistributedMember(), cqInfo);
}
}
}
// iteration over Profiles.
}
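The heart of processEntryEvent is the mapping from the two query evaluations to a CQ message type. Condensed into a standalone sketch, with the MESSAGE_TYPE_* constants assumed to be the fields used in the method above:
// Condensed sketch of the cqEvent decision above; not the actual code path.
static Integer classifyCqEvent(boolean matchesNewValue, boolean matchesOldValue) {
  if (matchesNewValue) {
    // the entry satisfies the query after the operation
    return matchesOldValue
        ? MESSAGE_TYPE_LOCAL_UPDATE // it was already in the result set
        : MESSAGE_TYPE_LOCAL_CREATE; // it newly enters the result set
  }
  if (matchesOldValue) {
    return MESSAGE_TYPE_LOCAL_DESTROY; // it no longer satisfies the query
  }
  return null; // the CQ is unaffected by this event
}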
use of org.apache.geode.cache.EntryEvent in project geode by apache.
the class RemoteTransactionDUnitTest method testOriginRemoteIsTrueForRemoteReplicatedRegions.
@Test
public void testOriginRemoteIsTrueForRemoteReplicatedRegions() {
Host host = Host.getHost(0);
VM accessor = host.getVM(0);
VM datastore = host.getVM(1);
initAccessorAndDataStore(accessor, datastore, 0);
class OriginRemoteRRWriter extends CacheWriterAdapter {
int fireC = 0;
int fireD = 0;
int fireU = 0;
public void beforeCreate(EntryEvent event) throws CacheWriterException {
if (!event.isOriginRemote()) {
throw new CacheWriterException("SUP?? This CREATE is supposed to be isOriginRemote");
}
fireC++;
}
public void beforeDestroy(EntryEvent event) throws CacheWriterException {
getGemfireCache().getLoggerI18n().fine("SWAP:writer:createEvent:" + event);
if (!event.isOriginRemote()) {
throw new CacheWriterException("SUP?? This DESTROY is supposed to be isOriginRemote");
}
fireD++;
}
public void beforeUpdate(EntryEvent event) throws CacheWriterException {
if (!event.isOriginRemote()) {
throw new CacheWriterException("SUP?? This UPDATE is supposed to be isOriginRemote");
}
fireU++;
}
}
datastore.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region refRegion = getCache().getRegion(D_REFERENCE);
refRegion.getAttributesMutator().setCacheWriter(new OriginRemoteRRWriter());
return null;
}
});
accessor.invoke(new DoOpsInTX(OP.PUT));
accessor.invoke(new SerializableCallable() {
public Object call() throws Exception {
TXManagerImpl mgr = getGemfireCache().getTxManager();
TXStateProxy tx = mgr.internalSuspend();
assertNotNull(tx);
mgr.internalResume(tx);
mgr.commit();
return null;
}
});
accessor.invoke(new DoOpsInTX(OP.DESTROY));
accessor.invoke(new SerializableCallable() {
public Object call() throws Exception {
TXManagerImpl mgr = getGemfireCache().getTxManager();
TXStateProxy tx = mgr.internalSuspend();
assertNotNull(tx);
mgr.internalResume(tx);
mgr.commit();
return null;
}
});
accessor.invoke(new DoOpsInTX(OP.PUT));
accessor.invoke(new SerializableCallable() {
public Object call() throws Exception {
TXManagerImpl mgr = getGemfireCache().getTxManager();
TXStateProxy tx = mgr.internalSuspend();
assertNotNull(tx);
mgr.internalResume(tx);
mgr.commit();
return null;
}
});
datastore.invoke(new SerializableCallable() {
public Object call() throws Exception {
Region refRegion = getCache().getRegion(D_REFERENCE);
OriginRemoteRRWriter w = (OriginRemoteRRWriter) refRegion.getAttributes().getCacheWriter();
assertEquals(1, w.fireC);
assertEquals(1, w.fireD);
assertEquals(1, w.fireU);
return null;
}
});
}
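The suspend/resume calls above go through the internal TXManagerImpl hooks. The supported public surface for the same idea is CacheTransactionManager.suspend() and resume(TransactionId); a minimal sketch, assuming an existing Cache:
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.TransactionId;

static void suspendResumeCommit(Cache cache) {
  CacheTransactionManager txMgr = cache.getCacheTransactionManager();
  txMgr.begin();
  TransactionId txId = txMgr.suspend(); // detach the transaction from this thread
  // ... other, non-transactional work could run here ...
  txMgr.resume(txId); // reattach it to this thread
  txMgr.commit();
}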