Search in sources :

Example 31 with EntryEventImpl

use of org.apache.geode.internal.cache.EntryEventImpl in project geode by apache.

The class PRBucketSynchronizationDUnitTest, method createEntry2.

private boolean createEntry2(VM vm, final InternalDistributedMember primary, final VersionSource primaryVersionID) {
    return (Boolean) vm.invoke(new SerializableCallable("create entry2") {

        @Override
        public Object call() {
            // Fabricate events that look as if they came from the primary member and
            // apply them directly to this cache's bucket region map.
            PartitionedRegion partitionedRegion = (PartitionedRegion) TestRegion;
            BucketRegion bucketRegion = partitionedRegion.getDataStore().getLocalBucketById(0);

            // First event: a remote-looking CREATE for Object3 tagged with the
            // primary's version information (region version 2, entry version 1).
            VersionTag createTag = new VMVersionTag();
            createTag.setMemberID(primaryVersionID);
            createTag.setRegionVersion(2);
            createTag.setEntryVersion(1);
            createTag.setIsRemoteForTesting();
            EntryEventImpl createEvent = EntryEventImpl.create(bucketRegion, Operation.CREATE, "Object3", true, primary, true, false);
            LogWriterUtils.getLogWriter().info("applying this event to the cache: " + createEvent);
            createEvent.setNewValue(new VMCachedDeserializable("value3", 12));
            createEvent.setVersionTag(createTag);
            bucketRegion.getRegionMap().basicPut(createEvent, System.currentTimeMillis(), true, false, null, false, false);
            createEvent.release();

            // Second event: a tombstone for Object5 (region version 3) so the test can
            // verify that tombstones are transferred during delta-GII.
            VersionTag tombstoneTag = new VMVersionTag();
            tombstoneTag.setMemberID(primaryVersionID);
            tombstoneTag.setRegionVersion(3);
            tombstoneTag.setEntryVersion(1);
            tombstoneTag.setIsRemoteForTesting();
            EntryEventImpl tombstoneEvent = EntryEventImpl.create(bucketRegion, Operation.CREATE, "Object5", true, primary, true, false);
            tombstoneEvent.setNewValue(Token.TOMBSTONE);
            tombstoneEvent.setVersionTag(tombstoneTag);
            LogWriterUtils.getLogWriter().info("applying this event to the cache: " + tombstoneEvent);
            bucketRegion.getRegionMap().basicPut(tombstoneEvent, System.currentTimeMillis(), true, false, null, false, false);
            tombstoneEvent.release();

            bucketRegion.dumpBackingMap();
            LogWriterUtils.getLogWriter().info("bucket version vector is now " + bucketRegion.getVersionVector().fullToString());
            assertTrue("bucket should hold entry Object3 now", bucketRegion.containsKey("Object3"));
            return true;
        }
    });
}
Also used : BucketRegion(org.apache.geode.internal.cache.BucketRegion) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) VMVersionTag(org.apache.geode.internal.cache.versions.VMVersionTag) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) VMVersionTag(org.apache.geode.internal.cache.versions.VMVersionTag) VMCachedDeserializable(org.apache.geode.internal.cache.VMCachedDeserializable)

Example 32 with EntryEventImpl

use of org.apache.geode.internal.cache.EntryEventImpl in project geode by apache.

The class ElidedPutAllDUnitTest, method testElidedPutAllOnPR.

/**
   * Regression test for bug #47425 - an elided putAll event caused a
   * PutAllPartialResultException. A second VM distributes a putAll whose version tag is
   * deliberately made one entry-version older than the entry's current version, so
   * concurrency control should silently elide (drop) it rather than raise a
   * ConcurrentCacheModificationException; the winning value and version must survive.
   */
@Test
public void testElidedPutAllOnPR() throws Exception {
    final String regionName = getUniqueName() + "Region";
    final String key = "key-1";
    Cache cache = getCache();
    PartitionedRegion region = (PartitionedRegion) cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
    // Two puts so the entry's version is 2 before the conflicting update arrives
    // (the final assertion below checks that it is still 2 afterwards).
    region.put(key, "value-1");
    region.put(key, "value-2");
    Entry<?, ?> entry = region.getEntry(key);
    assertTrue("expected entry to be in this vm", entry != null);
    VM vm1 = Host.getHost(0).getVM(1);
    vm1.invoke(new SerializableRunnable("perform conflicting update") {

        @Override
        public void run() {
            Cache cache = getCache();
            PartitionedRegion region = (PartitionedRegion) cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
            try {
                Entry<?, ?> entry = region.getEntry(key);
                assertTrue(entry instanceof EntrySnapshot);
                RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
                // Craft a version tag that is older than the entry's current version so
                // the distributed put will be elided by concurrency control.
                final VersionTag<?> tag = regionEntry.getVersionStamp().asVersionTag();
                tag.setEntryVersion(tag.getEntryVersion() - 1);
                tag.setRegionVersion(1);
                Map<String, String> map = new HashMap<String, String>();
                map.put(key, "value-3");
                DistributedPutAllOperation dpao = region.newPutAllOperation(map, null);
                // Build the putAll entry event by hand, attributing it to the member
                // recorded in the (stale) version tag.
                EntryEventImpl event = EntryEventImpl.create(region, Operation.PUTALL_CREATE, null, null, null, true, (DistributedMember) tag.getMemberID());
                event.setOldValue("value-1");
                event.setVersionTag(tag);
                event.setEventId(new EventID(cache.getDistributedSystem()));
                event.setKeyInfo(((PartitionedRegion) region).getKeyInfo(key));
                dpao.addEntry(event, event.getKeyInfo().getBucketId());
                // getLogWriter().info("dpao data = " + dpao.getPutAllEntryData()[0]);
                VersionedObjectList successfulPuts = new VersionedObjectList(1, true, true);
                successfulPuts.addKeyAndVersion(key, tag);
                try {
                    // Distributing the stale putAll must not surface a conflict exception.
                    region.postPutAllSend(dpao, successfulPuts);
                } catch (ConcurrentCacheModificationException e) {
                    Assert.fail("Should not have received an exception for an elided operation", e);
                } finally {
                    // Release off-heap references held by the events.
                    event.release();
                    dpao.getBaseEvent().release();
                    dpao.freeOffHeapResources();
                }
            } catch (Exception e) {
                Assert.fail("caught unexpected exception", e);
            }
        }
    });
    // The elided update must have changed neither the entry's value nor its version.
    entry = region.getEntry(key);
    assertTrue("expected value-2: " + entry.getValue(), entry.getValue().equals("value-2"));
    RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
    final VersionTag<?> tag = regionEntry.getVersionStamp().asVersionTag();
    assertTrue(tag.getEntryVersion() == 2);
}
Also used : EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) DistributedPutAllOperation(org.apache.geode.internal.cache.DistributedPutAllOperation) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) RegionEntry(org.apache.geode.internal.cache.RegionEntry) Entry(org.apache.geode.cache.Region.Entry) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) DistributedMember(org.apache.geode.distributed.DistributedMember) RegionEntry(org.apache.geode.internal.cache.RegionEntry) EventID(org.apache.geode.internal.cache.EventID) HashMap(java.util.HashMap) Map(java.util.Map) EntrySnapshot(org.apache.geode.internal.cache.EntrySnapshot) Cache(org.apache.geode.cache.Cache) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 33 with EntryEventImpl

use of org.apache.geode.internal.cache.EntryEventImpl in project geode by apache.

The class ConnectionProxyJUnitTest, method testListenerOnServerSitForever.

/**
   * This test verifies the behaviour of a client request when the listener on the server sits
   * forever. This is done in following steps:<br>
   * 1)create server<br>
   * 2)initialize proxy object and create region for client having a CacheListener and make
   * afterCreate in the listener to wait infinitely<br>
   * 3)perform a PUT on client by acquiring Connection through proxy<br>
   * 4)Verify that exception occurs due to infinite wait in the listener<br>
   * 5)Verify that above exception occurs sometime after the readTimeout configured for the client
   * <br>
   *
   */
@Ignore
@Test
public void testListenerOnServerSitForever() throws Exception {
    int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
    Region testRegion = null;
    CacheServer server = this.cache.addCacheServer();
    server.setMaximumTimeBetweenPings(10000);
    server.setPort(port3);
    server.start();
    try {
        try {
            PoolFactory pf = PoolManager.createFactory();
            pf.addServer("localhost", port3);
            pf.setSubscriptionEnabled(false);
            pf.setSubscriptionRedundancy(-1);
            // The timing assertion below depends on these two settings: the put should
            // give up roughly readTimeout * (retryAttempts + 1) ms after it is issued.
            pf.setReadTimeout(2000);
            pf.setThreadLocalConnections(true);
            pf.setSocketBufferSize(32768);
            pf.setRetryAttempts(1);
            pf.setPingInterval(10000);
            proxy = (PoolImpl) pf.create("clientPool");
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.DISTRIBUTED_ACK);
            factory.setCacheListener(new CacheListenerAdapter() {

                @Override
                public void afterCreate(EntryEvent event) {
                    // Block afterCreate until the test releases it via notify() below.
                    synchronized (ConnectionProxyJUnitTest.this) {
                        try {
                            ConnectionProxyJUnitTest.this.wait();
                        } catch (InterruptedException e) {
                            // Restore the interrupt flag before failing.
                            Thread.currentThread().interrupt();
                            fail("interrupted");
                        }
                    }
                }
            });
            RegionAttributes attrs = factory.create();
            testRegion = cache.createRegion("testregion", attrs);
        } catch (Exception ex) {
            // Preserve the original cause instead of just printing the stack trace.
            throw new AssertionError("Failed to initialize client", ex);
        }
        Connection conn = proxy.acquireConnection();
        long t1 = 0;
        try {
            t1 = System.currentTimeMillis();
            EntryEventImpl event = new EntryEventImpl((Object) null);
            try {
                event.setEventId(new EventID(new byte[] { 1 }, 1, 1));
                PutOp.execute(conn, proxy, testRegion.getFullPath(), "key1", "val1", event, null, false);
            } finally {
                event.release();
            }
            fail("Test failed as exception was expected");
        } catch (Exception e) {
            // The put is expected to fail with a read timeout well before 5 seconds
            // (readTimeout of 2000 ms with a single retry).
            long t2 = System.currentTimeMillis();
            long net = t2 - t1;
            assertTrue("expected the put to fail in under 5 seconds but it took " + net + "ms", net / 1000 < 5);
        }
    } finally {
        // Always release the blocked listener and stop the server so neither the
        // waiting thread nor the cache server leaks into subsequent tests.
        synchronized (ConnectionProxyJUnitTest.this) {
            ConnectionProxyJUnitTest.this.notify();
        }
        server.stop();
    }
}
Also used : EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) Connection(org.apache.geode.cache.client.internal.Connection) PoolFactory(org.apache.geode.cache.client.PoolFactory) CacheListenerAdapter(org.apache.geode.cache.util.CacheListenerAdapter) CacheServer(org.apache.geode.cache.server.CacheServer) EventID(org.apache.geode.internal.cache.EventID) Ignore(org.junit.Ignore) ClientSubscriptionTest(org.apache.geode.test.junit.categories.ClientSubscriptionTest) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Example 34 with EntryEventImpl

use of org.apache.geode.internal.cache.EntryEventImpl in project geode by apache.

The class CqServiceImpl, method processEntryEvent.

/**
 * Evaluates a single entry event against every CQ registered in the local profile and in
 * each peer profile, recording the resulting CQ message type (local create/update/destroy,
 * or exception) per filter ID into the supplied FilterRoutingInfo so the event can be
 * routed to interested CQ clients.
 *
 * @param event the cache event being processed; must be an EntryEvent
 * @param localProfile this member's profile, or null if it has none (processed first)
 * @param profiles the peer members' profiles
 * @param frInfo routing info updated with local and per-member CQ match maps
 * @throws CqException declared for callers; note that query-evaluation errors in the
 *         visible body are deliberately buried and reported as MESSAGE_TYPE_EXCEPTION
 */
private void processEntryEvent(CacheEvent event, Profile localProfile, Profile[] profiles, FilterRoutingInfo frInfo) throws CqException {
    final boolean isDebugEnabled = logger.isDebugEnabled();
    // Lazily-populated single-element sets holding the event's new/old value,
    // passed to the query evaluator as its binding context.
    HashSet<Object> cqUnfilteredEventsSet_newValue = new HashSet<>();
    HashSet<Object> cqUnfilteredEventsSet_oldValue = new HashSet<>();
    boolean b_cqResults_newValue;
    boolean b_cqResults_oldValue;
    boolean queryOldValue;
    EntryEvent entryEvent = (EntryEvent) event;
    Object eventKey = entryEvent.getKey();
    boolean isDupEvent = ((EntryEventImpl) event).isPossibleDuplicate();
    // The CQ query needs to be applied to the old value when the op is update, destroy,
    // or invalidate, and also when the op is create but the event is a possible
    // duplicate. The reason for the duplicate case is that when a peer resends a
    // duplicate event it marks it as a create, so that the receiving node
    // applies it (see DR.virtualPut()).
    boolean opRequiringQueryOnOldValue = (event.getOperation().isUpdate() || event.getOperation().isDestroy() || event.getOperation().isInvalidate() || (event.getOperation().isCreate() && isDupEvent));
    // Memoizes the result for a query string so structurally identical CQs
    // registered under different names are not re-evaluated (see matchingCqMap below).
    HashMap<String, Integer> matchedCqs = new HashMap<>();
    long executionStartTime;
    // i == -1 is a pseudo-index for the local profile; 0..length-1 are the peers.
    for (int i = -1; i < profiles.length; i++) {
        CacheProfile cf;
        if (i < 0) {
            cf = (CacheProfile) localProfile;
            if (cf == null)
                continue;
        } else {
            cf = (CacheProfile) profiles[i];
        }
        FilterProfile pf = cf.filterProfile;
        if (pf == null || pf.getCqMap().isEmpty()) {
            continue;
        }
        Map cqs = pf.getCqMap();
        if (isDebugEnabled) {
            logger.debug("Profile for {} processing {} CQs", cf.peerMemberId, cqs.size());
        }
        // Defensive re-check; emptiness was already tested above.
        if (cqs.isEmpty()) {
            continue;
        }
        // Get new value. If its not retrieved.
        if (cqUnfilteredEventsSet_newValue.isEmpty() && (event.getOperation().isCreate() || event.getOperation().isUpdate())) {
            Object newValue = entryEvent.getNewValue();
            if (newValue != null) {
                // We have a new value to run the query on
                cqUnfilteredEventsSet_newValue.add(newValue);
            }
        }
        // Per-profile accumulator: filter ID -> CQ message type for this event.
        HashMap<Long, Integer> cqInfo = new HashMap<>();
        Iterator cqIter = cqs.entrySet().iterator();
        while (cqIter.hasNext()) {
            Map.Entry cqEntry = (Map.Entry) cqIter.next();
            ServerCQImpl cQuery = (ServerCQImpl) cqEntry.getValue();
            b_cqResults_newValue = false;
            b_cqResults_oldValue = false;
            queryOldValue = false;
            if (cQuery == null) {
                continue;
            }
            String cqName = cQuery.getServerCqName();
            Long filterID = cQuery.getFilterID();
            if (isDebugEnabled) {
                logger.debug("Processing CQ : {} Key: {}", cqName, eventKey);
            }
            Integer cqEvent = null;
            if (matchedCqs.containsKey(cqName)) {
                // A structurally identical query was already evaluated for this event;
                // reuse its (possibly null) result instead of re-running the query.
                cqEvent = matchedCqs.get(cqName);
                if (isDebugEnabled) {
                    logger.debug("query {} has already been processed and returned {}", cqName, cqEvent);
                }
                if (cqEvent == null) {
                    continue;
                }
                // Update the Cache Results for this CQ.
                if (cqEvent.intValue() == MessageType.LOCAL_CREATE || cqEvent.intValue() == MessageType.LOCAL_UPDATE) {
                    cQuery.addToCqResultKeys(eventKey);
                } else if (cqEvent.intValue() == MessageType.LOCAL_DESTROY) {
                    cQuery.markAsDestroyedInCqResultKeys(eventKey);
                }
            } else {
                boolean error = false;
                {
                    try {
                        // The CQ object is shared; lock it while evaluating the query.
                        synchronized (cQuery) {
                            // Apply query on new value.
                            if (!cqUnfilteredEventsSet_newValue.isEmpty()) {
                                executionStartTime = this.stats.startCqQueryExecution();
                                b_cqResults_newValue = evaluateQuery(cQuery, new Object[] { cqUnfilteredEventsSet_newValue });
                                this.stats.endCqQueryExecution(executionStartTime);
                            }
                        }
                        // Apply query on oldValue.
                        if (opRequiringQueryOnOldValue) {
                            // If the CQ result-key cache is initialized, a key lookup can
                            // stand in for re-running the query on the old value
                            // (with PR region exceptions handled below).
                            if (cQuery.cqResultKeysInitialized) {
                                b_cqResults_oldValue = cQuery.isPartOfCqResult(eventKey);
                                // Also apply if the query was not executed during cq execute
                                if ((cQuery.isPR || !CqServiceImpl.EXECUTE_QUERY_DURING_INIT) && b_cqResults_oldValue == false) {
                                    queryOldValue = true;
                                }
                                if (isDebugEnabled && !cQuery.isPR && !b_cqResults_oldValue) {
                                    logger.debug("Event Key not found in the CQ Result Queue. EventKey : {} CQ Name : {}", eventKey, cqName);
                                }
                            } else {
                                queryOldValue = true;
                            }
                            if (queryOldValue) {
                                if (cqUnfilteredEventsSet_oldValue.isEmpty()) {
                                    Object oldValue = entryEvent.getOldValue();
                                    if (oldValue != null) {
                                        cqUnfilteredEventsSet_oldValue.add(oldValue);
                                    }
                                }
                                synchronized (cQuery) {
                                    // Apply query on old value.
                                    if (!cqUnfilteredEventsSet_oldValue.isEmpty()) {
                                        executionStartTime = this.stats.startCqQueryExecution();
                                        b_cqResults_oldValue = evaluateQuery(cQuery, new Object[] { cqUnfilteredEventsSet_oldValue });
                                        this.stats.endCqQueryExecution(executionStartTime);
                                    } else {
                                        if (isDebugEnabled) {
                                            logger.debug("old value for event with key {} is null - query execution not performed", eventKey);
                                        }
                                    }
                                }
                            }
                        // Query oldValue
                        }
                    } catch (Exception ex) {
                        // Any exception in running the query should be caught here and
                        // buried because this code is running in-line with the message
                        // processing code and we don't want to kill that thread
                        error = true;
                        // Log the failure; the CQ is flagged with MESSAGE_TYPE_EXCEPTION below.
                        logger.info(LocalizedMessage.create(LocalizedStrings.CqService_ERROR_WHILE_PROCESSING_CQ_ON_THE_EVENT_KEY_0_CQNAME_1_ERROR_2, new Object[] { ((EntryEvent) event).getKey(), cQuery.getName(), ex.getLocalizedMessage() }));
                    }
                    if (error) {
                        cqEvent = MESSAGE_TYPE_EXCEPTION;
                    } else {
                        // Translate the (new, old) query results into a CQ message type:
                        // matched both -> update; new only -> create; old only -> destroy.
                        if (b_cqResults_newValue) {
                            if (b_cqResults_oldValue) {
                                cqEvent = MESSAGE_TYPE_LOCAL_UPDATE;
                            } else {
                                cqEvent = MESSAGE_TYPE_LOCAL_CREATE;
                            }
                            // If its create and caching is enabled, cache the key
                            // for this CQ.
                            cQuery.addToCqResultKeys(eventKey);
                        } else if (b_cqResults_oldValue) {
                            // Base invalidate operation is treated as destroy.
                            // When the invalidate comes through, the entry will no longer
                            // satisfy the query and will need to be deleted.
                            cqEvent = MESSAGE_TYPE_LOCAL_DESTROY;
                            // If caching is enabled, mark this event's key as removed
                            // from the CQ cache.
                            cQuery.markAsDestroyedInCqResultKeys(eventKey);
                        }
                    }
                }
                // Get the matching CQs if any.
                // synchronized (this.matchingCqMap){
                // Record this result for every other CQ with the same query string so
                // they skip re-evaluation via the matchedCqs lookup above.
                String query = cQuery.getQueryString();
                Set matchingCqs = (Set) matchingCqMap.get(query);
                if (matchingCqs != null) {
                    Iterator iter = matchingCqs.iterator();
                    while (iter.hasNext()) {
                        String matchingCqName = (String) iter.next();
                        if (!matchingCqName.equals(cqName)) {
                            matchedCqs.put(matchingCqName, cqEvent);
                            if (isDebugEnabled) {
                                logger.debug("Adding CQ into Matching CQ Map: {} Event is: {}", matchingCqName, cqEvent);
                            }
                        }
                    }
                }
            }
            // Only route the event for CQs that matched and are still running.
            if (cqEvent != null && cQuery.isRunning()) {
                if (isDebugEnabled) {
                    logger.debug("Added event to CQ with client-side name: {} key: {} operation : {}", cQuery.cqName, eventKey, cqEvent);
                }
                cqInfo.put(filterID, cqEvent);
                CqQueryVsdStats stats = cQuery.getVsdStats();
                if (stats != null) {
                    stats.updateStats(cqEvent);
                }
            }
        }
        // Attach this profile's accumulated CQ matches to the routing info.
        if (cqInfo.size() > 0) {
            if (pf.isLocalProfile()) {
                if (isDebugEnabled) {
                    logger.debug("Setting local CQ matches to {}", cqInfo);
                }
                frInfo.setLocalCqInfo(cqInfo);
            } else {
                if (isDebugEnabled) {
                    logger.debug("Setting CQ matches for {} to {}", cf.getDistributedMember(), cqInfo);
                }
                frInfo.setCqRoutingInfo(cf.getDistributedMember(), cqInfo);
            }
        }
    }
// iteration over Profiles.
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) TimeoutException(org.apache.geode.cache.TimeoutException) CqExistsException(org.apache.geode.cache.query.CqExistsException) CqException(org.apache.geode.cache.query.CqException) QueryInvalidException(org.apache.geode.cache.query.QueryInvalidException) InvalidDeltaException(org.apache.geode.InvalidDeltaException) RegionNotFoundException(org.apache.geode.cache.query.RegionNotFoundException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) CqClosedException(org.apache.geode.cache.query.CqClosedException) QueryException(org.apache.geode.cache.query.QueryException) CqQueryVsdStats(org.apache.geode.cache.query.internal.CqQueryVsdStats) CacheProfile(org.apache.geode.internal.cache.CacheDistributionAdvisor.CacheProfile) FilterProfile(org.apache.geode.internal.cache.FilterProfile) EntryEvent(org.apache.geode.cache.EntryEvent) Iterator(java.util.Iterator) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) HashSet(java.util.HashSet)

Aggregations

EntryEventImpl (org.apache.geode.internal.cache.EntryEventImpl)34 EventID (org.apache.geode.internal.cache.EventID)14 Released (org.apache.geode.internal.offheap.annotations.Released)8 HashMap (java.util.HashMap)7 EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException)7 Map (java.util.Map)6 CacheWriterException (org.apache.geode.cache.CacheWriterException)6 InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember)6 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)6 PartitionedRegionDataStore (org.apache.geode.internal.cache.PartitionedRegionDataStore)6 PrimaryBucketException (org.apache.geode.internal.cache.PrimaryBucketException)6 VersionTag (org.apache.geode.internal.cache.versions.VersionTag)6 CancelException (org.apache.geode.CancelException)5 RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException)5 LocalRegion (org.apache.geode.internal.cache.LocalRegion)5 PutAllPartialResultException (org.apache.geode.internal.cache.PutAllPartialResultException)5 VersionedObjectList (org.apache.geode.internal.cache.tier.sockets.VersionedObjectList)5 CacheException (org.apache.geode.cache.CacheException)4 ReplyException (org.apache.geode.distributed.internal.ReplyException)4 ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException)4