Search in sources :

Example 11 with ThreadIdentifier

use of org.apache.geode.internal.cache.ha.ThreadIdentifier in project geode by apache.

From the class EventStateHelper, method groupThreadIds.

/**
 * Groups per-thread event state by the member that owns each thread.
 *
 * @param eventState a (raw) map whose keys are {@code ThreadIdentifier}s and whose values are
 *        opaque per-thread state objects
 * @return a map from each member to the subset of {@code eventState} whose thread ids belong to
 *         that member
 */
private static Map<MemberIdentifier, Map<ThreadIdentifier, Object>> groupThreadIds(Map eventState) {
    Map<MemberIdentifier, Map<ThreadIdentifier, Object>> results = new HashMap<MemberIdentifier, Map<ThreadIdentifier, Object>>();
    for (Object next : eventState.entrySet()) {
        Map.Entry entry = (Map.Entry) next;
        ThreadIdentifier key = (ThreadIdentifier) entry.getKey();
        // The grouping key is derived from the thread's membership id.
        MemberIdentifier memberId = new MemberIdentifier(key.getMembershipID());
        // computeIfAbsent replaces the manual get / null-check / put idiom.
        results.computeIfAbsent(memberId, ignored -> new HashMap<ThreadIdentifier, Object>())
                .put(key, entry.getValue());
    }
    return results;
}
Also used : HashMap(java.util.HashMap) ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier) HashMap(java.util.HashMap) Map(java.util.Map)

Example 12 with ThreadIdentifier

use of org.apache.geode.internal.cache.ha.ThreadIdentifier in project geode by apache.

From the class EventTracker, method findVersionTagForGateway.

/**
 * Looks up the version tag recorded for a gateway event. Unlike findVersionTag, this accepts
 * any recorded sequence number at or beyond the requested one.
 */
public VersionTag findVersionTagForGateway(EventID eventID) {
    ThreadIdentifier tid = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
    EventSeqnoHolder holder = recordedEvents.get(tid);
    if (holder == null) {
        // Nothing recorded for this thread — no tag to recover.
        if (logger.isDebugEnabled()) {
            logger.debug("search for version tag failed as no event is recorded for {}", tid.expensiveToString());
        }
        return null;
    }
    synchronized (holder) {
        if (logger.isDebugEnabled()) {
            logger.debug("search for version tag located last event for {}: {} {}", tid.expensiveToString(), holder, eventID.getSequenceID());
        }
        if (holder.lastSeqno < eventID.getSequenceID()) {
            // Recorded stream has not reached the requested sequence number yet.
            return null;
        }
        // A missing tag here can be normal (e.g. during bucket region initialization);
        // trace it rather than treat it as an error.
        boolean traceMissingTag =
                logger.isTraceEnabled(LogMarker.DISTRIBUTION_BRIDGE_SERVER) && holder.versionTag == null;
        if (traceMissingTag) {
            logger.trace(LogMarker.DISTRIBUTION_BRIDGE_SERVER, "Could not recover version tag.  Found event holder with no version tag for {}", eventID);
        }
        return holder.versionTag;
    }
}
Also used : ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier)

Example 13 with ThreadIdentifier

use of org.apache.geode.internal.cache.ha.ThreadIdentifier in project geode by apache.

From the class EventTracker, method findVersionTagForBulkOp.

/**
 * Looks up the version tag recorded for a bulk operation (putAll/removeAll) event, or returns
 * null when no events have been recorded for the event's thread.
 */
public VersionTag findVersionTagForBulkOp(EventID eventID) {
    ThreadIdentifier tid = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
    BulkOpHolder holder = recordedBulkOpVersionTags.get(tid);
    if (holder == null) {
        // No bulk-op state for this thread — nothing to recover.
        if (logger.isDebugEnabled()) {
            logger.debug("search for version tag failed as no events are recorded for {}", tid.expensiveToString());
        }
        return null;
    }
    synchronized (holder) {
        if (logger.isDebugEnabled()) {
            logger.debug("search for version tag located event holder for {}: {}", tid.expensiveToString(), holder);
        }
        // The holder keeps a per-event tag map; a miss simply yields null.
        return holder.entryVersionTags.get(eventID);
    }
}
Also used : ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier)

Example 14 with ThreadIdentifier

use of org.apache.geode.internal.cache.ha.ThreadIdentifier in project geode by apache.

From the class EventTracker, method findVersionTag.

/**
 * Looks up the version tag recorded for the given event. Only the last event per thread is
 * tracked, so a tag is returned only when the recorded sequence number matches exactly.
 */
public VersionTag findVersionTag(EventID eventID) {
    ThreadIdentifier tid = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
    EventSeqnoHolder recorded = recordedEvents.get(tid);
    if (recorded == null) {
        // Nothing recorded for this thread — no tag to recover.
        if (logger.isDebugEnabled()) {
            logger.debug("search for version tag failed as no event is recorded for {}", tid.expensiveToString());
        }
        return null;
    }
    synchronized (recorded) {
        if (logger.isDebugEnabled()) {
            logger.debug("search for version tag located last event for {}: {}", tid.expensiveToString(), recorded);
        }
        if (recorded.lastSeqno != eventID.getSequenceID()) {
            // Exact-match requirement: contrast with findVersionTagForGateway, which accepts
            // any recorded seqno at or beyond the requested one.
            return null;
        }
        // A missing tag can be normal (e.g. during bucket region initialization);
        // trace it rather than treat it as an error.
        boolean traceMissingTag =
                logger.isTraceEnabled(LogMarker.DISTRIBUTION_BRIDGE_SERVER) && recorded.versionTag == null;
        if (traceMissingTag) {
            logger.trace(LogMarker.DISTRIBUTION_BRIDGE_SERVER, "Could not recover version tag.  Found event holder with no version tag for {}", eventID);
        }
        return recorded.versionTag;
    }
}
Also used : ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier)

Example 15 with ThreadIdentifier

use of org.apache.geode.internal.cache.ha.ThreadIdentifier in project geode by apache.

From the class PutAllPRMessage, method doLocalPutAll.

/**
 * Applies this putAll message to the local data store and distributes it to redundant buckets.
 * This method is called by both operateOnPartitionedRegion() when processing a remote msg or by
 * sendMsgByBucket() when processing a msg targeted to local Jvm. Note: It is very important that
 * this message does NOT cause any deadlocks as the sender will wait indefinitely for the
 * acknowledgment.
 *
 * @param r the partitioned region being operated on
 * @param eventSender the endpoint server who received the request from the client
 * @param lastModified timestamp for last modification
 * @return If succeeds, return true, otherwise, throw exception
 * @throws ForceReattemptException if a put did not complete and the caller should retry
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings("IMSE_DONT_CATCH_IMSE")
public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember eventSender, long lastModified) throws EntryExistsException, ForceReattemptException, DataLocationException {
    boolean didPut = false;
    // In this visible code clientReadTimeOut is only logged; it is not passed on.
    long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
    if (r.hasServerProxy()) {
        clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
        if (logger.isDebugEnabled()) {
            logger.debug("PutAllPRMessage: doLocalPutAll: clientReadTimeOut is {}", clientReadTimeOut);
        }
    }
    DistributedPutAllOperation dpao = null;
    // baseEvent holds off-heap resources and must be released in the outer finally.
    @Released EntryEventImpl baseEvent = null;
    BucketRegion bucketRegion = null;
    PartitionedRegionDataStore ds = r.getDataStore();
    InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
    try {
        if (!notificationOnly) {
            // bucketRegion is not null only when !notificationOnly
            bucketRegion = ds.getInitializedBucketForId(null, bucketId);
            this.versions = new VersionedObjectList(this.putAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
            // create a base event and a DPAO for PutAllMessage distributed btw redundant buckets
            baseEvent = EntryEventImpl.create(bucketRegion, Operation.PUTALL_CREATE, null, null, this.callbackArg, true, eventSender, !skipCallbacks, true);
            // set baseEventId to the first entry's event id. We need the thread id for DACE
            baseEvent.setEventId(putAllPRData[0].getEventID());
            if (this.bridgeContext != null) {
                baseEvent.setContext(this.bridgeContext);
            }
            baseEvent.setPossibleDuplicate(this.posDup);
            if (logger.isDebugEnabled()) {
                logger.debug("PutAllPRMessage.doLocalPutAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
            }
            dpao = new DistributedPutAllOperation(baseEvent, putAllPRDataSize, false);
        }
        // Fix the updateMsg misorder issue
        // Lock the keys when doing postPutAll
        Object[] keys = new Object[putAllPRDataSize];
        for (int i = 0; i < putAllPRDataSize; ++i) {
            keys[i] = putAllPRData[i].getKey();
        }
        if (!notificationOnly) {
            try {
                if (putAllPRData.length > 0) {
                    // A possible-duplicate with concurrency checks means this is a retried
                    // client op: try to recover the version tags assigned on the previous
                    // attempt so the retry does not create new versions.
                    if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("attempting to locate version tags for retried event");
                        }
                        // of the previous attempt
                        for (int i = 0; i < putAllPRDataSize; i++) {
                            if (putAllPRData[i].versionTag == null) {
                                putAllPRData[i].versionTag = bucketRegion.findVersionTagForClientBulkOp(putAllPRData[i].getEventID());
                                if (putAllPRData[i].versionTag != null) {
                                    putAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
                                }
                            }
                        }
                    }
                    EventID eventID = putAllPRData[0].getEventID();
                    // Register the start of this bulk op under the first entry's thread id.
                    ThreadIdentifier membershipID = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
                    bucketRegion.recordBulkOpStart(membershipID, eventID);
                }
                // Block until all keys in this batch can be locked (see misorder note above).
                bucketRegion.waitUntilLocked(keys);
                boolean lockedForPrimary = false;
                final HashMap succeeded = new HashMap();
                PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize);
                // key tracks the entry being processed so a CacheWriterException can be
                // attributed to the right key in the catch below.
                Object key = keys[0];
                try {
                    bucketRegion.doLockForPrimary(false);
                    lockedForPrimary = true;
                    /*
                     * The real work to be synchronized, it will take long time. We don't worry
                     * about another thread sending any msg which has the same key in this
                     * request, because those requests will be blocked by the key locks taken
                     * above.
                     */
                    for (int i = 0; i < putAllPRDataSize; i++) {
                        // ev is off-heap-released in the per-iteration finally below.
                        @Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                        try {
                            key = ev.getKey();
                            ev.setPutAllOperation(dpao);
                            // make sure a local update inserts a cache de-serializable
                            ev.makeSerializedNewValue();
                            // then in basicPutPart3(), the ev is added into dpao
                            try {
                                didPut = r.getDataView().putEntryOnRemote(ev, false, false, null, false, lastModified, true);
                                if (didPut && logger.isDebugEnabled()) {
                                    logger.debug("PutAllPRMessage.doLocalPutAll:putLocally success for {}", ev);
                                }
                            } catch (ConcurrentCacheModificationException e) {
                                // A concurrent modification means the entry already advanced;
                                // treat the put as applied rather than failed.
                                didPut = true;
                                if (logger.isDebugEnabled()) {
                                    logger.debug("PutAllPRMessage.doLocalPutAll:putLocally encountered concurrent cache modification for {}", ev, e);
                                }
                            }
                            putAllPRData[i].setTailKey(ev.getTailKey());
                            if (!didPut) {
                                // make sure the region hasn't gone away
                                r.checkReadiness();
                                ForceReattemptException fre = new ForceReattemptException("unable to perform put in PutAllPR, but operation should not fail");
                                fre.setHash(ev.getKey().hashCode());
                                throw fre;
                            } else {
                                succeeded.put(putAllPRData[i].getKey(), putAllPRData[i].getValue());
                                this.versions.addKeyAndVersion(putAllPRData[i].getKey(), ev.getVersionTag());
                            }
                        } finally {
                            ev.release();
                        }
                    }
                // for
                } catch (IllegalMonitorStateException ignore) {
                    // doLockForPrimary can throw this; retry the whole operation.
                    throw new ForceReattemptException("unable to get lock for primary, retrying... ");
                } catch (CacheWriterException cwe) {
                    // encounter cacheWriter exception; record the failed key, do not abort.
                    partialKeys.saveFailedKey(key, cwe);
                } finally {
                    try {
                        // Only PutAllPRMessage knows if the thread id is fake. Event has no idea.
                        // So we have to manually set useFakeEventId for this DPAO
                        dpao.setUseFakeEventId(true);
                        r.checkReadiness();
                        // Distribute whatever was applied, even after a partial failure.
                        bucketRegion.getDataView().postPutAll(dpao, this.versions, bucketRegion);
                    } finally {
                        if (lockedForPrimary) {
                            bucketRegion.doUnlockForPrimary();
                        }
                    }
                }
                if (partialKeys.hasFailure()) {
                    partialKeys.addKeysAndVersions(this.versions);
                    if (logger.isDebugEnabled()) {
                        logger.debug("PutAllPRMessage: partial keys applied, map to bucket {}'s keys: {}. Applied {}", bucketId, Arrays.toString(keys), succeeded);
                    }
                    throw new PutAllPartialResultException(partialKeys);
                }
            } catch (RegionDestroyedException e) {
                ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
            } finally {
                // Release the key locks taken by waitUntilLocked, whatever happened above.
                bucketRegion.removeAndNotifyKeys(keys);
            }
        } else {
            // notificationOnly: no local puts — just invoke listener callbacks per entry.
            for (int i = 0; i < putAllPRDataSize; i++) {
                EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                try {
                    ev.setOriginRemote(true);
                    if (this.callbackArg != null) {
                        ev.setCallbackArgument(this.callbackArg);
                    }
                    r.invokePutCallbacks(ev.getOperation().isCreate() ? EnumListenerEvent.AFTER_CREATE : EnumListenerEvent.AFTER_UPDATE, ev, r.isInitialized(), true);
                } finally {
                    ev.release();
                }
            }
        }
    } finally {
        // Release off-heap resources held by the base event and the DPAO.
        if (baseEvent != null)
            baseEvent.release();
        if (dpao != null)
            dpao.freeOffHeapResources();
    }
    return true;
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) HashMap(java.util.HashMap) ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) DistributedPutAllOperation(org.apache.geode.internal.cache.DistributedPutAllOperation) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) BucketRegion(org.apache.geode.internal.cache.BucketRegion) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) EventID(org.apache.geode.internal.cache.EventID) CacheWriterException(org.apache.geode.cache.CacheWriterException)

Aggregations

ThreadIdentifier (org.apache.geode.internal.cache.ha.ThreadIdentifier)20 HashMap (java.util.HashMap)4 InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember)4 EventID (org.apache.geode.internal.cache.EventID)4 Test (org.junit.Test)4 Map (java.util.Map)3 BulkOpHolder (org.apache.geode.internal.cache.EventTracker.BulkOpHolder)3 CacheWriterException (org.apache.geode.cache.CacheWriterException)2 RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException)2 PoolFactory (org.apache.geode.cache.client.PoolFactory)2 CacheServer (org.apache.geode.cache.server.CacheServer)2 BucketRegion (org.apache.geode.internal.cache.BucketRegion)2 EntryEventImpl (org.apache.geode.internal.cache.EntryEventImpl)2 EventSeqnoHolder (org.apache.geode.internal.cache.EventTracker.EventSeqnoHolder)2 ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException)2 PartitionedRegionDataStore (org.apache.geode.internal.cache.PartitionedRegionDataStore)2 PutAllPartialResultException (org.apache.geode.internal.cache.PutAllPartialResultException)2 PutAllPartialResult (org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult)2 DispatchedAndCurrentEvents (org.apache.geode.internal.cache.ha.HARegionQueue.DispatchedAndCurrentEvents)2 VersionedObjectList (org.apache.geode.internal.cache.tier.sockets.VersionedObjectList)2