use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class GMSJoinLeave method findCoordinatorFromView.
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "WA_NOT_IN_LOOP")
boolean findCoordinatorFromView() {
ArrayList<FindCoordinatorResponse> result;
SearchState state = searchState;
NetView v = state.view;
List<InternalDistributedMember> recipients = new ArrayList<>(v.getMembers());
if (recipients.size() > MAX_DISCOVERY_NODES && MAX_DISCOVERY_NODES > 0) {
recipients = recipients.subList(0, MAX_DISCOVERY_NODES);
}
if (state.registrants != null) {
recipients.addAll(state.registrants);
}
recipients.remove(localAddress);
// FindCoordinatorRequest req = new FindCoordinatorRequest(localAddress, state.alreadyTried,
// state.viewId, services.getMessenger().getPublickey(
// localAddress), services.getMessenger().getRequestId());
// req.setRecipients(v.getMembers());
boolean testing = unitTesting.contains("findCoordinatorFromView");
synchronized (state.responses) {
if (!testing) {
state.responses.clear();
}
String dhalgo = services.getConfig().getDistributionConfig().getSecurityUDPDHAlgo();
if (!dhalgo.isEmpty()) {
// Usually this happens when a locator re-joins the cluster and has a saved view.
for (InternalDistributedMember mbr : v.getMembers()) {
Set<InternalDistributedMember> r = new HashSet<>();
r.add(mbr);
FindCoordinatorRequest req = new FindCoordinatorRequest(localAddress, state.alreadyTried, state.viewId, services.getMessenger().getPublicKey(localAddress), services.getMessenger().getRequestId(), dhalgo);
req.setRecipients(r);
services.getMessenger().send(req, v);
}
} else {
FindCoordinatorRequest req = new FindCoordinatorRequest(localAddress, state.alreadyTried, state.viewId, services.getMessenger().getPublicKey(localAddress), services.getMessenger().getRequestId(), dhalgo);
req.setRecipients(v.getMembers());
services.getMessenger().send(req, v);
}
try {
if (!testing) {
state.responses.wait(DISCOVERY_TIMEOUT);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false;
}
result = new ArrayList<>(state.responses);
state.responses.clear();
}
InternalDistributedMember coord = null;
if (localAddress.getNetMember().preferredForCoordinator()) {
// it's possible that all other potential coordinators are gone
// and this new member must become the coordinator
coord = localAddress;
}
boolean coordIsNoob = true;
for (FindCoordinatorResponse resp : result) {
InternalDistributedMember mbr = resp.getCoordinator();
if (!state.alreadyTried.contains(mbr)) {
boolean mbrIsNoob = (mbr.getVmViewId() < 0);
if (mbrIsNoob) {
// member has not yet joined
if (coordIsNoob && (coord == null || coord.compareTo(mbr, false) > 0)) {
coord = mbr;
}
} else {
// member has already joined
if (coordIsNoob || mbr.getVmViewId() > coord.getVmViewId()) {
coord = mbr;
coordIsNoob = false;
}
}
}
}
state.possibleCoordinator = coord;
return coord != null;
}
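The loop above encodes the coordinator-preference rule: a responder that has already joined a view (non-negative view id) always beats one that has not, among joined members the highest view id wins, and among not-yet-joined members the lowest-ordered address wins. Below is a minimal, standalone sketch of that rule, using a simplified Candidate record (a String address and an int view id) as a stand-in for InternalDistributedMember; it is illustrative, not the Geode implementation.

import java.util.List;

class CoordinatorChoiceSketch {
    // stand-in for InternalDistributedMember; viewId < 0 means "has not yet joined"
    record Candidate(String address, int viewId) {}

    static Candidate choose(List<Candidate> responses, Candidate self) {
        Candidate coord = self;     // may be null if the local member is not preferred
        boolean coordIsNoob = true; // true while the current choice has not joined a view
        for (Candidate c : responses) {
            if (c.viewId() < 0) {
                // not-yet-joined responder: only wins over another not-yet-joined
                // candidate, and only if its address sorts lower
                if (coordIsNoob && (coord == null || coord.address().compareTo(c.address()) > 0)) {
                    coord = c;
                }
            } else {
                // joined responder: beats any not-yet-joined candidate;
                // otherwise the highest view id wins
                if (coordIsNoob || c.viewId() > coord.viewId()) {
                    coord = c;
                    coordIsNoob = false;
                }
            }
        }
        return coord; // null means no plausible coordinator was found
    }
}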
use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class DistTXStateProxyImplOnCoordinator method getSecondariesAndReplicasForTxOps.
/**
* Creates a map of all secondaries (for partitioned regions) / replicas (for replicated regions)
* to the stubs used for sending the commit message to those members
*/
private HashMap<DistributedMember, DistTXCoordinatorInterface> getSecondariesAndReplicasForTxOps() {
final GemFireCacheImpl cache = GemFireCacheImpl.getExisting("getSecondariesAndReplicasForTxOps");
InternalDistributedMember currentNode = cache.getInternalDistributedSystem().getDistributedMember();
HashMap<DistributedMember, DistTXCoordinatorInterface> secondaryTarget2realDeals = new HashMap<>();
for (Entry<DistributedMember, DistTXCoordinatorInterface> e : target2realDeals.entrySet()) {
DistributedMember originalTarget = e.getKey();
DistTXCoordinatorInterface distPeerTxStateStub = e.getValue();
ArrayList<DistTxEntryEvent> primaryTxOps = distPeerTxStateStub.getPrimaryTransactionalOperations();
for (DistTxEntryEvent dtop : primaryTxOps) {
LocalRegion lr = dtop.getRegion();
// replicas or secondaries
Set<InternalDistributedMember> otherNodes = null;
if (lr instanceof PartitionedRegion) {
Set<InternalDistributedMember> allNodes = ((PartitionedRegion) dtop.getRegion()).getRegionAdvisor().getBucketOwners(dtop.getKeyInfo().getBucketId());
allNodes.remove(originalTarget);
otherNodes = allNodes;
} else if (lr instanceof DistributedRegion) {
otherNodes = ((DistributedRegion) lr).getCacheDistributionAdvisor().adviseInitializedReplicates();
otherNodes.remove(originalTarget);
}
if (otherNodes != null) {
for (InternalDistributedMember dm : otherNodes) {
// check whether a stub for this target already exists due to another tx op on that node
DistTXCoordinatorInterface existingDistPeerTXStateStub = target2realDeals.get(dm);
if (existingDistPeerTXStateStub == null) {
existingDistPeerTXStateStub = secondaryTarget2realDeals.get(dm);
if (existingDistPeerTXStateStub == null) {
DistTXCoordinatorInterface newTxStub = null;
if (currentNode.equals(dm)) {
// [DISTTX] TODO add a test case for this condition?
newTxStub = new DistTXStateOnCoordinator(this, false);
} else {
newTxStub = new DistPeerTXStateStub(this, dm, onBehalfOfClientMember);
}
newTxStub.addSecondaryTransactionalOperations(dtop);
secondaryTarget2realDeals.put(dm, newTxStub);
} else {
existingDistPeerTXStateStub.addSecondaryTransactionalOperations(dtop);
}
} else {
existingDistPeerTXStateStub.addSecondaryTransactionalOperations(dtop);
}
}
}
}
}
return secondaryTarget2realDeals;
}
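The nested null checks above implement a lazy per-member stub lookup: a member that already owns a primary stub gets the secondary op added there; otherwise a secondary stub is reused or created on demand (a local DistTXStateOnCoordinator for the current node, a DistPeerTXStateStub for a remote one). A compact sketch of that shape, with placeholder TxStub/String types and a caller-supplied factory standing in for the stub-construction choice, is shown here as an illustration only.

import java.util.Map;
import java.util.function.Function;

class SecondaryStubSketch {
    interface TxStub { void addSecondaryOp(Object op); }

    static void recordSecondaryOp(Map<String, TxStub> primaryStubs,
                                  Map<String, TxStub> secondaryStubs,
                                  String member, Object op,
                                  Function<String, TxStub> newSecondaryStub) {
        // prefer an existing primary stub for the member
        TxStub stub = primaryStubs.get(member);
        if (stub == null) {
            // otherwise reuse or lazily create a secondary stub for it
            stub = secondaryStubs.computeIfAbsent(member, newSecondaryStub);
        }
        stub.addSecondaryOp(op);
    }
}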
use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class DistributedCacheOperation method _distribute.
/**
* About to distribute a cache operation to other members of the distributed system. This method
* determines who the recipients are and handles careful delivery of the operation to those
* members. Calls to this method should be wrapped by startOperation() and endOperation() in a
* try/finally block.
*/
private void _distribute() {
DistributedRegion region = getRegion();
DM mgr = region.getDistributionManager();
boolean reliableOp = isOperationReliable() && region.requiresReliabilityCheck();
if (SLOW_DISTRIBUTION_MS > 0) {
// test hook
try {
Thread.sleep(SLOW_DISTRIBUTION_MS);
} catch (InterruptedException ignore) {
Thread.currentThread().interrupt();
}
SLOW_DISTRIBUTION_MS = 0;
}
boolean isPutAll = (this instanceof DistributedPutAllOperation);
boolean isRemoveAll = (this instanceof DistributedRemoveAllOperation);
try {
// Recipients with CacheOp
Set<InternalDistributedMember> recipients = getRecipients();
Map<InternalDistributedMember, PersistentMemberID> persistentIds = null;
if (region.getDataPolicy().withPersistence()) {
persistentIds = region.getDistributionAdvisor().adviseInitializedPersistentMembers();
}
// some members requiring old value are also in the cache op recipients set
Set needsOldValueInCacheOp = Collections.emptySet();
// set client routing information into the event
boolean routingComputed = false;
FilterRoutingInfo filterRouting = null;
// recipients that will get a cacheop msg and also a PR message
Set twoMessages = Collections.emptySet();
if (region.isUsedForPartitionedRegionBucket()) {
twoMessages = ((Bucket) region).getBucketAdvisor().adviseRequiresTwoMessages();
routingComputed = true;
filterRouting = getRecipientFilterRouting(recipients);
if (filterRouting != null) {
if (logger.isDebugEnabled()) {
logger.debug("Computed this filter routing: {}", filterRouting);
}
}
}
// some members need PR notification of the change for client/wan notification
Set adjunctRecipients = Collections.emptySet();
// adjunct (PR listener notification) messages share this operation's reply processor and are sent in addition to the operation's message
if (this.supportsAdjunctMessaging() && region.isUsedForPartitionedRegionBucket()) {
BucketRegion br = (BucketRegion) region;
adjunctRecipients = getAdjunctReceivers(br, recipients, twoMessages, filterRouting);
}
EntryEventImpl entryEvent = event.getOperation().isEntry() ? getEvent() : null;
if (entryEvent != null && entryEvent.hasOldValue()) {
if (testSendingOldValues) {
needsOldValueInCacheOp = new HashSet(recipients);
} else {
needsOldValueInCacheOp = region.getCacheDistributionAdvisor().adviseRequiresOldValueInCacheOp();
}
recipients.removeAll(needsOldValueInCacheOp);
}
Set cachelessNodes = Collections.emptySet();
Set adviseCacheServers;
Set<InternalDistributedMember> cachelessNodesWithNoCacheServer = new HashSet<>();
if (region.getDistributionConfig().getDeltaPropagation() && this.supportsDeltaPropagation()) {
cachelessNodes = region.getCacheDistributionAdvisor().adviseEmptys();
if (!cachelessNodes.isEmpty()) {
List list = new ArrayList(cachelessNodes);
for (Object member : cachelessNodes) {
if (!recipients.contains(member) || adjunctRecipients.contains(member)) {
// Don't include those originally excluded.
list.remove(member);
}
}
cachelessNodes.clear();
recipients.removeAll(list);
cachelessNodes.addAll(list);
}
cachelessNodesWithNoCacheServer.addAll(cachelessNodes);
adviseCacheServers = region.getCacheDistributionAdvisor().adviseCacheServers();
cachelessNodesWithNoCacheServer.removeAll(adviseCacheServers);
}
if (recipients.isEmpty() && adjunctRecipients.isEmpty() && needsOldValueInCacheOp.isEmpty() && cachelessNodes.isEmpty()) {
if (region.isInternalRegion()) {
if (mgr.getNormalDistributionManagerIds().size() > 1) {
// suppress this msg if we are the only member.
if (logger.isTraceEnabled()) {
logger.trace("<No Recipients> {}", this);
}
} else {
// suppress this msg if we are the only member.
if (logger.isDebugEnabled()) {
logger.debug("<No Recipients> {}", this);
}
}
}
if (!reliableOp || region.isNoDistributionOk()) {
// nothing needs to be done in this case
} else {
region.handleReliableDistribution(Collections.emptySet());
}
// compute local client routing before waiting for an ack only for a bucket
if (region.isUsedForPartitionedRegionBucket()) {
FilterInfo filterInfo = getLocalFilterRouting(filterRouting);
this.event.setLocalFilterInfo(filterInfo);
}
} else {
boolean directAck = false;
boolean useMulticast = region.getMulticastEnabled() && region.getSystem().getConfig().getMcastPort() != 0 && this.supportsMulticast();
boolean shouldAck = shouldAck();
if (shouldAck) {
if (this.supportsDirectAck() && adjunctRecipients.isEmpty()) {
if (region.getSystem().threadOwnsResources()) {
directAck = true;
}
}
}
// don't send to the sender of a remote-operation message; it sends its own response. fixes bug #45973
if (entryEvent != null) {
RemoteOperationMessage rmsg = entryEvent.getRemoteOperationMessage();
if (rmsg != null) {
recipients.remove(rmsg.getSender());
// bug #45106: can't mcast or the sender of the one-hop op will get it
useMulticast = false;
}
}
if (logger.isDebugEnabled()) {
logger.debug("recipients for {}: {} with adjunct messages to: {}", this, recipients, adjunctRecipients);
}
if (shouldAck) {
// adjunct messages are sent using the same reply processor, so
// add them to the processor's membership set
Collection waitForMembers = null;
if (recipients.size() > 0 && adjunctRecipients.size() == 0 && cachelessNodes.isEmpty()) {
// the common case
waitForMembers = recipients;
} else if (!cachelessNodes.isEmpty()) {
waitForMembers = new HashSet(recipients);
waitForMembers.addAll(cachelessNodes);
} else {
// note that we use a Vector instead of a Set for the responders collection
// because partitioned regions sometimes send both a regular cache operation
// and a partitioned-region notification message to the same recipient
waitForMembers = new Vector(recipients);
waitForMembers.addAll(adjunctRecipients);
waitForMembers.addAll(needsOldValueInCacheOp);
waitForMembers.addAll(cachelessNodes);
}
if (DistributedCacheOperation.LOSS_SIMULATION_RATIO != 0.0) {
if (LOSS_SIMULATION_GENERATOR == null) {
LOSS_SIMULATION_GENERATOR = new Random(this.hashCode());
}
if ((LOSS_SIMULATION_GENERATOR.nextInt(100) * 1.0 / 100.0) < LOSS_SIMULATION_RATIO) {
if (logger.isDebugEnabled()) {
logger.debug("loss simulation is inhibiting message transmission to {}", recipients);
}
waitForMembers.removeAll(recipients);
recipients = Collections.emptySet();
}
}
if (reliableOp) {
this.departedMembers = new HashSet();
this.processor = new ReliableCacheReplyProcessor(region.getSystem(), waitForMembers, this.departedMembers);
} else {
this.processor = new CacheOperationReplyProcessor(region.getSystem(), waitForMembers);
}
}
Set failures = null;
CacheOperationMessage msg = createMessage();
initMessage(msg, this.processor);
if (DistributedCacheOperation.internalBeforePutOutgoing != null) {
DistributedCacheOperation.internalBeforePutOutgoing.run();
}
if (processor != null && msg.isSevereAlertCompatible()) {
this.processor.enableSevereAlertProcessing();
// if this message is distributing for a partitioned region message,
// we can't wait as long as the full ack-severe-alert-threshold or
// the sender might kick us out of the system before we can get an ack
// back
DistributedRegion r = getRegion();
if (r.isUsedForPartitionedRegionBucket() && event.getOperation().isEntry()) {
PartitionMessage pm = ((EntryEventImpl) event).getPartitionMessage();
if (pm != null && pm.getSender() != null && !pm.getSender().equals(r.getDistributionManager().getDistributionManagerId())) {
// PR message sent by another member
ReplyProcessor21.setShortSevereAlertProcessing(true);
}
}
}
msg.setMulticast(useMulticast);
msg.directAck = directAck;
if (region.isUsedForPartitionedRegionBucket()) {
if (!isPutAll && !isRemoveAll && filterRouting != null && filterRouting.hasMemberWithFilterInfo()) {
if (logger.isDebugEnabled()) {
logger.debug("Setting filter information for message to {}", filterRouting);
}
msg.filterRouting = filterRouting;
}
} else if (!routingComputed) {
msg.needsRouting = true;
}
initProcessor(processor, msg);
if (region.cache.isClosed() && !canBeSentDuringShutdown()) {
throw region.cache.getCacheClosedException(LocalizedStrings.DistributedCacheOperation_THE_CACHE_HAS_BEEN_CLOSED.toLocalizedString(), null);
}
msg.setRecipients(recipients);
failures = mgr.putOutgoing(msg);
// distribute to members needing the old value now
if (needsOldValueInCacheOp.size() > 0) {
msg.appendOldValueToMessage((EntryEventImpl) this.event);
msg.resetRecipients();
msg.setRecipients(needsOldValueInCacheOp);
Set newFailures = mgr.putOutgoing(msg);
if (newFailures != null) {
if (logger.isDebugEnabled()) {
logger.debug("Failed sending ({}) to {}", msg, newFailures);
}
if (failures != null && failures.size() > 0) {
failures.addAll(newFailures);
} else {
failures = newFailures;
}
}
}
if (cachelessNodes.size() > 0) {
cachelessNodes.removeAll(cachelessNodesWithNoCacheServer);
if (cachelessNodes.size() > 0) {
msg.resetRecipients();
msg.setRecipients(cachelessNodes);
msg.setSendDelta(false);
Set newFailures = mgr.putOutgoing(msg);
if (newFailures != null) {
if (failures != null && failures.size() > 0) {
failures.addAll(newFailures);
} else {
failures = newFailures;
}
}
}
if (cachelessNodesWithNoCacheServer.size() > 0) {
msg.resetRecipients();
msg.setRecipients(cachelessNodesWithNoCacheServer);
msg.setSendDelta(false);
((UpdateMessage) msg).setSendDeltaWithFullValue(false);
Set newFailures = mgr.putOutgoing(msg);
if (newFailures != null) {
if (failures != null && failures.size() > 0) {
failures.addAll(newFailures);
} else {
failures = newFailures;
}
}
}
// add them back for the size calculations further below
cachelessNodes.addAll(cachelessNodesWithNoCacheServer);
}
if (failures != null && !failures.isEmpty() && logger.isDebugEnabled()) {
logger.debug("Failed sending ({}) to {} while processing event:{}", msg, failures, event);
}
Set<InternalDistributedMember> adjunctRecipientsWithNoCacheServer = new HashSet<InternalDistributedMember>();
// send partitioned region listener notification messages now
if (!adjunctRecipients.isEmpty()) {
if (cachelessNodes.size() > 0) {
// include the cacheless nodes in the recipient set used for the adjunct-message calculations
if (recipients.isEmpty()) {
recipients = cachelessNodes;
} else {
recipients.addAll(cachelessNodes);
}
}
adjunctRecipientsWithNoCacheServer.addAll(adjunctRecipients);
adviseCacheServers = ((Bucket) region).getPartitionedRegion().getCacheDistributionAdvisor().adviseCacheServers();
adjunctRecipientsWithNoCacheServer.removeAll(adviseCacheServers);
if (isPutAll) {
((BucketRegion) region).performPutAllAdjunctMessaging((DistributedPutAllOperation) this, recipients, adjunctRecipients, filterRouting, this.processor);
} else if (isRemoveAll) {
((BucketRegion) region).performRemoveAllAdjunctMessaging((DistributedRemoveAllOperation) this, recipients, adjunctRecipients, filterRouting, this.processor);
} else {
boolean calculateDelta = adjunctRecipientsWithNoCacheServer.size() < adjunctRecipients.size();
adjunctRecipients.removeAll(adjunctRecipientsWithNoCacheServer);
if (!adjunctRecipients.isEmpty()) {
((BucketRegion) region).performAdjunctMessaging(getEvent(), recipients, adjunctRecipients, filterRouting, this.processor, calculateDelta, true);
}
if (!adjunctRecipientsWithNoCacheServer.isEmpty()) {
((BucketRegion) region).performAdjunctMessaging(getEvent(), recipients, adjunctRecipientsWithNoCacheServer, filterRouting, this.processor, calculateDelta, false);
}
}
}
// compute local client routing before waiting for an ack only for a bucket
if (region.isUsedForPartitionedRegionBucket()) {
FilterInfo filterInfo = getLocalFilterRouting(filterRouting);
event.setLocalFilterInfo(filterInfo);
}
waitForAckIfNeeded(msg, persistentIds);
if (/* msg != null && */ reliableOp) {
Set successfulRecips = new HashSet(recipients);
successfulRecips.addAll(cachelessNodes);
successfulRecips.addAll(needsOldValueInCacheOp);
if (failures != null && !failures.isEmpty()) {
successfulRecips.removeAll(failures);
}
if (departedMembers != null) {
successfulRecips.removeAll(departedMembers);
}
region.handleReliableDistribution(successfulRecips);
}
}
if (region.isUsedForPartitionedRegionBucket() && filterRouting != null) {
removeDestroyTokensFromCqResultKeys(filterRouting);
}
} catch (CancelException e) {
if (logger.isDebugEnabled()) {
logger.debug("distribution of message aborted by shutdown: {}", this);
}
throw e;
} catch (RuntimeException e) {
logger.info(LocalizedMessage.create(LocalizedStrings.DistributedCacheOperation_EXCEPTION_OCCURRED_WHILE_PROCESSING__0, this), e);
throw e;
} finally {
ReplyProcessor21.setShortSevereAlertProcessing(false);
}
}
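The javadoc above asks callers to bracket _distribute() with startOperation() and endOperation() in a try/finally block. The following is a hedged sketch of that calling convention; the exact signatures, including the view-version token, are assumptions here rather than something shown in the listing.

// sketch of the wrapping the javadoc describes; signatures are assumed
void distribute() {
    long viewVersion = startOperation();  // assumed to return a view-version token
    try {
        _distribute();                    // the method shown above
    } finally {
        endOperation(viewVersion);        // always runs, even if distribution throws
    }
}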
use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class DistributedCacheOperation method removeDestroyTokensFromCqResultKeys.
/**
* Cleans up destroyed events in the CQ result cache for remote CQs. While the CQ result keys are
* being cached, destroyed event keys are marked as destroyed rather than removed, so that the
* arrival of duplicate events can still be handled. Keys marked as destroyed are removed after the
* event is placed in the client's HA queue or distributed to the peers.
*
* This is similar to CacheClientNotifier.removeDestroyTokensFromCqResultKeys() where the
* destroyed events for local CQs are handled.
*/
private void removeDestroyTokensFromCqResultKeys(FilterRoutingInfo filterRouting) {
for (InternalDistributedMember m : filterRouting.getMembers()) {
FilterInfo filterInfo = filterRouting.getFilterInfo(m);
if (filterInfo.getCQs() == null) {
continue;
}
CacheProfile cf = (CacheProfile) ((Bucket) getRegion()).getPartitionedRegion().getCacheDistributionAdvisor().getProfile(m);
if (cf == null || cf.filterProfile == null || cf.filterProfile.isLocalProfile() || cf.filterProfile.getCqMap().isEmpty()) {
continue;
}
for (Object value : cf.filterProfile.getCqMap().values()) {
ServerCQ cq = (ServerCQ) value;
for (Map.Entry<Long, Integer> e : filterInfo.getCQs().entrySet()) {
Long cqID = e.getKey();
// if this entry is for the current CQ and marks a destroy, remove the key from the CQ result cache
if (cq.getFilterID() == cqID && (e.getValue().equals(MessageType.LOCAL_DESTROY))) {
cq.removeFromCqResultKeys(((EntryOperation) event).getKey(), true);
}
}
}
}
}
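The javadoc explains the pattern at work here: while CQ result keys are being cached, a destroyed key is marked rather than removed so that late duplicate events are still recognized, and the marker is dropped once the event has been queued or distributed. Below is a minimal sketch of that mark-then-remove-later pattern, using a plain map as a stand-in for the CQ result-key cache; the real ServerCQ API is not shown.

import java.util.HashMap;
import java.util.Map;

class CqResultKeySketch {
    private static final Object DESTROYED = new Object();
    private final Map<Object, Object> resultKeys = new HashMap<>();

    void markDestroyed(Object key) {
        // keep the key so a duplicate (late-arriving) event is still recognized
        resultKeys.put(key, DESTROYED);
    }

    void onEventDispatched(Object key) {
        // once the event is in the client HA queue or sent to peers,
        // the destroyed marker can be removed
        resultKeys.remove(key, DESTROYED);
    }
}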
use of org.apache.geode.distributed.internal.membership.InternalDistributedMember in project geode by apache.
the class DistributedRegion method basicClear.
@Override
void basicClear(RegionEventImpl regionEvent, boolean cacheWrite) {
if (this.concurrencyChecksEnabled && !this.dataPolicy.withReplication()) {
boolean retry = false;
do {
// non-replicate regions must defer to a replicate for clear/invalidate of region
Set<InternalDistributedMember> repls = this.distAdvisor.adviseReplicates();
if (!repls.isEmpty()) {
InternalDistributedMember mbr = repls.iterator().next();
RemoteRegionOperation op = RemoteRegionOperation.clear(mbr, this);
try {
op.distribute();
return;
} catch (CancelException e) {
this.stopper.checkCancelInProgress(e);
retry = true;
} catch (RemoteOperationException e) {
this.stopper.checkCancelInProgress(e);
retry = true;
}
}
} while (retry);
}
// if no version vector or if no replicates are around, use the default mechanism
super.basicClear(regionEvent, cacheWrite);
}
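basicClear shows the delegate-and-retry shape used when a non-replicate region with concurrency checks needs a clear: pick one replicate member, ask it to perform the operation remotely, and retry on a transient failure before falling back to the local path. A simplified sketch of that shape with placeholder types follows; unlike the code above, it returns false when no replicate is available instead of continuing to loop, which is a deliberate simplification.

import java.util.Set;
import java.util.function.Supplier;

class DelegateClearSketch {
    interface RemoteClear { void clearOn(String member) throws Exception; }

    static boolean tryRemoteClear(Supplier<Set<String>> adviseReplicates, RemoteClear op) {
        while (true) {
            Set<String> replicates = adviseReplicates.get();
            if (replicates.isEmpty()) {
                return false;           // no replicate available; caller falls back locally
            }
            String target = replicates.iterator().next();
            try {
                op.clearOn(target);     // delegate the clear to a replicate member
                return true;
            } catch (Exception transientFailure) {
                // membership may have changed; re-advise and retry
            }
        }
    }
}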