Use of org.apache.geode.internal.cache.UpdateOperation.UpdateMessage in project geode by apache.
The class DistributedCacheOperation, method _distribute().
/**
* About to distribute a cache operation to other members of the distributed system. This method
* determines who the recipients are and handles careful delivery of the operation to those
* members. This method should be wrapped by startOperation() and endOperation() in a try/finally
* block.
*/
private void _distribute() {
DistributedRegion region = getRegion();
DM mgr = region.getDistributionManager();
boolean reliableOp = isOperationReliable() && region.requiresReliabilityCheck();
if (SLOW_DISTRIBUTION_MS > 0) {
// test hook
try {
Thread.sleep(SLOW_DISTRIBUTION_MS);
} catch (InterruptedException ignore) {
Thread.currentThread().interrupt();
}
SLOW_DISTRIBUTION_MS = 0;
}
boolean isPutAll = (this instanceof DistributedPutAllOperation);
boolean isRemoveAll = (this instanceof DistributedRemoveAllOperation);
try {
// Recipients with CacheOp
Set<InternalDistributedMember> recipients = getRecipients();
Map<InternalDistributedMember, PersistentMemberID> persistentIds = null;
if (region.getDataPolicy().withPersistence()) {
persistentIds = region.getDistributionAdvisor().adviseInitializedPersistentMembers();
}
// some members requiring old value are also in the cache op recipients set
Set needsOldValueInCacheOp = Collections.emptySet();
// set client routing information into the event
boolean routingComputed = false;
FilterRoutingInfo filterRouting = null;
// recipients that will get a cacheop msg and also a PR message
Set twoMessages = Collections.emptySet();
if (region.isUsedForPartitionedRegionBucket()) {
twoMessages = ((Bucket) region).getBucketAdvisor().adviseRequiresTwoMessages();
routingComputed = true;
filterRouting = getRecipientFilterRouting(recipients);
if (filterRouting != null) {
if (logger.isDebugEnabled()) {
logger.debug("Computed this filter routing: {}", filterRouting);
}
}
}
// some members need PR notification of the change for client/wan notification
Set adjunctRecipients = Collections.emptySet();
// Partitioned region listener notification messages piggyback on this operation's
// reply processor and need to be sent at the same time as the operation's message
if (this.supportsAdjunctMessaging() && region.isUsedForPartitionedRegionBucket()) {
BucketRegion br = (BucketRegion) region;
adjunctRecipients = getAdjunctReceivers(br, recipients, twoMessages, filterRouting);
}
EntryEventImpl entryEvent = event.getOperation().isEntry() ? getEvent() : null;
if (entryEvent != null && entryEvent.hasOldValue()) {
if (testSendingOldValues) {
needsOldValueInCacheOp = new HashSet(recipients);
} else {
needsOldValueInCacheOp = region.getCacheDistributionAdvisor().adviseRequiresOldValueInCacheOp();
}
recipients.removeAll(needsOldValueInCacheOp);
}
Set cachelessNodes = Collections.emptySet();
Set adviseCacheServers;
Set<InternalDistributedMember> cachelessNodesWithNoCacheServer = new HashSet<>();
if (region.getDistributionConfig().getDeltaPropagation() && this.supportsDeltaPropagation()) {
cachelessNodes = region.getCacheDistributionAdvisor().adviseEmptys();
if (!cachelessNodes.isEmpty()) {
List list = new ArrayList(cachelessNodes);
for (Object member : cachelessNodes) {
if (!recipients.contains(member) || adjunctRecipients.contains(member)) {
// Don't include those originally excluded.
list.remove(member);
}
}
cachelessNodes.clear();
recipients.removeAll(list);
cachelessNodes.addAll(list);
}
cachelessNodesWithNoCacheServer.addAll(cachelessNodes);
adviseCacheServers = region.getCacheDistributionAdvisor().adviseCacheServers();
cachelessNodesWithNoCacheServer.removeAll(adviseCacheServers);
}
if (recipients.isEmpty() && adjunctRecipients.isEmpty() && needsOldValueInCacheOp.isEmpty() && cachelessNodes.isEmpty()) {
if (region.isInternalRegion()) {
if (mgr.getNormalDistributionManagerIds().size() > 1) {
// suppress this msg if we are the only member.
if (logger.isTraceEnabled()) {
logger.trace("<No Recipients> {}", this);
}
}
} else {
if (mgr.getNormalDistributionManagerIds().size() > 1) {
// suppress this msg if we are the only member.
if (logger.isDebugEnabled()) {
logger.debug("<No Recipients> {}", this);
}
}
}
if (!reliableOp || region.isNoDistributionOk()) {
// nothing needs to be done in this case
} else {
region.handleReliableDistribution(Collections.emptySet());
}
// compute local client routing before waiting for an ack only for a bucket
if (region.isUsedForPartitionedRegionBucket()) {
FilterInfo filterInfo = getLocalFilterRouting(filterRouting);
this.event.setLocalFilterInfo(filterInfo);
}
} else {
boolean directAck = false;
boolean useMulticast = region.getMulticastEnabled() && region.getSystem().getConfig().getMcastPort() != 0 && this.supportsMulticast();
boolean shouldAck = shouldAck();
if (shouldAck) {
if (this.supportsDirectAck() && adjunctRecipients.isEmpty()) {
if (region.getSystem().threadOwnsResources()) {
directAck = true;
}
}
}
// don't send the operation back to the sender of a remote-operation message;
// those messages send their own response. fixes bug #45973
if (entryEvent != null) {
RemoteOperationMessage rmsg = entryEvent.getRemoteOperationMessage();
if (rmsg != null) {
recipients.remove(rmsg.getSender());
// bug #45106: can't mcast or the sender of the one-hop op will get it
useMulticast = false;
}
}
if (logger.isDebugEnabled()) {
logger.debug("recipients for {}: {} with adjunct messages to: {}", this, recipients, adjunctRecipients);
}
if (shouldAck) {
// adjunct messages are sent using the same reply processor, so
// add them to the processor's membership set
Collection waitForMembers = null;
if (recipients.size() > 0 && adjunctRecipients.size() == 0 && cachelessNodes.isEmpty()) {
// the common case
waitForMembers = recipients;
} else if (!cachelessNodes.isEmpty()) {
waitForMembers = new HashSet(recipients);
waitForMembers.addAll(cachelessNodes);
} else {
// note that we use a Vector instead of a Set for the responders collection
// because partitioned regions sometimes send both a regular cache operation
// and a partitioned-region notification message to the same recipient
waitForMembers = new Vector(recipients);
waitForMembers.addAll(adjunctRecipients);
waitForMembers.addAll(needsOldValueInCacheOp);
waitForMembers.addAll(cachelessNodes);
}
if (DistributedCacheOperation.LOSS_SIMULATION_RATIO != 0.0) {
if (LOSS_SIMULATION_GENERATOR == null) {
LOSS_SIMULATION_GENERATOR = new Random(this.hashCode());
}
if ((LOSS_SIMULATION_GENERATOR.nextInt(100) * 1.0 / 100.0) < LOSS_SIMULATION_RATIO) {
if (logger.isDebugEnabled()) {
logger.debug("loss simulation is inhibiting message transmission to {}", recipients);
}
waitForMembers.removeAll(recipients);
recipients = Collections.emptySet();
}
}
if (reliableOp) {
this.departedMembers = new HashSet();
this.processor = new ReliableCacheReplyProcessor(region.getSystem(), waitForMembers, this.departedMembers);
} else {
this.processor = new CacheOperationReplyProcessor(region.getSystem(), waitForMembers);
}
}
Set failures = null;
CacheOperationMessage msg = createMessage();
initMessage(msg, this.processor);
if (DistributedCacheOperation.internalBeforePutOutgoing != null) {
DistributedCacheOperation.internalBeforePutOutgoing.run();
}
if (processor != null && msg.isSevereAlertCompatible()) {
this.processor.enableSevereAlertProcessing();
// if this message is distributing for a partitioned region message,
// we can't wait as long as the full ack-severe-alert-threshold or
// the sender might kick us out of the system before we can get an ack
// back
DistributedRegion r = getRegion();
if (r.isUsedForPartitionedRegionBucket() && event.getOperation().isEntry()) {
PartitionMessage pm = ((EntryEventImpl) event).getPartitionMessage();
if (pm != null && pm.getSender() != null && !pm.getSender().equals(r.getDistributionManager().getDistributionManagerId())) {
// PR message sent by another member
ReplyProcessor21.setShortSevereAlertProcessing(true);
}
}
}
msg.setMulticast(useMulticast);
msg.directAck = directAck;
if (region.isUsedForPartitionedRegionBucket()) {
if (!isPutAll && !isRemoveAll && filterRouting != null && filterRouting.hasMemberWithFilterInfo()) {
if (logger.isDebugEnabled()) {
logger.debug("Setting filter information for message to {}", filterRouting);
}
msg.filterRouting = filterRouting;
}
} else if (!routingComputed) {
msg.needsRouting = true;
}
initProcessor(processor, msg);
if (region.cache.isClosed() && !canBeSentDuringShutdown()) {
throw region.cache.getCacheClosedException(LocalizedStrings.DistributedCacheOperation_THE_CACHE_HAS_BEEN_CLOSED.toLocalizedString(), null);
}
msg.setRecipients(recipients);
failures = mgr.putOutgoing(msg);
// distribute to members needing the old value now
if (needsOldValueInCacheOp.size() > 0) {
msg.appendOldValueToMessage((EntryEventImpl) this.event);
msg.resetRecipients();
msg.setRecipients(needsOldValueInCacheOp);
Set newFailures = mgr.putOutgoing(msg);
if (newFailures != null) {
if (logger.isDebugEnabled()) {
logger.debug("Failed sending ({}) to {}", msg, newFailures);
}
if (failures != null && failures.size() > 0) {
failures.addAll(newFailures);
} else {
failures = newFailures;
}
}
}
if (cachelessNodes.size() > 0) {
cachelessNodes.removeAll(cachelessNodesWithNoCacheServer);
if (cachelessNodes.size() > 0) {
msg.resetRecipients();
msg.setRecipients(cachelessNodes);
msg.setSendDelta(false);
Set newFailures = mgr.putOutgoing(msg);
if (newFailures != null) {
if (failures != null && failures.size() > 0) {
failures.addAll(newFailures);
} else {
failures = newFailures;
}
}
}
if (cachelessNodesWithNoCacheServer.size() > 0) {
msg.resetRecipients();
msg.setRecipients(cachelessNodesWithNoCacheServer);
msg.setSendDelta(false);
((UpdateMessage) msg).setSendDeltaWithFullValue(false);
Set newFailures = mgr.putOutgoing(msg);
if (newFailures != null) {
if (failures != null && failures.size() > 0) {
failures.addAll(newFailures);
} else {
failures = newFailures;
}
}
}
// add them back in for the size calculation below
cachelessNodes.addAll(cachelessNodesWithNoCacheServer);
}
if (failures != null && !failures.isEmpty() && logger.isDebugEnabled()) {
logger.debug("Failed sending ({}) to {} while processing event:{}", msg, failures, event);
}
Set<InternalDistributedMember> adjunctRecipientsWithNoCacheServer = new HashSet<InternalDistributedMember>();
// send partitioned region listener notification messages now
if (!adjunctRecipients.isEmpty()) {
if (cachelessNodes.size() > 0) {
// add the cacheless nodes back into the recipient set used for the adjunct-message calculations
if (recipients.isEmpty()) {
recipients = cachelessNodes;
} else {
recipients.addAll(cachelessNodes);
}
}
adjunctRecipientsWithNoCacheServer.addAll(adjunctRecipients);
adviseCacheServers = ((Bucket) region).getPartitionedRegion().getCacheDistributionAdvisor().adviseCacheServers();
adjunctRecipientsWithNoCacheServer.removeAll(adviseCacheServers);
if (isPutAll) {
((BucketRegion) region).performPutAllAdjunctMessaging((DistributedPutAllOperation) this, recipients, adjunctRecipients, filterRouting, this.processor);
} else if (isRemoveAll) {
((BucketRegion) region).performRemoveAllAdjunctMessaging((DistributedRemoveAllOperation) this, recipients, adjunctRecipients, filterRouting, this.processor);
} else {
boolean calculateDelta = adjunctRecipientsWithNoCacheServer.size() < adjunctRecipients.size();
adjunctRecipients.removeAll(adjunctRecipientsWithNoCacheServer);
if (!adjunctRecipients.isEmpty()) {
((BucketRegion) region).performAdjunctMessaging(getEvent(), recipients, adjunctRecipients, filterRouting, this.processor, calculateDelta, true);
}
if (!adjunctRecipientsWithNoCacheServer.isEmpty()) {
((BucketRegion) region).performAdjunctMessaging(getEvent(), recipients, adjunctRecipientsWithNoCacheServer, filterRouting, this.processor, calculateDelta, false);
}
}
}
// compute local client routing before waiting for an ack only for a bucket
if (region.isUsedForPartitionedRegionBucket()) {
FilterInfo filterInfo = getLocalFilterRouting(filterRouting);
event.setLocalFilterInfo(filterInfo);
}
waitForAckIfNeeded(msg, persistentIds);
if (/* msg != null && */ reliableOp) {
Set successfulRecips = new HashSet(recipients);
successfulRecips.addAll(cachelessNodes);
successfulRecips.addAll(needsOldValueInCacheOp);
if (failures != null && !failures.isEmpty()) {
successfulRecips.removeAll(failures);
}
if (departedMembers != null) {
successfulRecips.removeAll(departedMembers);
}
region.handleReliableDistribution(successfulRecips);
}
}
if (region.isUsedForPartitionedRegionBucket() && filterRouting != null) {
removeDestroyTokensFromCqResultKeys(filterRouting);
}
} catch (CancelException e) {
if (logger.isDebugEnabled()) {
logger.debug("distribution of message aborted by shutdown: {}", this);
}
throw e;
} catch (RuntimeException e) {
logger.info(LocalizedMessage.create(LocalizedStrings.DistributedCacheOperation_EXCEPTION_OCCURRED_WHILE_PROCESSING__0, this), e);
throw e;
} finally {
ReplyProcessor21.setShortSevereAlertProcessing(false);
}
}
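The Javadoc above says _distribute() should be wrapped by startOperation() and endOperation() in a try/finally block. A minimal sketch of that wrapping, assuming startOperation() returns a view-version token that endOperation() takes back, as the Javadoc's convention implies (in practice the public distribute() method is the usual entry point and performs this wrapping internally; this is an illustration, not verified against a specific Geode release):

// sketch of the expected wrapping around the private _distribute() shown above
long viewVersion = startOperation();   // reserve the distribution advisor's membership view
try {
  _distribute();                       // determine recipients and send the cache operation
} finally {
  endOperation(viewVersion);           // release the view even if distribution throws
}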
Use of org.apache.geode.internal.cache.UpdateOperation.UpdateMessage in project geode by apache.
The class InterruptsDUnitTest, method testDRPutWithInterrupt().
/**
* A simple test that a put on a distributed region completes consistently when the putting thread is interrupted while the update is being distributed.
*
* @throws Throwable
*/
@Test
public void testDRPutWithInterrupt() throws Throwable {
Host host = Host.getHost(0);
final VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
createRegion(vm0);
// put some data in vm0
createData(vm0, 0, 10, "a");
final SerializableCallable interruptTask = new SerializableCallable() {
@Override
public Object call() throws Exception {
puttingThread.interrupt();
return null;
}
};
vm1.invoke(new SerializableCallable() {
@Override
public Object call() throws Exception {
disconnectFromDS();
DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
@Override
public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
if (message instanceof UpdateMessage && ((UpdateMessage) message).regionPath.contains("region") && doInterrupt.compareAndSet(true, false)) {
vm0.invoke(interruptTask);
DistributionMessageObserver.setInstance(null);
}
}
});
return null;
}
});
createRegion(vm1);
SerializableCallable doPuts = new SerializableCallable() {
@Override
public Object call() throws Exception {
puttingThread = Thread.currentThread();
Region<Object, Object> region = getCache().getRegion("region");
long value = 0;
long end = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(MAX_WAIT);
while (!Thread.currentThread().isInterrupted()) {
region.put(0, value);
if (System.nanoTime() > end) {
fail("Did not get interrupted in 60 seconds");
}
}
return null;
}
};
AsyncInvocation async0 = vm0.invokeAsync(doPuts);
vm1.invoke(new SerializableCallable() {
@Override
public Object call() throws Exception {
doInterrupt.set(true);
return null;
}
});
// vm0.invoke(new SerializableCallable() {
//
// @Override
// public Object call() throws Exception {
// long end = System.nanoTime() + TimeUnit.SECONDS.toNanos(MAX_WAIT);
// while(puttingThread == null) {
// Thread.sleep(50);
// if(System.nanoTime() > end) {
// fail("Putting thread not set in 60 seconds");
// }
// }
//
// puttingThread.interrupt();
// return null;
// }
// });
async0.getResult();
Object value0 = checkCacheAndGetValue(vm0);
Object value1 = checkCacheAndGetValue(vm1);
assertEquals(value0, value1);
}
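The test reads several fields that are declared elsewhere in the test class and are not part of this excerpt (puttingThread, doInterrupt, MAX_WAIT). Hedged guesses at their declarations, inferred only from how they are used above:

// assumed declarations, inferred from usage; not copied from the project source
// requires: import java.util.concurrent.atomic.AtomicBoolean;
private static volatile Thread puttingThread;                               // set by the putting loop, interrupted from the observer callback
private static final AtomicBoolean doInterrupt = new AtomicBoolean(false);  // armed by vm1 so the next UpdateMessage triggers the interrupt
private static final long MAX_WAIT = 60 * 1000;                             // milliseconds, matching the "60 seconds" failure message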
Use of org.apache.geode.internal.cache.UpdateOperation.UpdateMessage in project geode by apache.
The class InterruptClientServerDUnitTest, method testClientPutWithInterrupt().
/**
* A simple test that a client put completes consistently when the putting thread is interrupted while the server distributes the update.
*
* @throws Throwable
*/
@Test
public void testClientPutWithInterrupt() throws Throwable {
IgnoredException.addIgnoredException("InterruptedException");
Host host = Host.getHost(0);
final VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
final VM vm2 = host.getVM(2);
int port = AvailablePortHelper.getRandomAvailableTCPPort();
createRegionAndServer(vm0, port);
// put some data in vm0
createData(vm0, 0, 10, "a");
final SerializableCallable interruptTask = new SerializableCallable() {
@Override
public Object call() throws Exception {
puttingThread.interrupt();
return null;
}
};
vm1.invoke(new SerializableCallable() {
@Override
public Object call() throws Exception {
disconnectFromDS();
DistributionMessageObserver.setInstance(new DistributionMessageObserver() {
@Override
public void beforeProcessMessage(DistributionManager dm, DistributionMessage message) {
if (message instanceof UpdateMessage && ((UpdateMessage) message).regionPath.contains("region") && doInterrupt.compareAndSet(true, false)) {
vm2.invoke(interruptTask);
DistributionMessageObserver.setInstance(null);
}
}
});
return null;
}
});
createRegion(vm1);
createClientRegion(vm2, port);
SerializableCallable doPuts = new SerializableCallable() {
@Override
public Object call() throws Exception {
puttingThread = Thread.currentThread();
Region<Object, Object> region = getCache().getRegion("region");
long value = 0;
long end = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(MAX_WAIT);
while (!Thread.currentThread().isInterrupted()) {
region.put(0, value);
if (System.nanoTime() > end) {
fail("Did not get interrupted in 60 seconds");
}
}
return null;
}
};
AsyncInvocation async0 = vm2.invokeAsync(doPuts);
vm1.invoke(new SerializableCallable() {
@Override
public Object call() throws Exception {
doInterrupt.set(true);
return null;
}
});
// vm0.invoke(new SerializableCallable() {
//
// @Override
// public Object call() throws Exception {
// long end = System.nanoTime() + TimeUnit.SECONDS.toNanos(MAX_WAIT);
// while(puttingThread == null) {
// Thread.sleep(50);
// if(System.nanoTime() > end) {
// fail("Putting thread not set in 60 seconds");
// }
// }
//
// puttingThread.interrupt();
// return null;
// }
// });
async0.getResult();
Object value0 = checkCacheAndGetValue(vm0);
Object value1 = checkCacheAndGetValue(vm1);
assertEquals(value0, value1);
}
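Both tests end by calling checkCacheAndGetValue(vm) on each member and asserting the two results are equal; that helper is not included in this excerpt. A plausible sketch, assuming the region name "region" and the single key 0 used by the put loops above (a guess at the helper, not the project's actual code):

// hypothetical helper, inferred from how the tests use it
private Object checkCacheAndGetValue(VM vm) {
  return vm.invoke(new SerializableCallable() {
    @Override
    public Object call() throws Exception {
      // the cache and region should still be usable after the interrupt
      Region<Object, Object> region = getCache().getRegion("region");
      assertNotNull(region);
      // return the last value the interrupted putting loop wrote for key 0
      return region.get(0);
    }
  });
}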