Use of org.apache.geode.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue in project geode by apache.
In the class PartitionedRegion, the method postCreateRegion:
@Override
protected void postCreateRegion() {
super.postCreateRegion();
CacheListener[] listeners = fetchCacheListenersField();
if (listeners != null && listeners.length > 0) {
Set others = getRegionAdvisor().adviseGeneric();
for (int i = 0; i < listeners.length; i++) {
if (listeners[i] instanceof RegionMembershipListener) {
RegionMembershipListener rml = (RegionMembershipListener) listeners[i];
try {
DistributedMember[] otherDms = new DistributedMember[others.size()];
others.toArray(otherDms);
rml.initialMembers(this, otherDms);
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned now, so don't let this thread continue.
throw err;
} catch (Throwable t) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
logger.error(LocalizedMessage.create(LocalizedStrings.DistributedRegion_EXCEPTION_OCCURRED_IN_REGIONMEMBERSHIPLISTENER), t);
}
}
}
}
PartitionListener[] partitionListeners = this.getPartitionListeners();
if (partitionListeners != null && partitionListeners.length != 0) {
for (int i = 0; i < partitionListeners.length; i++) {
PartitionListener listener = partitionListeners[i];
if (listener != null) {
listener.afterRegionCreate(this);
}
}
}
Set<String> allGatewaySenderIds = getAllGatewaySenderIds();
if (!allGatewaySenderIds.isEmpty()) {
for (GatewaySender sender : cache.getAllGatewaySenders()) {
if (sender.isParallel() && allGatewaySenderIds.contains(sender.getId())) {
/*
* get the ParallelGatewaySender to create the colocated partitioned region for this
* region.
*/
if (sender.isRunning()) {
AbstractGatewaySender senderImpl = (AbstractGatewaySender) sender;
((ConcurrentParallelGatewaySenderQueue) senderImpl.getQueues().toArray(new RegionQueue[1])[0]).addShadowPartitionedRegionForUserPR(this);
}
}
}
}
}
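The method above only attaches a shadow queue region when the user partitioned region names a parallel gateway sender that is already running. The following is a minimal, illustrative sketch of the public-API configuration that drives this path; the sender id "ln", the remote distributed-system id 2, and the region name "customer" are assumed example values, and the distributed-system settings (locators, distributed-system-id, remote-locators) are left to gemfire.properties.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.wan.GatewaySender;
import org.apache.geode.cache.wan.GatewaySenderFactory;

public class ParallelSenderRegionSetup {
  public static void main(String[] args) {
    // distributed-system settings are assumed to come from gemfire.properties
    Cache cache = new CacheFactory().create();

    // create and start a parallel gateway sender; "ln" and remote id 2 are example values
    GatewaySenderFactory senderFactory = cache.createGatewaySenderFactory();
    senderFactory.setParallel(true);
    GatewaySender sender = senderFactory.create("ln", 2);
    sender.start();

    // creating a partitioned region with this sender id is what reaches postCreateRegion,
    // which then asks the sender's ConcurrentParallelGatewaySenderQueue to add a
    // shadow partitioned region for this user PR
    Region<Object, Object> region = cache.createRegionFactory(RegionShortcut.PARTITION)
        .addGatewaySenderId("ln")
        .create("customer");

    region.put("k1", "v1");
    cache.close();
  }
}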
Use of org.apache.geode.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue in project geode by apache.
In the class ParallelAsyncEventQueueImpl, the method stop:
@Override
public void stop() {
this.getLifeCycleLock().writeLock().lock();
try {
if (!this.isRunning()) {
return;
}
// Stop the dispatcher
AbstractGatewaySenderEventProcessor ev = this.eventProcessor;
if (ev != null && !ev.isStopped()) {
ev.stopProcessing();
}
// Stop the proxy (after the dispatcher, so the socket is still
// alive until after the dispatcher has stopped)
stompProxyDead();
// Close the listeners
for (AsyncEventListener listener : this.listeners) {
listener.close();
}
// stop the running threads and close any open sockets
((ConcurrentParallelGatewaySenderQueue) this.eventProcessor.getQueue()).cleanUp();
logger.info(LocalizedMessage.create(LocalizedStrings.GatewayImpl_STOPPED__0, this));
InternalDistributedSystem system = (InternalDistributedSystem) this.cache.getDistributedSystem();
system.handleResourceEvent(ResourceEvent.GATEWAYSENDER_STOP, this);
clearTempEventsAfterSenderStopped();
} finally {
this.getLifeCycleLock().writeLock().unlock();
}
}
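Every phase of stop() runs under the sender's lifecycle write lock, so start/stop cannot interleave with threads that use the queue under the read lock. Below is a condensed, stand-alone sketch of that guard pattern; the class and field names are illustrative, not Geode API.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative sketch of the lifecycle-lock pattern used by stop():
// writers (start/stop) take the write lock, normal queue operations take the read lock.
class LifecycleGuardedComponent {
  private final ReentrantReadWriteLock lifeCycleLock = new ReentrantReadWriteLock();
  private volatile boolean running;

  void stop() {
    lifeCycleLock.writeLock().lock();
    try {
      if (!running) {
        return; // already stopped; nothing to do
      }
      // ... stop the dispatcher, close listeners, clean up the queue ...
      running = false;
    } finally {
      lifeCycleLock.writeLock().unlock();
    }
  }

  void enqueue(Object event) {
    lifeCycleLock.readLock().lock();
    try {
      if (!running) {
        throw new IllegalStateException("component is stopped");
      }
      // ... add the event to the queue ...
    } finally {
      lifeCycleLock.readLock().unlock();
    }
  }
}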
Use of org.apache.geode.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue in project geode by apache.
In the class PartitionedRegion, the method destroyParallelGatewaySenderRegion:
public void destroyParallelGatewaySenderRegion(Operation op, boolean cacheWrite, boolean lock, boolean callbackEvents) {
if (logger.isDebugEnabled()) {
logger.debug("Destoying parallel queue region for senders: {}", this.getParallelGatewaySenderIds());
}
boolean keepWaiting = true;
while (true) {
List<String> pausedSenders = new ArrayList<String>();
List<ConcurrentParallelGatewaySenderQueue> parallelQueues = new ArrayList<ConcurrentParallelGatewaySenderQueue>();
isDestroyedForParallelWAN = true;
int countOfQueueRegionsToBeDestroyed = 0;
for (String senderId : this.getParallelGatewaySenderIds()) {
AbstractGatewaySender sender = (AbstractGatewaySender) this.cache.getGatewaySender(senderId);
if (sender == null || sender.getEventProcessor() == null) {
continue;
}
if (cacheWrite) {
// in case of a destroy operation, senders should be resumed
if (sender.isPaused()) {
pausedSenders.add(senderId);
continue;
}
}
if (pausedSenders.isEmpty()) {
// if there are paused senders then only
// check for other paused senders instead
// of building the list of shadow PRs
AbstractGatewaySenderEventProcessor ep = sender.getEventProcessor();
if (ep == null)
continue;
ConcurrentParallelGatewaySenderQueue parallelQueue = (ConcurrentParallelGatewaySenderQueue) ep.getQueue();
PartitionedRegion parallelQueueRegion = parallelQueue.getRegion(this.getFullPath());
// this may have been removed in a previous iteration
if (parallelQueueRegion == null || parallelQueueRegion.isDestroyed || parallelQueueRegion.isClosed) {
continue;
}
parallelQueues.add(parallelQueue);
countOfQueueRegionsToBeDestroyed++;
}
}
if (!pausedSenders.isEmpty()) {
String exception = null;
if (pausedSenders.size() == 1) {
exception = LocalizedStrings.PartitionedRegion_GATEWAYSENDER_0_IS_PAUSED_RESUME_IT_BEFORE_DESTROYING_USER_REGION_1.toLocalizedString(pausedSenders, this.getName());
} else {
exception = LocalizedStrings.PartitionedRegion_GATEWAYSENDERS_0_ARE_PAUSED_RESUME_THEM_BEFORE_DESTROYING_USER_REGION_1.toLocalizedString(pausedSenders, this.getName());
}
isDestroyedForParallelWAN = false;
throw new GatewaySenderException(exception);
}
if (countOfQueueRegionsToBeDestroyed == 0) {
break;
}
for (ConcurrentParallelGatewaySenderQueue parallelQueue : parallelQueues) {
PartitionedRegion parallelQueueRegion = parallelQueue.getRegion(this.getFullPath());
// keepWaiting comes from the MAXIMUM_SHUTDOWN_WAIT_TIME handling below
if (cacheWrite && parallelQueueRegion.size() != 0 && keepWaiting) {
continue;
} else {
// In any case, destroy the shadow PR locally. The distributed destroy of the
// user PR will take care of destroying the shadow PR locally on other
// nodes.
RegionEventImpl event = null;
if (op.isClose()) {
// In case of a cache close operation, we want the shadow PR's basic destroy to go
// through the CACHE_CLOSE condition of postDestroyRegion, not the
// closePartitionedRegion code
event = new RegionEventImpl(parallelQueueRegion, op, null, false, getMyId(), generateEventID());
} else {
event = new RegionEventImpl(parallelQueueRegion, Operation.REGION_LOCAL_DESTROY, null, false, getMyId(), generateEventID());
}
parallelQueueRegion.basicDestroyRegion(event, false, lock, callbackEvents);
parallelQueue.removeShadowPR(this.getFullPath());
countOfQueueRegionsToBeDestroyed--;
continue;
}
}
if (countOfQueueRegionsToBeDestroyed == 0) {
break;
}
if (cacheWrite) {
if (AbstractGatewaySender.MAXIMUM_SHUTDOWN_WAIT_TIME == -1) {
keepWaiting = true;
try {
Thread.sleep(5000);
} catch (InterruptedException ignore) {
// interrupted
}
} else {
try {
Thread.sleep(AbstractGatewaySender.MAXIMUM_SHUTDOWN_WAIT_TIME * 1000);
} catch (InterruptedException ignore) {
// ignore the interrupt
}
keepWaiting = false;
}
}
}
}
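When cacheWrite is true, the loop above keeps polling until the shadow queue regions drain, sleeping 5 seconds per pass when MAXIMUM_SHUTDOWN_WAIT_TIME is -1 (wait indefinitely) or sleeping once for the configured time and then destroying whatever is left. The following is a stripped-down sketch of that wait logic, with hypothetical names and a plain java.util.Queue standing in for the shadow regions.

import java.util.Queue;

class QueueDrainWait {
  // Illustrative sketch of the drain-and-wait loop used before destroying the shadow
  // regions: poll while the queue still holds events, sleeping 5 seconds per pass when
  // maxShutdownWaitSecs is -1 (wait forever), or sleeping once for maxShutdownWaitSecs
  // and then giving up and destroying whatever is left.
  static void waitForQueueToDrain(Queue<?> queue, int maxShutdownWaitSecs)
      throws InterruptedException {
    boolean keepWaiting = true;
    while (!queue.isEmpty() && keepWaiting) {
      if (maxShutdownWaitSecs == -1) {
        Thread.sleep(5000);
      } else {
        Thread.sleep(maxShutdownWaitSecs * 1000L);
        keepWaiting = false;
      }
    }
  }
}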
Use of org.apache.geode.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue in project geode by apache.
In the class PartitionedRegion, the method addAsyncEventQueueId:
public void addAsyncEventQueueId(String asyncEventQueueId) {
super.addAsyncEventQueueId(asyncEventQueueId);
new UpdateAttributesProcessor(this).distribute();
((PartitionedRegion) this).distributeUpdatedProfileOnSenderCreation();
GatewaySender sender = getCache().getGatewaySender(AsyncEventQueueImpl.getSenderIdFromAsyncEventQueueId(asyncEventQueueId));
if (sender != null && sender.isParallel() && sender.isRunning()) {
AbstractGatewaySender senderImpl = (AbstractGatewaySender) sender;
((ConcurrentParallelGatewaySenderQueue) senderImpl.getQueues().toArray(new RegionQueue[1])[0]).addShadowPartitionedRegionForUserPR(this);
}
}
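This internal hook fires when an async event queue id is attached to a partitioned region that already exists. Below is a hedged sketch of the public-API path that typically reaches it, attaching the queue at runtime through the region's attributes mutator; the queue id "aeq1", the listener body, and the region name "orders" are assumed example values.

import java.util.List;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.asyncqueue.AsyncEvent;
import org.apache.geode.cache.asyncqueue.AsyncEventListener;
import org.apache.geode.cache.asyncqueue.AsyncEventQueueFactory;

public class AttachAsyncEventQueueExample {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();

    // define a parallel async event queue; "aeq1" and the listener are example values
    AsyncEventQueueFactory aeqFactory = cache.createAsyncEventQueueFactory();
    aeqFactory.setParallel(true);
    aeqFactory.create("aeq1", new AsyncEventListener() {
      @Override
      public boolean processEvents(List<AsyncEvent> events) {
        events.forEach(e -> System.out.println("draining " + e.getKey()));
        return true; // true tells the queue the batch was processed
      }

      @Override
      public void close() {
        // no resources to release in this example
      }
    });

    // create the user partitioned region first, then attach the queue id at runtime;
    // this runtime attach is what ends up calling PartitionedRegion.addAsyncEventQueueId(...)
    Region<Object, Object> region =
        cache.createRegionFactory(RegionShortcut.PARTITION).create("orders");
    region.getAttributesMutator().addAsyncEventQueueId("aeq1");

    region.put("o-1", "first order");
    cache.close();
  }
}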
Use of org.apache.geode.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderQueue in project geode by apache.
In the class AbstractGatewaySenderEventProcessor, the method processQueue:
protected void processQueue() {
final boolean isDebugEnabled = logger.isDebugEnabled();
final boolean isTraceEnabled = logger.isTraceEnabled();
final int batchTimeInterval = sender.getBatchTimeInterval();
final GatewaySenderStats statistics = this.sender.getStatistics();
if (isDebugEnabled) {
logger.debug("STARTED processQueue {}", this.getId());
}
// list of the events peeked from queue
List<GatewaySenderEventImpl> events = null;
// list of the peeked events above after they have passed through the attached filters
List<GatewaySenderEventImpl> filteredList = new ArrayList<GatewaySenderEventImpl>();
// list of the PDX events which are peeked from the PDX region and need to go across to the remote site
List<GatewaySenderEventImpl> pdxEventsToBeDispatched = new ArrayList<GatewaySenderEventImpl>();
// list of filteredList + pdxEventsToBeDispatched events
List<GatewaySenderEventImpl> eventsToBeDispatched = new ArrayList<GatewaySenderEventImpl>();
for (; ; ) {
if (stopped()) {
break;
}
try {
// Check if paused. If so, wait for resumption
if (this.isPaused) {
waitForResumption();
}
// Peek a batch
if (isDebugEnabled) {
logger.debug("Attempting to peek a batch of {} events", this.batchSize);
}
for (; ; ) {
// check before sleeping
if (stopped()) {
if (isDebugEnabled) {
logger.debug("GatewaySenderEventProcessor is stopped. Returning without peeking events.");
}
break;
}
// Check if paused. If so, wait for resumption
if (this.isPaused) {
waitForResumption();
}
// We need to initialize the connection in the dispatcher before sending the first
// batch here ONLY, because we need the GatewayReceiver's version for
// filtering VERSION_ACTION events from being sent.
boolean sendUpdateVersionEvents = shouldSendVersionEvents(this.dispatcher);
// sleep a little bit, look for events
boolean interrupted = Thread.interrupted();
try {
if (resetLastPeekedEvents) {
resetLastPeekedEvents();
resetLastPeekedEvents = false;
}
{
// The code below was added to handle the case where the queue region is
// destroyed due to the user PR's local destroy or destroy operation.
// In that case we used to wait for the queue region to be recreated
// and only then peek from the region queue.
// With the latest change of multiple PRs sharing a single parallel sender, we
// can't wait for a particular region queue to be recreated, as there
// will be other region queues from which events can be picked
/*
* // Check if paused. If so, wait for resumption if (this.isPaused) {
* waitForResumption(); }
*
* synchronized (this.getQueue()) { // its quite possible that the queue region is //
* destroyed(userRegion // localdestroy destroys shadow region locally). In this case
* // better to // wait for shadows region to get recreated instead of keep loop //
* for peeking events if (this.getQueue().getRegion() == null ||
* this.getQueue().getRegion().isDestroyed()) { try { this.getQueue().wait();
* continue; // this continue is important to recheck the // conditions of stop/ pause
* after the wait of 1 sec } catch (InterruptedException e1) {
* Thread.currentThread().interrupt(); } } }
*/
}
events = this.queue.peek(this.batchSize, batchTimeInterval);
} catch (InterruptedException e) {
interrupted = true;
this.sender.getCancelCriterion().checkCancelInProgress(e);
// keep trying
continue;
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
if (events.isEmpty()) {
// nothing to do!
continue;
}
// this list is accessed by the ack reader thread, so create a new one every time. #50220
filteredList = new ArrayList<GatewaySenderEventImpl>();
filteredList.addAll(events);
// remove all events whose serialized value is no longer available
if (this.exception != null && this.exception.getCause() != null && this.exception.getCause() instanceof IllegalStateException) {
for (Iterator<GatewaySenderEventImpl> i = filteredList.iterator(); i.hasNext(); ) {
GatewaySenderEventImpl event = i.next();
if (event.isSerializedValueNotAvailable()) {
i.remove();
}
}
this.exception = null;
}
// Filter the events
for (GatewayEventFilter filter : sender.getGatewayEventFilters()) {
Iterator<GatewaySenderEventImpl> itr = filteredList.iterator();
while (itr.hasNext()) {
GatewayQueueEvent event = itr.next();
// version is < 7.0.1, especially to prevent another loop over events.
if (!sendUpdateVersionEvents && event.getOperation() == Operation.UPDATE_VERSION_STAMP) {
if (isTraceEnabled) {
logger.trace("Update Event Version event: {} removed from Gateway Sender queue: {}", event, sender);
}
itr.remove();
statistics.incEventsNotQueued();
continue;
}
boolean transmit = filter.beforeTransmit(event);
if (!transmit) {
if (isDebugEnabled) {
logger.debug("{}: Did not transmit event due to filtering: {}", sender.getId(), event);
}
itr.remove();
statistics.incEventsFiltered();
}
}
}
// mark secondary-bucket events as possible duplicates only for a parallel
// AsyncEventQueue (callback dispatcher), since the possibleDuplicate flag is not used in WAN.
if (this.getSender().isParallel() && (this.getDispatcher() instanceof GatewaySenderEventCallbackDispatcher)) {
Iterator<GatewaySenderEventImpl> itr = filteredList.iterator();
while (itr.hasNext()) {
GatewaySenderEventImpl event = (GatewaySenderEventImpl) itr.next();
PartitionedRegion qpr = null;
if (this.getQueue() instanceof ConcurrentParallelGatewaySenderQueue) {
qpr = ((ConcurrentParallelGatewaySenderQueue) this.getQueue()).getRegion(event.getRegionPath());
} else {
qpr = ((ParallelGatewaySenderQueue) this.getQueue()).getRegion(event.getRegionPath());
}
int bucketId = event.getBucketId();
// if the bucket for this event is no longer primary on this node, then set possibleDuplicate to true on the event
if (qpr != null) {
BucketRegion bucket = qpr.getDataStore().getLocalBucketById(bucketId);
if (bucket == null || !bucket.getBucketAdvisor().isPrimary()) {
event.setPossibleDuplicate(true);
if (isDebugEnabled) {
logger.debug("Bucket id: {} is no longer primary on this node. The event: {} will be dispatched from this node with possibleDuplicate set to true.", bucketId, event);
}
}
}
}
}
eventsToBeDispatched.clear();
if (!(this.dispatcher instanceof GatewaySenderEventCallbackDispatcher)) {
// store the batch before dispatching so it can be retrieved by the ack thread.
List<GatewaySenderEventImpl>[] eventsArr = (List<GatewaySenderEventImpl>[]) new List[2];
eventsArr[0] = events;
eventsArr[1] = filteredList;
this.batchIdToEventsMap.put(getBatchId(), eventsArr);
// find the PDX events and prepend them to the list
pdxEventsToBeDispatched = addPDXEvent();
eventsToBeDispatched.addAll(pdxEventsToBeDispatched);
if (!pdxEventsToBeDispatched.isEmpty()) {
this.batchIdToPDXEventsMap.put(getBatchId(), pdxEventsToBeDispatched);
}
}
eventsToBeDispatched.addAll(filteredList);
// Conflate the batch. Event conflation only occurs on the queue.
// Once an event has been peeked into a batch, it won't be
// conflated. So if events go through the queue quickly (as in the
// no-ack case), then multiple events for the same key may end up in
// the batch.
List conflatedEventsToBeDispatched = conflate(eventsToBeDispatched);
if (isDebugEnabled) {
logBatchFine("During normal processing, dispatching the following ", conflatedEventsToBeDispatched);
}
boolean success = this.dispatcher.dispatchBatch(conflatedEventsToBeDispatched, sender.isRemoveFromQueueOnException(), false);
if (success) {
if (isDebugEnabled) {
logger.debug("During normal processing, successfully dispatched {} events (batch #{})", conflatedEventsToBeDispatched.size(), getBatchId());
}
removeEventFromFailureMap(getBatchId());
} else {
if (!skipFailureLogging(getBatchId())) {
logger.warn(LocalizedMessage.create(LocalizedStrings.GatewayImpl_EVENT_QUEUE_DISPATCH_FAILED, new Object[] { filteredList.size(), getBatchId() }));
}
}
// check again, don't do post-processing if we're stopped.
if (stopped()) {
break;
}
// If the batch is successfully processed, remove it from the queue.
if (success) {
if (this.dispatcher instanceof GatewaySenderEventCallbackDispatcher) {
handleSuccessfulBatchDispatch(conflatedEventsToBeDispatched, events);
} else {
incrementBatchId();
}
// mark the dispatched PDX events so they are not re-dispatched
for (GatewaySenderEventImpl pdxGatewaySenderEvent : pdxEventsToBeDispatched) {
pdxGatewaySenderEvent.isDispatched = true;
}
if (TEST_HOOK) {
this.numEventsDispatched += conflatedEventsToBeDispatched.size();
}
} // successful batch
else {
// The batch was unsuccessful.
if (this.dispatcher instanceof GatewaySenderEventCallbackDispatcher) {
handleUnSuccessfulBatchDispatch(events);
this.resetLastPeekedEvents = true;
} else {
handleUnSuccessfulBatchDispatch(events);
if (!resetLastPeekedEvents) {
while (!this.dispatcher.dispatchBatch(conflatedEventsToBeDispatched, sender.isRemoveFromQueueOnException(), true)) {
if (isDebugEnabled) {
logger.debug("During normal processing, unsuccessfully dispatched {} events (batch #{})", conflatedEventsToBeDispatched.size(), getBatchId());
}
if (stopped() || resetLastPeekedEvents) {
break;
}
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
incrementBatchId();
}
}
} // unsuccessful batch
if (logger.isDebugEnabled()) {
logger.debug("Finished processing events (batch #{})", (getBatchId() - 1));
}
} // for
} catch (RegionDestroyedException e) {
// setting this flag will ensure that already peeked events will make
// it to the next batch before new events are peeked (fix for #48784)
this.resetLastPeekedEvents = true;
// most likely in the parallel WAN case: when the user PR is locally destroyed, the shadow PR is also locally destroyed
if (logger.isDebugEnabled()) {
logger.debug("Observed RegionDestroyedException on Queue's region.");
}
} catch (CancelException e) {
logger.debug("Caught cancel exception", e);
setIsStopped(true);
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned now, so don't let this thread continue.
throw err;
} catch (Throwable e) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
// Well, OK. Some strange nonfatal thing.
if (stopped()) {
// don't complain, just exit.
return;
}
if (events != null) {
handleUnSuccessfulBatchDispatch(events);
}
this.resetLastPeekedEvents = true;
if (e instanceof GatewaySenderException) {
Throwable cause = e.getCause();
if (cause instanceof IOException || e instanceof GatewaySenderConfigurationException) {
continue;
}
}
// We'll log it but continue on with the next batch.
logger.warn(LocalizedMessage.create(LocalizedStrings.GatewayImpl_AN_EXCEPTION_OCCURRED_THE_DISPATCHER_WILL_CONTINUE), e);
}
} // for
}
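The filtering loop in processQueue calls GatewayEventFilter.beforeTransmit on every peeked event and removes events the filter rejects, bumping the eventsFiltered statistic. Below is a hedged sketch of a filter that would participate in that loop; the class name and the "audit" region-name rule are illustrative assumptions. It would be registered on the sender before creation, for example via GatewaySenderFactory.addGatewayEventFilter(new AuditRegionOnlyFilter()).

import org.apache.geode.cache.wan.GatewayEventFilter;
import org.apache.geode.cache.wan.GatewayQueueEvent;

// Example filter plugged into a gateway sender. processQueue() invokes beforeTransmit(...)
// on each peeked event; returning false removes the event from the current batch.
public class AuditRegionOnlyFilter implements GatewayEventFilter {

  @Override
  public boolean beforeEnqueue(GatewayQueueEvent event) {
    return true; // queue everything; decide at transmit time
  }

  @Override
  public boolean beforeTransmit(GatewayQueueEvent event) {
    // illustrative rule: only ship events originating from a region whose name
    // contains "audit"; everything else is filtered out of the batch
    return event.getRegion().getName().contains("audit");
  }

  @Override
  public void afterAcknowledgement(GatewayQueueEvent event) {
    // no-op: nothing to do once the remote site acknowledges the batch
  }

  @Override
  public void close() {
    // no resources held by this filter
  }
}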