use of org.apache.geode.cache.wan.GatewayEventFilter in project geode by apache.
the class AbstractGatewaySenderEventProcessor method handleSuccessBatchAck.
/**
 * Handles a successful acknowledgement for the batch identified by {@code batchId}:
 * marks any PDX events shipped with the batch as acked, invokes
 * {@link GatewayEventFilter#afterAcknowledgement} for every transmitted event, and
 * removes the originally peeked events from the queue.
 *
 * @param batchId the id of the batch that the remote receiver acknowledged
 */
public void handleSuccessBatchAck(int batchId) {
  // First acknowledge any PDX-related events that rode along with this batch.
  List<GatewaySenderEventImpl> ackedPdxEvents = this.batchIdToPDXEventsMap.remove(batchId);
  if (ackedPdxEvents != null) {
    for (GatewaySenderEventImpl pdxEvent : ackedPdxEvents) {
      pdxEvent.isAcked = true;
    }
  }
  List<GatewaySenderEventImpl>[] batchEvents = this.batchIdToEventsMap.remove(batchId);
  if (batchEvents == null) {
    // Nothing recorded for this batch id; nothing more to do.
    return;
  }
  // Index 1 holds the filtered (actually transmitted) events; notify every filter for
  // each of them. A filter failure is logged but must not prevent the remaining
  // callbacks or the queue removal below.
  List<GatewaySenderEventImpl> transmittedEvents = batchEvents[1];
  for (GatewayEventFilter filter : sender.getGatewayEventFilters()) {
    for (GatewaySenderEventImpl transmittedEvent : transmittedEvents) {
      try {
        filter.afterAcknowledgement(transmittedEvent);
      } catch (Exception e) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.GatewayEventFilter_EXCEPTION_OCCURRED_WHILE_HANDLING_CALL_TO_0_AFTER_ACKNOWLEDGEMENT_FOR_EVENT_1,
            new Object[] { filter.toString(), transmittedEvent }), e);
      }
    }
  }
  // Index 0 holds all originally peeked events; remove that many from the queue.
  List<GatewaySenderEventImpl> peekedEvents = batchEvents[0];
  if (logger.isDebugEnabled()) {
    logger.debug("Removing events from the queue {}", peekedEvents.size());
  }
  eventQueueRemove(peekedEvents.size());
}
use of org.apache.geode.cache.wan.GatewayEventFilter in project geode by apache.
the class AbstractGatewaySenderEventProcessor method processQueue.
/**
 * Main processing loop for this event-processor thread. Repeatedly: peeks a batch of
 * events from the sender's queue, runs them through the configured GatewayEventFilters,
 * dispatches the batch, and performs success/failure bookkeeping (batch-id tracking for
 * the ack-reader thread, PDX event tracking, retry on failed dispatch). Runs until the
 * processor is stopped and honors pause/resume throughout.
 */
protected void processQueue() {
final boolean isDebugEnabled = logger.isDebugEnabled();
final boolean isTraceEnabled = logger.isTraceEnabled();
final int batchTimeInterval = sender.getBatchTimeInterval();
final GatewaySenderStats statistics = this.sender.getStatistics();
if (isDebugEnabled) {
logger.debug("STARTED processQueue {}", this.getId());
}
// list of the events peeked from queue
List<GatewaySenderEventImpl> events = null;
// list of the above peeked events which are filtered through the filters attached
List<GatewaySenderEventImpl> filteredList = new ArrayList<GatewaySenderEventImpl>();
// list of the PDX events which are peeked from the PDX region and need to go across the site
List<GatewaySenderEventImpl> pdxEventsToBeDispatched = new ArrayList<GatewaySenderEventImpl>();
// list of filteredList + pdxEventsToBeDispatched events
List<GatewaySenderEventImpl> eventsToBeDispatched = new ArrayList<GatewaySenderEventImpl>();
// Outer loop: one iteration per batch attempt; exits only when stopped.
for (; ; ) {
if (stopped()) {
break;
}
try {
// Check if paused. If so, wait for resumption
if (this.isPaused) {
waitForResumption();
}
// Peek a batch
if (isDebugEnabled) {
logger.debug("Attempting to peek a batch of {} events", this.batchSize);
}
// Inner loop: peek, filter and dispatch batches until stopped.
for (; ; ) {
// check before sleeping
if (stopped()) {
if (isDebugEnabled) {
logger.debug("GatewaySenderEventProcessor is stopped. Returning without peeking events.");
}
break;
}
// Check if paused. If so, wait for resumption
if (this.isPaused) {
waitForResumption();
}
// We need to initialize connection in dispatcher before sending first
// batch here ONLY, because we need GatewayReceiver's version for
// filtering VERSION_ACTION events from being sent.
boolean sendUpdateVersionEvents = shouldSendVersionEvents(this.dispatcher);
// sleep a little bit, look for events
boolean interrupted = Thread.interrupted();
try {
// A previous failure may require re-delivering the already-peeked events.
if (resetLastPeekedEvents) {
resetLastPeekedEvents();
resetLastPeekedEvents = false;
}
{
// Below code was added to consider the case of queue region is
// destroyed due to userPRs localdestroy or destroy operation.
// In this case we were waiting for queue region to get created
// and then only peek from the region queue.
// With the latest change of multiple PRs with a single ParallelSender, we
// can't wait for a particular region queue to get recreated as there
// will be other region queues from which events can be picked
/*
* // Check if paused. If so, wait for resumption if (this.isPaused) {
* waitForResumption(); }
*
* synchronized (this.getQueue()) { // its quite possible that the queue region is //
* destroyed(userRegion // localdestroy destroys shadow region locally). In this case
* // better to // wait for shadows region to get recreated instead of keep loop //
* for peeking events if (this.getQueue().getRegion() == null ||
* this.getQueue().getRegion().isDestroyed()) { try { this.getQueue().wait();
* continue; // this continue is important to recheck the // conditions of stop/ pause
* after the wait of 1 sec } catch (InterruptedException e1) {
* Thread.currentThread().interrupt(); } } }
*/
}
events = this.queue.peek(this.batchSize, batchTimeInterval);
} catch (InterruptedException e) {
interrupted = true;
this.sender.getCancelCriterion().checkCancelInProgress(e);
// keep trying
continue;
} finally {
// Restore the interrupt status swallowed above.
if (interrupted) {
Thread.currentThread().interrupt();
}
}
if (events.isEmpty()) {
// nothing to do!
continue;
}
// this list is access by ack reader thread so create new every time. #50220
filteredList = new ArrayList<GatewaySenderEventImpl>();
filteredList.addAll(events);
// remove all events whose serialized value is no longer available
if (this.exception != null && this.exception.getCause() != null && this.exception.getCause() instanceof IllegalStateException) {
for (Iterator<GatewaySenderEventImpl> i = filteredList.iterator(); i.hasNext(); ) {
GatewaySenderEventImpl event = i.next();
if (event.isSerializedValueNotAvailable()) {
i.remove();
}
}
this.exception = null;
}
// Filter the events
for (GatewayEventFilter filter : sender.getGatewayEventFilters()) {
Iterator<GatewaySenderEventImpl> itr = filteredList.iterator();
while (itr.hasNext()) {
GatewayQueueEvent event = itr.next();
// Drop UPDATE_VERSION_STAMP events when the receiver cannot handle them
// (version < 7.0.1); done inside this loop to prevent another pass over events.
if (!sendUpdateVersionEvents && event.getOperation() == Operation.UPDATE_VERSION_STAMP) {
if (isTraceEnabled) {
logger.trace("Update Event Version event: {} removed from Gateway Sender queue: {}", event, sender);
}
itr.remove();
statistics.incEventsNotQueued();
continue;
}
boolean transmit = filter.beforeTransmit(event);
if (!transmit) {
if (isDebugEnabled) {
logger.debug("{}: Did not transmit event due to filtering: {}", sender.getId(), event);
}
itr.remove();
statistics.incEventsFiltered();
}
}
}
// For a parallel sender with a callback dispatcher (i.e. an AsyncEventQueue), mark
// events whose bucket is no longer primary on this node as possibleDuplicate; the
// flag is only consulted for AsyncEventQueue since possibleDuplicate is not used in WAN.
if (this.getSender().isParallel() && (this.getDispatcher() instanceof GatewaySenderEventCallbackDispatcher)) {
Iterator<GatewaySenderEventImpl> itr = filteredList.iterator();
while (itr.hasNext()) {
GatewaySenderEventImpl event = (GatewaySenderEventImpl) itr.next();
PartitionedRegion qpr = null;
if (this.getQueue() instanceof ConcurrentParallelGatewaySenderQueue) {
qpr = ((ConcurrentParallelGatewaySenderQueue) this.getQueue()).getRegion(event.getRegionPath());
} else {
qpr = ((ParallelGatewaySenderQueue) this.getQueue()).getRegion(event.getRegionPath());
}
int bucketId = event.getBucketId();
// If the bucket is gone from this node or this node is no longer its
// primary, then set possibleDuplicate to true on the event
if (qpr != null) {
BucketRegion bucket = qpr.getDataStore().getLocalBucketById(bucketId);
if (bucket == null || !bucket.getBucketAdvisor().isPrimary()) {
event.setPossibleDuplicate(true);
if (isDebugEnabled) {
logger.debug("Bucket id: {} is no longer primary on this node. The event: {} will be dispatched from this node with possibleDuplicate set to true.", bucketId, event);
}
}
}
}
}
eventsToBeDispatched.clear();
if (!(this.dispatcher instanceof GatewaySenderEventCallbackDispatcher)) {
// store the batch before dispatching so it can be retrieved by the ack thread.
List<GatewaySenderEventImpl>[] eventsArr = (List<GatewaySenderEventImpl>[]) new List[2];
eventsArr[0] = events;
eventsArr[1] = filteredList;
this.batchIdToEventsMap.put(getBatchId(), eventsArr);
// find out PDX event and append it in front of the list
pdxEventsToBeDispatched = addPDXEvent();
eventsToBeDispatched.addAll(pdxEventsToBeDispatched);
if (!pdxEventsToBeDispatched.isEmpty()) {
this.batchIdToPDXEventsMap.put(getBatchId(), pdxEventsToBeDispatched);
}
}
eventsToBeDispatched.addAll(filteredList);
// Conflate the batch. Event conflation only occurs on the queue.
// Once an event has been peeked into a batch, it won't be
// conflated. So if events go through the queue quickly (as in the
// no-ack case), then multiple events for the same key may end up in
// the batch.
List conflatedEventsToBeDispatched = conflate(eventsToBeDispatched);
if (isDebugEnabled) {
logBatchFine("During normal processing, dispatching the following ", conflatedEventsToBeDispatched);
}
boolean success = this.dispatcher.dispatchBatch(conflatedEventsToBeDispatched, sender.isRemoveFromQueueOnException(), false);
if (success) {
if (isDebugEnabled) {
logger.debug("During normal processing, successfully dispatched {} events (batch #{})", conflatedEventsToBeDispatched.size(), getBatchId());
}
removeEventFromFailureMap(getBatchId());
} else {
if (!skipFailureLogging(getBatchId())) {
logger.warn(LocalizedMessage.create(LocalizedStrings.GatewayImpl_EVENT_QUEUE_DISPATCH_FAILED, new Object[] { filteredList.size(), getBatchId() }));
}
}
// check again, don't do post-processing if we're stopped.
if (stopped()) {
break;
}
// If the batch is successfully processed, remove it from the queue.
if (success) {
if (this.dispatcher instanceof GatewaySenderEventCallbackDispatcher) {
handleSuccessfulBatchDispatch(conflatedEventsToBeDispatched, events);
} else {
incrementBatchId();
}
// Mark dispatched PDX events so they are not re-sent with later batches; see
// isDispatched
for (GatewaySenderEventImpl pdxGatewaySenderEvent : pdxEventsToBeDispatched) {
pdxGatewaySenderEvent.isDispatched = true;
}
if (TEST_HOOK) {
this.numEventsDispatched += conflatedEventsToBeDispatched.size();
}
} else // end of successful batch branch
{
// The batch was unsuccessful.
if (this.dispatcher instanceof GatewaySenderEventCallbackDispatcher) {
handleUnSuccessfulBatchDispatch(events);
this.resetLastPeekedEvents = true;
} else {
handleUnSuccessfulBatchDispatch(events);
if (!resetLastPeekedEvents) {
// Retry this batch (isRetry=true) until it succeeds, the processor stops, or a
// reset of peeked events is requested; sleep briefly between attempts.
while (!this.dispatcher.dispatchBatch(conflatedEventsToBeDispatched, sender.isRemoveFromQueueOnException(), true)) {
if (isDebugEnabled) {
logger.debug("During normal processing, unsuccessfully dispatched {} events (batch #{})", conflatedEventsToBeDispatched.size(), getBatchId());
}
if (stopped() || resetLastPeekedEvents) {
break;
}
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
incrementBatchId();
}
}
}
// end of unsuccessful batch branch
if (logger.isDebugEnabled()) {
logger.debug("Finished processing events (batch #{})", (getBatchId() - 1));
}
}
// end of inner peek/dispatch loop
} catch (RegionDestroyedException e) {
// setting this flag will ensure that already peeked events will make
// it to the next batch before new events are peeked (fix for #48784)
this.resetLastPeekedEvents = true;
// shadow PR is also locally destroyed
if (logger.isDebugEnabled()) {
logger.debug("Observed RegionDestroyedException on Queue's region.");
}
} catch (CancelException e) {
logger.debug("Caught cancel exception", e);
setIsStopped(true);
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned
// now, so don't let this thread continue.
throw err;
} catch (Throwable e) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
// Well, OK. Some strange nonfatal thing.
if (stopped()) {
// don't complain, just exit.
return;
}
if (events != null) {
handleUnSuccessfulBatchDispatch(events);
}
this.resetLastPeekedEvents = true;
if (e instanceof GatewaySenderException) {
Throwable cause = e.getCause();
// Connectivity/configuration problems are expected to be transient; retry silently.
if (cause instanceof IOException || e instanceof GatewaySenderConfigurationException) {
continue;
}
}
// We'll log it but continue on with the next batch.
logger.warn(LocalizedMessage.create(LocalizedStrings.GatewayImpl_AN_EXCEPTION_OCCURRED_THE_DISPATCHER_WILL_CONTINUE), e);
}
}
// end of outer processing loop
}
use of org.apache.geode.cache.wan.GatewayEventFilter in project geode by apache.
the class AsyncEventQueueValidationsJUnitTest method testAsyncEventQueueConfiguredFromXmlUsesFilter.
@Test
@Parameters(method = "getCacheXmlFileBaseNames")
public void testAsyncEventQueueConfiguredFromXmlUsesFilter(String cacheXmlFileBaseName) {
  // Build the cache from the test-specific declarative XML file.
  String xmlPath = TestUtil.getResourcePath(getClass(),
      getClass().getSimpleName() + "." + cacheXmlFileBaseName + ".cache.xml");
  cache = new CacheFactory().set(MCAST_PORT, "0").set(CACHE_XML_FILE, xmlPath).create();
  // Perform a fixed number of puts on the region declared in the XML.
  Region region = cache.getRegion(cacheXmlFileBaseName);
  int putCount = 10;
  for (int key = 0; key < putCount; key++) {
    region.put(key, key);
  }
  // The XML is expected to declare exactly one GatewayEventFilter on the AEQ.
  AsyncEventQueue aeq = cache.getAsyncEventQueue(cacheXmlFileBaseName);
  List<GatewayEventFilter> filters = aeq.getGatewayEventFilters();
  assertTrue(filters.size() == 1);
  MyGatewayEventFilter filter = (MyGatewayEventFilter) filters.get(0);
  // Each put should eventually trigger every filter callback exactly once.
  Awaitility.waitAtMost(60, TimeUnit.SECONDS)
      .until(() -> filter.getBeforeEnqueueInvocations() == putCount);
  Awaitility.waitAtMost(60, TimeUnit.SECONDS)
      .until(() -> filter.getBeforeTransmitInvocations() == putCount);
  Awaitility.waitAtMost(60, TimeUnit.SECONDS)
      .until(() -> filter.getAfterAcknowledgementInvocations() == putCount);
}
use of org.apache.geode.cache.wan.GatewayEventFilter in project geode by apache.
the class ParallelGatewaySenderQueueOverflowDUnitTest method test_ValidateParallelGatewaySenderQueueAttributes_2.
/**
 * Test to validate the ParallelGatewaySenderQueue diskSynchronous attribute when persistence of
 * the sender is not enabled.
 */
@Ignore("TODO: test is disabled")
@Test
public void test_ValidateParallelGatewaySenderQueueAttributes_2() {
  // Start a local and a remote locator; the remote one is needed for the WAN topology
  // even though only the local port is referenced below.
  Integer localLocPort = (Integer) vm0.invoke(() -> WANTestBase.createFirstLocatorWithDSId(1));
  Integer remoteLocPort =
      (Integer) vm1.invoke(() -> WANTestBase.createFirstRemoteLocator(2, localLocPort));
  WANTestBase test = new WANTestBase();
  Properties props = test.getDistributedSystemProperties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "localhost[" + localLocPort + "]");
  InternalDistributedSystem ds = test.getSystem(props);
  cache = CacheFactory.create(ds);
  GatewaySenderFactory fact = cache.createGatewaySenderFactory();
  // set parallel to true
  fact.setParallel(true);
  fact.setBatchConflationEnabled(true);
  fact.setBatchSize(200);
  fact.setBatchTimeInterval(300);
  // set persistence to false
  fact.setPersistenceEnabled(false);
  fact.setDiskSynchronous(true);
  fact.setMaximumQueueMemory(200);
  fact.setAlertThreshold(1200);
  GatewayEventFilter myEventFilter1 = new MyGatewayEventFilter1();
  fact.addGatewayEventFilter(myEventFilter1);
  GatewayTransportFilter myStreamFilter1 = new MyGatewayTransportFilter1();
  fact.addGatewayTransportFilter(myStreamFilter1);
  GatewayTransportFilter myStreamFilter2 = new MyGatewayTransportFilter2();
  fact.addGatewayTransportFilter(myStreamFilter2);
  // No receiver exists on the remote side, so connection failures are expected.
  final IgnoredException ex = IgnoredException.addIgnoredException("Could not connect");
  try {
    GatewaySender sender1 = fact.create("TKSender", 2);
    AttributesFactory factory = new AttributesFactory();
    factory.addGatewaySenderId(sender1.getId());
    factory.setDataPolicy(DataPolicy.PARTITION);
    Region region = cache.createRegionFactory(factory.create())
        .create("test_ValidateGatewaySenderAttributes");
    Set<GatewaySender> senders = cache.getGatewaySenders();
    // NOTE: JUnit assertEquals takes (expected, actual); the original had them reversed,
    // which produced misleading failure messages.
    assertEquals(1, senders.size());
    GatewaySender gatewaySender = senders.iterator().next();
    Set<RegionQueue> regionQueues = ((AbstractGatewaySender) gatewaySender).getQueues();
    assertEquals(1, regionQueues.size());
    RegionQueue regionQueue = regionQueues.iterator().next();
    // Without persistence, the queue region must not be disk-synchronous.
    assertEquals(false, regionQueue.getRegion().getAttributes().isDiskSynchronous());
  } finally {
    ex.remove();
  }
}
use of org.apache.geode.cache.wan.GatewayEventFilter in project geode by apache.
the class SerialGatewaySenderQueueDUnitTest method test_ValidateSerialGatewaySenderQueueAttributes_1.
/**
 * Test to validate the serial gateway sender queue diskSynchronous attribute when persistence of
 * the sender is enabled.
 */
@Test
public void test_ValidateSerialGatewaySenderQueueAttributes_1() {
  // Start a local and a remote locator; the remote one is needed for the WAN topology
  // even though only the local port is referenced below.
  Integer localLocPort = (Integer) vm0.invoke(() -> WANTestBase.createFirstLocatorWithDSId(1));
  Integer remoteLocPort =
      (Integer) vm1.invoke(() -> WANTestBase.createFirstRemoteLocator(2, localLocPort));
  WANTestBase test = new WANTestBase(getTestMethodName());
  Properties props = test.getDistributedSystemProperties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "localhost[" + localLocPort + "]");
  InternalDistributedSystem ds = test.getSystem(props);
  cache = CacheFactory.create(ds);
  // Create a unique disk directory for the persistent queue's disk store.
  File directory =
      new File("TKSender" + "_disk_" + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
  directory.mkdir(); // best-effort; dsf.create below fails if the directory is unusable
  File[] dirs1 = new File[] { directory };
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setDiskDirs(dirs1);
  DiskStore diskStore = dsf.create("FORNY");
  GatewaySenderFactory fact = cache.createGatewaySenderFactory();
  fact.setBatchConflationEnabled(true);
  fact.setBatchSize(200);
  fact.setBatchTimeInterval(300);
  // enable the persistence
  fact.setPersistenceEnabled(true);
  fact.setDiskSynchronous(true);
  fact.setDiskStoreName("FORNY");
  fact.setMaximumQueueMemory(200);
  fact.setAlertThreshold(1200);
  GatewayEventFilter myEventFilter1 = new MyGatewayEventFilter1();
  fact.addGatewayEventFilter(myEventFilter1);
  GatewayTransportFilter myStreamFilter1 = new MyGatewayTransportFilter1();
  fact.addGatewayTransportFilter(myStreamFilter1);
  GatewayTransportFilter myStreamFilter2 = new MyGatewayTransportFilter2();
  fact.addGatewayTransportFilter(myStreamFilter2);
  // No receiver exists on the remote side, so connection failures are expected.
  final IgnoredException exTKSender = IgnoredException.addIgnoredException("Could not connect");
  try {
    GatewaySender sender1 = fact.create("TKSender", 2);
    AttributesFactory factory = new AttributesFactory();
    factory.addGatewaySenderId(sender1.getId());
    factory.setDataPolicy(DataPolicy.PARTITION);
    Region region = cache.createRegionFactory(factory.create())
        .create("test_ValidateGatewaySenderAttributes");
    Set<GatewaySender> senders = cache.getGatewaySenders();
    // NOTE: JUnit assertEquals takes (expected, actual); the original had them reversed,
    // which produced misleading failure messages.
    assertEquals(1, senders.size());
    GatewaySender gatewaySender = senders.iterator().next();
    Set<RegionQueue> regionQueues = ((AbstractGatewaySender) gatewaySender).getQueues();
    // A serial sender creates one queue per dispatcher thread.
    assertEquals(GatewaySender.DEFAULT_DISPATCHER_THREADS, regionQueues.size());
    RegionQueue regionQueue = regionQueues.iterator().next();
    // With persistence enabled and diskSynchronous(true), the queue region is disk-synchronous.
    assertEquals(true, regionQueue.getRegion().getAttributes().isDiskSynchronous());
  } finally {
    exTKSender.remove();
  }
}
Aggregations