use of org.apache.geode.internal.cache.Conflatable in project geode by apache.
the class HARegionQueueStatsJUnitTest method testSequenceViolationStats.
/**
* This test does the following: <br>
* 1) Create a HARegionQueue.<br>
* 2) Add objects with unique event ids and conflation disabled.<br>
* 3) Add some objects with the same event ids (sequence ids), i.e. duplicate events.<br>
* 4) Verify that the numSequenceViolated stat equals the number of duplicate events.<br>
* 5) Verify that the eventsEnqued stat equals the queue size (i.e. the eventsEnqued stat is not
* updated for duplicate events).
*
* @throws Exception
*/
@Test
public void testSequenceViolationStats() throws Exception {
HARegionQueue rq = createHARegionQueue("testSequenceViolationStats");
Conflatable cf = null;
int totalEvents = 10;
for (int i = 0; i < totalEvents; i++) {
cf = new ConflatableObject("key" + i, "value" + i, new EventID(new byte[] { 1 }, 1, i), false, "testing");
rq.put(cf);
}
int seqViolated = 3;
for (int i = 0; i < seqViolated; i++) {
cf = new ConflatableObject("key" + i, "value" + i, new EventID(new byte[] { 1 }, 1, i), false, "testing");
rq.put(cf);
}
HARegionQueueStats stats = rq.getStatistics();
assertNotNull("stats for HARegionQueue found null", stats);
assertEquals("Number of sequence violated by stats not equal to the actual number", seqViolated, stats.getNumSequenceViolated());
assertEquals("Events corresponding to sequence violation not added to the queue but eventsEnqued stats updated for them.", rq.size(), stats.getEventsEnqued());
}
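What the assertions rely on is that HARegionQueue keys events by EventID (member id, thread id, sequence id) and treats a re-put of an already seen sequence id as a sequence violation rather than a new enqueue. The following standalone sketch is a minimal illustration of that bookkeeping, assuming nothing from Geode; every class and field name in it is made up for the example:

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;

// Simplified illustration of per-thread sequence tracking; not the Geode implementation.
public class SequenceViolationSketch {

  private final Map<String, Long> lastSequenceSeen = new HashMap<>(); // threadId -> last sequence id
  private final Queue<String> queue = new ArrayDeque<>();
  private int sequenceViolations;
  private int eventsEnqueued;

  /** Returns true if the event was enqueued, false if it was a duplicate (sequence violation). */
  public boolean put(String threadId, long sequenceId, String value) {
    Long last = lastSequenceSeen.get(threadId);
    if (last != null && sequenceId <= last) {
      sequenceViolations++;          // duplicate: counted, but not enqueued
      return false;
    }
    lastSequenceSeen.put(threadId, sequenceId);
    queue.add(value);
    eventsEnqueued++;                // only genuinely new events update this stat
    return true;
  }

  public static void main(String[] args) {
    SequenceViolationSketch q = new SequenceViolationSketch();
    for (int i = 0; i < 10; i++) {
      q.put("thread-1", i, "value" + i);   // 10 unique events
    }
    for (int i = 0; i < 3; i++) {
      q.put("thread-1", i, "value" + i);   // 3 duplicates -> sequence violations
    }
    System.out.println(q.sequenceViolations + " violations, "
        + q.eventsEnqueued + " enqueued, queue size " + q.queue.size());
    // prints: 3 violations, 10 enqueued, queue size 10
  }
}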
use of org.apache.geode.internal.cache.Conflatable in project geode by apache.
the class HARegionQueue method basicPut.
private void basicPut(Object object) throws CacheException, InterruptedException {
// optimistically decrease the put count
try {
this.checkQueueSizeConstraint();
// this.region.checkReadiness(); // throws CacheClosed or RegionDestroyed
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
this.region.getCache().getCancelCriterion().checkCancelInProgress(ie);
}
Conflatable event = (Conflatable) object;
// Get the EventID object & from it obtain the ThreadIdentifier
EventID eventId = event.getEventId();
ThreadIdentifier ti = getThreadIdentifier(eventId);
long sequenceID = eventId.getSequenceID();
// Check from Events Map if the put operation should proceed or not
DispatchedAndCurrentEvents dace = (DispatchedAndCurrentEvents) this.eventsMap.get(ti);
if (dace != null && dace.isGIIDace && this.puttingGIIDataInQueue) {
// we only need to retain DACE for which there are no entries in the queue.
// for other thread identifiers we build up a new DACE
dace = null;
}
if (dace != null) {
// check the last dispatched sequence Id
if (this.puttingGIIDataInQueue || (sequenceID > dace.lastDispatchedSequenceId)) {
// also does not get added
if (!dace.putObject(event, sequenceID)) {
// dace encountered a DESTROYED token - stop adding GII data
if (!this.puttingGIIDataInQueue) {
this.put(object);
}
} else {
if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
logger.trace(LogMarker.BRIDGE_SERVER, "{}: Adding message to queue: {}", this, object);
}
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("{}: This queue has already seen this event. The highest sequence number in the queue for {} is {}, but this event's sequence number is {}", this.regionName, ti, dace.lastDispatchedSequenceId, sequenceID);
}
incrementTakeSidePutPermits();
}
} else {
dace = new DispatchedAndCurrentEvents(this);
DispatchedAndCurrentEvents oldDace = (DispatchedAndCurrentEvents) this.eventsMap.putIfAbsent(ti, dace);
if (oldDace != null) {
dace = oldDace;
} else {
// Add the recently added ThreadIdentifier to the RegionQueue for expiry
this.region.put(ti, dace.lastDispatchedSequenceId);
// update the stats
this.stats.incThreadIdentifiers();
}
if (!dace.putObject(event, sequenceID)) {
this.put(object);
} else {
if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
logger.trace(LogMarker.BRIDGE_SERVER, "{}: Adding message to queue: {}", this, object);
}
}
}
}
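The else branch above uses putIfAbsent on the events map so that two producers racing on the same ThreadIdentifier end up sharing one DispatchedAndCurrentEvents instance. A minimal, Geode-independent sketch of that create-or-share pattern plus the sequence check, with illustrative names (ThreadState, accept) that are not part of Geode:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

// Simplified stand-in for the eventsMap / DispatchedAndCurrentEvents handshake; not Geode code.
public class PerThreadStateSketch {

  static final class ThreadState {
    final AtomicLong lastDispatchedSequenceId = new AtomicLong(-1);
  }

  private final ConcurrentMap<String, ThreadState> eventsMap = new ConcurrentHashMap<>();

  /** Returns true if the event is new for this thread id and should be enqueued. */
  public boolean accept(String threadId, long sequenceId) {
    ThreadState state = eventsMap.get(threadId);
    if (state == null) {
      ThreadState fresh = new ThreadState();
      ThreadState existing = eventsMap.putIfAbsent(threadId, fresh);
      // If another thread won the race, use its instance so both see the same counters.
      state = (existing != null) ? existing : fresh;
    }
    // Only sequence ids beyond the last dispatched one are accepted, mirroring the
    // sequenceID > dace.lastDispatchedSequenceId check above.
    while (true) {
      long last = state.lastDispatchedSequenceId.get();
      if (sequenceId <= last) {
        return false; // already seen or dispatched
      }
      if (state.lastDispatchedSequenceId.compareAndSet(last, sequenceId)) {
        return true;
      }
    }
  }

  public static void main(String[] args) {
    PerThreadStateSketch sketch = new PerThreadStateSketch();
    System.out.println(sketch.accept("t1", 0));  // true  - first event for t1
    System.out.println(sketch.accept("t1", 1));  // true  - newer sequence id
    System.out.println(sketch.accept("t1", 1));  // false - duplicate, rejected
  }
}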
use of org.apache.geode.internal.cache.Conflatable in project geode by apache.
the class SerialGatewaySenderQueue method putAndGetKeyNoSync.
private long putAndGetKeyNoSync(Object object) throws CacheException {
// don't sync on whole put; callers will do the puts in parallel but
// will wait later for previous tailKey put to complete after its own
// put is done
Long key;
synchronized (this) {
initializeKeys();
// Get and increment the current key
// Go for full sync in case of wrapover
long ckey = this.currentKey;
if (logger.isTraceEnabled()) {
logger.trace("{}: Determined current key: {}", this, ckey);
}
key = Long.valueOf(ckey);
this.currentKey = inc(ckey);
}
try {
// Put the object into the region at that key
this.region.put(key, (AsyncEvent) object);
if (logger.isDebugEnabled()) {
logger.debug("{}: Inserted {} -> {}", this, key, object);
}
} finally {
final Object sync = this.pendingPuts;
synchronized (sync) {
while (true) {
if (key.longValue() == this.tailKey.get()) {
// this is the next thread, so increment tail and signal all other
// waiting threads if required
incrementTailKey();
// check pendingPuts
boolean notifyWaiters = false;
if (this.pendingPuts.size() > 0) {
Iterator<Long> itr = this.pendingPuts.iterator();
while (itr.hasNext()) {
Long k = itr.next();
if (k.longValue() == this.tailKey.get()) {
incrementTailKey();
// removed something from pending queue, so notify any waiters
if (!notifyWaiters) {
notifyWaiters = (this.pendingPuts.size() >= this.maxPendingPuts);
}
itr.remove();
} else {
break;
}
}
}
if (notifyWaiters) {
sync.notifyAll();
}
break;
} else if (this.pendingPuts.size() < this.maxPendingPuts) {
this.pendingPuts.add(key);
break;
} else {
// wait for the queue size to go down
boolean interrupted = Thread.interrupted();
Throwable t = null;
try {
sync.wait(5);
} catch (InterruptedException ie) {
t = ie;
interrupted = true;
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
((LocalRegion) this.region).getCancelCriterion().checkCancelInProgress(t);
}
}
}
}
}
if (object instanceof Conflatable) {
removeOldEntry((Conflatable) object, key);
}
return key.longValue();
}
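The method deliberately keeps the expensive region.put outside the lock: the key is allocated in a short synchronized block, and the tailKey/pendingPuts bookkeeping afterwards publishes completions strictly in key order. Below is a compact sketch of the same ordering idea under simplifying assumptions (no bound on pending puts, no wait/notify); OrderedPublishSketch and its members are invented for the illustration and are not Geode classes:

import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongConsumer;

// Simplified version of the "allocate key, work unlocked, publish in order" pattern; not Geode code.
public class OrderedPublishSketch {

  private final AtomicLong nextKey = new AtomicLong();
  private final AtomicLong tailKey = new AtomicLong();          // next key expected to complete
  private final ConcurrentSkipListSet<Long> pending = new ConcurrentSkipListSet<>();

  /** Runs work for a freshly allocated key, then advances tailKey once all earlier keys are done. */
  public long putAndGetKey(LongConsumer work) {
    long key = nextKey.getAndIncrement();   // cheap key allocation, no global lock held during work
    try {
      work.accept(key);                     // e.g. the region put in the original code
    } finally {
      pending.add(key);
      // Advance the tail past every contiguous completed key, in order.
      synchronized (this) {
        while (!pending.isEmpty() && pending.first() == tailKey.get()) {
          pending.pollFirst();
          tailKey.incrementAndGet();
        }
      }
    }
    return key;
  }

  public long completedUpTo() {
    return tailKey.get();
  }

  public static void main(String[] args) throws InterruptedException {
    OrderedPublishSketch sketch = new OrderedPublishSketch();
    Runnable task = () -> sketch.putAndGetKey(key -> System.out.println("stored under key " + key));
    Thread a = new Thread(task);
    Thread b = new Thread(task);
    a.start();
    b.start();
    a.join();
    b.join();
    System.out.println("completed up to key " + sketch.completedUpTo()); // 2
  }
}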
use of org.apache.geode.internal.cache.Conflatable in project geode by apache.
the class CacheClientNotifier method singletonNotifyClients.
private void singletonNotifyClients(InternalCacheEvent event, ClientUpdateMessage cmsg) {
final boolean isDebugEnabled = logger.isDebugEnabled();
final boolean isTraceEnabled = logger.isTraceEnabled();
FilterInfo filterInfo = event.getLocalFilterInfo();
FilterProfile regionProfile = ((LocalRegion) event.getRegion()).getFilterProfile();
if (filterInfo != null) {
// if the routing was made using an old profile we need to recompute it
if (isTraceEnabled) {
logger.trace("Event isOriginRemote={}", event.isOriginRemote());
}
}
if ((filterInfo == null || (filterInfo.getCQs() == null && filterInfo.getInterestedClients() == null && filterInfo.getInterestedClientsInv() == null))) {
return;
}
long startTime = this.statistics.startTime();
ClientUpdateMessageImpl clientMessage;
if (cmsg == null) {
clientMessage = constructClientMessage(event);
} else {
clientMessage = (ClientUpdateMessageImpl) cmsg;
}
if (clientMessage == null) {
return;
}
// Holds the clientIds to which filter message needs to be sent.
Set<ClientProxyMembershipID> filterClients = new HashSet<>();
// Add CQ info.
if (filterInfo.getCQs() != null) {
for (Map.Entry<Long, Integer> e : filterInfo.getCQs().entrySet()) {
Long cqID = e.getKey();
String cqName = regionProfile.getRealCqID(cqID);
if (cqName == null) {
continue;
}
ServerCQ cq = regionProfile.getCq(cqName);
if (cq != null) {
ClientProxyMembershipID id = cq.getClientProxyId();
filterClients.add(id);
if (isDebugEnabled) {
logger.debug("Adding cq routing info to message for id: {} and cq: {}", id, cqName);
}
clientMessage.addClientCq(id, cq.getName(), e.getValue());
}
}
}
// Add interestList info.
if (filterInfo.getInterestedClientsInv() != null) {
Set<Object> rawIDs = regionProfile.getRealClientIDs(filterInfo.getInterestedClientsInv());
Set<ClientProxyMembershipID> ids = getProxyIDs(rawIDs, true);
if (ids.remove(event.getContext())) {
// don't send to member of origin
CacheClientProxy ccp = getClientProxy(event.getContext());
if (ccp != null) {
ccp.getStatistics().incMessagesNotQueuedOriginator();
}
}
if (!ids.isEmpty()) {
if (isTraceEnabled) {
logger.trace("adding invalidation routing to message for {}", ids);
}
clientMessage.addClientInterestList(ids, false);
filterClients.addAll(ids);
}
}
if (filterInfo.getInterestedClients() != null) {
Set<Object> rawIDs = regionProfile.getRealClientIDs(filterInfo.getInterestedClients());
Set<ClientProxyMembershipID> ids = getProxyIDs(rawIDs, true);
if (ids.remove(event.getContext())) {
// don't send to member of origin
CacheClientProxy ccp = getClientProxy(event.getContext());
if (ccp != null) {
ccp.getStatistics().incMessagesNotQueuedOriginator();
}
}
if (!ids.isEmpty()) {
if (isTraceEnabled) {
logger.trace("adding routing to message for {}", ids);
}
clientMessage.addClientInterestList(ids, true);
filterClients.addAll(ids);
}
}
Conflatable conflatable = null;
if (clientMessage instanceof ClientTombstoneMessage) {
// bug #46832 - HAEventWrapper deserialization can't handle subclasses
// of ClientUpdateMessageImpl, so don't wrap them
conflatable = clientMessage;
// Remove clients older than 70 from the filterClients if the message is
// ClientTombstoneMessage. Fix for #46591.
Object[] objects = filterClients.toArray();
for (Object id : objects) {
CacheClientProxy ccp = getClientProxy((ClientProxyMembershipID) id, true);
if (ccp != null && ccp.getVersion().compareTo(Version.GFE_70) < 0) {
filterClients.remove(id);
}
}
} else {
HAEventWrapper wrapper = new HAEventWrapper(clientMessage);
// Set the putInProgress flag to true before starting the put on the proxy's
// HA queues. This is the only place where the flag is set to true.
wrapper.setPutInProgress(true);
conflatable = wrapper;
}
singletonRouteClientMessage(conflatable, filterClients);
this.statistics.endEvent(startTime);
// CQ result keys marked as destroyed are removed after the event is placed in the clients' HAQueue.
if (filterInfo.filterProcessedLocally) {
removeDestroyTokensFromCqResultKeys(event, filterInfo);
}
}
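Both interest-list branches follow the same shape: translate the raw routing ids into proxy ids, drop the member that originated the event, and add the remainder to filterClients. A small standalone sketch of that filtering step, using plain strings instead of ClientProxyMembershipID and invented names throughout:

import java.util.HashSet;
import java.util.Set;

// Simplified illustration of "route to every interested client except the originator"; not Geode code.
public class InterestRoutingSketch {

  /** Returns the clients that should receive the event: all interested clients minus the originator. */
  public static Set<String> clientsToNotify(Set<String> interestedClients, String originator) {
    Set<String> targets = new HashSet<>(interestedClients);
    if (targets.remove(originator)) {
      // In the real code, this is where the "messages not queued to originator"
      // statistic is incremented on the originating client's proxy.
      System.out.println("skipping originator " + originator);
    }
    return targets;
  }

  public static void main(String[] args) {
    Set<String> interested = Set.of("client-1", "client-2", "client-3");
    System.out.println(clientsToNotify(interested, "client-2")); // client-1 and client-3, order unspecified
  }
}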
use of org.apache.geode.internal.cache.Conflatable in project geode by apache.
the class CacheClientProxy method initializeMessageDispatcher.
/**
* Initializes the message dispatcher thread. The <code>MessageDispatcher</code> processes the
* message queue.
*
* @throws CacheException
*/
public void initializeMessageDispatcher() throws CacheException {
// Initialization process.
this.messageDispatcherInit = true;
try {
if (logger.isDebugEnabled()) {
logger.debug("{}: Initializing message dispatcher with capacity of {} entries", this, _maximumMessageCount);
}
String name = "Client Message Dispatcher for " + getProxyID().getDistributedMember() + (isDurable() ? " (" + getDurableId() + ")" : "");
this._messageDispatcher = new MessageDispatcher(this, name);
// Drain as many of the queued events as we can without synchronization.
if (logger.isDebugEnabled()) {
logger.debug("{} draining {} events from init queue into initialized queue", this, this.queuedEvents.size());
}
Conflatable nextEvent;
while ((nextEvent = queuedEvents.poll()) != null) {
this._messageDispatcher.enqueueMessage(nextEvent);
}
// Now drain the remaining events while holding the lock to make sure we don't miss any events.
synchronized (this.queuedEventsSync) {
while ((nextEvent = queuedEvents.poll()) != null) {
this._messageDispatcher.enqueueMessage(nextEvent);
}
// Done initialization.
this.messageDispatcherInit = false;
}
} finally {
if (this.messageDispatcherInit) {
// If it's not successfully completed.
this._statistics.close();
}
}
}
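The drain-twice pattern here, drain what you can without the lock, then take queuedEventsSync, drain again and only then clear messageDispatcherInit, is what keeps events from being lost between the temporary init queue and the dispatcher. A compact, Geode-independent sketch of the same handoff; producer, initialize, initQueue and dispatch are illustrative names, not the CacheClientProxy API:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

// Simplified two-phase drain handoff; not the CacheClientProxy implementation.
public class InitQueueHandoffSketch {

  private final Queue<String> initQueue = new ConcurrentLinkedQueue<>();
  private final Object initSync = new Object();
  private volatile boolean initializing = true;

  /** Producers buffer into the init queue until initialization completes. */
  public void producer(String event) {
    if (initializing) {
      synchronized (initSync) {
        if (initializing) {          // re-check under the lock, mirroring the real code's flag
          initQueue.add(event);
          return;
        }
      }
    }
    dispatch(event);                 // after init, events go straight to the dispatcher
  }

  /** Called once by the initializer; analogous to initializeMessageDispatcher's drain. */
  public void initialize() {
    String next;
    // Phase 1: drain as much as possible without holding the lock.
    while ((next = initQueue.poll()) != null) {
      dispatch(next);
    }
    // Phase 2: drain the stragglers under the lock and flip the flag while still holding it,
    // so no producer can add to the init queue after the final drain.
    synchronized (initSync) {
      while ((next = initQueue.poll()) != null) {
        dispatch(next);
      }
      initializing = false;
    }
  }

  private void dispatch(String event) {
    System.out.println("dispatched " + event);
  }
}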