Use of org.apache.geode.cache.CacheException in project geode by apache.
The class RemotePutMessage, method distribute.
/**
 * This is similar to send() but it selects an initialized replicate that is used to proxy the
 * message.
 *
 * @param event represents the current operation
 * @param lastModified lastModified time
 * @param ifNew whether a new entry can be created
 * @param ifOld whether an old entry can be used (updates are okay)
 * @param expectedOldValue the value being overwritten is required to match this value
 * @param requireOldValue whether the old value should be returned
 * @param onlyPersistent send message to persistent members only
 * @return whether the message was successfully distributed to another member
 */
public static boolean distribute(EntryEventImpl event, long lastModified, boolean ifNew,
    boolean ifOld, Object expectedOldValue, boolean requireOldValue, boolean onlyPersistent) {
  boolean successful = false;
  DistributedRegion r = (DistributedRegion) event.getRegion();
  Collection replicates = onlyPersistent
      ? r.getCacheDistributionAdvisor().adviseInitializedPersistentMembers().keySet()
      : r.getCacheDistributionAdvisor().adviseInitializedReplicates();
  if (replicates.isEmpty()) {
    return false;
  }
  if (replicates.size() > 1) {
    ArrayList l = new ArrayList(replicates);
    Collections.shuffle(l);
    replicates = l;
  }
  int attempts = 0;
  if (logger.isDebugEnabled()) {
    logger.debug("performing remote put messaging for {}", event);
  }
  for (Iterator<InternalDistributedMember> it = replicates.iterator(); it.hasNext();) {
    InternalDistributedMember replicate = it.next();
    try {
      attempts++;
      final boolean posDup = (attempts > 1);
      RemotePutResponse response = send(replicate, event.getRegion(), event, lastModified, ifNew,
          ifOld, expectedOldValue, requireOldValue, false, DistributionManager.SERIAL_EXECUTOR,
          posDup);
      PutResult result = response.waitForResult();
      event.setOldValue(result.oldValue, true);
      event.setOperation(result.op);
      if (result.versionTag != null) {
        event.setVersionTag(result.versionTag);
        if (event.getRegion().getVersionVector() != null) {
          event.getRegion().getVersionVector().recordVersion(result.versionTag.getMemberID(),
              result.versionTag);
        }
      }
      event.setInhibitDistribution(true);
      return true;
    } catch (TransactionDataNotColocatedException enfe) {
      throw enfe;
    } catch (CancelException e) {
      event.getRegion().getCancelCriterion().checkCancelInProgress(e);
    } catch (CacheException e) {
      if (logger.isDebugEnabled()) {
        logger.debug("RemotePutMessage caught CacheException during distribution", e);
      }
      // not a cancel-exception, so don't complain any more about it
      successful = true;
    } catch (RemoteOperationException e) {
      if (logger.isTraceEnabled(LogMarker.DM)) {
        logger.trace(LogMarker.DM,
            "RemotePutMessage caught an unexpected exception during distribution", e);
      }
    }
  }
  return successful;
}
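The loop above implements a simple retry pattern: shuffle the candidate replicates, try each one in turn, and mark every attempt after the first as a possible duplicate (posDup) so the receiver can tolerate a resend. A minimal self-contained sketch of that pattern follows, with a hypothetical Sender interface standing in for geode's send(); the names are illustrative, not the geode API.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class RetryWithPossibleDuplicate {
  // Hypothetical sender abstraction; geode's send() plays this role.
  interface Sender {
    void send(String member, boolean possibleDuplicate) throws Exception;
  }

  /** Tries each candidate in random order; returns true on the first success. */
  static boolean distribute(List<String> candidates, Sender sender) {
    List<String> shuffled = new ArrayList<>(candidates);
    Collections.shuffle(shuffled); // spread proxy load across replicates
    int attempts = 0;
    for (String member : shuffled) {
      attempts++;
      // Any attempt after the first may have partially succeeded already,
      // so flag it as a possible duplicate, as distribute() does with posDup.
      boolean posDup = attempts > 1;
      try {
        sender.send(member, posDup);
        return true;
      } catch (Exception e) {
        // fall through and try the next replicate
      }
    }
    return false;
  }

  public static void main(String[] args) {
    boolean ok = distribute(List.of("server1", "server2"), (member, posDup) -> {
      if (member.equals("server1")) throw new Exception("simulated failure");
      System.out.println("sent to " + member + " posDup=" + posDup);
    });
    System.out.println("distributed: " + ok);
  }
}

Shuffling spreads the proxy load across the replicates instead of always selecting the first member returned by the advisor.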
Use of org.apache.geode.cache.CacheException in project geode by apache.
The class HARegionQueue, method remove.
/**
 * Removes the events that were peeked by this thread. The events are destroyed from the queue
 * and conflation map, and the DispatchedAndCurrentEvents are updated accordingly.
 */
public void remove() throws InterruptedException {
  List peekedIds = (List) HARegionQueue.peekedEventsContext.get();
  if (peekedIds == null) {
    if (logger.isDebugEnabled()) {
      logger.debug("Remove() called before peek(), nothing to remove.");
    }
    return;
  }
  if (!this.checkPrevAcks()) {
    return;
  }
  Map groupedThreadIDs = new HashMap();
  for (Iterator iter = peekedIds.iterator(); iter.hasNext();) {
    Long counter = (Long) iter.next();
    Conflatable event = (Conflatable) this.region.get(counter);
    if (event != null) {
      EventID eventid = event.getEventId();
      long sequenceId = eventid.getSequenceID();
      ThreadIdentifier threadid = getThreadIdentifier(eventid);
      if (!checkEventForRemoval(counter, threadid, sequenceId)) {
        continue;
      }
      Object key = null;
      String r = null;
      if (shouldBeConflated(event)) {
        key = event.getKeyToConflate();
        r = event.getRegionToConflate();
      }
      RemovedEventInfo info = new RemovedEventInfo(counter, r, key);
      List countersList;
      if ((countersList = (List) groupedThreadIDs.get(threadid)) != null) {
        countersList.add(info);
        countersList.set(0, sequenceId);
      } else {
        countersList = new ArrayList();
        countersList.add(sequenceId);
        countersList.add(info);
        groupedThreadIDs.put(threadid, countersList);
      }
      event = null;
      info = null;
    } else {
      // if (logger.isDebugEnabled()) {
      HARegionQueue.this.stats.incNumVoidRemovals();
      // }
    }
  }
  for (Iterator iter = groupedThreadIDs.entrySet().iterator(); iter.hasNext();) {
    Map.Entry element = (Map.Entry) iter.next();
    ThreadIdentifier tid = (ThreadIdentifier) element.getKey();
    List removedEvents = (List) element.getValue();
    long lastDispatchedId = (Long) removedEvents.remove(0);
    DispatchedAndCurrentEvents dace = (DispatchedAndCurrentEvents) this.eventsMap.get(tid);
    if (dace != null && dace.lastDispatchedSequenceId < lastDispatchedId) {
      try {
        dace.setLastDispatchedIDAndRemoveEvents(removedEvents, lastDispatchedId);
      } catch (CacheException e) {
        // ignore and log
        logger.error(LocalizedMessage.create(
            LocalizedStrings.HARegionQueue_EXCEPTION_OCCURRED_WHILE_TRYING_TO_SET_THE_LAST_DISPATCHED_ID),
            e);
      }
    }
    // Periodic ack from the client will add to the addDispatchMessage Map.
    // This method gets called from cacheClientNotifier upon receiving the ack from client.
    // addDispatchedMessage(tid, lastDispatchedId);
  }
  groupedThreadIDs = null;
  // removed the events from queue, now clear the peekedEventsContext
  setPeekedEvents();
}
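The groupedThreadIDs map above uses a slightly unusual layout: each value is a List whose slot 0 holds the highest dispatched sequence id for that thread, followed by the RemovedEventInfo entries. A minimal sketch of that grouping step with plain types follows; the Removed record and the input triples are hypothetical stand-ins, not the geode classes.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupBySequence {
  record Removed(long counter) {} // stand-in for RemovedEventInfo

  public static void main(String[] args) {
    // (threadId, sequenceId, counter) triples, as remove() sees them per peeked event
    long[][] peeked = { { 1, 10, 100 }, { 1, 11, 101 }, { 2, 5, 200 } };
    Map<Long, List<Object>> grouped = new HashMap<>();
    for (long[] p : peeked) {
      long threadId = p[0], sequenceId = p[1], counter = p[2];
      List<Object> list = grouped.get(threadId);
      if (list != null) {
        list.add(new Removed(counter));
        list.set(0, sequenceId); // slot 0 always tracks the latest sequence id
      } else {
        list = new ArrayList<>();
        list.add(sequenceId); // slot 0: last dispatched sequence id
        list.add(new Removed(counter));
        grouped.put(threadId, list);
      }
    }
    // remove() later pops slot 0 as lastDispatchedId and passes the rest to
    // setLastDispatchedIDAndRemoveEvents(removedEvents, lastDispatchedId).
    grouped.forEach((tid, list) -> System.out.println(tid + " -> " + list));
  }
}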
Use of org.apache.geode.cache.CacheException in project geode by apache.
The class CacheClientNotifier, method registerGFEClient.
protected void registerGFEClient(DataInputStream dis, DataOutputStream dos, Socket socket,
    boolean isPrimary, long startTime, Version clientVersion, long acceptorId,
    boolean notifyBySubscription) throws IOException {
  // Read the ports and throw them away. We no longer need them
  int numberOfPorts = dis.readInt();
  for (int i = 0; i < numberOfPorts; i++) {
    dis.readInt();
  }
  // Read the handshake identifier and convert it to a string member id
  ClientProxyMembershipID proxyID = null;
  CacheClientProxy proxy;
  AccessControl authzCallback = null;
  byte clientConflation = HandShake.CONFLATION_DEFAULT;
  try {
    proxyID = ClientProxyMembershipID.readCanonicalized(dis);
    if (getBlacklistedClient().contains(proxyID)) {
      writeException(dos, HandShake.REPLY_INVALID,
          new Exception("This client is blacklisted by server"), clientVersion);
      return;
    }
    proxy = getClientProxy(proxyID);
    DistributedMember member = proxyID.getDistributedMember();
    DistributedSystem system = this.getCache().getDistributedSystem();
    Properties sysProps = system.getProperties();
    String authenticator = sysProps.getProperty(SECURITY_CLIENT_AUTHENTICATOR);
    if (clientVersion.compareTo(Version.GFE_603) >= 0) {
      byte[] overrides = HandShake.extractOverrides(new byte[] { (byte) dis.read() });
      clientConflation = overrides[0];
    } else {
      clientConflation = (byte) dis.read();
    }
    switch (clientConflation) {
      case HandShake.CONFLATION_DEFAULT:
      case HandShake.CONFLATION_OFF:
      case HandShake.CONFLATION_ON:
        break;
      default:
        writeException(dos, HandShake.REPLY_INVALID,
            new IllegalArgumentException("Invalid conflation byte"), clientVersion);
        return;
    }
    proxy = registerClient(socket, proxyID, proxy, isPrimary, clientConflation, clientVersion,
        acceptorId, notifyBySubscription);
    Properties credentials = HandShake.readCredentials(dis, dos, system);
    if (credentials != null && proxy != null) {
      if (securityLogWriter.fineEnabled()) {
        securityLogWriter
            .fine("CacheClientNotifier: verifying credentials for proxyID: " + proxyID);
      }
      Object subject = HandShake.verifyCredentials(authenticator, credentials,
          system.getSecurityProperties(), this.logWriter, this.securityLogWriter, member);
      if (subject instanceof Principal) {
        Principal principal = (Principal) subject;
        if (securityLogWriter.fineEnabled()) {
          securityLogWriter.fine("CacheClientNotifier: successfully verified credentials for proxyID: "
              + proxyID + " having principal: " + principal.getName());
        }
        String postAuthzFactoryName = sysProps.getProperty(SECURITY_CLIENT_ACCESSOR_PP);
        if (postAuthzFactoryName != null && postAuthzFactoryName.length() > 0) {
          if (principal == null) {
            securityLogWriter.warning(
                LocalizedStrings.CacheClientNotifier_CACHECLIENTNOTIFIER_POST_PROCESS_AUTHORIZATION_CALLBACK_ENABLED_BUT_AUTHENTICATION_CALLBACK_0_RETURNED_WITH_NULL_CREDENTIALS_FOR_PROXYID_1,
                new Object[] { SECURITY_CLIENT_AUTHENTICATOR, proxyID });
          }
          Method authzMethod = ClassLoadUtil.methodFromName(postAuthzFactoryName);
          authzCallback = (AccessControl) authzMethod.invoke(null, (Object[]) null);
          authzCallback.init(principal, member, this.getCache());
        }
        proxy.setPostAuthzCallback(authzCallback);
      } else if (subject instanceof Subject) {
        proxy.setSubject((Subject) subject);
      }
    }
  } catch (ClassNotFoundException e) {
    throw new IOException(
        LocalizedStrings.CacheClientNotifier_CLIENTPROXYMEMBERSHIPID_OBJECT_COULD_NOT_BE_CREATED_EXCEPTION_OCCURRED_WAS_0
            .toLocalizedString(e));
  } catch (AuthenticationRequiredException ex) {
    securityLogWriter.warning(
        LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
        new Object[] { proxyID, ex });
    writeException(dos, HandShake.REPLY_EXCEPTION_AUTHENTICATION_REQUIRED, ex, clientVersion);
    return;
  } catch (AuthenticationFailedException ex) {
    securityLogWriter.warning(
        LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
        new Object[] { proxyID, ex });
    writeException(dos, HandShake.REPLY_EXCEPTION_AUTHENTICATION_FAILED, ex, clientVersion);
    return;
  } catch (CacheException e) {
    logger.warn(LocalizedMessage.create(
        LocalizedStrings.CacheClientNotifier_0_REGISTERCLIENT_EXCEPTION_ENCOUNTERED_IN_REGISTRATION_1,
        new Object[] { this, e }), e);
    IOException io = new IOException(
        LocalizedStrings.CacheClientNotifier_EXCEPTION_OCCURRED_WHILE_TRYING_TO_REGISTER_INTEREST_DUE_TO_0
            .toLocalizedString(e.getMessage()));
    io.initCause(e);
    throw io;
  } catch (Exception ex) {
    logger.warn(LocalizedMessage.create(
        LocalizedStrings.CacheClientNotifier_AN_EXCEPTION_WAS_THROWN_FOR_CLIENT_0_1,
        new Object[] { proxyID, "" }), ex);
    writeException(dos, Acceptor.UNSUCCESSFUL_SERVER_TO_CLIENT, ex, clientVersion);
    return;
  }
  this.statistics.endClientRegistration(startTime);
}
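For clients at GFE_603 or later, the conflation setting arrives packed into an overrides byte rather than as a raw value, but in either path the decoded result must be one of three legal values, as the switch above enforces. A self-contained sketch of that validation step follows; the CONFLATION_* constants here are assumed stand-ins mirroring the shape of HandShake's values, not the actual geode definitions.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class ConflationCheck {
  // Assumed stand-ins for HandShake.CONFLATION_DEFAULT/ON/OFF (values are illustrative).
  static final byte CONFLATION_DEFAULT = 0;
  static final byte CONFLATION_ON = 1;
  static final byte CONFLATION_OFF = 2;

  /** Reads one conflation byte and rejects anything outside the legal set. */
  static byte readConflation(DataInputStream dis) throws IOException {
    byte b = (byte) dis.read();
    switch (b) {
      case CONFLATION_DEFAULT:
      case CONFLATION_ON:
      case CONFLATION_OFF:
        return b;
      default:
        // registerGFEClient answers this case with REPLY_INVALID instead of throwing
        throw new IllegalArgumentException("Invalid conflation byte: " + b);
    }
  }

  public static void main(String[] args) throws IOException {
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(new byte[] { 1 }));
    System.out.println("conflation = " + readConflation(dis));
  }
}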
Use of org.apache.geode.cache.CacheException in project geode by apache.
The class SerialGatewaySenderEventProcessor, method handleFailover.
/**
 * Handle failover. This method is called when a secondary <code>GatewaySender</code> becomes a
 * primary <code>GatewaySender</code>.
 *
 * Once this secondary becomes the primary, it must:
 * <ul>
 * <li>Remove the queue's CacheListener
 * <li>Process the map of unprocessed events (those it has seen but the previous primary had not
 * yet processed before it crashed). These will include both queued and unqueued events. Remove
 * from the queue any events that were already sent
 * <li>Clear the unprocessed events map
 * </ul>
 */
protected void handleFailover() {
  /*
   * We must hold this lock while we're processing these maps to prevent us from handling a
   * secondary event while failover occurs. See enqueueEvent.
   */
  synchronized (this.unprocessedEventsLock) {
    // Remove the queue's CacheListener
    this.queue.removeCacheListener();
    this.unprocessedTokens = null;
    // Process the map of unprocessed events
    logger.info(LocalizedMessage.create(
        LocalizedStrings.GatewayImpl_GATEWAY_FAILOVER_INITIATED_PROCESSING_0_UNPROCESSED_EVENTS,
        this.unprocessedEvents.size()));
    GatewaySenderStats statistics = this.sender.getStatistics();
    if (!this.unprocessedEvents.isEmpty()) {
      // do a reap for bug 37603
      // to get rid of timed out events
      reapOld(statistics, true);
      // now iterate over the region queue to figure out what unprocessed
      // events are already in the queue
      {
        Iterator it = this.queue.getRegion().values().iterator();
        while (it.hasNext() && !stopped()) {
          Object o = it.next();
          if (o != null && o instanceof GatewaySenderEventImpl) {
            GatewaySenderEventImpl ge = (GatewaySenderEventImpl) o;
            EventWrapper unprocessedEvent = this.unprocessedEvents.remove(ge.getEventId());
            if (unprocessedEvent != null) {
              unprocessedEvent.event.release();
              if (this.unprocessedEvents.isEmpty()) {
                break;
              }
            }
          }
        }
      }
      // now for every unprocessed event add it to the end of the queue
      {
        Iterator<Map.Entry<EventID, EventWrapper>> it =
            this.unprocessedEvents.entrySet().iterator();
        while (it.hasNext()) {
          if (stopped())
            break;
          Map.Entry<EventID, EventWrapper> me = it.next();
          EventWrapper ew = me.getValue();
          GatewaySenderEventImpl gatewayEvent = ew.event;
          // Initialize each gateway event. This initializes the key, value
          // and callback arg based on the EntryEvent.
          // TODO:wan70, remove dependencies from old code
          gatewayEvent.initialize();
          // Verify that the GatewayEventCallbackArgument is initialized.
          // If not, initialize it. It won't be initialized if a client to
          // this GatewayHub VM was the creator of this event. This Gateway
          // will be the first one to process it. It will be initialized if
          // this event was sent to this Gateway from another GatewayHub
          // (either directly or indirectly).
          GatewaySenderEventCallbackArgument seca = gatewayEvent.getSenderCallbackArgument();
          if (seca.getOriginatingDSId() == GatewaySender.DEFAULT_DISTRIBUTED_SYSTEM_ID) {
            seca.setOriginatingDSId(sender.getMyDSId());
            seca.initializeReceipientDSIds(Collections.singletonList(sender.getRemoteDSId()));
          }
          it.remove();
          boolean queuedEvent = false;
          try {
            queuedEvent = queuePrimaryEvent(gatewayEvent);
          } catch (IOException ex) {
            if (!stopped()) {
              logger.warn(LocalizedMessage.create(
                  LocalizedStrings.GatewayImpl_EVENT_DROPPED_DURING_FAILOVER_0, gatewayEvent), ex);
            }
          } catch (CacheException ex) {
            if (!stopped()) {
              logger.warn(LocalizedMessage.create(
                  LocalizedStrings.GatewayImpl_EVENT_DROPPED_DURING_FAILOVER_0, gatewayEvent), ex);
            }
          } finally {
            if (!queuedEvent) {
              gatewayEvent.release();
            }
          }
        }
      }
      // Clear the unprocessed events map
      statistics.clearUnprocessedMaps();
    }
    // Iterate the entire queue and mark all events as possible duplicates
    logger.info(LocalizedMessage.create(
        LocalizedStrings.GatewayImpl_0__MARKING__1__EVENTS_AS_POSSIBLE_DUPLICATES,
        new Object[] { getSender(), Integer.valueOf(this.queue.size()) }));
    Iterator it = this.queue.getRegion().values().iterator();
    while (it.hasNext() && !stopped()) {
      Object o = it.next();
      if (o != null && o instanceof GatewaySenderEventImpl) {
        GatewaySenderEventImpl ge = (GatewaySenderEventImpl) o;
        ge.setPossibleDuplicate(true);
      }
    }
    releaseUnprocessedEvents();
  } // synchronized
}
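Stripped of the geode types, the failover reconciliation is: drop every unprocessed event that is already in the queue, append the remainder to the end of the queue, then flag the whole queue as possible duplicates. A compact sketch of that flow follows; the Event class and ids are hypothetical, not the geode API.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.LinkedHashMap;
import java.util.Map;

public class FailoverSketch {
  static class Event {
    final String id;
    boolean possibleDuplicate;
    Event(String id) { this.id = id; }
  }

  public static void main(String[] args) {
    Deque<Event> queue = new ArrayDeque<>();
    queue.add(new Event("e1")); // already queued by the old primary

    Map<String, Event> unprocessed = new LinkedHashMap<>();
    unprocessed.put("e1", new Event("e1")); // seen and already queued: drop it
    unprocessed.put("e2", new Event("e2")); // seen but never queued: enqueue it

    // 1. Remove unprocessed events the old primary had already queued.
    queue.forEach(e -> unprocessed.remove(e.id));
    // 2. Append the remaining unprocessed events to the end of the queue.
    queue.addAll(unprocessed.values());
    unprocessed.clear();
    // 3. Everything in the queue may have been sent once already, so mark it.
    queue.forEach(e -> e.possibleDuplicate = true);

    queue.forEach(e -> System.out.println(e.id + " posDup=" + e.possibleDuplicate));
  }
}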
Use of org.apache.geode.cache.CacheException in project geode by apache.
The class SerialGatewaySenderQueue, method initializeRegion.
/**
 * Initializes the <code>Region</code> backing this queue. The <code>Region</code>'s scope is
 * DISTRIBUTED_NO_ACK, its mirror type is KEYS_VALUES, and it is configured to overflow to disk
 * based on the <code>GatewayQueueAttributes</code>.
 *
 * @param sender The GatewaySender (<code>SerialGatewaySenderImpl</code>)
 * @param listener The GemFire <code>CacheListener</code>. The <code>CacheListener</code> can be
 *        null.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
private void initializeRegion(AbstractGatewaySender sender, CacheListener listener) {
  final InternalCache gemCache = sender.getCache();
  this.region = gemCache.getRegion(this.regionName);
  if (this.region == null) {
    AttributesFactory<Long, AsyncEvent> factory = new AttributesFactory<Long, AsyncEvent>();
    factory.setScope(NO_ACK ? Scope.DISTRIBUTED_NO_ACK : Scope.DISTRIBUTED_ACK);
    factory.setDataPolicy(
        this.enablePersistence ? DataPolicy.PERSISTENT_REPLICATE : DataPolicy.REPLICATE);
    if (logger.isDebugEnabled()) {
      logger.debug("The policy of region is {}",
          (this.enablePersistence ? DataPolicy.PERSISTENT_REPLICATE : DataPolicy.REPLICATE));
    }
    // Add the cache listener, e.g. when the user of this queue is a secondary VM.
    if (listener != null) {
      factory.addCacheListener(listener);
    }
    // allow for no overflow directory
    EvictionAttributes ea = EvictionAttributes.createLIFOMemoryAttributes(
        this.maximumQueueMemory, EvictionAction.OVERFLOW_TO_DISK);
    factory.setEvictionAttributes(ea);
    factory.setConcurrencyChecksEnabled(false);
    factory.setDiskStoreName(this.diskStoreName);
    // In case of persistence, write to disk synchronously; in case of eviction, write asynchronously
    factory.setDiskSynchronous(this.isDiskSynchronous);
    // Create the region
    if (logger.isDebugEnabled()) {
      logger.debug("{}: Attempting to create queue region: {}", this, this.regionName);
    }
    final RegionAttributes<Long, AsyncEvent> ra = factory.create();
    try {
      SerialGatewaySenderQueueMetaRegion meta =
          new SerialGatewaySenderQueueMetaRegion(this.regionName, ra, null, gemCache, sender);
      try {
        this.region = gemCache.createVMRegion(this.regionName, ra,
            new InternalRegionArguments().setInternalMetaRegion(meta).setDestroyLockFlag(true)
                .setSnapshotInputStream(null).setImageTarget(null)
                .setIsUsedForSerialGatewaySenderQueue(true).setInternalRegion(true)
                .setSerialGatewaySender(sender));
      } catch (IOException veryUnLikely) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.SingleWriteSingleReadRegionQueue_UNEXPECTED_EXCEPTION_DURING_INIT_OF_0,
            this.getClass()), veryUnLikely);
      } catch (ClassNotFoundException alsoUnlikely) {
        logger.fatal(LocalizedMessage.create(
            LocalizedStrings.SingleWriteSingleReadRegionQueue_UNEXPECTED_EXCEPTION_DURING_INIT_OF_0,
            this.getClass()), alsoUnlikely);
      }
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Created queue region: {}", this, this.region);
      }
    } catch (CacheException e) {
      logger.fatal(LocalizedMessage.create(
          LocalizedStrings.SingleWriteSingleReadRegionQueue_0_THE_QUEUE_REGION_NAMED_1_COULD_NOT_BE_CREATED,
          new Object[] { this, this.regionName }), e);
    }
  } else {
    throw new IllegalStateException(
        "Queue region " + this.region.getFullPath() + " already exists.");
  }
}
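The public-API portion of this setup (scope, data policy, LIFO memory eviction with overflow to disk, disk store wiring) can be reproduced outside the internal queue classes. Below is a minimal sketch of building equivalent RegionAttributes using only the calls that appear in the method above; the parameter values and class name are placeholders, and the internal createVMRegion plumbing is deliberately omitted.

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.RegionAttributes;
import org.apache.geode.cache.Scope;

public class QueueRegionAttributes {
  /** Builds attributes shaped like the serial gateway queue region (a sketch). */
  @SuppressWarnings({ "unchecked", "rawtypes" })
  static RegionAttributes buildAttributes(boolean persistent, int maxQueueMemoryMb,
      String diskStoreName, boolean diskSynchronous) {
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);
    factory.setDataPolicy(persistent ? DataPolicy.PERSISTENT_REPLICATE : DataPolicy.REPLICATE);
    // Overflow the newest entries to disk past the memory cap; LIFO eviction
    // keeps the head of the queue (the next entries to dispatch) in memory.
    factory.setEvictionAttributes(EvictionAttributes
        .createLIFOMemoryAttributes(maxQueueMemoryMb, EvictionAction.OVERFLOW_TO_DISK));
    factory.setConcurrencyChecksEnabled(false);
    factory.setDiskStoreName(diskStoreName);
    factory.setDiskSynchronous(diskSynchronous);
    return factory.create();
  }
}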