Use of org.apache.geode.distributed.DistributedSystemDisconnectedException in project geode by apache.
The class ConnectionTable, method getThreadOwnedConnection.
/**
 * Must be looking for an ordered connection that this thread owns
 *
 * @param id the DistributedMember on which to create the connection
 * @param startTime the ms clock start time for the operation
 * @param ackTimeout the ms ack-wait-threshold, or zero
 * @param ackSATimeout the ms ack-severe-alert-threshold, or zero
 * @return the connection, or null if an error occurred
 * @throws IOException if the connection could not be created
 * @throws DistributedSystemDisconnectedException if the distributed system is shutting down
 */
Connection getThreadOwnedConnection(DistributedMember id, long startTime, long ackTimeout,
    long ackSATimeout) throws IOException, DistributedSystemDisconnectedException {
  Connection result = null;
  // Look for result in the thread local
  Map m = (Map) this.threadOrderedConnMap.get();
  if (m == null) {
    // First time for this thread. Create thread local
    m = new HashMap();
    synchronized (this.threadConnMaps) {
      if (this.closed) {
        owner.getCancelCriterion().checkCancelInProgress(null);
        throw new DistributedSystemDisconnectedException(
            LocalizedStrings.ConnectionTable_CONNECTION_TABLE_IS_CLOSED.toLocalizedString());
      }
      // check for stale references and remove them.
      for (Iterator it = this.threadConnMaps.iterator(); it.hasNext();) {
        Reference r = (Reference) it.next();
        if (r.get() == null) {
          it.remove();
        }
      } // for
      // ref added for bug 38011
      this.threadConnMaps.add(new WeakReference(m));
    } // synchronized
    this.threadOrderedConnMap.set(m);
  } else {
    // Consult thread local.
    synchronized (m) {
      result = (Connection) m.get(id);
    }
    if (result != null && result.timedOut) {
      result = null;
    }
  }
  if (result != null) {
    return result;
  }
  // OK, we have to create a new connection.
  result = Connection.createSender(owner.getMembershipManager(), this, true /* preserveOrder */,
      id, false /* shared */, startTime, ackTimeout, ackSATimeout);
  if (logger.isDebugEnabled()) {
    logger.debug("ConnectionTable: created an ordered connection: {}", result);
  }
  this.owner.stats.incSenders(false /* shared */, true /* preserveOrder */);
  if (this.threadConnectionMap == null) {
    // This instance is being destroyed; fail the operation
    closeCon(LocalizedStrings.ConnectionTable_CONNECTION_TABLE_BEING_DESTROYED.toLocalizedString(),
        result);
    return null;
  }
  ArrayList al = (ArrayList) this.threadConnectionMap.get(id);
  if (al == null) {
    // First connection for this DistributedMember. Make sure list for this
    // stub is created if it isn't already there.
    al = new ArrayList();
    // Since it's a concurrent map, we just try to put it and then
    // return whichever we got.
    Object o = this.threadConnectionMap.putIfAbsent(id, al);
    if (o != null) {
      al = (ArrayList) o;
    }
  }
  // Add our Connection to the list
  synchronized (al) {
    al.add(result);
  }
  // Finally, add the connection to our thread local map.
  synchronized (m) {
    m.put(id, result);
  }
  scheduleIdleTimeout(result);
  return result;
}
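The method above layers a per-thread cache (threadOrderedConnMap) over a table-wide registry (threadConnectionMap) guarded by a closed flag. Below is a minimal, self-contained sketch of that pattern using only JDK types; ThreadOwnedConnectionSketch, DummyConnection and the String member ids are hypothetical stand-ins for Geode's internal ConnectionTable, Connection and DistributedMember, so it only illustrates the thread-local lookup, the closed check and the putIfAbsent-style registration, not the real wire behavior.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ThreadOwnedConnectionSketch {

  static class DummyConnection {
    final String memberId;
    DummyConnection(String memberId) {
      this.memberId = memberId;
    }
  }

  // per-thread cache of connections owned by that thread
  private final ThreadLocal<Map<String, DummyConnection>> threadOrderedConnMap =
      ThreadLocal.withInitial(HashMap::new);
  // table-wide registry so all thread-owned connections can be closed later
  private final ConcurrentMap<String, List<DummyConnection>> threadConnectionMap =
      new ConcurrentHashMap<>();
  private volatile boolean closed;

  DummyConnection getThreadOwnedConnection(String memberId) {
    if (closed) {
      // mirrors the DistributedSystemDisconnectedException thrown when the table is closed
      throw new IllegalStateException("connection table is closed");
    }
    Map<String, DummyConnection> mine = threadOrderedConnMap.get();
    DummyConnection result = mine.get(memberId);
    if (result != null) {
      return result; // reuse the connection this thread already owns
    }
    result = new DummyConnection(memberId); // stands in for Connection.createSender(...)
    List<DummyConnection> all =
        threadConnectionMap.computeIfAbsent(memberId, k -> new ArrayList<>());
    synchronized (all) { // the real code also synchronizes on the per-member list
      all.add(result);
    }
    mine.put(memberId, result);
    return result;
  }

  public static void main(String[] args) {
    ThreadOwnedConnectionSketch table = new ThreadOwnedConnectionSketch();
    DummyConnection first = table.getThreadOwnedConnection("member-1");
    DummyConnection second = table.getThreadOwnedConnection("member-1");
    System.out.println("same connection reused: " + (first == second)); // true
  }
}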
Use of org.apache.geode.distributed.DistributedSystemDisconnectedException in project geode by apache.
The class TXMessage, method process.
@Override
protected void process(final DistributionManager dm) {
  Throwable thr = null;
  boolean sendReply = true;
  try {
    if (logger.isDebugEnabled()) {
      logger.debug("processing {}", this);
    }
    InternalCache cache = GemFireCacheImpl.getInstance();
    if (checkCacheClosing(cache) || checkDSClosing(cache.getInternalDistributedSystem())) {
      thr = new CacheClosedException(
          LocalizedStrings.PartitionMessage_REMOTE_CACHE_IS_CLOSED_0.toLocalizedString(dm.getId()));
      return;
    }
    TXManagerImpl txMgr = cache.getTXMgr();
    TXStateProxy tx = null;
    try {
      assert this.txUniqId != TXManagerImpl.NOTX;
      TXId txId = new TXId(getMemberToMasqueradeAs(), this.txUniqId);
      tx = txMgr.masqueradeAs(this);
      sendReply = operateOnTx(txId, dm);
    } finally {
      txMgr.unmasquerade(tx);
    }
  } catch (CommitConflictException cce) {
    thr = cce;
  } catch (DistributedSystemDisconnectedException se) {
    sendReply = false;
    if (logger.isDebugEnabled()) {
      logger.debug("shutdown caught, abandoning message: " + se);
    }
  } catch (RegionDestroyedException rde) {
    thr = new ForceReattemptException(
        LocalizedStrings.PartitionMessage_REGION_IS_DESTROYED_IN_0
            .toLocalizedString(dm.getDistributionManagerId()),
        rde);
  } catch (VirtualMachineError err) {
    SystemFailure.initiateFailure(err);
    // If this ever returns, rethrow the error. We're poisoned
    // now, so don't let this thread continue.
    throw err;
  } catch (Throwable t) {
    // Whenever you catch Error or Throwable, you must also
    // catch VirtualMachineError (see above). However, there is
    // _still_ a possibility that you are dealing with a cascading
    // error condition, so you also need to check to see if the JVM
    // is still usable:
    SystemFailure.checkFailure();
    if (sendReply) {
      thr = t;
    }
  } finally {
    ReplySender rs = getReplySender(dm);
    if (sendReply && (this.processorId != 0 || (rs != dm))) {
      ReplyException rex = null;
      if (thr != null) {
        rex = new ReplyException(thr);
      }
      sendReply(getSender(), this.processorId, dm, rex);
    }
  }
}
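A recurring pattern in this method is the reply/no-reply decision: most failures are captured and sent back to the caller from the finally block, but a DistributedSystemDisconnectedException means the VM is shutting down and the reply is skipped entirely. The following is a hedged sketch of just that decision; it assumes geode-core is on the classpath for the exception class, and the ReplySender interface and process helper here are hypothetical stand-ins, not Geode's messaging API.

import org.apache.geode.distributed.DistributedSystemDisconnectedException;

public class TxMessageReplySketch {

  // hypothetical stand-in for the reply channel back to the message sender
  interface ReplySender {
    void sendReply(Throwable error); // null error means success
  }

  static void process(Runnable operation, ReplySender replySender) {
    Throwable failure = null;
    boolean sendReply = true;
    try {
      operation.run();
    } catch (DistributedSystemDisconnectedException e) {
      // shutdown in progress: abandon the message instead of replying;
      // the caller learns about the departure through membership
      sendReply = false;
    } catch (RuntimeException e) {
      failure = e; // report other failures back to the sender
    } finally {
      if (sendReply) {
        replySender.sendReply(failure);
      }
    }
  }

  public static void main(String[] args) {
    ReplySender sender = error -> System.out.println("reply sent, error=" + error);
    // normal path: a reply is sent with no error
    process(() -> System.out.println("transaction work done"), sender);
    // shutdown path: no reply is sent at all
    process(() -> {
      throw new DistributedSystemDisconnectedException("shutting down");
    }, sender);
  }
}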
Use of org.apache.geode.distributed.DistributedSystemDisconnectedException in project geode by apache.
The class RemoteGetMessage, method operateOnRegion.
@Override
protected boolean operateOnRegion(final DistributionManager dm, LocalRegion r, long startTime)
    throws RemoteOperationException {
  if (logger.isTraceEnabled(LogMarker.DM)) {
    logger.trace(LogMarker.DM, "RemoteGetMessage operateOnRegion: {}", r.getFullPath());
  }
  if (this.getTXUniqId() != TXManagerImpl.NOTX) {
    assert r.getDataView() instanceof TXStateProxy;
  }
  if (!(r instanceof PartitionedRegion)) { // prs already wait on initialization
    r.waitOnInitialization(); // bug #43371 - accessing a region before it's initialized
  }
  RawValue valueBytes;
  Object val = null;
  try {
    KeyInfo keyInfo = r.getKeyInfo(key, cbArg);
    val = r.getDataView().getSerializedValue(r, keyInfo, false, this.context, null, false);
    valueBytes = val instanceof RawValue ? (RawValue) val : new RawValue(val);
    if (logger.isTraceEnabled(LogMarker.DM)) {
      logger.trace(LogMarker.DM,
          "GetMessage sending serialized value {} back via GetReplyMessage using processorId: {}",
          valueBytes, getProcessorId());
    }
    // r.getPrStats().endPartitionMessagesProcessing(startTime);
    GetReplyMessage.send(getSender(), getProcessorId(), valueBytes, getReplySender(dm));
    // Unless an exception was thrown, this message handles sending the response
    return false;
  } catch (DistributedSystemDisconnectedException sde) {
    sendReply(getSender(), this.processorId, dm,
        new ReplyException(new RemoteOperationException(
            LocalizedStrings.GetMessage_OPERATION_GOT_INTERRUPTED_DUE_TO_SHUTDOWN_IN_PROGRESS_ON_REMOTE_VM
                .toLocalizedString(),
            sde)),
        r, startTime);
    return false;
  } catch (PrimaryBucketException pbe) {
    sendReply(getSender(), getProcessorId(), dm, new ReplyException(pbe), r, startTime);
    return false;
  } catch (DataLocationException e) {
    sendReply(getSender(), getProcessorId(), dm, new ReplyException(e), r, startTime);
    return false;
  } finally {
    OffHeapHelper.release(val);
  }
}
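Unlike TXMessage above, this handler still replies on shutdown, but it first translates the DistributedSystemDisconnectedException into a retriable, checked failure so the caller can try another member. The sketch below shows only that translation step; RetriableRemoteFailure and ValueSupplier are hypothetical stand-ins for Geode's RemoteOperationException and data-view lookup, and geode-core is assumed on the classpath for the disconnect exception.

import org.apache.geode.distributed.DistributedSystemDisconnectedException;

public class RemoteGetReplySketch {

  // hypothetical checked failure type telling the caller it may retry elsewhere
  static class RetriableRemoteFailure extends Exception {
    RetriableRemoteFailure(String message, Throwable cause) {
      super(message, cause);
    }
  }

  // hypothetical stand-in for fetching the serialized value from the region's data view
  interface ValueSupplier {
    Object fetch();
  }

  static Object handleGet(ValueSupplier supplier) throws RetriableRemoteFailure {
    try {
      return supplier.fetch();
    } catch (DistributedSystemDisconnectedException e) {
      // the remote VM is shutting down; wrap and report so the caller can retry
      throw new RetriableRemoteFailure("operation interrupted by shutdown in progress", e);
    }
  }

  public static void main(String[] args) {
    try {
      handleGet(() -> {
        throw new DistributedSystemDisconnectedException("member stopping");
      });
    } catch (RetriableRemoteFailure e) {
      System.out.println("caller may retry on another member: " + e.getMessage());
    }
  }
}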
Use of org.apache.geode.distributed.DistributedSystemDisconnectedException in project geode by apache.
The class InternalClientMembership, method startMonitoring.
/**
* This work used to be in a class initializer. Unfortunately, this allowed the class to escape
* before it was fully initialized, so now we just make sure this work is done before any public
* static method on it is invoked.
*/
private static synchronized void startMonitoring() {
  if (isMonitoring) {
    return;
  }
  synchronized (systems) {
    // Initialize our own list of distributed systems via a connect listener
    List existingSystems =
        InternalDistributedSystem.addConnectListener(new InternalDistributedSystem.ConnectListener() {
          public void onConnect(InternalDistributedSystem sys) {
            addInternalDistributedSystem(sys);
          }
        });
    isMonitoring = true;
    // While still holding the lock on systems, add all currently known
    // systems to our own list
    for (Iterator iter = existingSystems.iterator(); iter.hasNext();) {
      InternalDistributedSystem sys = (InternalDistributedSystem) iter.next();
      try {
        if (sys.isConnected()) {
          addInternalDistributedSystem(sys);
        }
      } catch (DistributedSystemDisconnectedException e) {
        // this system has already disconnected; ignore it (bug 37379)
      }
    }
  } // synchronized
}
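The javadoc above describes the key idea: do the one-time registration lazily inside a synchronized static method rather than in a class initializer, and while registering, fold in the systems that already exist. Here is a small stand-alone sketch of that shape using plain JDK types; SystemRegistry, registerConnectListener and the String system names are hypothetical stand-ins for InternalDistributedSystem and its connect-listener API.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;

public class LazyMonitorSketch {

  // hypothetical registry of connected systems with a connect-listener hook
  static class SystemRegistry {
    private static final List<String> connected = new ArrayList<>();
    private static final List<Consumer<String>> listeners = new ArrayList<>();

    static synchronized List<String> registerConnectListener(Consumer<String> listener) {
      listeners.add(listener);
      return new ArrayList<>(connected); // systems that connected before we registered
    }

    static synchronized void connect(String name) {
      connected.add(name);
      listeners.forEach(l -> l.accept(name));
    }
  }

  private static boolean isMonitoring = false;
  private static final List<String> monitoredSystems =
      Collections.synchronizedList(new ArrayList<>());

  private static synchronized void startMonitoring() {
    if (isMonitoring) {
      return; // already initialized; later calls are no-ops
    }
    // register the listener first, then fold in the systems that already existed,
    // so a system connecting during startup cannot be missed
    List<String> existing = SystemRegistry.registerConnectListener(monitoredSystems::add);
    monitoredSystems.addAll(existing);
    isMonitoring = true;
  }

  public static void main(String[] args) {
    SystemRegistry.connect("ds-1"); // connected before monitoring starts
    startMonitoring();
    SystemRegistry.connect("ds-2"); // picked up by the listener
    System.out.println(monitoredSystems); // [ds-1, ds-2]
  }
}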
Use of org.apache.geode.distributed.DistributedSystemDisconnectedException in project geode by apache.
The class BaseCommandQuery, method processQueryUsingParams.
/**
 * Processes the given query and sends the result set back to the client.
 *
 * @param msg
 * @param query
 * @param queryString
 * @param regionNames
 * @param start
 * @param cqQuery
 * @param queryContext
 * @param servConn
 * @return true if execution was successful, false in case of failure
 * @throws IOException
 */
protected boolean processQueryUsingParams(Message msg, Query query, String queryString,
    Set regionNames, long start, ServerCQ cqQuery, QueryOperationContext queryContext,
    ServerConnection servConn, boolean sendResults, Object[] params)
    throws IOException, InterruptedException {
  ChunkedMessage queryResponseMsg = servConn.getQueryResponseMessage();
  CacheServerStats stats = servConn.getCacheServerStats();
  CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
  {
    long oldStart = start;
    start = DistributionStats.getStatTime();
    stats.incReadQueryRequestTime(start - oldStart);
  }
  // For clients at version 7.0 or later, flag the query as a remote query
  if (servConn.getClientVersion().compareTo(Version.GFE_70) >= 0) {
    ((DefaultQuery) query).setRemoteQuery(true);
  }
  // Process the query request
  try {
    // integrated security
    for (Object regionName : regionNames) {
      this.securityService.authorizeRegionRead(regionName.toString());
    }
    // Execute query
    // startTime = GenericStats.getTime();
    // startTime = System.currentTimeMillis();
    // For now we assume the results are a SelectResults
    // which is the only possibility now, but this may change
    // in the future if we support arbitrary queries
    Object result = null;
    if (params != null) {
      result = query.execute(params);
    } else {
      result = query.execute();
    }
    // Asif : Before conditioning the results check if any
    // of the regions involved in the query have been destroyed
    // or not. If yes, throw an Exception.
    // This is a workaround/fix for Bug 36969
    Iterator itr = regionNames.iterator();
    while (itr.hasNext()) {
      String regionName = (String) itr.next();
      if (crHelper.getRegion(regionName) == null) {
        throw new RegionDestroyedException(
            LocalizedStrings.BaseCommand_REGION_DESTROYED_DURING_THE_EXECUTION_OF_THE_QUERY
                .toLocalizedString(),
            regionName);
      }
    }
    AuthorizeRequestPP postAuthzRequest = servConn.getPostAuthzRequest();
    if (postAuthzRequest != null) {
      if (cqQuery == null) {
        queryContext = postAuthzRequest.queryAuthorize(queryString, regionNames, result,
            queryContext, params);
      } else {
        queryContext = postAuthzRequest.executeCQAuthorize(cqQuery.getName(), queryString,
            regionNames, result, queryContext);
      }
      result = queryContext.getQueryResult();
    }
    if (result instanceof SelectResults) {
      SelectResults selectResults = (SelectResults) result;
      if (logger.isDebugEnabled()) {
        logger.debug("Query Result size for : {} is {}", query.getQueryString(),
            selectResults.size());
      }
      CollectionType collectionType = null;
      boolean sendCqResultsWithKey = true;
      boolean isStructs = false;
      // check if resultset has serialized objects, so that they could be sent
      // as ObjectPartList
      boolean hasSerializedObjects = ((DefaultQuery) query).isKeepSerialized();
      if (logger.isDebugEnabled()) {
        logger.debug("Query Result for :{} has serialized objects: {}", query.getQueryString(),
            hasSerializedObjects);
      }
      // Don't convert to a Set, there might be duplicates now
      // The results in a StructSet are stored in Object[]s
      // Get them as Object[]s for the objs[] in order to avoid duplicating
      // the StructTypes
      // Object[] objs = new Object[selectResults.size()];
      // Get the collection type (which includes the element type)
      // (used to generate the appropriate instance on the client)
      collectionType = getCollectionType(selectResults);
      isStructs = collectionType.getElementType().isStructType();
      // Check if the Query is from CQ execution.
      if (cqQuery != null) {
        // Check if the key can be sent to the client based on its version.
        sendCqResultsWithKey = sendCqResultsWithKey(servConn);
        if (sendCqResultsWithKey) {
          // Update the collection type to include key info.
          collectionType = new CollectionTypeImpl(Collection.class,
              new StructTypeImpl(new String[] {"key", "value"}));
          isStructs = collectionType.getElementType().isStructType();
        }
      }
      int numberOfChunks = (int) Math.ceil(selectResults.size() * 1.0 / MAXIMUM_CHUNK_SIZE);
      if (logger.isTraceEnabled()) {
        logger.trace("{}: Query results size: {}: Entries in chunk: {}: Number of chunks: {}",
            servConn.getName(), selectResults.size(), MAXIMUM_CHUNK_SIZE, numberOfChunks);
      }
      long oldStart = start;
      start = DistributionStats.getStatTime();
      stats.incProcessQueryTime(start - oldStart);
      if (sendResults) {
        queryResponseMsg.setMessageType(MessageType.RESPONSE);
        queryResponseMsg.setTransactionId(msg.getTransactionId());
        queryResponseMsg.sendHeader();
      }
      if (sendResults && numberOfChunks == 0) {
        // Send 1 empty chunk
        if (logger.isTraceEnabled()) {
          logger.trace("{}: Creating chunk: 0", servConn.getName());
        }
        writeQueryResponseChunk(new Object[0], collectionType, true, servConn);
        if (logger.isDebugEnabled()) {
          logger.debug("{}: Sent chunk (1 of 1) of query response for query {}",
              servConn.getName(), queryString);
        }
      } else {
        // send it as a part of ObjectPartList
        if (hasSerializedObjects) {
          sendResultsAsObjectPartList(numberOfChunks, servConn, selectResults.asList(), isStructs,
              collectionType, queryString, cqQuery, sendCqResultsWithKey, sendResults);
        } else {
          sendResultsAsObjectArray(selectResults, numberOfChunks, servConn, isStructs,
              collectionType, queryString, cqQuery, sendCqResultsWithKey, sendResults);
        }
      }
      if (cqQuery != null) {
        // Set the CQ query result cache initialized flag.
        cqQuery.setCqResultsCacheInitialized();
      }
    } else if (result instanceof Integer) {
      if (sendResults) {
        queryResponseMsg.setMessageType(MessageType.RESPONSE);
        queryResponseMsg.setTransactionId(msg.getTransactionId());
        queryResponseMsg.sendHeader();
        writeQueryResponseChunk(result, null, true, servConn);
      }
    } else {
      throw new QueryInvalidException(
          LocalizedStrings.BaseCommand_UNKNOWN_RESULT_TYPE_0.toLocalizedString(result.getClass()));
    }
    msg.clearParts();
  } catch (QueryInvalidException e) {
    // Handle this exception differently since it can contain
    // non-serializable objects.
    // java.io.NotSerializableException: antlr.CommonToken
    // Log a warning to show stack trace and create a new
    // QueryInvalidException on the original one's message (not cause).
    logger.warn(LocalizedMessage.create(
        LocalizedStrings.BaseCommand_UNEXPECTED_QUERYINVALIDEXCEPTION_WHILE_PROCESSING_QUERY_0,
        queryString), e);
    QueryInvalidException qie = new QueryInvalidException(
        LocalizedStrings.BaseCommand_0_QUERYSTRING_IS_1
            .toLocalizedString(new Object[] {e.getLocalizedMessage(), queryString}));
    writeQueryResponseException(msg, qie, servConn);
    return false;
  } catch (DistributedSystemDisconnectedException se) {
    if (msg != null && logger.isDebugEnabled()) {
      logger.debug(
          "{}: ignoring message of type {} from client {} because shutdown occurred during message processing.",
          servConn.getName(), MessageType.getString(msg.getMessageType()), servConn.getProxyID());
    }
    servConn.setFlagProcessMessagesAsFalse();
    servConn.setClientDisconnectedException(se);
    return false;
  } catch (Exception e) {
    // If an interrupted exception is thrown, rethrow it
    checkForInterrupt(servConn, e);
    // Otherwise, write a query response and continue
    // Check if query got canceled from QueryMonitor.
    DefaultQuery defaultQuery = (DefaultQuery) query;
    if (defaultQuery.isCanceled()) {
      e = new QueryException(defaultQuery.getQueryCanceledException().getMessage(), e.getCause());
    }
    writeQueryResponseException(msg, e, servConn);
    return false;
  } finally {
    // Since the query object is being shared in case of bind queries,
    // resetting the flag may cause inconsistency.
    // Also since this flag is only being set in code path executed by
    // remote query execution, resetting it is not required.
    // ((DefaultQuery)query).setRemoteQuery(false);
  }
  if (logger.isDebugEnabled()) {
    logger.debug("{}: Sent query response for query {}", servConn.getName(), queryString);
  }
  stats.incWriteQueryResponseTime(DistributionStats.getStatTime() - start);
  return true;
}
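The chunking logic above is the part most worth isolating: the command computes the number of chunks from the result size, sends one empty chunk when there are no results so the client still receives a terminated response, and marks the final chunk as last. A small stand-alone sketch of that arithmetic follows; MAXIMUM_CHUNK_SIZE here is a local constant chosen for illustration rather than Geode's configured value, and the printed lines stand in for the writeQueryResponseChunk calls.

import java.util.Collections;
import java.util.List;

public class QueryChunkingSketch {

  // illustrative value; the real command uses its own configured chunk size
  private static final int MAXIMUM_CHUNK_SIZE = 100;

  static void sendInChunks(List<Object> results) {
    int numberOfChunks = (int) Math.ceil(results.size() * 1.0 / MAXIMUM_CHUNK_SIZE);
    if (numberOfChunks == 0) {
      // empty result set: still send one empty, final chunk
      System.out.println("chunk 1 of 1 (empty), last=true");
      return;
    }
    for (int i = 0; i < numberOfChunks; i++) {
      int from = i * MAXIMUM_CHUNK_SIZE;
      int to = Math.min(from + MAXIMUM_CHUNK_SIZE, results.size());
      boolean last = (i == numberOfChunks - 1);
      // in the real command each slice is written with writeQueryResponseChunk(...)
      System.out.println("chunk " + (i + 1) + " of " + numberOfChunks + " with "
          + (to - from) + " entries, last=" + last);
    }
  }

  public static void main(String[] args) {
    sendInChunks(Collections.nCopies(250, (Object) "row")); // 3 chunks: 100, 100, 50
    sendInChunks(Collections.emptyList()); // one empty chunk
  }
}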