use of org.apache.geode.cache.CacheClosedException in project geode by apache.
the class Oplog method basicCreate.
/**
* A helper function that decides whether to create the entry in the current oplog or to switch
* to the next oplog. It lets us reuse the byte buffer created for an oplog that no longer
* permits further writes.
*
* @param entry DiskEntry object representing the current Entry
*/
private void basicCreate(DiskRegion dr, DiskEntry entry, ValueWrapper value, byte userBits, boolean async) throws IOException, InterruptedException {
DiskId id = entry.getDiskId();
boolean useNextOplog = false;
long startPosForSynchOp = -1;
if (DiskStoreImpl.KRF_DEBUG) {
// wait for cache close to create krf
System.out.println("basicCreate KRF_DEBUG");
Thread.sleep(1000);
}
synchronized (this.lock) {
// TODO soplog perf analysis shows this as a
// contention point
// synchronized (this.crf) {
initOpState(OPLOG_NEW_ENTRY_0ID, dr, entry, value, userBits, false);
// Check if the current data in ByteBuffer will cause a
// potential increase in the size greater than the max allowed
long temp = (getOpStateSize() + this.crf.currSize);
if (!this.wroteNewEntryBase) {
temp += OPLOG_NEW_ENTRY_BASE_REC_SIZE;
}
if (this != getOplogSet().getChild()) {
useNextOplog = true;
} else if (temp > getMaxCrfSize() && !isFirstRecord()) {
switchOpLog(dr, getOpStateSize(), entry);
useNextOplog = true;
} else {
if (this.lockedForKRFcreate) {
CacheClosedException cce = new CacheClosedException("The disk store is closed.");
dr.getCancelCriterion().checkCancelInProgress(cce);
throw cce;
}
this.firstRecord = false;
writeNewEntryBaseRecord(async);
// Now we can finally call newOplogEntryId.
// We need to make sure the create records
// are written in the same order as they are created.
// This allows us to not encode the oplogEntryId explicitly in the
// record
long createOplogEntryId = getOplogSet().newOplogEntryId();
id.setKeyId(createOplogEntryId);
// startPosForSynchOp = this.crf.currSize;
// Allow it to be added to the oplog, so increase the
// size of the current oplog
int dataLength = getOpStateSize();
// It is necessary that we set the
// Oplog ID here without releasing the lock on object as we are
// writing to the file after releasing the lock. This can cause
// a situation where the
// switching thread has added Oplog for compaction while the previous
// thread has still not started writing. Thus compactor can
// miss an entry as the oplog Id was not set till then.
// This is because a compactor thread will iterate over the entries &
// use only those which have OplogID equal to that of Oplog being
// compacted without taking any lock. A lock is taken only if the
// entry is a potential candidate.
// Further the compactor may delete the file as a compactor thread does
// not require to take any shared/exclusive lock at DiskStoreImpl
// or Oplog level.
// It is also assumed that the compactor thread will take a lock on both
// the entry as well as the DiskID while compacting. In synch mode we can
// safely set the OplogID without taking a lock on the DiskId, but in
// asynch mode we have to take additional precautions, as the asynch
// writer of the previous oplog can interfere with the current oplog.
id.setOplogId(getOplogId());
// do the io while holding lock so that switch can set doneAppending
// Write the data to the opLog for the synch mode
startPosForSynchOp = writeOpLogBytes(this.crf, async, true);
// if (this.crf.currSize != startPosForSynchOp) {
// assert false;
// }
this.crf.currSize = temp;
if (EntryBits.isNeedsValue(userBits)) {
id.setValueLength(value.getLength());
} else {
id.setValueLength(0);
}
id.setUserBits(userBits);
if (logger.isTraceEnabled()) {
logger.trace("Oplog::basicCreate:Release dByteBuffer with data for Disk ID = {}", id);
}
// As such, for any put or get operation, a synch is taken
// on the Entry object in the DiskEntry's Helper functions.
// Compactor thread will also take a lock on entry object. Therefore
// we do not require a lock on DiskID, as concurrent access for
// value will not occur.
startPosForSynchOp += getOpStateValueOffset();
if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
VersionTag tag = null;
if (entry.getVersionStamp() != null) {
tag = entry.getVersionStamp().asVersionTag();
}
logger.trace(LogMarker.PERSIST_WRITES, "basicCreate: id=<{}> key=<{}> valueOffset={} userBits={} valueLen={} valueBytes={} drId={} versionTag={} oplog#{}", abs(id.getKeyId()), entry.getKey(), startPosForSynchOp, userBits, (value != null ? value.getLength() : 0), value.getBytesAsString(), dr.getId(), tag, getOplogId());
}
id.setOffsetInOplog(startPosForSynchOp);
addLive(dr, entry);
// Size of the current oplog being increased
// due to 'create' operation. Set the change in stats.
this.dirHolder.incrementTotalOplogSize(dataLength);
incTotalCount();
// Update the region version vector for the disk store.
// This needs to be done under lock so that we don't switch oplogs
// until the version vector accurately represents what is in this oplog
RegionVersionVector rvv = dr.getRegionVersionVector();
if (rvv != null && entry.getVersionStamp() != null) {
rvv.recordVersion(entry.getVersionStamp().getMemberID(), entry.getVersionStamp().getRegionVersion());
}
EntryLogger.logPersistPut(dr.getName(), entry.getKey(), dr.getDiskStoreID());
}
clearOpState();
// }
}
if (useNextOplog) {
if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
CacheObserverHolder.getInstance().afterSwitchingOplog();
}
Assert.assertTrue(this != getOplogSet().getChild());
getOplogSet().getChild().basicCreate(dr, entry, value, userBits, async);
} else {
if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
CacheObserverHolder.getInstance().afterSettingOplogOffSet(startPosForSynchOp);
}
}
}
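The CacheClosedException usage here is the guard at the top of the write path: when the oplog is locked for KRF creation, a CacheClosedException is created locally, offered to the region's CancelCriterion (which throws the real cancellation cause if a shutdown is in progress), and only thrown as-is otherwise. A minimal standalone sketch of that guard follows; the class, method, and parameter names are illustrative, not Geode API.

import org.apache.geode.CancelCriterion;
import org.apache.geode.cache.CacheClosedException;

class KrfGuardSketch {
  // Hypothetical helper mirroring the lockedForKRFcreate check in basicCreate.
  static void failIfLockedForKrf(boolean lockedForKRFcreate, CancelCriterion cancelCriterion) {
    if (lockedForKRFcreate) {
      CacheClosedException cce = new CacheClosedException("The disk store is closed.");
      // If a shutdown is in progress, this throws the actual cancellation exception
      // with cce attached as the cause; otherwise it returns normally.
      cancelCriterion.checkCancelInProgress(cce);
      // No cancellation was detected, so report the closed disk store directly.
      throw cce;
    }
  }
}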
use of org.apache.geode.cache.CacheClosedException in project geode by apache.
the class Oplog method basicSaveConflictVersionTag.
private void basicSaveConflictVersionTag(DiskRegionView dr, VersionTag tag, boolean async) throws IOException, InterruptedException {
boolean useNextOplog = false;
int adjustment = 0;
synchronized (this.lock) {
if (getOplogSet().getChild() != this) {
useNextOplog = true;
} else {
this.opState.initialize(OPLOG_CONFLICT_VERSION, dr.getId(), tag);
adjustment = getOpStateSize();
assert adjustment > 0;
long temp = (this.crf.currSize + adjustment);
if (temp > getMaxCrfSize() && !isFirstRecord()) {
switchOpLog(dr, adjustment, null);
// we can't reuse it since it contains variable length data
useNextOplog = true;
} else {
if (this.lockedForKRFcreate) {
CacheClosedException cce = new CacheClosedException("The disk store is closed.");
dr.getCancelCriterion().checkCancelInProgress(cce);
throw cce;
}
this.firstRecord = false;
writeOpLogBytes(this.crf, async, true);
this.crf.currSize = temp;
if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
logger.trace(LogMarker.PERSIST_WRITES, "basicSaveConflictVersionTag: drId={} versionStamp={} oplog#{}", dr.getId(), tag, getOplogId());
}
this.dirHolder.incrementTotalOplogSize(adjustment);
// Update the region version vector for the disk store.
// This needs to be done under lock so that we don't switch oplogs
// until the version vector accurately represents what is in this oplog
RegionVersionVector rvv = dr.getRegionVersionVector();
if (rvv != null && dr.getFlags().contains(DiskRegionFlag.IS_WITH_VERSIONING)) {
rvv.recordVersion(tag.getMemberID(), tag.getRegionVersion());
}
}
clearOpState();
}
}
if (useNextOplog) {
if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
CacheObserverHolder.getInstance().afterSwitchingOplog();
}
Assert.assertTrue(getOplogSet().getChild() != this);
getOplogSet().getChild().basicSaveConflictVersionTag(dr, tag, async);
}
}
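Both basicCreate and basicSaveConflictVersionTag share the same "write or roll over" control flow: the size check and the write happen inside synchronized (this.lock), while the retry on the newer oplog happens only after the lock is released. Below is a condensed, self-contained sketch of that flow; all names are hypothetical, and oplog state is reduced to a plain size counter.

class OplogRolloverSketch {
  private final Object lock = new Object();
  private final long maxSize;
  private long currentSize;
  private OplogRolloverSketch child = this; // the oplog currently accepting writes

  OplogRolloverSketch(long maxSize) {
    this.maxSize = maxSize;
  }

  void append(int recordSize) {
    boolean useNextOplog = false;
    synchronized (lock) {
      if (child != this) {
        // another thread already switched oplogs; retry on the child below
        useNextOplog = true;
      } else if (currentSize + recordSize > maxSize && currentSize > 0) {
        child = new OplogRolloverSketch(maxSize); // roll to a fresh oplog
        useNextOplog = true;
      } else {
        currentSize += recordSize; // normal case: "write" under the lock
      }
    }
    if (useNextOplog) {
      child.append(recordSize); // recurse outside the lock, as the real code does
    }
  }
}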
use of org.apache.geode.cache.CacheClosedException in project geode by apache.
the class PRFunctionStreamingResultCollector method getResult.
@Override
public Object getResult(long timeout, TimeUnit unit) throws FunctionException, InterruptedException {
long timeoutInMillis = unit.toMillis(timeout);
if (this.resultCollected) {
throw new FunctionException("Result already collected");
}
this.resultCollected = true;
if (this.hasResult) {
try {
long timeBefore = System.currentTimeMillis();
if (!this.waitForCacheOrFunctionException(timeoutInMillis)) {
throw new FunctionException("All results not received in time provided.");
}
long timeAfter = System.currentTimeMillis();
timeoutInMillis = timeoutInMillis - (timeAfter - timeBefore);
if (timeoutInMillis < 0) {
timeoutInMillis = 0;
}
if (!this.execution.getFailedNodes().isEmpty() && !this.execution.isClientServerMode()) {
// end the rc and clear it
endResults();
clearResults();
this.execution = this.execution.setIsReExecute();
ResultCollector newRc = null;
if (execution.isFnSerializationReqd()) {
newRc = this.execution.execute(this.fn);
} else {
newRc = this.execution.execute(this.fn.getId());
}
return newRc.getResult(timeoutInMillis, unit);
}
if (!this.execution.getWaitOnExceptionFlag() && this.fites.size() > 0) {
throw new FunctionException(this.fites.get(0));
}
} catch (FunctionInvocationTargetException fite) {
if (!this.fn.isHA()) {
throw new FunctionException(fite);
} else if (execution.isClientServerMode()) {
clearResults();
FunctionInvocationTargetException fe = new InternalFunctionInvocationTargetException(fite.getMessage(), this.execution.getFailedNodes());
throw new FunctionException(fe);
} else {
clearResults();
this.execution = this.execution.setIsReExecute();
ResultCollector newRc = null;
if (execution.isFnSerializationReqd()) {
newRc = this.execution.execute(this.fn);
} else {
newRc = this.execution.execute(this.fn.getId());
}
return newRc.getResult(timeoutInMillis, unit);
}
} catch (BucketMovedException e) {
if (!this.fn.isHA()) {
// endResults();
FunctionInvocationTargetException fite = new FunctionInvocationTargetException(e.getMessage());
throw new FunctionException(fite);
} else if (execution.isClientServerMode()) {
// endResults();
clearResults();
FunctionInvocationTargetException fite = new FunctionInvocationTargetException(e.getMessage());
throw new FunctionException(fite);
} else {
// endResults();
clearResults();
this.execution = this.execution.setIsReExecute();
ResultCollector newRc = null;
if (execution.isFnSerializationReqd()) {
newRc = this.execution.execute(this.fn);
} else {
newRc = this.execution.execute(this.fn.getId());
}
return newRc.getResult(timeoutInMillis, unit);
}
} catch (CacheClosedException e) {
if (!this.fn.isHA()) {
// endResults();
FunctionInvocationTargetException fite = new FunctionInvocationTargetException(e.getMessage());
throw new FunctionException(fite);
} else if (execution.isClientServerMode()) {
// endResults();
clearResults();
FunctionInvocationTargetException fite = new InternalFunctionInvocationTargetException(e.getMessage(), this.execution.getFailedNodes());
throw new FunctionException(fite);
} else {
// endResults();
clearResults();
this.execution = this.execution.setIsReExecute();
ResultCollector newRc = null;
if (execution.isFnSerializationReqd()) {
newRc = this.execution.execute(this.fn);
} else {
newRc = this.execution.execute(this.fn.getId());
}
return newRc.getResult(timeoutInMillis, unit);
}
} catch (CacheException e) {
// endResults();
throw new FunctionException(e);
} catch (ForceReattemptException e) {
// the function.
if (!this.fn.isHA()) {
throw new FunctionException(e);
} else if (execution.isClientServerMode()) {
clearResults();
FunctionInvocationTargetException iFITE = new InternalFunctionInvocationTargetException(e.getMessage(), this.execution.getFailedNodes());
throw new FunctionException(iFITE);
} else {
clearResults();
this.execution = this.execution.setIsReExecute();
ResultCollector newRc = null;
if (execution.isFnSerializationReqd()) {
newRc = this.execution.execute(this.fn);
} else {
newRc = this.execution.execute(this.fn.getId());
}
return newRc.getResult();
}
}
}
// As we have already waited for the timeout earlier, we expect results to be ready.
return this.userRC.getResult(timeoutInMillis, unit);
}
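Every CacheClosedException branch above reduces to the same decision: a non-HA function fails fast with a FunctionException, while an HA function clears the partial results and re-executes. Below is a hypothetical caller-side sketch of that decision logic written against the public Execution/ResultCollector API, assuming the cause chain surfaces the CacheClosedException; the internal re-execution bookkeeping (setIsReExecute, failed-node tracking) is omitted.

import java.util.concurrent.TimeUnit;

import org.apache.geode.cache.CacheClosedException;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.ResultCollector;

class HaRetrySketch {
  static Object collectWithRetry(Execution execution, Function fn, long timeoutMillis)
      throws InterruptedException {
    ResultCollector<?, ?> rc = execution.execute(fn);
    try {
      return rc.getResult(timeoutMillis, TimeUnit.MILLISECONDS);
    } catch (FunctionException fe) {
      if (fn.isHA() && fe.getCause() instanceof CacheClosedException) {
        // A member's cache closed mid-run; an HA function is safe to re-execute once.
        return execution.execute(fn).getResult(timeoutMillis, TimeUnit.MILLISECONDS);
      }
      throw fe; // non-HA, or a different failure: propagate, as the code above does
    }
  }
}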
use of org.apache.geode.cache.CacheClosedException in project geode by apache.
the class Connection method processNIOBuffer.
/**
* processes the current NIO buffer. If there are complete messages in the buffer, they are
* deserialized and passed to TCPConduit for further processing
*/
private void processNIOBuffer() throws ConnectionException, IOException {
if (nioInputBuffer != null) {
nioInputBuffer.flip();
}
boolean done = false;
while (!done && connected) {
this.owner.getConduit().getCancelCriterion().checkCancelInProgress(null);
// long startTime = DistributionStats.getStatTime();
int remaining = nioInputBuffer.remaining();
if (nioLengthSet || remaining >= MSG_HEADER_BYTES) {
if (!nioLengthSet) {
int headerStartPos = nioInputBuffer.position();
nioMessageLength = nioInputBuffer.getInt();
/* nioMessageVersion = */
calcHdrVersion(nioMessageLength);
nioMessageLength = calcMsgByteSize(nioMessageLength);
nioMessageType = nioInputBuffer.get();
nioMsgId = nioInputBuffer.getShort();
directAck = (nioMessageType & DIRECT_ACK_BIT) != 0;
if (directAck) {
// clear the ack bit
nioMessageType &= ~DIRECT_ACK_BIT;
}
// Following validation fixes bug 31145
if (!validMsgType(nioMessageType)) {
Integer nioMessageTypeInteger = Integer.valueOf(nioMessageType);
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_UNKNOWN_P2P_MESSAGE_TYPE_0, nioMessageTypeInteger));
this.readerShuttingDown = true;
requestClose(LocalizedStrings.Connection_UNKNOWN_P2P_MESSAGE_TYPE_0.toLocalizedString(nioMessageTypeInteger));
break;
}
nioLengthSet = true;
// keep the header "in" the buffer until we have read the entire msg.
// Trust me: this will reduce copying on large messages.
nioInputBuffer.position(headerStartPos);
}
if (remaining >= nioMessageLength + MSG_HEADER_BYTES) {
nioLengthSet = false;
nioInputBuffer.position(nioInputBuffer.position() + MSG_HEADER_BYTES);
// don't trust the message deserialization to leave the position in
// the correct spot. Some of the serialization uses buffered
// streams that can leave the position at the wrong spot
int startPos = nioInputBuffer.position();
int oldLimit = nioInputBuffer.limit();
nioInputBuffer.limit(startPos + nioMessageLength);
if (this.handshakeRead) {
if (nioMessageType == NORMAL_MSG_TYPE) {
this.owner.getConduit().stats.incMessagesBeingReceived(true, nioMessageLength);
ByteBufferInputStream bbis = remoteVersion == null ? new ByteBufferInputStream(nioInputBuffer) : new VersionedByteBufferInputStream(nioInputBuffer, remoteVersion);
DistributionMessage msg = null;
try {
ReplyProcessor21.initMessageRPId();
// add serialization stats
long startSer = this.owner.getConduit().stats.startMsgDeserialization();
msg = (DistributionMessage) InternalDataSerializer.readDSFID(bbis);
this.owner.getConduit().stats.endMsgDeserialization(startSer);
if (bbis.available() != 0) {
logger.warn(LocalizedMessage.create(LocalizedStrings.Connection_MESSAGE_DESERIALIZATION_OF_0_DID_NOT_READ_1_BYTES, new Object[] { msg, Integer.valueOf(bbis.available()) }));
}
try {
if (!dispatchMessage(msg, nioMessageLength, directAck)) {
directAck = false;
}
} catch (MemberShunnedException e) {
// don't respond (bug39117)
directAck = false;
} catch (Exception de) {
this.owner.getConduit().getCancelCriterion().checkCancelInProgress(de);
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_ERROR_DISPATCHING_MESSAGE), de);
} catch (ThreadDeath td) {
throw td;
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned
// now, so don't let this thread continue.
throw err;
} catch (Throwable t) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_THROWABLE_DISPATCHING_MESSAGE), t);
}
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned
// now, so don't let this thread continue.
throw err;
} catch (Throwable t) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
sendFailureReply(ReplyProcessor21.getMessageRPId(), LocalizedStrings.Connection_ERROR_DESERIALIZING_MESSAGE.toLocalizedString(), t, directAck);
if (t instanceof ThreadDeath) {
throw (ThreadDeath) t;
}
if (t instanceof CancelException) {
if (!(t instanceof CacheClosedException)) {
// only rethrow cancellations that are not a CacheClosedException; see bug 43543
throw (CancelException) t;
}
}
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_ERROR_DESERIALIZING_MESSAGE), t);
} finally {
ReplyProcessor21.clearMessageRPId();
}
} else if (nioMessageType == CHUNKED_MSG_TYPE) {
MsgDestreamer md = obtainMsgDestreamer(nioMsgId, remoteVersion);
this.owner.getConduit().stats.incMessagesBeingReceived(md.size() == 0, nioMessageLength);
try {
md.addChunk(nioInputBuffer, nioMessageLength);
} catch (IOException ex) {
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_FAILED_HANDLING_CHUNK_MESSAGE), ex);
}
} else /* (nioMessageType == END_CHUNKED_MSG_TYPE) */
{
// logger.info("END_CHUNK msgId="+nioMsgId);
MsgDestreamer md = obtainMsgDestreamer(nioMsgId, remoteVersion);
this.owner.getConduit().stats.incMessagesBeingReceived(md.size() == 0, nioMessageLength);
try {
md.addChunk(nioInputBuffer, nioMessageLength);
} catch (IOException ex) {
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_FAILED_HANDLING_END_CHUNK_MESSAGE), ex);
}
DistributionMessage msg = null;
int msgLength = 0;
String failureMsg = null;
Throwable failureEx = null;
int rpId = 0;
boolean interrupted = false;
try {
msg = md.getMessage();
} catch (ClassNotFoundException ex) {
this.owner.getConduit().stats.decMessagesBeingReceived(md.size());
failureMsg = LocalizedStrings.Connection_CLASSNOTFOUND_DESERIALIZING_MESSAGE.toLocalizedString();
failureEx = ex;
rpId = md.getRPid();
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_CLASSNOTFOUND_DESERIALIZING_MESSAGE_0, ex));
} catch (IOException ex) {
this.owner.getConduit().stats.decMessagesBeingReceived(md.size());
failureMsg = LocalizedStrings.Connection_IOEXCEPTION_DESERIALIZING_MESSAGE.toLocalizedString();
failureEx = ex;
rpId = md.getRPid();
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_IOEXCEPTION_DESERIALIZING_MESSAGE), failureEx);
} catch (InterruptedException ex) {
interrupted = true;
this.owner.getConduit().getCancelCriterion().checkCancelInProgress(ex);
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned
// now, so don't let this thread continue.
throw err;
} catch (Throwable ex) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
this.owner.getConduit().getCancelCriterion().checkCancelInProgress(ex);
this.owner.getConduit().stats.decMessagesBeingReceived(md.size());
failureMsg = LocalizedStrings.Connection_UNEXPECTED_FAILURE_DESERIALIZING_MESSAGE.toLocalizedString();
failureEx = ex;
rpId = md.getRPid();
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_UNEXPECTED_FAILURE_DESERIALIZING_MESSAGE), failureEx);
} finally {
msgLength = md.size();
releaseMsgDestreamer(nioMsgId, md);
if (interrupted) {
Thread.currentThread().interrupt();
}
}
if (msg != null) {
try {
if (!dispatchMessage(msg, msgLength, directAck)) {
directAck = false;
}
} catch (MemberShunnedException e) {
// not a member anymore - don't reply
directAck = false;
} catch (Exception de) {
this.owner.getConduit().getCancelCriterion().checkCancelInProgress(de);
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_ERROR_DISPATCHING_MESSAGE), de);
} catch (ThreadDeath td) {
throw td;
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned
// now, so don't let this thread continue.
throw err;
} catch (Throwable t) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_THROWABLE_DISPATCHING_MESSAGE), t);
}
} else if (failureEx != null) {
sendFailureReply(rpId, failureMsg, failureEx, directAck);
}
}
} else {
// read HANDSHAKE
ByteBufferInputStream bbis = new ByteBufferInputStream(nioInputBuffer);
DataInputStream dis = new DataInputStream(bbis);
if (!this.isReceiver) {
try {
this.replyCode = dis.readUnsignedByte();
if (this.replyCode == REPLY_CODE_OK_WITH_ASYNC_INFO) {
this.asyncDistributionTimeout = dis.readInt();
this.asyncQueueTimeout = dis.readInt();
this.asyncMaxQueueSize = (long) dis.readInt() * (1024 * 1024);
if (this.asyncDistributionTimeout != 0) {
logger.info(LocalizedMessage.create(LocalizedStrings.Connection_0_ASYNC_CONFIGURATION_RECEIVED_1, new Object[] { p2pReaderName(), " asyncDistributionTimeout=" + this.asyncDistributionTimeout + " asyncQueueTimeout=" + this.asyncQueueTimeout + " asyncMaxQueueSize=" + (this.asyncMaxQueueSize / (1024 * 1024)) }));
}
// read the product version ordinal for on-the-fly serialization
// transformations (for rolling upgrades)
this.remoteVersion = Version.readVersion(dis, true);
}
} catch (Exception e) {
this.owner.getConduit().getCancelCriterion().checkCancelInProgress(e);
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_ERROR_DESERIALIZING_P2P_HANDSHAKE_REPLY), e);
this.readerShuttingDown = true;
requestClose(LocalizedStrings.Connection_ERROR_DESERIALIZING_P2P_HANDSHAKE_REPLY.toLocalizedString());
return;
} catch (ThreadDeath td) {
throw td;
} catch (VirtualMachineError err) {
SystemFailure.initiateFailure(err);
// If this ever returns, rethrow the error. We're poisoned
// now, so don't let this thread continue.
throw err;
} catch (Throwable t) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_THROWABLE_DESERIALIZING_P2P_HANDSHAKE_REPLY), t);
this.readerShuttingDown = true;
requestClose(LocalizedStrings.Connection_THROWABLE_DESERIALIZING_P2P_HANDSHAKE_REPLY.toLocalizedString());
return;
}
if (this.replyCode != REPLY_CODE_OK && this.replyCode != REPLY_CODE_OK_WITH_ASYNC_INFO) {
StringId err = LocalizedStrings.Connection_UNKNOWN_HANDSHAKE_REPLY_CODE_0_NIOMESSAGELENGTH_1_PROCESSORTYPE_2;
Object[] errArgs = new Object[] { Integer.valueOf(this.replyCode), Integer.valueOf(nioMessageLength) };
if (replyCode == 0 && logger.isDebugEnabled()) {
// bug 37113
logger.debug(err.toLocalizedString(errArgs) + " (peer probably departed ungracefully)");
} else {
logger.fatal(LocalizedMessage.create(err, errArgs));
}
this.readerShuttingDown = true;
requestClose(err.toLocalizedString(errArgs));
return;
}
notifyHandshakeWaiter(true);
} else {
try {
byte b = dis.readByte();
if (b != 0) {
throw new IllegalStateException(LocalizedStrings.Connection_DETECTED_OLD_VERSION_PRE_501_OF_GEMFIRE_OR_NONGEMFIRE_DURING_HANDSHAKE_DUE_TO_INITIAL_BYTE_BEING_0.toLocalizedString(new Byte(b)));
}
byte handShakeByte = dis.readByte();
if (handShakeByte != HANDSHAKE_VERSION) {
throw new IllegalStateException(LocalizedStrings.Connection_DETECTED_WRONG_VERSION_OF_GEMFIRE_PRODUCT_DURING_HANDSHAKE_EXPECTED_0_BUT_FOUND_1.toLocalizedString(new Object[] { new Byte(HANDSHAKE_VERSION), new Byte(handShakeByte) }));
}
InternalDistributedMember remote = DSFIDFactory.readInternalDistributedMember(dis);
setRemoteAddr(remote);
this.sharedResource = dis.readBoolean();
this.preserveOrder = dis.readBoolean();
this.uniqueId = dis.readLong();
// read the product version ordinal for on-the-fly serialization
// transformations (for rolling upgrades)
this.remoteVersion = Version.readVersion(dis, true);
int dominoNumber = 0;
if (this.remoteVersion == null || (this.remoteVersion.compareTo(Version.GFE_80) >= 0)) {
dominoNumber = dis.readInt();
if (this.sharedResource) {
dominoNumber = 0;
}
dominoCount.set(dominoNumber);
// this.senderName = dis.readUTF();
}
if (!this.sharedResource) {
if (tipDomino()) {
logger.info(LocalizedMessage.create(LocalizedStrings.Connection_THREAD_OWNED_RECEIVER_FORCING_ITSELF_TO_SEND_ON_THREAD_OWNED_SOCKETS));
// bug #49565 - if domino count is >= 2 use shared resources.
// Also see DistributedCacheOperation#supportsDirectAck
} else {
// if (dominoNumber < 2) {
ConnectionTable.threadWantsOwnResources();
if (logger.isDebugEnabled()) {
logger.debug("thread-owned receiver with domino count of {} will prefer sending on thread-owned sockets", dominoNumber);
}
// } else {
// ConnectionTable.threadWantsSharedResources();
}
this.conduit.stats.incThreadOwnedReceivers(1L, dominoNumber);
// Because this thread is not a shared resource, it will be used for direct
// ack. Direct ack messages can be large. This call will resize the send
// buffer.
setSendBufferSize(this.socket);
}
// String name = owner.getDM().getConfig().getName();
// if (name == null) {
// name = "pid="+OSProcess.getId();
// }
setThreadName(dominoNumber);
} catch (Exception e) {
// bug 37101
this.owner.getConduit().getCancelCriterion().checkCancelInProgress(e);
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_ERROR_DESERIALIZING_P2P_HANDSHAKE_MESSAGE), e);
this.readerShuttingDown = true;
requestClose(LocalizedStrings.Connection_ERROR_DESERIALIZING_P2P_HANDSHAKE_MESSAGE.toLocalizedString());
return;
}
if (logger.isDebugEnabled()) {
logger.debug("P2P handshake remoteAddr is {}{}", this.remoteAddr, (this.remoteVersion != null ? " (" + this.remoteVersion + ')' : ""));
}
try {
String authInit = System.getProperty(DistributionConfigImpl.SECURITY_SYSTEM_PREFIX + SECURITY_PEER_AUTH_INIT);
boolean isSecure = authInit != null && authInit.length() != 0;
if (isSecure) {
if (owner.getConduit().waitForMembershipCheck(this.remoteAddr)) {
// fix for bug 33224
sendOKHandshakeReply();
notifyHandshakeWaiter(true);
} else {
// ARB: check if we need notifyHandshakeWaiter() call.
notifyHandshakeWaiter(false);
logger.warn(LocalizedMessage.create(LocalizedStrings.Connection_0_TIMED_OUT_DURING_A_MEMBERSHIP_CHECK, p2pReaderName()));
return;
}
} else {
// fix for bug 33224
sendOKHandshakeReply();
try {
notifyHandshakeWaiter(true);
} catch (Exception e) {
logger.fatal(LocalizedMessage.create(LocalizedStrings.Connection_UNCAUGHT_EXCEPTION_FROM_LISTENER), e);
}
}
} catch (IOException ex) {
final String err = LocalizedStrings.Connection_FAILED_SENDING_HANDSHAKE_REPLY.toLocalizedString();
if (logger.isDebugEnabled()) {
logger.debug(err, ex);
}
this.readerShuttingDown = true;
requestClose(err + ": " + ex);
return;
}
}
}
if (!connected) {
continue;
}
accessed();
nioInputBuffer.limit(oldLimit);
nioInputBuffer.position(startPos + nioMessageLength);
} else {
done = true;
compactOrResizeBuffer(nioMessageLength);
}
} else {
done = true;
if (nioInputBuffer.position() != 0) {
nioInputBuffer.compact();
} else {
nioInputBuffer.position(nioInputBuffer.limit());
nioInputBuffer.limit(nioInputBuffer.capacity());
}
}
}
}
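The only CacheClosedException-specific logic in this large method is the filter in the deserialization error handler: a CancelException must propagate so the reader thread shuts down, unless it is merely a CacheClosedException, which is only logged (bug 43543). A small standalone sketch of that filter, with hypothetical class and method names:

import org.apache.geode.CancelException;
import org.apache.geode.cache.CacheClosedException;

final class CancellationFilterSketch {
  static void rethrowUnlessCacheClosed(Throwable t) {
    if (t instanceof CancelException && !(t instanceof CacheClosedException)) {
      // A real cancellation: let it unwind the reader thread.
      throw (CancelException) t;
    }
    // A CacheClosedException (or a non-cancellation error) is handled locally,
    // e.g. logged, as the fatal log call above does.
  }
}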
use of org.apache.geode.cache.CacheClosedException in project geode by apache.
the class CommandService method createLocalCommandService.
/* ************** Methods to be implemented by sub-classes END ************ */
/* **************************** factory methods *************************** */
/**
* Returns a newly created or existing instance of the
* <code>CommandService</code> associated with the
* specified <code>Cache</code>.
*
* @param cache Underlying <code>Cache</code> instance to be used to create a Command Service.
* @throws CommandServiceException If command service could not be initialized.
*/
public static CommandService createLocalCommandService(Cache cache) throws CommandServiceException {
if (cache == null || cache.isClosed()) {
throw new CacheClosedException("Can not create command service as cache doesn't exist or cache is closed.");
}
if (localCommandService == null || !localCommandService.isUsable()) {
String nonExistingDependency = CliUtil.cliDependenciesExist(false);
if (nonExistingDependency != null) {
throw new DependenciesNotFoundException(LocalizedStrings.CommandServiceManager_COULD_NOT_FIND__0__LIB_NEEDED_FOR_CLI_GFSH.toLocalizedString(new Object[] { nonExistingDependency }));
}
localCommandService = new MemberCommandService(cache);
}
return localCommandService;
}
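A minimal usage sketch for this factory, highlighting the precondition it enforces: the cache must exist and be open, otherwise the call fails with a CacheClosedException rather than a CommandServiceException. The cache setup and command string below are illustrative.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.management.cli.CommandService;
import org.apache.geode.management.cli.CommandServiceException;
import org.apache.geode.management.cli.Result;

public class LocalCommandServiceUsage {
  public static void main(String[] args) throws CommandServiceException {
    Cache cache = new CacheFactory().create(); // an open cache is required
    try {
      CommandService service = CommandService.createLocalCommandService(cache);
      Result result = service.processCommand("list members"); // any gfsh command string
      System.out.println(result.getStatus());
    } finally {
      // once the cache is closed, createLocalCommandService would throw CacheClosedException
      cache.close();
    }
  }
}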