Usage example of org.apache.geode.cache.Operation from the Apache Geode project: class PartitionedRegion, method postDestroyRegion.
/**
 * Invoked from the recursiveDestroyRegion method of LocalRegion after local teardown. Checks
 * the operation carried by the event and runs the matching close/destroy path for this
 * partitioned region, then computes local filter routing and cleans up disk artifacts and
 * colocation links.
 *
 * @param destroyDiskRegion - true if the contents on disk should be destroyed
 * @param event the RegionEvent
 */
@Override
protected void postDestroyRegion(boolean destroyDiskRegion, RegionEventImpl event) {
if (logger.isDebugEnabled()) {
logger.debug("PartitionedRegion#postDestroyRegion: {}", this);
}
// The caller must already have marked this region destroyed or closed.
Assert.assertTrue(this.isDestroyed || this.isClosed);
// Fixes 44551 - wait for persistent buckets to finish
// recovering before sending the destroy region message
// any GII or wait for persistent recovery will be aborted by the destroy
// flag being set to true, so this shouldn't take long.
this.redundancyProvider.waitForPersistentBucketRecovery();
// fix #39196 OOME caused by leak in GemFireCache.partitionedRegions
this.cache.removePartitionedRegion(this);
this.cache.getInternalResourceManager(false).removeResourceListener(this);
final Operation op = event.getOperation();
stopMissingColocatedRegionLogger();
// Close / local-destroy path: tear down local state without destroying the region globally.
if (op.isClose() || Operation.REGION_LOCAL_DESTROY.equals(op)) {
try {
if (Operation.CACHE_CLOSE.equals(op) || Operation.FORCED_DISCONNECT.equals(op)) {
// Snapshot bucket serials before closing the advisors so the destroy-region
// message can still identify the buckets being closed.
int[] serials = getRegionAdvisor().getBucketSerials();
try {
getRegionAdvisor().closeBucketAdvisors();
// BUGFIX for bug#34672 by Tushar Apshankar. It would update the
// advisors on other nodes about cache closing of this PartitionedRegion
sendDestroyRegionMessage(event, serials);
// to log the fact that those buckets are destroyed here
if (RegionLogger.isEnabled()) {
PartitionedRegionDataStore store = getDataStore();
if (store != null) {
for (BucketRegion bucket : store.getAllLocalBucketRegions()) {
RegionLogger.logDestroy(bucket.getFullPath(), getMyId(), bucket.getPersistentID(), true);
}
}
}
} catch (CancelException ignore) {
// Don't throw this; we're just trying to remove the region.
if (logger.isDebugEnabled()) {
logger.debug("postDestroyRegion: failed sending DestroyRegionMessage due to cache closure");
}
} finally {
// Since we are not calling closePartitionedRegion
// we need to cleanup any diskStore we own here.
// Why don't we call closePartitionedRegion?
// Instead of closing it, we need to register it to be closed later
// Otherwise, when the cache close tries to close all of the bucket regions,
// they'll fail because their disk store is already closed.
DiskStoreImpl dsi = getDiskStore();
if (dsi != null && dsi.getOwnedByRegion()) {
cache.addDiskStore(dsi);
}
}
// Majority of cache close operations handled by
// afterRegionsClosedByCacheClose(GemFireCache
// cache) or GemFireCache.close()
} else {
if (logger.isDebugEnabled()) {
logger.debug("Making closePartitionedRegion call for {} with origin = {} op= {}", this, event.isOriginRemote(), op);
}
try {
closePartitionedRegion(event);
} finally {
// A local destroy also destroys any disk store owned by this region.
if (Operation.REGION_LOCAL_DESTROY.equals(op)) {
DiskStoreImpl dsi = getDiskStore();
if (dsi != null && dsi.getOwnedByRegion()) {
dsi.destroy();
}
}
}
}
} finally {
// tell other members to recover redundancy for any buckets
this.getRegionAdvisor().close();
getPrStats().close();
}
} else if (Operation.REGION_DESTROY.equals(op) || Operation.REGION_EXPIRE_DESTROY.equals(op)) {
if (logger.isDebugEnabled()) {
logger.debug("PartitionedRegion#postDestroyRegion: Making destroyPartitionedRegion call for {} with originRemote = {}", this, event.isOriginRemote());
}
destroyPartitionedRegion(event);
} else {
Assert.assertTrue(false, "Unknown op" + op);
}
// Compute local CQ/interest filter routing for user regions; internal
// meta/admin/bucket/gateway-queue regions are skipped. See also
// DistributedCacheOperation.distribute().
if (!isUsedForMetaRegion() && !isUsedForPartitionedRegionAdmin() && !isUsedForPartitionedRegionBucket() && !isUsedForParallelGatewaySenderQueue()) {
FilterRoutingInfo localCqFrInfo = getFilterProfile().getFilterRoutingInfoPart1(event, FilterProfile.NO_PROFILES, Collections.emptySet());
FilterRoutingInfo localCqInterestFrInfo = getFilterProfile().getFilterRoutingInfoPart2(localCqFrInfo, event);
if (localCqInterestFrInfo != null) {
event.setLocalFilterInfo(localCqInterestFrInfo.getLocalFilterInfo());
}
}
if (destroyDiskRegion) {
DiskStoreImpl dsi = getDiskStore();
if (dsi != null && getDataPolicy().withPersistence()) {
dsi.removePersistentPR(getFullPath());
// Also remove the config from the parent (colocated-with) disk store,
// when it is a different store, since we are removing the region.
if (colocatedWithRegion != null && colocatedWithRegion.getDiskStore() != null && colocatedWithRegion.getDiskStore() != dsi) {
colocatedWithRegion.getDiskStore().removePersistentPR(getFullPath());
}
}
}
// Unlink this region from its colocation parent's child list.
if (colocatedWithRegion != null) {
colocatedWithRegion.getColocatedByList().remove(this);
}
RegionLogger.logDestroy(getName(), this.cache.getInternalDistributedSystem().getDistributedMember(), null, op.isClose());
}
Usage example of org.apache.geode.cache.Operation from the Apache Geode project: class Destroy65, method cmdExecute.
/**
 * Processes a client DESTROY (protocol version 6.5+) message: destroys a single entry, or for
 * a remove(key, expectedOldValue) performs a conditional remove, then writes a reply. Error
 * cases (bad parts, null key/region, missing region, security or destroy failures) write an
 * error/exception response and return early. Fixes applied in review: method-local
 * StringBuffer replaced with StringBuilder, unused CachedRegionHelper local removed,
 * commented-out debug code deleted, short-circuit {@code ||} used for the reply flag.
 *
 * @param clientMessage the message received from the client (parts: region name, key,
 *        expected old value, operation, event id, optional callback argument)
 * @param serverConnection the connection to the requesting client
 * @param start statistics timestamp taken when the request was read
 * @throws IOException if writing the response fails
 * @throws InterruptedException if the thread is interrupted while processing
 */
@Override
public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException, InterruptedException {
  Part regionNamePart;
  Part keyPart;
  Part callbackArgPart;
  Part eventPart;
  Part expectedOldValuePart;
  Object operation = null;
  Object expectedOldValue = null;
  String regionName = null;
  Object callbackArg = null, key = null;
  // Method-local buffer: StringBuilder avoids StringBuffer's needless synchronization.
  StringBuilder errMessage = new StringBuilder();
  CacheServerStats stats = serverConnection.getCacheServerStats();
  serverConnection.setAsTrue(REQUIRES_RESPONSE);
  long now = DistributionStats.getStatTime();
  stats.incReadDestroyRequestTime(now - start);
  // Retrieve the data from the message parts
  regionNamePart = clientMessage.getPart(0);
  keyPart = clientMessage.getPart(1);
  expectedOldValuePart = clientMessage.getPart(2);
  try {
    operation = clientMessage.getPart(3).getObject();
    // Only a remove(k,v) (Operation.REMOVE, or the raw OpType.DESTROY byte from older
    // clients) carries a meaningful expected-old-value part.
    if (((operation instanceof Operation) && ((Operation) operation == Operation.REMOVE)) || ((operation instanceof Byte) && (Byte) operation == OpType.DESTROY)) {
      expectedOldValue = expectedOldValuePart.getObject();
    }
  } catch (Exception e) {
    writeException(clientMessage, e, false, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  }
  eventPart = clientMessage.getPart(4);
  // Callback argument is optional; present only when the client sent a sixth part.
  if (clientMessage.getNumberOfParts() > 5) {
    callbackArgPart = clientMessage.getPart(5);
    try {
      callbackArg = callbackArgPart.getObject();
    } catch (Exception e) {
      writeException(clientMessage, e, false, serverConnection);
      serverConnection.setAsTrue(RESPONDED);
      return;
    }
  }
  regionName = regionNamePart.getString();
  try {
    key = keyPart.getStringOrObject();
  } catch (Exception e) {
    writeException(clientMessage, e, false, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  }
  if (logger.isDebugEnabled()) {
    logger.debug("{}: Received destroy65 request ({} bytes; op={}) from {} for region {} key {}{} txId {}", serverConnection.getName(), clientMessage.getPayloadLength(), operation, serverConnection.getSocketString(), regionName, key, (operation == Operation.REMOVE ? " value=" + expectedOldValue : ""), clientMessage.getTransactionId());
  }
  boolean entryNotFoundForRemove = false;
  // Process the destroy request: key and region name are mandatory.
  if (key == null || regionName == null) {
    if (key == null) {
      logger.warn(LocalizedMessage.create(LocalizedStrings.Destroy_0_THE_INPUT_KEY_FOR_THE_DESTROY_REQUEST_IS_NULL, serverConnection.getName()));
      errMessage.append(LocalizedStrings.Destroy__THE_INPUT_KEY_FOR_THE_DESTROY_REQUEST_IS_NULL.toLocalizedString());
    }
    if (regionName == null) {
      logger.warn(LocalizedMessage.create(LocalizedStrings.Destroy_0_THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REQUEST_IS_NULL, serverConnection.getName()));
      errMessage.append(LocalizedStrings.Destroy__THE_INPUT_REGION_NAME_FOR_THE_DESTROY_REQUEST_IS_NULL.toLocalizedString());
    }
    writeErrorResponse(clientMessage, MessageType.DESTROY_DATA_ERROR, errMessage.toString(), serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  }
  LocalRegion region = (LocalRegion) serverConnection.getCache().getRegion(regionName);
  if (region == null) {
    String reason = LocalizedStrings.Destroy__0_WAS_NOT_FOUND_DURING_DESTROY_REQUEST.toLocalizedString(regionName);
    writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  }
  // Destroy the entry. Rebuild the EventID from its optimized wire form
  // (threadId followed by sequenceId in the same byte array).
  ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
  long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
  long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
  EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
  EventIDHolder clientEvent = new EventIDHolder(eventId);
  Breadcrumbs.setEventId(eventId);
  // msg.isRetry might be set by v7.0 and later clients
  if (clientMessage.isRetry()) {
    clientEvent.setPossibleDuplicate(true);
    if (region.getAttributes().getConcurrencyChecksEnabled()) {
      // recover the version tag from other servers
      clientEvent.setRegion(region);
      if (!recoverVersionTagForRetriedOperation(clientEvent)) {
        // no-one has seen this event
        clientEvent.setPossibleDuplicate(false);
      }
    }
  }
  try {
    // for integrated security
    this.securityService.authorizeRegionWrite(regionName, key.toString());
    AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
    if (authzRequest != null) {
      // Dynamic-region list destroys are authorized as region destroys; the key is
      // the dynamic region's name.
      if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
        RegionDestroyOperationContext destroyContext = authzRequest.destroyRegionAuthorize((String) key, callbackArg);
        callbackArg = destroyContext.getCallbackArg();
      } else {
        DestroyOperationContext destroyContext = authzRequest.destroyAuthorize(regionName, key, callbackArg);
        callbackArg = destroyContext.getCallbackArg();
      }
    }
    if (operation == null || operation == Operation.DESTROY) {
      // Plain destroy (no expected-old-value check).
      region.basicBridgeDestroy(key, callbackArg, serverConnection.getProxyID(), true, clientEvent);
    } else {
      // remove(k,v): basicBridgeRemove throws if the expectedOldValue check fails
      try {
        if (expectedOldValue == null) {
          expectedOldValue = Token.INVALID;
        }
        if (operation == Operation.REMOVE && clientMessage.isRetry() && clientEvent.getVersionTag() != null) {
          // A version tag was recovered, so the earlier attempt succeeded.
          if (logger.isDebugEnabled()) {
            logger.debug("remove(k,v) operation was successful last time with version {}", clientEvent.getVersionTag());
          }
          // try the operation anyway to ensure that it's been distributed to all servers
          try {
            region.basicBridgeRemove(key, expectedOldValue, callbackArg, serverConnection.getProxyID(), true, clientEvent);
          } catch (EntryNotFoundException ignored) {
            // ignore, and don't set entryNotFoundForRemove because this was a successful
            // operation - bug #51664
          }
        } else {
          region.basicBridgeRemove(key, expectedOldValue, callbackArg, serverConnection.getProxyID(), true, clientEvent);
          if (logger.isDebugEnabled()) {
            logger.debug("region.remove succeeded");
          }
        }
      } catch (EntryNotFoundException e) {
        serverConnection.setModificationInfo(true, regionName, key);
        if (logger.isDebugEnabled()) {
          logger.debug("writing entryNotFound response");
        }
        entryNotFoundForRemove = true;
      }
    }
    serverConnection.setModificationInfo(true, regionName, key);
  } catch (EntryNotFoundException e) {
    // Don't send an exception back to the client if this
    // exception happens. Just log it and continue.
    logger.info(LocalizedMessage.create(LocalizedStrings.Destroy_0_DURING_ENTRY_DESTROY_NO_ENTRY_WAS_FOUND_FOR_KEY_1, new Object[] { serverConnection.getName(), key }));
    entryNotFoundForRemove = true;
  } catch (RegionDestroyedException rde) {
    writeException(clientMessage, rde, false, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  } catch (Exception e) {
    // If an interrupted exception is thrown , rethrow it
    checkForInterrupt(serverConnection, e);
    // If an exception occurs during the destroy, preserve the connection
    writeException(clientMessage, e, false, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    if (e instanceof GemFireSecurityException) {
      // logged by the security logger
      if (logger.isDebugEnabled()) {
        logger.debug("{}: Unexpected Security exception", serverConnection.getName(), e);
      }
    } else {
      logger.warn(LocalizedMessage.create(LocalizedStrings.Destroy_0_UNEXPECTED_EXCEPTION, serverConnection.getName()), e);
    }
    return;
  }
  // Update the statistics and write the reply
  now = DistributionStats.getStatTime();
  stats.incProcessDestroyTime(now - start);
  if (region instanceof PartitionedRegion) {
    PartitionedRegion pr = (PartitionedRegion) region;
    if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
      // Ask the client to refresh its PR metadata when the op required a network hop.
      writeReplyWithRefreshMetadata(clientMessage, serverConnection, pr, entryNotFoundForRemove, pr.getNetworkHopType(), clientEvent.getVersionTag());
      pr.clearNetworkHopData();
    } else {
      writeReply(clientMessage, serverConnection, entryNotFoundForRemove || clientEvent.getIsRedestroyedEntry(), clientEvent.getVersionTag());
    }
  } else {
    writeReply(clientMessage, serverConnection, entryNotFoundForRemove || clientEvent.getIsRedestroyedEntry(), clientEvent.getVersionTag());
  }
  serverConnection.setAsTrue(RESPONDED);
  if (logger.isDebugEnabled()) {
    logger.debug("{}: Sent destroy response for region {} key {}", serverConnection.getName(), regionName, key);
  }
  stats.incWriteDestroyResponseTime(DistributionStats.getStatTime() - start);
}
Usage example of org.apache.geode.cache.Operation from the Apache Geode project: class AbstractRegionMap, method txApplyPut.
/**
 * Applies a transactional put (create or update) to this region map. Three paths are taken in
 * order: (1) for remote-origin, non-TX-host, non-client-originator transactions where the
 * region does not see all events, update an existing entry only if present; (2) otherwise
 * retry-loop over an existing entry found via putEntryIfAbsent, updating it in place; (3) if
 * no usable existing entry, create a new one. Each path removes old index entries, applies
 * the value, updates sizes/stats, and queues or invokes callbacks.
 *
 * NOTE(review): statement ordering inside the synchronized blocks (index removal, version-tag
 * generation, setValue, size update, part2 callback) appears deliberate — do not reorder.
 *
 * @param p_putOp the incoming operation; may be converted to its create/update counterpart
 * @param key the entry key
 * @param nv the new value to apply
 * @param didDestroy true if the transaction destroyed the entry earlier
 * @param txId the transaction id; its member id determines remote vs. local origin
 * @param txEvent remote TX event to which the put is added, or null
 * @param eventId event id used for the callback event
 * @param aCallbackArgument callback argument passed to listeners
 * @param pendingCallbacks if non-null, callback events are queued here instead of invoked
 * @param filterRoutingInfo client filter routing for the callback event
 * @param bridgeContext client proxy membership id, if client-originated
 * @param txEntryState non-null when this member hosts the TX entry; receives the version tag
 * @param versionTag version tag to apply, or null
 * @param tailKey WAN shadow-key for parallel gateway senders
 */
public void txApplyPut(Operation p_putOp, Object key, Object nv, boolean didDestroy, TransactionId txId, TXRmtEvent txEvent, EventID eventId, Object aCallbackArgument, List<EntryEventImpl> pendingCallbacks, FilterRoutingInfo filterRoutingInfo, ClientProxyMembershipID bridgeContext, TXEntryState txEntryState, VersionTag versionTag, long tailKey) {
final LocalRegion owner = _getOwner();
if (owner == null) {
// "fix" for bug 32440
Assert.assertTrue(false, "The owner for RegionMap " + this + " is null");
}
Operation putOp = p_putOp;
Object newValue = nv;
// Origin/role flags that select which of the three apply paths runs.
final boolean hasRemoteOrigin = !((TXId) txId).getMemberId().equals(owner.getMyId());
final boolean isTXHost = txEntryState != null;
final boolean isClientTXOriginator = owner.cache.isClient() && !hasRemoteOrigin;
final boolean isRegionReady = owner.isInitialized();
@Released EntryEventImpl cbEvent = null;
boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady);
// When true the event is handed to pendingCallbacks and must not be released here.
boolean cbEventInPending = false;
cbEvent = createCBEvent(owner, putOp, key, newValue, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
try {
if (logger.isDebugEnabled()) {
logger.debug("txApplyPut cbEvent={}", cbEvent);
}
if (owner.isUsedForPartitionedRegionBucket()) {
newValue = EntryEventImpl.getCachedDeserializable(nv, cbEvent);
txHandleWANEvent(owner, cbEvent, txEntryState);
}
boolean opCompleted = false;
// Fix for Bug #44431. We do NOT want to update the region and wait
// later for index INIT as region.clear() can cause inconsistency if
// happened in parallel as it also does index INIT.
IndexManager oqlIndexManager = owner.getIndexManager();
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
try {
// Path 1: remote-origin update on a non-host, non-client-originator member.
if (hasRemoteOrigin && !isTXHost && !isClientTXOriginator) {
// Otherwise use the standard create/update logic
if (!owner.isAllEvents() || (!putOp.isCreate() && isRegionReady)) {
// At this point we should only apply the update if the entry exists
// Fix for bug 32347.
RegionEntry re = getEntry(key);
if (re != null) {
synchronized (re) {
if (!re.isRemoved()) {
opCompleted = true;
putOp = putOp.getCorrespondingUpdateOp();
// Net writers are not called for received transaction data
final int oldSize = owner.calculateRegionEntryValueSize(re);
if (cbEvent != null) {
cbEvent.setRegionEntry(re);
// OFFHEAP eei
cbEvent.setOldValue(re.getValueInVM(owner));
}
boolean clearOccured = false;
// Set RegionEntry updateInProgress
if (owner.indexMaintenanceSynchronous) {
re.setUpdateInProgress(true);
}
try {
txRemoveOldIndexEntry(putOp, re);
if (didDestroy) {
re.txDidDestroy(owner.cacheTimeMillis());
}
if (txEvent != null) {
txEvent.addPut(putOp, owner, re, re.getKey(), newValue, aCallbackArgument);
}
re.setValueResultOfSearch(putOp.isNetSearch());
try {
processAndGenerateTXVersionTag(owner, cbEvent, re, txEntryState);
{
re.setValue(owner, re.prepareValueForCache(owner, newValue, cbEvent, !putOp.isCreate()));
}
if (putOp.isCreate()) {
owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(re));
} else if (putOp.isUpdate()) {
// Rahul : fix for 41694. Negative bucket size can also be
// an issue with normal GFE Delta and will have to be fixed
// in a similar manner and may be this fix the the one for
// other delta can be combined.
{
owner.updateSizeOnPut(key, oldSize, owner.calculateRegionEntryValueSize(re));
}
}
} catch (RegionClearedException rce) {
// A concurrent region clear invalidated this work; skip stats/LRU updates below.
clearOccured = true;
}
{
long lastMod = owner.cacheTimeMillis();
EntryLogger.logTXPut(_getOwnerObject(), key, nv);
re.updateStatsForPut(lastMod, lastMod);
owner.txApplyPutPart2(re, re.getKey(), lastMod, false, didDestroy, clearOccured);
}
} finally {
if (re != null && owner.indexMaintenanceSynchronous) {
re.setUpdateInProgress(false);
}
}
if (invokeCallbacks) {
cbEvent.makeUpdate();
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(EnumListenerEvent.AFTER_UPDATE, cbEvent, hasRemoteOrigin);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
}
if (!clearOccured) {
lruEntryUpdate(re);
}
}
}
if (didDestroy && !opCompleted) {
owner.txApplyInvalidatePart2(re, re.getKey(), true, false);
}
}
if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
txEntryState.setVersionTag(cbEvent.getVersionTag());
}
return;
}
}
// Paths 2 and 3: update an existing entry if one is found, otherwise create one.
RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.REMOVED_PHASE1);
synchronized (newRe) {
try {
RegionEntry oldRe = putEntryIfAbsent(key, newRe);
// Retry loop: a concurrently removed (phase-2) entry is discarded and the
// putEntryIfAbsent is retried until we win or find a live entry.
while (!opCompleted && oldRe != null) {
synchronized (oldRe) {
if (oldRe.isRemovedPhase2()) {
owner.getCachePerfStats().incRetries();
_getMap().remove(key, oldRe);
oldRe = putEntryIfAbsent(key, newRe);
} else {
opCompleted = true;
if (!oldRe.isRemoved()) {
putOp = putOp.getCorrespondingUpdateOp();
}
// Net writers are not called for received transaction data
final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
final boolean oldIsRemoved = oldRe.isDestroyedOrRemoved();
if (cbEvent != null) {
cbEvent.setRegionEntry(oldRe);
// OFFHEAP eei
cbEvent.setOldValue(oldRe.getValueInVM(owner));
}
boolean clearOccured = false;
// Set RegionEntry updateInProgress
if (owner.indexMaintenanceSynchronous) {
oldRe.setUpdateInProgress(true);
}
try {
txRemoveOldIndexEntry(putOp, oldRe);
if (didDestroy) {
oldRe.txDidDestroy(owner.cacheTimeMillis());
}
if (txEvent != null) {
txEvent.addPut(putOp, owner, oldRe, oldRe.getKey(), newValue, aCallbackArgument);
}
oldRe.setValueResultOfSearch(putOp.isNetSearch());
try {
processAndGenerateTXVersionTag(owner, cbEvent, oldRe, txEntryState);
boolean wasTombstone = oldRe.isTombstone();
{
oldRe.setValue(owner, oldRe.prepareValueForCache(owner, newValue, cbEvent, !putOp.isCreate()));
if (wasTombstone) {
owner.unscheduleTombstone(oldRe);
}
}
if (putOp.isCreate()) {
owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(oldRe));
} else if (putOp.isUpdate()) {
// Rahul : fix for 41694. Negative bucket size can also be
// an issue with normal GFE Delta and will have to be fixed
// in a similar manner and may be this fix the the one for
// other delta can be combined.
{
owner.updateSizeOnPut(key, oldSize, owner.calculateRegionEntryValueSize(oldRe));
}
}
} catch (RegionClearedException rce) {
clearOccured = true;
}
{
long lastMod = owner.cacheTimeMillis();
EntryLogger.logTXPut(_getOwnerObject(), key, nv);
oldRe.updateStatsForPut(lastMod, lastMod);
owner.txApplyPutPart2(oldRe, oldRe.getKey(), lastMod, false, didDestroy, clearOccured);
}
} finally {
if (oldRe != null && owner.indexMaintenanceSynchronous) {
oldRe.setUpdateInProgress(false);
}
}
if (invokeCallbacks) {
if (!oldIsRemoved) {
cbEvent.makeUpdate();
}
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(cbEvent.op.isCreate() ? EnumListenerEvent.AFTER_CREATE : EnumListenerEvent.AFTER_UPDATE, cbEvent, true);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
}
if (!clearOccured) {
lruEntryUpdate(oldRe);
}
}
}
}
// Path 3: no existing entry won the race; newRe becomes the entry (create).
if (!opCompleted) {
putOp = putOp.getCorrespondingCreateOp();
if (cbEvent != null) {
cbEvent.setRegionEntry(newRe);
cbEvent.setOldValue(null);
}
boolean clearOccured = false;
// Set RegionEntry updateInProgress
if (owner.indexMaintenanceSynchronous) {
newRe.setUpdateInProgress(true);
}
try {
txRemoveOldIndexEntry(putOp, newRe);
// creating a new entry
if (didDestroy) {
newRe.txDidDestroy(owner.cacheTimeMillis());
}
if (txEvent != null) {
txEvent.addPut(putOp, owner, newRe, newRe.getKey(), newValue, aCallbackArgument);
}
newRe.setValueResultOfSearch(putOp.isNetSearch());
try {
processAndGenerateTXVersionTag(owner, cbEvent, newRe, txEntryState);
{
newRe.setValue(owner, newRe.prepareValueForCache(owner, newValue, cbEvent, !putOp.isCreate()));
}
owner.updateSizeOnCreate(newRe.getKey(), owner.calculateRegionEntryValueSize(newRe));
} catch (RegionClearedException rce) {
clearOccured = true;
}
{
long lastMod = owner.cacheTimeMillis();
EntryLogger.logTXPut(_getOwnerObject(), key, nv);
newRe.updateStatsForPut(lastMod, lastMod);
owner.txApplyPutPart2(newRe, newRe.getKey(), lastMod, true, didDestroy, clearOccured);
}
} finally {
if (newRe != null && owner.indexMaintenanceSynchronous) {
newRe.setUpdateInProgress(false);
}
}
opCompleted = true;
if (invokeCallbacks) {
cbEvent.makeCreate();
cbEvent.setOldValue(null);
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(EnumListenerEvent.AFTER_CREATE, cbEvent, true);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
}
if (!clearOccured) {
lruEntryCreate(newRe);
incEntryCount(1);
}
}
} finally {
// If nothing was applied, remove the placeholder entry we optimistically inserted.
if (!opCompleted) {
removeEntry(key, newRe, false);
}
}
}
if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
txEntryState.setVersionTag(cbEvent.getVersionTag());
}
} catch (DiskAccessException dae) {
owner.handleDiskAccessException(dae);
throw dae;
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
}
} finally {
// Release the (possibly off-heap) callback event unless ownership was transferred
// to the pendingCallbacks list.
if (!cbEventInPending)
cbEvent.release();
}
}
Usage example of org.apache.geode.cache.Operation from the Apache Geode project: class LocalRegion, method expireRegion.
/**
 * Carries out a region expiration (destroy or invalidate, distributed or local) triggered by
 * the given expiry task.
 *
 * @param regionExpiryTask the task requesting expiration; ignored if it is no longer the
 *        currently scheduled TTL/idle task for this region
 * @param distributed true to expire across the distributed system, false locally only
 * @param destroy true to destroy the region, false to invalidate it
 * @return true if the region expiry task should be rescheduled
 */
boolean expireRegion(RegionExpiryTask regionExpiryTask, boolean distributed, boolean destroy) {
  synchronized (this.regionExpiryLock) {
    if (regionExpiryTask instanceof RegionTTLExpiryTask) {
      if (regionExpiryTask != this.regionTTLExpiryTask) {
        // Stale TTL task — defer to the currently scheduled one.
        return false;
      }
      this.regionTTLExpiryTask = null;
    } else {
      if (regionExpiryTask != this.regionIdleExpiryTask) {
        // Stale idle task — defer to the currently scheduled one.
        return false;
      }
      this.regionIdleExpiryTask = null;
    }
    if (this.txRefCount > 0) {
      // Transactions still reference this region; skip expiration for now.
      return false;
    }
  }
  // release the sync before doing the operation to prevent deadlock caused by r48875
  final Operation expireOp;
  if (destroy) {
    expireOp = distributed ? Operation.REGION_EXPIRE_DESTROY : Operation.REGION_EXPIRE_LOCAL_DESTROY;
  } else {
    expireOp = distributed ? Operation.REGION_EXPIRE_INVALIDATE : Operation.REGION_EXPIRE_LOCAL_INVALIDATE;
  }
  RegionEventImpl expireEvent = new RegionEventImpl(this, expireOp, null, false, getMyId(), generateEventID());
  if (destroy) {
    basicDestroyRegion(expireEvent, distributed);
  } else {
    basicInvalidateRegion(expireEvent);
  }
  return true;
}
Usage example of org.apache.geode.cache.Operation from the Apache Geode project: class LocalRegion, method txApplyPutPart2.
/**
 * Region-level bookkeeping after a transactional put has been applied to a region entry:
 * notifies the test hook, updates create stats, maintains OQL indexes, drops the entry's
 * user attribute if the TX destroyed it earlier, (re)adds expiry tasks, and records the
 * last-modified time.
 *
 * @param regionEntry the entry that was just created or updated
 * @param key the entry key
 * @param lastModified modification timestamp to record on the region
 * @param isCreate true if the put created the entry, false if it updated it
 * @param didDestroy true if the transaction destroyed the entry before this put
 * @param clearConflict true if a concurrent region clear invalidated the put's effects
 */
void txApplyPutPart2(RegionEntry regionEntry, Object key, long lastModified, boolean isCreate, boolean didDestroy, boolean clearConflict) {
  if (this.testCallable != null) {
    this.testCallable.call(this, isCreate ? Operation.CREATE : Operation.UPDATE, regionEntry);
  }
  if (isCreate) {
    updateStatsForCreate();
  }
  // Proxies hold no data and a clear conflict voids the put, so skip index maintenance then.
  if (!isProxy() && !clearConflict && this.indexManager != null) {
    try {
      this.indexManager.updateIndexes(regionEntry,
          isCreate ? IndexManager.ADD_ENTRY : IndexManager.UPDATE_ENTRY,
          isCreate ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
    } catch (QueryException e) {
      throw new IndexMaintenanceException(e);
    }
  }
  if (didDestroy && this.entryUserAttributes != null) {
    this.entryUserAttributes.remove(key);
  }
  if (this.statisticsEnabled && !clearConflict) {
    addExpiryTaskIfAbsent(regionEntry);
  }
  setLastModifiedTime(lastModified);
}
End of aggregated usage examples for org.apache.geode.cache.Operation.