use of org.apache.geode.cache.query.internal.index.IndexManager in project geode by apache.
the class ExecutionContext method addToIndependentRuntimeItrMap.
/**
* This function populates the Map itrDefToIndpndtRuntimeItrMap. It creates a Set of the
* RuntimeIterators on which the current CompiledIteratorDef depends. It also sets the
* index_internal_id for the RuntimeIterator, which is used when calculating the canonicalized
* iterator definitions for identifying the available index.
*
* @param itrDef CompiledIteratorDef object representing an iterator in the query's FROM clause
*/
public void addToIndependentRuntimeItrMap(CompiledIteratorDef itrDef) throws AmbiguousNameException, TypeMismatchException, NameResolutionException {
Set set = new HashSet();
this.computeUltimateDependencies(itrDef, set);
RuntimeIterator itr = null;
String rgnPath = null;
// If the set is empty, add the RuntimeIterator itself to the Map.
if (set.isEmpty()) {
itr = itrDef.getRuntimeIterator(this);
set.add(itr);
// Since it is an independent RuntimeIterator, check whether its collection expression boils
// down to a Region. If it does, we need to store the QRegion in the Map.
CompiledValue startVal = QueryUtils.obtainTheBottomMostCompiledValue(itrDef.getCollectionExpr());
if (startVal.getType() == OQLLexerTokenTypes.RegionPath) {
rgnPath = ((QRegion) ((CompiledRegion) startVal).evaluate(this)).getFullPath();
this.indpndtItrToRgnMap.put(itr, rgnPath);
} else if (startVal.getType() == OQLLexerTokenTypes.QUERY_PARAM) {
Object rgn;
CompiledBindArgument cba = (CompiledBindArgument) startVal;
if ((rgn = cba.evaluate(this)) instanceof Region) {
this.indpndtItrToRgnMap.put(itr, rgnPath = ((Region) rgn).getFullPath());
}
}
}
this.itrDefToIndpndtRuntimeItrMap.put(itrDef, set);
IndexManager mgr = null;
// Set the canonicalized index_internal_id if the condition is satisfied
if (set.size() == 1) {
if (itr == null) {
itr = (RuntimeIterator) set.iterator().next();
if (itr.getScopeID() == this.currentScope().getScopeID()) {
rgnPath = (String) this.indpndtItrToRgnMap.get(itr);
}
}
if (rgnPath != null) {
mgr = IndexUtils.getIndexManager(this.cache.getRegion(rgnPath), false);
// Check for null; if the manager is null, see whether we will be executing on a bucket region.
if ((null == mgr) && (null != this.bukRgn)) {
// for bucket region index use
mgr = IndexUtils.getIndexManager(this.cache.getRegion(this.bukRgn.getFullPath()), false);
}
}
}
String tempIndexID = null;
RuntimeIterator currItr = itrDef.getRuntimeIterator(this);
currItr.setIndexInternalID(
    (mgr == null
        || (tempIndexID = mgr.getCanonicalizedIteratorName(itrDef.genFromClause(this))) == null)
            ? currItr.getInternalId()
            : tempIndexID);
}
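For orientation, a minimal sketch of the lookup pattern this method relies on: resolve the region's IndexManager through IndexUtils and ask it to canonicalize a FROM-clause definition. The region path and helper class here are illustrative assumptions, not part of the source above.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.query.internal.index.IndexManager;
import org.apache.geode.cache.query.internal.index.IndexUtils;

class CanonicalIteratorNameSketch {
  // Returns the canonicalized iterator name for a FROM-clause definition,
  // or null when the region has no IndexManager.
  static String canonicalName(Cache cache, String regionPath, String fromClauseDefinition) {
    // regionPath is a hypothetical example, e.g. "/portfolios"
    Region<?, ?> region = cache.getRegion(regionPath);
    // second argument false: do not create an IndexManager if none exists yet
    IndexManager mgr = IndexUtils.getIndexManager(region, false);
    return mgr == null ? null : mgr.getCanonicalizedIteratorName(fromClauseDefinition);
  }
}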
use of org.apache.geode.cache.query.internal.index.IndexManager in project geode by apache.
the class AbstractRegionMap method txApplyInvalidate.
public void txApplyInvalidate(Object key, Object newValue, boolean didDestroy, TransactionId txId, TXRmtEvent txEvent, boolean localOp, EventID eventId, Object aCallbackArgument, List<EntryEventImpl> pendingCallbacks, FilterRoutingInfo filterRoutingInfo, ClientProxyMembershipID bridgeContext, TXEntryState txEntryState, VersionTag versionTag, long tailKey) {
// boolean didInvalidate = false;
final LocalRegion owner = _getOwner();
@Released EntryEventImpl cbEvent = null;
boolean forceNewEntry = !owner.isInitialized() && owner.isAllEvents();
final boolean hasRemoteOrigin = !((TXId) txId).getMemberId().equals(owner.getMyId());
DiskRegion dr = owner.getDiskRegion();
// Fix for Bug #44431. We do NOT want to update the region and wait
// later for index INIT, as region.clear() can cause inconsistency if
// it happens in parallel, since it also does index INIT.
IndexManager oqlIndexManager = owner.getIndexManager();
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
try {
if (forceNewEntry) {
boolean opCompleted = false;
RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.REMOVED_PHASE1);
synchronized (newRe) {
try {
RegionEntry oldRe = putEntryIfAbsent(key, newRe);
while (!opCompleted && oldRe != null) {
synchronized (oldRe) {
if (oldRe.isRemovedPhase2()) {
owner.getCachePerfStats().incRetries();
_getMap().remove(key, oldRe);
oldRe = putEntryIfAbsent(key, newRe);
} else {
opCompleted = true;
final boolean oldWasTombstone = oldRe.isTombstone();
final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
// OFFHEAP eei
Object oldValue = oldRe.getValueInVM(owner);
// Create an entry event only if the calling context is the receipt of a
// TXCommitMessage AND there are callbacks installed for this region
boolean invokeCallbacks = shouldCreateCBEvent(owner, owner.isInitialized());
boolean cbEventInPending = false;
cbEvent = createCBEvent(owner, localOp ? Operation.LOCAL_INVALIDATE : Operation.INVALIDATE, key, newValue, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
try {
cbEvent.setRegionEntry(oldRe);
cbEvent.setOldValue(oldValue);
if (logger.isDebugEnabled()) {
logger.debug("txApplyInvalidate cbEvent={}", cbEvent);
}
txRemoveOldIndexEntry(Operation.INVALIDATE, oldRe);
if (didDestroy) {
oldRe.txDidDestroy(owner.cacheTimeMillis());
}
if (txEvent != null) {
txEvent.addInvalidate(owner, oldRe, oldRe.getKey(), newValue, aCallbackArgument);
}
oldRe.setValueResultOfSearch(false);
processAndGenerateTXVersionTag(owner, cbEvent, oldRe, txEntryState);
boolean clearOccured = false;
try {
oldRe.setValue(owner, oldRe.prepareValueForCache(owner, newValue, true));
EntryLogger.logTXInvalidate(_getOwnerObject(), key);
owner.updateSizeOnPut(key, oldSize, 0);
if (oldWasTombstone) {
owner.unscheduleTombstone(oldRe);
}
} catch (RegionClearedException rce) {
clearOccured = true;
}
owner.txApplyInvalidatePart2(oldRe, oldRe.getKey(), didDestroy, true);
// didInvalidate = true;
if (invokeCallbacks) {
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(EnumListenerEvent.AFTER_INVALIDATE, cbEvent, true);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
}
if (!clearOccured) {
lruEntryUpdate(oldRe);
}
if (shouldPerformConcurrencyChecks(owner, cbEvent) && txEntryState != null) {
txEntryState.setVersionTag(cbEvent.getVersionTag());
}
} finally {
if (!cbEventInPending)
cbEvent.release();
}
}
}
}
if (!opCompleted) {
boolean invokeCallbacks = shouldCreateCBEvent(owner, owner.isInitialized());
boolean cbEventInPending = false;
cbEvent = createCBEvent(owner, localOp ? Operation.LOCAL_INVALIDATE : Operation.INVALIDATE, key, newValue, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
try {
cbEvent.setRegionEntry(newRe);
txRemoveOldIndexEntry(Operation.INVALIDATE, newRe);
newRe.setValueResultOfSearch(false);
boolean clearOccured = false;
try {
processAndGenerateTXVersionTag(owner, cbEvent, newRe, txEntryState);
newRe.setValue(owner, newRe.prepareValueForCache(owner, newValue, true));
EntryLogger.logTXInvalidate(_getOwnerObject(), key);
// we are putting in a new invalidated entry
owner.updateSizeOnCreate(newRe.getKey(), 0);
} catch (RegionClearedException rce) {
clearOccured = true;
}
owner.txApplyInvalidatePart2(newRe, newRe.getKey(), didDestroy, true);
if (invokeCallbacks) {
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(EnumListenerEvent.AFTER_INVALIDATE, cbEvent, true);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
}
opCompleted = true;
if (!clearOccured) {
lruEntryCreate(newRe);
incEntryCount(1);
}
if (shouldPerformConcurrencyChecks(owner, cbEvent) && txEntryState != null) {
txEntryState.setVersionTag(cbEvent.getVersionTag());
}
} finally {
if (!cbEventInPending)
cbEvent.release();
}
}
} finally {
if (!opCompleted) {
removeEntry(key, newRe, false);
}
}
}
} else {
/* !forceNewEntry */
RegionEntry re = getEntry(key);
if (re != null) {
synchronized (re) {
final int oldSize = owner.calculateRegionEntryValueSize(re);
boolean wasTombstone = re.isTombstone();
// OFFHEAP eei
Object oldValue = re.getValueInVM(owner);
// Create an entry event only if the calling context is the receipt of a
// TXCommitMessage AND there are callbacks installed for this region
boolean invokeCallbacks = shouldCreateCBEvent(owner, owner.isInitialized());
boolean cbEventInPending = false;
cbEvent = createCBEvent(owner, localOp ? Operation.LOCAL_INVALIDATE : Operation.INVALIDATE, key, newValue, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
try {
cbEvent.setRegionEntry(re);
cbEvent.setOldValue(oldValue);
txRemoveOldIndexEntry(Operation.INVALIDATE, re);
if (didDestroy) {
re.txDidDestroy(owner.cacheTimeMillis());
}
if (txEvent != null) {
txEvent.addInvalidate(owner, re, re.getKey(), newValue, aCallbackArgument);
}
re.setValueResultOfSearch(false);
processAndGenerateTXVersionTag(owner, cbEvent, re, txEntryState);
boolean clearOccured = false;
try {
re.setValue(owner, re.prepareValueForCache(owner, newValue, true));
EntryLogger.logTXInvalidate(_getOwnerObject(), key);
if (wasTombstone) {
owner.unscheduleTombstone(re);
}
owner.updateSizeOnPut(key, oldSize, 0);
} catch (RegionClearedException rce) {
clearOccured = true;
}
owner.txApplyInvalidatePart2(re, re.getKey(), didDestroy, true);
// didInvalidate = true;
if (invokeCallbacks) {
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(EnumListenerEvent.AFTER_INVALIDATE, cbEvent, true);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
}
if (!clearOccured) {
lruEntryUpdate(re);
}
if (shouldPerformConcurrencyChecks(owner, cbEvent) && txEntryState != null) {
txEntryState.setVersionTag(cbEvent.getVersionTag());
}
} finally {
if (!cbEventInPending)
cbEvent.release();
}
}
} else {
// re == null
// Fix bug#43594
// In cases where the bucket region is re-created, the invalidate may already
// have been applied on the initial-image provider, causing the region entry
// to be absent. Notify clients with client events.
boolean cbEventInPending = false;
cbEvent = createCBEvent(owner, localOp ? Operation.LOCAL_INVALIDATE : Operation.INVALIDATE, key, newValue, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
try {
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(EnumListenerEvent.AFTER_INVALIDATE, cbEvent, false);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
} finally {
if (!cbEventInPending)
cbEvent.release();
}
}
}
} catch (DiskAccessException dae) {
owner.handleDiskAccessException(dae);
throw dae;
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
}
}
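The waitForIndexInit()/countDownIndexUpdaters() bracketing seen above recurs in every mutation path of AbstractRegionMap. A minimal sketch of the pattern, with a Runnable standing in for the entry update (an assumption for illustration):

import org.apache.geode.cache.query.internal.index.IndexManager;

class IndexGuardSketch {
  // Brackets a region-map mutation so it cannot interleave with an index
  // (re)initialization triggered by region.clear() (the Bug #44431 fix above).
  static void mutateWithIndexGuard(IndexManager oqlIndexManager, Runnable mutation) {
    if (oqlIndexManager != null) {
      oqlIndexManager.waitForIndexInit(); // block while an index INIT is in progress
    }
    try {
      mutation.run(); // the actual entry update
    } finally {
      if (oqlIndexManager != null) {
        oqlIndexManager.countDownIndexUpdaters(); // allow a pending INIT to proceed
      }
    }
  }
}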
use of org.apache.geode.cache.query.internal.index.IndexManager in project geode by apache.
the class AbstractRegionMap method txApplyPut.
public void txApplyPut(Operation p_putOp, Object key, Object nv, boolean didDestroy, TransactionId txId, TXRmtEvent txEvent, EventID eventId, Object aCallbackArgument, List<EntryEventImpl> pendingCallbacks, FilterRoutingInfo filterRoutingInfo, ClientProxyMembershipID bridgeContext, TXEntryState txEntryState, VersionTag versionTag, long tailKey) {
final LocalRegion owner = _getOwner();
if (owner == null) {
// "fix" for bug 32440
Assert.assertTrue(false, "The owner for RegionMap " + this + " is null");
}
Operation putOp = p_putOp;
Object newValue = nv;
final boolean hasRemoteOrigin = !((TXId) txId).getMemberId().equals(owner.getMyId());
final boolean isTXHost = txEntryState != null;
final boolean isClientTXOriginator = owner.cache.isClient() && !hasRemoteOrigin;
final boolean isRegionReady = owner.isInitialized();
@Released EntryEventImpl cbEvent = null;
boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady);
boolean cbEventInPending = false;
cbEvent = createCBEvent(owner, putOp, key, newValue, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
try {
if (logger.isDebugEnabled()) {
logger.debug("txApplyPut cbEvent={}", cbEvent);
}
if (owner.isUsedForPartitionedRegionBucket()) {
newValue = EntryEventImpl.getCachedDeserializable(nv, cbEvent);
txHandleWANEvent(owner, cbEvent, txEntryState);
}
boolean opCompleted = false;
// Fix for Bug #44431. We do NOT want to update the region and wait
// later for index INIT, as region.clear() can cause inconsistency if
// it happens in parallel, since it also does index INIT.
IndexManager oqlIndexManager = owner.getIndexManager();
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
try {
if (hasRemoteOrigin && !isTXHost && !isClientTXOriginator) {
// Otherwise use the standard create/update logic
if (!owner.isAllEvents() || (!putOp.isCreate() && isRegionReady)) {
// At this point we should only apply the update if the entry exists
// Fix for bug 32347.
RegionEntry re = getEntry(key);
if (re != null) {
synchronized (re) {
if (!re.isRemoved()) {
opCompleted = true;
putOp = putOp.getCorrespondingUpdateOp();
// Net writers are not called for received transaction data
final int oldSize = owner.calculateRegionEntryValueSize(re);
if (cbEvent != null) {
cbEvent.setRegionEntry(re);
// OFFHEAP eei
cbEvent.setOldValue(re.getValueInVM(owner));
}
boolean clearOccured = false;
// Set RegionEntry updateInProgress
if (owner.indexMaintenanceSynchronous) {
re.setUpdateInProgress(true);
}
try {
txRemoveOldIndexEntry(putOp, re);
if (didDestroy) {
re.txDidDestroy(owner.cacheTimeMillis());
}
if (txEvent != null) {
txEvent.addPut(putOp, owner, re, re.getKey(), newValue, aCallbackArgument);
}
re.setValueResultOfSearch(putOp.isNetSearch());
try {
processAndGenerateTXVersionTag(owner, cbEvent, re, txEntryState);
re.setValue(owner, re.prepareValueForCache(owner, newValue, cbEvent, !putOp.isCreate()));
if (putOp.isCreate()) {
owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(re));
} else if (putOp.isUpdate()) {
// Rahul: fix for 41694. A negative bucket size can also be an issue with
// normal GFE delta and will have to be fixed in a similar manner; maybe
// this fix and the one for the other delta can be combined.
owner.updateSizeOnPut(key, oldSize, owner.calculateRegionEntryValueSize(re));
}
} catch (RegionClearedException rce) {
clearOccured = true;
}
{
long lastMod = owner.cacheTimeMillis();
EntryLogger.logTXPut(_getOwnerObject(), key, nv);
re.updateStatsForPut(lastMod, lastMod);
owner.txApplyPutPart2(re, re.getKey(), lastMod, false, didDestroy, clearOccured);
}
} finally {
if (re != null && owner.indexMaintenanceSynchronous) {
re.setUpdateInProgress(false);
}
}
if (invokeCallbacks) {
cbEvent.makeUpdate();
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(EnumListenerEvent.AFTER_UPDATE, cbEvent, hasRemoteOrigin);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
}
if (!clearOccured) {
lruEntryUpdate(re);
}
}
}
if (didDestroy && !opCompleted) {
owner.txApplyInvalidatePart2(re, re.getKey(), true, false);
}
}
if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
txEntryState.setVersionTag(cbEvent.getVersionTag());
}
return;
}
}
RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.REMOVED_PHASE1);
synchronized (newRe) {
try {
RegionEntry oldRe = putEntryIfAbsent(key, newRe);
while (!opCompleted && oldRe != null) {
synchronized (oldRe) {
if (oldRe.isRemovedPhase2()) {
owner.getCachePerfStats().incRetries();
_getMap().remove(key, oldRe);
oldRe = putEntryIfAbsent(key, newRe);
} else {
opCompleted = true;
if (!oldRe.isRemoved()) {
putOp = putOp.getCorrespondingUpdateOp();
}
// Net writers are not called for received transaction data
final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
final boolean oldIsRemoved = oldRe.isDestroyedOrRemoved();
if (cbEvent != null) {
cbEvent.setRegionEntry(oldRe);
// OFFHEAP eei
cbEvent.setOldValue(oldRe.getValueInVM(owner));
}
boolean clearOccured = false;
// Set RegionEntry updateInProgress
if (owner.indexMaintenanceSynchronous) {
oldRe.setUpdateInProgress(true);
}
try {
txRemoveOldIndexEntry(putOp, oldRe);
if (didDestroy) {
oldRe.txDidDestroy(owner.cacheTimeMillis());
}
if (txEvent != null) {
txEvent.addPut(putOp, owner, oldRe, oldRe.getKey(), newValue, aCallbackArgument);
}
oldRe.setValueResultOfSearch(putOp.isNetSearch());
try {
processAndGenerateTXVersionTag(owner, cbEvent, oldRe, txEntryState);
boolean wasTombstone = oldRe.isTombstone();
{
oldRe.setValue(owner, oldRe.prepareValueForCache(owner, newValue, cbEvent, !putOp.isCreate()));
if (wasTombstone) {
owner.unscheduleTombstone(oldRe);
}
}
if (putOp.isCreate()) {
owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(oldRe));
} else if (putOp.isUpdate()) {
// Rahul: fix for 41694. A negative bucket size can also be an issue with
// normal GFE delta and will have to be fixed in a similar manner; maybe
// this fix and the one for the other delta can be combined.
owner.updateSizeOnPut(key, oldSize, owner.calculateRegionEntryValueSize(oldRe));
}
} catch (RegionClearedException rce) {
clearOccured = true;
}
{
long lastMod = owner.cacheTimeMillis();
EntryLogger.logTXPut(_getOwnerObject(), key, nv);
oldRe.updateStatsForPut(lastMod, lastMod);
owner.txApplyPutPart2(oldRe, oldRe.getKey(), lastMod, false, didDestroy, clearOccured);
}
} finally {
if (oldRe != null && owner.indexMaintenanceSynchronous) {
oldRe.setUpdateInProgress(false);
}
}
if (invokeCallbacks) {
if (!oldIsRemoved) {
cbEvent.makeUpdate();
}
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(cbEvent.op.isCreate() ? EnumListenerEvent.AFTER_CREATE : EnumListenerEvent.AFTER_UPDATE, cbEvent, true);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
}
if (!clearOccured) {
lruEntryUpdate(oldRe);
}
}
}
}
if (!opCompleted) {
putOp = putOp.getCorrespondingCreateOp();
if (cbEvent != null) {
cbEvent.setRegionEntry(newRe);
cbEvent.setOldValue(null);
}
boolean clearOccured = false;
// Set RegionEntry updateInProgress
if (owner.indexMaintenanceSynchronous) {
newRe.setUpdateInProgress(true);
}
try {
txRemoveOldIndexEntry(putOp, newRe);
// creating a new entry
if (didDestroy) {
newRe.txDidDestroy(owner.cacheTimeMillis());
}
if (txEvent != null) {
txEvent.addPut(putOp, owner, newRe, newRe.getKey(), newValue, aCallbackArgument);
}
newRe.setValueResultOfSearch(putOp.isNetSearch());
try {
processAndGenerateTXVersionTag(owner, cbEvent, newRe, txEntryState);
newRe.setValue(owner, newRe.prepareValueForCache(owner, newValue, cbEvent, !putOp.isCreate()));
owner.updateSizeOnCreate(newRe.getKey(), owner.calculateRegionEntryValueSize(newRe));
} catch (RegionClearedException rce) {
clearOccured = true;
}
{
long lastMod = owner.cacheTimeMillis();
EntryLogger.logTXPut(_getOwnerObject(), key, nv);
newRe.updateStatsForPut(lastMod, lastMod);
owner.txApplyPutPart2(newRe, newRe.getKey(), lastMod, true, didDestroy, clearOccured);
}
} finally {
if (newRe != null && owner.indexMaintenanceSynchronous) {
newRe.setUpdateInProgress(false);
}
}
opCompleted = true;
if (invokeCallbacks) {
cbEvent.makeCreate();
cbEvent.setOldValue(null);
switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
if (pendingCallbacks == null) {
owner.invokeTXCallbacks(EnumListenerEvent.AFTER_CREATE, cbEvent, true);
} else {
pendingCallbacks.add(cbEvent);
cbEventInPending = true;
}
}
if (!clearOccured) {
lruEntryCreate(newRe);
incEntryCount(1);
}
}
} finally {
if (!opCompleted) {
removeEntry(key, newRe, false);
}
}
}
if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
txEntryState.setVersionTag(cbEvent.getVersionTag());
}
} catch (DiskAccessException dae) {
owner.handleDiskAccessException(dae);
throw dae;
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
}
} finally {
if (!cbEventInPending)
cbEvent.release();
}
}
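Both transactional methods above also share a callback-event lifecycle: the @Released cbEvent must be released exactly once, either immediately or, when queued on pendingCallbacks, by whichever code later drains that list. A condensed sketch under that assumption:

import java.util.List;
import org.apache.geode.internal.cache.EntryEventImpl;

class CallbackEventSketch {
  static void deliverOrQueue(EntryEventImpl cbEvent, List<EntryEventImpl> pendingCallbacks,
      Runnable invokeTxCallbacksNow) {
    boolean cbEventInPending = false;
    try {
      if (pendingCallbacks == null) {
        invokeTxCallbacksNow.run(); // deliver the TX callback immediately
      } else {
        pendingCallbacks.add(cbEvent); // defer; whoever drains the list releases it
        cbEventInPending = true;
      }
    } finally {
      if (!cbEventInPending) {
        cbEvent.release(); // off-heap references must be freed exactly once
      }
    }
  }
}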
use of org.apache.geode.cache.query.internal.index.IndexManager in project geode by apache.
the class AbstractRegionMap method basicPut.
/*
* returns null if the operation fails
*/
public RegionEntry basicPut(EntryEventImpl event, final long lastModified, final boolean ifNew,
    final boolean ifOld, Object expectedOldValue, // only non-null if ifOld
    boolean requireOldValue, final boolean overwriteDestroyed) throws CacheWriterException, TimeoutException {
final LocalRegion owner = _getOwner();
boolean clearOccured = false;
if (owner == null) {
// "fix" for bug 32440
Assert.assertTrue(false, "The owner for RegionMap " + this + " is null for event " + event);
}
if (logger.isTraceEnabled(LogMarker.LRU_TOMBSTONE_COUNT) && !(owner instanceof HARegion)) {
logger.trace(LogMarker.LRU_TOMBSTONE_COUNT, "ARM.basicPut called for {} expectedOldValue={} requireOldValue={} ifNew={} ifOld={} initialized={} overwriteDestroyed={}", event, expectedOldValue, requireOldValue, ifNew, ifOld, owner.isInitialized(), overwriteDestroyed);
}
RegionEntry result = null;
long lastModifiedTime = 0;
// copy into local var to prevent race condition with setter
final CacheWriter cacheWriter = owner.basicGetWriter();
final boolean cacheWrite = !event.isOriginRemote() && !event.isNetSearch() && event.isGenerateCallbacks() && (cacheWriter != null || owner.hasServerProxy() || owner.scope.isDistributed());
/*
* For performance reason, we try to minimize object creation and do as much work as we can
* outside of synchronization, especially getting distribution advice.
*/
final Set netWriteRecipients;
if (cacheWrite) {
if (cacheWriter == null && owner.scope.isDistributed()) {
netWriteRecipients = ((DistributedRegion) owner).getCacheDistributionAdvisor().adviseNetWrite();
} else {
netWriteRecipients = null;
}
} else {
netWriteRecipients = null;
}
// mbid: this has been added to maintain consistency between the disk region
// and the region map after clear() has been called. It sets the reference of
// the diskSegmentRegion as a ThreadLocal so that if the diskRegionSegment is
// later changed by another thread, we can take the necessary action.
boolean uninitialized = !owner.isInitialized();
boolean retrieveOldValueForDelta = event.getDeltaBytes() != null && event.getRawNewValue() == null;
IndexManager oqlIndexManager = null;
lockForCacheModification(owner, event);
try {
try {
// Fix for Bug #44431. We do NOT want to update the region and wait
// later for index INIT, as region.clear() can cause inconsistency if
// it happens in parallel, since it also does index INIT.
oqlIndexManager = owner.getIndexManager();
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
// fix for bug #42169, replace must go to server if entry not on client
boolean replaceOnClient = event.getOperation() == Operation.REPLACE && owner.getServerProxy() != null;
// Rather than having two different blocks for synchronizing oldRe
// and newRe, have only one block and synchronize re
RegionEntry re = null;
boolean eventRecorded = false;
boolean onlyExisting = ifOld && !replaceOnClient;
re = getOrCreateRegionEntry(owner, event, Token.REMOVED_PHASE1, null, onlyExisting, false);
if (re == null) {
return null;
}
while (true) {
synchronized (re) {
// check the entry's state under synchronization; if it was removed concurrently, retry
if (re.isRemovedPhase2()) {
_getOwner().getCachePerfStats().incRetries();
_getMap().remove(event.getKey(), re);
re = getOrCreateRegionEntry(owner, event, Token.REMOVED_PHASE1, null, onlyExisting, false);
if (re == null) {
// this will happen when onlyExisting is true
return null;
}
continue;
} else {
@Released Object oldValueForDelta = null;
if (retrieveOldValueForDelta) {
// defer the lruUpdateCallback to prevent a deadlock (see bug 51121).
final boolean disabled = disableLruUpdateCallback();
try {
// Old value is faulted in from disk if not found in memory.
// OFFHEAP: no issue if we are synced on the entry, since we can use the ARE's ref.
oldValueForDelta = re.getValue(owner);
} finally {
if (disabled) {
enableLruUpdateCallback();
}
}
}
try {
event.setRegionEntry(re);
// set old value in event
setOldValueInEvent(event, re, cacheWrite, requireOldValue);
if (!continueUpdate(re, event, ifOld, replaceOnClient)) {
return null;
}
// overwrite destroyed?
if (!continueOverwriteDestroyed(re, event, overwriteDestroyed, ifNew)) {
return null;
}
// check expectedOldValue
if (!satisfiesExpectedOldValue(event, re, expectedOldValue, replaceOnClient)) {
return null;
}
// invoke cacheWriter
invokeCacheWriter(re, event, cacheWrite, cacheWriter, netWriteRecipients, requireOldValue, expectedOldValue, replaceOnClient);
// notify index of an update
notifyIndex(re, true);
try {
try {
// Update when this is a cache-write update, when the entry already exists, or
// when this is a client replace; otherwise create.
if ((cacheWrite && event.getOperation().isUpdate()) || !re.isRemoved() || replaceOnClient) {
// update
updateEntry(event, requireOldValue, oldValueForDelta, re);
} else {
// create
createEntry(event, owner, re);
}
owner.recordEvent(event);
eventRecorded = true;
} catch (RegionClearedException rce) {
clearOccured = true;
owner.recordEvent(event);
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
if (uninitialized) {
event.inhibitCacheListenerNotification(true);
}
updateLru(clearOccured, re, event);
lastModifiedTime = owner.basicPutPart2(event, re, !uninitialized, lastModifiedTime, clearOccured);
} finally {
notifyIndex(re, false);
}
result = re;
break;
} finally {
OffHeapHelper.release(oldValueForDelta);
if (re != null && !onlyExisting && !isOpComplete(re, event)) {
owner.cleanUpOnIncompleteOp(event, re);
} else if (re != null && owner.isUsedForPartitionedRegionBucket()) {
BucketRegion br = (BucketRegion) owner;
CachePerfStats stats = br.getPartitionedRegion().getCachePerfStats();
}
}
// try
}
}
// sync re
}
// end while
} catch (DiskAccessException dae) {
// Asif: I feel that it is safe to destroy the region here, as there appears
// to be no chance of deadlock during region destruction
result = null;
this._getOwner().handleDiskAccessException(dae);
throw dae;
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
if (result != null) {
try {
// Note we do distribution after releasing all sync to avoid deadlock
final boolean invokeListeners = event.basicGetNewValue() != Token.TOMBSTONE;
owner.basicPutPart3(event, result, !uninitialized, lastModifiedTime, invokeListeners, ifNew, ifOld, expectedOldValue, requireOldValue);
} finally {
// for any recipients
if (!clearOccured) {
try {
lruUpdateCallback();
} catch (DiskAccessException dae) {
// Asif: I feel that it is safe to destroy the region here, as there appears
// to be no chance of deadlock during region destruction
result = null;
this._getOwner().handleDiskAccessException(dae);
throw dae;
}
}
}
// finally
} else {
resetThreadLocals();
}
}
} finally {
releaseCacheModificationLock(owner, event);
}
return result;
}
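The putEntryIfAbsent loops above all follow one idiom: an entry found in REMOVED_PHASE2 state is a leftover from a concurrent destroy, so it is purged from the map and the insert is retried. A generic sketch with stand-in types (the real code uses RegionEntry and the internal region map):

import java.util.concurrent.ConcurrentHashMap;

class RetryLoopSketch<K> {
  interface Entry { boolean isRemovedPhase2(); }

  // Returns the entry that ends up in the map for this key: either a live
  // pre-existing entry or newEntry once all phase-2 leftovers are purged.
  Entry insertOrReuse(ConcurrentHashMap<K, Entry> map, K key, Entry newEntry) {
    Entry oldEntry = map.putIfAbsent(key, newEntry);
    while (oldEntry != null) {
      synchronized (oldEntry) {
        if (oldEntry.isRemovedPhase2()) {
          map.remove(key, oldEntry); // purge the dead entry and retry the insert
          oldEntry = map.putIfAbsent(key, newEntry);
        } else {
          return oldEntry; // reuse the live entry (callers work under its lock)
        }
      }
    }
    return newEntry; // our new entry won the race
  }
}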
use of org.apache.geode.cache.query.internal.index.IndexManager in project geode by apache.
the class AbstractRegionMap method destroy.
public boolean destroy(EntryEventImpl event, boolean inTokenMode, boolean duringRI, boolean cacheWrite, boolean isEviction, Object expectedOldValue, boolean removeRecoveredEntry) throws CacheWriterException, EntryNotFoundException, TimeoutException {
final LocalRegion owner = _getOwner();
if (owner == null) {
Assert.assertTrue(false, // "fix" for bug 32440
"The owner for RegionMap " + this + " is null for event " + event);
}
boolean retry = true;
lockForCacheModification(owner, event);
try {
while (retry) {
retry = false;
boolean opCompleted = false;
boolean doPart3 = false;
// We need to acquire the region entry while holding the lock to avoid #45620.
// The outer try/finally ensures that the lock will be released without fail.
// I'm avoiding indenting just to preserve the ability
// to track diffs since the code is fairly complex.
RegionEntry re = getOrCreateRegionEntry(owner, event, Token.REMOVED_PHASE1, null, true, true);
RegionEntry tombstone = null;
boolean haveTombstone = false;
/*
* Execute the test hook runnable inline (not threaded) if it is not null.
*/
if (null != testHookRunnableFor48182) {
testHookRunnableFor48182.run();
}
try {
if (logger.isTraceEnabled(LogMarker.LRU_TOMBSTONE_COUNT) && !(owner instanceof HARegion)) {
logger.trace(LogMarker.LRU_TOMBSTONE_COUNT, "ARM.destroy() inTokenMode={}; duringRI={}; riLocalDestroy={}; withRepl={}; fromServer={}; concurrencyEnabled={}; isOriginRemote={}; isEviction={}; operation={}; re={}", inTokenMode, duringRI, event.isFromRILocalDestroy(), owner.dataPolicy.withReplication(), event.isFromServer(), owner.concurrencyChecksEnabled, event.isOriginRemote(), isEviction, event.getOperation(), re);
}
if (event.isFromRILocalDestroy()) {
// for RI local-destroy we don't want to keep tombstones.
// In order to simplify things we just set this recovery
// flag to true to force the entry to be removed
removeRecoveredEntry = true;
}
// Check for a tombstone here and, if found, pretend for a bit that the entry is null
if (re != null && re.isTombstone() && !removeRecoveredEntry) {
tombstone = re;
haveTombstone = true;
re = null;
}
IndexManager oqlIndexManager = owner.getIndexManager();
if (re == null) {
// we need to create an entry if in token mode or if we've received
// a destroy from a peer or WAN gateway and we need to retain version
// information for concurrency checks
boolean retainForConcurrency = (!haveTombstone
    && (owner.dataPolicy.withReplication() || event.isFromServer())
    && owner.concurrencyChecksEnabled
    && (event.isOriginRemote() /* destroy received from another member must create a tombstone */
        || event.isFromWANAndVersioned() /* wan event must create a tombstone */
        || event.isBridgeEvent() /* event from client must create a tombstone so client has a version # */));
if (inTokenMode || retainForConcurrency) {
// removeRecoveredEntry should be false in this case
RegionEntry newRe = getEntryFactory().createEntry(owner, event.getKey(), Token.REMOVED_PHASE1);
// Fix for Bug #44431. We do NOT want to update the region and wait
// later for index INIT, as region.clear() can cause inconsistency if
// it happens in parallel, since it also does index INIT.
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
try {
synchronized (newRe) {
RegionEntry oldRe = putEntryIfAbsent(event.getKey(), newRe);
while (!opCompleted && oldRe != null) {
synchronized (oldRe) {
if (oldRe.isRemovedPhase2()) {
owner.getCachePerfStats().incRetries();
_getMap().remove(event.getKey(), oldRe);
oldRe = putEntryIfAbsent(event.getKey(), newRe);
} else {
event.setRegionEntry(oldRe);
// Last transaction-related eviction check. This should prevent a transaction
// conflict (caused by eviction) when the entry is being added to the
// transaction state.
if (isEviction) {
if (!confirmEvictionDestroy(oldRe)) {
opCompleted = false;
return opCompleted;
}
}
try {
// if concurrency checks are enabled, destroy will
// set the version tag
boolean destroyed = destroyEntry(oldRe, event, inTokenMode, cacheWrite, expectedOldValue, false, removeRecoveredEntry);
if (destroyed) {
if (retainForConcurrency) {
owner.basicDestroyBeforeRemoval(oldRe, event);
}
owner.basicDestroyPart2(oldRe, event, inTokenMode, false, /* conflict with clear */
duringRI, true);
lruEntryDestroy(oldRe);
doPart3 = true;
}
} catch (RegionClearedException rce) {
// Ignore. The exception will ensure that we do not update
// the LRU List
owner.basicDestroyPart2(oldRe, event, inTokenMode, true, /* conflict with clear */
duringRI, true);
doPart3 = true;
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
re = oldRe;
opCompleted = true;
}
}
// synchronized oldRe
}
// while
if (!opCompleted) {
// We get here if the operation did not complete with oldRe in the while loop above.
try {
re = newRe;
event.setRegionEntry(newRe);
try {
// if concurrency checks are enabled, destroy will set the version tag
if (isEviction) {
opCompleted = false;
return opCompleted;
}
opCompleted = destroyEntry(newRe, event, inTokenMode, cacheWrite, expectedOldValue, true, removeRecoveredEntry);
if (opCompleted) {
// This is a new entry that was created because we are in
// token mode or are accepting a destroy operation by adding
// a tombstone. There is no oldValue, so we don't need to
// call updateSizeOnRemove
// owner.recordEvent(event);
// native clients need to know if the entry didn't exist
event.setIsRedestroyedEntry(true);
if (retainForConcurrency) {
owner.basicDestroyBeforeRemoval(newRe, event); // newRe: oldRe is always null on this path
}
owner.basicDestroyPart2(newRe, event, inTokenMode, false, /* conflict with clear */
duringRI, true);
doPart3 = true;
}
} catch (RegionClearedException rce) {
// Ignore. The exception will ensure that we do not update
// the LRU List
opCompleted = true;
EntryLogger.logDestroy(event);
owner.basicDestroyPart2(newRe, event, inTokenMode, true, /* conflict with clear */
duringRI, true);
doPart3 = true;
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
// Note no need for LRU work since the entry is destroyed
// and will be removed when gii completes
} finally {
if (!opCompleted && !haveTombstone) /* to fix bug 51583 do this for all operations */
{
removeEntry(event.getKey(), newRe, false);
}
if (!opCompleted && isEviction) {
removeEntry(event.getKey(), newRe, false);
}
}
}
// !opCompleted
}
// synchronized newRe
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
}
} else // inTokenMode or tombstone creation
{
if (!isEviction || owner.concurrencyChecksEnabled) {
// The following ensures that there is not a concurrent operation
// on the entry and leaves behind a tombstone if concurrencyChecksEnabled.
// It fixes bug #32467 by propagating the destroy to the server even though
// the entry isn't in the client
RegionEntry newRe = haveTombstone ? tombstone : getEntryFactory().createEntry(owner, event.getKey(), Token.REMOVED_PHASE1);
synchronized (newRe) {
if (haveTombstone && !tombstone.isTombstone()) {
// we have to check this again under synchronization since it may have changed
retry = true;
// retryEntry = tombstone; // leave this in place for debugging
continue;
}
re = (RegionEntry) _getMap().putIfAbsent(event.getKey(), newRe);
if (re != null && re != tombstone) {
// concurrent change - try again
retry = true;
// retryEntry = tombstone; // leave this in place for debugging
continue;
} else if (!isEviction) {
boolean throwex = false;
EntryNotFoundException ex = null;
try {
if (!cacheWrite) {
throwex = true;
} else {
try {
if (!removeRecoveredEntry) {
throwex = !owner.bridgeWriteBeforeDestroy(event, expectedOldValue);
}
} catch (EntryNotFoundException e) {
throwex = true;
ex = e;
}
}
if (throwex) {
if (!event.isOriginRemote() && !event.getOperation().isLocal()
    && (event.isFromBridgeAndVersioned() // a replayed client event that already has a version
        || event.isFromWANAndVersioned())) { // a WAN event that was already applied in peers
if (logger.isDebugEnabled()) {
logger.debug("ARM.destroy is allowing wan/client destroy of {} to continue", event.getKey());
}
throwex = false;
event.setIsRedestroyedEntry(true);
// Distribution of this op happens on re, and re might be null here before
// distributing this destroy op.
if (re == null) {
re = newRe;
}
doPart3 = true;
}
}
if (throwex) {
if (ex == null) {
// Fix for 48182: check cache state and/or region state before sending
// entry not found.
// This is from the server, and any exceptions will propagate to the client.
owner.checkEntryNotFound(event.getKey());
} else {
throw ex;
}
}
} finally {
// either remove the entry or leave a tombstone
try {
if (!event.isOriginRemote() && event.getVersionTag() != null && owner.concurrencyChecksEnabled) {
// this shouldn't fail since we just created the entry.
// it will either generate a tag or apply a server's version tag
processVersionTag(newRe, event);
if (doPart3) {
owner.generateAndSetVersionTag(event, newRe);
}
try {
owner.recordEvent(event);
newRe.makeTombstone(owner, event.getVersionTag());
} catch (RegionClearedException e) {
// that's okay - when writing a tombstone into a disk, the
// region has been cleared (including this tombstone)
}
opCompleted = true;
// lruEntryCreate(newRe);
} else if (!haveTombstone) {
try {
assert newRe != tombstone;
newRe.setValue(owner, Token.REMOVED_PHASE2);
removeEntry(event.getKey(), newRe, false);
} catch (RegionClearedException e) {
// that's okay - we just need to remove the new entry
}
} else if (event.getVersionTag() != null) {
// haveTombstone: update the tombstone version info
processVersionTag(tombstone, event);
if (doPart3) {
owner.generateAndSetVersionTag(event, newRe);
}
// rewrite the tombstone with the updated version information
try {
tombstone.setValue(owner, Token.TOMBSTONE);
} catch (RegionClearedException e) {
// that's okay - when writing a tombstone into a disk, the
// region has been cleared (including this tombstone)
}
owner.recordEvent(event);
owner.rescheduleTombstone(tombstone, event.getVersionTag());
owner.basicDestroyPart2(tombstone, event, inTokenMode, true, /* conflict with clear */
duringRI, true);
opCompleted = true;
}
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
}
}
}
}
// synchronized(newRe)
}
}
} // no current entry
else {
// current entry exists
if (oqlIndexManager != null) {
oqlIndexManager.waitForIndexInit();
}
try {
synchronized (re) {
// if the entry is a tombstone and the event is from a peer or a client
// then we allow the operation to be performed so that we can update the
// version stamp. Otherwise we would retain an old version stamp and may allow
// an operation that is older than the destroy() to be applied to the cache
// Bug 45170: If removeRecoveredEntry, we treat tombstone as regular entry to be
// deleted
boolean createTombstoneForConflictChecks = (owner.concurrencyChecksEnabled && (event.isOriginRemote() || event.getContext() != null || removeRecoveredEntry));
if (!re.isRemoved() || createTombstoneForConflictChecks) {
if (re.isRemovedPhase2()) {
_getMap().remove(event.getKey(), re);
owner.getCachePerfStats().incRetries();
retry = true;
continue;
}
if (!event.isOriginRemote() && event.getOperation().isExpiration()) {
// If this expiration started locally, only do it if the entry is not being
// used by a tx.
if (re.isInUseByTransaction()) {
opCompleted = false;
return opCompleted;
}
}
event.setRegionEntry(re);
// See comment above about eviction checks
if (isEviction) {
assert expectedOldValue == null;
if (!confirmEvictionDestroy(re)) {
opCompleted = false;
return opCompleted;
}
}
boolean removed = false;
try {
opCompleted = destroyEntry(re, event, inTokenMode, cacheWrite, expectedOldValue, false, removeRecoveredEntry);
if (opCompleted) {
// It is very, very important for Partitioned Regions to keep
// the entry in the map until after distribution occurs so that other
// threads performing a create on this entry wait until the destroy
// distribution is finished.
// This also keeps backup copies consistent. Fix for bug 35906.
// -- mthomas 07/02/2007 <-- how about that date, kinda cool eh?
owner.basicDestroyBeforeRemoval(re, event);
// do this before basicDestroyPart2 to fix bug 31786
if (!inTokenMode) {
if (re.getVersionStamp() == null) {
re.removePhase2();
removeEntry(event.getKey(), re, true, event, owner);
removed = true;
}
}
if (inTokenMode && !duringRI) {
event.inhibitCacheListenerNotification(true);
}
doPart3 = true;
owner.basicDestroyPart2(re, event, inTokenMode, false, /* conflict with clear */
duringRI, true);
// if (!re.isTombstone() || isEviction) {
lruEntryDestroy(re);
// } else {
// lruEntryUpdate(re);
// lruUpdateCallback = true;
// }
} else {
if (!inTokenMode) {
EntryLogger.logDestroy(event);
owner.recordEvent(event);
if (re.getVersionStamp() == null) {
re.removePhase2();
removeEntry(event.getKey(), re, true, event, owner);
lruEntryDestroy(re);
} else {
if (re.isTombstone()) {
// the entry is already a tombstone and is being destroyed
// again, so we need to reschedule the tombstone's expiration
if (event.isOriginRemote()) {
owner.rescheduleTombstone(re, re.getVersionStamp().asVersionTag());
}
}
}
lruEntryDestroy(re);
opCompleted = true;
}
}
} catch (RegionClearedException rce) {
// Ignore. The exception will ensure that we do not update
// the LRU List
opCompleted = true;
owner.recordEvent(event);
if (inTokenMode && !duringRI) {
event.inhibitCacheListenerNotification(true);
}
owner.basicDestroyPart2(re, event, inTokenMode, true, /* conflict with clear */
duringRI, true);
doPart3 = true;
} finally {
if (re.isRemoved() && !re.isTombstone()) {
if (!removed) {
removeEntry(event.getKey(), re, true, event, owner);
}
}
}
} else // !isRemoved
{
// already removed
if (re.isTombstone() && event.getVersionTag() != null) {
// if we're dealing with a tombstone and this is a remote event
// (e.g., from cache client update thread) we need to update
// the tombstone's version information
// TODO use destroyEntry() here
processVersionTag(re, event);
try {
re.makeTombstone(owner, event.getVersionTag());
} catch (RegionClearedException e) {
// that's okay - when writing a tombstone into a disk, the
// region has been cleared (including this tombstone)
}
}
if (expectedOldValue != null) {
// if re is removed then there is no old value, so return false
return false;
}
if (!inTokenMode && !isEviction) {
owner.checkEntryNotFound(event.getKey());
}
}
}
// synchronized re
} catch (ConcurrentCacheModificationException ccme) {
VersionTag tag = event.getVersionTag();
if (tag != null && tag.isTimeStampUpdated()) {
// Notify gateways of new time-stamp.
owner.notifyTimestampsToGateways(event);
}
throw ccme;
} finally {
if (oqlIndexManager != null) {
oqlIndexManager.countDownIndexUpdaters();
}
}
// No need to call lruUpdateCallback since the only lru action
// we may have taken was lruEntryDestroy. This fixes bug 31759.
}
// current entry exists
if (opCompleted) {
EntryLogger.logDestroy(event);
}
return opCompleted;
} finally {
try {
// If there is a concurrency conflict and the event carries a gateway version tag, do NOT distribute.
if (event.isConcurrencyConflict() && (event.getVersionTag() != null && event.getVersionTag().isGatewayTag())) {
doPart3 = false;
}
// distribution and listener notification
if (doPart3) {
owner.basicDestroyPart3(re, event, inTokenMode, duringRI, true, expectedOldValue);
}
} finally {
if (opCompleted) {
if (re != null) {
// we only want to cancel if concurrency-check is not enabled.
// re (the RegionEntry) will be null when concurrency-check is enabled, and the
// removeTombstone method will call cancelExpiryTask on the region entry.
owner.cancelExpiryTask(re);
}
}
}
}
}
// retry loop
} finally {
// failsafe on the read lock...see comment above
releaseCacheModificationLock(owner, event);
}
return false;
}
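The retainForConcurrency expression in destroy() is the crux of tombstone creation. A condensed restatement, with boolean parameters standing in for the event and region state (names are illustrative, not from the source):

class TombstoneRetentionSketch {
  // A destroy arriving from a peer, WAN gateway, or client must leave a
  // tombstone carrying the version tag, even when the entry is absent locally,
  // so concurrency checks can order it against later operations.
  static boolean retainForConcurrency(boolean haveTombstone, boolean withReplication,
      boolean fromServer, boolean concurrencyChecksEnabled, boolean originRemote,
      boolean fromWanAndVersioned, boolean bridgeEvent) {
    return !haveTombstone
        && (withReplication || fromServer)
        && concurrencyChecksEnabled
        && (originRemote || fromWanAndVersioned || bridgeEvent);
  }
}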