Use of org.apache.geode.cache.query.internal.index.IndexManager in the Apache Geode project.
Excerpt: class AbstractRegionMap, method invalidate.
/**
 * Invalidates the region entry for {@code event}'s key.
 *
 * <p>Waits for any in-progress OQL index initialization first (bug #44431), then holds the
 * cache-modification lock for the whole operation. Two main paths:
 * <ul>
 * <li>{@code forceNewEntry || forceCallbacks}: create (or adopt) an entry so the invalidate and
 * its callbacks can proceed even when no live entry exists yet;</li>
 * <li>otherwise: invalidate the existing entry, retrying when a concurrent tombstone changes
 * state underneath us.</li>
 * </ul>
 *
 * @param event the invalidate event; its region-entry and old-value fields are populated here
 * @param invokeCallbacks whether listener callbacks should be invoked in basicInvalidatePart2/3
 * @param forceNewEntry when true, create a new entry if none exists (presumably used during GII
 *        and by server-originated invalidates — confirm against callers)
 * @param forceCallbacks force the callback-capable path even without {@code forceNewEntry}
 * @return true if an entry was actually invalidated; false when the operation aborted early
 *         (e.g. the server generated no version tag, or the entry is in use by a transaction)
 * @throws EntryNotFoundException if no entry exists and one was not forced into existence
 */
public boolean invalidate(EntryEventImpl event, boolean invokeCallbacks, boolean forceNewEntry, boolean forceCallbacks) throws EntryNotFoundException {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  final LocalRegion owner = _getOwner();
  if (owner == null) {
    // "fix" for bug 32440
    Assert.assertTrue(false, "The owner for RegionMap " + this + " is null for event " + event);
  }
  boolean didInvalidate = false;
  RegionEntry invalidatedRe = null;
  boolean clearOccured = false;
  DiskRegion dr = owner.getDiskRegion();
  boolean ownerIsInitialized = owner.isInitialized();
  try {
    // Fix for Bug #44431. We do NOT want to update the region and wait
    // later for index INIT as region.clear() can cause inconsistency if
    // happened in parallel as it also does index INIT.
    IndexManager oqlIndexManager = owner.getIndexManager();
    if (oqlIndexManager != null) {
      oqlIndexManager.waitForIndexInit();
    }
    lockForCacheModification(owner, event);
    try {
      try {
        if (forceNewEntry || forceCallbacks) {
          boolean opCompleted = false;
          // Placeholder entry; only published if no existing entry is found below.
          RegionEntry newRe = getEntryFactory().createEntry(owner, event.getKey(), Token.REMOVED_PHASE1);
          synchronized (newRe) {
            try {
              RegionEntry oldRe = putEntryIfAbsent(event.getKey(), newRe);
              // Retry loop: either invalidate the existing entry, or fall through
              // (oldRe == null) and install newRe instead.
              while (!opCompleted && oldRe != null) {
                synchronized (oldRe) {
                  // proceed to phase 2 of removal.
                  if (oldRe.isRemovedPhase2()) {
                    // Entry was concurrently removed from the map; retry the putIfAbsent.
                    owner.getCachePerfStats().incRetries();
                    _getMap().remove(event.getKey(), oldRe);
                    oldRe = putEntryIfAbsent(event.getKey(), newRe);
                  } else {
                    opCompleted = true;
                    event.setRegionEntry(oldRe);
                    if (oldRe.isDestroyed()) {
                      if (isDebugEnabled) {
                        logger.debug("mapInvalidate: Found DESTROYED token, not invalidated; key={}", event.getKey());
                      }
                    } else if (oldRe.isInvalid()) {
                      // was already invalid, do not invoke listeners or increment stat
                      if (isDebugEnabled) {
                        logger.debug("mapInvalidate: Entry already invalid: '{}'", event.getKey());
                      }
                      processVersionTag(oldRe, event);
                      try {
                        // OFFHEAP noop setting an already invalid to invalid;
                        // no need to call prepareValueForCache since it is an
                        // invalid token.
                        oldRe.setValue(owner, oldRe.getValueInVM(owner));
                      } catch (RegionClearedException e) {
                        // that's okay - when writing an invalid into a disk, the
                        // region has been cleared (including this token)
                      }
                    } else {
                      owner.serverInvalidate(event);
                      if (owner.concurrencyChecksEnabled && event.noVersionReceivedFromServer()) {
                        // server did not generate a version tag, so do not invalidate the
                        // entry here
                        return false;
                      }
                      final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
                      // added for cq which needs old value. rdubey
                      FilterProfile fp = owner.getFilterProfile();
                      if (!oldRe.isRemoved() && (fp != null && fp.getCqCount() > 0)) {
                        // OFFHEAP EntryEventImpl
                        Object oldValue = oldRe.getValueInVM(owner);
                        // this will not fault in the value.
                        if (oldValue == Token.NOT_AVAILABLE) {
                          event.setOldValue(oldRe.getValueOnDiskOrBuffer(owner));
                        } else {
                          event.setOldValue(oldValue);
                        }
                      }
                      boolean isCreate = false;
                      try {
                        if (oldRe.isRemoved()) {
                          processVersionTag(oldRe, event);
                          event.putNewEntry(owner, oldRe);
                          EntryLogger.logInvalidate(event);
                          owner.recordEvent(event);
                          if (!oldRe.isTombstone()) {
                            owner.updateSizeOnPut(event.getKey(), oldSize, event.getNewValueBucketSize());
                          } else {
                            // resurrecting a tombstone counts as a create for sizing/LRU
                            owner.updateSizeOnCreate(event.getKey(), event.getNewValueBucketSize());
                            isCreate = true;
                          }
                        } else {
                          processVersionTag(oldRe, event);
                          event.putExistingEntry(owner, oldRe);
                          EntryLogger.logInvalidate(event);
                          owner.recordEvent(event);
                          owner.updateSizeOnPut(event.getKey(), oldSize, event.getNewValueBucketSize());
                        }
                      } catch (RegionClearedException e) {
                        // generate versionTag for the event
                        EntryLogger.logInvalidate(event);
                        owner.recordEvent(event);
                        clearOccured = true;
                      }
                      owner.basicInvalidatePart2(oldRe, event, clearOccured, /* conflict with clear */
                          invokeCallbacks);
                      if (!clearOccured) {
                        if (isCreate) {
                          lruEntryCreate(oldRe);
                        } else {
                          lruEntryUpdate(oldRe);
                        }
                      }
                      didInvalidate = true;
                      invalidatedRe = oldRe;
                    }
                  }
                }
                // synchronized oldRe
              }
              if (!opCompleted) {
                // No existing entry found: use the newRe placeholder we published above.
                if (forceNewEntry && event.isFromServer()) {
                  // CCU invalidations before 7.0, and listeners don't care
                  if (!FORCE_INVALIDATE_EVENT) {
                    event.inhibitCacheListenerNotification(true);
                  }
                }
                event.setRegionEntry(newRe);
                owner.serverInvalidate(event);
                if (!forceNewEntry && event.noVersionReceivedFromServer()) {
                  // server did not generate a version tag, so do not invalidate the
                  // entry here
                  return false;
                }
                try {
                  ownerIsInitialized = owner.isInitialized();
                  if (!ownerIsInitialized && owner.getDataPolicy().withReplication()) {
                    final int oldSize = owner.calculateRegionEntryValueSize(newRe);
                    invalidateEntry(event, newRe, oldSize);
                  } else {
                    invalidateNewEntry(event, owner, newRe);
                  }
                } catch (RegionClearedException e) {
                  // TODO: deltaGII: do we even need RegionClearedException?
                  // generate versionTag for the event
                  owner.recordEvent(event);
                  clearOccured = true;
                }
                owner.basicInvalidatePart2(newRe, event, clearOccured, /* conflict with clear */
                    invokeCallbacks);
                if (!clearOccured) {
                  lruEntryCreate(newRe);
                  incEntryCount(1);
                }
                opCompleted = true;
                didInvalidate = true;
                invalidatedRe = newRe;
                // the entry was only created to carry callbacks for this invalidate,
                // so remove it again unless the caller asked for a new entry
                if (!forceNewEntry) {
                  removeEntry(event.getKey(), newRe, false);
                }
              }
              // !opCompleted
            } catch (ConcurrentCacheModificationException ccme) {
              VersionTag tag = event.getVersionTag();
              if (tag != null && tag.isTimeStampUpdated()) {
                // Notify gateways of new time-stamp.
                owner.notifyTimestampsToGateways(event);
              }
              throw ccme;
            } finally {
              // Never leave the REMOVED_PHASE1 placeholder behind on failure.
              if (!opCompleted) {
                removeEntry(event.getKey(), newRe, false);
              }
            }
          }
          // synchronized newRe
        } else // forceNewEntry
        {
          // !forceNewEntry
          boolean retry = true;
          while (retry) {
            retry = false;
            boolean entryExisted = false;
            RegionEntry re = getEntry(event.getKey());
            RegionEntry tombstone = null;
            boolean haveTombstone = false;
            if (re != null && re.isTombstone()) {
              tombstone = re;
              haveTombstone = true;
              re = null;
            }
            if (re == null) {
              ownerIsInitialized = owner.isInitialized();
              if (!ownerIsInitialized) {
                // when GII message arrived or processed later than invalidate
                // message, the entry should be created as placeholder
                RegionEntry newRe = haveTombstone ? tombstone : getEntryFactory().createEntry(owner, event.getKey(), Token.INVALID);
                synchronized (newRe) {
                  if (haveTombstone && !tombstone.isTombstone()) {
                    // state of the tombstone has changed so we need to retry
                    retry = true;
                    // retryEntry = tombstone; // leave this in place for debugging
                    continue;
                  }
                  re = putEntryIfAbsent(event.getKey(), newRe);
                  if (re == tombstone) {
                    // pretend we don't have an entry
                    re = null;
                  }
                }
              } else if (owner.getServerProxy() != null) {
                Object sync = haveTombstone ? tombstone : new Object();
                synchronized (sync) {
                  if (haveTombstone && !tombstone.isTombstone()) {
                    // bug 45295: state of the tombstone has changed so we need to retry
                    retry = true;
                    // retryEntry = tombstone; // leave this in place for debugging
                    continue;
                  }
                  // bug #43287 - send event to server even if it's not in the client (LRU may
                  // have evicted it)
                  owner.serverInvalidate(event);
                  if (owner.concurrencyChecksEnabled) {
                    if (event.getVersionTag() == null) {
                      // server did not generate a version tag, so do not invalidate the
                      // entry here
                      return false;
                    } else if (tombstone != null) {
                      processVersionTag(tombstone, event);
                      try {
                        if (!tombstone.isTombstone()) {
                          if (isDebugEnabled) {
                            logger.debug("tombstone is no longer a tombstone. {}:event={}", tombstone, event);
                          }
                        }
                        tombstone.setValue(owner, Token.TOMBSTONE);
                      } catch (RegionClearedException e) {
                        // that's okay - when writing a tombstone into a disk, the
                        // region has been cleared (including this tombstone)
                      } catch (ConcurrentCacheModificationException ccme) {
                        VersionTag tag = event.getVersionTag();
                        if (tag != null && tag.isTimeStampUpdated()) {
                          // Notify gateways of new time-stamp.
                          owner.notifyTimestampsToGateways(event);
                        }
                        throw ccme;
                      }
                      // update the tombstone's version to prevent an older CCU/putAll from
                      // overwriting it
                      owner.rescheduleTombstone(tombstone, event.getVersionTag());
                    }
                  }
                }
                entryExisted = true;
              }
            }
            if (re != null) {
              // normal invalidate operation
              synchronized (re) {
                if (!event.isOriginRemote() && event.getOperation().isExpiration()) {
                  // do not expire an entry that is currently
                  // used by a tx.
                  if (re.isInUseByTransaction()) {
                    return false;
                  }
                }
                if (re.isTombstone() || (!re.isRemoved() && !re.isDestroyed())) {
                  entryExisted = true;
                  if (re.isInvalid()) {
                    // already invalid: do not invoke listeners or increment the
                    // stat
                    if (isDebugEnabled) {
                      logger.debug("Invalidate: Entry already invalid: '{}'", event.getKey());
                    }
                    if (event.getVersionTag() != null && owner.getVersionVector() != null) {
                      owner.getVersionVector().recordVersion((InternalDistributedMember) event.getDistributedMember(), event.getVersionTag());
                    }
                  } else {
                    // previous value not invalid
                    event.setRegionEntry(re);
                    owner.serverInvalidate(event);
                    if (owner.concurrencyChecksEnabled && event.noVersionReceivedFromServer()) {
                      // server did not generate a version tag, so do not invalidate the
                      // entry here
                      if (isDebugEnabled) {
                        logger.debug("returning early because server did not generate a version stamp for this event:{}", event);
                      }
                      return false;
                    }
                    // in case of overflow to disk we need the old value for cqs.
                    if (owner.getFilterProfile().getCqCount() > 0) {
                      // use to be getValue and can cause dead lock rdubey.
                      if (re.isValueNull()) {
                        event.setOldValue(re.getValueOnDiskOrBuffer(owner));
                      } else {
                        Object v = re.getValueInVM(owner);
                        // OFFHEAP escapes to EntryEventImpl oldValue
                        event.setOldValue(v);
                      }
                    }
                    final boolean oldWasTombstone = re.isTombstone();
                    final int oldSize = _getOwner().calculateRegionEntryValueSize(re);
                    try {
                      invalidateEntry(event, re, oldSize);
                    } catch (RegionClearedException rce) {
                      // generate versionTag for the event
                      EntryLogger.logInvalidate(event);
                      _getOwner().recordEvent(event);
                      clearOccured = true;
                    } catch (ConcurrentCacheModificationException ccme) {
                      VersionTag tag = event.getVersionTag();
                      if (tag != null && tag.isTimeStampUpdated()) {
                        // Notify gateways of new time-stamp.
                        owner.notifyTimestampsToGateways(event);
                      }
                      throw ccme;
                    }
                    owner.basicInvalidatePart2(re, event, clearOccured, /* conflict with clear */
                        invokeCallbacks);
                    if (!clearOccured) {
                      if (oldWasTombstone) {
                        lruEntryCreate(re);
                      } else {
                        lruEntryUpdate(re);
                      }
                    }
                    didInvalidate = true;
                    invalidatedRe = re;
                  }
                  // previous value not invalid
                }
              }
              // synchronized re
            } else // re != null
            {
              // At this point, either it's not in GII mode, or the placeholder
              // is in region, do nothing
            }
            if (!entryExisted) {
              owner.checkEntryNotFound(event.getKey());
            }
          }
          // while(retry)
        }
        // !forceNewEntry
      } catch (DiskAccessException dae) {
        invalidatedRe = null;
        didInvalidate = false;
        this._getOwner().handleDiskAccessException(dae);
        throw dae;
      } finally {
        if (oqlIndexManager != null) {
          oqlIndexManager.countDownIndexUpdaters();
        }
        if (invalidatedRe != null) {
          owner.basicInvalidatePart3(invalidatedRe, event, invokeCallbacks);
        }
        if (didInvalidate && !clearOccured) {
          try {
            lruUpdateCallback();
          } catch (DiskAccessException dae) {
            this._getOwner().handleDiskAccessException(dae);
            throw dae;
          }
        } else if (!didInvalidate) {
          resetThreadLocals();
        }
      }
      return didInvalidate;
    } finally {
      if (ownerIsInitialized) {
        forceInvalidateEvent(event, owner);
      }
    }
  } finally {
    releaseCacheModificationLock(owner, event);
  }
}
Use of org.apache.geode.cache.query.internal.index.IndexManager in the Apache Geode project.
Excerpt: class AbstractRegionMap, method txApplyDestroy.
/**
 * Applies a transactional destroy of {@code key} to this region map.
 *
 * <p>Three cases based on the current entry state:
 * <ul>
 * <li>an entry exists: tombstone or remove it under its monitor, updating indexes, sizing,
 * and LRU, and dispatch (or queue) the AFTER_DESTROY callback event;</li>
 * <li>no entry but {@code inTokenMode} or concurrency checks are enabled: install a
 * DESTROYED-token entry (or adopt a racing one) so a version tag / tombstone can be
 * generated;</li>
 * <li>no entry otherwise (bug #43594, re-created bucket regions): only notify clients via
 * the callback event.</li>
 * </ul>
 * Index initialization is awaited before touching the map (same rationale as bug #44431);
 * callback events are released unless handed to {@code pendingCallbacks}.
 *
 * @param key the key being destroyed
 * @param txId the transaction that performed the destroy
 * @param txEvent remote TX event to which this destroy is added; may be null
 * @param inTokenMode when true the entry is kept as a DESTROYED token instead of removed
 * @param inRI when true callbacks are created even if the region is not ready
 * @param op the destroy operation variant used for the callback event
 * @param eventId identity for the callback event
 * @param aCallbackArgument caller-supplied callback argument
 * @param pendingCallbacks when non-null, callback events are queued here instead of being
 *        dispatched (and then not released by this method)
 * @param filterRoutingInfo routing info for client/CQ filtering
 * @param bridgeContext client proxy that originated the operation, if any
 * @param isOriginRemote NOTE(review): not read in this body — origin is derived from txId;
 *        confirm it is intentionally unused
 * @param txEntryState local TX entry state; receives the generated version tag
 * @param versionTag version tag received from the TX originator, if any
 * @param tailKey WAN shadow key for the callback event
 */
public void txApplyDestroy(Object key, TransactionId txId, TXRmtEvent txEvent, boolean inTokenMode, boolean inRI, Operation op, EventID eventId, Object aCallbackArgument, List<EntryEventImpl> pendingCallbacks, FilterRoutingInfo filterRoutingInfo, ClientProxyMembershipID bridgeContext, boolean isOriginRemote, TXEntryState txEntryState, VersionTag versionTag, long tailKey) {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  final LocalRegion owner = _getOwner();
  final boolean isRegionReady = !inTokenMode;
  // remote origin iff the tx was started by another member
  final boolean hasRemoteOrigin = !((TXId) txId).getMemberId().equals(owner.getMyId());
  boolean cbEventInPending = false;
  IndexManager oqlIndexManager = owner.getIndexManager();
  try {
    RegionEntry re = getEntry(key);
    if (re != null) {
      // Wait for index init before updating the region: region.clear() can cause
      // inconsistency if it
      // happened in parallel as it also does index INIT.
      if (oqlIndexManager != null) {
        oqlIndexManager.waitForIndexInit();
      }
      try {
        synchronized (re) {
          if (!re.isRemoved() || re.isTombstone()) {
            Object oldValue = re.getValueInVM(owner);
            final int oldSize = owner.calculateRegionEntryValueSize(re);
            // Create an entry event only if the calling context is
            // a receipt of a TXCommitMessage AND there are callbacks installed
            // for this region
            boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady || inRI);
            @Released EntryEventImpl cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
            try {
              if (owner.isUsedForPartitionedRegionBucket()) {
                txHandleWANEvent(owner, cbEvent, txEntryState);
              }
              cbEvent.setRegionEntry(re);
              cbEvent.setOldValue(oldValue);
              if (isDebugEnabled) {
                logger.debug("txApplyDestroy cbEvent={}", cbEvent);
              }
              txRemoveOldIndexEntry(Operation.DESTROY, re);
              if (txEvent != null) {
                txEvent.addDestroy(owner, re, re.getKey(), aCallbackArgument);
              }
              boolean clearOccured = false;
              try {
                processAndGenerateTXVersionTag(owner, cbEvent, re, txEntryState);
                if (inTokenMode) {
                  if (oldValue == Token.TOMBSTONE) {
                    owner.unscheduleTombstone(re);
                  }
                  re.setValue(owner, Token.DESTROYED);
                } else {
                  if (!re.isTombstone()) {
                    {
                      if (shouldPerformConcurrencyChecks(owner, cbEvent) && cbEvent.getVersionTag() != null) {
                        re.makeTombstone(owner, cbEvent.getVersionTag());
                      } else {
                        // fix for bug 43063
                        re.removePhase1(owner, false);
                        re.removePhase2();
                        removeEntry(key, re, false);
                      }
                    }
                  } else {
                    owner.rescheduleTombstone(re, re.getVersionStamp().asVersionTag());
                  }
                }
                EntryLogger.logTXDestroy(_getOwnerObject(), key);
                owner.updateSizeOnRemove(key, oldSize);
              } catch (RegionClearedException rce) {
                clearOccured = true;
              }
              owner.txApplyDestroyPart2(re, re.getKey(), inTokenMode, clearOccured);
              if (invokeCallbacks) {
                switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                if (pendingCallbacks == null) {
                  owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, true);
                } else {
                  pendingCallbacks.add(cbEvent);
                  cbEventInPending = true;
                }
              }
              if (!clearOccured) {
                lruEntryDestroy(re);
              }
              if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
                txEntryState.setVersionTag(cbEvent.getVersionTag());
              }
            } finally {
              // release the off-heap event unless ownership passed to pendingCallbacks
              if (!cbEventInPending)
                cbEvent.release();
            }
          }
        }
      } finally {
        if (oqlIndexManager != null) {
          oqlIndexManager.countDownIndexUpdaters();
        }
      }
    } else if (inTokenMode || owner.concurrencyChecksEnabled) {
      // treating tokenMode and re == null as same, since we now want to
      // generate versions and Tombstones for destroys
      boolean dispatchListenerEvent = inTokenMode;
      boolean opCompleted = false;
      // TODO: if inTokenMode then Token.DESTROYED is ok but what about !inTokenMode because
      // owner.concurrencyChecksEnabled? In that case we do not want a DESTROYED token.
      RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.DESTROYED);
      if (oqlIndexManager != null) {
        oqlIndexManager.waitForIndexInit();
      }
      EntryEventImpl cbEvent = null;
      try {
        synchronized (newRe) {
          RegionEntry oldRe = putEntryIfAbsent(key, newRe);
          // Retry loop: adopt a racing entry, or fall through (oldRe == null)
          // and use the DESTROYED token we just installed.
          while (!opCompleted && oldRe != null) {
            synchronized (oldRe) {
              if (oldRe.isRemovedPhase2()) {
                // Entry was concurrently removed from the map; retry the putIfAbsent.
                owner.getCachePerfStats().incRetries();
                _getMap().remove(key, oldRe);
                oldRe = putEntryIfAbsent(key, newRe);
              } else {
                try {
                  boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady || inRI);
                  cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
                  try {
                    cbEvent.setRegionEntry(oldRe);
                    cbEvent.setOldValue(Token.NOT_AVAILABLE);
                    if (isDebugEnabled) {
                      logger.debug("txApplyDestroy token mode cbEvent={}", cbEvent);
                    }
                    if (owner.isUsedForPartitionedRegionBucket()) {
                      txHandleWANEvent(owner, cbEvent, txEntryState);
                    }
                    processAndGenerateTXVersionTag(owner, cbEvent, oldRe, txEntryState);
                    if (invokeCallbacks) {
                      switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                      if (pendingCallbacks == null) {
                        owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, dispatchListenerEvent);
                      } else {
                        pendingCallbacks.add(cbEvent);
                        cbEventInPending = true;
                      }
                    }
                    int oldSize = 0;
                    boolean wasTombstone = oldRe.isTombstone();
                    {
                      if (!wasTombstone) {
                        oldSize = owner.calculateRegionEntryValueSize(oldRe);
                      }
                    }
                    oldRe.setValue(owner, Token.DESTROYED);
                    EntryLogger.logTXDestroy(_getOwnerObject(), key);
                    if (wasTombstone) {
                      owner.unscheduleTombstone(oldRe);
                    }
                    owner.updateSizeOnRemove(oldRe.getKey(), oldSize);
                    owner.txApplyDestroyPart2(oldRe, oldRe.getKey(), inTokenMode, false);
                    lruEntryDestroy(oldRe);
                  } finally {
                    // release the event unless ownership passed to pendingCallbacks
                    if (!cbEventInPending)
                      cbEvent.release();
                  }
                } catch (RegionClearedException rce) {
                  owner.txApplyDestroyPart2(oldRe, oldRe.getKey(), inTokenMode, true);
                }
                if (shouldPerformConcurrencyChecks(owner, cbEvent) && cbEvent.getVersionTag() != null) {
                  oldRe.makeTombstone(owner, cbEvent.getVersionTag());
                } else if (!inTokenMode) {
                  // only remove for NORMAL regions if they do not generate versions see 51781
                  // fix for bug 43063
                  oldRe.removePhase1(owner, false);
                  oldRe.removePhase2();
                  removeEntry(key, oldRe, false);
                }
                opCompleted = true;
              }
            }
          }
          if (!opCompleted) {
            // already has value set to Token.DESTROYED
            opCompleted = true;
            boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady || inRI);
            cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
            try {
              cbEvent.setRegionEntry(newRe);
              cbEvent.setOldValue(Token.NOT_AVAILABLE);
              if (isDebugEnabled) {
                logger.debug("txApplyDestroy token mode cbEvent={}", cbEvent);
              }
              if (owner.isUsedForPartitionedRegionBucket()) {
                txHandleWANEvent(owner, cbEvent, txEntryState);
              }
              processAndGenerateTXVersionTag(owner, cbEvent, newRe, txEntryState);
              if (invokeCallbacks) {
                switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                if (pendingCallbacks == null) {
                  owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, dispatchListenerEvent);
                } else {
                  pendingCallbacks.add(cbEvent);
                  cbEventInPending = true;
                }
              }
              EntryLogger.logTXDestroy(_getOwnerObject(), key);
              owner.updateSizeOnCreate(newRe.getKey(), 0);
              if (shouldPerformConcurrencyChecks(owner, cbEvent) && cbEvent.getVersionTag() != null) {
                newRe.makeTombstone(owner, cbEvent.getVersionTag());
              } else if (!inTokenMode) {
                // only remove for NORMAL regions if they do not generate versions see 51781
                // fix for bug 43063
                newRe.removePhase1(owner, false);
                newRe.removePhase2();
                removeEntry(key, newRe, false);
              }
              owner.txApplyDestroyPart2(newRe, newRe.getKey(), inTokenMode, false);
              // Note no need for LRU work since the entry is destroyed
              // and will be removed when gii completes
            } finally {
              // release the event unless ownership passed to pendingCallbacks
              if (!cbEventInPending)
                cbEvent.release();
            }
          }
          if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
            txEntryState.setVersionTag(cbEvent.getVersionTag());
          }
        }
      } catch (RegionClearedException e) {
        // TODO
      } finally {
        if (oqlIndexManager != null) {
          oqlIndexManager.countDownIndexUpdaters();
        }
      }
    } else if (re == null) {
      // Fix bug#43594
      // In cases where bucket region is re-created, it may so happen that
      // the destroy is already applied on the Initial image provider, thus
      // causing region entry to be absent.
      // Notify clients with client events.
      @Released EntryEventImpl cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
      try {
        if (owner.isUsedForPartitionedRegionBucket()) {
          txHandleWANEvent(owner, cbEvent, txEntryState);
        }
        switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
        if (pendingCallbacks == null) {
          owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, false);
        } else {
          pendingCallbacks.add(cbEvent);
          cbEventInPending = true;
        }
      } finally {
        // release the event unless ownership passed to pendingCallbacks
        if (!cbEventInPending)
          cbEvent.release();
      }
    }
  } catch (DiskAccessException dae) {
    owner.handleDiskAccessException(dae);
    throw dae;
  }
}
Use of org.apache.geode.cache.query.internal.index.IndexManager in the Apache Geode project.
Excerpt: class PartitionedRegion, method populateEmptyIndexes.
/**
 * Populates the given (presumably freshly created, empty) indexes on every local bucket of
 * this partitioned region's data store.
 *
 * <p>Population errors are collected rather than thrown: for each bucket whose index
 * population fails with a {@link MultiIndexCreationException}, the per-index exceptions are
 * merged into {@code exceptionsMap} and the method keeps going with the remaining buckets.
 *
 * @param indexes the PR-level indexes whose bucket-level counterparts should be populated
 * @param exceptionsMap out-parameter collecting index-name -&gt; exception for failed populations
 * @return true if at least one bucket failed to populate (caller should raise/report),
 *         false if everything succeeded or there was nothing to do
 */
private boolean populateEmptyIndexes(Set<Index> indexes, HashMap<String, Exception> exceptionsMap) {
  boolean throwException = false;
  // Nothing to do on accessors (no data store) or when no indexes were requested.
  if (getDataStore() != null && !indexes.isEmpty()) {
    // getAllLocalBuckets() yields Map.Entry values (bucketId -> bucket region);
    // enhanced-for replaces the previous raw Iterator loop.
    for (Object bucketEntryObject : getDataStore().getAllLocalBuckets()) {
      Map.Entry entry = (Map.Entry) bucketEntryObject;
      Region bucket = (Region) entry.getValue();
      if (bucket == null) {
        continue;
      }
      // ensureIndexManager=true: create the bucket's IndexManager if it doesn't exist yet
      IndexManager bucketIndexManager = IndexUtils.getIndexManager(bucket, true);
      Set<Index> bucketIndexes = getBucketIndexesForPRIndexes(bucket, indexes);
      try {
        bucketIndexManager.populateIndexes(bucketIndexes);
      } catch (MultiIndexCreationException ex) {
        // record the failures but continue populating the remaining buckets
        exceptionsMap.putAll(ex.getExceptionsMap());
        throwException = true;
      }
    }
  }
  return throwException;
}
Use of org.apache.geode.cache.query.internal.index.IndexManager in the Apache Geode project.
Excerpt: class PartitionedRegion, method removeIndexes.
/**
 * Removes all the indexes on this partitioned region instance and, unless the request
 * originated remotely, sends a remove-index message to all the other participating PRs.
 *
 * @param remotelyOriginated true when this call was triggered by a remove-index message from
 *        another member; in that case no further messages are sent from here
 * @return the number of local buckets whose indexes were removed
 * @throws CacheException declared for callers; see bucket/index removal paths
 * @throws ForceReattemptException declared for callers; presumably surfaced by the remote
 *         remove-index messaging — confirm against RemoveIndexesMessage
 */
public int removeIndexes(boolean remotelyOriginated) throws CacheException, ForceReattemptException {
  int numBuckets = 0;
  // Fast exit when this region has no partitioned index at all.
  if (!this.hasPartitionedIndex || this.indexes.isEmpty()) {
    if (logger.isDebugEnabled()) {
      logger.debug("This partitioned regions does not have any index : {}", this);
    }
    return numBuckets;
  }
  this.hasPartitionedIndex = false;
  logger.info(LocalizedMessage.create(LocalizedStrings.PartitionedRegion_REMOVING_ALL_THE_INDEXES_ON_THIS_PARITITION_REGION__0, this));
  try {
    // Remove the bucket-level indexes first, counting the buckets touched.
    for (Object bucketEntryObject : dataStore.getAllLocalBuckets()) {
      LocalRegion bucket = null;
      Map.Entry bucketEntry = (Map.Entry) bucketEntryObject;
      bucket = (LocalRegion) bucketEntry.getValue();
      if (bucket != null) {
        bucket.waitForData();
        // ensureIndexManager=false: only remove from buckets that actually have one
        IndexManager indexMang = IndexUtils.getIndexManager(bucket, false);
        if (indexMang != null) {
          indexMang.removeIndexes();
          numBuckets++;
          if (logger.isDebugEnabled()) {
            logger.debug("Removed all the indexes on bucket {}", bucket);
          }
        }
      }
    }
    // ends while
    if (logger.isDebugEnabled()) {
      logger.debug("Removed this many indexes on the buckets : {}", numBuckets);
    }
    RemoveIndexesMessage.RemoveIndexesResponse response;
    if (!remotelyOriginated) {
      // Fan the removal out to the other members hosting this PR and wait for them.
      logger.info(LocalizedMessage.create(LocalizedStrings.PartitionedRegion_SENDING_REMOVEINDEX_MESSAGE_TO_ALL_THE_PARTICIPATING_PRS));
      response = (RemoveIndexesMessage.RemoveIndexesResponse) RemoveIndexesMessage.send(this, null, true);
      if (null != response) {
        response.waitForResults();
        logger.info(LocalizedMessage.create(LocalizedStrings.PartitionedRegion_DONE_WATING_FOR_REMOVE_INDEX));
        if (logger.isDebugEnabled()) {
          logger.debug("Total number of buckets which removed indexes , locally : {} and remotely removed : {} and the total number of remote buckets : {}", numBuckets, response.getRemoteRemovedIndexes(), response.getTotalRemoteBuckets());
        }
      }
    }
    // Finally drop the PR-level index definitions.
    this.indexManager.removeIndexes();
    return numBuckets;
  } finally // outer try block
  {
    // Always clear the index registry, even if bucket removal or messaging failed.
    this.indexes.clear();
  }
}
Use of org.apache.geode.cache.query.internal.index.IndexManager in the Apache Geode project.
Excerpt: class LimitClauseJUnitTest, method tearDown.
@After
public void tearDown() throws Exception {
  // Close the cache first, then drop any indexes still registered on the test region.
  CacheUtils.closeCache();
  final IndexManager regionIndexManager = ((LocalRegion) region).getIndexManager();
  if (regionIndexManager != null) {
    regionIndexManager.destroy();
  }
}
Aggregations