Example usage of org.apache.geode.internal.offheap.annotations.Retained in the Apache Geode project: class DistributedRemoveAllOperation, method getEventForPosition.
/**
 * Returns the entry event for the given position in {@code removeAllData}, creating and caching
 * it on first access.
 *
 * <p>The returned event is {@code @Unretained} from the caller's point of view: once the new
 * event is stored into {@code entry.event}, ownership belongs to {@code this.removeAllData} and
 * the caller must not release it.
 *
 * @param position index into {@code this.removeAllData}
 * @return the cached or newly created event, or {@code null} if there is no entry at
 *         {@code position}
 */
@Unretained
public EntryEventImpl getEventForPosition(int position) {
  RemoveAllEntryData entry = this.removeAllData[position];
  if (entry == null) {
    return null;
  }
  // Return the previously built event for this position, if any.
  if (entry.event != null) {
    return entry.event;
  }
  LocalRegion region = (LocalRegion) this.event.getRegion();
  // owned by this.removeAllData once entry.event = ev is done
  @Retained EntryEventImpl ev = EntryEventImpl.create(region, entry.getOp(), entry.getKey(), null, /* value */
      this.event.getCallbackArgument(), false, /* originRemote */
      this.event.getDistributedMember(), this.event.isGenerateCallbacks(), entry.getEventID());
  boolean returnedEv = false;
  try {
    ev.setPossibleDuplicate(entry.isPossibleDuplicate());
    ev.setIsRedestroyedEntry(entry.getRedestroyedEntry());
    if (entry.versionTag != null && region.concurrencyChecksEnabled) {
      // Canonicalize the tag's member id against the region's version vector so identical
      // members share one instance.
      VersionSource id = entry.versionTag.getMemberID();
      if (id != null) {
        entry.versionTag.setMemberID(ev.getRegion().getVersionVector().getCanonicalId(id));
      }
      ev.setVersionTag(entry.versionTag);
    }
    // From this point on the event is owned by removeAllData; set returnedEv so the finally
    // block does not release it even if a later setter throws.
    entry.event = ev;
    returnedEv = true;
    ev.setOldValue(entry.getOldValue());
    CqService cqService = region.getCache().getCqService();
    // CQ processing may need an old value even when the operation did not capture one.
    if (cqService.isRunning() && !entry.getOp().isCreate() && !ev.hasOldValue()) {
      ev.setOldValueForQueryProcessing();
    }
    ev.setInvokePRCallbacks(!entry.isNotifyOnly());
    if (getBaseEvent().getContext() != null) {
      ev.setContext(getBaseEvent().getContext());
    }
    ev.callbacksInvoked(entry.isCallbacksInvoked());
    ev.setTailKey(entry.getTailKey());
    return ev;
  } finally {
    // Release the retained event if ownership was never transferred (an exception occurred
    // before entry.event was assigned); otherwise the off-heap reference would leak.
    if (!returnedEv) {
      ev.release();
    }
  }
}
Example usage of org.apache.geode.internal.offheap.annotations.Retained in the Apache Geode project: class TXRmtEvent, method createEvent.
/**
 * Creates a {@code @Retained} entry event for this remote transaction event. The caller owns the
 * returned event and is responsible for releasing it.
 *
 * <p>For a bucket region the event is created against the enclosing partitioned region, since
 * that is the region callbacks are delivered for.
 *
 * @param re the region entry whose in-VM value becomes the event's old value
 * @return a retained event; the caller must release it
 */
@Retained
private EntryEventImpl createEvent(LocalRegion r, Operation op, RegionEntry re, Object key, Object newValue, Object aCallbackArgument) {
  DistributedMember originator = ((TXId) this.txId).getMemberId();
  // TODO:ASIF :EventID will not be generated with this constructor . Check if
  // this is correct
  LocalRegion eventRegion = r;
  if (r.isUsedForPartitionedRegionBucket()) {
    eventRegion = r.getPartitionedRegion();
  }
  @Retained EntryEventImpl event = // callbackArg
      EntryEventImpl.create(// callbackArg
          eventRegion, // callbackArg
          op, // callbackArg
          key, // callbackArg
          newValue, // callbackArg
          aCallbackArgument, // originRemote
          true, originator);
  // If any of the setters below throws, the retained event must be released here or its
  // off-heap reference leaks (same returnedEv/finally pattern as
  // DistributedRemoveAllOperation.getEventForPosition).
  boolean returnedEvent = false;
  try {
    // OFFHEAP: copy into heap cd
    event.setOldValue(re.getValueInVM(r));
    event.setTransactionId(getTransactionId());
    returnedEvent = true;
    return event;
  } finally {
    if (!returnedEvent) {
      event.release();
    }
  }
}
Example usage of org.apache.geode.internal.offheap.annotations.Retained in the Apache Geode project: class SearchLoadAndWriteProcessor, method getEventForListener.
/**
 * Returns an event suitable for listener notification. The event's operation may be rewritten to
 * conform to the ConcurrentMap implementation specification (REPLACE becomes UPDATE,
 * PUT_IF_ABSENT becomes CREATE, REMOVE becomes DESTROY). If the returned value is not == to the
 * event parameter then the caller is responsible for releasing it.
 *
 * @param event the original event
 * @return the original event, or a copy carrying an adjusted operation
 */
@Retained
private CacheEvent getEventForListener(CacheEvent event) {
  Operation op = event.getOperation();
  if (!op.isEntry()) {
    return event;
  }
  EntryEventImpl source = (EntryEventImpl) event;
  @Retained EntryEventImpl outgoing = source;
  if (source.isSingleHop()) {
    // fix for bug #46130 - origin remote incorrect for one-hop operation in receiver
    outgoing = new EntryEventImpl(source);
    outgoing.setOriginRemote(true);
    // because that's what the sender would use in notifying listeners. bug #46955
    if (outgoing.getOperation().isUpdate() && outgoing.getTransactionId() == null) {
      outgoing.makeCreate();
    }
  }
  // Map ConcurrentMap operations onto the plain operations listeners expect.
  Operation listenerOp = null;
  if (op == Operation.REPLACE) {
    listenerOp = Operation.UPDATE;
  } else if (op == Operation.PUT_IF_ABSENT) {
    listenerOp = Operation.CREATE;
  } else if (op == Operation.REMOVE) {
    listenerOp = Operation.DESTROY;
  }
  if (listenerOp != null) {
    if (outgoing == source) {
      // Copy before mutating so the caller's event is left untouched.
      outgoing = new EntryEventImpl(source);
    }
    outgoing.setOperation(listenerOp);
  }
  return outgoing;
}
Example usage of org.apache.geode.internal.offheap.annotations.Retained in the Apache Geode project: class AbstractRegionMap, method initialImagePut.
/**
 * Installs a value received during getInitialImage (GII) or region synchronization into this map.
 *
 * <p>Creates a placeholder entry, races it into the map with putEntryIfAbsent, and then either
 * updates the already-present entry (under its monitor, retrying if it is concurrently removed)
 * or initializes the new entry. Version tags are processed for concurrency checking; tombstones
 * are (un)scheduled and region size statistics are maintained accordingly. Index updates are
 * applied when an IndexManager is present.
 *
 * @param key the entry key
 * @param lastModified last-modified time supplied by the image provider
 * @param newValue the incoming value; may be {@code Token.TOMBSTONE}
 * @param wasRecovered whether the local entry was recovered from disk
 * @param deferLRUCallback if true, skip the LRU update callback here (caller will run it)
 * @param entryVersion version tag accompanying the image entry, or null
 * @param sender the member that provided the image
 * @param isSynchronizing whether this put is part of region synchronization rather than GII
 * @return true if the value was installed, false if it was rejected (e.g. concurrent
 *         modification, reaped tombstone, or missing HA message)
 */
public boolean initialImagePut(final Object key, final long lastModified, Object newValue, final boolean wasRecovered, boolean deferLRUCallback, VersionTag entryVersion, InternalDistributedMember sender, boolean isSynchronizing) {
  boolean result = false;
  boolean done = false;
  boolean cleared = false;
  final LocalRegion owner = _getOwner();
  // Without concurrency checks there is no tombstone bookkeeping, so an incoming tombstone
  // carries no information and is dropped.
  if (newValue == Token.TOMBSTONE && !owner.getConcurrencyChecksEnabled()) {
    return false;
  }
  if (owner instanceof HARegion && newValue instanceof CachedDeserializable) {
    Object actualVal = ((CachedDeserializable) newValue).getDeserializedValue(null, null);
    if (actualVal instanceof HAEventWrapper) {
      HAEventWrapper haEventWrapper = (HAEventWrapper) actualVal;
      // Key was removed at sender side so not putting it into the HARegion
      if (haEventWrapper.getClientUpdateMessage() == null) {
        return false;
      }
      // Getting the instance from singleton CCN..This assumes only one bridge
      // server in the VM
      HAContainerWrapper haContainer = (HAContainerWrapper) CacheClientNotifier.getInstance().getHaContainer();
      if (haContainer == null) {
        return false;
      }
      HAEventWrapper original = null;
      // synchronized (haContainer) {
      do {
        ClientUpdateMessageImpl oldMsg = (ClientUpdateMessageImpl) haContainer.putIfAbsent(haEventWrapper, haEventWrapper.getClientUpdateMessage());
        if (oldMsg != null) {
          // Another wrapper for this event already exists in the container; try to share it.
          original = (HAEventWrapper) haContainer.getKey(haEventWrapper);
          if (original == null) {
            // The existing wrapper vanished between putIfAbsent and getKey; retry.
            continue;
          }
          synchronized (original) {
            // Re-check membership under the wrapper's lock before taking a reference, since it
            // may have been removed concurrently.
            if ((HAEventWrapper) haContainer.getKey(original) != null) {
              original.incAndGetReferenceCount();
              HARegionQueue.addClientCQsAndInterestList(oldMsg, haEventWrapper, haContainer, owner.getName());
              haEventWrapper.setClientUpdateMessage(null);
              newValue = CachedDeserializableFactory.create(original, ((CachedDeserializable) newValue).getSizeInBytes());
            } else {
              // Lost the race; loop again.
              original = null;
            }
          }
        } else {
          // putIfAbsent successful
          synchronized (haEventWrapper) {
            haEventWrapper.incAndGetReferenceCount();
            haEventWrapper.setHAContainer(haContainer);
            haEventWrapper.setClientUpdateMessage(null);
            haEventWrapper.setIsRefFromHAContainer(true);
          }
          break;
        }
        // try until we either get a reference to HAEventWrapper from
        // HAContainer or successfully put one into it.
      } while (original == null);
      /*
       * entry = (Map.Entry)haContainer.getEntry(haEventWrapper); if (entry != null) { original =
       * (HAEventWrapper)entry.getKey(); original.incAndGetReferenceCount(); } else {
       * haEventWrapper.incAndGetReferenceCount(); haEventWrapper.setHAContainer(haContainer);
       * haContainer.put(haEventWrapper, haEventWrapper .getClientUpdateMessage());
       * haEventWrapper.setClientUpdateMessage(null);
       * haEventWrapper.setIsRefFromHAContainer(true); } } if (entry != null) {
       * HARegionQueue.addClientCQsAndInterestList(entry, haEventWrapper, haContainer,
       * owner.getName()); haEventWrapper.setClientUpdateMessage(null); newValue =
       * CachedDeserializableFactory.create(original,
       * ((CachedDeserializable)newValue).getSizeInBytes()); }
       */
    }
  }
  try {
    // Placeholder entry; becomes real only if initialImageInit succeeds below.
    RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.REMOVED_PHASE1);
    // NOTE(review): 'event' and 'oldValue' are declared but never assigned anywhere in this
    // method, so the release guards in the finally blocks below appear to be dead code —
    // confirm against file history before relying on them.
    EntryEventImpl event = null;
    @Retained @Released Object oldValue = null;
    try {
      RegionEntry oldRe = null;
      synchronized (newRe) {
        try {
          oldRe = putEntryIfAbsent(key, newRe);
          // Retry loop: an existing entry may be concurrently removed (phase 2) while we wait
          // for its monitor, in which case we remove the stale mapping and try again.
          while (!done && oldRe != null) {
            synchronized (oldRe) {
              if (oldRe.isRemovedPhase2()) {
                owner.getCachePerfStats().incRetries();
                _getMap().remove(key, oldRe);
                oldRe = putEntryIfAbsent(key, newRe);
              } else {
                boolean acceptedVersionTag = false;
                if (entryVersion != null && owner.concurrencyChecksEnabled) {
                  Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
                  try {
                    boolean isTombstone = (newValue == Token.TOMBSTONE);
                    // don't reschedule the tombstone if it hasn't changed
                    boolean isSameTombstone = oldRe.isTombstone() && isTombstone && oldRe.getVersionStamp().asVersionTag().equals(entryVersion);
                    if (isSameTombstone) {
                      return true;
                    }
                    processVersionTagForGII(oldRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
                    acceptedVersionTag = true;
                  } catch (ConcurrentCacheModificationException e) {
                    // A newer local modification wins; reject the image value.
                    return false;
                  }
                }
                final boolean oldIsTombstone = oldRe.isTombstone();
                // Capture the old size before the put so size deltas can be applied below.
                final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
                try {
                  result = oldRe.initialImagePut(owner, lastModified, newValue, wasRecovered, acceptedVersionTag);
                  if (result) {
                    if (oldIsTombstone) {
                      owner.unscheduleTombstone(oldRe);
                      if (newValue != Token.TOMBSTONE) {
                        lruEntryCreate(oldRe);
                      } else {
                        lruEntryUpdate(oldRe);
                      }
                    }
                    if (newValue == Token.TOMBSTONE) {
                      owner.updateSizeOnRemove(key, oldSize);
                      if (owner.getServerProxy() == null && owner.getVersionVector().isTombstoneTooOld(entryVersion.getMemberID(), entryVersion.getRegionVersion())) {
                        // the received tombstone has already been reaped, so don't retain it
                        removeTombstone(oldRe, entryVersion, false, false);
                        return false;
                      } else {
                        owner.scheduleTombstone(oldRe, entryVersion);
                        lruEntryDestroy(oldRe);
                      }
                    } else {
                      int newSize = owner.calculateRegionEntryValueSize(oldRe);
                      if (!oldIsTombstone) {
                        owner.updateSizeOnPut(key, oldSize, newSize);
                      } else {
                        owner.updateSizeOnCreate(key, newSize);
                      }
                      EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
                    }
                  }
                  if (owner.getIndexManager() != null) {
                    // as the update could not locate the old key
                    if (!oldRe.isRemoved()) {
                      owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
                    }
                    owner.getIndexManager().updateIndexes(oldRe, oldRe.isRemoved() ? IndexManager.ADD_ENTRY : IndexManager.UPDATE_ENTRY, oldRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
                  }
                  done = true;
                } finally {
                  if (event != null) {
                    event.release();
                    event = null;
                  }
                }
              }
            }
          }
          // No pre-existing entry: initialize the placeholder we put into the map.
          if (!done) {
            boolean versionTagAccepted = false;
            if (entryVersion != null && owner.concurrencyChecksEnabled) {
              Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
              try {
                boolean isTombstone = (newValue == Token.TOMBSTONE);
                processVersionTagForGII(newRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
                versionTagAccepted = true;
              } catch (ConcurrentCacheModificationException e) {
                return false;
              }
            }
            result = newRe.initialImageInit(owner, lastModified, newValue, true, wasRecovered, versionTagAccepted);
            try {
              if (result) {
                if (newValue == Token.TOMBSTONE) {
                  owner.scheduleTombstone(newRe, entryVersion);
                } else {
                  owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(newRe));
                  EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
                  lruEntryCreate(newRe);
                }
                incEntryCount(1);
              }
              // Update local indexes
              if (owner.getIndexManager() != null) {
                // the update could not locate the old key
                if (oldRe != null && !oldRe.isRemoved()) {
                  owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
                }
                owner.getIndexManager().updateIndexes(newRe, newRe.isRemoved() ? IndexManager.REMOVE_ENTRY : IndexManager.UPDATE_ENTRY, newRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
              }
              done = true;
            } finally {
              if (event != null) {
                event.release();
                event = null;
              }
            }
          }
        } finally {
          if (done && result) {
            initialImagePutEntry(newRe);
          }
          // If the put did not complete, back out the placeholder and any index changes.
          if (!done) {
            removeEntry(key, newRe, false);
            if (owner.getIndexManager() != null) {
              owner.getIndexManager().updateIndexes(newRe, IndexManager.REMOVE_ENTRY, IndexProtocol.OTHER_OP);
            }
          }
        }
      }
      // synchronized
    } finally {
      if (event != null)
        event.release();
      OffHeapHelper.release(oldValue);
    }
  } catch (RegionClearedException rce) {
    // Asif: do not issue any sort of callbacks
    done = false;
    cleared = true;
  } catch (QueryException qe) {
    done = false;
    cleared = true;
  } finally {
    if (done && !deferLRUCallback) {
      lruUpdateCallback();
    } else if (!cleared) {
      resetThreadLocals();
    }
  }
  return result;
}
Example usage of org.apache.geode.internal.offheap.annotations.Retained in the Apache Geode project: class AbstractRegionMap, method setOldValueInEvent.
// PRECONDITION: caller must be synced on re
/**
 * Populates the event's old value from the region entry.
 *
 * <p>When an old value is required (cache writer present, explicitly requested, or the
 * operation guarantees one), it is fetched — possibly faulting in from disk for
 * guaranteed-old-value operations — and released after being handed to the event. Otherwise
 * the old value is only set when the in-memory value is a GatewaySenderEventImpl.
 */
private void setOldValueInEvent(EntryEventImpl event, RegionEntry re, boolean cacheWrite, boolean requireOldValue) {
  final boolean needOldValue = cacheWrite || requireOldValue || event.getOperation().guaranteesOldValue();
  if (!needOldValue) {
    // if the old value is in memory then if it is a GatewaySenderEventImpl then
    // we want to set the old value.
    @Unretained Object // OFFHEAP _getValue is ok since re is synced and we only use it
    heapValue = re._getValue();
    // we don't need to worry about heapValue being compressed.
    if (heapValue instanceof GatewaySenderEventImpl) {
      event.setOldValue(heapValue, true);
    }
    return;
  }
  if (event.getOperation().guaranteesOldValue()) {
    // In these cases we want to even get the old value from disk if it is not in memory
    ReferenceCountHelper.skipRefCountTracking();
    @Released Object diskOrVmValue = re.getValueOffHeapOrDiskWithoutFaultIn(event.getLocalRegion());
    ReferenceCountHelper.unskipRefCountTracking();
    try {
      event.setOldValue(diskOrVmValue, needOldValue);
    } finally {
      // Balance the retain from getValueOffHeapOrDiskWithoutFaultIn.
      OffHeapHelper.releaseWithNoTracking(diskOrVmValue);
    }
  } else {
    // In these cases only need the old value if it is in memory
    ReferenceCountHelper.skipRefCountTracking();
    @Retained @Released Object // OFFHEAP: re
    inVmValue = re._getValueRetain(event.getLocalRegion(), true);
    // synced so can use
    // its ref.
    ReferenceCountHelper.unskipRefCountTracking();
    try {
      event.setOldValue(inVmValue, needOldValue);
    } finally {
      OffHeapHelper.releaseWithNoTracking(inVmValue);
    }
  }
}
Aggregations