Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.
In class LocalRegion, method basicBridgePutAll.
/**
* Called on a bridge server when it has received a putAll command from a client.
*
* @param map a map of key->value for the entries we are putting
* @param retryVersions a map of key->version tag. If any of the entries are the result of a
* retried client event, we need to make sure we send the original version tag along with
* the event.
* @param memberId the id of the client performing the operation
* @param eventId the event id generated by the client for this operation
* @param skipCallbacks true if callbacks should not be generated for the operation
* @param callbackArg callback argument from client
*/
public VersionedObjectList basicBridgePutAll(Map map, Map<Object, VersionTag> retryVersions,
    ClientProxyMembershipID memberId, EventID eventId, boolean skipCallbacks, Object callbackArg)
    throws TimeoutException, CacheWriterException {
long startPut = CachePerfStats.getStatTime();
if (isGatewaySenderEnabled()) {
callbackArg = new GatewaySenderEventCallbackArgument(callbackArg);
}
@Released
final EntryEventImpl event = EntryEventImpl.create(this, Operation.PUTALL_CREATE, null,
    null /* new value */, callbackArg, false /* origin remote */,
    memberId.getDistributedMember(), !skipCallbacks /* generateCallbacks */, eventId);
try {
event.setContext(memberId);
DistributedPutAllOperation putAllOp = new DistributedPutAllOperation(event, map.size(), true);
try {
VersionedObjectList result = basicPutAll(map, putAllOp, retryVersions);
getCachePerfStats().endPutAll(startPut);
return result;
} finally {
putAllOp.freeOffHeapResources();
}
} finally {
event.release();
}
}
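The point this example illustrates is the contract behind @Released: whoever holds the annotated reference must decrement its off-heap reference count on every code path, which is why the event is created, used, and released in a finally block. Below is a minimal, self-contained sketch of that discipline; RefCounted and ReleasedPatternSketch are hypothetical stand-ins for illustration, not Geode classes.

// Hypothetical stand-in for an off-heap-backed object such as EntryEventImpl.
// This illustrates the discipline that @Released documents, not Geode's actual API.
final class RefCounted {
  private int refCount = 1; // the creator owns one reference

  void use() {
    if (refCount <= 0) {
      throw new IllegalStateException("used after release");
    }
  }

  void release() { // analogous to EntryEventImpl.release()
    refCount--;
  }
}

final class ReleasedPatternSketch {
  static void doWork() {
    final RefCounted event = new RefCounted(); // would carry @Released in Geode
    try {
      event.use(); // the equivalent of the basicPutAll work above
    } finally {
      event.release(); // released on every path, exactly as basicBridgePutAll does
    }
  }
}

The same shape recurs in every example on this page: creation and release are paired inside one method so the balance of the reference count can be verified locally.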
Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.
In class LocalRegion, method findObjectInSystem.
/**
*
* Search for the value in a server (if one exists), then try a loader.
*
* If we find a value, we put it in the cache.
*
* @param preferCD return the CachedDeserializable, if that is what the value is
* @param requestingClient the client making the request, if any
* @param clientEvent the client's event, if any; if not null, the version tag of the result is set into it
* @return the deserialized value
*/
protected Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate, TXStateInterface tx,
    boolean generateCallbacks, Object localValue, boolean disableCopyOnRead, boolean preferCD,
    ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones)
    throws CacheLoaderException, TimeoutException {
final Object key = keyInfo.getKey();
final Object aCallbackArgument = keyInfo.getCallbackArg();
Object value = null;
boolean fromServer = false;
VersionTagHolder holder = null;
/*
* First lets try the server
*/
ServerRegionProxy mySRP = getServerProxy();
if (mySRP != null) {
holder = new VersionTagHolder();
value = mySRP.get(key, aCallbackArgument, holder);
fromServer = value != null;
}
/*
* If we didn't get anything from the server, try the loader
*/
if (!fromServer || value == Token.TOMBSTONE) {
// copy into local var to prevent race condition
CacheLoader loader = basicGetLoader();
if (loader != null) {
final LoaderHelper loaderHelper = this.loaderHelperFactory.createLoaderHelper(key,
    aCallbackArgument, false /* netSearchAllowed */, true /* netloadAllowed */, null);
CachePerfStats stats = getCachePerfStats();
long statStart = stats.startLoad();
try {
value = loader.load(loaderHelper);
fromServer = false;
} finally {
stats.endLoad(statStart);
}
}
}
// don't hold on to a tombstone from the server unless we have concurrency checks enabled
if (fromServer && value == Token.TOMBSTONE && !this.concurrencyChecksEnabled) {
value = null;
}
/*
* If we got a value back, let's put it in the cache.
*/
RegionEntry re = null;
if (value != null && !isMemoryThresholdReachedForLoad()) {
long startPut = CachePerfStats.getStatTime();
validateKey(key);
Operation op;
if (isCreate) {
op = Operation.LOCAL_LOAD_CREATE;
} else {
op = Operation.LOCAL_LOAD_UPDATE;
}
@Released EntryEventImpl event = EntryEventImpl.create(this, op, key, value, aCallbackArgument, false, getMyId(), generateCallbacks);
try {
// don't put an invalid entry into the cache if there's already one there with the same version
if (fromServer) {
if (alreadyInvalid(key, event)) {
return null;
}
event.setFromServer(fromServer);
event.setVersionTag(holder.getVersionTag());
if (clientEvent != null) {
clientEvent.setVersionTag(holder.getVersionTag());
}
}
// a value that did not come from the server needs a new event id, e.g. for propagating the value to the server
if (!fromServer) {
event.setNewEventId(this.cache.getDistributedSystem());
}
try {
try {
re = basicPutEntry(event, 0L);
if (!fromServer && clientEvent != null) {
clientEvent.setVersionTag(event.getVersionTag());
clientEvent.isConcurrencyConflict(event.isConcurrencyConflict());
}
if (fromServer && event.getRawNewValue() == Token.TOMBSTONE) {
// tombstones are destroyed entries
return null;
}
} catch (ConcurrentCacheModificationException ignore) {
// this means the value attempted to overwrite a newer modification and was rejected
if (logger.isDebugEnabled()) {
logger.debug("caught concurrent modification attempt when applying {}", event);
}
notifyBridgeClients(event);
}
if (!getDataView().isDeferredStats()) {
getCachePerfStats().endPut(startPut, event.isOriginRemote());
}
} catch (CacheWriterException cwe) {
if (logger.isDebugEnabled()) {
logger.debug("findObjectInSystem: writer exception putting entry {}", event, cwe);
}
}
} finally {
event.release();
}
}
if (isCreate) {
recordMiss(re, key);
}
return value;
}
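Stripped of versioning and statistics, findObjectInSystem is a two-tier lookup: consult the server if one exists, fall back to a loader, and cache whatever was found. The sketch below condenses that control flow; TwoTierLookup and its fetch functions are hypothetical stand-ins for ServerRegionProxy.get, CacheLoader.load, and basicPutEntry.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Hypothetical condensation of the server-then-loader lookup in findObjectInSystem.
final class TwoTierLookup<K, V> {
  private final Map<K, V> localCache = new ConcurrentHashMap<>();
  private final Function<K, V> server; // stand-in for ServerRegionProxy.get(); may be null
  private final Function<K, V> loader; // stand-in for CacheLoader.load(); may be null

  TwoTierLookup(Function<K, V> server, Function<K, V> loader) {
    this.server = server;
    this.loader = loader;
  }

  V find(K key) {
    V value = null;
    boolean fromServer = false;
    if (server != null) { // first try the server, as with mySRP.get(...)
      value = server.apply(key);
      fromServer = value != null;
    }
    if (!fromServer && loader != null) { // nothing from the server, try the loader
      value = loader.apply(key);
    }
    if (value != null) { // we got a value back, put it in the cache
      localCache.put(key, value);
    }
    return value;
  }
}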
Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.
In class Oplog, method getBytesAndBitsForCompaction.
/**
 * Retrieves the value for an entry being compacted, provided the entry still references the
 * oplog being compacted. An attempt is made to retrieve the value from memory, if available;
 * else from the async buffers (if async mode is enabled); else from the Oplog being compacted.
 * It is invoked from switchOplog as well as from OplogCompactor's compact function.
 *
 * @param dr the DiskRegionView the entry belongs to
 * @param entry the DiskEntry being compacted that references the Oplog being compacted
 * @param wrapper a BytesAndBitsForCompactor into which the data, if found, is set, along with
 *        the user bits associated with the entry
 * @return false if the entry need not be compacted; true if the wrapper has been
 *         appropriately filled with data
 */
private boolean getBytesAndBitsForCompaction(DiskRegionView dr, DiskEntry entry,
    BytesAndBitsForCompactor wrapper) {
// caller is synced on did
DiskId did = entry.getDiskId();
byte userBits = 0;
long oplogOffset = did.getOffsetInOplog();
ReferenceCountHelper.skipRefCountTracking();
@Retained @Released Object value = entry._getValueRetain(dr, true);
ReferenceCountHelper.unskipRefCountTracking();
boolean foundData = false;
if (value == null) {
// if the mode is synchronous, the value is guaranteed to be present on disk
foundData = basicGetForCompactor(dr, oplogOffset, false, did.getValueLength(), did.getUserBits(), wrapper);
// the caller is synced on did, so it is impossible for this oplogId to change
if (did.getOplogId() != getOplogId()) {
// if it is not then no need to compact it
return false;
} else {
// the entry still references this oplog, so we should have found data
assert foundData : "compactor get failed on oplog#" + getOplogId();
}
userBits = wrapper.getBits();
if (EntryBits.isAnyInvalid(userBits)) {
if (EntryBits.isInvalid(userBits)) {
wrapper.setData(DiskEntry.INVALID_BYTES, userBits, DiskEntry.INVALID_BYTES.length, false);
} else {
wrapper.setData(DiskEntry.LOCAL_INVALID_BYTES, userBits, DiskEntry.LOCAL_INVALID_BYTES.length, false);
}
} else if (EntryBits.isTombstone(userBits)) {
wrapper.setData(DiskEntry.TOMBSTONE_BYTES, userBits, DiskEntry.TOMBSTONE_BYTES.length, false);
}
if (EntryBits.isWithVersions(did.getUserBits())) {
userBits = EntryBits.setWithVersions(userBits, true);
}
} else {
foundData = true;
userBits = 0;
if (EntryBits.isRecoveredFromDisk(did.getUserBits())) {
userBits = EntryBits.setRecoveredFromDisk(userBits, true);
}
if (EntryBits.isWithVersions(did.getUserBits())) {
userBits = EntryBits.setWithVersions(userBits, true);
}
// the value was found in memory; we (the compactor) are writing the value out to disk
if (value == Token.INVALID) {
userBits = EntryBits.setInvalid(userBits, true);
wrapper.setData(DiskEntry.INVALID_BYTES, userBits, DiskEntry.INVALID_BYTES.length, false);
} else if (value == Token.LOCAL_INVALID) {
userBits = EntryBits.setLocalInvalid(userBits, true);
wrapper.setData(DiskEntry.LOCAL_INVALID_BYTES, userBits, DiskEntry.LOCAL_INVALID_BYTES.length, false);
} else if (value == Token.TOMBSTONE) {
userBits = EntryBits.setTombstone(userBits, true);
wrapper.setData(DiskEntry.TOMBSTONE_BYTES, userBits, DiskEntry.TOMBSTONE_BYTES.length, false);
} else if (value instanceof CachedDeserializable) {
CachedDeserializable proxy = (CachedDeserializable) value;
if (proxy instanceof StoredObject) {
@Released StoredObject ohproxy = (StoredObject) proxy;
try {
ohproxy.fillSerializedValue(wrapper, userBits);
} finally {
OffHeapHelper.releaseWithNoTracking(ohproxy);
}
} else {
userBits = EntryBits.setSerialized(userBits, true);
proxy.fillSerializedValue(wrapper, userBits);
}
} else if (value instanceof byte[]) {
byte[] valueBytes = (byte[]) value;
// If the value is already a byte array then the serialized user bit
// stays 0, the default value of the userBits variable, marking the
// data as non-serialized. The bytes are therefore used as-is and are
// not deserialized into an object.
wrapper.setData(valueBytes, userBits, valueBytes.length, false);
} else if (Token.isRemoved(value) && value != Token.TOMBSTONE) {
// TODO - RVV - We need to handle tombstones differently here!
if (entry.getDiskId().isPendingAsync()) {
entry.getDiskId().setPendingAsync(false);
try {
getOplogSet().getChild().basicRemove(dr, entry, false, false);
} catch (IOException ex) {
getParent().getCancelCriterion().checkCancelInProgress(ex);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0.toLocalizedString(this.diskFile.getPath()), ex, dr.getName());
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
getParent().getCache().getCancelCriterion().checkCancelInProgress(ie);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0_DUE_TO_FAILURE_IN_ACQUIRING_READ_LOCK_FOR_ASYNCH_WRITING.toLocalizedString(this.diskFile.getPath()), ie, dr.getName());
}
} else {
rmLive(dr, entry);
}
foundData = false;
} else {
userBits = EntryBits.setSerialized(userBits, true);
EntryEventImpl.fillSerializedValue(wrapper, value, userBits);
}
}
if (foundData) {
// since the compactor is writing it out clear the async flag
entry.getDiskId().setPendingAsync(false);
}
return foundData;
}
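Most branches above fold token and encoding information into a single byte of user bits through calls like EntryBits.setSerialized(userBits, true). The sketch below shows that style of flag packing in isolation; the UserBits class and its mask values are made up for illustration and do not match Geode's actual bit assignments.

// Hypothetical byte-sized flag packing in the style of EntryBits.
// The mask values are illustrative only.
final class UserBits {
  static final byte SERIALIZED = 0x1;
  static final byte INVALID = 0x2;
  static final byte LOCAL_INVALID = 0x4;
  static final byte TOMBSTONE = 0x8;

  static byte set(byte bits, byte flag, boolean on) {
    return on ? (byte) (bits | flag) : (byte) (bits & ~flag);
  }

  static boolean isSet(byte bits, byte flag) {
    return (bits & flag) != 0;
  }

  public static void main(String[] args) {
    byte userBits = 0;
    userBits = set(userBits, SERIALIZED, true); // like EntryBits.setSerialized(userBits, true)
    System.out.println(isSet(userBits, SERIALIZED)); // true
    System.out.println(isSet(userBits, TOMBSTONE)); // false
  }
}

Packing the flags into one byte keeps the per-entry metadata small, which is presumably why the compaction code threads a single userBits byte through every branch.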
Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.
In class LocalRegion, method invalidateAllEntries.
/**
* @param rgnEvent the RegionEvent for region invalidation
*/
protected void invalidateAllEntries(RegionEvent rgnEvent) {
Operation operation = Operation.LOCAL_INVALIDATE;
if (rgnEvent.getOperation().isDistributed()) {
operation = Operation.INVALIDATE;
}
// this is a region-level operation, so it is ok to ignore tx state
for (Object keyObject : keySet()) {
try {
// EventID will not be generated by this constructor
@Released EntryEventImpl event = EntryEventImpl.create(this, operation, keyObject, null, null, rgnEvent.isOriginRemote(), rgnEvent.getDistributedMember());
try {
event.setLocalInvalid(!rgnEvent.getOperation().isDistributed());
basicInvalidate(event, false);
} finally {
event.release();
}
} catch (EntryNotFoundException ignore) {
// ignore
}
}
}
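The loop above applies the create/release discipline once per key: each iteration gets its own @Released event, releases it in a finally block before moving on, and tolerates entries that disappear concurrently. A compact, self-contained sketch of that per-iteration shape, with hypothetical Event and createEvent stand-ins:

import java.util.List;
import java.util.NoSuchElementException;

// Hypothetical sketch of the per-key create/release loop in invalidateAllEntries.
final class PerKeyEventLoop {
  interface Event { // stand-in for EntryEventImpl
    void release();
  }

  static void invalidateAll(List<String> keys) {
    for (String key : keys) {
      try {
        Event event = createEvent(key); // stand-in for EntryEventImpl.create(...)
        try {
          invalidate(key, event); // stand-in for basicInvalidate(event, false)
        } finally {
          event.release(); // one release per iteration, never skipped
        }
      } catch (NoSuchElementException ignore) {
        // entry vanished concurrently; analogous to ignoring EntryNotFoundException
      }
    }
  }

  private static Event createEvent(String key) {
    return () -> { /* free per-event resources here */ };
  }

  private static void invalidate(String key, Event event) {
    // a real implementation would apply the invalidation; this sketch does nothing
  }
}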
Use of org.apache.geode.internal.offheap.annotations.Released in project geode by apache.
In class PutAllPRMessage, method doLocalPutAll.
/**
 * This method is called both by operateOnPartitionedRegion() when processing a remote msg and by
 * sendMsgByBucket() when processing a msg targeted to the local JVM. Note: it is very important
 * that this message does NOT cause any deadlocks, as the sender will wait indefinitely for the
 * acknowledgment.
 *
 * @param r the PartitionedRegion to operate on
 * @param eventSender the endpoint server that received the request from the client
 * @param lastModified the timestamp of the last modification
 * @return true if the operation succeeds; otherwise an exception is thrown
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings("IMSE_DONT_CATCH_IMSE")
public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember eventSender,
    long lastModified) throws EntryExistsException, ForceReattemptException, DataLocationException {
boolean didPut = false;
long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
if (r.hasServerProxy()) {
clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
if (logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage: doLocalPutAll: clientReadTimeOut is {}", clientReadTimeOut);
}
}
DistributedPutAllOperation dpao = null;
@Released EntryEventImpl baseEvent = null;
BucketRegion bucketRegion = null;
PartitionedRegionDataStore ds = r.getDataStore();
InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
try {
if (!notificationOnly) {
// bucketRegion is not null only when !notificationOnly
bucketRegion = ds.getInitializedBucketForId(null, bucketId);
this.versions = new VersionedObjectList(this.putAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
// create a base event and a DPAO for the PutAllMessage distributed between redundant buckets
baseEvent = EntryEventImpl.create(bucketRegion, Operation.PUTALL_CREATE, null, null, this.callbackArg, true, eventSender, !skipCallbacks, true);
// set baseEventId to the first entry's event id. We need the thread id for DACE
baseEvent.setEventId(putAllPRData[0].getEventID());
if (this.bridgeContext != null) {
baseEvent.setContext(this.bridgeContext);
}
baseEvent.setPossibleDuplicate(this.posDup);
if (logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage.doLocalPutAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
}
dpao = new DistributedPutAllOperation(baseEvent, putAllPRDataSize, false);
}
// Fix the updateMsg misorder issue
// Lock the keys when doing postPutAll
Object[] keys = new Object[putAllPRDataSize];
for (int i = 0; i < putAllPRDataSize; ++i) {
keys[i] = putAllPRData[i].getKey();
}
if (!notificationOnly) {
try {
if (putAllPRData.length > 0) {
if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
if (logger.isDebugEnabled()) {
logger.debug("attempting to locate version tags for retried event");
}
// try to recover the version tags that were assigned during the previous attempt
for (int i = 0; i < putAllPRDataSize; i++) {
if (putAllPRData[i].versionTag == null) {
putAllPRData[i].versionTag = bucketRegion.findVersionTagForClientBulkOp(putAllPRData[i].getEventID());
if (putAllPRData[i].versionTag != null) {
putAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
}
}
}
}
EventID eventID = putAllPRData[0].getEventID();
ThreadIdentifier membershipID = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
bucketRegion.recordBulkOpStart(membershipID, eventID);
}
bucketRegion.waitUntilLocked(keys);
boolean lockedForPrimary = false;
final HashMap succeeded = new HashMap();
PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize);
Object key = keys[0];
try {
bucketRegion.doLockForPrimary(false);
lockedForPrimary = true;
/*
 * The real work to be synchronized; it can take a long time. We need not worry about
 * another thread sending a msg that touches the same keys as this request, because such
 * requests will be blocked by the key locking (foundKey)
 */
for (int i = 0; i < putAllPRDataSize; i++) {
@Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
try {
key = ev.getKey();
ev.setPutAllOperation(dpao);
// make sure a local update inserts a cache de-serializable
ev.makeSerializedNewValue();
// the ev is added into dpao later, in basicPutPart3()
try {
didPut = r.getDataView().putEntryOnRemote(ev, false, false, null, false, lastModified, true);
if (didPut && logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage.doLocalPutAll:putLocally success for {}", ev);
}
} catch (ConcurrentCacheModificationException e) {
didPut = true;
if (logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage.doLocalPutAll:putLocally encountered concurrent cache modification for {}", ev, e);
}
}
putAllPRData[i].setTailKey(ev.getTailKey());
if (!didPut) {
// make sure the region hasn't gone away
r.checkReadiness();
ForceReattemptException fre = new ForceReattemptException("unable to perform put in PutAllPR, but operation should not fail");
fre.setHash(ev.getKey().hashCode());
throw fre;
} else {
succeeded.put(putAllPRData[i].getKey(), putAllPRData[i].getValue());
this.versions.addKeyAndVersion(putAllPRData[i].getKey(), ev.getVersionTag());
}
} finally {
ev.release();
}
} // for
} catch (IllegalMonitorStateException ignore) {
throw new ForceReattemptException("unable to get lock for primary, retrying... ");
} catch (CacheWriterException cwe) {
// encounter cacheWriter exception
partialKeys.saveFailedKey(key, cwe);
} finally {
try {
// Only PutAllPRMessage knows if the thread id is fake. Event has no idea.
// So we have to manually set useFakeEventId for this DPAO
dpao.setUseFakeEventId(true);
r.checkReadiness();
bucketRegion.getDataView().postPutAll(dpao, this.versions, bucketRegion);
} finally {
if (lockedForPrimary) {
bucketRegion.doUnlockForPrimary();
}
}
}
if (partialKeys.hasFailure()) {
partialKeys.addKeysAndVersions(this.versions);
if (logger.isDebugEnabled()) {
logger.debug("PutAllPRMessage: partial keys applied, map to bucket {}'s keys: {}. Applied {}", bucketId, Arrays.toString(keys), succeeded);
}
throw new PutAllPartialResultException(partialKeys);
}
} catch (RegionDestroyedException e) {
ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
} finally {
bucketRegion.removeAndNotifyKeys(keys);
}
} else {
for (int i = 0; i < putAllPRDataSize; i++) {
EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
try {
ev.setOriginRemote(true);
if (this.callbackArg != null) {
ev.setCallbackArgument(this.callbackArg);
}
r.invokePutCallbacks(ev.getOperation().isCreate() ? EnumListenerEvent.AFTER_CREATE : EnumListenerEvent.AFTER_UPDATE, ev, r.isInitialized(), true);
} finally {
ev.release();
}
}
}
} finally {
if (baseEvent != null)
baseEvent.release();
if (dpao != null)
dpao.freeOffHeapResources();
}
return true;
}
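A notable pattern in doLocalPutAll is partial-result bookkeeping: successful keys are accumulated in succeeded and this.versions, a CacheWriterException is recorded against the failing key via saveFailedKey, and a PutAllPartialResultException reports the mixture at the end. The sketch below reduces that bookkeeping to its core; PartialPutAll and PartialResultException are hypothetical names, not Geode's classes.

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical reduction of the partial-success bookkeeping in doLocalPutAll.
final class PartialPutAll {
  static class PartialResultException extends RuntimeException {
    final Map<String, Exception> failures;

    PartialResultException(Map<String, Exception> failures) {
      super("putAll partially failed for keys " + failures.keySet());
      this.failures = failures;
    }
  }

  static Map<String, String> putAll(Map<String, String> entries, Map<String, String> region) {
    Map<String, String> succeeded = new LinkedHashMap<>();
    Map<String, Exception> failed = new LinkedHashMap<>();
    for (Map.Entry<String, String> e : entries.entrySet()) {
      try {
        region.put(e.getKey(), e.getValue()); // stand-in for putEntryOnRemote(...)
        succeeded.put(e.getKey(), e.getValue()); // record the success, as succeeded.put does
      } catch (RuntimeException writerFailure) { // analogous to catching CacheWriterException
        failed.put(e.getKey(), writerFailure); // analogous to partialKeys.saveFailedKey(...)
      }
    }
    if (!failed.isEmpty()) {
      // report what failed together with what applied, like PutAllPartialResultException
      throw new PartialResultException(failed);
    }
    return succeeded;
  }
}

Reporting partial success rather than failing wholesale lets the caller see which entries were applied, so a retry need not blindly reapply everything.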