use of org.apache.geode.cache.EntryDestroyedException in project geode by apache.
The class CompiledOperation, method evaluate.
/**
 * Evaluates this method-call expression against the given execution context.
 *
 * <p>Resolves the receiver (explicit, or an implicit iterator operation when no receiver
 * was written), unwraps CQ {@code Region.Entry} receivers to their values, resolves the
 * target class (handling PDX receivers), invokes the method, and finally substitutes the
 * local bucket region when the result is the partitioned region being queried.
 *
 * @param context the query execution context
 * @return the method result, or {@link QueryService#UNDEFINED} when the receiver is null
 *         or refers to a destroyed entry
 * @throws TypeMismatchException if the method cannot be resolved
 * @throws QueryInvocationTargetException if the PDX class cannot be loaded or invocation fails
 */
public Object evaluate(ExecutionContext context) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException {
  CompiledValue rcvr = getReceiver(context);
  Object evalRcvr;
  if (rcvr == null) {
    // No explicit receiver: must be intended as an implicit iterator operation.
    // Resolve the operation name against the iterators currently in scope.
    RuntimeIterator rcvrItr = context.resolveImplicitOperationName(this.methodName, this.args.size(), true);
    evalRcvr = rcvrItr.evaluate(context);
  } else {
    // if not null, then explicit receiver
    evalRcvr = rcvr.evaluate(context);
  }
  // short circuit null immediately
  if (evalRcvr == null) {
    return QueryService.UNDEFINED;
  }
  // In a CQ context a Region.Entry receiver is unwrapped to its value; a destroyed
  // entry evaluates to UNDEFINED rather than failing the whole query.
  if (context.isCqQueryContext() && evalRcvr instanceof Region.Entry) {
    Region.Entry re = (Region.Entry) evalRcvr;
    if (re.isDestroyed()) {
      return QueryService.UNDEFINED;
    }
    try {
      evalRcvr = re.getValue();
    } catch (EntryDestroyedException ignored) {
      // entry was destroyed between the isDestroyed() check and getValue()
      return QueryService.UNDEFINED;
    }
  }
  // Resolve the method against the receiver's runtime class. PDX receivers are
  // resolved against their deserialized class; PdxString resolves as String.
  Class<?> resolveClass;
  if (evalRcvr instanceof PdxInstance) {
    String className = ((PdxInstance) evalRcvr).getClassName();
    try {
      resolveClass = InternalDataSerializer.getCachedClass(className);
    } catch (ClassNotFoundException cnfe) {
      throw new QueryInvocationTargetException(cnfe);
    }
  } else if (evalRcvr instanceof PdxString) {
    resolveClass = String.class;
  } else {
    resolveClass = evalRcvr.getClass();
  }
  Object result = eval0(evalRcvr, resolveClass, context);
  // If the result is the partitioned region being queried, substitute the local
  // bucket region so iteration stays on local data.
  PartitionedRegion pr = context.getPartitionedRegion();
  if (pr != null && (result instanceof Region)) {
    if (pr.getFullPath().equals(((Region) result).getFullPath())) {
      result = context.getBucketRegion();
    }
  }
  return result;
}
use of org.apache.geode.cache.EntryDestroyedException in project geode by apache.
The class LocalRegion, method saveSnapshot.
@Override
public void saveSnapshot(OutputStream outputStream) throws IOException {
  // Proxy regions hold no local data, so there is nothing to snapshot.
  if (isProxy()) {
    throw new UnsupportedOperationException(LocalizedStrings.LocalRegion_REGIONS_WITH_DATAPOLICY_0_DO_NOT_SUPPORT_SAVESNAPSHOT.toLocalizedString(getDataPolicy()));
  }
  checkForNoAccess();
  DataOutputStream dataOut = new DataOutputStream(outputStream);
  try {
    dataOut.writeByte(SNAPSHOT_VERSION);
    for (Object entryObject : entrySet(false)) {
      Entry entry = (Entry) entryObject;
      try {
        Object key = entry.getKey();
        Object value = entry.getValue();
        if (value == Token.TOMBSTONE) {
          // tombstones are not part of a snapshot
          continue;
        }
        DataSerializer.writeObject(key, dataOut);
        if (value != null) {
          dataOut.writeByte(SNAPSHOT_VALUE_OBJ);
          DataSerializer.writeObject(value, dataOut);
        } else {
          // fix for bug 33311
          RegionEntry regionEntry = ((NonTXEntry) entry).getRegionEntry();
          // OFFHEAP: incrc, copy info heap cd for serialization, decrc
          value = regionEntry.getValue(this);
          if (value == Token.INVALID) {
            dataOut.writeByte(SNAPSHOT_VALUE_INVALID);
          } else if (value == Token.LOCAL_INVALID) {
            dataOut.writeByte(SNAPSHOT_VALUE_LOCAL_INVALID);
          } else {
            dataOut.writeByte(SNAPSHOT_VALUE_OBJ);
            DataSerializer.writeObject(value, dataOut);
          }
        }
      } catch (EntryDestroyedException ignore) {
        // entry vanished mid-iteration; skip it and continue with the next one
      }
    }
    // write NULL terminator
    DataSerializer.writeObject(null, dataOut);
  } finally {
    dataOut.close();
  }
}
use of org.apache.geode.cache.EntryDestroyedException in project geode by apache.
The class LocalRegion, method clearViaList.
/**
 * Performs a localDestroy (without callbacks) on every entry of this region whose key appears
 * in the given list. Entries destroyed concurrently during iteration are silently skipped.
 *
 * @param keys the keys whose entries, if present, should be locally destroyed
 * @see #registerInterest(Object)
 */
private void clearViaList(List keys) {
  for (Object entryObject : entrySet(false)) {
    Entry entry = (Entry) entryObject;
    try {
      Object entryKey = entry.getKey();
      // List.contains matches via equals(), same as the previous hand-rolled scan
      if (keys.contains(entryKey)) {
        localDestroyNoCallbacks(entryKey);
      }
    } catch (EntryDestroyedException ignore) {
      // ignore to fix bug 35534
    }
  }
}
use of org.apache.geode.cache.EntryDestroyedException in project geode by apache.
The class OverflowOplog, method getBytesAndBits.
/**
 * Returns the unserialized bytes and bits for the given entry. If this oplog changes while
 * querying, the lookup is retried against the oplog that now holds the entry. This method
 * should never get invoked for an entry which has been destroyed.
 *
 * @param dr the disk region the entry belongs to
 * @param id the DiskId for the entry
 * @param faultingIn whether the value is being faulted into memory
 * @param bitOnly whether to extract just the UserBit or the UserBit with the value
 * @return BytesAndBits object wrapping the value and user bit
 * @throws EntryDestroyedException if no value is found for the entry
 * @since GemFire 3.2.1
 */
public BytesAndBits getBytesAndBits(DiskRegionView dr, DiskId id, boolean faultingIn, boolean bitOnly) {
  OverflowOplog retryOplog = null;
  long offset = 0;
  synchronized (id) {
    // overflow oplog ids fit in an int
    int opId = (int) id.getOplogId();
    if (opId != getOplogId()) {
      // the oplog changed on us so we need to do a recursive call after unsyncing
      retryOplog = this.getOplogSet().getChild(opId);
    } else {
      // fetch this while synced so it will be consistent with oplogId
      offset = id.getOffsetInOplog();
    }
  }
  if (retryOplog != null) {
    return retryOplog.getBytesAndBits(dr, id, faultingIn, bitOnly);
  }
  BytesAndBits bb = null;
  long start = this.stats.startRead();
  // the data is present in the current oplog file
  if (offset == -1) {
    // Since a get operation has already taken a lock on the entry, no put operation could
    // have modified the oplog id; therefore no synchronization is needed for this re-read.
    offset = id.getOffsetInOplog();
  }
  // Since this oplog is still open, we can retrieve the value from it directly.
  try {
    bb = basicGet(dr, offset, bitOnly, id.getValueLength(), id.getUserBits());
  } catch (DiskAccessException dae) {
    logger.error(LocalizedMessage.create(LocalizedStrings.Oplog_OPLOGBASICGET_ERROR_IN_READING_THE_DATA_FROM_DISK_FOR_DISK_ID_HAVING_DATA_AS_0, id), dae);
    throw dae;
  }
  if (bb == null) {
    throw new EntryDestroyedException(LocalizedStrings.Oplog_NO_VALUE_WAS_FOUND_FOR_ENTRY_WITH_DISK_ID_0_ON_A_REGION_WITH_SYNCHRONOUS_WRITING_SET_TO_1.toLocalizedString(new Object[] { id, dr.isSync() }));
  }
  if (bitOnly) {
    dr.endRead(start, this.stats.endRead(start, 1), 1);
  } else {
    dr.endRead(start, this.stats.endRead(start, bb.getBytes().length), bb.getBytes().length);
  }
  return bb;
}
use of org.apache.geode.cache.EntryDestroyedException in project geode by apache.
The class Oplog, method getBytesAndBits.
/**
 * Returns the unserialized bytes and bits for the given entry. If the oplog changes while
 * querying, the lookup is retried against the oplog that now holds the entry. This method
 * should never get invoked for an entry which has been destroyed.
 *
 * @param dr the disk region the entry belongs to
 * @param id the DiskId for the entry
 * @param faultingIn whether the value is being faulted into memory
 * @param bitOnly whether to extract just the UserBit or the UserBit with the value
 * @return BytesAndBits object wrapping the value and user bit
 * @since GemFire 3.2.1
 */
public BytesAndBits getBytesAndBits(DiskRegionView dr, DiskId id, boolean faultingIn, boolean bitOnly) {
  Oplog currentHolder = null;
  long valueOffset = 0;
  synchronized (id) {
    long recordedOplogId = id.getOplogId();
    if (recordedOplogId == getOplogId()) {
      // read the offset while synced so it is consistent with the oplog id
      valueOffset = id.getOffsetInOplog();
    } else {
      // the entry moved to another oplog; delegate after releasing the lock
      currentHolder = getOplogSet().getChild(recordedOplogId);
    }
  }
  if (currentHolder != null) {
    return currentHolder.getBytesAndBits(dr, id, faultingIn, bitOnly);
  }
  long start = this.stats.startRead();
  // the data is present in the current oplog file
  if (valueOffset == -1) {
    // A get operation has already locked the entry, so no put can have modified the
    // oplog id; no synchronization is needed for this re-read.
    valueOffset = id.getOffsetInOplog();
  }
  // Since this oplog is still open, we can retrieve the value from it directly.
  BytesAndBits bb;
  try {
    bb = basicGet(dr, valueOffset, bitOnly, id.getValueLength(), id.getUserBits());
  } catch (DiskAccessException dae) {
    logger.error(LocalizedMessage.create(LocalizedStrings.Oplog_OPLOGBASICGET_ERROR_IN_READING_THE_DATA_FROM_DISK_FOR_DISK_ID_HAVING_DATA_AS_0, id), dae);
    throw dae;
  }
  if (bb == null) {
    throw new EntryDestroyedException(LocalizedStrings.Oplog_NO_VALUE_WAS_FOUND_FOR_ENTRY_WITH_DISK_ID_0_ON_A_REGION_WITH_SYNCHRONOUS_WRITING_SET_TO_1.toLocalizedString(new Object[] { id, dr.isSync() }));
  }
  int bytesRead = bitOnly ? 1 : bb.getBytes().length;
  dr.endRead(start, this.stats.endRead(start, bytesRead), bytesRead);
  return bb;
}
Aggregations