Use of org.apache.geode.InternalGemFireError in project geode by apache.
The class SerializerUtil, method keyToBytes.
/**
* Convert a key to a byte array.
*/
private static BytesRef keyToBytes(Object key) {
  ByteArrayOutputStream buffer = LOCAL_BUFFER.get();
  try {
    DataOutputStream out = new DataOutputStream(buffer);
    DataSerializer.writeObject(key, out);
    out.flush();
    BytesRef result = new BytesRef(buffer.toByteArray());
    buffer.reset();
    return result;
  } catch (IOException e) {
    throw new InternalGemFireError("Unable to serialize key", e);
  }
}
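The same DataSerializer encoding can be reversed when a key is read back out of the index. The helper below is a minimal sketch of that inverse, assuming the BytesRef holds exactly the bytes produced by keyToBytes; bytesToKey is a hypothetical name, not a method shown in the snippet above.

// Hypothetical inverse of keyToBytes: decode a key from the BytesRef produced above.
private static Object bytesToKey(BytesRef ref) {
  try {
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(ref.bytes, ref.offset, ref.length));
    return DataSerializer.readObject(in);
  } catch (IOException | ClassNotFoundException e) {
    // Mirror keyToBytes: treat a decode failure as an internal error.
    throw new InternalGemFireError("Unable to deserialize key", e);
  }
}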
The class ShutdownAllRequest, method createResponse.
@Override
protected AdminResponse createResponse(DistributionManager dm) {
  boolean isToShutdown = hasCache();
  if (isToShutdown) {
    boolean isSuccess = false;
    try {
      GemFireCacheImpl.getInstance().shutDownAll();
      isSuccess = true;
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // If this ever returns, rethrow the error. We're poisoned
      // now, so don't let this thread continue.
      throw err;
    } catch (Throwable t) {
      // Whenever you catch Error or Throwable, you must also
      // catch VirtualMachineError (see above). However, there is
      // _still_ a possibility that you are dealing with a cascading
      // error condition, so you also need to check to see if the JVM
      // is still usable:
      SystemFailure.checkFailure();
      if (t instanceof InternalGemFireError) {
        logger.fatal("DistributedSystem is closed due to InternalGemFireError", t);
      } else {
        logger.fatal("DistributedSystem is closed due to unexpected exception", t);
      }
    } finally {
      if (!isSuccess) {
        InternalDistributedMember me = dm.getDistributionManagerId();
        InternalDistributedSystem ids = dm.getSystem();
        if (!this.getSender().equals(me)) {
          if (ids.isConnected()) {
            logger.fatal("ShutdownAllRequest: disconnect distributed without response.");
            ids.disconnect();
          }
        }
      }
    }
  }
  return new ShutdownAllResponse(this.getSender(), isToShutdown);
}
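The catch structure above is the standard GemFire/Geode failure-hygiene idiom. Below is a minimal, generic sketch of that idiom on its own; runGuarded and the Runnable task are illustrative placeholders rather than Geode APIs, while SystemFailure.initiateFailure and SystemFailure.checkFailure are the same calls used in the method above.

// Hypothetical wrapper showing the VirtualMachineError / SystemFailure catch idiom.
static void runGuarded(Runnable task, Logger logger) {
  try {
    task.run();
  } catch (VirtualMachineError err) {
    // Record the JVM-level failure and rethrow; the process can no longer be trusted.
    SystemFailure.initiateFailure(err);
    throw err;
  } catch (Throwable t) {
    // Any Throwable handler must first check whether a cascading JVM failure is in progress.
    SystemFailure.checkFailure();
    if (t instanceof InternalGemFireError) {
      logger.fatal("Task failed due to InternalGemFireError", t);
    } else {
      logger.fatal("Task failed due to unexpected exception", t);
    }
  }
}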
The class FilterProfile, method registerClientInterest.
/**
* Registers interest in the input region name and key
*
* @param inputClientID The identity of the interested client
* @param interest The key in which to register interest
* @param typeOfInterest the type of interest the client is registering
* @param updatesAsInvalidates whether the client just wants invalidations
* @return a set of the keys that were registered, which may be null
*/
public Set registerClientInterest(Object inputClientID, Object interest, int typeOfInterest,
    boolean updatesAsInvalidates) {
  Set keysRegistered = new HashSet();
  operationType opType = null;
  Long clientID = getClientIDForMaps(inputClientID);
  synchronized (this.interestListLock) {
    switch (typeOfInterest) {
      case InterestType.KEY:
        opType = operationType.REGISTER_KEY;
        Map<Object, Set> koi = updatesAsInvalidates ? getKeysOfInterestInv() : getKeysOfInterest();
        registerKeyInMap(interest, keysRegistered, clientID, koi);
        break;
      case InterestType.REGULAR_EXPRESSION:
        opType = operationType.REGISTER_PATTERN;
        if (((String) interest).equals(".*")) {
          Set akc = updatesAsInvalidates ? getAllKeyClientsInv() : getAllKeyClients();
          if (akc.add(clientID)) {
            keysRegistered.add(interest);
          }
        } else {
          Map<Object, Map<Object, Pattern>> pats =
              updatesAsInvalidates ? getPatternsOfInterestInv() : getPatternsOfInterest();
          registerPatternInMap(interest, keysRegistered, clientID, pats);
        }
        break;
      case InterestType.FILTER_CLASS: {
        opType = operationType.REGISTER_FILTER;
        Map<Object, Map> filts =
            updatesAsInvalidates ? getFiltersOfInterestInv() : getFiltersOfInterest();
        registerFilterClassInMap(interest, clientID, filts);
        break;
      }
      default:
        throw new InternalGemFireError(
            LocalizedStrings.CacheClientProxy_UNKNOWN_INTEREST_TYPE.toLocalizedString());
    } // switch
    if (this.isLocalProfile && opType != null) {
      sendProfileOperation(clientID, opType, interest, updatesAsInvalidates);
    }
  } // synchronized
  return keysRegistered;
}
The class PartitionedRegionDataStore, method getLocally.
/**
 * Returns the value corresponding to this key.
 *
 * @param key the key to look for
 * @param preferCD whether the caller prefers the value in CachedDeserializable form
 * @param requestingClient the client making the request, or null
 * @param clientEvent the client's event (for returning the version tag)
 * @param returnTombstones whether tombstones should be returned
 * @param opScopeIsLocal if true then just check local storage for a value; if false then try to
 *        find the value if it is not local
 * @return the value from the bucket region
 * @throws ForceReattemptException if the bucket region is null
 * @throws PrimaryBucketException if the locally managed bucket is not primary
 * @throws PRLocallyDestroyedException if the PartitionedRegion is locally destroyed
 */
public Object getLocally(int bucketId, final Object key, final Object aCallbackArgument,
    boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient,
    EntryEventImpl clientEvent, boolean returnTombstones, boolean opScopeIsLocal)
    throws PrimaryBucketException, ForceReattemptException, PRLocallyDestroyedException {
  final BucketRegion bucketRegion = getInitializedBucketForId(key, Integer.valueOf(bucketId));
  // check for primary (when a loader is present) done deeper in the BucketRegion
  Object ret = null;
  if (logger.isDebugEnabled()) {
    logger.debug("getLocally: key {}) bucketId={}{}{} region {} returnTombstones {} ", key,
        this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId,
        bucketRegion.getName(), returnTombstones);
  }
  invokeBucketReadHook();
  try {
    ret = bucketRegion.get(key, aCallbackArgument, true, disableCopyOnRead, preferCD,
        requestingClient, clientEvent, returnTombstones, opScopeIsLocal, false);
    checkIfBucketMoved(bucketRegion);
  } catch (RegionDestroyedException rde) {
    if (this.partitionedRegion.isLocallyDestroyed || this.partitionedRegion.isClosed) {
      throw new PRLocallyDestroyedException(rde);
    } else {
      this.getPartitionedRegion().checkReadiness();
      if (bucketRegion.isBucketDestroyed()) {
        // bucket moved by rebalance
        throw new ForceReattemptException("Bucket removed during get", rde);
      } else {
        throw new InternalGemFireError(
            "Got region destroyed message, but neither bucket nor PR was destroyed", rde);
      }
    }
  }
  return ret;
}
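A hedged sketch of how a caller might handle the exceptions declared by getLocally; dataStore, bucketId, and key are placeholders, and the argument values are purely illustrative.

try {
  Object value = dataStore.getLocally(bucketId, key, null /* callback arg */,
      false /* disableCopyOnRead */, false /* preferCD */, null /* requestingClient */,
      null /* clientEvent */, false /* returnTombstones */, true /* opScopeIsLocal */);
  // use value ...
} catch (ForceReattemptException e) {
  // The bucket moved (for example during rebalance); recompute the target and retry.
} catch (PRLocallyDestroyedException e) {
  // The partitioned region was destroyed on this member; propagate to the caller.
} catch (PrimaryBucketException e) {
  // This member is no longer primary for the bucket; retry against the new primary.
}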
The class ProxyBucketRegion, method recoverFromDisk.
public void recoverFromDisk() {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  RuntimeException exception = null;
  if (isDebugEnabled) {
    logger.debug("{} coming to recover from disk. wasHosting {}", getFullPath(),
        persistenceAdvisor.wasHosting());
  }
  try {
    if (persistenceAdvisor.wasHosting()) {
      if (isDebugEnabled) {
        logger.debug("{} used to host data. Attempting to recover.", getFullPath());
      }
      CreateBucketResult result;
      if (hasPersistentChildRegion()) {
        // If this is a parent PR, create the bucket, possibly going over
        // redundancy. We need to do this so that we can create the child
        // region in this member. This member may have the latest data for the
        // child region.
        result = partitionedRegion.getDataStore().grabBucket(bid,
            getDistributionManager().getDistributionManagerId(), true, true, false, null, true);
      } else {
        if (this.partitionedRegion.isShadowPR() && this.partitionedRegion.getColocatedWith() != null) {
          PartitionedRegion colocatedRegion = ColocationHelper.getColocatedRegion(this.partitionedRegion);
          if (this.partitionedRegion.getDataPolicy().withPersistence()
              && !colocatedRegion.getDataPolicy().withPersistence()) {
            result = colocatedRegion.getDataStore().grabBucket(bid,
                getDistributionManager().getDistributionManagerId(), true, true, false, null, true);
            if (result.nowExists()) {
              result = partitionedRegion.getDataStore().grabBucket(bid, null, true, false, false,
                  null, true);
            }
          } else {
            result = partitionedRegion.getDataStore().grabBucket(bid, null, true, false, false,
                null, true);
          }
        } else {
          result = partitionedRegion.getDataStore().grabBucket(bid, null, true, false, false,
              null, true);
        }
      }
      if (result.nowExists()) {
        return;
      } else if (result != CreateBucketResult.REDUNDANCY_ALREADY_SATISFIED) {
        // TODO prpersist - check cache closure, create new error message
        this.partitionedRegion.checkReadiness();
        throw new InternalGemFireError("Unable to restore the persistent bucket " + this.getName());
      }
      if (isDebugEnabled) {
        logger.debug("{} redundancy is already satisfied, so discarding persisted data. Current hosts {}",
            getFullPath(), advisor.adviseReplicates());
      }
      // Destroy the data if we can't create the bucket, or if the redundancy is already satisfied
      destroyOfflineData();
    }
    if (isDebugEnabled) {
      logger.debug("{} initializing membership view from peers", getFullPath());
    }
    persistenceAdvisor.initializeMembershipView();
  } catch (DiskAccessException dae) {
    this.partitionedRegion.handleDiskAccessException(dae);
    throw dae;
  } catch (RuntimeException e) {
    exception = e;
    throw e;
  } finally {
    persistenceAdvisor.recoveryDone(exception);
  }
}
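The try/finally shape above, where a RuntimeException is captured only so it can be reported to recoveryDone, is a small idiom on its own. A generic sketch follows; doRecovery and onRecoveryDone are placeholders, not Geode APIs.

RuntimeException failure = null;
try {
  doRecovery();
} catch (RuntimeException e) {
  // Remember the failure for the finally block, then let it propagate.
  failure = e;
  throw e;
} finally {
  // Passing null signals a successful recovery to the listener.
  onRecoveryDone(failure);
}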