Use of org.apache.geode.cache.TimeoutException in project geode by apache.
In class PartitionedRegion, method updateEntryVersionInBucket:
public void updateEntryVersionInBucket(EntryEventImpl event) {
final boolean isDebugEnabled = logger.isDebugEnabled();
final Integer bucketId = event.getKeyInfo().getBucketId();
assert bucketId != KeyInfo.UNKNOWN_BUCKET;
final InternalDistributedMember targetNode = getOrCreateNodeForBucketWrite(bucketId, null);
final int retryAttempts = calcRetry();
int count = 0;
RetryTimeKeeper retryTime = null;
InternalDistributedMember retryNode = targetNode;
while (count <= retryAttempts) {
// It's possible this is a GemFire thread e.g. ServerConnection
// which got to this point because of a distributed system shutdown or
// region closure which uses interrupt to break any sleep() or wait()
// calls
// e.g. waitForPrimary or waitForBucketRecovery
checkShutdown();
if (retryNode == null) {
checkReadiness();
if (retryTime == null) {
retryTime = new RetryTimeKeeper(this.retryTimeout);
}
try {
retryNode = getOrCreateNodeForBucketWrite(bucketId, retryTime);
} catch (TimeoutException ignore) {
if (getRegionAdvisor().isStorageAssignedForBucket(bucketId)) {
// bucket no longer exists
throw new EntryNotFoundException(LocalizedStrings.PartitionedRegion_ENTRY_NOT_FOUND_FOR_KEY_0.toLocalizedString(event.getKey()));
}
// fall out to failed exception
break;
}
if (retryNode == null) {
checkEntryNotFound(event.getKey());
}
continue;
}
final boolean isLocal = (this.localMaxMemory > 0) && retryNode.equals(getMyId());
try {
if (isLocal) {
this.dataStore.updateEntryVersionLocally(bucketId, event);
} else {
updateEntryVersionRemotely(retryNode, bucketId, event);
}
return;
} catch (ConcurrentCacheModificationException e) {
if (isDebugEnabled) {
logger.debug("updateEntryVersionInBucket: caught concurrent cache modification exception", e);
}
event.isConcurrencyConflict(true);
if (isDebugEnabled) {
logger.debug("ConcurrentCacheModificationException received for updateEntryVersionInBucket for bucketId: {}{}{} for event: {} No reattampt is done, returning from here", getPRId(), BUCKET_ID_SEPARATOR, bucketId, event);
}
return;
} catch (ForceReattemptException prce) {
prce.checkKey(event.getKey());
if (isDebugEnabled) {
logger.debug("updateEntryVersionInBucket: retry attempt:{} of {}", count, retryAttempts, prce);
}
checkReadiness();
InternalDistributedMember lastNode = retryNode;
retryNode = getOrCreateNodeForBucketWrite(bucketId, retryTime);
if (lastNode.equals(retryNode)) {
if (retryTime == null) {
retryTime = new RetryTimeKeeper(this.retryTimeout);
}
if (retryTime.overMaximum()) {
break;
}
retryTime.waitToRetryNode();
}
} catch (PrimaryBucketException notPrimary) {
if (isDebugEnabled) {
logger.debug("updateEntryVersionInBucket {} on Node {} not primary", notPrimary.getLocalizedMessage(), retryNode);
}
getRegionAdvisor().notPrimary(bucketId, retryNode);
retryNode = getOrCreateNodeForBucketWrite(bucketId, retryTime);
}
count++;
if (isDebugEnabled) {
logger.debug("updateEntryVersionInBucket: Attempting to resend update version to node {} after {} failed attempts", retryNode, count);
}
}
// end of while: no target was found
// Fix for bug 36014
PartitionedRegionDistributionException e = new PartitionedRegionDistributionException(LocalizedStrings.PartitionedRegion_NO_VM_AVAILABLE_FOR_UPDATE_ENTRY_VERSION_IN_0_ATTEMPTS.toLocalizedString(count));
if (!isDebugEnabled) {
logger.warn(LocalizedMessage.create(LocalizedStrings.PartitionedRegion_NO_VM_AVAILABLE_FOR_UPDATE_ENTRY_VERSION_IN_0_ATTEMPTS, count));
} else {
logger.warn(e.getMessage(), e);
}
throw e;
}
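The method above is one instance of a recurring Geode pattern: a bounded retry loop that re-resolves the target member after each transient failure and gives up after a fixed number of attempts. A minimal, self-contained sketch of that pattern follows; the Target interface and all names in it are hypothetical illustrations, not Geode API, and only the control flow mirrors updateEntryVersionInBucket.

import org.apache.geode.cache.TimeoutException;

public class BoundedRetrySketch {

  // Hypothetical stand-in for "apply the operation to the currently chosen member".
  interface Target {
    void apply();
  }

  // Retries the operation up to maxAttempts times. A TimeoutException is treated as
  // transient and retried; on exhaustion the last one is rethrown, much as the method
  // above finally gives up and throws PartitionedRegionDistributionException.
  static void runWithRetry(Target op, int maxAttempts) {
    TimeoutException last = null;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        op.apply();
        return; // success: stop retrying
      } catch (TimeoutException te) {
        last = te; // transient: loop around, possibly against a new target
      }
    }
    throw last != null ? last : new TimeoutException("no attempts were made");
  }
}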
Use of org.apache.geode.cache.TimeoutException in project geode by apache.
In class PartitionedRegion, method getBucketKeys:
/**
* Fetch the keys for the given bucket identifier, whether the bucket is local or remote. This
* version of the method also allows Tombstone entries to be retrieved, in addition to
* undestroyed entries.
*
* @param bucketNum the identifier of the bucket to fetch keys from
* @param allowTombstones whether to include destroyed entries in the result
* @return a set of keys from bucketNum, or {@link Collections#EMPTY_SET} if no keys can be found
*/
public Set getBucketKeys(int bucketNum, boolean allowTombstones) {
Integer buck = bucketNum;
final int retryAttempts = calcRetry();
Set ret = null;
int count = 0;
InternalDistributedMember nod = getOrCreateNodeForBucketRead(bucketNum);
RetryTimeKeeper snoozer = null;
while (count <= retryAttempts) {
// It's possible this is a GemFire thread e.g. ServerConnection
// which got to this point because of a distributed system shutdown or
// region closure which uses interrupt to break any sleep() or wait()
// calls
// e.g. waitForPrimary or waitForBucketRecovery
checkShutdown();
if (nod == null) {
if (snoozer == null) {
snoozer = new RetryTimeKeeper(this.retryTimeout);
}
nod = getOrCreateNodeForBucketRead(bucketNum);
// No storage found for bucket, early out preventing hot loop, bug 36819
if (nod == null) {
checkShutdown();
break;
}
count++;
continue;
}
try {
if (nod.equals(getMyId())) {
ret = this.dataStore.getKeysLocally(buck, allowTombstones);
} else {
FetchKeysResponse r = FetchKeysMessage.send(nod, this, buck, allowTombstones);
ret = r.waitForKeys();
}
if (ret != null) {
return ret;
}
} catch (PRLocallyDestroyedException ignore) {
if (logger.isDebugEnabled()) {
logger.debug("getBucketKeys: Encountered PRLocallyDestroyedException");
}
checkReadiness();
} catch (ForceReattemptException prce) {
if (logger.isDebugEnabled()) {
logger.debug("getBucketKeys: attempt:{}", (count + 1), prce);
}
checkReadiness();
if (snoozer == null) {
snoozer = new RetryTimeKeeper(this.retryTimeout);
}
InternalDistributedMember oldNode = nod;
nod = getNodeForBucketRead(buck);
if (nod != null && nod.equals(oldNode)) {
if (snoozer.overMaximum()) {
checkReadiness();
throw new TimeoutException(LocalizedStrings.PartitionedRegion_ATTEMPT_TO_ACQUIRE_PRIMARY_NODE_FOR_READ_ON_BUCKET_0_TIMED_OUT_IN_1_MS.toLocalizedString(new Object[] { getBucketName(buck), snoozer.getRetryTime() }));
}
snoozer.waitToRetryNode();
}
}
count++;
}
if (logger.isDebugEnabled()) {
logger.debug("getBucketKeys: no keys found returning empty set");
}
return Collections.emptySet();
}
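For context, a hedged usage sketch of the method above. It assumes the caller already holds a PartitionedRegion reference (this is internal Geode API, so in practice that means test or framework code) and a valid bucket id; the class and method names in the sketch are illustrative only.

import java.util.Set;
import org.apache.geode.internal.cache.PartitionedRegion;

class BucketKeyDump {
  // Sketch only: pr and bucketId are assumed to be supplied by the caller.
  static void dumpKeys(PartitionedRegion pr, int bucketId) {
    // true => tombstones (destroyed-but-not-yet-collected entries) are included
    Set keys = pr.getBucketKeys(bucketId, true);
    for (Object key : keys) {
      System.out.println("bucket " + bucketId + " -> " + key);
    }
  }
}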
Use of org.apache.geode.cache.TimeoutException in project geode by apache.
In class TXDistributedDUnitTest, method testRemoteCommitFailure:
@Ignore("TODO: Disabled for #51260")
@Test
public void testRemoteCommitFailure() throws Exception {
try {
disconnectAllFromDS();
final String rgnName1 = getUniqueName() + "_1";
final String rgnName2 = getUniqueName() + "_2";
final String diskStoreName = getUniqueName() + "_store";
Host host = Host.getHost(0);
VM origin = host.getVM(0);
VM trouble1 = host.getVM(1);
VM trouble2 = host.getVM(2);
VM noTrouble = host.getVM(3);
CacheSerializableRunnable initRegions = new CacheSerializableRunnable("Initialize no trouble regions") {
@Override
public void run2() {
getCache().createDiskStoreFactory().setDiskDirs(getDiskDirs()).create(diskStoreName);
TXManagerImpl.ALLOW_PERSISTENT_TRANSACTIONS = true;
AttributesFactory af = new AttributesFactory();
af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
af.setScope(Scope.DISTRIBUTED_ACK);
af.setDiskStoreName(diskStoreName);
getCache().createRegion(rgnName1, af.create());
getCache().createRegion(rgnName2, af.create());
}
};
origin.invoke(initRegions);
noTrouble.invoke(initRegions);
SerializableRunnable initTroubleRegions = new CacheSerializableRunnable("Initialize regions that cause trouble") {
@Override
public void run2() {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
InternalRegionArguments ira = new InternalRegionArguments().setTestCallable(new TXTroubleMaker());
try {
getCache().createDiskStoreFactory().setDiskDirs(getDiskDirs()).create(diskStoreName);
TXManagerImpl.ALLOW_PERSISTENT_TRANSACTIONS = true;
AttributesFactory af = new AttributesFactory();
af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
af.setScope(Scope.DISTRIBUTED_ACK);
af.setDiskStoreName(diskStoreName);
gfc.createVMRegion(rgnName1, af.create(), ira);
gfc.createVMRegion(rgnName2, af.create(), ira);
gfc.getInternalDistributedSystem().addResourceListener(new ShutdownListener());
} catch (IOException ioe) {
fail(ioe.toString());
} catch (TimeoutException e) {
fail(e.toString());
} catch (ClassNotFoundException e) {
fail(e.toString());
}
}
};
trouble1.invoke(initTroubleRegions);
trouble2.invoke(initTroubleRegions);
SerializableRunnable doTransaction = new CacheSerializableRunnable("Run failing transaction") {
@Override
public void run2() {
Cache c = getCache();
Region r1 = c.getRegion(rgnName1);
assertNotNull(r1);
Region r2 = c.getRegion(rgnName2);
assertNotNull(r2);
CacheTransactionManager txmgr = c.getCacheTransactionManager();
txmgr.begin();
r1.put("k1", "k1");
r1.put("k2", "k2");
r1.put(TROUBLE_KEY, TROUBLE_KEY);
r2.put("k1", "k1");
r2.put("k2", "k2");
r2.put(TROUBLE_KEY, TROUBLE_KEY);
try {
txmgr.commit();
fail("Expected an tx incomplete exception");
} catch (CommitIncompleteException yay) {
String msg = yay.getMessage();
// getLogWriter().info("failing exception", yay);
// Each trouble VM mentions each region once, so with two trouble VMs each region name should appear twice
int ind = 0, match = 0;
while ((ind = msg.indexOf(rgnName1, ind)) >= 0) {
ind++;
match++;
}
assertEquals(2, match);
ind = match = 0;
while ((ind = msg.indexOf(rgnName2, ind)) >= 0) {
ind++;
match++;
}
assertEquals(2, match);
// DiskAccessExceptions should be mentioned four times (two regions on each of the two trouble VMs)
ind = match = 0;
while ((ind = msg.indexOf(DiskAccessException.class.getName(), ind)) >= 0) {
ind++;
match++;
}
assertEquals(4, match);
}
}
};
IgnoredException ee = null;
try {
ee = IgnoredException.addIgnoredException(DiskAccessException.class.getName() + "|" + CommitIncompleteException.class.getName() + "|" + CommitReplyException.class.getName());
origin.invoke(doTransaction);
} finally {
if (ee != null)
ee.remove();
}
SerializableCallable allowCacheToShutdown = new SerializableCallable() {
@Override
public Object call() throws Exception {
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
List<ResourceEventsListener> listeners = cache.getInternalDistributedSystem().getResourceListeners();
for (ResourceEventsListener l : listeners) {
if (l instanceof ShutdownListener) {
ShutdownListener shutListener = (ShutdownListener) l;
shutListener.unblockShutdown();
}
}
return null;
}
};
trouble1.invoke(allowCacheToShutdown);
trouble2.invoke(allowCacheToShutdown);
// Assert proper content on failing VMs
SerializableRunnable assertTroubledContent = new CacheSerializableRunnable("Assert partial commit data") {
@Override
public void run2() {
final Cache c = getCache();
Wait.waitForCriterion(new WaitCriterion() {
@Override
public boolean done() {
return c.getRegion(rgnName1) == null;
}
@Override
public String description() {
return "waiting for region " + rgnName1 + " to be destroyed";
}
}, 30000, 1000, true);
Region r2 = c.getRegion(rgnName2);
assertNull(r2);
}
};
trouble1.invoke(assertTroubledContent);
trouble2.invoke(assertTroubledContent);
// Assert proper content on successful VMs
SerializableRunnable assertSuccessfulContent = new CacheSerializableRunnable("Assert complete commit of data on successful VMs") {
@Override
public void run2() {
Cache c = getCache();
{
Region r1 = c.getRegion(rgnName1);
assertNotNull(r1);
assertEquals("k1", r1.getEntry("k1").getValue());
assertEquals("k2", r1.getEntry("k2").getValue());
assertEquals(TROUBLE_KEY, r1.getEntry(TROUBLE_KEY).getValue());
}
{
Region r2 = c.getRegion(rgnName2);
assertNotNull(r2);
assertEquals("k1", r2.getEntry("k1").getValue());
assertEquals("k2", r2.getEntry("k2").getValue());
assertEquals(TROUBLE_KEY, r2.getEntry(TROUBLE_KEY).getValue());
}
}
};
noTrouble.invoke(assertSuccessfulContent);
// Assert the transaction data survives on the originating VM
SerializableRunnable assertOriginContent = new CacheSerializableRunnable("Assert data survives on origin VM") {
@Override
public void run2() {
Cache c = getCache();
{
Region r1 = c.getRegion(rgnName1);
assertNotNull(r1);
assertNotNull(r1.getEntry("k1"));
assertNotNull(r1.getEntry("k2"));
assertNotNull(r1.getEntry(TROUBLE_KEY));
}
{
Region r2 = c.getRegion(rgnName2);
assertNotNull(r2);
assertNotNull(r2.getEntry("k1"));
assertNotNull(r2.getEntry("k2"));
assertNotNull(r2.getEntry(TROUBLE_KEY));
}
}
};
origin.invoke(assertOriginContent);
} finally {
Invoke.invokeInEveryVM(new SerializableCallable() {
@Override
public Object call() throws Exception {
TXManagerImpl.ALLOW_PERSISTENT_TRANSACTIONS = false;
return null;
}
});
}
}
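The CommitIncompleteException check above repeats the same indexOf counting loop three times. The idiom can be expressed once as a helper; this is a sketch, not part of the Geode test, and the method name is hypothetical.

// Counts occurrences of needle in haystack, advancing one character per match,
// exactly as the ind++/indexOf loops in the test above do.
static int countOccurrences(String haystack, String needle) {
  int count = 0;
  for (int ind = haystack.indexOf(needle); ind >= 0; ind = haystack.indexOf(needle, ind + 1)) {
    count++;
  }
  return count;
}

With it, the three loops would collapse to assertEquals(2, countOccurrences(msg, rgnName1)) and so on.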
Use of org.apache.geode.cache.TimeoutException in project geode by apache.
In class SearchAndLoadDUnitTest, method testOneHopNetWrite:
@Test
public void testOneHopNetWrite() throws CacheException, InterruptedException {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
final String name = this.getUniqueName() + "Region";
final String objectName = "Object7";
final Integer value = new Integer(483);
final Integer updateValue = new Integer(484);
vm0.invoke(new SerializableRunnable("Create replicated region with cacheWriter") {
public void run() {
netWriteInvoked = false;
operationWasCreate = false;
originWasRemote = false;
writerInvocationCount = 0;
try {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
factory.setCacheWriter(new CacheWriter() {
public void beforeCreate(EntryEvent e) throws CacheWriterException {
e.getRegion().getCache().getLogger().info("cache writer beforeCreate invoked for " + e);
netWriteInvoked = true;
operationWasCreate = true;
originWasRemote = e.isOriginRemote();
writerInvocationCount++;
return;
}
public void beforeUpdate(EntryEvent e) throws CacheWriterException {
e.getRegion().getCache().getLogger().info("cache writer beforeUpdate invoked for " + e);
netWriteInvoked = true;
operationWasCreate = false;
originWasRemote = e.isOriginRemote();
writerInvocationCount++;
return;
}
public void beforeDestroy(EntryEvent e) throws CacheWriterException {
}
public void beforeRegionDestroy(RegionEvent e) throws CacheWriterException {
}
public void beforeRegionClear(RegionEvent e) throws CacheWriterException {
}
public void close() {
}
});
createRegion(name, factory.create());
} catch (CacheException ex) {
Assert.fail("While creating replicated region", ex);
}
}
});
vm1.invoke(new SerializableRunnable("Create empty Region") {
public void run() {
try {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.EMPTY);
createRegion(name, factory.create());
} catch (CacheException ex) {
Assert.fail("While creating empty region", ex);
}
}
});
vm1.invoke(new SerializableRunnable("do a put that should be proxied in the other vm and invoke its cache writer") {
public void run() {
try {
getRootRegion().getSubregion(name).put(objectName, value);
} catch (CacheWriterException cwe) {
// swallowed: the vm0 assertions below verify the writer's behavior
} catch (TimeoutException te) {
// swallowed: the vm0 assertions below verify the writer's behavior
}
}
});
vm0.invoke(new SerializableRunnable("ensure that cache writer was invoked with correct settings in event") {
public void run() {
assertTrue("expected cache writer to be invoked", netWriteInvoked);
assertTrue("expected originRemote to be true", originWasRemote);
assertTrue("expected event to be create", operationWasCreate);
assertEquals("expected only one cache writer invocation", 1, writerInvocationCount);
// set flags for the next test - updating the same key
netWriteInvoked = false;
writerInvocationCount = 0;
}
});
vm1.invoke(new SerializableRunnable("do an update that should be proxied in the other vm and invoke its cache writer") {
public void run() {
try {
getRootRegion().getSubregion(name).put(objectName, updateValue);
} catch (CacheWriterException cwe) {
// swallowed: the vm0 assertions below verify the writer's behavior
} catch (TimeoutException te) {
// swallowed: the vm0 assertions below verify the writer's behavior
}
}
});
vm0.invoke(new SerializableRunnable("ensure that cache writer was invoked with correct settings in event") {
public void run() {
assertTrue("expected cache writer to be invoked", netWriteInvoked);
assertTrue("expected originRemote to be true", originWasRemote);
assertTrue("expected event to be create", operationWasCreate);
assertEquals("expected only one cache writer invocation", 1, writerInvocationCount);
}
});
}
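The anonymous CacheWriter above has to stub out all five callbacks plus close(). Geode also ships org.apache.geode.cache.util.CacheWriterAdapter, which provides no-op defaults, so the same bookkeeping could be written as the sketch below; the field names mirror the test's statics but the class itself is illustrative, not part of the test.

import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.util.CacheWriterAdapter;

// Records the same facts the test asserts on: whether the writer ran, whether
// the operation was a create, whether the origin was remote, and how often.
class RecordingWriter extends CacheWriterAdapter {
  volatile boolean netWriteInvoked;
  volatile boolean operationWasCreate;
  volatile boolean originWasRemote;
  volatile int writerInvocationCount;

  @Override
  public void beforeCreate(EntryEvent e) throws CacheWriterException {
    netWriteInvoked = true;
    operationWasCreate = true;
    originWasRemote = e.isOriginRemote();
    writerInvocationCount++;
  }

  @Override
  public void beforeUpdate(EntryEvent e) throws CacheWriterException {
    netWriteInvoked = true;
    operationWasCreate = false;
    originWasRemote = e.isOriginRemote();
    writerInvocationCount++;
  }
}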
Use of org.apache.geode.cache.TimeoutException in project geode by apache.
In class SearchAndLoadDUnitTest, method testNetLoad:
@Test
public void testNetLoad() throws CacheException, InterruptedException {
disconnectAllFromDS();
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
final String name = this.getUniqueName() + "-ACK";
final String objectName = "B";
final Integer value = new Integer(43);
loaderInvoked = false;
remoteLoaderInvoked = false;
vm0.invoke(new SerializableRunnable("Create ACK Region") {
public void run() {
try {
loaderInvoked = false;
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setEarlyAck(false);
// factory.setCacheLoader(new CacheLoader() {
// public Object load(LoaderHelper helper) {
/// loaderInvoked = true;
// return value;
// }
//
// public void close() {
//
// }
// });
Region region = createRegion(name, factory.create());
region.create(objectName, null);
} catch (CacheException ex) {
Assert.fail("While creating ACK region", ex);
}
}
});
vm1.invoke(new SerializableRunnable("Create ACK Region") {
public void run() {
try {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setEarlyAck(false);
factory.setCacheLoader(new CacheLoader() {
public Object load(LoaderHelper helper) {
remoteLoaderInvoked = true;
return value;
}
public void close() {
}
});
createRegion(name, factory.create());
} catch (CacheException ex) {
Assert.fail("While creating ACK region", ex);
}
}
});
vm0.invoke(new SerializableRunnable("Get a value from remote loader") {
public void run() {
for (int i = 0; i < 1; i++) {
try {
Object result = getRootRegion().getSubregion(name).get(objectName);
assertEquals(value, result);
assertFalse("local loader should not have been invoked", loaderInvoked);
// getRootRegion().getSubregion(name).invalidate(objectName);
} catch (CacheLoaderException cle) {
Assert.fail("While getting value for ACK region", cle);
}
// catch (EntryNotFoundException enfe) { fail("While getting value for ACK region", enfe); }
catch (TimeoutException te) {
Assert.fail("While getting value for ACK region", te);
}
}
}
});
}
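The loader installed on vm1 is what makes this test work: vm0's get() misses locally and vm0 has no loader of its own, so Geode performs a net-load and invokes a loader on another member. Pulled out of the anonymous class, that loader looks like the sketch below; the class name is hypothetical and only the load/close bodies come from the test.

import org.apache.geode.cache.CacheLoader;
import org.apache.geode.cache.CacheLoaderException;
import org.apache.geode.cache.LoaderHelper;

// Returns a fixed value on every load; in the test above this runs in vm1 and
// flips remoteLoaderInvoked, while vm0's loaderInvoked flag stays false.
class FixedValueLoader implements CacheLoader {
  private final Object value;

  FixedValueLoader(Object value) {
    this.value = value;
  }

  @Override
  public Object load(LoaderHelper helper) throws CacheLoaderException {
    return value; // helper.getKey() is available if the result should depend on the key
  }

  @Override
  public void close() {
    // no resources to release
  }
}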