Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
From the class ParallelGatewaySenderQueue, the method destroyEventFromQueue:
private void destroyEventFromQueue(PartitionedRegion prQ, int bucketId, Object key) {
  boolean isPrimary = prQ.getRegionAdvisor().getBucketAdvisor(bucketId).isPrimary();
  BucketRegionQueue brq = getBucketRegionQueueByBucketId(prQ, bucketId);
  // The bucket region queue may not be hosted locally, so check for null
  // before destroying a key from it.
  try {
    if (brq != null) {
      brq.destroyKey(key);
    }
    stats.decQueueSize();
  } catch (EntryNotFoundException e) {
    if (!this.sender.isBatchConflationEnabled() && logger.isDebugEnabled()) {
      logger.debug(
          "ParallelGatewaySenderQueue#remove: Got EntryNotFoundException while removing key {} for {} for bucket = {} for GatewaySender {}",
          key, this, bucketId, this.sender);
    }
  } catch (ForceReattemptException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("Bucket :{} moved to other member", bucketId);
    }
  } catch (PrimaryBucketException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("Primary bucket :{} moved to other member", bucketId);
    }
  } catch (RegionDestroyedException e) {
    if (logger.isDebugEnabled()) {
      logger.debug("Caught RegionDestroyedException attempting to remove key {} from bucket {} in {}",
          key, bucketId, prQ.getFullPath());
    }
  }
  addRemovedEvent(prQ, bucketId, key);
}
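destroyEventFromQueue treats RegionDestroyedException (along with EntryNotFoundException and the bucket-movement exceptions) as a benign race: if the bucket or its region is already gone, the queue entry no longer needs removing, so the method logs at debug level and still records the removal. The same swallow-and-continue pattern reduced to a minimal sketch against the public Region API; destroyQuietly is a hypothetical helper, not a Geode method:

import org.apache.geode.cache.EntryNotFoundException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;

// Hypothetical helper: best-effort removal that tolerates a concurrent
// region destroy instead of propagating it to the caller.
static void destroyQuietly(Region<Object, Object> region, Object key) {
  try {
    region.destroy(key);
  } catch (EntryNotFoundException | RegionDestroyedException ignored) {
    // Either way the outcome is what we wanted: the entry (or its whole
    // region) is already gone.
  }
}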
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
From the class PartitionedRegionSingleNodeOperationsJUnitTest, the method test003DestroyRegion:
/**
 * A test for the PartitionedRegion destroyRegion() operation.
 */
@Test
public void test003DestroyRegion() throws Exception {
  String regionName = "testDestroyRegion";
  PartitionedRegion pr = (PartitionedRegion) PartitionedRegionTestHelper
      .createPartitionedRegion(regionName, String.valueOf(200), 0);
  pr.put(new Integer(1), new Integer(1));
  pr.get(new Integer(1));
  if (pr.isDestroyed()) {
    fail("PartitionedRegionSingleNodeOperationsJUnitTest:testDestroyRegion(): isDestroyed() returned true before the region was destroyed");
  }
  logWriter.info("JDEBUG 1");
  pr.destroyRegion();
  logWriter.info("JDEBUG 2");
  // Validate that the meta-data and bucket regions are cleaned up.
  assertTrue(pr.isDestroyed());
  // assertTrue(pr.getBucket2Node().isEmpty());
  Region root = PartitionedRegionHelper.getPRRoot(PartitionedRegionTestHelper.createCache());
  // assertNull(PartitionedRegionHelper.getPRConfigRegion(root,
  // PartitionedRegionTestHelper.createCache()).get(regionName));
  java.util.Iterator regItr = root.subregions(false).iterator();
  while (regItr.hasNext()) {
    Region rg = (Region) regItr.next();
    // No bucket region for the destroyed PR should remain under the root.
    assertEquals(-1,
        rg.getName().indexOf(PartitionedRegionHelper.BUCKET_REGION_PREFIX + pr.getPRId() + "_"));
  }
  if (!pr.isDestroyed()) {
    fail("testDestroyRegion(): isDestroyed() returned false after the region was destroyed");
  }
  logWriter.info("JDEBUG 3");
  try {
    pr.put(new Integer(2), new Integer(2));
    fail("testDestroyRegion(): expected RegionDestroyedException was not thrown");
  } catch (RegionDestroyedException ex) {
    if (logWriter.fineEnabled()) {
      logWriter.fine("testDestroyRegion() got a correct RegionDestroyedException for put() after destroyRegion()");
    }
  }
  logWriter.info("JDEBUG 4");
  try {
    pr.get(new Integer(2));
    fail("testDestroyRegion(): expected RegionDestroyedException was not thrown");
  } catch (RegionDestroyedException ex) {
    if (logWriter.fineEnabled()) {
      logWriter.fine("PartitionedRegionSingleNodeOperationsJUnitTest - testDestroyRegion() got a correct RegionDestroyedException for get() after destroyRegion()");
    }
  }
  logWriter.info("JDEBUG 5");
  try {
    pr.destroy(new Integer(1));
    fail("testDestroyRegion(): expected RegionDestroyedException was not thrown");
  } catch (RegionDestroyedException ex) {
    if (logWriter.fineEnabled()) {
      logWriter.fine("PartitionedRegionSingleNodeOperationsJUnitTest - testDestroyRegion() got a correct RegionDestroyedException for destroy() after destroyRegion()");
    }
  }
  logWriter.info("JDEBUG 6");
  pr = (PartitionedRegion) PartitionedRegionTestHelper
      .createPartitionedRegion(regionName, String.valueOf(200), 0);
  if (logWriter.fineEnabled()) {
    logWriter.fine("PartitionedRegionSingleNodeOperationsJUnitTest - testDestroyRegion(): a PartitionedRegion with the same name as the destroyed region can be created.");
  }
  if (logWriter.fineEnabled()) {
    logWriter.fine("PartitionedRegionSingleNodeOperationsJUnitTest - testDestroyRegion(): Completed successfully.");
  }
}
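The try/fail/catch idiom above predates JUnit 4.13. On 4.13 and later, the same three checks can be collapsed with org.junit.Assert.assertThrows; a minimal sketch, assuming pr is an effectively final reference to the destroyed region:

import static org.junit.Assert.assertThrows;

// Each operation on a destroyed region should fail fast with
// RegionDestroyedException; assertThrows verifies that in one line each.
assertThrows(RegionDestroyedException.class, () -> pr.put(new Integer(2), new Integer(2)));
assertThrows(RegionDestroyedException.class, () -> pr.get(new Integer(2)));
assertThrows(RegionDestroyedException.class, () -> pr.destroy(new Integer(1)));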
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
From the class PartitionedRegionDestroyDUnitTest, the method testDestroyRegion:
@Test
public void testDestroyRegion() throws Throwable {
  Host host = Host.getHost(0);
  vm0 = host.getVM(0);
  vm1 = host.getVM(1);
  vm2 = host.getVM(2);
  vm3 = host.getVM(3);
  AsyncInvocation async1 = null;
  CacheSerializableRunnable createPRs = new CacheSerializableRunnable("createPrRegions") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      for (int i = 0; i < MAX_REGIONS; i++) {
        cache.createRegion(PR_PREFIX + i, createRegionAttrsForPR(0, 200));
      }
      LogWriterUtils.getLogWriter().info("Successfully created " + MAX_REGIONS + " PartitionedRegions.");
    }
  };
  // Create PRs
  vm0.invoke(createPRs);
  vm1.invoke(createPRs);
  vm2.invoke(createPRs);
  vm3.invoke(createPRs);
  vm1.invoke(new CacheSerializableRunnable("doPutOperations-1") {
    public void run2() {
      int j = 0;
      final String expectedExistsException = RegionDestroyedException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + expectedExistsException + "</ExpectedException>");
      try {
        Cache cache = getCache();
        for (; j < MAX_REGIONS; j++) {
          PartitionedRegion pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX + j);
          assertNotNull(pr);
          // Create enough entries that all buckets are created; integer keys
          // assume a mod-based distribution across buckets.
          int totalEntries = pr.getTotalNumberOfBuckets() * 2;
          for (int k = 0; k < totalEntries; k++) {
            pr.put(new Integer(k), PR_PREFIX + k);
          }
        }
      } catch (RegionDestroyedException e) {
        // Expected: a region may be destroyed by another VM while puts are in flight.
        // getLogWriter().info(
        // "RegionDestroyedException occurred for Region = " + PR_PREFIX + j);
      }
      getCache().getLogger().info("<ExpectedException action=remove>" + expectedExistsException + "</ExpectedException>");
    }
  });
  async1 = vm2.invokeAsync(new CacheSerializableRunnable("doPutOperations-2") {
    public void run2() throws CacheException {
      int j = 0;
      final String expectedException = RegionDestroyedException.class.getName();
      getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
      try {
        Cache cache = getCache();
        // Grab the regions right away, before they get destroyed
        // by the other thread.
        PartitionedRegion[] prs = new PartitionedRegion[MAX_REGIONS];
        for (j = 0; j < MAX_REGIONS; j++) {
          prs[j] = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX + j);
          if (prs[j] == null) {
            fail("Region was destroyed before putter could find it");
          }
        }
        for (j = 0; j < MAX_REGIONS; j++) {
          PartitionedRegion pr = prs[j];
          assertNotNull(pr);
          int startEntries = pr.getTotalNumberOfBuckets() * 20;
          int endEntries = startEntries + pr.getTotalNumberOfBuckets();
          for (int k = startEntries; k < endEntries; k++) {
            pr.put(new Integer(k), PR_PREFIX + k);
          }
          try {
            Thread.sleep(100);
          } catch (InterruptedException ie) {
            fail("interrupted");
          }
        }
      } catch (RegionDestroyedException e) {
        LogWriterUtils.getLogWriter().info("RegionDestroyedException occurred for Region = " + PR_PREFIX + j);
      }
      getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
    }
  });
  ThreadUtils.join(async1, 30 * 1000);
  if (async1.exceptionOccurred()) {
    Assert.fail("async1 failed", async1.getException());
  }
  final String expectedExceptions = "org.apache.geode.distributed.internal.ReplyException";
  addExceptionTag(expectedExceptions);
  // Give the async putter a chance to grab the regions before they are destroyed.
  Wait.pause(1000);
  vm0.invoke(new CacheSerializableRunnable("destroyPRRegions") {
    public void run2() throws CacheException {
      Cache cache = getCache();
      for (int i = 0; i < MAX_REGIONS; i++) {
        Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX + i);
        assertNotNull(pr);
        pr.destroyRegion();
        assertTrue(pr.isDestroyed());
        Region prDes = cache.getRegion(Region.SEPARATOR + PR_PREFIX + i);
        assertNull(prDes);
      }
    }
  });
  addExceptionTag(expectedExceptions);
  CacheSerializableRunnable validateMetaDataAfterDestroy = new CacheSerializableRunnable("validateMetaDataAfterDestroy") {
    public void run2() throws CacheException {
      InternalCache cache = getCache();
      Region rootRegion = PartitionedRegionHelper.getPRRoot(cache);
      // Region allPRs = PartitionedRegionHelper.getPRConfigRegion(rootRegion,
      // getCache());
      int trial = 0;
      // Verify that all the regions have received the destroy call,
      // polling for up to ten half-second intervals.
      while (trial < 10) {
        if (cache.rootRegions().size() > 1) {
          try {
            Thread.sleep(500);
          } catch (InterruptedException e) {
            fail("interrupted");
          }
          trial++;
        } else {
          break;
        }
      }
      if (cache.rootRegions().size() > 1) {
        fail("Not all regions were destroyed. Number of regions not destroyed = "
            + (cache.rootRegions().size() - 1));
      }
      // Assert that all PartitionedRegions are gone.
      assertEquals(0, rootRegion.size());
      LogWriterUtils.getLogWriter().info("allPartitionedRegions size() = " + rootRegion.size());
      assertEquals("prIdToPR map size is " + PartitionedRegion.prIdToPR.size() + " instead of " + MAX_REGIONS,
          MAX_REGIONS, PartitionedRegion.prIdToPR.size());
      LogWriterUtils.getLogWriter().info("PartitionedRegion.prIdToPR.size() = " + PartitionedRegion.prIdToPR.size());
      LogWriterUtils.getLogWriter().info("# of subregions of root region after destroy call = "
          + rootRegion.subregions(false).size());
      Iterator itr = rootRegion.subregions(false).iterator();
      while (itr.hasNext()) {
        Region rg = (Region) itr.next();
        LogWriterUtils.getLogWriter().info("Root region subregion name = " + rg.getName());
        // assertIndexDetailsEquals("REGION NAME FOUND:" + rg.getName(), -1,
        // rg.getName().indexOf(
        // PartitionedRegionHelper.BUCKET_2_NODE_TABLE_PREFIX));
        assertEquals("Region found that should be gone: " + rg.getName(), -1,
            rg.getName().indexOf(PartitionedRegionHelper.BUCKET_REGION_PREFIX));
      }
    }
  };
  vm0.invoke(validateMetaDataAfterDestroy);
  vm1.invoke(validateMetaDataAfterDestroy);
  vm2.invoke(validateMetaDataAfterDestroy);
  vm3.invoke(validateMetaDataAfterDestroy);
  vm0.invoke(createPRs);
  vm1.invoke(createPRs);
  vm2.invoke(createPRs);
  vm3.invoke(createPRs);
}
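The <ExpectedException action=add/remove> log lines above tell the DUnit log scanner to tolerate the named exception within that window; the remove tag should be emitted even when the guarded code throws. A minimal sketch of a hypothetical helper (not part of the DUnit test base class) that keeps the two tags paired via try/finally, reusing the getCache() accessor from the test:

// Hypothetical helper: brackets work with paired ExpectedException tags so
// the DUnit log scanner ignores the named exception while the work runs.
private void withExpectedException(Class<? extends Throwable> type, Runnable work) {
  final String name = type.getName();
  getCache().getLogger().info("<ExpectedException action=add>" + name + "</ExpectedException>");
  try {
    work.run();
  } finally {
    getCache().getLogger().info("<ExpectedException action=remove>" + name + "</ExpectedException>");
  }
}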
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
From the class DiskInitFile, the method recover:
DiskStoreID recover() {
  recoverFromFailedCompaction();
  if (!this.ifFile.exists()) {
    // No init file yet; create a fresh DiskStoreID from a random UUID.
    return new DiskStoreID(UUID.randomUUID());
  }
  DiskStoreID result = null;
  try {
    FileInputStream fis = null;
    CountingDataInputStream dis = null;
    try {
      fis = new FileInputStream(this.ifFile);
      dis = new CountingDataInputStream(new BufferedInputStream(fis, 8 * 1024), this.ifFile.length());
      DiskInitFileParser parser = new DiskInitFileParser(dis, this);
      result = parser.parse();
      this.gotEOF = parser.gotEOF();
      this.nextSeekPosition = dis.getCount();
      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
        logger.trace(LogMarker.PERSIST_RECOVERY, "liveRecordCount={} totalRecordCount={}",
            this.ifLiveRecordCount, this.ifTotalRecordCount);
      }
    } finally {
      if (dis != null) {
        dis.close();
      }
      if (fis != null) {
        fis.close();
      }
    }
    for (PlaceHolderDiskRegion drv : this.drMap.values()) {
      if (drv.getMyPersistentID() != null || drv.getMyInitializingID() != null) {
        // Prepare each region we found in the init file for early recovery.
        if (drv.isBucket() || !getDiskStore().getOwnedByRegion()) {
          if (drv.isBucket() && !drv.getActualLruAlgorithm().isNone()) {
            drv.prlruStats = getDiskStore().getOrCreatePRLRUStats(drv);
          }
          getDiskStore().getStats().incUncreatedRecoveredRegions(1);
          drv.setRecoveredEntryMap(RegionMapFactory.createVM(drv, getDiskStore(),
              getDiskStore().getInternalRegionArguments()));
          if (!getDiskStore().isOffline()) {
            // Schedule it for recovery since we want to recover region data early.
            getDiskStore().scheduleForRecovery(drv);
          }
          // else, if we are validating or offline-compacting,
          // scheduleForRecovery is called later in DiskStoreImpl;
          // this helps fix bug 42043.
        }
      }
    }
  } catch (EOFException ex) {
    // Ignore: a partial record write can be caused by a crash.
    // throw new
    // DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
    // .toLocalizedString(this.ifFile.getPath()), ex, this.parent);
  } catch (ClassNotFoundException ex) {
    throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
        .toLocalizedString(this.ifFile.getPath()), ex, this.parent);
  } catch (IOException ex) {
    throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0
        .toLocalizedString(this.ifFile.getPath()), ex, this.parent);
  } catch (CancelException ignore) {
    if (logger.isDebugEnabled()) {
      logger.debug("Oplog::readOplog: Error in recovery as Cache was closed", ignore);
    }
  } catch (RegionDestroyedException ignore) {
    if (logger.isDebugEnabled()) {
      logger.debug("Oplog::readOplog: Error in recovery as Region was destroyed", ignore);
    }
  } catch (IllegalStateException ex) {
    if (!this.parent.isClosing()) {
      throw ex;
    }
  }
  return result;
}
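The nested try/finally with explicit null checks around fis and dis predates try-with-resources. On Java 7+, the stream-handling portion of recover() could read as below; a sketch only, assuming CountingDataInputStream implements AutoCloseable (the code above does call its close()):

// Sketch: try-with-resources closes dis, and through it fis, on every
// exit path, replacing the explicit finally block and null checks.
try (FileInputStream fis = new FileInputStream(this.ifFile);
    CountingDataInputStream dis = new CountingDataInputStream(
        new BufferedInputStream(fis, 8 * 1024), this.ifFile.length())) {
  DiskInitFileParser parser = new DiskInitFileParser(dis, this);
  result = parser.parse();
  this.gotEOF = parser.gotEOF();
  this.nextSeekPosition = dis.getCount();
}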
Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.
From the class ColocationHelper, the method getColocatedChildRegions:
/**
* A utility method to retrieve all child partitioned regions that are directly colocated to the
* specified partitioned region.<br>
* <p>
* For example, shipmentPR is colocated with orderPR and orderPR is colocated with customerPR.
* <br>
* getColocatedChildRegions(customerPR) will return List{orderPR}<br>
* getColocatedChildRegions(orderPR) will return List{shipmentPR}<br>
* getColocatedChildRegions(shipmentPR) will return empty List{}<br>
*
* @return list of all child partitioned regions colocated with the region
* @since GemFire 5.8Beta
*/
public static List<PartitionedRegion> getColocatedChildRegions(PartitionedRegion partitionedRegion) {
  List<PartitionedRegion> colocatedChildRegions = new ArrayList<PartitionedRegion>();
  Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache());
  PartitionRegionConfig prConf = null;
  // final List allPRNamesList = new ArrayList(prRoot.keySet());
  Iterator itr = prRoot.keySet().iterator();
  while (itr.hasNext()) {
    try {
      String prName = (String) itr.next();
      if (prName.equals(partitionedRegion.getRegionIdentifier())) {
        // A region can't be a child of itself.
        continue;
      }
      try {
        prConf = (PartitionRegionConfig) prRoot.get(prName);
      } catch (EntryDestroyedException ignore) {
        continue;
      }
      if (prConf == null) {
        // The config entry may have been removed concurrently; skip it.
        continue;
      }
      int prID = prConf.getPRId();
      PartitionedRegion prRegion = PartitionedRegion.getPRFromId(prID);
      if (prRegion != null) {
        if (prRegion.getColocatedWith() != null) {
          if (prRegion.getColocatedWith().equals(partitionedRegion.getFullPath())
              || ("/" + prRegion.getColocatedWith()).equals(partitionedRegion.getFullPath())) {
            // Only regions directly colocated with partitionedRegion are
            // added to the list.
            prRegion.waitOnBucketMetadataInitialization();
            colocatedChildRegions.add(prRegion);
          }
        }
      }
    } catch (PRLocallyDestroyedException e) {
      if (logger.isDebugEnabled()) {
        logger.debug("PRLocallyDestroyedException : Region = {} is locally destroyed on this node", prConf.getPRId(), e);
      }
    } catch (RegionDestroyedException e) {
      if (logger.isDebugEnabled()) {
        logger.debug("RegionDestroyedException : Region = {} is destroyed.", prConf.getPRId(), e);
      }
    }
  }
  // Fix for 44484: make sure the list of colocated child regions
  // is always in the same order on all nodes.
  Collections.sort(colocatedChildRegions, new Comparator<PartitionedRegion>() {
    @Override
    public int compare(PartitionedRegion o1, PartitionedRegion o2) {
      if (o1.isShadowPR() == o2.isShadowPR()) {
        return o1.getFullPath().compareTo(o2.getFullPath());
      }
      if (o1.isShadowPR()) {
        return 1;
      }
      return -1;
    }
  });
  return colocatedChildRegions;
}
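To make the customerPR / orderPR / shipmentPR chain from the javadoc concrete: colocation is declared at region-creation time through PartitionAttributesFactory.setColocatedWith, and the equality check above accepts either the parent's plain name or its full path. A minimal sketch, assuming cache is an existing Cache and using the region names from the example:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

// customerPR is the root of the chain; orderPR and shipmentPR each name
// their parent, so getColocatedChildRegions(customerPR) returns {orderPR}
// and getColocatedChildRegions(orderPR) returns {shipmentPR}.
Region<Object, Object> customerPR =
    cache.<Object, Object>createRegionFactory(RegionShortcut.PARTITION).create("customerPR");

PartitionAttributesFactory<Object, Object> orderPaf = new PartitionAttributesFactory<>();
orderPaf.setColocatedWith(customerPR.getFullPath());
Region<Object, Object> orderPR = cache.<Object, Object>createRegionFactory(RegionShortcut.PARTITION)
    .setPartitionAttributes(orderPaf.create()).create("orderPR");

PartitionAttributesFactory<Object, Object> shipmentPaf = new PartitionAttributesFactory<>();
shipmentPaf.setColocatedWith(orderPR.getFullPath());
Region<Object, Object> shipmentPR = cache.<Object, Object>createRegionFactory(RegionShortcut.PARTITION)
    .setPartitionAttributes(shipmentPaf.create()).create("shipmentPR");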