Use of org.apache.geode.internal.cache.persistence.PersistentMemberID in project geode by apache.
The class DiskIFJUnitTest, method testTwoIFFiles:
/**
 * Make sure that if we have multiple init files with the same disk store name, disk store
 * creation fails. See bug 41883.
 */
@Test
public void testTwoIFFiles() throws Exception {
  diskProps.setPersistBackup(true);
  diskProps.setRegionName("testTwoIFFiles");
  diskProps.setDiskDirs(dirs);
  LocalRegion lr = (LocalRegion) DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps,
      Scope.LOCAL);
  DiskRegion dr = lr.getDiskRegion();
  assertEquals(null, dr.getMyInitializingID());
  PersistentMemberID myId = dr.getMyPersistentID();
  assertNotNull(myId);
  assertTrue(dr.getOnlineMembers().isEmpty());
  assertTrue(dr.getOfflineMembers().isEmpty());
  assertTrue(dr.getOfflineAndEqualMembers().isEmpty());
  // do recovery
  close(lr);
  assertEquals(true, (new File(dirs[0], "BACKUPtestTwoIFFiles.if")).exists());
  File extraIF = new File(dirs[1], "BACKUPtestTwoIFFiles.if");
  extraIF.createNewFile();
  try {
    lr = (LocalRegion) DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps,
        Scope.LOCAL);
    fail("expected IllegalStateException");
  } catch (IllegalStateException expected) {
  } finally {
    extraIF.delete();
  }
}
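The test relies on fixtures (cache, dirs, diskProps) and a close(...) helper that live in the test's base class and are not shown in this snippet. A minimal sketch of what such a helper could look like, assuming it only needs to close the region so its files can be recovered by the next create call (the real helper may do more cleanup):

// Hypothetical helper, shown only for context; the actual method lives in the test's base class.
private void close(LocalRegion region) {
  // Closing a persistent region releases its oplog and init (.if) files so a later
  // getSyncPersistOnlyRegion(...) call recovers from them instead of starting fresh.
  region.close();
}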
Use of org.apache.geode.internal.cache.persistence.PersistentMemberID in project geode by apache.
The class DiskIFJUnitTest, method testAboutToPartialDestroy:
@Test
public void testAboutToPartialDestroy() throws Exception {
  diskProps.setPersistBackup(true);
  diskProps.setRegionName("testAboutToPartialDestroy");
  LocalRegion lr = (LocalRegion) DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps,
      Scope.LOCAL);
  DiskRegion dr = lr.getDiskRegion();
  PersistentMemberID myId = createNewPMID();
  PersistentMemberID myOnId = createNewPMID();
  PersistentMemberID myOffId = createNewPMID();
  PersistentMemberID myEqualId = createNewPMID();
  dr.setInitializing(myId);
  dr.setInitialized();
  dr.memberOnline(myOnId);
  dr.memberOffline(myOffId);
  dr.memberOfflineAndEqual(myEqualId);
  assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers());
  assertEquals(null, dr.getMyInitializingID());
  assertEquals(myId, dr.getMyPersistentID());
  assertEquals(false, dr.wasAboutToDestroy());
  assertEquals(false, dr.wasAboutToDestroyDataStorage());
  dr.beginDestroyDataStorage();
  assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers());
  assertEquals(myId, dr.getMyPersistentID());
  assertEquals(false, dr.wasAboutToDestroy());
  assertEquals(true, dr.wasAboutToDestroyDataStorage());
  close(lr);
  // do recovery
  lr = (LocalRegion) DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps,
      Scope.LOCAL);
  dr = lr.getDiskRegion();
  assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers());
  assertEquals(myId, dr.getMyPersistentID());
  assertEquals(false, dr.wasAboutToDestroy());
  assertEquals(true, dr.wasAboutToDestroyDataStorage());
  dr.forceIFCompaction();
  assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers());
  assertEquals(myId, dr.getMyPersistentID());
  assertEquals(false, dr.wasAboutToDestroy());
  assertEquals(true, dr.wasAboutToDestroyDataStorage());
  close(lr);
  // do recovery
  lr = (LocalRegion) DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps,
      Scope.LOCAL);
  dr = lr.getDiskRegion();
  assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers());
  assertEquals(myId, dr.getMyPersistentID());
  assertEquals(false, dr.wasAboutToDestroy());
  assertEquals(true, dr.wasAboutToDestroyDataStorage());
  dr.endDestroy(lr);
  assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers());
  assertEquals(null, dr.getMyPersistentID());
  assertEquals(false, dr.wasAboutToDestroy());
  assertEquals(false, dr.wasAboutToDestroyDataStorage());
  close(lr);
  // do recovery
  lr = (LocalRegion) DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps,
      Scope.LOCAL);
  dr = lr.getDiskRegion();
  assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers());
  PersistentMemberID newId = dr.getMyPersistentID();
  if (myId.equals(newId)) {
    fail("expected a new id but was: " + newId);
  }
  assertNotNull(newId);
  assertEquals(false, dr.wasAboutToDestroy());
  assertEquals(false, dr.wasAboutToDestroyDataStorage());
  dr.forceIFCompaction();
  assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers());
  assertEquals(newId, dr.getMyPersistentID());
  assertEquals(false, dr.wasAboutToDestroy());
  assertEquals(false, dr.wasAboutToDestroyDataStorage());
  close(lr);
  // do recovery
  lr = (LocalRegion) DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps,
      Scope.LOCAL);
  dr = lr.getDiskRegion();
  assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers());
  assertEquals(newId, dr.getMyPersistentID());
  assertEquals(false, dr.wasAboutToDestroy());
  assertEquals(false, dr.wasAboutToDestroyDataStorage());
}
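The same block of view assertions is repeated after every recovery step above. As a sketch (not part of the original test), those checks could be folded into one helper built only from the DiskRegion accessors the test already exercises:

// Hypothetical refactoring sketch; uses only the DiskRegion calls shown in the test above.
private void assertPersistentView(DiskRegion dr, PersistentMemberID expectedMyId,
    PersistentMemberID onId, PersistentMemberID offId, PersistentMemberID equalId,
    boolean aboutToDestroy, boolean aboutToDestroyDataStorage) {
  assertEquals(Collections.singleton(onId), dr.getOnlineMembers());
  assertEquals(Collections.singleton(offId), dr.getOfflineMembers());
  assertEquals(Collections.singleton(equalId), dr.getOfflineAndEqualMembers());
  assertEquals(expectedMyId, dr.getMyPersistentID());
  assertEquals(aboutToDestroy, dr.wasAboutToDestroy());
  assertEquals(aboutToDestroyDataStorage, dr.wasAboutToDestroyDataStorage());
}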
Use of org.apache.geode.internal.cache.persistence.PersistentMemberID in project geode by apache.
The class AbstractDiskRegion, method dumpPersistentView:
/**
 * Dump the (bucket-specific) persistent view to the given string builder.
 *
 * @param msg the string builder to append the persistent view to
 */
public void dumpPersistentView(StringBuilder msg) {
  msg.append("\n\tMyInitializingID=<").append(getMyInitializingID()).append(">");
  msg.append("\n\tMyPersistentID=<").append(getMyPersistentID()).append(">");
  msg.append("\n\tonlineMembers:");
  for (PersistentMemberID id : getOnlineMembers()) {
    msg.append("\n\t\t").append(id);
  }
  msg.append("\n\tofflineMembers:");
  for (PersistentMemberID id : getOfflineMembers()) {
    msg.append("\n\t\t").append(id);
  }
  msg.append("\n\tequalsMembers:");
  for (PersistentMemberID id : getOfflineAndEqualMembers()) {
    msg.append("\n\t\t").append(id);
  }
}
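A brief usage sketch (not from the source): assuming diskRegion is any AbstractDiskRegion, such as the DiskRegion used in the tests above, and logger is a log4j2-style logger as used elsewhere on this page, the dump can be collected and logged like this:

// Hypothetical caller; diskRegion and logger are assumed to be in scope.
StringBuilder view = new StringBuilder();
diskRegion.dumpPersistentView(view);
logger.debug("persistent view:{}", view);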
Use of org.apache.geode.internal.cache.persistence.PersistentMemberID in project geode by apache.
The class MissingPersistentIDsRequest, method createResponse:
@Override
protected AdminResponse createResponse(DistributionManager dm) {
  Set<PersistentID> missingIds = new HashSet<>();
  Set<PersistentID> localPatterns = new HashSet<>();
  InternalCache cache = GemFireCacheImpl.getInstance();
  if (cache != null && !cache.isClosed()) {
    PersistentMemberManager mm = cache.getPersistentMemberManager();
    Map<String, Set<PersistentMemberID>> waitingRegions = mm.getWaitingRegions();
    for (Map.Entry<String, Set<PersistentMemberID>> entry : waitingRegions.entrySet()) {
      for (PersistentMemberID id : entry.getValue()) {
        missingIds.add(new PersistentMemberPattern(id));
      }
    }
    Set<PersistentMemberID> localIds = mm.getPersistentIDs();
    for (PersistentMemberID id : localIds) {
      localPatterns.add(new PersistentMemberPattern(id));
    }
  }
  return new MissingPersistentIDsResponse(missingIds, localPatterns, this.getSender());
}
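The nested loops simply wrap each internal PersistentMemberID in a PersistentMemberPattern so it can travel in the admin response. The same conversion could be written with Java 8 streams; this is only an equivalent sketch of the loops above, using the same calls plus java.util.stream:

// Sketch of a stream-based equivalent of the loops above.
waitingRegions.values().stream()
    .flatMap(Set::stream)
    .map(PersistentMemberPattern::new)
    .forEach(missingIds::add);
mm.getPersistentIDs().stream()
    .map(PersistentMemberPattern::new)
    .forEach(localPatterns::add);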
Use of org.apache.geode.internal.cache.persistence.PersistentMemberID in project geode by apache.
The class GemFireCacheImpl, method shutDownOnePRGracefully:
private void shutDownOnePRGracefully(PartitionedRegion partitionedRegion) {
  boolean acquiredLock = false;
  try {
    partitionedRegion.acquireDestroyLock();
    acquiredLock = true;
    synchronized (partitionedRegion.getRedundancyProvider()) {
      if (partitionedRegion.isDataStore() && partitionedRegion.getDataStore() != null
          && partitionedRegion.getDataPolicy() == DataPolicy.PERSISTENT_PARTITION) {
        int numBuckets = partitionedRegion.getTotalNumberOfBuckets();
        Map<InternalDistributedMember, PersistentMemberID>[] bucketMaps = new Map[numBuckets];
        PartitionedRegionDataStore dataStore = partitionedRegion.getDataStore();
        // lock all the primary buckets
        Set<Entry<Integer, BucketRegion>> bucketEntries = dataStore.getAllLocalBuckets();
        for (Entry e : bucketEntries) {
          BucketRegion bucket = (BucketRegion) e.getValue();
          if (bucket == null || bucket.isDestroyed) {
            // bucket region could be destroyed in race condition
            continue;
          }
          bucket.getBucketAdvisor().tryLockIfPrimary();
          // get map <InternalDistributedMember, persistentID> for this bucket's
          // remote members
          bucketMaps[bucket.getId()] =
              bucket.getBucketAdvisor().adviseInitializedPersistentMembers();
          if (logger.isDebugEnabled()) {
            logger.debug("shutDownAll: PR {}: initialized persistent members for {}:{}",
                partitionedRegion.getName(), bucket.getId(), bucketMaps[bucket.getId()]);
          }
        }
        if (logger.isDebugEnabled()) {
          logger.debug("shutDownAll: All buckets for PR {} are locked.",
              partitionedRegion.getName());
        }
        // send lock profile update to other members
        partitionedRegion.setShutDownAllStatus(PartitionedRegion.PRIMARY_BUCKETS_LOCKED);
        new UpdateAttributesProcessor(partitionedRegion).distribute(false);
        partitionedRegion.getRegionAdvisor()
            .waitForProfileStatus(PartitionedRegion.PRIMARY_BUCKETS_LOCKED);
        if (logger.isDebugEnabled()) {
          logger.debug("shutDownAll: PR {}: all bucketLock profiles received.",
              partitionedRegion.getName());
        }
        // if async write, do flush
        if (!partitionedRegion.getAttributes().isDiskSynchronous()) {
          // several PRs might share the same diskStore; we only flush once even if
          // flush is called several times.
          partitionedRegion.getDiskStore().forceFlush();
          // send flush profile update to other members
          partitionedRegion.setShutDownAllStatus(PartitionedRegion.DISK_STORE_FLUSHED);
          new UpdateAttributesProcessor(partitionedRegion).distribute(false);
          partitionedRegion.getRegionAdvisor()
              .waitForProfileStatus(PartitionedRegion.DISK_STORE_FLUSHED);
          if (logger.isDebugEnabled()) {
            logger.debug("shutDownAll: PR {}: all flush profiles received.",
                partitionedRegion.getName());
          }
        } // async write
        // persist other members to OFFLINE_EQUAL for each bucket region;
        // iterate through all the bucketMaps and exclude the items whose
        // idm is no longer online
        Set<InternalDistributedMember> membersToPersistOfflineEqual =
            partitionedRegion.getRegionAdvisor().adviseDataStore();
        for (Entry e : bucketEntries) {
          BucketRegion bucket = (BucketRegion) e.getValue();
          if (bucket == null || bucket.isDestroyed) {
            // bucket region could be destroyed in race condition
            continue;
          }
          Map<InternalDistributedMember, PersistentMemberID> persistMap =
              getSubMapForLiveMembers(membersToPersistOfflineEqual, bucketMaps[bucket.getId()]);
          if (persistMap != null) {
            bucket.getPersistenceAdvisor().persistMembersOfflineAndEqual(persistMap);
            if (logger.isDebugEnabled()) {
              logger.debug("shutDownAll: PR {}: persisting bucket {}:{}",
                  partitionedRegion.getName(), bucket.getId(), persistMap);
            }
          }
        }
        // send persisted profile update to other members; let all members persist
        // before closing the region
        partitionedRegion.setShutDownAllStatus(PartitionedRegion.OFFLINE_EQUAL_PERSISTED);
        new UpdateAttributesProcessor(partitionedRegion).distribute(false);
        partitionedRegion.getRegionAdvisor()
            .waitForProfileStatus(PartitionedRegion.OFFLINE_EQUAL_PERSISTED);
        if (logger.isDebugEnabled()) {
          logger.debug("shutDownAll: PR {}: all offline_equal profiles received.",
              partitionedRegion.getName());
        }
      } // dataStore
      // after all bucket steps are done, close the partitionedRegion
      // (close accessor directly)
      RegionEventImpl event = new RegionEventImpl(partitionedRegion, Operation.REGION_CLOSE, null,
          false, getMyId(), true);
      try {
        // not to acquire lock
        partitionedRegion.basicDestroyRegion(event, false, false, true);
      } catch (CacheWriterException e) {
        // not possible with local operation, CacheWriter not called
        throw new Error(
            LocalizedStrings.LocalRegion_CACHEWRITEREXCEPTION_SHOULD_NOT_BE_THROWN_IN_LOCALDESTROYREGION
                .toLocalizedString(),
            e);
      } catch (TimeoutException e) {
        // not possible with local operation, no distributed locks possible
        throw new Error(
            LocalizedStrings.LocalRegion_TIMEOUTEXCEPTION_SHOULD_NOT_BE_THROWN_IN_LOCALDESTROYREGION
                .toLocalizedString(),
            e);
      }
    } // synchronized
  } catch (CacheClosedException cce) {
    logger.debug("Encounter CacheClosedException when shutDownAll is closing PR: {}:{}",
        partitionedRegion.getFullPath(), cce.getMessage());
  } catch (CancelException ce) {
    logger.debug("Encounter CancelException when shutDownAll is closing PR: {}:{}",
        partitionedRegion.getFullPath(), ce.getMessage());
  } catch (RegionDestroyedException rde) {
    logger.debug("Encounter CacheDestroyedException when shutDownAll is closing PR: {}:{}",
        partitionedRegion.getFullPath(), rde.getMessage());
  } finally {
    if (acquiredLock) {
      partitionedRegion.releaseDestroyLock();
    }
  }
}
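getSubMapForLiveMembers(...) is a private helper of GemFireCacheImpl that is not shown on this page. Based on how it is used above, it plausibly filters a bucket's persistent-member map down to members that are still live data stores; the following is only a sketch under that assumption, and the actual implementation may differ:

// Hypothetical sketch of the helper used above; the real private method may differ in detail.
private Map<InternalDistributedMember, PersistentMemberID> getSubMapForLiveMembers(
    Set<InternalDistributedMember> liveMembers,
    Map<InternalDistributedMember, PersistentMemberID> bucketMap) {
  if (bucketMap == null) {
    return null; // no initialized persistent members were recorded for this bucket
  }
  Map<InternalDistributedMember, PersistentMemberID> persistMap = new HashMap<>();
  for (InternalDistributedMember member : liveMembers) {
    if (bucketMap.containsKey(member)) {
      persistMap.put(member, bucketMap.get(member));
    }
  }
  return persistMap;
}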