Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.
Example from the class RegionVersionVectorJUnitTest, method testRVVSerialization.
/**
 * Verifies that a DiskRegionVersionVector survives a serialization round trip:
 * the transmission clone equals its source, and the deserialized copy equals
 * the original vector (including version gaps and GC versions).
 */
@Test
public void testRVVSerialization() throws Exception {
  // One owner plus two peer disk stores to populate the vector.
  DiskStoreID ownerId = new DiskStoreID(0, 0);
  DiskStoreID id1 = new DiskStoreID(0, 1);
  DiskStoreID id2 = new DiskStoreID(1, 0);

  DiskRegionVersionVector rvv = new DiskRegionVersionVector(ownerId);
  // Record a non-contiguous, out-of-order version set for id1 (gaps at 8, 10,
  // and 13-19) so exception tracking is exercised by the round trip.
  for (long version : new long[] {5, 6, 7, 9, 20, 11, 12}) {
    rvv.recordVersion(id1, version);
  }
  rvv.recordGCVersion(id2, 5);
  rvv.recordGCVersion(id1, 3);

  // The clone built for transmission must compare equal to its source.
  assertTrue(rvv.sameAs(rvv.getCloneForTransmission()));

  // Serialize the clone, read it back, and check equality is preserved.
  HeapDataOutputStream out = new HeapDataOutputStream(Version.CURRENT);
  InternalDataSerializer.writeObject(rvv.getCloneForTransmission(), out);
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(out.toByteArray()));
  DiskRegionVersionVector deserialized = InternalDataSerializer.readObject(in);
  assertTrue(rvv.sameAs(deserialized));
}
Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.
Example from the class RegionVersionVectorJUnitTest, method testCopyMemberToVersion.
/**
 * Test that we can copy the member to version map correctly.
 *
 * <p>rvv0 (owned by id0) advances its own version to 3 and records versions 1
 * and 3 for id1, leaving 2 as a recorded exception (gap). rvv1 then copies
 * rvv0's state via {@code recordVersions}; the two must compare equal until
 * rvv1 fills the gap, after which rvv1 strictly dominates rvv0.
 */
@Test
public void testCopyMemberToVersion() {
  // Note: the original test also declared an unused DiskStoreID(1, 0); removed.
  DiskStoreID id0 = new DiskStoreID(0, 0);
  DiskStoreID id1 = new DiskStoreID(0, 1);

  DiskRegionVersionVector rvv0 = new DiskRegionVersionVector(id0);
  // Advance id0's own version to 3.
  rvv0.getNextVersion();
  rvv0.getNextVersion();
  rvv0.getNextVersion();
  // Record 1 and 3 for id1; version 2 becomes an exception (a gap).
  rvv0.recordVersion(id1, 1);
  rvv0.recordVersion(id1, 3);

  DiskRegionVersionVector rvv1 = new DiskRegionVersionVector(id1);
  rvv1.recordVersions(rvv0);
  assertEquals(3, rvv1.getCurrentVersion());
  // The copied vector must reproduce the gap at id1:2 and all recorded versions.
  assertFalse(rvv1.contains(id1, 2));
  assertTrue(rvv1.contains(id1, 1));
  assertTrue(rvv1.contains(id1, 3));
  assertTrue(rvv1.contains(id0, 3));
  assertTrue(rvv0.sameAs(rvv1));

  // Filling the exception makes rvv1 strictly newer than (and dominant over) rvv0.
  rvv1.recordVersion(id1, 2);
  assertTrue(rvv1.isNewerThanOrCanFillExceptionsFor(rvv0));
  assertFalse(rvv0.isNewerThanOrCanFillExceptionsFor(rvv1));
  assertTrue(rvv1.dominates(rvv0));
  assertFalse(rvv0.dominates(rvv1));
}
Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.
Example from the class DiskInitFile, method recover.
/**
 * Recovers this disk store's identity and persisted region metadata from the
 * init file.
 *
 * <p>If no init file exists, a brand new {@code DiskStoreID} is minted from a
 * random UUID. Otherwise the file is parsed record by record, and every region
 * found with a persistent or initializing id is prepared for early data
 * recovery (entry map created, stats updated, recovery scheduled unless the
 * store is offline).
 *
 * @return the recovered (or newly created) disk store id; may be {@code null}
 *         if parsing was aborted (e.g. cache closed or region destroyed
 *         mid-recovery)
 */
DiskStoreID recover() {
  // Roll back any partially completed compaction before reading the file.
  recoverFromFailedCompaction();
  if (!this.ifFile.exists()) {
    // No init file on disk: this is a brand new disk store, so mint a fresh
    // identity. (UUID.randomUUID() is SecureRandom-based — the earlier comment
    // about using "the cheaper Random class" no longer matched the code.)
    return new DiskStoreID(UUID.randomUUID());
  }
  DiskStoreID result = null;
  try {
    FileInputStream fis = null;
    CountingDataInputStream dis = null;
    try {
      fis = new FileInputStream(this.ifFile);
      // Counting stream so nextSeekPosition can be captured after parsing.
      dis = new CountingDataInputStream(new BufferedInputStream(fis, 8 * 1024), this.ifFile.length());
      DiskInitFileParser parser = new DiskInitFileParser(dis, this);
      result = parser.parse();
      this.gotEOF = parser.gotEOF();
      this.nextSeekPosition = dis.getCount();
      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
        logger.trace(LogMarker.PERSIST_RECOVERY, "liveRecordCount={} totalRecordCount={}", this.ifLiveRecordCount, this.ifTotalRecordCount);
      }
    } finally {
      // Close both streams even if parsing throws; dis wraps fis, but close
      // each defensively in case dis was never constructed.
      if (dis != null) {
        dis.close();
      }
      if (fis != null) {
        fis.close();
      }
    }
    for (PlaceHolderDiskRegion drv : this.drMap.values()) {
      if (drv.getMyPersistentID() != null || drv.getMyInitializingID() != null) {
        // Prepare each region we found in the init file for early recovery.
        if (drv.isBucket() || !getDiskStore().getOwnedByRegion()) {
          if (drv.isBucket() && !drv.getActualLruAlgorithm().isNone()) {
            drv.prlruStats = getDiskStore().getOrCreatePRLRUStats(drv);
          }
          getDiskStore().getStats().incUncreatedRecoveredRegions(1);
          drv.setRecoveredEntryMap(RegionMapFactory.createVM(drv, getDiskStore(), getDiskStore().getInternalRegionArguments()));
          if (!getDiskStore().isOffline()) {
            // schedule it for recovery since we want to recover region data early now
            getDiskStore().scheduleForRecovery(drv);
          }
          // else if we are validating or offlineCompacting
          // then the scheduleForRecovery is called later in DiskStoreImpl
          // this helps fix bug 42043
        }
      }
    }
  } catch (EOFException ex) {
    // Deliberately ignored: a partial record at the tail can be caused by a
    // crash mid-write; the parser already returned everything readable up to
    // the truncation point.
  } catch (ClassNotFoundException ex) {
    throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0.toLocalizedString(this.ifFile.getPath()), ex, this.parent);
  } catch (IOException ex) {
    throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0.toLocalizedString(this.ifFile.getPath()), ex, this.parent);
  } catch (CancelException ignore) {
    // Cache is shutting down; abandon recovery quietly.
    if (logger.isDebugEnabled()) {
      logger.debug("Oplog::readOplog:Error in recovery as Cache was closed", ignore);
    }
  } catch (RegionDestroyedException ignore) {
    // The region disappeared mid-recovery; nothing further to recover for it.
    if (logger.isDebugEnabled()) {
      logger.debug("Oplog::readOplog:Error in recovery as Region was destroyed", ignore);
    }
  } catch (IllegalStateException ex) {
    // Swallow only if the parent store is already closing; otherwise rethrow.
    if (!this.parent.isClosing()) {
      throw ex;
    }
  }
  return result;
}
Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.
Example from the class DiskInitFile, method dumpPRMetaData.
/**
 * Dump the metadata for a partitioned region, optionally dumping the meta data for individual
 * buckets.
 *
 * @param showBuckets when true, dump the persistent view of every bucket region; otherwise
 *        print a summary of the online / offline / offline-and-equal members across all buckets
 * @param regions the disk regions belonging to one partitioned region; must be non-empty —
 *        common attributes are taken from the first entry
 */
private void dumpPRMetaData(boolean showBuckets, List<PlaceHolderDiskRegion> regions) {
  StringBuilder msg = new StringBuilder(regions.get(0).getPrName());
  regions.get(0).dumpCommonAttributes(msg);
  if (showBuckets) {
    for (PlaceHolderDiskRegion region : regions) {
      msg.append("\n");
      msg.append("\n");
      msg.append(region.getName());
      region.dumpPersistentView(msg);
    }
  } else {
    // Aggregate each membership category across all buckets; a later bucket's
    // entry for the same DiskStoreID overwrites an earlier one (unchanged
    // behavior from the original triplicated loops).
    Map<DiskStoreID, String> online = new HashMap<DiskStoreID, String>();
    Map<DiskStoreID, String> offline = new HashMap<DiskStoreID, String>();
    Map<DiskStoreID, String> equal = new HashMap<DiskStoreID, String>();
    for (PlaceHolderDiskRegion region : regions) {
      for (PersistentMemberID mem : region.getOnlineMembers()) {
        online.put(mem.diskStoreId, mem.host + ":" + mem.directory);
      }
      for (PersistentMemberID mem : region.getOfflineMembers()) {
        offline.put(mem.diskStoreId, mem.host + ":" + mem.directory);
      }
      for (PersistentMemberID mem : region.getOfflineAndEqualMembers()) {
        equal.put(mem.diskStoreId, mem.host + ":" + mem.directory);
      }
    }
    // The three output sections were identical except for the header; factored
    // into one helper to remove the duplication.
    appendMemberSection(msg, "\n\tonlineMembers:", online);
    appendMemberSection(msg, "\n\tofflineMembers:", offline);
    appendMemberSection(msg, "\n\tequalsMembers:", equal);
  }
  System.out.println(msg);
}

/** Appends a labeled section listing each member's DiskStoreID and host:directory. */
private static void appendMemberSection(StringBuilder msg, String header, Map<DiskStoreID, String> members) {
  msg.append(header);
  for (Map.Entry<DiskStoreID, String> id : members.entrySet()) {
    msg.append("\n\t\t").append(id.getKey()).append(" ").append(id.getValue());
  }
}
Use of org.apache.geode.internal.cache.persistence.DiskStoreID in project geode by apache.
Example from the class DiskInitFile, method cmnClearRegion.
/**
 * Records a clear operation for the region identified by {@code drId}, using a
 * minimal version vector built from the supplied member-to-version map.
 *
 * <p>NOTE(review): this method mutates the caller's {@code memberToVersion}
 * map (the owner's entry is removed) — existing behavior, preserved here.
 */
public void cmnClearRegion(long drId, ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion) {
  DiskRegionView region = getDiskRegionById(drId);
  // A first clear adds a live record; a repeated clear cancels the previous
  // one, so only the total record count grows in that case.
  if (region.getClearRVV() == null) {
    this.ifLiveRecordCount++;
  }
  this.ifTotalRecordCount++;

  DiskStoreID ownerId = parent.getDiskStoreID();
  // Pull the owner's holder out of the map so it can be passed separately to
  // the vector constructor.
  RegionVersionHolder<DiskStoreID> ownerExceptions = memberToVersion.remove(ownerId);
  long ownerVersion;
  if (ownerExceptions == null) {
    ownerVersion = 0;
  } else {
    ownerVersion = ownerExceptions.getVersion();
  }
  // Create a fake RVV for clear purposes; only member-to-version data matters.
  RegionVersionVector clearRvv = new DiskRegionVersionVector(ownerId, memberToVersion, ownerVersion, new ConcurrentHashMap(), 0L, false, ownerExceptions);
  region.setClearRVV(clearRvv);
}
Aggregations