use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
the class DiskStoreImpl method scheduleForRecovery.
void scheduleForRecovery(DiskRecoveryStore drs) {
  DiskRegionView dr = drs.getDiskRegionView();
  PersistentOplogSet oplogSet = getPersistentOplogSet(dr);
  oplogSet.scheduleForRecovery(drs);
}
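This is a thin dispatch: the DiskRegionView tells the store which PersistentOplogSet owns the region, and scheduling is delegated to that set. Below is a minimal, self-contained sketch of the same delegate-to-the-owning-set pattern; RecoveryStore, OplogSet, and SchedulingDemo are hypothetical stand-ins for illustration, not Geode classes.

import java.util.ArrayDeque;
import java.util.Queue;

// Hypothetical stand-in for DiskRecoveryStore.
interface RecoveryStore {
  String regionName();
}

// Hypothetical stand-in for PersistentOplogSet.
class OplogSet {
  private final Queue<RecoveryStore> pending = new ArrayDeque<>();

  // Queue the store; actual recovery happens later (recoverRegionsThatAreReady in Geode).
  void scheduleForRecovery(RecoveryStore store) {
    pending.add(store);
  }

  int pendingCount() {
    return pending.size();
  }
}

public class SchedulingDemo {
  private final OplogSet persistentSet = new OplogSet();

  // Mirrors the shape of DiskStoreImpl.scheduleForRecovery: resolve the
  // owning oplog set for the region, then delegate.
  void scheduleForRecovery(RecoveryStore store) {
    persistentSet.scheduleForRecovery(store);
  }

  public static void main(String[] args) {
    SchedulingDemo store = new SchedulingDemo();
    store.scheduleForRecovery(() -> "orders");
    System.out.println(store.persistentSet.pendingCount()); // prints 1
  }
}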
use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
the class DiskStoreImpl method initializeOwner.
/**
 * Initializes the contents of any regions on this DiskStore that have been registered but are not
 * yet initialized.
 */
void initializeOwner(LocalRegion lr) {
  DiskRegion dr = lr.getDiskRegion();
  // We don't need to do recovery for overflow regions.
  if (!lr.getDataPolicy().withPersistence() || !dr.isRecreated()) {
    return;
  }
  // Prevent async value recovery from recovering a value
  // while we are copying the entry map.
  synchronized (currentAsyncValueRecoveryMap) {
    DiskRegionView drv = lr.getDiskRegionView();
    if (drv.getRecoveredEntryMap() != null) {
      PersistentOplogSet oplogSet = getPersistentOplogSet(drv);
      // Acquire the compactor write lock only if the region attributes for the
      // real region are different from the placeholder region's.
      boolean releaseCompactorWriteLock = false;
      if (drv.isEntriesMapIncompatible()) {
        // fix bug #51097 to prevent concurrent compaction
        acquireCompactorWriteLock();
        releaseCompactorWriteLock = true;
      }
      try {
        drv.copyExistingRegionMap(lr);
        getStats().incUncreatedRecoveredRegions(-1);
        for (Oplog oplog : oplogSet.getAllOplogs()) {
          if (oplog != null) {
            oplog.updateDiskRegion(lr.getDiskRegionView());
          }
        }
      } finally {
        if (releaseCompactorWriteLock) {
          releaseCompactorWriteLock();
        }
      }
      if (currentAsyncValueRecoveryMap.containsKey(drv.getId())) {
        currentAsyncValueRecoveryMap.put(drv.getId(), lr);
      }
      return;
    }
  }
  scheduleForRecovery(lr);
  try {
    // acquireReadLock(dr);
    // gotLock = true;
    recoverRegionsThatAreReady();
  } catch (DiskAccessException dae) {
    // Just rethrow the DiskAccessException as-is.
    throw dae;
  } catch (RuntimeException re) {
    // Wrap any other RuntimeException in a DiskAccessException here.
    throw new DiskAccessException("RuntimeException in initializing the disk store from the disk",
        re, this);
  }
  // finally {
  //   if (gotLock) {
  //     releaseReadLock(dr);
  //   }
  // }
}
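The interesting idiom here is the conditional lock: the compactor write lock is acquired only when the recovered entry map is incompatible, and a boolean flag guarantees a matching release in the finally block even if the copy throws. A minimal sketch of that acquire-if-needed, release-in-finally pattern, using a plain ReentrantReadWriteLock rather than Geode's compactor lock:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ConditionalLockDemo {
  private static final ReentrantReadWriteLock compactorLock = new ReentrantReadWriteLock();

  static void copyRegionMap(boolean mapsIncompatible) {
    boolean releaseWriteLock = false;
    if (mapsIncompatible) {
      // Keep the (hypothetical) compactor out while the maps are swapped.
      compactorLock.writeLock().lock();
      releaseWriteLock = true;
    }
    try {
      // ... copy the existing region map into the real region ...
    } finally {
      // The flag ensures we never unlock a lock we did not take.
      if (releaseWriteLock) {
        compactorLock.writeLock().unlock();
      }
    }
  }

  public static void main(String[] args) {
    copyRegionMap(true);
    copyRegionMap(false);
    System.out.println("both paths balanced the lock");
  }
}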
use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
the class DiskStoreImpl method validate.
private void validate() {
  assert isValidating();
  // save memory @todo should Oplog make sure value is deserializable?
  this.RECOVER_VALUES = false;
  this.liveEntryCount = 0;
  this.deadRecordCount = 0;
  for (DiskRegionView drv : getKnown()) {
    scheduleForRecovery(ValidatingDiskRegion.create(this, drv));
  }
  recoverRegionsThatAreReady();
  if (getDeadRecordCount() > 0) {
    System.out.println("Disk store contains " + getDeadRecordCount() + " compactable records.");
  }
  System.out.println(
      "Total number of region entries in this disk store is: " + getLiveEntryCount());
}
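In validation mode the store disables value recovery to save memory, replays every known region through a validating placeholder, and then reports the accumulated counters. A rough, self-contained sketch of that counting flow; RegionStats and the sample numbers are invented for illustration and do not correspond to Geode's ValidatingDiskRegion:

import java.util.List;

public class ValidateDemo {
  record RegionStats(String name, int live, int dead) {}

  public static void main(String[] args) {
    // Pretend these came back from recovering each known region.
    List<RegionStats> known = List.of(
        new RegionStats("orders", 120, 3),
        new RegionStats("customers", 80, 0));

    int liveEntryCount = 0;
    int deadRecordCount = 0;
    for (RegionStats drv : known) { // stands in for scheduleForRecovery + recovery
      liveEntryCount += drv.live();
      deadRecordCount += drv.dead();
    }
    if (deadRecordCount > 0) {
      System.out.println("Disk store contains " + deadRecordCount + " compactable records.");
    }
    System.out.println("Total number of region entries in this disk store is: " + liveEntryCount);
  }
}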
use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
the class DiskStoreImpl method pdxRename.
private Collection<Object> pdxRename(String oldBase, String newBase) throws IOException {
  // Since we are recovering a disk store, the casts from DiskRegionView to
  // PlaceHolderDiskRegion and from RegionEntry to DiskEntry should be ok.
  // In offline mode, we need to schedule the regions to be recovered explicitly.
  DiskRegionView foundPdx = null;
  for (DiskRegionView drv : getKnown()) {
    if (drv.getName().equals(PeerTypeRegistration.REGION_FULL_PATH)) {
      foundPdx = drv;
      scheduleForRecovery((PlaceHolderDiskRegion) drv);
    }
  }
  if (foundPdx == null) {
    throw new IllegalStateException("The disk store does not contain any PDX types.");
  }
  recoverRegionsThatAreReady();
  PersistentOplogSet oplogSet = (PersistentOplogSet) getOplogSet(foundPdx);
  ArrayList<Object> result = new ArrayList<>();
  Pattern pattern = createPdxRenamePattern(oldBase);
  for (RegionEntry re : foundPdx.getRecoveredEntryMap().regionEntries()) {
    Object value = re._getValueRetain(foundPdx, true);
    if (Token.isRemoved(value)) {
      continue;
    }
    if (value instanceof CachedDeserializable) {
      value = ((CachedDeserializable) value).getDeserializedForReading();
    }
    if (value instanceof EnumInfo) {
      EnumInfo ei = (EnumInfo) value;
      String newName = replacePdxRenamePattern(pattern, ei.getClassName(), newBase);
      if (newName != null) {
        ei.setClassName(newName);
        result.add(ei);
        oplogSet.offlineModify(foundPdx, (DiskEntry) re, BlobHelper.serializeToBlob(ei), true);
      }
    } else {
      PdxType type = (PdxType) value;
      String newName = replacePdxRenamePattern(pattern, type.getClassName(), newBase);
      if (newName != null) {
        type.setClassName(newName);
        result.add(type);
        oplogSet.offlineModify(foundPdx, (DiskEntry) re, BlobHelper.serializeToBlob(type), true);
      }
    }
  }
  return result;
}
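The rename itself hinges on createPdxRenamePattern and replacePdxRenamePattern, which the snippet does not show. Below is a plausible sketch of how such a pair could work, treating the old base as a leading package prefix and returning null on no match so the caller skips the entry; the exact quoting and boundary rules here are assumptions, not Geode's implementation.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PdxRenameDemo {
  // Assumption: the old base is matched only as a leading package prefix,
  // followed by a dot or the end of the class name.
  static Pattern createRenamePattern(String oldBase) {
    return Pattern.compile("^" + Pattern.quote(oldBase) + "(?=\\.|$)");
  }

  // Returns the rewritten class name, or null if the pattern does not match,
  // mirroring the null check in pdxRename above.
  static String replaceRenamePattern(Pattern p, String className, String newBase) {
    Matcher m = p.matcher(className);
    if (!m.find()) {
      return null;
    }
    return m.replaceFirst(Matcher.quoteReplacement(newBase));
  }

  public static void main(String[] args) {
    Pattern p = createRenamePattern("com.acme");
    System.out.println(replaceRenamePattern(p, "com.acme.Order", "com.example")); // com.example.Order
    System.out.println(replaceRenamePattern(p, "org.other.Foo", "com.example")); // null
  }
}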
use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
the class DiskInitFile method cmnRegionConfig.
public void cmnRegionConfig(long drId, byte lruAlgorithm, byte lruAction, int lruLimit,
    int concurrencyLevel, int initialCapacity, float loadFactor, boolean statisticsEnabled,
    boolean isBucket, EnumSet<DiskRegionFlag> flags, String partitionName, int startingBucketId,
    String compressorClassName, boolean offHeap) {
  DiskRegionView dr = getDiskRegionById(drId);
  if (dr != null) {
    // Regions recovered from a pre-8.0 disk store need the versioning flag
    // added, except gateway sender queue regions, identified by name.
    if (Version.GFE_80.compareTo(currentRecoveredGFVersion()) > 0
        && !dr.getName().contains("_SERIAL_GATEWAY_SENDER_QUEUE")
        && !dr.getName().contains("_PARALLEL__GATEWAY__SENDER__QUEUE")) {
      flags.add(DiskRegionFlag.IS_WITH_VERSIONING);
    }
    dr.setConfig(lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor,
        statisticsEnabled, isBucket, flags, partitionName, startingBucketId, compressorClassName,
        offHeap);
    // Just count this as a live record even though it is possible
    // that we have an extra one due to the config changing while
    // we were offline.
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  } else {
    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
      logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
    } else {
      throw new IllegalStateException("bad disk region id");
    }
  }
}
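The version gate is the core of this method: records recovered from a pre-GFE 8.0 disk store get the versioning flag added unless the region name marks it as a gateway sender queue. A self-contained sketch of that gate, with integer ordinals standing in for Geode's Version objects; the names and sample values are illustrative only.

import java.util.EnumSet;

public class RegionConfigDemo {
  enum DiskRegionFlag { IS_WITH_VERSIONING }

  static void applyUpgradeFlags(int recoveredOrdinal, int gfe80Ordinal,
      String regionName, EnumSet<DiskRegionFlag> flags) {
    // Pre-8.0 records did not carry versioning, so add it at recovery time.
    boolean preEighty = recoveredOrdinal < gfe80Ordinal;
    // Gateway sender queue regions are exempt, detected by name substring.
    boolean gatewayQueue = regionName.contains("_SERIAL_GATEWAY_SENDER_QUEUE")
        || regionName.contains("_PARALLEL__GATEWAY__SENDER__QUEUE");
    if (preEighty && !gatewayQueue) {
      flags.add(DiskRegionFlag.IS_WITH_VERSIONING);
    }
  }

  public static void main(String[] args) {
    EnumSet<DiskRegionFlag> flags = EnumSet.noneOf(DiskRegionFlag.class);
    applyUpgradeFlags(70, 80, "orders", flags);
    System.out.println(flags); // [IS_WITH_VERSIONING]
  }
}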