use of org.apache.geode.internal.cache.persistence.DiskRecoveryStore in project geode by apache.
the class VMThinDiskRegionEntryHeapObjectKey method diskInitialize.
// DO NOT modify this class. It was generated from LeafRegionEntry.cpp
private void diskInitialize(RegionEntryContext context, Object value) {
  DiskRecoveryStore drs = (DiskRecoveryStore) context;
  DiskStoreImpl ds = drs.getDiskStore();
  long maxOplogSize = ds.getMaxOplogSize();
  // get appropriate instance of DiskId implementation based on maxOplogSize
  this.id = DiskId.createDiskId(maxOplogSize, true /* is persistence */, ds.needsLinkedList());
  Helper.initialize(this, drs, value);
}
use of org.apache.geode.internal.cache.persistence.DiskRecoveryStore in project geode by apache.
the class VersionedStatsDiskRegionEntryOffHeapStringKey2 method diskInitialize.
// DO NOT modify this class. It was generated from LeafRegionEntry.cpp
private void diskInitialize(RegionEntryContext context, Object value) {
  DiskRecoveryStore drs = (DiskRecoveryStore) context;
  DiskStoreImpl ds = drs.getDiskStore();
  long maxOplogSize = ds.getMaxOplogSize();
  // get appropriate instance of DiskId implementation based on maxOplogSize
  this.id = DiskId.createDiskId(maxOplogSize, true /* is persistence */, ds.needsLinkedList());
  Helper.initialize(this, drs, value);
}
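Both generated classes above share the same diskInitialize body: the leaf region entry variants are generated from the LeafRegionEntry.cpp template, so every persistent variant initializes its disk state the same way, casting the RegionEntryContext to DiskRecoveryStore, fetching the owning DiskStoreImpl, and letting DiskId.createDiskId pick a DiskId representation sized to the store's maximum oplog size. For orientation, the sketch below lists only the DiskRecoveryStore members exercised by the snippets on this page, with return types inferred from the call sites; the real interface in org.apache.geode.internal.cache.persistence declares more members than shown here.

// Illustrative subset only -- inferred from the call sites on this page, not
// the full org.apache.geode.internal.cache.persistence.DiskRecoveryStore API.
public interface DiskRecoveryStore {
  DiskStoreImpl getDiskStore();                   // diskInitialize: owning disk store
  DiskRegionView getDiskRegionView();             // recovery: per-region view and counters
  DiskEntry getDiskEntry(Object key);             // readModifyEntry: prior recovery lookup
  DiskEntry initializeRecoveredEntry(Object key, DiskEntry.RecoveredEntry re);
  DiskEntry updateRecoveredEntry(Object key, DiskEntry.RecoveredEntry re);
  void recordRecoveredVersionTag(VersionTag tag); // keeps the region RVV current
}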
use of org.apache.geode.internal.cache.persistence.DiskRecoveryStore in project geode by apache.
the class PersistentOplogSet method recoverOplogs.
private long recoverOplogs(long byteCount) {
  OplogEntryIdSet deletedIds = new OplogEntryIdSet();
  TreeSet<Oplog> oplogSet = getSortedOplogs();
  Set<Oplog> oplogsNeedingValueRecovery = new HashSet<Oplog>();
  if (!this.alreadyRecoveredOnce.get()) {
    if (getChild() != null && !getChild().hasBeenUsed()) {
      // Then remove the current child since it is empty
      // and does not need to be recovered from
      // and it is important to not call initAfterRecovery on it.
      oplogSet.remove(getChild());
    }
  }
  if (oplogSet.size() > 0) {
    long startOpLogRecovery = System.currentTimeMillis();
    // first figure out all entries that have been destroyed
    boolean latestOplog = true;
    for (Oplog oplog : oplogSet) {
      byteCount += oplog.recoverDrf(deletedIds, this.alreadyRecoveredOnce.get(), latestOplog);
      latestOplog = false;
      if (!this.alreadyRecoveredOnce.get()) {
        updateOplogEntryId(oplog.getMaxRecoveredOplogEntryId());
      }
    }
    parent.incDeadRecordCount(deletedIds.size());
    // now figure out live entries
    latestOplog = true;
    for (Oplog oplog : oplogSet) {
      long startOpLogRead = parent.getStats().startOplogRead();
      long bytesRead = oplog.recoverCrf(deletedIds,
          // @todo make recoverValues per region
          recoverValues(), recoverValuesSync(), this.alreadyRecoveredOnce.get(),
          oplogsNeedingValueRecovery, latestOplog);
      latestOplog = false;
      if (!this.alreadyRecoveredOnce.get()) {
        updateOplogEntryId(oplog.getMaxRecoveredOplogEntryId());
      }
      byteCount += bytesRead;
      parent.getStats().endOplogRead(startOpLogRead, bytesRead);
      // Used for offline export
      for (DiskRecoveryStore drs : this.currentRecoveryMap.values()) {
        drs.getDiskRegionView().oplogRecovered(oplog.oplogId);
      }
    }
    long endOpLogRecovery = System.currentTimeMillis();
    long elapsed = endOpLogRecovery - startOpLogRecovery;
    logger.info(LocalizedMessage.create(LocalizedStrings.DiskRegion_OPLOG_LOAD_TIME, elapsed));
  }
  if (!parent.isOfflineCompacting()) {
    long startRegionInit = System.currentTimeMillis();
    // initialize the recovered entry count on each region being recovered
    for (DiskRecoveryStore drs : this.currentRecoveryMap.values()) {
      drs.getDiskRegionView().initRecoveredEntryCount();
    }
    if (!this.alreadyRecoveredOnce.get()) {
      for (Oplog oplog : oplogSet) {
        if (oplog != getChild()) {
          oplog.initAfterRecovery(parent.isOffline());
        }
      }
      if (getChild() == null) {
        setFirstChild(getSortedOplogs(), false);
      }
    }
    if (!parent.isOffline()) {
      if (recoverValues() && !recoverValuesSync()) {
        // TODO DAN - should we defer compaction until after
        // value recovery is complete? Or at least until after
        // value recovery for a given oplog is complete?
        // Right now, that's effectively what we're doing
        // because this uses up the compactor thread.
        parent.scheduleValueRecovery(oplogsNeedingValueRecovery, this.currentRecoveryMap);
      }
      if (!this.alreadyRecoveredOnce.get()) {
        // Create krfs for oplogs that are missing them
        for (Oplog oplog : oplogSet) {
          if (oplog.needsKrf()) {
            oplog.createKrfAsync();
          }
        }
        parent.scheduleCompaction();
      }
      long endRegionInit = System.currentTimeMillis();
      logger.info(LocalizedMessage.create(LocalizedStrings.DiskRegion_REGION_INIT_TIME,
          endRegionInit - startRegionInit));
    }
  }
  return byteCount;
}
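recoverOplogs makes two passes over the sorted oplogs: the first pass reads each oplog's drf (delete record file) to collect the ids of destroyed entries into deletedIds, and the second pass reads each crf (create record file), skipping those ids while rebuilding live entries; only the first oplog iterated is flagged as the latest. The runnable toy below illustrates just that two-pass idea; the record types and names are hypothetical, not Geode APIs.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Self-contained illustration of two-pass recovery: pass 1 collects destroyed
// ids from delete records, pass 2 replays create records, skipping any id seen
// in pass 1. All names here are hypothetical.
class TwoPassRecoveryDemo {
  record DeleteRecord(long id) {}
  record CreateRecord(long id, String value) {}

  public static void main(String[] args) {
    List<DeleteRecord> drf = List.of(new DeleteRecord(2L));
    List<CreateRecord> crf =
        List.of(new CreateRecord(1L, "a"), new CreateRecord(2L, "b"), new CreateRecord(3L, "c"));
    Set<Long> deletedIds = new HashSet<>();
    for (DeleteRecord d : drf) {            // pass 1: gather destroyed entry ids
      deletedIds.add(d.id());
    }
    List<CreateRecord> live = new ArrayList<>();
    for (CreateRecord c : crf) {            // pass 2: keep only live entries
      if (!deletedIds.contains(c.id())) {
        live.add(c);
      }
    }
    System.out.println(live);               // entries 1 and 3 survive
  }
}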
use of org.apache.geode.internal.cache.persistence.DiskRecoveryStore in project geode by apache.
the class PersistentOplogSet method recoverRegionsThatAreReady.
public void recoverRegionsThatAreReady() {
  // The following sync also prevents concurrent recoveries by multiple regions,
  // which is needed currently.
  synchronized (this.alreadyRecoveredOnce) {
    // need to take a snapshot of DiskRecoveryStores we will recover
    synchronized (this.pendingRecoveryMap) {
      this.currentRecoveryMap.clear();
      this.currentRecoveryMap.putAll(this.pendingRecoveryMap);
      this.pendingRecoveryMap.clear();
    }
    if (this.currentRecoveryMap.isEmpty() && this.alreadyRecoveredOnce.get()) {
      // no recovery needed
      return;
    }
    for (DiskRecoveryStore drs : this.currentRecoveryMap.values()) {
      // Call prepare early to fix bug 41119.
      drs.getDiskRegionView().prepareForRecovery();
    }
    if (!this.alreadyRecoveredOnce.get()) {
      initOplogEntryId();
      // Fix for #43026 - make sure we don't reuse an entry
      // id that has been marked as cleared.
      updateOplogEntryId(parent.getDiskInitFile().getMaxRecoveredClearEntryId());
    }
    final long start = parent.getStats().startRecovery();
    long byteCount = 0;
    EntryLogger.setSource(parent.getDiskStoreID(), "recovery");
    try {
      byteCount = recoverOplogs(byteCount);
    } finally {
      Map<String, Integer> prSizes = null;
      Map<String, Integer> prBuckets = null;
      if (parent.isValidating()) {
        prSizes = new HashMap<String, Integer>();
        prBuckets = new HashMap<String, Integer>();
      }
      for (DiskRecoveryStore drs : this.currentRecoveryMap.values()) {
        for (Oplog oplog : getAllOplogs()) {
          if (oplog != null) {
            // Need to do this AFTER recovery to protect from concurrent compactions
            // trying to remove the oplogs.
            // We can't remove a dr from the oplog's unrecoveredRegionCount
            // until it is fully recovered.
            // This fixes bug 41119.
            oplog.checkForRecoverableRegion(drs.getDiskRegionView());
          }
        }
        if (parent.isValidating()) {
          if (drs instanceof ValidatingDiskRegion) {
            ValidatingDiskRegion vdr = ((ValidatingDiskRegion) drs);
            if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
              vdr.dump(System.out);
            }
            if (vdr.isBucket()) {
              String prName = vdr.getPrName();
              if (prSizes.containsKey(prName)) {
                int oldSize = prSizes.get(prName);
                oldSize += vdr.size();
                prSizes.put(prName, oldSize);
                int oldBuckets = prBuckets.get(prName);
                oldBuckets++;
                prBuckets.put(prName, oldBuckets);
              } else {
                prSizes.put(prName, vdr.size());
                prBuckets.put(prName, 1);
              }
            } else {
              parent.incLiveEntryCount(vdr.size());
              System.out.println(vdr.getName() + ": entryCount=" + vdr.size());
            }
          }
        }
      }
      if (parent.isValidating()) {
        for (Map.Entry<String, Integer> me : prSizes.entrySet()) {
          parent.incLiveEntryCount(me.getValue());
          System.out.println(me.getKey() + " entryCount=" + me.getValue() + " bucketCount="
              + prBuckets.get(me.getKey()));
        }
      }
      parent.getStats().endRecovery(start, byteCount);
      this.alreadyRecoveredOnce.set(true);
      this.currentRecoveryMap.clear();
      EntryLogger.clearSource();
    }
  }
}
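recoverRegionsThatAreReady uses a snapshot-then-work pattern: the stores awaiting recovery are copied out of pendingRecoveryMap into currentRecoveryMap under a short lock, and all subsequent recovery work runs against that private snapshot, so regions can keep registering for a later recovery while this one is in flight. A minimal self-contained illustration of the pattern, with hypothetical names:

import java.util.HashMap;
import java.util.Map;

// Self-contained sketch of the snapshot-then-work pattern above: hold the lock
// only to copy and clear the pending work, then process the private snapshot
// lock-free. Names are hypothetical; the real maps live in PersistentOplogSet.
class SnapshotThenWorkDemo {
  private final Map<Long, String> pendingWork = new HashMap<>();

  void register(long id, String task) {
    synchronized (pendingWork) {
      pendingWork.put(id, task);
    }
  }

  void drainAndProcess() {
    Map<Long, String> snapshot;
    synchronized (pendingWork) {              // short critical section: copy and clear
      snapshot = new HashMap<>(pendingWork);
      pendingWork.clear();
    }
    for (String task : snapshot.values()) {   // long-running work, outside the lock
      System.out.println("processing " + task);
    }
  }
}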
use of org.apache.geode.internal.cache.persistence.DiskRecoveryStore in project geode by apache.
the class Oplog method readModifyEntry.
/**
 * Reads an oplog entry of type Modify.
 *
 * @param dis DataInputStream from which the oplog is being read
 * @param opcode the record opcode; it encodes how many bytes the entry id occupies
 */
private void readModifyEntry(CountingDataInputStream dis, byte opcode, OplogEntryIdSet deletedIds,
    boolean recoverValue, LocalRegion currentRegion, Version version, ByteArrayDataInput in,
    HeapDataOutputStream hdos) throws IOException {
  final boolean isPersistRecoveryDebugEnabled = logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY);
  long oplogOffset = -1;
  byte userBits = dis.readByte();
  int idByteCount = (opcode - OPLOG_MOD_ENTRY_1ID) + 1;
  // long debugRecoverModEntryId = this.recoverModEntryId;
  long oplogKeyId = getModEntryId(dis, idByteCount);
  // long debugOplogKeyId = dis.readLong();
  // //assert oplogKeyId == debugOplogKeyId
  // // : "expected=" + debugOplogKeyId + " actual=" + oplogKeyId
  // assert debugRecoverModEntryId == debugOplogKeyId
  // : "expected=" + debugOplogKeyId + " actual=" + debugRecoverModEntryId
  // + " idByteCount=" + idByteCount
  // + " delta=" + this.lastDelta;
  long drId = DiskInitFile.readDiskRegionID(dis);
  DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId);
  // read versions
  VersionTag tag = null;
  if (EntryBits.isWithVersions(userBits)) {
    tag = readVersionsFromOplog(dis);
  } else if (getParent().isUpgradeVersionOnly() && drs != null) {
    tag = this.createDummyTag(drs);
    userBits = EntryBits.setWithVersions(userBits, true);
  }
  if (drs != null
      && !drs.getDiskRegionView().getFlags().contains(DiskRegionFlag.IS_WITH_VERSIONING)) {
    // 50044 Remove version tag from entry if we don't want versioning for
    // this region
    tag = null;
    userBits = EntryBits.setWithVersions(userBits, false);
  }
  OkToSkipResult skipResult = okToSkipModifyRecord(deletedIds, drId, drs, oplogKeyId, false, tag);
  if (skipResult.skip()) {
    if (!isPhase2()) {
      incSkipped();
      this.stats.incRecoveryRecordsSkipped();
    }
  } else if (recoverValue && !getParent().isOfflineCompacting()) {
    recoverValue = recoverLruValue(drs);
  }
  byte[] objValue = null;
  int valueLength = 0;
  CompactionRecord p2cr = null;
  long crOffset;
  if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits)) {
    if (EntryBits.isInvalid(userBits)) {
      objValue = DiskEntry.INVALID_BYTES;
    } else if (EntryBits.isTombstone(userBits)) {
      objValue = DiskEntry.TOMBSTONE_BYTES;
    } else {
      objValue = DiskEntry.LOCAL_INVALID_BYTES;
    }
    crOffset = dis.getCount();
    if (!skipResult.skip()) {
      if (isPhase2()) {
        p2cr = (CompactionRecord) getRecoveryMap().get(oplogKeyId);
        if (p2cr != null && p2cr.getOffset() != crOffset) {
          skipResult = OkToSkipResult.SKIP_RECORD;
        }
      }
    }
  } else {
    int len = dis.readInt();
    oplogOffset = dis.getCount();
    crOffset = oplogOffset;
    valueLength = len;
    if (!skipResult.skip()) {
      if (isPhase2()) {
        p2cr = (CompactionRecord) getRecoveryMap().get(oplogKeyId);
        if (p2cr != null && p2cr.getOffset() != crOffset) {
          skipResult = OkToSkipResult.SKIP_RECORD;
        }
      }
    }
    if (!skipResult.skip() && recoverValue) {
      byte[] valueBytes = new byte[len];
      dis.readFully(valueBytes);
      objValue = valueBytes;
      validateValue(valueBytes, userBits, version, in);
    } else {
      forceSkipBytes(dis, len);
    }
  }
  readEndOfRecord(dis);
  if (drs != null && tag != null) {
    // Update the RVV with the new entry
    // This must be done after reading the end of record to make sure
    // we don't have a corrupt record. See bug #45538
    drs.recordRecoveredVersionTag(tag);
  }
  incTotalCount();
  if (!skipResult.skip()) {
    Object key = getRecoveryMap().get(oplogKeyId);
    // If the key is not in the recovery map, the record that carried the key
    // was previously skipped, so check the skipped bytes map for it.
    if (key == null) {
      byte[] keyBytes = (byte[]) skippedKeyBytes.get(oplogKeyId);
      if (keyBytes != null) {
        key = deserializeKey(keyBytes, version, in);
      }
    }
    if (isPersistRecoveryDebugEnabled) {
      logger.trace(LogMarker.PERSIST_RECOVERY,
          "readModifyEntry oplogKeyId=<{}> drId={} key=<{}> userBits={} oplogOffset={} tag={} valueLen={}",
          oplogKeyId, drId, key, userBits, oplogOffset, tag, valueLength);
    }
    // The key cannot be null here: the first modify record for an entry in any oplog
    // will now be a MOD_ENTRY_WITH_KEY record.
    assert key != null;
    if (getParent().isOfflineCompacting()) {
      if (isPhase1()) {
        CompactionRecord cr = (CompactionRecord) key;
        // we are going to compact the previous record away
        incSkipped();
        cr.update(crOffset);
      } else {
        // phase2
        Assert.assertTrue(p2cr != null, "First pass did not create a compaction record");
        getOplogSet().getChild().copyForwardForOfflineCompact(oplogKeyId, p2cr.getKeyBytes(),
            objValue, userBits, drId, tag);
        if (isPersistRecoveryDebugEnabled) {
          logger.trace(LogMarker.PERSIST_RECOVERY, "readModifyEntry copyForward oplogKeyId=<{}>",
              oplogKeyId);
        }
        // add it to the deletedIds set so we will ignore it in earlier oplogs
        deletedIds.add(oplogKeyId);
      }
    } else {
      // Check the actual region to see if it has this key from
      // a previous recovered oplog.
      DiskEntry de = drs.getDiskEntry(key);
      // This may actually be a create, if the previous create or modify
      // of this entry was cleared through the RVV clear.
      if (de == null) {
        DiskRegionView drv = drs.getDiskRegionView();
        // build a recovered entry for this key and register it with the region
        DiskEntry.RecoveredEntry re = createRecoveredEntry(objValue, valueLength, userBits,
            getOplogId(), oplogOffset, oplogKeyId, recoverValue, version, in);
        if (tag != null) {
          re.setVersionTag(tag);
        }
        if (isPersistRecoveryDebugEnabled) {
          logger.trace(LogMarker.PERSIST_RECOVERY,
              "readModEntryWK init oplogKeyId=<{}> drId={} key=<{}> oplogOffset={} userBits={} valueLen={} tag={}",
              oplogKeyId, drId, key, oplogOffset, userBits, valueLength, tag);
        }
        initRecoveredEntry(drv, drs.initializeRecoveredEntry(key, re));
        drs.getDiskRegionView().incRecoveredEntryCount();
        this.stats.incRecoveredEntryCreates();
      } else {
        DiskEntry.RecoveredEntry re = createRecoveredEntry(objValue, valueLength, userBits,
            getOplogId(), oplogOffset, oplogKeyId, recoverValue, version, in);
        if (tag != null) {
          re.setVersionTag(tag);
        }
        de = drs.updateRecoveredEntry(key, re);
        updateRecoveredEntry(drs.getDiskRegionView(), de, re);
        this.stats.incRecoveredEntryUpdates();
      }
    }
  } else {
    if (isPersistRecoveryDebugEnabled) {
      logger.trace(LogMarker.PERSIST_RECOVERY, "skipping readModifyEntry oplogKeyId=<{}> drId={}",
          oplogKeyId, drId);
    }
  }
}
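Reading the calls in readModifyEntry in order reveals the on-disk layout the method expects for a modify record. The sketch below is reverse-engineered from those reads, so the field names are descriptive rather than official:

// MOD_ENTRY record layout as implied by the reads in readModifyEntry
// (the opcode byte has already been consumed by the dispatching caller):
//
//   byte       userBits           dis.readByte()
//   1-8 bytes  oplogKeyId         width = (opcode - OPLOG_MOD_ENTRY_1ID) + 1
//   variable   drId               DiskInitFile.readDiskRegionID(dis)
//   optional   version tag        only if EntryBits.isWithVersions(userBits)
//   int + N    value len + bytes  omitted for invalid/tombstone entries
//   trailer    end-of-record      readEndOfRecord(dis)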