Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class DiskStoreImpl, method destroyRegion.
/**
* Destroy the on-disk data for a region that exists in this disk store but has not been
* created (instantiated) in the cache.
*
* @param regName the name of the region to destroy
*/
public void destroyRegion(String regName) {
DiskRegionView drv = getDiskInitFile().getDiskRegionByName(regName);
if (drv == null) {
drv = getDiskInitFile().getDiskRegionByPrName(regName);
PRPersistentConfig prConfig = getDiskInitFile().getPersistentPR(regName);
if (drv == null && prConfig == null) {
throw new IllegalArgumentException("The disk store does not contain a region named: " + regName);
} else {
getDiskInitFile().destroyPRRegion(regName);
}
} else {
getDiskInitFile().endDestroyRegion(drv);
}
}
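The branching above boils down to: look the name up as a plain region first, fall back to the partitioned-region lookups, and fail only when neither the PR view nor the PR config knows the name. Below is a minimal, self-contained sketch of that decision flow, using plain maps as hypothetical stand-ins for the three DiskInitFile lookups (none of these class or field names exist in Geode):

import java.util.HashMap;
import java.util.Map;

class DestroyRegionFlowSketch {
  final Map<String, Object> byRegionName = new HashMap<>(); // stand-in for getDiskRegionByName
  final Map<String, Object> byPrName = new HashMap<>();     // stand-in for getDiskRegionByPrName
  final Map<String, Object> prConfigs = new HashMap<>();    // stand-in for getPersistentPR

  String destroyRegion(String regName) {
    if (byRegionName.get(regName) != null) {
      return "endDestroyRegion";     // a plain region known to the init file
    }
    if (byPrName.get(regName) == null && prConfigs.get(regName) == null) {
      throw new IllegalArgumentException(
          "The disk store does not contain a region named: " + regName);
    }
    return "destroyPRRegion";        // partitioned-region metadata
  }
}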
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class Oplog, method compact.
/**
* Copy any live entries whose most recent value is stored in this oplog forward to the current
* oplog. Deletes recorded in the drf (delete record file) do not need to be copied; a backup
* only needs them until all of the older crfs (create record files) are empty.
*/
public int compact(OplogCompactor compactor) {
if (!needsCompaction()) {
// @todo check new logic that deals with not compacting oplogs
// which have unrecovered regions
return 0;
}
isCompactorThread.set(Boolean.TRUE);
assert calledByCompactorThread();
getParent().acquireCompactorReadLock();
try {
if (!compactor.keepCompactorRunning()) {
return 0;
}
lockCompactor();
try {
if (hasNoLiveValues()) {
handleNoLiveValues();
return 0; // do this while holding compactorLock
}
// Start with a fresh wrapper on every compaction so that
// if previous run used some high memory byte array which was
// exceptional, it gets garbage collected.
long opStart = getStats().getStatTime();
BytesAndBitsForCompactor wrapper = new BytesAndBitsForCompactor();
DiskEntry de;
DiskEntry lastDe = null;
boolean compactFailed = /* getParent().getOwner().isDestroyed || */ !compactor.keepCompactorRunning();
int totalCount = 0;
for (DiskRegionInfo dri : this.regionMap.values()) {
final DiskRegionView dr = dri.getDiskRegion();
if (dr == null)
continue;
boolean didCompact = false;
while ((de = dri.getNextLiveEntry()) != null) {
if (/* getParent().getOwner().isDestroyed || */ !compactor.keepCompactorRunning()) {
compactFailed = true;
break;
}
if (lastDe != null) {
if (lastDe == de) {
throw new IllegalStateException("compactor would have gone into infinite loop");
}
assert lastDe != de;
}
lastDe = de;
didCompact = false;
synchronized (de) {
// fix for bug 41797
DiskId did = de.getDiskId();
assert did != null;
synchronized (did) {
long oplogId = did.getOplogId();
if (oplogId != getOplogId()) {
continue;
}
boolean toCompact = getBytesAndBitsForCompaction(dr, de, wrapper);
if (toCompact) {
if (oplogId != did.getOplogId()) {
// skip this entry; its oplogId changed
if (!wrapper.isReusable()) {
wrapper = new BytesAndBitsForCompactor();
} else if (wrapper.getOffHeapData() != null) {
wrapper.setOffHeapData(null, (byte) 0);
}
continue;
}
// write it to the current oplog
getOplogSet().getChild().copyForwardModifyForCompact(dr, de, wrapper);
// the did's oplogId will now be set to the current active oplog
didCompact = true;
}
} // did
} // de
if (didCompact) {
totalCount++;
getStats().endCompactionUpdate(opStart);
opStart = getStats().getStatTime();
// recreate the wrapper
if (!wrapper.isReusable()) {
wrapper = new BytesAndBitsForCompactor();
}
}
}
}
if (!compactFailed) {
// Need to still remove the oplog even if it had nothing to compact.
handleNoLiveValues();
// We can't assert hasNoLiveValues() because a race condition exists
// in which our liveEntries list is empty but the liveCount has not
// yet been decremented.
}
return totalCount;
} finally {
unlockCompactor();
}
} finally {
getParent().releaseCompactorReadLock();
assert calledByCompactorThread();
isCompactorThread.remove();
}
}
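Oplog.compact is not called by application code; it runs on the compactor thread once an oplog's garbage content crosses the disk store's compaction threshold, or when a compaction is forced. A minimal sketch, at the public DiskStore API level, of the knobs that drive this path; the threshold and store name are illustrative (50 is also the default):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.DiskStoreFactory;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);         // do not compact automatically when the threshold is crossed
    dsf.setAllowForceCompaction(true); // but allow an explicit forceCompaction() call
    dsf.setCompactionThreshold(50);    // an oplog becomes eligible once at least 50% of it is garbage
    DiskStore store = dsf.create("exampleStore");

    // Triggers the compactor, which walks compactable oplogs and copies live entries
    // forward to the current oplog (the work done by Oplog.compact above).
    boolean ran = store.forceCompaction();
    System.out.println("compaction ran: " + ran);
    cache.close();
  }
}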
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class Oplog, method readModifyEntryWithKey.
/**
* Reads an oplog entry of type ModifyWithKey
*
* @param dis DataInputStream from which the oplog is being read
* @param opcode the record opcode; its offset from OPLOG_MOD_ENTRY_WITH_KEY_1ID gives the
*        number of bytes used to store the entry id
*/
private void readModifyEntryWithKey(CountingDataInputStream dis, byte opcode, OplogEntryIdSet deletedIds, boolean recoverValue, final LocalRegion currentRegion, Version version, ByteArrayDataInput in, HeapDataOutputStream hdos) throws IOException {
long oplogOffset = -1;
byte userBits = dis.readByte();
int idByteCount = (opcode - OPLOG_MOD_ENTRY_WITH_KEY_1ID) + 1;
// long debugRecoverModEntryId = this.recoverModEntryId;
long oplogKeyId = getModEntryId(dis, idByteCount);
// long debugOplogKeyId = dis.readLong();
// //assert oplogKeyId == debugOplogKeyId
// // : "expected=" + debugOplogKeyId + " actual=" + oplogKeyId
// assert debugRecoverModEntryId == debugOplogKeyId
// : "expected=" + debugOplogKeyId + " actual=" + debugRecoverModEntryId
// + " idByteCount=" + idByteCount
// + " delta=" + this.lastDelta;
long drId = DiskInitFile.readDiskRegionID(dis);
DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId);
// read version
VersionTag tag = null;
if (EntryBits.isWithVersions(userBits)) {
tag = readVersionsFromOplog(dis);
} else if (getParent().isUpgradeVersionOnly() && drs != null) {
tag = this.createDummyTag(drs);
userBits = EntryBits.setWithVersions(userBits, true);
}
if (drs != null && !drs.getDiskRegionView().getFlags().contains(DiskRegionFlag.IS_WITH_VERSIONING)) {
// 50044 Remove version tag from entry if we don't want versioning for
// this region
tag = null;
userBits = EntryBits.setWithVersions(userBits, false);
}
OkToSkipResult skipResult = okToSkipModifyRecord(deletedIds, drId, drs, oplogKeyId, true, tag);
if (skipResult.skip()) {
if (!isPhase2()) {
incSkipped();
this.stats.incRecoveryRecordsSkipped();
}
} else if (recoverValue && !getParent().isOfflineCompacting()) {
recoverValue = recoverLruValue(drs);
}
byte[] objValue = null;
int valueLength = 0;
CompactionRecord p2cr = null;
long crOffset;
if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits)) {
if (EntryBits.isInvalid(userBits)) {
objValue = DiskEntry.INVALID_BYTES;
} else if (EntryBits.isTombstone(userBits)) {
objValue = DiskEntry.TOMBSTONE_BYTES;
} else {
objValue = DiskEntry.LOCAL_INVALID_BYTES;
}
crOffset = dis.getCount();
if (!skipResult.skip()) {
if (isPhase2()) {
p2cr = (CompactionRecord) getRecoveryMap().get(oplogKeyId);
if (p2cr != null && p2cr.getOffset() != crOffset) {
skipResult = OkToSkipResult.SKIP_RECORD;
}
}
}
} else {
int len = dis.readInt();
oplogOffset = dis.getCount();
crOffset = oplogOffset;
valueLength = len;
if (!skipResult.skip()) {
if (isPhase2()) {
p2cr = (CompactionRecord) getRecoveryMap().get(oplogKeyId);
if (p2cr != null && p2cr.getOffset() != crOffset) {
skipResult = OkToSkipResult.SKIP_RECORD;
}
}
}
if (!skipResult.skip() && recoverValue) {
byte[] valueBytes = new byte[len];
dis.readFully(valueBytes);
objValue = valueBytes;
validateValue(valueBytes, userBits, version, in);
} else {
forceSkipBytes(dis, len);
}
}
int keyLen = dis.readInt();
incTotalCount();
if (skipResult.skip()) {
if (skipResult.skipKey()) {
forceSkipBytes(dis, keyLen);
} else {
byte[] keyBytes = new byte[keyLen];
dis.readFully(keyBytes);
skippedKeyBytes.put(oplogKeyId, keyBytes);
}
readEndOfRecord(dis);
if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
logger.trace(LogMarker.PERSIST_RECOVERY, "skipping readModEntryWK init oplogKeyId=<{}> drId={}", oplogKeyId, drId);
}
} else {
// read the key
byte[] keyBytes = null;
if (isPhase2()) {
forceSkipBytes(dis, keyLen);
} else {
keyBytes = new byte[keyLen];
dis.readFully(keyBytes);
}
readEndOfRecord(dis);
if (drs != null && tag != null) {
// Update the RVV with the new entry
// This must be done after reading the end of record to make sure
// we don't have a corrupt record. See bug #45538
drs.recordRecoveredVersionTag(tag);
}
assert oplogKeyId >= 0;
if (getParent().isOfflineCompacting()) {
if (isPhase1()) {
CompactionRecord cr = new CompactionRecord(keyBytes, crOffset);
getRecoveryMap().put(oplogKeyId, cr);
drs.getDiskRegionView().incRecoveredEntryCount();
this.stats.incRecoveredEntryCreates();
} else {
// phase2
Assert.assertTrue(p2cr != null, "First pass did not find/create a compaction record");
getOplogSet().getChild().copyForwardForOfflineCompact(oplogKeyId, p2cr.getKeyBytes(), objValue, userBits, drId, tag);
if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
logger.trace(LogMarker.PERSIST_RECOVERY, "readModifyEntryWithKey copyForward oplogKeyId=<{}>", oplogKeyId);
}
// add it to the deletedIds set so we will ignore it in earlier oplogs
deletedIds.add(oplogKeyId);
}
} else {
Object key = deserializeKey(keyBytes, version, in);
Object oldValue = getRecoveryMap().put(oplogKeyId, key);
if (oldValue != null) {
throw new AssertionError(LocalizedStrings.Oplog_DUPLICATE_CREATE.toLocalizedString(oplogKeyId));
}
// Check the actual region to see if it has this key from
// a previous recovered oplog.
DiskEntry de = drs.getDiskEntry(key);
if (de == null) {
DiskRegionView drv = drs.getDiskRegionView();
// and create an entry
DiskEntry.RecoveredEntry re = createRecoveredEntry(objValue, valueLength, userBits, getOplogId(), oplogOffset, oplogKeyId, recoverValue, version, in);
if (tag != null) {
re.setVersionTag(tag);
}
if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
logger.trace(LogMarker.PERSIST_RECOVERY, "readModEntryWK init oplogKeyId=<{}> drId={} key={} oplogOffset={} userBits={} valueLen={} tag={}", oplogKeyId, drId, key, oplogOffset, userBits, valueLength, tag);
}
initRecoveredEntry(drv, drs.initializeRecoveredEntry(key, re));
drs.getDiskRegionView().incRecoveredEntryCount();
this.stats.incRecoveredEntryCreates();
} else {
DiskId curdid = de.getDiskId();
assert curdid.getOplogId() != getOplogId() : "Multiple ModEntryWK in the same oplog for getOplogId()=" + getOplogId() + " , curdid.getOplogId()=" + curdid.getOplogId() + " , for drId=" + drId + " , key=" + key;
if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
logger.trace(LogMarker.PERSIST_RECOVERY, "ignore readModEntryWK because getOplogId()={} != curdid.getOplogId()={} for drId={} key={}", getOplogId(), curdid.getOplogId(), drId, key);
}
// de = drs.updateRecoveredEntry(key, re);
// updateRecoveredEntry(drv, de, re);
// this.stats.incRecoveredEntryUpdates();
}
}
}
}
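The read order in the method above defines the shape of a MOD_ENTRY_WITH_KEY record: user bits, entry id, disk region id, optional version tag, optional value, key, end-of-record. Here is a simplified sketch of that layout as a plain DataInputStream reader, assuming fixed-width encodings; the real code uses delta/variable-length encodings for the entry id and drId, reads the version tag and end-of-record marker through helper methods, and derives value presence from the user bits:

import java.io.DataInputStream;
import java.io.IOException;

final class ModifyWithKeyRecordSketch {
  long oplogKeyId;  // stands in for getModEntryId(dis, idByteCount)
  long drId;        // stands in for DiskInitFile.readDiskRegionID(dis)
  byte userBits;    // invalid / tombstone / with-version flags
  byte[] value;     // absent when userBits mark the entry invalid or a tombstone
  byte[] key;       // always present for a MOD_ENTRY_WITH_KEY record

  static ModifyWithKeyRecordSketch read(DataInputStream dis, int idByteCount,
      boolean hasInlineValue) throws IOException {
    ModifyWithKeyRecordSketch r = new ModifyWithKeyRecordSketch();
    r.userBits = dis.readByte();                // 1. user bits
    for (int i = 0; i < idByteCount; i++) {     // 2. entry id; width comes from the opcode
      r.oplogKeyId = (r.oplogKeyId << 8) | (dis.readByte() & 0xFF); // (real id is delta-encoded)
    }
    r.drId = dis.readLong();                    // 3. disk region id (really variable length)
    // 4. a version tag would be read here when EntryBits.isWithVersions(userBits)
    if (hasInlineValue) {                       // the real code derives this from userBits
      int valueLen = dis.readInt();             // 5. value length + value bytes
      r.value = new byte[valueLen];
      dis.readFully(r.value);
    }
    int keyLen = dis.readInt();                 // 6. key length + key bytes
    r.key = new byte[keyLen];
    dis.readFully(r.key);
    // 7. an end-of-record marker follows in the real format (readEndOfRecord)
    return r;
  }
}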
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class DiskInitFileJUnitTest, method testCanonicalIds.
/**
* Test the behavior of canonical ids in the init file.
*/
@Test
public void testCanonicalIds() {
// create a mock statistics factory for creating directory holders
final StatisticsFactory sf = context.mock(StatisticsFactory.class);
context.checking(new Expectations() {
{
ignoring(sf);
}
});
// Create a mock disk store impl. All we need to do is return
// this init file directory.
final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
context.checking(new Expectations() {
{
allowing(parent).getInfoFileDir();
will(returnValue(new DirectoryHolder(sf, testDirectory, 0, 0)));
ignoring(parent);
}
});
// Create an init file and add some canonical ids
DiskInitFile dif = new DiskInitFile("testFile", parent, false, Collections.<File>emptySet());
assertNull(dif.getCanonicalObject(5));
assertNull(dif.getCanonicalObject(0));
int id1 = dif.getOrCreateCanonicalId("object1");
int id2 = dif.getOrCreateCanonicalId("object2");
assertEquals("object1", dif.getCanonicalObject(id1));
assertEquals("object2", dif.getCanonicalObject(id2));
assertEquals(id2, dif.getOrCreateCanonicalId("object2"));
// Add a mock region to the init file so it doesn't
// delete the file when the init file is closed
final DiskRegionView drv = context.mock(DiskRegionView.class);
context.checking(new Expectations() {
{
ignoring(drv);
}
});
dif.createRegion(drv);
// close the init file
dif.close();
// recover the init file from disk
dif = new DiskInitFile("testFile", parent, true, Collections.<File>emptySet());
// make sure we can recover the ids from disk
assertEquals("object1", dif.getCanonicalObject(id1));
assertEquals("object2", dif.getCanonicalObject(id2));
assertEquals(id2, dif.getOrCreateCanonicalId("object2"));
// Make sure we can add new ids
int id3 = dif.getOrCreateCanonicalId("object3");
assertTrue(id3 > id2);
assertEquals("object1", dif.getCanonicalObject(id1));
assertEquals("object2", dif.getCanonicalObject(id2));
assertEquals("object3", dif.getCanonicalObject(id3));
dif.close();
}
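The assertions above pin down the observable contract of canonical ids: an unknown id maps to null, the same object always maps to the same id, and new objects get larger ids. A minimal in-memory sketch of that contract follows (illustrative only; DiskInitFile additionally persists and recovers the table, which is what the second half of the test verifies):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class CanonicalIdTable {
  private final Map<Object, Integer> idsByObject = new HashMap<>();
  private final List<Object> objectsById = new ArrayList<>();

  synchronized int getOrCreateCanonicalId(Object o) {
    Integer id = idsByObject.get(o);
    if (id != null) {
      return id;                      // same object -> same id, as the test asserts for "object2"
    }
    objectsById.add(o);
    int newId = objectsById.size();   // ids start at 1; unknown ids resolve to null below
    idsByObject.put(o, newId);
    return newId;
  }

  synchronized Object getCanonicalObject(int id) {
    return (id <= 0 || id > objectsById.size()) ? null : objectsById.get(id - 1);
  }
}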
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class DiskStoreImpl, method pdxDeleteField.
private Collection<PdxType> pdxDeleteField(String className, String fieldName) throws IOException {
// Since we are recovering a disk store, the cast from DiskRegionView --> PlaceHolderDiskRegion
// and from RegionEntry --> DiskEntry should be ok.
// In offline mode, we need to schedule the regions to be recovered explicitly.
DiskRegionView foundPdx = null;
for (DiskRegionView drv : getKnown()) {
if (drv.getName().equals(PeerTypeRegistration.REGION_FULL_PATH)) {
foundPdx = drv;
scheduleForRecovery((PlaceHolderDiskRegion) drv);
}
}
if (foundPdx == null) {
throw new IllegalStateException("The disk store does not contain any PDX types.");
}
recoverRegionsThatAreReady();
PersistentOplogSet oplogSet = (PersistentOplogSet) getOplogSet(foundPdx);
ArrayList<PdxType> result = new ArrayList<PdxType>();
for (RegionEntry re : foundPdx.getRecoveredEntryMap().regionEntries()) {
Object value = re._getValueRetain(foundPdx, true);
if (Token.isRemoved(value)) {
continue;
}
if (value instanceof CachedDeserializable) {
value = ((CachedDeserializable) value).getDeserializedForReading();
}
if (value instanceof EnumInfo) {
// nothing to delete in an enum
continue;
}
PdxType type = (PdxType) value;
if (type.getClassName().equals(className)) {
PdxField field = type.getPdxField(fieldName);
if (field != null) {
field.setDeleted(true);
type.setHasDeletedField(true);
result.add(type);
oplogSet.offlineModify(foundPdx, (DiskEntry) re, BlobHelper.serializeToBlob(type), true);
}
}
}
return result;
}
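Stripped of the recovery and oplog plumbing, the transformation this method applies is: for every recovered PdxType of the target class that still has the field, mark the field deleted and flag the type. A sketch of just that step, using only the PdxType/PdxField calls that appear above; writing the modified types back (offlineModify) is left to the caller:

import java.util.ArrayList;
import java.util.List;
import org.apache.geode.pdx.internal.PdxField;
import org.apache.geode.pdx.internal.PdxType;

final class PdxFieldDeleter {
  static List<PdxType> markDeleted(Iterable<PdxType> recoveredTypes,
      String className, String fieldName) {
    List<PdxType> changed = new ArrayList<>();
    for (PdxType type : recoveredTypes) {
      if (!type.getClassName().equals(className)) {
        continue;                     // only touch types of the requested class
      }
      PdxField field = type.getPdxField(fieldName);
      if (field == null) {
        continue;                     // this type never had the field
      }
      field.setDeleted(true);         // mark the field as deleted
      type.setHasDeletedField(true);  // flag the type as carrying a deleted field
      changed.add(type);              // caller still writes these back via offlineModify
    }
    return changed;
  }
}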