Example 6 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

the class DiskStoreImpl method destroyRegion.

/**
   * Destroy a region which has not been created in the cache; its data exists only in this disk store's files.
   * 
   * @param regName the name of the region to destroy
   */
public void destroyRegion(String regName) {
    // Look the region up by its plain name first.
    DiskRegionView drv = getDiskInitFile().getDiskRegionByName(regName);
    if (drv == null) {
        // Not found: fall back to the partitioned-region (PR) namespace.
        drv = getDiskInitFile().getDiskRegionByPrName(regName);
        PRPersistentConfig prConfig = getDiskInitFile().getPersistentPR(regName);
        if (drv == null && prConfig == null) {
            throw new IllegalArgumentException("The disk store does not contain a region named: " + regName);
        } else {
            getDiskInitFile().destroyPRRegion(regName);
        }
    } else {
        getDiskInitFile().endDestroyRegion(drv);
    }
}
Also used : PRPersistentConfig(org.apache.geode.internal.cache.persistence.PRPersistentConfig) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)
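
For orientation, a minimal, self-contained sketch of the lookup-and-fallback control flow destroyRegion follows: try the plain region namespace, fall back to the partitioned-region namespace, and fail only when both come up empty. The class and maps below are hypothetical stand-ins, not Geode APIs.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins for the two namespaces the init file consults.
public class RegionCatalogSketch {
    private final Map<String, Object> regionsByName = new HashMap<>();
    private final Map<String, Object> prConfigsByName = new HashMap<>();

    public void destroyRegion(String regName) {
        Object region = regionsByName.remove(regName);
        if (region != null) {
            return; // plain region found and destroyed
        }
        // fall back to the partitioned-region (PR) namespace
        Object prConfig = prConfigsByName.remove(regName);
        if (prConfig == null) {
            throw new IllegalArgumentException("The disk store does not contain a region named: " + regName);
        }
    }
}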

Example 7 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

the class Oplog method compact.

/**
   * Copy any live entries last stored in this oplog to the current oplog. No need to copy deletes
   * in the drf. Backup only needs them until all the older crfs are empty.
   */
public int compact(OplogCompactor compactor) {
    if (!needsCompaction()) {
        // @todo check new logic that deals with not compacting oplogs
        // which have unrecovered regions
        return 0;
    }
    isCompactorThread.set(Boolean.TRUE);
    assert calledByCompactorThread();
    getParent().acquireCompactorReadLock();
    try {
        if (!compactor.keepCompactorRunning()) {
            return 0;
        }
        lockCompactor();
        try {
            if (hasNoLiveValues()) {
                handleNoLiveValues();
                // do this while holding compactorLock
                return 0;
            }
            // Start with a fresh wrapper on every compaction so that
            // if previous run used some high memory byte array which was
            // exceptional, it gets garbage collected.
            long opStart = getStats().getStatTime();
            BytesAndBitsForCompactor wrapper = new BytesAndBitsForCompactor();
            DiskEntry de;
            DiskEntry lastDe = null;
            boolean compactFailed = /* getParent().getOwner().isDestroyed || */
                    !compactor.keepCompactorRunning();
            int totalCount = 0;
            for (DiskRegionInfo dri : this.regionMap.values()) {
                final DiskRegionView dr = dri.getDiskRegion();
                if (dr == null)
                    continue;
                boolean didCompact = false;
                while ((de = dri.getNextLiveEntry()) != null) {
                    if (/* getParent().getOwner().isDestroyed || */ !compactor.keepCompactorRunning()) {
                        compactFailed = true;
                        break;
                    }
                    if (lastDe != null) {
                        if (lastDe == de) {
                            throw new IllegalStateException("compactor would have gone into infinite loop");
                        }
                        assert lastDe != de;
                    }
                    lastDe = de;
                    didCompact = false;
                    synchronized (de) {
                        // fix for bug 41797
                        DiskId did = de.getDiskId();
                        assert did != null;
                        synchronized (did) {
                            long oplogId = did.getOplogId();
                            if (oplogId != getOplogId()) {
                                continue;
                            }
                            boolean toCompact = getBytesAndBitsForCompaction(dr, de, wrapper);
                            if (toCompact) {
                                if (oplogId != did.getOplogId()) {
                                    // skip this guy; his oplogId changed
                                    if (!wrapper.isReusable()) {
                                        wrapper = new BytesAndBitsForCompactor();
                                    } else if (wrapper.getOffHeapData() != null) {
                                        wrapper.setOffHeapData(null, (byte) 0);
                                    }
                                    continue;
                                }
                                // write it to the current oplog
                                getOplogSet().getChild().copyForwardModifyForCompact(dr, de, wrapper);
                                // the did's oplogId will now be set to the current active oplog
                                didCompact = true;
                            }
                        } // did
                    } // de
                    if (didCompact) {
                        totalCount++;
                        getStats().endCompactionUpdate(opStart);
                        opStart = getStats().getStatTime();
                        // recreate the wrapper
                        if (!wrapper.isReusable()) {
                            wrapper = new BytesAndBitsForCompactor();
                        }
                    }
                }
            }
            if (!compactFailed) {
                // Need to still remove the oplog even if it had nothing to compact.
                handleNoLiveValues();
            // We can't assert hasNoLiveValues() because a race condition exists
            // in which our liveEntries list is empty but the liveCount has not
            // yet been decremented.
            }
            return totalCount;
        } finally {
            unlockCompactor();
        }
    } finally {
        getParent().releaseCompactorReadLock();
        assert calledByCompactorThread();
        isCompactorThread.remove();
    }
}
Also used : DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)
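
The heart of compact() is a per-entry check / fetch / re-check / copy-forward idiom: note which oplog currently owns the entry, fetch its bytes, confirm ownership has not changed (the fetch itself can move the entry), and only then copy it to the current oplog. The sketch below illustrates that shape with invented names; it is not the Geode implementation.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical sketch of the check / fetch / re-check / copy-forward idiom.
public class CopyForwardSketch {
    static final long THIS_OPLOG_ID = 7L;

    static class Entry {
        volatile long oplogId = THIS_OPLOG_ID; // which oplog currently owns the value
        byte[] value = {1, 2, 3};
    }

    static int compact(Map<String, Entry> liveEntries, Map<String, byte[]> currentOplog) {
        int copied = 0;
        for (Map.Entry<String, Entry> e : liveEntries.entrySet()) {
            Entry entry = e.getValue();
            synchronized (entry) {
                if (entry.oplogId != THIS_OPLOG_ID) {
                    continue; // already lives in a newer oplog: nothing to copy
                }
                byte[] bytes = entry.value.clone(); // stand-in for getBytesAndBitsForCompaction
                if (entry.oplogId != THIS_OPLOG_ID) {
                    continue; // the fetch may have moved the entry; skip it if so
                }
                currentOplog.put(e.getKey(), bytes); // copy forward to the current oplog
                entry.oplogId = THIS_OPLOG_ID + 1; // now owned by the current oplog
                copied++;
            }
        }
        return copied;
    }

    public static void main(String[] args) {
        Map<String, Entry> live = new ConcurrentHashMap<>();
        live.put("k1", new Entry());
        System.out.println("copied " + compact(live, new ConcurrentHashMap<>()));
    }
}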

Example 8 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

the class Oplog method readModifyEntryWithKey.

/**
   * Reads an oplog entry of type ModifyWithKey
   * 
   * @param dis the CountingDataInputStream from which the oplog entry is read
   * @param opcode the opcode byte, which indicates whether the id is stored as a short, int, or long
   */
private void readModifyEntryWithKey(CountingDataInputStream dis, byte opcode, OplogEntryIdSet deletedIds, boolean recoverValue, final LocalRegion currentRegion, Version version, ByteArrayDataInput in, HeapDataOutputStream hdos) throws IOException {
    long oplogOffset = -1;
    byte userBits = dis.readByte();
    int idByteCount = (opcode - OPLOG_MOD_ENTRY_WITH_KEY_1ID) + 1;
    // long debugRecoverModEntryId = this.recoverModEntryId;
    long oplogKeyId = getModEntryId(dis, idByteCount);
    // long debugOplogKeyId = dis.readLong();
    // //assert oplogKeyId == debugOplogKeyId
    // // : "expected=" + debugOplogKeyId + " actual=" + oplogKeyId
    // assert debugRecoverModEntryId == debugOplogKeyId
    // : "expected=" + debugOplogKeyId + " actual=" + debugRecoverModEntryId
    // + " idByteCount=" + idByteCount
    // + " delta=" + this.lastDelta;
    long drId = DiskInitFile.readDiskRegionID(dis);
    DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId);
    // read version
    VersionTag tag = null;
    if (EntryBits.isWithVersions(userBits)) {
        tag = readVersionsFromOplog(dis);
    } else if (getParent().isUpgradeVersionOnly() && drs != null) {
        tag = this.createDummyTag(drs);
        userBits = EntryBits.setWithVersions(userBits, true);
    }
    if (drs != null && !drs.getDiskRegionView().getFlags().contains(DiskRegionFlag.IS_WITH_VERSIONING)) {
        // 50044 Remove version tag from entry if we don't want versioning for
        // this region
        tag = null;
        userBits = EntryBits.setWithVersions(userBits, false);
    }
    OkToSkipResult skipResult = okToSkipModifyRecord(deletedIds, drId, drs, oplogKeyId, true, tag);
    if (skipResult.skip()) {
        if (!isPhase2()) {
            incSkipped();
            this.stats.incRecoveryRecordsSkipped();
        }
    } else if (recoverValue && !getParent().isOfflineCompacting()) {
        recoverValue = recoverLruValue(drs);
    }
    byte[] objValue = null;
    int valueLength = 0;
    CompactionRecord p2cr = null;
    long crOffset;
    if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits)) {
        if (EntryBits.isInvalid(userBits)) {
            objValue = DiskEntry.INVALID_BYTES;
        } else if (EntryBits.isTombstone(userBits)) {
            objValue = DiskEntry.TOMBSTONE_BYTES;
        } else {
            objValue = DiskEntry.LOCAL_INVALID_BYTES;
        }
        crOffset = dis.getCount();
        if (!skipResult.skip()) {
            if (isPhase2()) {
                p2cr = (CompactionRecord) getRecoveryMap().get(oplogKeyId);
                if (p2cr != null && p2cr.getOffset() != crOffset) {
                    skipResult = OkToSkipResult.SKIP_RECORD;
                }
            }
        }
    } else {
        int len = dis.readInt();
        oplogOffset = dis.getCount();
        crOffset = oplogOffset;
        valueLength = len;
        if (!skipResult.skip()) {
            if (isPhase2()) {
                p2cr = (CompactionRecord) getRecoveryMap().get(oplogKeyId);
                if (p2cr != null && p2cr.getOffset() != crOffset) {
                    skipResult = OkToSkipResult.SKIP_RECORD;
                }
            }
        }
        if (!skipResult.skip() && recoverValue) {
            byte[] valueBytes = new byte[len];
            dis.readFully(valueBytes);
            objValue = valueBytes;
            validateValue(valueBytes, userBits, version, in);
        } else {
            forceSkipBytes(dis, len);
        }
    }
    int keyLen = dis.readInt();
    incTotalCount();
    if (skipResult.skip()) {
        if (skipResult.skipKey()) {
            forceSkipBytes(dis, keyLen);
        } else {
            byte[] keyBytes = new byte[keyLen];
            dis.readFully(keyBytes);
            skippedKeyBytes.put(oplogKeyId, keyBytes);
        }
        readEndOfRecord(dis);
        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
            logger.trace(LogMarker.PERSIST_RECOVERY, "skipping readModEntryWK init oplogKeyId=<{}> drId={}", oplogKeyId, drId);
        }
    } else {
        // read the key
        byte[] keyBytes = null;
        if (isPhase2()) {
            forceSkipBytes(dis, keyLen);
        } else {
            keyBytes = new byte[keyLen];
            dis.readFully(keyBytes);
        }
        readEndOfRecord(dis);
        if (drs != null && tag != null) {
            // Update the RVV with the new entry
            // This must be done after reading the end of record to make sure
            // we don't have a corrupt record. See bug #45538
            drs.recordRecoveredVersionTag(tag);
        }
        assert oplogKeyId >= 0;
        if (getParent().isOfflineCompacting()) {
            if (isPhase1()) {
                CompactionRecord cr = new CompactionRecord(keyBytes, crOffset);
                getRecoveryMap().put(oplogKeyId, cr);
                drs.getDiskRegionView().incRecoveredEntryCount();
                this.stats.incRecoveredEntryCreates();
            } else {
                // phase2
                Assert.assertTrue(p2cr != null, "First pass did not create a compaction record");
                getOplogSet().getChild().copyForwardForOfflineCompact(oplogKeyId, p2cr.getKeyBytes(), objValue, userBits, drId, tag);
                if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                    logger.trace(LogMarker.PERSIST_RECOVERY, "readModifyEntryWithKey copyForward oplogKeyId=<{}>", oplogKeyId);
                }
                // add it to the deletedIds set so we will ignore it in earlier oplogs
                deletedIds.add(oplogKeyId);
            }
        } else {
            Object key = deserializeKey(keyBytes, version, in);
            Object oldValue = getRecoveryMap().put(oplogKeyId, key);
            if (oldValue != null) {
                throw new AssertionError(LocalizedStrings.Oplog_DUPLICATE_CREATE.toLocalizedString(oplogKeyId));
            }
            // Check the actual region to see if it has this key from
            // a previous recovered oplog.
            DiskEntry de = drs.getDiskEntry(key);
            if (de == null) {
                DiskRegionView drv = drs.getDiskRegionView();
                // and create an entry
                DiskEntry.RecoveredEntry re = createRecoveredEntry(objValue, valueLength, userBits, getOplogId(), oplogOffset, oplogKeyId, recoverValue, version, in);
                if (tag != null) {
                    re.setVersionTag(tag);
                }
                if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                    logger.trace(LogMarker.PERSIST_RECOVERY, "readModEntryWK init oplogKeyId=<{}> drId={} key={} oplogOffset={} userBits={} valueLen={} tag={}", oplogKeyId, drId, key, oplogOffset, userBits, valueLength, tag);
                }
                initRecoveredEntry(drv, drs.initializeRecoveredEntry(key, re));
                drs.getDiskRegionView().incRecoveredEntryCount();
                this.stats.incRecoveredEntryCreates();
            } else {
                DiskId curdid = de.getDiskId();
                assert curdid.getOplogId() != getOplogId() : "Multiple ModEntryWK in the same oplog for getOplogId()=" + getOplogId() + " , curdid.getOplogId()=" + curdid.getOplogId() + " , for drId=" + drId + " , key=" + key;
                if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                    logger.trace(LogMarker.PERSIST_RECOVERY, "ignore readModEntryWK because getOplogId()={} != curdid.getOplogId()={} for drId={} key={}", getOplogId(), curdid.getOplogId(), drId, key);
                }
            // de = drs.updateRecoveredEntry(key, re);
            // updateRecoveredEntry(drv, de, re);
            // this.stats.incRecoveredEntryUpdates();
            }
        }
    }
}
Also used : DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView) DiskRecoveryStore(org.apache.geode.internal.cache.persistence.DiskRecoveryStore) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) StoredObject(org.apache.geode.internal.offheap.StoredObject)
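
A recurring move in this recovery path is deciding between materializing a length-prefixed value and stepping over it (readFully vs. forceSkipBytes). A minimal sketch of that pattern using plain java.io streams follows; the skip loop is an invented stand-in for forceSkipBytes, not the real helper.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

// Hypothetical sketch: the length prefix is always consumed, but the payload is
// only materialized when the caller actually wants the value back.
public class ReadOrSkipSketch {

    static byte[] readRecord(DataInputStream dis, boolean recoverValue) throws IOException {
        int len = dis.readInt();
        if (recoverValue) {
            byte[] value = new byte[len];
            dis.readFully(value); // materialize the value bytes
            return value;
        }
        long remaining = len; // stand-in for forceSkipBytes: advance without copying
        while (remaining > 0) {
            long skipped = dis.skip(remaining);
            if (skipped <= 0) {
                dis.readByte(); // skip() may return 0; force progress one byte at a time
                skipped = 1;
            }
            remaining -= skipped;
        }
        return null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        dos.writeInt(payload.length);
        dos.write(payload);
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(readRecord(dis, true).length); // prints 5
    }
}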

Example 9 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

the class DiskInitFileJUnitTest method testCanonicalIds.

/**
   * Test the behavior of canonical ids in the init file.
   */
@Test
public void testCanonicalIds() {
    // create a mock statistics factory for creating directory holders
    final StatisticsFactory sf = context.mock(StatisticsFactory.class);
    context.checking(new Expectations() {

        {
            ignoring(sf);
        }
    });
    // Create a mock disk store impl. All we need to do is return
    // this init file directory.
    final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
    context.checking(new Expectations() {

        {
            allowing(parent).getInfoFileDir();
            will(returnValue(new DirectoryHolder(sf, testDirectory, 0, 0)));
            ignoring(parent);
        }
    });
    // Create an init file and add some canonical ids
    DiskInitFile dif = new DiskInitFile("testFile", parent, false, Collections.<File>emptySet());
    assertEquals(null, dif.getCanonicalObject(5));
    assertNull(dif.getCanonicalObject(0));
    int id1 = dif.getOrCreateCanonicalId("object1");
    int id2 = dif.getOrCreateCanonicalId("object2");
    assertEquals("object1", dif.getCanonicalObject(id1));
    assertEquals("object2", dif.getCanonicalObject(id2));
    assertEquals(id2, dif.getOrCreateCanonicalId("object2"));
    // Add a mock region to the init file so it doesn't
    // delete the file when the init file is closed
    final DiskRegionView drv = context.mock(DiskRegionView.class);
    context.checking(new Expectations() {

        {
            ignoring(drv);
        }
    });
    dif.createRegion(drv);
    // close the init file
    dif.close();
    // recover the init file from disk
    dif = new DiskInitFile("testFile", parent, true, Collections.<File>emptySet());
    // make sure we can recover the ids from disk
    assertEquals("object1", dif.getCanonicalObject(id1));
    assertEquals("object2", dif.getCanonicalObject(id2));
    assertEquals(id2, dif.getOrCreateCanonicalId("object2"));
    // Make sure we can add new ids
    int id3 = dif.getOrCreateCanonicalId("object3");
    assertTrue(id3 > id2);
    assertEquals("object1", dif.getCanonicalObject(id1));
    assertEquals("object2", dif.getCanonicalObject(id2));
    assertEquals("object3", dif.getCanonicalObject(id3));
    dif.close();
}
Also used : Expectations(org.jmock.Expectations) StatisticsFactory(org.apache.geode.StatisticsFactory) File(java.io.File) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
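
The behavior under test is a small interning scheme: equal objects collapse to one integer id, the id resolves back to the object, and the mapping survives a close/recover cycle. The sketch below covers only the in-memory half of that idea, with invented names and no persistence.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical illustration of a canonical-id table; the real DiskInitFile also
// records these mappings on disk so they can be recovered.
public class CanonicalIdTable {
    private final Map<Object, Integer> idsByObject = new HashMap<>();
    private final List<Object> objectsById = new ArrayList<>();

    public synchronized int getOrCreateCanonicalId(Object o) {
        Integer id = idsByObject.get(o);
        if (id != null) {
            return id; // an equal object was already interned: reuse its id
        }
        objectsById.add(o);
        int newId = objectsById.size(); // ids start at 1, so 0 means "unknown"
        idsByObject.put(o, newId);
        return newId;
    }

    public synchronized Object getCanonicalObject(int id) {
        if (id <= 0 || id > objectsById.size()) {
            return null; // unknown ids resolve to null, as the test expects
        }
        return objectsById.get(id - 1);
    }

    public static void main(String[] args) {
        CanonicalIdTable table = new CanonicalIdTable();
        int id1 = table.getOrCreateCanonicalId("object1");
        int id2 = table.getOrCreateCanonicalId("object2");
        System.out.println(table.getCanonicalObject(id1)); // object1
        System.out.println(id2 == table.getOrCreateCanonicalId("object2")); // true
    }
}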

Example 10 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

the class DiskStoreImpl method pdxDeleteField.

private Collection<PdxType> pdxDeleteField(String className, String fieldName) throws IOException {
    // Since we are recovering a disk store, the cast from DiskRegionView --> PlaceHolderDiskRegion
    // and from RegionEntry --> DiskEntry should be ok.
    // In offline mode, we need to schedule the regions to be recovered explicitly.
    DiskRegionView foundPdx = null;
    for (DiskRegionView drv : getKnown()) {
        if (drv.getName().equals(PeerTypeRegistration.REGION_FULL_PATH)) {
            foundPdx = drv;
            scheduleForRecovery((PlaceHolderDiskRegion) drv);
        }
    }
    if (foundPdx == null) {
        throw new IllegalStateException("The disk store does not contain any PDX types.");
    }
    recoverRegionsThatAreReady();
    PersistentOplogSet oplogSet = (PersistentOplogSet) getOplogSet(foundPdx);
    ArrayList<PdxType> result = new ArrayList<PdxType>();
    for (RegionEntry re : foundPdx.getRecoveredEntryMap().regionEntries()) {
        Object value = re._getValueRetain(foundPdx, true);
        if (Token.isRemoved(value)) {
            continue;
        }
        if (value instanceof CachedDeserializable) {
            value = ((CachedDeserializable) value).getDeserializedForReading();
        }
        if (value instanceof EnumInfo) {
            // nothing to delete in an enum
            continue;
        }
        PdxType type = (PdxType) value;
        if (type.getClassName().equals(className)) {
            PdxField field = type.getPdxField(fieldName);
            if (field != null) {
                field.setDeleted(true);
                type.setHasDeletedField(true);
                result.add(type);
                oplogSet.offlineModify(foundPdx, (DiskEntry) re, BlobHelper.serializeToBlob(type), true);
            }
        }
    }
    return result;
}
Also used : PdxType(org.apache.geode.pdx.internal.PdxType) EnumInfo(org.apache.geode.pdx.internal.EnumInfo) ArrayList(java.util.ArrayList) PdxField(org.apache.geode.pdx.internal.PdxField) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)
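
Stripped of the recovery and serialization machinery, pdxDeleteField is a scan-and-mark pass over recovered type metadata: keep only records for the requested class, flag the named field as deleted rather than physically removing it, and return the records that changed so they can be rewritten. The sketch below shows that core loop with invented TypeRecord/FieldRecord stand-ins for PdxType/PdxField.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the scan-and-mark pass; not the Geode implementation.
public class DeleteFieldSketch {
    static class FieldRecord {
        final String name;
        boolean deleted;
        FieldRecord(String name) { this.name = name; }
    }

    static class TypeRecord {
        final String className;
        final Map<String, FieldRecord> fields = new HashMap<>();
        TypeRecord(String className) { this.className = className; }
    }

    static List<TypeRecord> deleteField(List<TypeRecord> recovered, String className, String fieldName) {
        List<TypeRecord> modified = new ArrayList<>();
        for (TypeRecord type : recovered) {
            if (!type.className.equals(className)) {
                continue; // only touch records for the requested class
            }
            FieldRecord field = type.fields.get(fieldName);
            if (field != null) {
                field.deleted = true; // mark, don't physically remove
                modified.add(type); // the caller rewrites these records offline
            }
        }
        return modified;
    }

    public static void main(String[] args) {
        TypeRecord t = new TypeRecord("com.example.Customer");
        t.fields.put("ssn", new FieldRecord("ssn"));
        List<TypeRecord> changed = deleteField(Arrays.asList(t), "com.example.Customer", "ssn");
        System.out.println("modified types: " + changed.size()); // 1
    }
}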

Aggregations

DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView) 27
ArrayList (java.util.ArrayList) 5
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 4
File (java.io.File) 3
HashMap (java.util.HashMap) 3
StatisticsFactory (org.apache.geode.StatisticsFactory) 3
DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore) 3
PdxType (org.apache.geode.pdx.internal.PdxType) 3
Int2ObjectOpenHashMap (it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap) 2
VersionTag (org.apache.geode.internal.cache.versions.VersionTag) 2
StoredObject (org.apache.geode.internal.offheap.StoredObject) 2
EnumInfo (org.apache.geode.pdx.internal.EnumInfo) 2
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest) 2
Expectations (org.jmock.Expectations) 2
Test (org.junit.Test) 2
IntOpenHashSet (it.unimi.dsi.fastutil.ints.IntOpenHashSet) 1
Long2ObjectOpenHashMap (it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap) 1
LongOpenHashSet (it.unimi.dsi.fastutil.longs.LongOpenHashSet) 1
IOException (java.io.IOException) 1
HashSet (java.util.HashSet) 1