Example 26 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

The class Oplog, method readModifyEntry.

/**
   * Reads an oplog entry of type Modify.
   * 
   * @param dis DataInputStream from which the oplog is being read
   * @param opcode the record opcode; its offset from OPLOG_MOD_ENTRY_1ID encodes how many bytes
   *        (1-8) the entry id occupies
   * @param deletedIds set of entry ids already known to be deleted
   * @param recoverValue whether the value bytes should be read along with the key
   */
private void readModifyEntry(CountingDataInputStream dis, byte opcode, OplogEntryIdSet deletedIds, boolean recoverValue, LocalRegion currentRegion, Version version, ByteArrayDataInput in, HeapDataOutputStream hdos) throws IOException {
    final boolean isPersistRecoveryDebugEnabled = logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY);
    long oplogOffset = -1;
    byte userBits = dis.readByte();
    int idByteCount = (opcode - OPLOG_MOD_ENTRY_1ID) + 1;
    long oplogKeyId = getModEntryId(dis, idByteCount);
    long drId = DiskInitFile.readDiskRegionID(dis);
    DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId);
    // read versions
    VersionTag tag = null;
    if (EntryBits.isWithVersions(userBits)) {
        tag = readVersionsFromOplog(dis);
    } else if (getParent().isUpgradeVersionOnly() && drs != null) {
        tag = this.createDummyTag(drs);
        userBits = EntryBits.setWithVersions(userBits, true);
    }
    if (drs != null && !drs.getDiskRegionView().getFlags().contains(DiskRegionFlag.IS_WITH_VERSIONING)) {
        // Bug 50044: remove the version tag from the entry if we don't want
        // versioning for this region
        tag = null;
        userBits = EntryBits.setWithVersions(userBits, false);
    }
    OkToSkipResult skipResult = okToSkipModifyRecord(deletedIds, drId, drs, oplogKeyId, false, tag);
    if (skipResult.skip()) {
        if (!isPhase2()) {
            incSkipped();
            this.stats.incRecoveryRecordsSkipped();
        }
    } else if (recoverValue && !getParent().isOfflineCompacting()) {
        recoverValue = recoverLruValue(drs);
    }
    byte[] objValue = null;
    int valueLength = 0;
    CompactionRecord p2cr = null;
    long crOffset;
    if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits)) {
        if (EntryBits.isInvalid(userBits)) {
            objValue = DiskEntry.INVALID_BYTES;
        } else if (EntryBits.isTombstone(userBits)) {
            objValue = DiskEntry.TOMBSTONE_BYTES;
        } else {
            objValue = DiskEntry.LOCAL_INVALID_BYTES;
        }
        crOffset = dis.getCount();
        if (!skipResult.skip()) {
            if (isPhase2()) {
                p2cr = (CompactionRecord) getRecoveryMap().get(oplogKeyId);
                if (p2cr != null && p2cr.getOffset() != crOffset) {
                    skipResult = OkToSkipResult.SKIP_RECORD;
                }
            }
        }
    } else {
        int len = dis.readInt();
        oplogOffset = dis.getCount();
        crOffset = oplogOffset;
        valueLength = len;
        if (!skipResult.skip()) {
            if (isPhase2()) {
                p2cr = (CompactionRecord) getRecoveryMap().get(oplogKeyId);
                if (p2cr != null && p2cr.getOffset() != crOffset) {
                    skipResult = OkToSkipResult.SKIP_RECORD;
                }
            }
        }
        if (!skipResult.skip() && recoverValue) {
            byte[] valueBytes = new byte[len];
            dis.readFully(valueBytes);
            objValue = valueBytes;
            validateValue(valueBytes, userBits, version, in);
        } else {
            forceSkipBytes(dis, len);
        }
    }
    readEndOfRecord(dis);
    if (drs != null && tag != null) {
        // Update the RVV with the new entry
        // This must be done after reading the end of record to make sure
        // we don't have a corrupt record. See bug #45538
        drs.recordRecoveredVersionTag(tag);
    }
    incTotalCount();
    if (!skipResult.skip()) {
        Object key = getRecoveryMap().get(oplogKeyId);
        // If the key is not in the recovery map, it may have been previously
        // skipped. Check the skipped bytes map for the key.
        if (key == null) {
            byte[] keyBytes = (byte[]) skippedKeyBytes.get(oplogKeyId);
            if (keyBytes != null) {
                key = deserializeKey(keyBytes, version, in);
            }
        }
        if (isPersistRecoveryDebugEnabled) {
            logger.trace(LogMarker.PERSIST_RECOVERY, "readModifyEntry oplogKeyId=<{}> drId={} key=<{}> userBits={} oplogOffset={} tag={} valueLen={}", oplogKeyId, drId, key, userBits, oplogOffset, tag, valueLength);
        }
        // The key will no longer be null, since the first modify record in any
        // oplog will now be a MOD_ENTRY_WITH_KEY record.
        assert key != null;
        if (getParent().isOfflineCompacting()) {
            if (isPhase1()) {
                CompactionRecord cr = (CompactionRecord) key;
                // we are going to compact the previous record away
                incSkipped();
                cr.update(crOffset);
            } else {
                // phase2
                Assert.assertTrue(p2cr != null, "First pass did not create a compaction record");
                getOplogSet().getChild().copyForwardForOfflineCompact(oplogKeyId, p2cr.getKeyBytes(), objValue, userBits, drId, tag);
                if (isPersistRecoveryDebugEnabled) {
                    logger.trace(LogMarker.PERSIST_RECOVERY, "readModifyEntry copyForward oplogKeyId=<{}>", oplogKeyId);
                }
                // add it to the deletedIds set so we will ignore it in earlier oplogs
                deletedIds.add(oplogKeyId);
            }
        } else {
            // Check the actual region to see if it has this key from
            // a previous recovered oplog.
            DiskEntry de = drs.getDiskEntry(key);
            // This may actually be a create, if the previous create or modify of
            // this entry was cleared through the RVV clear.
            if (de == null) {
                DiskRegionView drv = drs.getDiskRegionView();
                // No disk entry exists for this key yet, so create a recovered entry
                DiskEntry.RecoveredEntry re = createRecoveredEntry(objValue, valueLength, userBits, getOplogId(), oplogOffset, oplogKeyId, recoverValue, version, in);
                if (tag != null) {
                    re.setVersionTag(tag);
                }
                if (isPersistRecoveryDebugEnabled) {
                    logger.trace(LogMarker.PERSIST_RECOVERY, "readModEntryWK init oplogKeyId=<{}> drId={} key=<{}> oplogOffset={} userBits={} valueLen={} tag={}", oplogKeyId, drId, key, oplogOffset, userBits, valueLength, tag);
                }
                initRecoveredEntry(drv, drs.initializeRecoveredEntry(key, re));
                drs.getDiskRegionView().incRecoveredEntryCount();
                this.stats.incRecoveredEntryCreates();
            } else {
                DiskEntry.RecoveredEntry re = createRecoveredEntry(objValue, valueLength, userBits, getOplogId(), oplogOffset, oplogKeyId, recoverValue, version, in);
                if (tag != null) {
                    re.setVersionTag(tag);
                }
                de = drs.updateRecoveredEntry(key, re);
                updateRecoveredEntry(drs.getDiskRegionView(), de, re);
                this.stats.incRecoveredEntryUpdates();
            }
        }
    } else {
        if (isPersistRecoveryDebugEnabled) {
            logger.trace(LogMarker.PERSIST_RECOVERY, "skipping readModifyEntry oplogKeyId=<{}> drId={}", oplogKeyId, drId);
        }
    }
}
Also used : DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView) DiskRecoveryStore(org.apache.geode.internal.cache.persistence.DiskRecoveryStore) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) StoredObject(org.apache.geode.internal.offheap.StoredObject)
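
The opcode arithmetic near the top of readModifyEntry is the interesting detail: idByteCount = (opcode - OPLOG_MOD_ENTRY_1ID) + 1 means the record's opcode itself encodes how many bytes the entry id occupies, so small ids cost as little as one byte on disk. Below is a minimal, self-contained sketch of that decoding. The value of OPLOG_MOD_ENTRY_1ID here is a hypothetical stand-in, and the real getModEntryId additionally delta-decodes the id against the previously recovered one, which this sketch omits.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

// Sketch only: decodes a variable-width entry id whose byte count is
// derived from the record opcode, as readModifyEntry does.
public class EntryIdDecodeSketch {

    // Hypothetical stand-in for the real Oplog constant.
    private static final byte OPLOG_MOD_ENTRY_1ID = 0x50;

    // Reads an id of 1..8 big-endian bytes; the width is (opcode - base) + 1.
    static long readEntryId(DataInputStream dis, byte opcode) throws IOException {
        int idByteCount = (opcode - OPLOG_MOD_ENTRY_1ID) + 1;
        long id = 0;
        for (int i = 0; i < idByteCount; i++) {
            id = (id << 8) | dis.readUnsignedByte();
        }
        return id;
    }

    public static void main(String[] args) throws IOException {
        // base + 1 means the id occupies two bytes: 0x01 0x02 decodes to 258
        byte opcode = (byte) (OPLOG_MOD_ENTRY_1ID + 1);
        DataInputStream dis =
            new DataInputStream(new ByteArrayInputStream(new byte[] { 0x01, 0x02 }));
        System.out.println(readEntryId(dis, opcode)); // prints 258
    }
}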

Example 27 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

The class Oplog, method recoverValuesIfNeeded.

/**
   * This method is called by the async value recovery task to recover the values from the crf if
   * the keys were recovered from the krf.
   */
public void recoverValuesIfNeeded(Map<Long, DiskRecoveryStore> diskRecoveryStores) {
    // Early out if we start closing the parent.
    if (getParent().isClosing()) {
        return;
    }
    List<KRFEntry> sortedLiveEntries;
    HashMap<Long, DiskRegionInfo> targetRegions = new HashMap<Long, DiskRegionInfo>(this.regionMap);
    synchronized (diskRecoveryStores) {
        Iterator<DiskRecoveryStore> itr = diskRecoveryStores.values().iterator();
        while (itr.hasNext()) {
            DiskRecoveryStore store = itr.next();
            if (isLruValueRecoveryDisabled(store) || store.lruLimitExceeded()) {
                itr.remove();
            }
        }
        // Get a sorted list of live entries from the target regions
        targetRegions.keySet().retainAll(diskRecoveryStores.keySet());
    }
    sortedLiveEntries = getSortedLiveEntries(targetRegions.values());
    if (sortedLiveEntries == null) {
        // There are no live entries in this oplog to recover.
        return;
    }
    final ByteArrayDataInput in = new ByteArrayDataInput();
    for (KRFEntry entry : sortedLiveEntries) {
        // Early out if we start closing the parent.
        if (getParent().isClosing()) {
            return;
        }
        DiskEntry diskEntry = entry.getDiskEntry();
        DiskRegionView diskRegionView = entry.getDiskRegionView();
        long diskRegionId = diskRegionView.getId();
        synchronized (diskRecoveryStores) {
            DiskRecoveryStore diskRecoveryStore = diskRecoveryStores.get(diskRegionId);
            if (diskRecoveryStore == null) {
                continue;
            }
            // Reset the disk region view because it may have changed
            // due to the region being created.
            diskRegionView = diskRecoveryStore.getDiskRegionView();
            if (diskRegionView == null) {
                continue;
            }
            if (diskRecoveryStore.lruLimitExceeded()) {
                diskRecoveryStores.remove(diskRegionId);
                continue;
            }
            if (diskRegionView.isEntriesMapIncompatible()) {
                // Refetch the disk entry because it may have changed due to copying
                // an incompatible region map
                diskEntry = (DiskEntry) diskRecoveryStore.getRegionMap().getEntryInVM(diskEntry.getKey());
                if (diskEntry == null) {
                    continue;
                }
            }
            synchronized (diskEntry) {
                // Make sure the entry hasn't been modified
                if (diskEntry.getDiskId() != null && diskEntry.getDiskId().getOplogId() == oplogId) {
                    try {
                        DiskEntry.Helper.recoverValue(diskEntry, getOplogId(), diskRecoveryStore, in);
                    } catch (RegionDestroyedException ignore) {
                        // This region has been destroyed, stop recovering from it.
                        diskRecoveryStores.remove(diskRegionId);
                    }
                }
            }
        }
    }
}
Also used : Long2ObjectOpenHashMap(it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap) Int2ObjectOpenHashMap(it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) ByteArrayDataInput(org.apache.geode.internal.ByteArrayDataInput) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView) DiskRecoveryStore(org.apache.geode.internal.cache.persistence.DiskRecoveryStore) AtomicLong(java.util.concurrent.atomic.AtomicLong)
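
The guard inside the inner synchronized block above is the part worth imitating: a value is only faulted in while diskEntry.getDiskId().getOplogId() still equals this oplog's id, so any entry modified after the KRF was read (and therefore owned by a newer oplog) is silently skipped. The sketch below isolates that ownership check, assuming a simplified Entry stand-in rather than Geode's real DiskEntry and DiskRecoveryStore types.

import java.util.HashMap;
import java.util.Map;

// Sketch only: the "entry still owned by this oplog" check applied
// under the entry lock before a value is recovered.
public class AsyncValueRecoverySketch {

    // Simplified stand-in for DiskEntry/DiskId.
    static class Entry {
        final long key;
        volatile long oplogId; // which oplog currently holds the value
        Entry(long key, long oplogId) {
            this.key = key;
            this.oplogId = oplogId;
        }
    }

    private final long myOplogId;

    AsyncValueRecoverySketch(long myOplogId) {
        this.myOplogId = myOplogId;
    }

    void recoverValues(Map<Long, Entry> liveEntries) {
        for (Entry entry : liveEntries.values()) {
            synchronized (entry) {
                // Skip entries modified since the KRF was read: a newer
                // oplog now owns their value, so recovering from this one
                // would resurrect stale data.
                if (entry.oplogId == myOplogId) {
                    System.out.println("recovering value for key " + entry.key);
                }
            }
        }
    }

    public static void main(String[] args) {
        Map<Long, Entry> entries = new HashMap<>();
        entries.put(1L, new Entry(1L, 7L)); // still owned by oplog 7
        entries.put(2L, new Entry(2L, 9L)); // moved to a newer oplog
        new AsyncValueRecoverySketch(7L).recoverValues(entries); // key 1 only
    }
}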

Aggregations

DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView): 27
ArrayList (java.util.ArrayList): 5
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
File (java.io.File): 3
HashMap (java.util.HashMap): 3
StatisticsFactory (org.apache.geode.StatisticsFactory): 3
DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore): 3
PdxType (org.apache.geode.pdx.internal.PdxType): 3
Int2ObjectOpenHashMap (it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap): 2
VersionTag (org.apache.geode.internal.cache.versions.VersionTag): 2
StoredObject (org.apache.geode.internal.offheap.StoredObject): 2
EnumInfo (org.apache.geode.pdx.internal.EnumInfo): 2
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 2
Expectations (org.jmock.Expectations): 2
Test (org.junit.Test): 2
IntOpenHashSet (it.unimi.dsi.fastutil.ints.IntOpenHashSet): 1
Long2ObjectOpenHashMap (it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap): 1
LongOpenHashSet (it.unimi.dsi.fastutil.longs.LongOpenHashSet): 1
IOException (java.io.IOException): 1
HashSet (java.util.HashSet): 1