Search in sources:

Example 11 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in the Apache Geode project.

From the class DiskStoreImpl, method scheduleForRecovery.

/**
 * Queues the given recovery store with the persistent oplog set that owns its
 * disk region, so the region is picked up by the next recovery pass.
 */
void scheduleForRecovery(DiskRecoveryStore drs) {
    DiskRegionView regionView = drs.getDiskRegionView();
    getPersistentOplogSet(regionView).scheduleForRecovery(drs);
}
Also used : DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)

Example 12 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in the Apache Geode project.

From the class DiskStoreImpl, method initializeOwner.

/**
   * Initializes the contents of any regions on this DiskStore that have been registered but are not
   * yet initialized.
   *
   * <p>Overflow-only (non-persistent) regions and regions that were not recreated from disk need
   * no recovery and return immediately. If the placeholder disk region already holds a recovered
   * entry map, that map is copied into the real region; otherwise the region is scheduled and
   * recovered synchronously via {@code recoverRegionsThatAreReady()}.
   *
   * @param lr the local region whose persisted contents should be initialized
   * @throws DiskAccessException if initializing the disk store from disk fails
   */
void initializeOwner(LocalRegion lr) {
    DiskRegion dr = lr.getDiskRegion();
    // We don't need to do recovery for overflow regions.
    if (!lr.getDataPolicy().withPersistence() || !dr.isRecreated()) {
        return;
    }
    // Synchronize on currentAsyncValueRecoveryMap to keep async value recovery out
    // while we are copying the entry map.
    synchronized (currentAsyncValueRecoveryMap) {
        DiskRegionView drv = lr.getDiskRegionView();
        if (drv.getRecoveredEntryMap() != null) {
            PersistentOplogSet oplogSet = getPersistentOplogSet(drv);
            // acquire CompactorWriteLock only if the region attributes for the
            // real region are different from the place holder region's
            boolean releaseCompactorWriteLock = false;
            if (drv.isEntriesMapIncompatible()) {
                // fix bug #51097 to prevent concurrent compaction
                acquireCompactorWriteLock();
                releaseCompactorWriteLock = true;
            }
            try {
                drv.copyExistingRegionMap(lr);
                // The region is now created/initialized, so it no longer counts as
                // an "uncreated" recovered region in the stats.
                getStats().incUncreatedRecoveredRegions(-1);
                // Repoint every oplog from the placeholder view to the real region's view.
                for (Oplog oplog : oplogSet.getAllOplogs()) {
                    if (oplog != null) {
                        oplog.updateDiskRegion(lr.getDiskRegionView());
                    }
                }
            } finally {
                if (releaseCompactorWriteLock) {
                    releaseCompactorWriteLock();
                }
            }
            // If async value recovery is still in flight for this region id, retarget it
            // at the real region so recovered values land in the right place.
            if (currentAsyncValueRecoveryMap.containsKey(drv.getId())) {
                currentAsyncValueRecoveryMap.put(drv.getId(), lr);
            }
            return;
        }
    }
    // No recovered entry map yet: schedule this region and run recovery now.
    scheduleForRecovery(lr);
    try {
        // acquireReadLock(dr);
        // gotLock = true;
        recoverRegionsThatAreReady();
    } catch (DiskAccessException dae) {
        // Asif:Just rethrow t
        throw dae;
    } catch (RuntimeException re) {
        // Wrap any other runtime failure so callers see a disk-access failure
        // here
        throw new DiskAccessException("RuntimeException in initializing the disk store from the disk", re, this);
    }
// finally {
// if(gotLock) {
// releaseReadLock(dr);
// }
// }
}
Also used : DiskAccessException(org.apache.geode.cache.DiskAccessException) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)

Example 13 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in the Apache Geode project.

From the class DiskStoreImpl, method validate.

/**
 * Offline "validate" pass: schedules every known disk region for recovery through a
 * validating placeholder region, recovers them, and reports the live-entry and
 * compactable (dead) record totals on stdout.
 */
private void validate() {
    assert isValidating();
    // save memory @todo should Oplog make sure
    // value is deserializable?
    this.RECOVER_VALUES = false;
    // Reset counters before the recovery pass tallies records.
    this.liveEntryCount = 0;
    this.deadRecordCount = 0;
    for (DiskRegionView regionView : getKnown()) {
        ValidatingDiskRegion validatingRegion = ValidatingDiskRegion.create(this, regionView);
        scheduleForRecovery(validatingRegion);
    }
    recoverRegionsThatAreReady();
    if (getDeadRecordCount() > 0) {
        System.out.println("Disk store contains " + getDeadRecordCount() + " compactable records.");
    }
    System.out.println("Total number of region entries in this disk store is: " + getLiveEntryCount());
}
Also used : DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)

Example 14 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in the Apache Geode project.

From the class DiskStoreImpl, method pdxRename.

/**
 * Offline tool: renames PDX types and enums whose class names match the rename pattern
 * built from {@code oldBase}, rewriting each modified registry record in place in the
 * oplogs via {@code offlineModify}.
 *
 * @param oldBase the old class-name base to match (pattern semantics defined by
 *        {@code createPdxRenamePattern} — see that method)
 * @param newBase the replacement base
 * @return the modified {@code PdxType}/{@code EnumInfo} objects
 * @throws IOException if serializing a modified type fails
 * @throws IllegalStateException if the disk store holds no PDX registry region
 */
private Collection<Object> pdxRename(String oldBase, String newBase) throws IOException {
    // Since we are recovering a disk store, the cast from DiskRegionView -->
    // PlaceHolderDiskRegion
    // and from RegionEntry --> DiskEntry should be ok.
    // In offline mode, we need to schedule the regions to be recovered
    // explicitly.
    DiskRegionView foundPdx = null;
    for (DiskRegionView drv : getKnown()) {
        if (drv.getName().equals(PeerTypeRegistration.REGION_FULL_PATH)) {
            foundPdx = drv;
            scheduleForRecovery((PlaceHolderDiskRegion) drv);
        }
    }
    if (foundPdx == null) {
        throw new IllegalStateException("The disk store does not contain any PDX types.");
    }
    recoverRegionsThatAreReady();
    PersistentOplogSet oplogSet = (PersistentOplogSet) getOplogSet(foundPdx);
    ArrayList<Object> result = new ArrayList<>();
    Pattern pattern = createPdxRenamePattern(oldBase);
    // Walk every recovered entry in the PDX registry region. Each live value is
    // either an EnumInfo or (presumably) a PdxType — the else-branch cast assumes so.
    for (RegionEntry re : foundPdx.getRecoveredEntryMap().regionEntries()) {
        Object value = re._getValueRetain(foundPdx, true);
        if (Token.isRemoved(value)) {
            // Skip removed/tombstone entries.
            continue;
        }
        if (value instanceof CachedDeserializable) {
            value = ((CachedDeserializable) value).getDeserializedForReading();
        }
        if (value instanceof EnumInfo) {
            EnumInfo ei = (EnumInfo) value;
            String newName = replacePdxRenamePattern(pattern, ei.getClassName(), newBase);
            // A null result means the name did not match; only rewrite changed entries.
            if (newName != null) {
                ei.setClassName(newName);
                result.add(ei);
                oplogSet.offlineModify(foundPdx, (DiskEntry) re, BlobHelper.serializeToBlob(ei), true);
            }
        } else {
            PdxType type = (PdxType) value;
            String newName = replacePdxRenamePattern(pattern, type.getClassName(), newBase);
            if (newName != null) {
                type.setClassName(newName);
                result.add(type);
                oplogSet.offlineModify(foundPdx, (DiskEntry) re, BlobHelper.serializeToBlob(type), true);
            }
        }
    }
    return result;
}
Also used : Pattern(java.util.regex.Pattern) PersistentMemberPattern(org.apache.geode.internal.cache.persistence.PersistentMemberPattern) PdxType(org.apache.geode.pdx.internal.PdxType) EnumInfo(org.apache.geode.pdx.internal.EnumInfo) ArrayList(java.util.ArrayList) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)

Example 15 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in the Apache Geode project.

From the class DiskInitFile, method cmnRegionConfig.

/**
 * Applies a region-configuration record recovered from the disk init file to the disk
 * region identified by {@code drId}, and bumps the init-file record counters.
 *
 * @param drId the disk region id the record belongs to
 * @throws IllegalStateException if {@code drId} resolves to no known region and
 *         PERSIST_RECOVERY trace logging is disabled (see NOTE below)
 */
public void cmnRegionConfig(long drId, byte lruAlgorithm, byte lruAction, int lruLimit, int concurrencyLevel, int initialCapacity, float loadFactor, boolean statisticsEnabled, boolean isBucket, EnumSet<DiskRegionFlag> flags, String partitionName, int startingBucketId, String compressorClassName, boolean offHeap) {
    DiskRegionView dr = getDiskRegionById(drId);
    if (dr != null) {
        // Records written by a pre-GFE-8.0 member lack the versioning flag, so force it
        // on — except for gateway sender queue regions, identified by substring of the
        // name.
        if (Version.GFE_80.compareTo(currentRecoveredGFVersion()) > 0 && !dr.getName().contains("_SERIAL_GATEWAY_SENDER_QUEUE") && !dr.getName().contains("_PARALLEL__GATEWAY__SENDER__QUEUE")) {
            flags.add(DiskRegionFlag.IS_WITH_VERSIONING);
        }
        dr.setConfig(lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled, isBucket, flags, partitionName, startingBucketId, compressorClassName, offHeap);
        // Just count this as a live record even though it is possible
        // that we have an extra one due to the config changing while
        // we were offline.
        this.ifLiveRecordCount++;
        this.ifTotalRecordCount++;
    } else {
        // NOTE(review): this branch only LOGS the bad id when trace logging is enabled,
        // but THROWS when it is disabled — the condition looks inverted relative to the
        // usual log-and-throw pattern; confirm this lenient-when-tracing behavior is
        // intentional before changing it.
        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
            logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
        } else {
            throw new IllegalStateException("bad disk region id");
        }
    }
}
Also used : DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)

Aggregations

DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView)27 ArrayList (java.util.ArrayList)5 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)4 File (java.io.File)3 HashMap (java.util.HashMap)3 StatisticsFactory (org.apache.geode.StatisticsFactory)3 DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore)3 PdxType (org.apache.geode.pdx.internal.PdxType)3 Int2ObjectOpenHashMap (it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap)2 VersionTag (org.apache.geode.internal.cache.versions.VersionTag)2 StoredObject (org.apache.geode.internal.offheap.StoredObject)2 EnumInfo (org.apache.geode.pdx.internal.EnumInfo)2 IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)2 Expectations (org.jmock.Expectations)2 Test (org.junit.Test)2 IntOpenHashSet (it.unimi.dsi.fastutil.ints.IntOpenHashSet)1 Long2ObjectOpenHashMap (it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap)1 LongOpenHashSet (it.unimi.dsi.fastutil.longs.LongOpenHashSet)1 IOException (java.io.IOException)1 HashSet (java.util.HashSet)1