Search in sources :

Example 21 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

Defined in the class DiskInitFile, method cmdOfflineAndEqualMemberId.

public void cmdOfflineAndEqualMemberId(long drId, PersistentMemberID pmid) {
    DiskRegionView dr = getDiskRegionById(drId);
    if (dr != null) {
        if (this.parent.upgradeVersionOnly && Version.GFE_70.compareTo(currentRecoveredGFVersion()) > 0) {
            dr.addOnlineMember(pmid);
            if (dr.rmOfflineMember(pmid)) {
                this.ifLiveRecordCount--;
            }
        } else {
            dr.addOfflineAndEqualMember(pmid);
            if (dr.rmOnlineMember(pmid) || dr.rmOfflineMember(pmid)) {
                this.ifLiveRecordCount--;
            }
        }
        this.ifLiveRecordCount++;
        this.ifTotalRecordCount++;
    } else {
        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
            logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
        } else {
            throw new IllegalStateException("bad disk region id");
        }
    }
}
Also used : DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)

Example 22 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

Defined in the class DiskInitFile, method writeLiveData.

/**
   * Write all live data to the init file
   */
private void writeLiveData() {
    lock.lock(false);
    try {
        this.ifLiveRecordCount = 0;
        this.ifTotalRecordCount = 0;
        writeDiskStoreId();
        saveGemfireVersion();
        saveInstantiators();
        saveDataSerializers();
        saveCrfIds();
        saveDrfIds();
        saveKrfIds();
        for (DiskRegionView drv : this.drMap.values()) {
            writeLiveData(drv);
        }
        for (DiskRegionView drv : this.parent.getDiskRegions()) {
            writeLiveData(drv);
        }
        savePRConfigs();
        saveCanonicalIds();
        saveRevokedMembers();
        if (logger.isDebugEnabled()) {
            logger.debug("After compacting init file lrc={} trc={}", this.ifLiveRecordCount, this.ifTotalRecordCount);
        }
    } finally {
        lock.unlock();
    }
}
Also used : DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)

Example 23 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

Defined in the class DiskInitFile, method cmnClearRegion.

public void cmnClearRegion(long drId, ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion) {
    DiskRegionView drv = getDiskRegionById(drId);
    if (drv.getClearRVV() == null) {
        this.ifLiveRecordCount++;
    }
    // otherwise previous clear is cancelled so don't change liveRecordCount
    this.ifTotalRecordCount++;
    DiskStoreID ownerId = parent.getDiskStoreID();
    // Create a fake RVV for clear purposes. We only need to memberToVersion information
    RegionVersionHolder<DiskStoreID> ownerExceptions = memberToVersion.remove(ownerId);
    long ownerVersion = ownerExceptions == null ? 0 : ownerExceptions.getVersion();
    RegionVersionVector rvv = new DiskRegionVersionVector(ownerId, memberToVersion, ownerVersion, new ConcurrentHashMap(), 0L, false, ownerExceptions);
    drv.setClearRVV(rvv);
}
Also used : DiskRegionVersionVector(org.apache.geode.internal.cache.versions.DiskRegionVersionVector) DiskRegionVersionVector(org.apache.geode.internal.cache.versions.DiskRegionVersionVector) RegionVersionVector(org.apache.geode.internal.cache.versions.RegionVersionVector) DiskStoreID(org.apache.geode.internal.cache.persistence.DiskStoreID) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)

Example 24 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

Defined in the class DiskInitFile, method cmnRmMemberId.

public void cmnRmMemberId(long drId, PersistentMemberID pmid) {
    DiskRegionView dr = getDiskRegionById(drId);
    if (dr != null) {
        if (!dr.rmOnlineMember(pmid)) {
            if (!dr.rmOfflineMember(pmid)) {
                dr.rmEqualMember(pmid);
            }
        }
        // since we removed a member don't inc the live count
        // In fact decrement it by one since both this record
        // and the previous one are both garbage.
        this.ifLiveRecordCount--;
        this.ifTotalRecordCount++;
    } else {
        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
            logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
        } else {
            throw new IllegalStateException("bad disk region id");
        }
    }
}
Also used : DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView)

Example 25 with DiskRegionView

use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

Defined in the class HeapLRUCapacityController, method createLRUHelper.

@Override
protected EnableLRU createLRUHelper() {
    return new AbstractEnableLRU() {

        /**
       * Indicate what kind of <code>EvictionAlgorithm</code> this helper implements
       */
        public EvictionAlgorithm getEvictionAlgorithm() {
            return EvictionAlgorithm.LRU_HEAP;
        }

        /**
       * As far as we're concerned all entries have the same size
       */
        public int entrySize(Object key, Object value) throws IllegalArgumentException {
            /*
         * if (value != null) { return 1; } else { return 0; }
         */
            if (value == Token.TOMBSTONE) {
                return 0;
            }
            int size = HeapLRUCapacityController.this.getPerEntryOverhead();
            size += sizeof(key);
            size += sizeof(value);
            return size;
        }

        /**
       * In addition to initializing the statistics, create an evictor thread to periodically evict
       * the LRU entry.
       */
        @Override
        public LRUStatistics initStats(Object region, StatisticsFactory sf) {
            setRegionName(region);
            final LRUStatistics stats = new HeapLRUStatistics(sf, getRegionName(), this);
            setStats(stats);
            return stats;
        }

        public StatisticsType getStatisticsType() {
            return statType;
        }

        public String getStatisticsName() {
            return "HeapLRUStatistics";
        }

        public int getLimitStatId() {
            throw new UnsupportedOperationException("Limit not used with this LRU type");
        }

        public int getCountStatId() {
            return statType.nameToId("entryBytes");
        }

        public int getEvictionsStatId() {
            return statType.nameToId("lruEvictions");
        }

        public int getDestroysStatId() {
            return statType.nameToId("lruDestroys");
        }

        public int getDestroysLimitStatId() {
            return statType.nameToId("lruDestroysLimit");
        }

        public int getEvaluationsStatId() {
            return statType.nameToId("lruEvaluations");
        }

        public int getGreedyReturnsStatId() {
            return statType.nameToId("lruGreedyReturns");
        }

        /**
       * Okay, deep breath. Instead of basing the LRU calculation on the number of entries in the
       * region or on their "size" (which turned out to be incorrectly estimated in the general
       * case), we use the amount of memory currently in use. If the amount of memory current in use
       * {@linkplain Runtime#maxMemory max memory} - {@linkplain Runtime#freeMemory free memory} is
       * greater than the overflow threshold, then we evict the LRU entry.
       */
        public boolean mustEvict(LRUStatistics stats, Region region, int delta) {
            final InternalCache cache = (InternalCache) region.getRegionService();
            InternalResourceManager resourceManager = cache.getInternalResourceManager();
            boolean offheap = region.getAttributes().getOffHeap();
            final boolean monitorStateIsEviction = resourceManager.getMemoryMonitor(offheap).getState().isEviction();
            if (region instanceof BucketRegion) {
                return monitorStateIsEviction && ((BucketRegion) region).getSizeForEviction() > 0;
            }
            return monitorStateIsEviction && ((LocalRegion) region).getRegionMap().sizeInVM() > 0;
        }

        @Override
        public boolean lruLimitExceeded(LRUStatistics lruStatistics, DiskRegionView drv) {
            InternalResourceManager resourceManager = drv.getDiskStore().getCache().getInternalResourceManager();
            return resourceManager.getMemoryMonitor(drv.getOffHeap()).getState().isEviction();
        }
    };
}
Also used : InternalCache(org.apache.geode.internal.cache.InternalCache) InternalResourceManager(org.apache.geode.internal.cache.control.InternalResourceManager) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView) BucketRegion(org.apache.geode.internal.cache.BucketRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) StatisticsFactory(org.apache.geode.StatisticsFactory)

Aggregations

DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView)27 ArrayList (java.util.ArrayList)5 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)4 File (java.io.File)3 HashMap (java.util.HashMap)3 StatisticsFactory (org.apache.geode.StatisticsFactory)3 DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore)3 PdxType (org.apache.geode.pdx.internal.PdxType)3 Int2ObjectOpenHashMap (it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap)2 VersionTag (org.apache.geode.internal.cache.versions.VersionTag)2 StoredObject (org.apache.geode.internal.offheap.StoredObject)2 EnumInfo (org.apache.geode.pdx.internal.EnumInfo)2 IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)2 Expectations (org.jmock.Expectations)2 Test (org.junit.Test)2 IntOpenHashSet (it.unimi.dsi.fastutil.ints.IntOpenHashSet)1 Long2ObjectOpenHashMap (it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap)1 LongOpenHashSet (it.unimi.dsi.fastutil.longs.LongOpenHashSet)1 IOException (java.io.IOException)1 HashSet (java.util.HashSet)1