Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by Apache.
The class DiskInitFile, method cmdOfflineAndEqualMemberId.
public void cmdOfflineAndEqualMemberId(long drId, PersistentMemberID pmid) {
  DiskRegionView dr = getDiskRegionById(drId);
  if (dr != null) {
    if (this.parent.upgradeVersionOnly && Version.GFE_70.compareTo(currentRecoveredGFVersion()) > 0) {
      dr.addOnlineMember(pmid);
      if (dr.rmOfflineMember(pmid)) {
        this.ifLiveRecordCount--;
      }
    } else {
      dr.addOfflineAndEqualMember(pmid);
      if (dr.rmOnlineMember(pmid) || dr.rmOfflineMember(pmid)) {
        this.ifLiveRecordCount--;
      }
    }
    this.ifLiveRecordCount++;
    this.ifTotalRecordCount++;
  } else {
    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
      logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
    } else {
      throw new IllegalStateException("bad disk region id");
    }
  }
}
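The method keeps two counters in step: every record appended to the init file raises the total count, while the live count only tracks records whose state has not been superseded, so a member's stale online/offline record is subtracted before the new record is added. The following minimal, self-contained sketch illustrates that bookkeeping pattern with hypothetical names (MemberRecordBook is not part of Geode's API), assuming string member ids and a simple half-live compaction heuristic.

import java.util.HashSet;
import java.util.Set;

class MemberRecordBook {
  private final Set<String> onlineMembers = new HashSet<>();
  private final Set<String> offlineAndEqualMembers = new HashSet<>();
  private int liveRecordCount;
  private int totalRecordCount;

  /** Record that a member went offline while holding data equal to the latest. */
  void offlineAndEqual(String memberId) {
    offlineAndEqualMembers.add(memberId);
    // The member's previous online record (if any) is now garbage on disk.
    if (onlineMembers.remove(memberId)) {
      liveRecordCount--;
    }
    liveRecordCount++;   // the record just written is live
    totalRecordCount++;  // and it occupies space in the file either way
  }

  boolean shouldCompact() {
    // e.g. compact once fewer than half of the records are still live
    return totalRecordCount > 0 && liveRecordCount * 2 < totalRecordCount;
  }
}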
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by Apache.
The class DiskInitFile, method writeLiveData.
/**
 * Write all live data to the init file
 */
private void writeLiveData() {
  lock.lock(false);
  try {
    this.ifLiveRecordCount = 0;
    this.ifTotalRecordCount = 0;
    writeDiskStoreId();
    saveGemfireVersion();
    saveInstantiators();
    saveDataSerializers();
    saveCrfIds();
    saveDrfIds();
    saveKrfIds();
    for (DiskRegionView drv : this.drMap.values()) {
      writeLiveData(drv);
    }
    for (DiskRegionView drv : this.parent.getDiskRegions()) {
      writeLiveData(drv);
    }
    savePRConfigs();
    saveCanonicalIds();
    saveRevokedMembers();
    if (logger.isDebugEnabled()) {
      logger.debug("After compacting init file lrc={} trc={}", this.ifLiveRecordCount,
          this.ifTotalRecordCount);
    }
  } finally {
    lock.unlock();
  }
}
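The structure here is a classic rewrite-under-lock compaction: take the lock, zero both counters, then re-emit only the records that are still live, so that afterwards the total count equals the live count. A rough, standalone sketch of that idea follows; the class and its use of ReentrantLock and Runnable writers are hypothetical and only illustrate the pattern, not Geode's locking or record format.

import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

class CompactingRecordFile {
  private final ReentrantLock lock = new ReentrantLock();
  private int liveRecordCount;
  private int totalRecordCount;

  void compact(List<Runnable> liveRecordWriters) {
    lock.lock();
    try {
      liveRecordCount = 0;
      totalRecordCount = 0;
      for (Runnable writer : liveRecordWriters) {
        // Each writer re-emits one still-live record; the counters now describe
        // the rewritten file, in which every record is live.
        writer.run();
        liveRecordCount++;
        totalRecordCount++;
      }
    } finally {
      lock.unlock();
    }
  }
}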
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by Apache.
The class DiskInitFile, method cmnClearRegion.
public void cmnClearRegion(long drId,
    ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion) {
  DiskRegionView drv = getDiskRegionById(drId);
  if (drv.getClearRVV() == null) {
    this.ifLiveRecordCount++;
  }
  // otherwise the previous clear is cancelled, so don't change the live record count
  this.ifTotalRecordCount++;
  DiskStoreID ownerId = parent.getDiskStoreID();
  // Create a fake RVV for clear purposes. We only need the memberToVersion information.
  RegionVersionHolder<DiskStoreID> ownerExceptions = memberToVersion.remove(ownerId);
  long ownerVersion = ownerExceptions == null ? 0 : ownerExceptions.getVersion();
  RegionVersionVector rvv = new DiskRegionVersionVector(ownerId, memberToVersion, ownerVersion,
      new ConcurrentHashMap(), 0L, false, ownerExceptions);
  drv.setClearRVV(rvv);
}
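The key step above is pulling the owner's own entry out of memberToVersion so it can seed the vector's local version, while the remainder of the map becomes the remote-member view. A minimal sketch of that split using plain collections and hypothetical names (not Geode's RegionVersionVector classes):

import java.util.HashMap;
import java.util.Map;

class VersionSplit {
  final long ownerVersion;                 // version to seed the owner's own slot
  final Map<String, Long> remoteVersions;  // everything except the owner

  VersionSplit(String ownerId, Map<String, Long> memberToVersion) {
    Map<String, Long> copy = new HashMap<>(memberToVersion);
    Long own = copy.remove(ownerId);       // the owner's entry is handled separately
    this.ownerVersion = own == null ? 0L : own;
    this.remoteVersions = copy;
  }
}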
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by Apache.
The class DiskInitFile, method cmnRmMemberId.
public void cmnRmMemberId(long drId, PersistentMemberID pmid) {
  DiskRegionView dr = getDiskRegionById(drId);
  if (dr != null) {
    if (!dr.rmOnlineMember(pmid)) {
      if (!dr.rmOfflineMember(pmid)) {
        dr.rmEqualMember(pmid);
      }
    }
    // Since we removed a member, don't increment the live count.
    // In fact, decrement it by one, since both this record
    // and the previous one are now garbage.
    this.ifLiveRecordCount--;
    this.ifTotalRecordCount++;
  } else {
    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
      logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
    } else {
      throw new IllegalStateException("bad disk region id");
    }
  }
}
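The removal tries each member set in turn and stops at the first one that actually held the id; since the removal record and the record it cancels are both dead weight, the live count goes down even though the file grows by one record. A compact, self-contained sketch of that pattern with hypothetical sets and counters (not Geode's API):

import java.util.HashSet;
import java.util.Set;

class MemberRemoval {
  private final Set<String> online = new HashSet<>();
  private final Set<String> offline = new HashSet<>();
  private final Set<String> equal = new HashSet<>();
  private int liveRecordCount;
  private int totalRecordCount;

  void removeMember(String memberId) {
    // Stop at the first set that actually contained the member.
    if (!online.remove(memberId)) {
      if (!offline.remove(memberId)) {
        equal.remove(memberId);
      }
    }
    // The removal record and the record it cancels are both garbage,
    // so the live count drops while the total count still grows.
    liveRecordCount--;
    totalRecordCount++;
  }
}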
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by Apache.
The class HeapLRUCapacityController, method createLRUHelper.
@Override
protected EnableLRU createLRUHelper() {
  return new AbstractEnableLRU() {

    /**
     * Indicate what kind of <code>EvictionAlgorithm</code> this helper implements
     */
    public EvictionAlgorithm getEvictionAlgorithm() {
      return EvictionAlgorithm.LRU_HEAP;
    }

    /**
     * As far as we're concerned all entries have the same size
     */
    public int entrySize(Object key, Object value) throws IllegalArgumentException {
      /*
       * if (value != null) { return 1; } else { return 0; }
       */
      if (value == Token.TOMBSTONE) {
        return 0;
      }
      int size = HeapLRUCapacityController.this.getPerEntryOverhead();
      size += sizeof(key);
      size += sizeof(value);
      return size;
    }

    /**
     * In addition to initializing the statistics, create an evictor thread to periodically evict
     * the LRU entry.
     */
    @Override
    public LRUStatistics initStats(Object region, StatisticsFactory sf) {
      setRegionName(region);
      final LRUStatistics stats = new HeapLRUStatistics(sf, getRegionName(), this);
      setStats(stats);
      return stats;
    }

    public StatisticsType getStatisticsType() {
      return statType;
    }

    public String getStatisticsName() {
      return "HeapLRUStatistics";
    }

    public int getLimitStatId() {
      throw new UnsupportedOperationException("Limit not used with this LRU type");
    }

    public int getCountStatId() {
      return statType.nameToId("entryBytes");
    }

    public int getEvictionsStatId() {
      return statType.nameToId("lruEvictions");
    }

    public int getDestroysStatId() {
      return statType.nameToId("lruDestroys");
    }

    public int getDestroysLimitStatId() {
      return statType.nameToId("lruDestroysLimit");
    }

    public int getEvaluationsStatId() {
      return statType.nameToId("lruEvaluations");
    }

    public int getGreedyReturnsStatId() {
      return statType.nameToId("lruGreedyReturns");
    }

    /**
     * Okay, deep breath. Instead of basing the LRU calculation on the number of entries in the
     * region or on their "size" (which turned out to be incorrectly estimated in the general
     * case), we use the amount of memory currently in use. If the amount of memory currently in
     * use ({@linkplain Runtime#maxMemory max memory} - {@linkplain Runtime#freeMemory free
     * memory}) is greater than the overflow threshold, then we evict the LRU entry.
     */
    public boolean mustEvict(LRUStatistics stats, Region region, int delta) {
      final InternalCache cache = (InternalCache) region.getRegionService();
      InternalResourceManager resourceManager = cache.getInternalResourceManager();
      boolean offheap = region.getAttributes().getOffHeap();
      final boolean monitorStateIsEviction =
          resourceManager.getMemoryMonitor(offheap).getState().isEviction();
      if (region instanceof BucketRegion) {
        return monitorStateIsEviction && ((BucketRegion) region).getSizeForEviction() > 0;
      }
      return monitorStateIsEviction && ((LocalRegion) region).getRegionMap().sizeInVM() > 0;
    }

    @Override
    public boolean lruLimitExceeded(LRUStatistics lruStatistics, DiskRegionView drv) {
      InternalResourceManager resourceManager =
          drv.getDiskStore().getCache().getInternalResourceManager();
      return resourceManager.getMemoryMonitor(drv.getOffHeap()).getState().isEviction();
    }
  };
}
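The javadoc on mustEvict describes the underlying idea: eviction starts once the heap in use crosses a threshold, rather than once a region holds some count or byte-size of entries. As a rough, self-contained illustration of that check (the class name and threshold are hypothetical, and Geode itself delegates the decision to its resource manager's memory monitor as shown above):

public class HeapUsageCheck {

  private final double evictionThreshold; // e.g. 0.80 for 80% of the maximum heap

  public HeapUsageCheck(double evictionThreshold) {
    this.evictionThreshold = evictionThreshold;
  }

  public boolean overThreshold() {
    Runtime rt = Runtime.getRuntime();
    // Heap currently in use: allocated heap minus the free portion of it.
    long used = rt.totalMemory() - rt.freeMemory();
    return used > (long) (rt.maxMemory() * evictionThreshold);
  }

  public static void main(String[] args) {
    System.out.println("evict? " + new HeapUsageCheck(0.80).overThreshold());
  }
}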