Use of org.apache.geode.internal.cache.lru.LRUStatistics in project geode by apache:
the class DiskRegionDUnitTest, method testDestroy.
/**
 * Tests destroying entries in an overflow region.
 *
 * <p>Entries that are still memory-resident must be destroyable without touching the
 * disk store (the disk "removes" stat stays 0), while destroying entries that were
 * evicted to disk must increment the disk "removes" stat once per destroy.
 */
@Test
public void testDestroy() throws Exception {
  final String name = this.getUniqueName();
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  // LRU by entry count, capped at 100 entries, overflowing the rest to disk.
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
  File d = new File("DiskRegions" + OSProcess.getId());
  d.mkdirs();
  DiskStoreFactory dsf = getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {d});
  // Synchronous writes so disk stats are up to date right after each operation.
  factory.setDiskSynchronous(true);
  DiskStore ds = dsf.create(name);
  factory.setDiskStoreName(ds.getName());
  Region region = createRegion(name, factory.create());
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  DiskRegionStats diskStats = dr.getStats();
  LRUStatistics lruStats = getLRUStats(region);

  // Fill the region until at least 40 entries have been evicted to disk.
  // Keys are 0..total-1; LRU evicts the oldest, so keys [0, evictions) are on disk
  // and keys [evictions, total) are memory-resident.
  int total;
  for (total = 0; lruStats.getEvictions() < 40; total++) {
    // Integer.valueOf instead of the deprecated Integer(int) constructor.
    region.put(Integer.valueOf(total), new byte[1000]);
  }
  assertEquals(0, diskStats.getRemoves());

  long evictions = lruStats.getEvictions();
  LogWriterUtils.getLogWriter().info("Destroying memory resident entries");
  // Destroying each of these guys should have no effect on the disk
  for (int i = total - 1; i >= evictions; i--) {
    region.destroy(Integer.valueOf(i));
    flush(region);
    assertEquals(0, diskStats.getRemoves());
    assertEquals(evictions, lruStats.getEvictions());
  }

  LogWriterUtils.getLogWriter().info("Destroying disk-resident entries. evictions=" + evictions);
  // Destroying each of these guys should cause a removal from disk
  for (int i = ((int) evictions) - 1; i >= 0; i--) {
    region.destroy(Integer.valueOf(i));
    flush(region);
    assertEquals((evictions - i), diskStats.getRemoves());
  }
  // Destroys must not have been counted as further evictions.
  assertEquals(evictions, lruStats.getEvictions());

  LogWriterUtils.getLogWriter().info("keys remaining in region: " + region.keySet().size());
  assertEquals(0, region.keySet().size());
}
Use of org.apache.geode.internal.cache.lru.LRUStatistics in project geode by apache:
the class AbstractLRURegionMap, method lruUpdateCallback.
/**
 * Eviction callback invoked after a map update: applies the pending size delta and,
 * when this map owns a local region, evicts LRU entries until the helper's
 * mustEvict() is satisfied or the LRU list is exhausted.
 *
 * <p>Two eviction paths exist: for LRU-heap regions that are buckets, sibling
 * non-primary buckets of the same partitioned region may be drained first
 * (HeapEvictor.EVICT_HIGH_ENTRY_COUNT_BUCKETS_FIRST); otherwise entries are
 * evicted directly from this map's LRU list.
 */
@Override
public void lruUpdateCallback() {
  final boolean isDebugEnabled_LRU = logger.isTraceEnabled(LogMarker.LRU);

  if (getCallbackDisabled()) {
    return;
  }
  // Snapshot and clear the thread-local byte delta accumulated by the update.
  final int delta = getDelta();
  int bytesToEvict = delta;
  resetThreadLocals();
  if (isDebugEnabled_LRU && _isOwnerALocalRegion()) {
    // NOTE(review): the first two arguments look swapped relative to the message text
    // ("list size" receives getTotalEntrySize(), "actual size" receives the list
    // count) -- confirm intended placeholder order.
    logger.trace(LogMarker.LRU,
        "lruUpdateCallback; list size is: {}; actual size is: {}; map size is: {}; delta is: {}; limit is: {}; tombstone count={}",
        getTotalEntrySize(), this._getLruList().getExpensiveListCount(), size(), delta,
        getLimit(), _getOwner().getTombstoneCount());
  }
  LRUStatistics stats = _getLruList().stats();
  if (!_isOwnerALocalRegion()) {
    changeTotalEntrySize(delta);
    // instead of evicting we just quit faulting values in
  } else if (_getCCHelper().getEvictionAlgorithm().isLRUHeap()) {
    changeTotalEntrySize(delta);
    try {
      while (bytesToEvict > 0 && _getCCHelper().mustEvict(stats, _getOwner(), bytesToEvict)) {
        boolean evictFromThisRegion = true;
        if (HeapEvictor.EVICT_HIGH_ENTRY_COUNT_BUCKETS_FIRST
            && _getOwner() instanceof BucketRegion) {
          long bytesEvicted = 0;
          long totalBytesEvicted = 0;
          // Buckets sorted by the partitioned region; drain siblings before self.
          List<BucketRegion> regions =
              ((BucketRegion) _getOwner()).getPartitionedRegion().getSortedBuckets();
          Iterator<BucketRegion> iter = regions.iterator();
          while (iter.hasNext()) {
            BucketRegion region = iter.next();
            // only primaries can trigger inline eviction fix for 41814
            if (!region.getBucketAdvisor().isPrimary()) {
              try {
                bytesEvicted =
                    ((AbstractLRURegionMap) region.entries).centralizedLruUpdateCallback();
                if (bytesEvicted == 0) {
                  // Nothing left to evict in this bucket; drop it from the candidate list.
                  iter.remove();
                } else {
                  // A sibling yielded bytes, so skip evicting from this map this round.
                  evictFromThisRegion = false;
                }
                totalBytesEvicted += bytesEvicted;
                bytesToEvict -= bytesEvicted;
                if (bytesEvicted > bytesToEvict) {
                  bytesToEvict = 0;
                  break;
                }
                if (totalBytesEvicted > bytesToEvict) {
                  break;
                }
              } catch (RegionDestroyedException rd) {
                // Bucket went away mid-eviction; only escalate if the cache is closing.
                region.cache.getCancelCriterion().checkCancelInProgress(rd);
              } catch (Exception e) {
                region.cache.getCancelCriterion().checkCancelInProgress(e);
                logger.warn(LocalizedMessage.create(
                    LocalizedStrings.Eviction_EVICTOR_TASK_EXCEPTION,
                    new Object[] {e.getMessage()}), e);
              }
            }
          }
        }
        if (evictFromThisRegion) {
          LRUEntry removalEntry = (LRUEntry) _getLruList().getLRUEntry();
          if (removalEntry != null) {
            int sizeOfValue = evictEntry(removalEntry, stats);
            if (sizeOfValue != 0) {
              bytesToEvict -= sizeOfValue;
              if (isDebugEnabled_LRU) {
                logger.trace(LogMarker.LRU,
                    "evicted entry key={} total entry size is now: {} bytesToEvict :{}",
                    removalEntry.getKey(), getTotalEntrySize(), bytesToEvict);
              }
              stats.incEvictions();
              if (_isOwnerALocalRegion()) {
                if (_getOwner() instanceof BucketRegion) {
                  ((BucketRegion) _getOwner()).incEvictions(1);
                }
              }
              if (isDebugEnabled_LRU) {
                logger.trace(LogMarker.LRU, "evictions={}", stats.getEvictions());
              }
              _getCCHelper().afterEviction();
            }
          } else {
            // LRU list is empty but the map still reports size; give up for now.
            if (getTotalEntrySize() != 0) {
              if (isDebugEnabled_LRU) {
                logger.trace(LogMarker.LRU, "leaving evict loop early");
              }
            }
            break;
          }
        }
      }
    } catch (RegionClearedException e) {
      // A concurrent region clear emptied the map; nothing further to evict.
      if (isDebugEnabled_LRU) {
        logger.trace(LogMarker.LRU, "exception ={}", e.getCause());
      }
    }
  } else {
    try {
      // to fix bug 48285 do no evict if bytesToEvict <= 0.
      while (bytesToEvict > 0 && _getCCHelper().mustEvict(stats, _getOwner(), bytesToEvict)) {
        LRUEntry removalEntry = (LRUEntry) _getLruList().getLRUEntry();
        if (removalEntry != null) {
          if (evictEntry(removalEntry, stats) != 0) {
            if (isDebugEnabled_LRU) {
              // Fixed: third placeholder was missing, so bytesToEvict was never printed.
              logger.trace(LogMarker.LRU,
                  "evicted entry key(2)={} total entry size is now: {} bytesToEvict :{}",
                  removalEntry.getKey(), getTotalEntrySize(), bytesToEvict);
            }
            stats.incEvictions();
            if (_isOwnerALocalRegion()) {
              if (_getOwner() instanceof BucketRegion) {
                ((BucketRegion) _getOwner()).incEvictions(1);
              }
            }
            if (isDebugEnabled_LRU) {
              logger.trace(LogMarker.LRU, "evictions={}", stats.getEvictions());
            }
            _getCCHelper().afterEviction();
          }
        } else {
          // LRU list is empty but the map still reports size; give up for now.
          if (getTotalEntrySize() != 0) {
            if (isDebugEnabled_LRU) {
              logger.trace(LogMarker.LRU, "leaving evict loop early");
            }
          }
          break;
        }
      }
      // Delta is applied after eviction on this path (before it on the LRU-heap path).
      changeTotalEntrySize(delta);
    } catch (RegionClearedException e) {
      // A concurrent region clear emptied the map; nothing further to evict.
      // Fixed: use trace + LRU marker like every other message under this guard,
      // instead of an unmarked debug call.
      if (isDebugEnabled_LRU) {
        logger.trace(LogMarker.LRU, "exception ={}", e.getCause());
      }
    }
  }
  if (isDebugEnabled_LRU) {
    logger.trace(LogMarker.LRU, "callback complete. LRU size is now {}",
        _getLruList().stats().getCounter());
  }
  // If in transaction context (either local or message)
  // reset the tx thread local
}
Use of org.apache.geode.internal.cache.lru.LRUStatistics in project geode by apache:
the class AbstractLRURegionMap, method centralizedLruUpdateCallback.
/**
 * Centralized eviction entry point used when one region evicts on behalf of another
 * (e.g. a sibling bucket): evicts LRU entries until mustEvict() is satisfied or a
 * single successful eviction has occurred.
 *
 * @return the number of bytes evicted; 0 if the callback is disabled or nothing
 *         could be evicted
 */
public int centralizedLruUpdateCallback() {
  final boolean isDebugEnabled_LRU = logger.isTraceEnabled(LogMarker.LRU);

  int evictedBytes = 0;
  if (getCallbackDisabled()) {
    return evictedBytes;
  }
  // Consume and discard the pending thread-local delta.
  getDelta();
  resetThreadLocals();
  if (isDebugEnabled_LRU) {
    logger.trace(LogMarker.LRU, "centralLruUpdateCallback: lru size is now {}, limit is: {}",
        getTotalEntrySize(), getLimit());
  }
  LRUStatistics stats = _getLruList().stats();
  try {
    // Stop after the first successful eviction (evictedBytes != 0).
    while (mustEvict() && evictedBytes == 0) {
      LRUEntry removalEntry = (LRUEntry) _getLruList().getLRUEntry();
      if (removalEntry != null) {
        evictedBytes = evictEntry(removalEntry, stats);
        if (evictedBytes != 0) {
          if (_getOwner() instanceof BucketRegion) {
            ((BucketRegion) _getOwner()).incEvictions(1);
          }
          stats.incEvictions();
          if (isDebugEnabled_LRU) {
            // Fixed: use trace + LRU marker consistent with every other message
            // under the isTraceEnabled(LogMarker.LRU) guard.
            logger.trace(LogMarker.LRU, "evictions={}", stats.getEvictions());
          }
          _getCCHelper().afterEviction();
        }
      } else {
        // LRU list is empty but the map still reports size; give up for now.
        if (getTotalEntrySize() != 0) {
          if (isDebugEnabled_LRU) {
            logger.trace(LogMarker.LRU, "leaving evict loop early");
          }
        }
        break;
      }
    }
  } catch (RegionClearedException rce) {
    // Ignore: a concurrent region clear emptied the map; nothing further to evict.
    if (isDebugEnabled_LRU) {
      logger.trace(LogMarker.LRU, "exception ={}", rce.getCause());
    }
  }
  if (isDebugEnabled_LRU) {
    logger.trace(LogMarker.LRU, "callback complete");
  }
  // reset the tx thread local
  return evictedBytes;
}
Use of org.apache.geode.internal.cache.lru.LRUStatistics in project geode by apache:
the class MemLRUEvictionControllerDUnitTest, method testSizeClassesOnce.
/**
 * Make sure that we only size a class the first time we see the class instance:
 * every TestObject, no matter how large its payload, must be charged the size
 * computed for the first instance, so the LRU counter moves in fixed entrySize
 * steps on both put and destroy.
 *
 * @throws CacheException
 */
@Test
public void testSizeClassesOnce() throws CacheException {
  final int thresholdMb = 4;
  final String regionName = this.getUniqueName();

  AttributesFactory attrs = new AttributesFactory();
  attrs.setScope(Scope.LOCAL);
  attrs.setEvictionAttributes(EvictionAttributes.createLRUMemoryAttributes(thresholdMb));

  Region region = createRegion(regionName, attrs.create());
  LRUStatistics lruStats = getLRUStats(region);
  assertNotNull(lruStats);

  // Size one representative instance; all later instances reuse this size.
  TestObject sample = new TestObject(50);
  // keySize is 0 because it is inlined
  // ObjectSizer.DEFAULT.sizeof(new String("10000"));
  final int keySize = 0;
  final int valueSize = ObjectSizer.DEFAULT.sizeof(sample);
  final int entrySize = keySize + valueSize + getEntryOverhead(region);

  Random random = new Random();

  // Phase 1: put 100 randomly sized objects; the counter must grow by the
  // fixed entrySize each time and nothing may be evicted.
  for (int count = 1; count <= 100; count++) {
    Object key = String.valueOf(10000 + count);
    Object value = new TestObject(random.nextInt(100));
    region.put(key, value);
    assertEquals(count * entrySize, lruStats.getCounter());
    assertEquals(0, lruStats.getEvictions());
  }

  // Phase 2: destroy them in reverse; the counter must shrink by the same
  // fixed entrySize per destroy, still with zero evictions.
  for (int count = 100; count >= 1; count--) {
    Object key = String.valueOf(10000 + count);
    region.destroy(key);
    assertEquals((count - 1) * entrySize, lruStats.getCounter());
    assertEquals(0, lruStats.getEvictions());
  }
}
Aggregations