Use of org.apache.geode.StatisticsFactory in project geode by apache.
The class DiskStoreImpl, method getOrCreatePRLRUStats.
LRUStatistics getOrCreatePRLRUStats(PlaceHolderDiskRegion dr) {
  String prName = dr.getPrName();
  LRUStatistics result = null;
  synchronized (this.prlruStatMap) {
    result = this.prlruStatMap.get(prName);
    if (result == null) {
      EvictionAttributesImpl ea = dr.getEvictionAttributes();
      LRUAlgorithm ec = ea.createEvictionController(null, dr.getOffHeap());
      StatisticsFactory sf = cache.getDistributedSystem();
      result = ec.getLRUHelper().initStats(dr, sf);
      this.prlruStatMap.put(prName, result);
    }
  }
  return result;
}
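The method above is a synchronized cache-or-create: the DistributedSystem doubles as the StatisticsFactory, and one LRUStatistics instance is created per partitioned-region name and then reused. Below is a minimal, self-contained sketch of the same pattern; the StatsHolder type is a hypothetical stand-in for LRUStatistics and the eviction-controller plumbing.

import java.util.HashMap;
import java.util.Map;

class PerNameStatsCache {
  // Hypothetical stand-in for LRUStatistics.
  static class StatsHolder {
    final String name;
    StatsHolder(String name) {
      this.name = name;
    }
  }

  private final Map<String, StatsHolder> statMap = new HashMap<>();

  StatsHolder getOrCreate(String prName) {
    synchronized (statMap) {
      StatsHolder result = statMap.get(prName);
      if (result == null) {
        // created at most once per name; subsequent callers reuse it
        result = new StatsHolder(prName);
        statMap.put(prName, result);
      }
      return result;
    }
  }
}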
Use of org.apache.geode.StatisticsFactory in project geode by apache.
The class HeapLRUCapacityController, method createLRUHelper.
@Override
protected EnableLRU createLRUHelper() {
  return new AbstractEnableLRU() {

    /**
     * Indicate what kind of <code>EvictionAlgorithm</code> this helper implements.
     */
    public EvictionAlgorithm getEvictionAlgorithm() {
      return EvictionAlgorithm.LRU_HEAP;
    }

    /**
     * Estimate an entry's size as the per-entry overhead plus the sizes of its key and value;
     * tombstones count as zero.
     */
    public int entrySize(Object key, Object value) throws IllegalArgumentException {
      if (value == Token.TOMBSTONE) {
        return 0;
      }
      int size = HeapLRUCapacityController.this.getPerEntryOverhead();
      size += sizeof(key);
      size += sizeof(value);
      return size;
    }
    /**
     * In addition to initializing the statistics, create an evictor thread to periodically evict
     * the LRU entry.
     */
    @Override
    public LRUStatistics initStats(Object region, StatisticsFactory sf) {
      setRegionName(region);
      final LRUStatistics stats = new HeapLRUStatistics(sf, getRegionName(), this);
      setStats(stats);
      return stats;
    }

    public StatisticsType getStatisticsType() {
      return statType;
    }

    public String getStatisticsName() {
      return "HeapLRUStatistics";
    }

    public int getLimitStatId() {
      throw new UnsupportedOperationException("Limit not used with this LRU type");
    }

    public int getCountStatId() {
      return statType.nameToId("entryBytes");
    }

    public int getEvictionsStatId() {
      return statType.nameToId("lruEvictions");
    }

    public int getDestroysStatId() {
      return statType.nameToId("lruDestroys");
    }

    public int getDestroysLimitStatId() {
      return statType.nameToId("lruDestroysLimit");
    }

    public int getEvaluationsStatId() {
      return statType.nameToId("lruEvaluations");
    }

    public int getGreedyReturnsStatId() {
      return statType.nameToId("lruGreedyReturns");
    }
    /**
     * Instead of basing the LRU calculation on the number of entries in the region or on their
     * "size" (which turned out to be incorrectly estimated in the general case), we use the
     * amount of memory currently in use. If the amount of memory in use ({@linkplain
     * Runtime#maxMemory max memory} - {@linkplain Runtime#freeMemory free memory}) is greater
     * than the overflow threshold, then we evict the LRU entry.
     */
    public boolean mustEvict(LRUStatistics stats, Region region, int delta) {
      final InternalCache cache = (InternalCache) region.getRegionService();
      InternalResourceManager resourceManager = cache.getInternalResourceManager();
      boolean offheap = region.getAttributes().getOffHeap();
      final boolean monitorStateIsEviction =
          resourceManager.getMemoryMonitor(offheap).getState().isEviction();
      if (region instanceof BucketRegion) {
        return monitorStateIsEviction && ((BucketRegion) region).getSizeForEviction() > 0;
      }
      return monitorStateIsEviction && ((LocalRegion) region).getRegionMap().sizeInVM() > 0;
    }

    @Override
    public boolean lruLimitExceeded(LRUStatistics lruStatistics, DiskRegionView drv) {
      InternalResourceManager resourceManager =
          drv.getDiskStore().getCache().getInternalResourceManager();
      return resourceManager.getMemoryMonitor(drv.getOffHeap()).getState().isEviction();
    }
  };
}
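For context, here is a minimal sketch (not from the Geode source) of how an application would enable the heap-LRU eviction this controller backs; the region name, eviction percentage, and connection properties are illustrative.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class HeapLruExample {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().set("mcast-port", "0").set("locators", "").create();
    // mustEvict() above only returns true while the resource manager's memory
    // monitor is in the eviction state, so set the threshold that arms it.
    cache.getResourceManager().setEvictionHeapPercentage(75.0f);
    Region<String, String> region =
        cache.<String, String>createRegionFactory(RegionShortcut.LOCAL)
            .setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes())
            .create("example");
    region.put("k", "v");
    cache.close();
  }
}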
Use of org.apache.geode.StatisticsFactory in project geode by apache.
The class GatewayReceiverStats, method createGatewayReceiverStats.
// ///////////////////// Constructors ///////////////////////
public static GatewayReceiverStats createGatewayReceiverStats(String ownerName) {
  StatisticsFactory f = InternalDistributedSystem.getAnyInstance();
  StatisticDescriptor[] descriptors = new StatisticDescriptor[] {
      f.createIntCounter(DUPLICATE_BATCHES_RECEIVED,
          "number of batches which have already been seen by this GatewayReceiver", "operations"),
      f.createIntCounter(OUT_OF_ORDER_BATCHES_RECEIVED,
          "number of batches which are out of order on this GatewayReceiver", "operations"),
      f.createIntCounter(EARLY_ACKS,
          "number of early acknowledgements sent to gatewaySenders", "operations"),
      f.createIntCounter(EVENTS_RECEIVED,
          "total number of events across the batches received by this GatewayReceiver",
          "operations"),
      f.createIntCounter(CREAT_REQUESTS,
          "total number of create operations received by this GatewayReceiver", "operations"),
      f.createIntCounter(UPDATE_REQUESTS,
          "total number of update operations received by this GatewayReceiver", "operations"),
      f.createIntCounter(DESTROY_REQUESTS,
          "total number of destroy operations received by this GatewayReceiver", "operations"),
      f.createIntCounter(UNKNOWN_OPERATIONS_RECEIVED,
          "total number of unknown operations received by this GatewayReceiver", "operations"),
      f.createIntCounter(EXCEPTIONS_OCCURRED,
          "number of exceptions that occurred while processing the batches", "operations") };
  return new GatewayReceiverStats(f, ownerName, typeName, descriptors);
}
Use of org.apache.geode.StatisticsFactory in project geode by apache.
The class ComplexDiskRegionJUnitTest, method testRemoveFirstOplog.
/**
 * Test method for 'org.apache.geode.internal.cache.ComplexDiskRegion.removeFirstOplog(Oplog)'.
 *
 * The test verifies the FIFO property of the oplog set (the first oplog to be added should be
 * the first to be rolled).
 */
@Test
public void testRemoveFirstOplog() {
  deleteFiles();
  diskProps.setRolling(false);
  region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  StatisticsFactory factory = region.getCache().getDistributedSystem();
  Oplog oplog1 = new Oplog(11, dr.getOplogSet(), new DirectoryHolder(factory, dirs[1], 1000, 0));
  Oplog oplog2 = new Oplog(12, dr.getOplogSet(), new DirectoryHolder(factory, dirs[2], 1000, 1));
  Oplog oplog3 = new Oplog(13, dr.getOplogSet(), new DirectoryHolder(factory, dirs[3], 1000, 2));
  // give these oplogs some fake "live" entries
  oplog1.incTotalCount();
  oplog1.incLiveCount();
  oplog2.incTotalCount();
  oplog2.incLiveCount();
  oplog3.incTotalCount();
  oplog3.incLiveCount();
  dr.addToBeCompacted(oplog1);
  dr.addToBeCompacted(oplog2);
  dr.addToBeCompacted(oplog3);
  if (oplog1 != dr.removeOplog(oplog1.getOplogId())) {
    fail("expected oplog1 to be the first oplog, but it was not");
  }
  if (oplog2 != dr.removeOplog(oplog2.getOplogId())) {
    fail("expected oplog2 to be the next oplog, but it was not");
  }
  if (oplog3 != dr.removeOplog(oplog3.getOplogId())) {
    fail("expected oplog3 to be the last oplog, but it was not");
  }
  oplog1.destroy();
  oplog2.destroy();
  oplog3.destroy();
  closeDown();
  deleteFiles();
}
Use of org.apache.geode.StatisticsFactory in project geode by apache.
The class ComplexDiskRegionJUnitTest, method testAddToBeCompacted.
/**
 * Test method for 'org.apache.geode.internal.cache.ComplexDiskRegion.addToBeCompacted(Oplog)'.
 *
 * The test verifies that an oplog is correctly added to the set of oplogs to be rolled.
 */
@Test
public void testAddToBeCompacted() {
  deleteFiles();
  diskProps.setRolling(false);
  diskProps.setAllowForceCompaction(true);
  region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  StatisticsFactory factory = region.getCache().getDistributedSystem();
  Oplog oplog1 = new Oplog(11, dr.getOplogSet(), new DirectoryHolder(factory, dirs[1], 1000, 0));
  Oplog oplog2 = new Oplog(12, dr.getOplogSet(), new DirectoryHolder(factory, dirs[2], 1000, 1));
  Oplog oplog3 = new Oplog(13, dr.getOplogSet(), new DirectoryHolder(factory, dirs[3], 1000, 2));
  // give these oplogs some fake "live" entries
  oplog1.incTotalCount();
  oplog1.incLiveCount();
  oplog2.incTotalCount();
  oplog2.incLiveCount();
  oplog3.incTotalCount();
  oplog3.incLiveCount();
  dr.addToBeCompacted(oplog1);
  dr.addToBeCompacted(oplog2);
  dr.addToBeCompacted(oplog3);
  // with every entry still live, no oplog is eligible for compaction yet
  assertEquals(null, dr.getOplogToBeCompacted());
  // bumping the total count lowers oplog1's live-entry ratio, making it compactable
  oplog1.incTotalCount();
  if (oplog1 != dr.getOplogToBeCompacted()[0]) {
    fail("expected oplog1 to be the first oplog to be compacted, but it was not");
  }
  dr.removeOplog(oplog1.getOplogId());
  assertEquals(null, dr.getOplogToBeCompacted());
  oplog2.incTotalCount();
  if (oplog2 != dr.getOplogToBeCompacted()[0]) {
    fail("expected oplog2 to be the next oplog to be compacted, but it was not");
  }
  dr.removeOplog(oplog2.getOplogId());
  assertEquals(null, dr.getOplogToBeCompacted());
  oplog3.incTotalCount();
  if (oplog3 != dr.getOplogToBeCompacted()[0]) {
    fail("expected oplog3 to be the last oplog to be compacted, but it was not");
  }
  dr.removeOplog(oplog3.getOplogId());
  oplog1.destroy();
  oplog2.destroy();
  oplog3.destroy();
  closeDown();
  deleteFiles();
}