Use of org.apache.geode.internal.cache.control.InternalResourceManager in the Apache Geode project.
Class PartitionedRegionRebalanceOp, method execute().
/**
 * Do the actual rebalance.
 *
 * @return the details of the rebalance.
 */
public Set<PartitionRebalanceInfo> execute() {
  long start = System.nanoTime();
  InternalResourceManager resourceManager =
      InternalResourceManager.getInternalResourceManager(leaderRegion.getCache());
  MembershipListener listener = new MembershipChangeListener();
  if (isRebalance) {
    InternalResourceManager.getResourceObserver().rebalancingStarted(targetRegion);
  } else {
    InternalResourceManager.getResourceObserver().recoveryStarted(targetRegion);
  }
  RecoveryLock lock = null;
  try {
    if (!checkAndSetColocatedRegions()) {
      return Collections.emptySet();
    }
    // have full redundancy.
    if (!isRebalanceNecessary()) {
      return Collections.emptySet();
    }
    if (!simulate) {
      lock = leaderRegion.getRecoveryLock();
      lock.lock();
    }
    // have fixed it already.
    if (!isRebalanceNecessary()) {
      return Collections.emptySet();
    }
    // register a listener to notify us if the new members leave or join.
    // When a membership change occurs, we want to restart the rebalancing
    // from the beginning.
    // TODO rebalance - we should really add a membership listener to ALL of
    // the colocated regions.
    leaderRegion.getRegionAdvisor().addMembershipListener(listener);
    PartitionedRegionLoadModel model = null;
    InternalCache cache = leaderRegion.getCache();
    Map<PartitionedRegion, InternalPRInfo> detailsMap = fetchDetails(cache);
    BucketOperatorWrapper serialOperator = getBucketOperator(detailsMap);
    ParallelBucketOperator parallelOperator = new ParallelBucketOperator(MAX_PARALLEL_OPERATIONS,
        cache.getDistributionManager().getWaitingThreadPool(), serialOperator);
    model = buildModel(parallelOperator, detailsMap, resourceManager);
    for (PartitionRebalanceDetailsImpl details : serialOperator.getDetailSet()) {
      details.setPartitionMemberDetailsBefore(
          model.getPartitionedMemberDetails(details.getRegionPath()));
    }
    director.initialize(model);
    for (;;) {
      if (cancelled.get()) {
        return Collections.emptySet();
      }
      if (membershipChange) {
        membershipChange = false;
        // refetch the partitioned region details after
        // a membership change.
        debug("Rebalancing {} detected membership changes. Refetching details", leaderRegion);
        if (this.stats != null) {
          this.stats.incRebalanceMembershipChanges(1);
        }
        model.waitForOperations();
        detailsMap = fetchDetails(cache);
        model = buildModel(parallelOperator, detailsMap, resourceManager);
        director.membershipChanged(model);
      }
      leaderRegion.checkClosed();
      cache.getCancelCriterion().checkCancelInProgress(null);
      if (logger.isDebugEnabled()) {
        logger.debug("Rebalancing {} Model:{}\n", leaderRegion, model);
      }
      if (!director.nextStep()) {
        // Stop when the director says we can't rebalance any more.
        break;
      }
    }
    debug("Rebalancing {} complete. Model:{}\n", leaderRegion, model);
    long end = System.nanoTime();
    for (PartitionRebalanceDetailsImpl details : serialOperator.getDetailSet()) {
      if (!simulate) {
        details.setTime(end - start);
      }
      details.setPartitionMemberDetailsAfter(
          model.getPartitionedMemberDetails(details.getRegionPath()));
    }
    return Collections.<PartitionRebalanceInfo>unmodifiableSet(serialOperator.getDetailSet());
  } finally {
    if (lock != null) {
      try {
        lock.unlock();
      } catch (CancelException e) {
        // lock service has been destroyed
      } catch (Exception e) {
        logger.error(LocalizedMessage.create(
            LocalizedStrings.PartitionedRegionRebalanceOp_UNABLE_TO_RELEASE_RECOVERY_LOCK), e);
      }
    }
    try {
      if (isRebalance) {
        InternalResourceManager.getResourceObserver().rebalancingFinished(targetRegion);
      } else {
        InternalResourceManager.getResourceObserver().recoveryFinished(targetRegion);
      }
    } catch (Exception e) {
      logger.error(LocalizedMessage.create(
          LocalizedStrings.PartitionedRegionRebalanceOp_ERROR_IN_RESOURCE_OBSERVER), e);
    }
    try {
      leaderRegion.getRegionAdvisor().removeMembershipListener(listener);
    } catch (Exception e) {
      logger.error(LocalizedMessage.create(
          LocalizedStrings.PartitionedRegionRebalanceOp_ERROR_IN_RESOURCE_OBSERVER), e);
    }
  }
}
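Applications do not call PartitionedRegionRebalanceOp directly; the operation is driven through the public ResourceManager API. The following is a minimal, hedged sketch (the class and method names are illustrative, and getResults() is assumed to block until completion and to declare InterruptedException):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;

public class RebalanceExample {
  /** Starts a cluster-wide rebalance and waits for its results. */
  public static RebalanceResults rebalance(Cache cache) throws InterruptedException {
    RebalanceOperation op = cache.getResourceManager()
        .createRebalanceFactory()
        .start(); // start() runs the real operation; simulate() would perform a dry run
    return op.getResults(); // blocks until the rebalance work completes
  }
}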
Use of org.apache.geode.internal.cache.control.InternalResourceManager in the Apache Geode project.
Class LocalRegion, method postCreateRegion().
/**
 * Called after this region has been completely created
 *
 * @since GemFire 5.0
 *
 * @see DistributedRegion#postDestroyRegion(boolean, RegionEventImpl)
 */
protected void postCreateRegion() {
  if (getEvictionAttributes().getAlgorithm().isLRUHeap()) {
    final LogWriter logWriter = this.cache.getLogger();
    float evictionPercentage = DEFAULT_HEAPLRU_EVICTION_HEAP_PERCENTAGE;
    // This is new to 6.5. If a heap lru region is created
    // we make sure that the eviction percentage is enabled.
    InternalResourceManager rm = this.cache.getInternalResourceManager();
    if (!getOffHeap()) {
      if (!rm.getHeapMonitor().hasEvictionThreshold()) {
        float criticalPercentage = rm.getCriticalHeapPercentage();
        if (criticalPercentage > 0.0f) {
          if (criticalPercentage >= 10.f) {
            evictionPercentage = criticalPercentage - 5.0f;
          } else {
            evictionPercentage = criticalPercentage;
          }
        }
        rm.setEvictionHeapPercentage(evictionPercentage);
        if (logWriter.fineEnabled()) {
          logWriter.fine("Enabled heap eviction at " + evictionPercentage + " percent for LRU region");
        }
      }
    } else {
      if (!rm.getOffHeapMonitor().hasEvictionThreshold()) {
        float criticalPercentage = rm.getCriticalOffHeapPercentage();
        if (criticalPercentage > 0.0f) {
          if (criticalPercentage >= 10.f) {
            evictionPercentage = criticalPercentage - 5.0f;
          } else {
            evictionPercentage = criticalPercentage;
          }
        }
        rm.setEvictionOffHeapPercentage(evictionPercentage);
        if (logWriter.fineEnabled()) {
          logWriter.fine("Enabled off-heap eviction at " + evictionPercentage + " percent for LRU region");
        }
      }
    }
  }
  if (!isInternalRegion()) {
    getCachePerfStats().incRegions(1);
    if (getMembershipAttributes().hasRequiredRoles()) {
      getCachePerfStats().incReliableRegions(1);
    }
  }
  if (hasListener()) {
    RegionEventImpl event = new RegionEventImpl(this, Operation.REGION_CREATE, null, false, getMyId());
    dispatchListenerEvent(EnumListenerEvent.AFTER_REGION_CREATE, event);
  }
  releaseAfterRegionCreateEventLatch();
  SystemMemberCacheEventProcessor.send(getCache(), this, Operation.REGION_CREATE);
  initializingRegion.remove();
}
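postCreateRegion() only derives an eviction threshold when none has been configured. Below is a short sketch (assuming an already-created Cache named cache; the 90/80 values are illustrative) of setting the thresholds explicitly through the public ResourceManager API so the automatic fallback never applies:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.control.ResourceManager;

public class EvictionThresholdExample {
  public static void configureHeapThresholds(Cache cache) {
    ResourceManager rm = cache.getResourceManager();
    rm.setCriticalHeapPercentage(90.0f); // member reports critical above 90% tenured heap use
    rm.setEvictionHeapPercentage(80.0f); // heap-LRU regions begin evicting above 80%
  }
}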
Use of org.apache.geode.internal.cache.control.InternalResourceManager in the Apache Geode project.
Class AutoBalancerJUnitTest, method getFacadeForResourceManagerOps().
private GeodeCacheFacade getFacadeForResourceManagerOps(final boolean simulate) throws Exception {
  final GemFireCacheImpl mockCache = mockContext.mock(GemFireCacheImpl.class);
  final InternalResourceManager mockRM = mockContext.mock(InternalResourceManager.class);
  final RebalanceFactory mockRebalanceFactory = mockContext.mock(RebalanceFactory.class);
  final RebalanceOperation mockRebalanceOperation = mockContext.mock(RebalanceOperation.class);
  final RebalanceResults mockRebalanceResults = mockContext.mock(RebalanceResults.class);
  mockContext.checking(new Expectations() {
    {
      oneOf(mockCache).isClosed();
      will(returnValue(false));
      oneOf(mockCache).getResourceManager();
      will(returnValue(mockRM));
      oneOf(mockRM).createRebalanceFactory();
      will(returnValue(mockRebalanceFactory));
      if (simulate) {
        oneOf(mockRebalanceFactory).simulate();
      } else {
        oneOf(mockRebalanceFactory).start();
      }
      will(returnValue(mockRebalanceOperation));
      oneOf(mockRebalanceOperation).getResults();
      will(returnValue(mockRebalanceResults));
      if (simulate) {
        atLeast(1).of(mockRebalanceResults).getTotalBucketTransferBytes();
        will(returnValue(12345L));
      }
      allowing(mockRebalanceResults);
    }
  });
  GeodeCacheFacade facade = new GeodeCacheFacade(mockCache);
  return facade;
}
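The JMock expectations above encode the call sequence the facade under test issues against the resource manager. For reference, a hedged stand-alone sketch of the simulate branch against the public API (class and method names are illustrative; getResults() is assumed to declare InterruptedException):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.control.RebalanceFactory;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;

public class RebalanceSimulationExample {
  /** Returns the bytes a rebalance would transfer, without moving any data. */
  public static long estimateTransferBytes(Cache cache) throws InterruptedException {
    RebalanceFactory factory = cache.getResourceManager().createRebalanceFactory();
    RebalanceOperation op = factory.simulate(); // dry run, mirroring the simulate branch above
    RebalanceResults results = op.getResults();
    return results.getTotalBucketTransferBytes();
  }
}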
Use of org.apache.geode.internal.cache.control.InternalResourceManager in the Apache Geode project.
Class HeapEvictor, method getAllRegionList().
private List<LocalRegion> getAllRegionList() {
  List<LocalRegion> allRegionList = new ArrayList<LocalRegion>();
  InternalResourceManager irm = (InternalResourceManager) cache.getResourceManager();
  for (ResourceListener<MemoryEvent> listener : irm.getResourceListeners(getResourceType())) {
    if (listener instanceof PartitionedRegion) {
      PartitionedRegion pr = (PartitionedRegion) listener;
      if (includePartitionedRegion(pr)) {
        allRegionList.addAll(pr.getDataStore().getAllLocalBucketRegions());
      }
    } else if (listener instanceof LocalRegion) {
      LocalRegion lr = (LocalRegion) listener;
      if (includeLocalRegion(lr)) {
        allRegionList.add(lr);
      }
    }
  }
  if (HeapEvictor.MINIMUM_ENTRIES_PER_BUCKET > 0) {
    Iterator<LocalRegion> iter = allRegionList.iterator();
    while (iter.hasNext()) {
      LocalRegion lr = iter.next();
      if (lr instanceof BucketRegion) {
        if (((BucketRegion) lr).getNumEntriesInVM() <= HeapEvictor.MINIMUM_ENTRIES_PER_BUCKET) {
          iter.remove();
        }
      }
    }
  }
  return allRegionList;
}
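The evictor builds its candidate list from the listeners registered with the resource manager for its resource type. A hedged sketch of querying that same registry directly; it assumes the nested InternalResourceManager.ResourceType.HEAP_MEMORY constant and the getResourceListeners(...) accessor used above, and the class name is illustrative:

import org.apache.geode.cache.Cache;
import org.apache.geode.internal.cache.LocalRegion;
import org.apache.geode.internal.cache.control.InternalResourceManager;
import org.apache.geode.internal.cache.control.InternalResourceManager.ResourceType;
import org.apache.geode.internal.cache.control.MemoryEvent;
import org.apache.geode.internal.cache.control.ResourceListener;

public class HeapListenerInspection {
  /** Counts heap-memory listeners that are regions, i.e. the candidates HeapEvictor considers. */
  public static int countRegionListeners(Cache cache) {
    InternalResourceManager irm = (InternalResourceManager) cache.getResourceManager();
    int count = 0;
    for (ResourceListener<MemoryEvent> listener : irm.getResourceListeners(ResourceType.HEAP_MEMORY)) {
      if (listener instanceof LocalRegion) {
        count++;
      }
    }
    return count;
  }
}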
Use of org.apache.geode.internal.cache.control.InternalResourceManager in the Apache Geode project.
Class HeapLRUCapacityController, method createLRUHelper().
@Override
protected EnableLRU createLRUHelper() {
  return new AbstractEnableLRU() {

    /**
     * Indicate what kind of <code>EvictionAlgorithm</code> this helper implements
     */
    public EvictionAlgorithm getEvictionAlgorithm() {
      return EvictionAlgorithm.LRU_HEAP;
    }

    /**
     * As far as we're concerned all entries have the same size
     */
    public int entrySize(Object key, Object value) throws IllegalArgumentException {
      /*
       * if (value != null) { return 1; } else { return 0; }
       */
      if (value == Token.TOMBSTONE) {
        return 0;
      }
      int size = HeapLRUCapacityController.this.getPerEntryOverhead();
      size += sizeof(key);
      size += sizeof(value);
      return size;
    }

    /**
     * In addition to initializing the statistics, create an evictor thread to periodically evict
     * the LRU entry.
     */
    @Override
    public LRUStatistics initStats(Object region, StatisticsFactory sf) {
      setRegionName(region);
      final LRUStatistics stats = new HeapLRUStatistics(sf, getRegionName(), this);
      setStats(stats);
      return stats;
    }

    public StatisticsType getStatisticsType() {
      return statType;
    }

    public String getStatisticsName() {
      return "HeapLRUStatistics";
    }

    public int getLimitStatId() {
      throw new UnsupportedOperationException("Limit not used with this LRU type");
    }

    public int getCountStatId() {
      return statType.nameToId("entryBytes");
    }

    public int getEvictionsStatId() {
      return statType.nameToId("lruEvictions");
    }

    public int getDestroysStatId() {
      return statType.nameToId("lruDestroys");
    }

    public int getDestroysLimitStatId() {
      return statType.nameToId("lruDestroysLimit");
    }

    public int getEvaluationsStatId() {
      return statType.nameToId("lruEvaluations");
    }

    public int getGreedyReturnsStatId() {
      return statType.nameToId("lruGreedyReturns");
    }
    /**
     * Okay, deep breath. Instead of basing the LRU calculation on the number of entries in the
     * region or on their "size" (which turned out to be incorrectly estimated in the general
     * case), we use the amount of memory currently in use. If the amount of memory currently in
     * use ({@linkplain Runtime#maxMemory max memory} - {@linkplain Runtime#freeMemory free
     * memory}) is greater than the overflow threshold, then we evict the LRU entry.
     */
    public boolean mustEvict(LRUStatistics stats, Region region, int delta) {
      final InternalCache cache = (InternalCache) region.getRegionService();
      InternalResourceManager resourceManager = cache.getInternalResourceManager();
      boolean offheap = region.getAttributes().getOffHeap();
      final boolean monitorStateIsEviction =
          resourceManager.getMemoryMonitor(offheap).getState().isEviction();
      if (region instanceof BucketRegion) {
        return monitorStateIsEviction && ((BucketRegion) region).getSizeForEviction() > 0;
      }
      return monitorStateIsEviction && ((LocalRegion) region).getRegionMap().sizeInVM() > 0;
    }

    @Override
    public boolean lruLimitExceeded(LRUStatistics lruStatistics, DiskRegionView drv) {
      InternalResourceManager resourceManager =
          drv.getDiskStore().getCache().getInternalResourceManager();
      return resourceManager.getMemoryMonitor(drv.getOffHeap()).getState().isEviction();
    }
  };
}
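The mustEvict(...) check above only applies to regions configured with heap-LRU eviction. A minimal sketch (region name is illustrative) of creating such a region via the PARTITION_HEAP_LRU shortcut, which is pre-configured with heap-LRU eviction and therefore driven by the InternalResourceManager heap monitor; explicitly calling setEvictionAttributes(EvictionAttributes.createLRUHeapAttributes()) on a plain PARTITION factory is a roughly equivalent way to get there:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class HeapLruRegionExample {
  public static Region<String, String> createHeapLruRegion(Cache cache) {
    // PARTITION_HEAP_LRU configures heap-LRU eviction for the region out of the box.
    return cache.<String, String>createRegionFactory(RegionShortcut.PARTITION_HEAP_LRU)
        .create("exampleRegion");
  }
}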