Use of org.apache.geode.internal.cache.control.PartitionRebalanceDetailsImpl in project geode by apache.
From the class BucketOperatorWrapperTest, method bucketWrapperShouldRecordBytesTransferredPerRegionAfterMoveBucketIsSuccessful.
@Test
public void bucketWrapperShouldRecordBytesTransferredPerRegionAfterMoveBucketIsSuccessful() {
  doReturn(true).when(delegate).moveBucket(sourceMember, targetMember, bucketId, colocatedRegionBytes);

  wrapper.moveBucket(sourceMember, targetMember, bucketId, colocatedRegionBytes);

  // Verify the details are updated with the bytes transferred. Region paths are
  // compared case-sensitively with equals() for both branches; the original mixed
  // equalsIgnoreCase and equals, which was inconsistent.
  for (PartitionRebalanceDetailsImpl details : rebalanceDetails) {
    if (details.getRegionPath().equals(PR_LEADER_REGION_NAME)) {
      // Elapsed time is recorded only for the leader region, hence anyLong() here.
      verify(details, times(1)).incTransfers(eq(colocatedRegionBytes.get(PR_LEADER_REGION_NAME)), anyLong());
    } else if (details.getRegionPath().equals(PR_COLOCATED_REGION_NAME)) {
      // Elapsed is recorded only for the leader, so the colocated region gets 0.
      verify(details, times(1)).incTransfers(colocatedRegionBytes.get(PR_COLOCATED_REGION_NAME), 0);
    }
  }

  // Verify we recorded the necessary stats exactly once.
  verify(stats, times(1)).startBucketTransfer(anyInt());
  verify(stats, times(1)).endBucketTransfer(anyInt(), anyBoolean(), anyLong(), anyLong());
}
Use of org.apache.geode.internal.cache.control.PartitionRebalanceDetailsImpl in project geode by apache.
From the class BucketOperatorWrapper, method removeBucket.
/**
 * Removes a redundant copy of bucket {@code i} from {@code targetMember} via the
 * delegate operator, recording per-region byte counts into the rebalance details
 * and start/end events into the stats (if any).
 *
 * @param targetMember the member the bucket should be removed from
 * @param i the id of the bucket to remove
 * @param colocatedRegionBytes map of region path to the bucket's size in bytes for
 *        each colocated region
 * @return true if the delegate successfully removed the bucket
 */
@Override
public boolean removeBucket(InternalDistributedMember targetMember, int i, Map<String, Long> colocatedRegionBytes) {
  boolean result = false;
  long elapsed = 0;
  long totalBytes = 0;
  if (stats != null) {
    stats.startBucketRemove(regionCount);
  }
  try {
    long start = System.nanoTime();
    result = delegate.removeBucket(targetMember, i, colocatedRegionBytes);
    elapsed = System.nanoTime() - start;
    if (result) {
      if (logger.isDebugEnabled()) {
        logger.debug("Rebalancing {} redundant bucket {} removed from {}", leaderRegion, i, targetMember);
      }
      for (PartitionRebalanceDetailsImpl details : detailSet) {
        String regionPath = details.getRegionPath();
        Long lrb = colocatedRegionBytes.get(regionPath);
        if (lrb != null) {
          // The region could have gone away - especially during shutdown.
          long regionBytes = lrb.longValue();
          // Only add the elapsed time to the leader region.
          details.incRemoves(regionBytes, details.getRegion().equals(leaderRegion) ? elapsed : 0);
          totalBytes += regionBytes;
        }
      }
    } else {
      if (logger.isDebugEnabled()) {
        // Fixed garbled placeholder in the message: "o{}" -> "of {}".
        logger.debug("Rebalancing {} redundant bucket {} failed removal of {}", leaderRegion, i, targetMember);
      }
    }
  } finally {
    // Always close out the stats entry, even if the delegate threw.
    if (stats != null) {
      stats.endBucketRemove(regionCount, result, totalBytes, elapsed);
    }
  }
  return result;
}
Use of org.apache.geode.internal.cache.control.PartitionRebalanceDetailsImpl in project geode by apache.
From the class PartitionedRegionRebalanceOp, method execute.
/**
 * Do the actual rebalance.
 *
 * <p>Acquires the leader region's recovery lock (unless simulating), builds a load
 * model from fetched partitioned-region details, and repeatedly asks the director
 * for the next rebalancing step until none remain. If membership changes during
 * the run, the details are refetched and the model rebuilt.
 *
 * @return the details of the rebalance; an empty set if rebalancing was
 *         unnecessary, cancelled, or the colocated-region check failed.
 */
public Set<PartitionRebalanceInfo> execute() {
  long start = System.nanoTime();
  InternalResourceManager resourceManager = InternalResourceManager.getInternalResourceManager(leaderRegion.getCache());
  MembershipListener listener = new MembershipChangeListener();
  // Notify the resource observer whether this run is a rebalance or a
  // redundancy recovery; the matching finished callback fires in the finally.
  if (isRebalance) {
    InternalResourceManager.getResourceObserver().rebalancingStarted(targetRegion);
  } else {
    InternalResourceManager.getResourceObserver().recoveryStarted(targetRegion);
  }
  RecoveryLock lock = null;
  try {
    if (!checkAndSetColocatedRegions()) {
      return Collections.emptySet();
    }
    // Early out if we already have full redundancy.
    if (!isRebalanceNecessary()) {
      return Collections.emptySet();
    }
    if (!simulate) {
      lock = leaderRegion.getRecoveryLock();
      lock.lock();
    }
    // Re-check under the lock: another member may have fixed it already.
    if (!isRebalanceNecessary()) {
      return Collections.emptySet();
    }
    // register a listener to notify us if the new members leave or join.
    // When a membership change occurs, we want to restart the rebalancing
    // from the beginning.
    // TODO rebalance - we should really add a membership listener to ALL of
    // the colocated regions.
    leaderRegion.getRegionAdvisor().addMembershipListener(listener);
    PartitionedRegionLoadModel model = null;
    InternalCache cache = leaderRegion.getCache();
    Map<PartitionedRegion, InternalPRInfo> detailsMap = fetchDetails(cache);
    BucketOperatorWrapper serialOperator = getBucketOperator(detailsMap);
    // Bucket operations fan out to the waiting thread pool, bounded by
    // MAX_PARALLEL_OPERATIONS; the wrapper still records the details.
    ParallelBucketOperator parallelOperator = new ParallelBucketOperator(MAX_PARALLEL_OPERATIONS, cache.getDistributionManager().getWaitingThreadPool(), serialOperator);
    model = buildModel(parallelOperator, detailsMap, resourceManager);
    // Snapshot per-member details before any buckets are moved.
    for (PartitionRebalanceDetailsImpl details : serialOperator.getDetailSet()) {
      details.setPartitionMemberDetailsBefore(model.getPartitionedMemberDetails(details.getRegionPath()));
    }
    director.initialize(model);
    for (; ; ) {
      if (cancelled.get()) {
        return Collections.emptySet();
      }
      if (membershipChange) {
        membershipChange = false;
        // refetch the partitioned region details after
        // a membership change.
        debug("Rebalancing {} detected membership changes. Refetching details", leaderRegion);
        if (this.stats != null) {
          this.stats.incRebalanceMembershipChanges(1);
        }
        // Let in-flight bucket operations drain before rebuilding the model.
        model.waitForOperations();
        detailsMap = fetchDetails(cache);
        model = buildModel(parallelOperator, detailsMap, resourceManager);
        director.membershipChanged(model);
      }
      leaderRegion.checkClosed();
      cache.getCancelCriterion().checkCancelInProgress(null);
      if (logger.isDebugEnabled()) {
        logger.debug("Rebalancing {} Model:{}\n", leaderRegion, model);
      }
      if (!director.nextStep()) {
        // Stop when the director says we can't rebalance any more.
        break;
      }
    }
    debug("Rebalancing {} complete. Model:{}\n", leaderRegion, model);
    long end = System.nanoTime();
    // Record elapsed time (real runs only) and the post-rebalance member details.
    for (PartitionRebalanceDetailsImpl details : serialOperator.getDetailSet()) {
      if (!simulate) {
        details.setTime(end - start);
      }
      details.setPartitionMemberDetailsAfter(model.getPartitionedMemberDetails(details.getRegionPath()));
    }
    return Collections.<PartitionRebalanceInfo>unmodifiableSet(serialOperator.getDetailSet());
  } finally {
    if (lock != null) {
      try {
        lock.unlock();
      } catch (CancelException e) {
        // lock service has been destroyed
      } catch (Exception e) {
        logger.error(LocalizedMessage.create(LocalizedStrings.PartitionedRegionRebalanceOp_UNABLE_TO_RELEASE_RECOVERY_LOCK), e);
      }
    }
    // Fire the matching finished callback for the started callback above.
    try {
      if (isRebalance) {
        InternalResourceManager.getResourceObserver().rebalancingFinished(targetRegion);
      } else {
        InternalResourceManager.getResourceObserver().recoveryFinished(targetRegion);
      }
    } catch (Exception e) {
      logger.error(LocalizedMessage.create(LocalizedStrings.PartitionedRegionRebalanceOp_ERROR_IN_RESOURCE_OBSERVER), e);
    }
    try {
      leaderRegion.getRegionAdvisor().removeMembershipListener(listener);
    } catch (Exception e) {
      logger.error(LocalizedMessage.create(LocalizedStrings.PartitionedRegionRebalanceOp_ERROR_IN_RESOURCE_OBSERVER), e);
    }
  }
}
Use of org.apache.geode.internal.cache.control.PartitionRebalanceDetailsImpl in project geode by apache.
From the class PartitionedRegionRebalanceOp, method getBucketOperator.
/**
 * Builds a {@link BucketOperatorWrapper} for the given region details, seeding it
 * with one {@link PartitionRebalanceDetailsImpl} per partitioned region. When this
 * op is a simulation the wrapped operator performs no real bucket operations.
 */
private BucketOperatorWrapper getBucketOperator(Map<PartitionedRegion, InternalPRInfo> detailsMap) {
  Set<PartitionRebalanceDetailsImpl> rebalanceDetails = new HashSet<PartitionRebalanceDetailsImpl>(detailsMap.size());
  for (PartitionedRegion region : detailsMap.keySet()) {
    rebalanceDetails.add(new PartitionRebalanceDetailsImpl(region));
  }
  BucketOperator delegate;
  if (simulate) {
    delegate = new SimulatedBucketOperator();
  } else {
    delegate = new BucketOperatorImpl(this);
  }
  return new BucketOperatorWrapper(delegate, rebalanceDetails, stats, leaderRegion);
}
Use of org.apache.geode.internal.cache.control.PartitionRebalanceDetailsImpl in project geode by apache.
From the class PartitionedRegionRebalanceOp, method executeFPA.
/**
 * For FPR we will create buckets and make primaries as specified by
 * FixedPartitionAttributes. We have to just create buckets and make primaries for
 * the local node.
 *
 * <p>Unlike {@code execute()}, no recovery lock is taken and no membership
 * listener is registered; a single director step performs all required work.
 *
 * @return the details of the rebalance; an empty set if the colocated-region
 *         check failed.
 */
public Set<PartitionRebalanceInfo> executeFPA() {
  if (logger.isDebugEnabled()) {
    logger.debug("Rebalancing buckets for fixed partitioned region {}", this.targetRegion);
  }
  long start = System.nanoTime();
  InternalCache cache = leaderRegion.getCache();
  InternalResourceManager resourceManager = InternalResourceManager.getInternalResourceManager(cache);
  // FPA bucket creation is always reported to observers as recovery,
  // never as a rebalance.
  InternalResourceManager.getResourceObserver().recoveryStarted(targetRegion);
  try {
    if (!checkAndSetColocatedRegions()) {
      return Collections.emptySet();
    }
    // If I am a datastore of a FixedPartition, I will be hosting bucket so no
    // need of redundancy check.
    // No need to attach listener as well, we know that we are just creating bucket
    // for primary and secondary. We are not creating extra bucket for any of peers
    // who goes down.
    PartitionedRegionLoadModel model = null;
    Map<PartitionedRegion, InternalPRInfo> detailsMap = fetchDetails(cache);
    BucketOperatorWrapper operator = getBucketOperator(detailsMap);
    model = buildModel(operator, detailsMap, resourceManager);
    // Snapshot per-member details before any buckets are created.
    for (PartitionRebalanceDetailsImpl details : operator.getDetailSet()) {
      details.setPartitionMemberDetailsBefore(model.getPartitionedMemberDetails(details.getRegionPath()));
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Rebalancing FPR {} Model:{}\n", leaderRegion, model);
    }
    director.initialize(model);
    // This will perform all of the required operations.
    director.nextStep();
    if (logger.isDebugEnabled()) {
      logger.debug("Rebalancing FPR {} complete. Model:{}\n", leaderRegion, model);
    }
    long end = System.nanoTime();
    // Record elapsed time (real runs only) and the post-operation member details.
    for (PartitionRebalanceDetailsImpl details : operator.getDetailSet()) {
      if (!simulate) {
        details.setTime(end - start);
      }
      details.setPartitionMemberDetailsAfter(model.getPartitionedMemberDetails(details.getRegionPath()));
    }
    return Collections.<PartitionRebalanceInfo>unmodifiableSet(operator.getDetailSet());
  } finally {
    try {
      InternalResourceManager.getResourceObserver().recoveryFinished(targetRegion);
    } catch (Exception e) {
      logger.debug(LocalizedMessage.create(LocalizedStrings.PartitionedRegionRebalanceOp_ERROR_IN_RESOURCE_OBSERVER), e);
    }
  }
}
Aggregations