Usage of org.apache.geode.internal.cache.partitioned.RegionAdvisor.PartitionProfile in the Apache Geode project.
From class PRHARedundancyProvider, method createBackupBucketOnMember:
/**
 * Creates bucket with ID bucketId on targetNode. This method will also create the bucket for all
 * of the child colocated PRs.
 *
 * @param bucketId id of the bucket to create a backup (redundant) copy of
 * @param targetNMember member that should host the backup copy; may be this member, in which
 *        case the bucket is grabbed directly from the local data store
 * @param isRebalance true if bucket creation is directed by rebalancing
 * @param replaceOfflineData passed through to the bucket-creation request / local grab;
 *        presumably allows replacing offline persistent data — TODO confirm
 * @param moveSource passed through to the bucket-creation request / local grab; presumably the
 *        member the bucket is being moved from — TODO confirm
 * @param forceCreation passed through to the bucket-creation request / local grab
 * @return true if the bucket was successfully created
 */
public boolean createBackupBucketOnMember(final int bucketId, final InternalDistributedMember targetNMember, final boolean isRebalance, boolean replaceOfflineData, InternalDistributedMember moveSource, boolean forceCreation) {
if (logger.isDebugEnabled()) {
logger.debug("createBackupBucketOnMember for bucketId={} member: {}", this.prRegion.bucketStringForLogs(bucketId), targetNMember);
}
// Remote target: ask the member to create the backup bucket via a message.
if (!(targetNMember.equals(this.prRegion.getMyId()))) {
// final StoppableReentrantReadWriteLock.StoppableReadLock isClosingReadLock;
PartitionProfile pp = this.prRegion.getRegionAdvisor().getPartitionProfile(targetNMember);
if (pp != null) {
// isClosingReadLock = pp.getIsClosingReadLock(
// this.prRegion.getCancelCriterion());
} else {
// No partition profile for the target: it cannot host the bucket.
return false;
}
try {
ManageBackupBucketMessage.NodeResponse response = ManageBackupBucketMessage.send(targetNMember, this.prRegion, bucketId, isRebalance, replaceOfflineData, moveSource, forceCreation);
// Block until the target accepts or rejects the request.
if (response.waitForAcceptance()) {
if (logger.isDebugEnabled()) {
logger.debug("createBackupBucketOnMember: Bucket creation succeed for bucketId={} on member = {}", this.prRegion.bucketStringForLogs(bucketId), targetNMember);
}
return true;
} else {
if (logger.isDebugEnabled()) {
logger.debug("createBackupBucketOnMember: Bucket creation failed for bucketId={} on member = {}", this.prRegion.bucketStringForLogs(bucketId), targetNMember);
}
return false;
}
} catch (VirtualMachineError err) {
// VM-level errors must initiate the SystemFailure protocol immediately.
SystemFailure.initiateFailure(err);
// now, so don't let this thread continue.
throw err;
} catch (Throwable e) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
if (e instanceof ForceReattemptException) {
// no log needed see bug 37569
} else if (e instanceof CancelException || (e.getCause() != null && (e.getCause() instanceof CancelException))) {
// no need to log exceptions caused by cache closure
} else {
logger.warn(LocalizedMessage.create(LocalizedStrings.PRHARedundancyProvider_EXCEPTION_CREATING_PARTITION_ON__0, targetNMember), e);
}
// Any failure during remote creation is reported as "not created".
return false;
}
} else {
// Local target: grab the bucket directly from this member's data store.
final PartitionedRegionDataStore prDS = this.prRegion.getDataStore();
boolean bucketManaged = prDS != null && prDS.grabBucket(bucketId, moveSource, forceCreation, replaceOfflineData, isRebalance, null, false).equals(CreateBucketResult.CREATED);
if (!bucketManaged) {
if (logger.isDebugEnabled()) {
logger.debug("createBackupBucketOnMember: Local data store refused to accommodate the data for bucketId={} prDS={}", this.prRegion.bucketStringForLogs(bucketId), prDS);
}
}
return bucketManaged;
}
}
Usage of org.apache.geode.internal.cache.partitioned.RegionAdvisor.PartitionProfile in the Apache Geode project.
From class PRHARedundancyProvider, method createBucketOnMember:
/**
 * Creates bucket with ID bucketId on targetNode.
 *
 * @param bucketId id of the bucket to create
 * @param targetNMember member that should host the bucket; may be this member, in which case
 *        the request is handled directly by the local data store
 * @param newBucketSize size to create the bucket with; passed through to the request
 * @param forceCreation inform the targetMember it must attempt host the bucket, appropriately
 *        ignoring it's maximums
 * @return a response object: YES on success; NO on refusal or failure; NO_INITIALIZING when the
 *         remote member rejected the request because it is still initializing; CLOSED when the
 *         remote cache is closing
 */
public ManageBucketRsp createBucketOnMember(final int bucketId, final InternalDistributedMember targetNMember, final int newBucketSize, boolean forceCreation) {
if (logger.isDebugEnabled()) {
logger.debug("createBucketOnMember for bucketId={} member: {}{}", this.prRegion.bucketStringForLogs(bucketId), targetNMember, (forceCreation ? " forced" : ""));
}
// Remote target: ask the member to create the bucket via a message.
if (!(targetNMember.equals(this.prRegion.getMyId()))) {
// final StoppableReentrantReadWriteLock.StoppableReadLock isClosingReadLock;
PartitionProfile pp = this.prRegion.getRegionAdvisor().getPartitionProfile(targetNMember);
if (pp != null) {
// isClosingReadLock = pp.getIsClosingReadLock(
// this.prRegion.getCancelCriterion());
} else {
// No partition profile for the target: it cannot host the bucket.
return ManageBucketRsp.NO;
}
try {
// isClosingReadLock.lock(); // Grab the read lock, preventing any region closures
// on this remote Node until this bucket is fully published, forcing the closing
// Node to recognize any pre-natal buckets.
NodeResponse response = ManageBucketMessage.send(targetNMember, this.prRegion, bucketId, newBucketSize, forceCreation);
// Block until the target accepts or rejects the request.
if (response.waitForAcceptance()) {
if (logger.isDebugEnabled()) {
logger.debug("createBucketOnMember: Bucket creation succeed for bucketId={} on member = {}", this.prRegion.bucketStringForLogs(bucketId), targetNMember);
}
// lockList.add(isClosingReadLock);
return ManageBucketRsp.YES;
} else {
if (logger.isDebugEnabled()) {
logger.debug("createBucketOnMember: Bucket creation failed for bucketId={} on member = {}", this.prRegion.bucketStringForLogs(bucketId), targetNMember);
}
// isClosingReadLock.unlock();
// Distinguish "still initializing" so the caller can retry later.
return response.rejectedDueToInitialization() ? ManageBucketRsp.NO_INITIALIZING : ManageBucketRsp.NO;
}
} catch (PartitionOfflineException e) {
// Offline persistent partitions must propagate to the caller unchanged.
throw e;
} catch (VirtualMachineError err) {
// VM-level errors must initiate the SystemFailure protocol immediately.
SystemFailure.initiateFailure(err);
// now, so don't let this thread continue.
throw err;
} catch (Throwable e) {
// Whenever you catch Error or Throwable, you must also
// catch VirtualMachineError (see above). However, there is
// _still_ a possibility that you are dealing with a cascading
// error condition, so you also need to check to see if the JVM
// is still usable:
SystemFailure.checkFailure();
if (e instanceof CancelException || (e.getCause() != null && (e.getCause() instanceof CancelException))) {
// no need to log exceptions caused by cache closure
return ManageBucketRsp.CLOSED;
} else if (e instanceof ForceReattemptException) {
// no log needed see bug 37569
} else {
logger.warn(LocalizedMessage.create(LocalizedStrings.PRHARedundancyProvider_EXCEPTION_CREATING_PARTITION_ON__0, targetNMember), e);
}
// isClosingReadLock.unlock();
return ManageBucketRsp.NO;
}
} else {
// Local target: hand the request directly to this member's data store.
final PartitionedRegionDataStore prDS = this.prRegion.getDataStore();
boolean bucketManaged = prDS != null && prDS.handleManageBucketRequest(bucketId, newBucketSize, this.prRegion.getMyId(), forceCreation);
if (!bucketManaged) {
if (logger.isDebugEnabled()) {
logger.debug("createBucketOnMember: Local data store not able to accommodate the data for bucketId={}", this.prRegion.bucketStringForLogs(bucketId));
}
}
return ManageBucketRsp.valueOf(bucketManaged);
}
}
Usage of org.apache.geode.internal.cache.partitioned.RegionAdvisor.PartitionProfile in the Apache Geode project.
From class PRHARedundancyProvider, method createBucketInstance:
/**
 * Create a single copy of this bucket on one node. The bucket must already be locked.
 *
 * @param bucketId The bucket we are working on
 * @param newBucketSize size to create it
 * @param excludedMembers members that must not be considered; members whose cache is closing
 *        are added to this set here
 * @param alreadyUsed members who already seem to have the bucket
 * @param failedMembers members that previously declined the bucket; cleared and retried when
 *        all candidates are exhausted before the timeout
 * @param timeOut point at which to fail
 * @param allStores the set of data stores to choose from
 * @return the new member, null if it fails.
 * @throws PartitionedRegionStorageException if there are not enough data stores
 */
private InternalDistributedMember createBucketInstance(int bucketId, final int newBucketSize, final Set<InternalDistributedMember> excludedMembers, Collection<InternalDistributedMember> alreadyUsed, ArrayListWithClearState<InternalDistributedMember> failedMembers, final long timeOut, final Set<InternalDistributedMember> allStores) {
final boolean isDebugEnabled = logger.isDebugEnabled();
// Recalculate list of candidates
HashSet<InternalDistributedMember> candidateMembers = new HashSet<InternalDistributedMember>(allStores);
candidateMembers.removeAll(alreadyUsed);
candidateMembers.removeAll(excludedMembers);
candidateMembers.removeAll(failedMembers);
if (isDebugEnabled) {
logger.debug("AllStores={} AlreadyUsed={} excluded={} failed={}", allStores, alreadyUsed, excludedMembers, failedMembers);
}
if (candidateMembers.size() == 0) {
// fix for bug #37207
this.prRegion.checkReadiness();
// Run out of candidates. Refetch?
if (System.currentTimeMillis() > timeOut) {
if (isDebugEnabled) {
logger.debug("createBucketInstance: ran out of candidates and timed out");
}
// fail, let caller signal error
return null;
}
// Recalculate: retry previously-failed members by rebuilding the candidate
// set without them and clearing the failure list.
candidateMembers = new HashSet<InternalDistributedMember>(allStores);
candidateMembers.removeAll(alreadyUsed);
candidateMembers.removeAll(excludedMembers);
failedMembers.clear();
}
if (isDebugEnabled) {
logger.debug("createBucketInstance: candidateMembers = {}", candidateMembers);
}
InternalDistributedMember candidate = null;
// If there are no candidates, early out.
if (candidateMembers.size() == 0) {
// no options
if (isDebugEnabled) {
logger.debug("createBucketInstance: no valid candidates");
}
// failure
return null;
} else // no options
{
// required fixed partition is defined.
if (this.prRegion.isFixedPartitionedRegion()) {
// Fixed partitioning: placement is predetermined, take any candidate.
candidate = candidateMembers.iterator().next();
} else {
String prName = this.prRegion.getAttributes().getPartitionAttributes().getColocatedWith();
if (prName != null) {
// Colocated region: the candidate must follow the parent region's placement.
candidate = getColocatedDataStore(candidateMembers, alreadyUsed, bucketId, prName);
} else {
// Plain PR: pick the preferred (e.g. least-loaded) data store.
final ArrayList<InternalDistributedMember> orderedCandidates = new ArrayList<InternalDistributedMember>(candidateMembers);
candidate = getPreferredDataStore(orderedCandidates, alreadyUsed);
}
}
}
if (candidate == null) {
// No acceptable candidate this pass; mark them all failed for the retry logic.
failedMembers.addAll(candidateMembers);
return null;
}
if (!this.prRegion.isShadowPR() && !ColocationHelper.checkMembersColocation(this.prRegion, candidate)) {
if (isDebugEnabled) {
logger.debug("createBucketInstances - Member does not have all of the regions colocated with prRegion {}", candidate);
}
failedMembers.add(candidate);
return null;
}
if (!(candidate.equals(this.prRegion.getMyId()))) {
// myself
PartitionProfile pp = this.prRegion.getRegionAdvisor().getPartitionProfile(candidate);
if (pp == null) {
if (isDebugEnabled) {
logger.debug("createBucketInstance: {}: no partition profile for {}", this.prRegion.getFullPath(), candidate);
}
failedMembers.add(candidate);
return null;
}
}
// myself
// Coordinate with any remote close occurring, causing it to wait until
// this create bucket attempt has been made.
final ManageBucketRsp response = createBucketOnMember(bucketId, candidate, newBucketSize, failedMembers.wasCleared());
// Add targetNode to bucketNodes if successful, else to failedNodeList
if (response.isAcceptance()) {
// success!
return candidate;
}
if (isDebugEnabled) {
logger.debug("createBucketInstance: {}: candidate {} declined to manage bucketId={}: {}", this.prRegion.getFullPath(), candidate, this.prRegion.bucketStringForLogs(bucketId), response);
}
// A closing member is permanently excluded; a mere refusal is retried later.
if (response.equals(ManageBucketRsp.CLOSED)) {
excludedMembers.add(candidate);
} else {
failedMembers.add(candidate);
}
// failure
candidate = null;
return null;
}
Usage of org.apache.geode.internal.cache.partitioned.RegionAdvisor.PartitionProfile in the Apache Geode project.
From class DestroyPartitionedRegionMessage, method operateOnPartitionedRegion:
/**
 * Processes a destroy/close notification for a partitioned region on this member.
 *
 * For a local-destroy notification from the sender, removes the sender's id and bucket
 * profiles from this member's region advisor (unless this member already holds a newer
 * profile serial) and replies to the sender. Otherwise performs the region destroy locally.
 *
 * @param dm the distribution manager delivering this message
 * @param r the partitioned region this message targets
 * @param startTime passed through to the reply for timing
 * @return true when processing is complete and the caller should send the standard reply;
 *         false when this method has already sent its own reply
 */
@Override
protected boolean operateOnPartitionedRegion(DistributionManager dm, PartitionedRegion r, long startTime) throws CacheException {
if (this.op.isLocal()) {
// notify the advisor that the sending member has locally destroyed (or closed) the region
PartitionProfile pp = r.getRegionAdvisor().getPartitionProfile(getSender());
if (pp == null) {
// Sender's profile is already gone; nothing to remove. Fix for bug#36863.
return true;
}
Assert.assertTrue(this.prSerial != DistributionAdvisor.ILLEGAL_SERIAL);
boolean ok = true;
// Examine this peer's profile and look at the serial number in that
// profile. If we have a newer profile, ignore the request.
int oldSerial = pp.getSerialNumber();
if (DistributionAdvisor.isNewerSerialNumber(oldSerial, this.prSerial)) {
ok = false;
if (logger.isDebugEnabled()) {
// NOTE(review): the comparison above uses the profile serial (oldSerial), but this
// log prints the region's own serial — confirm which one "actual" should report.
logger.debug("Not removing region {}; serial requested = {}; actual is {}", r.getName(), this.prSerial, r.getSerialNumber());
}
}
if (ok) {
RegionAdvisor ra = r.getRegionAdvisor();
// Remove the sender's id and its bucket profiles; a full destroy (not close)
// also triggers profile removal for the buckets' persistent state.
ra.removeIdAndBuckets(this.sender, this.prSerial, this.bucketSerials, !this.op.isClose());
}
sendReply(getSender(), getProcessorId(), dm, null, r, startTime);
// Reply already sent above, so tell the caller not to send another.
return false;
}
// we can invoke destroyPartitionedRegionLocally method.
if (r.isDestroyed()) {
boolean isClose = this.op.isClose();
r.destroyPartitionedRegionLocally(!isClose);
return true;
}
if (logger.isTraceEnabled(LogMarker.DM)) {
logger.trace(LogMarker.DM, "{} operateOnRegion: {}", getClass().getName(), r.getFullPath());
}
RegionEventImpl event = new RegionEventImpl(r, this.op, this.cbArg, true, r.getMyId());
r.basicDestroyRegion(event, false, false, true);
return true;
}
Usage of org.apache.geode.internal.cache.partitioned.RegionAdvisor.PartitionProfile in the Apache Geode project.
From class PartitionedRegionQueryEvaluatorIntegrationTest, method populateAllPartitionedRegion:
/**
 * Populates the PR root metadata region and the region advisor with fake profiles for the
 * given nodes, so the test PR appears to have the listed members as data stores.
 *
 * @param pr the partitioned region under test
 * @param nodes list of {@code Node} instances to register as data-store members
 */
private void populateAllPartitionedRegion(PartitionedRegion pr, List nodes) {
Region rootReg = PartitionedRegionHelper.getPRRoot(pr.getCache());
PartitionRegionConfig prConf = new PartitionRegionConfig(pr.getPRId(), pr.getFullPath(), pr.getPartitionAttributes(), pr.getScope());
RegionAdvisor ra = pr.getRegionAdvisor();
for (Object o : nodes) {
Node n = (Node) o;
prConf.addNode(n);
// Fabricate a data-store profile for this node and force it into the advisor.
PartitionProfile pp = (PartitionProfile) ra.createProfile();
pp.peerMemberId = n.getMemberId();
pp.isDataStore = true;
final boolean forceFakeProfile = true;
pr.getRegionAdvisor().putProfile(pp, forceFakeProfile);
}
// Publish the assembled configuration under the PR's identifier in the root region.
rootReg.put(pr.getRegionIdentifier(), prConf);
}
Aggregations