Example usage of org.apache.geode.internal.cache.execute.RegionFunctionContextImpl in the Apache Geode project: class DistributedRegion, method executeLocally.
/**
 * Runs {@code function} on this member for a distributed region and returns the
 * collector that will accumulate its results.
 *
 * <p>Builds a local {@link RegionFunctionContextImpl} (no colocated data / bucket set
 * for a distributed region, hence the two {@code null} arguments) and hands execution
 * off to the executor, reporting whether the call is inside a transaction.
 *
 * @param execution the executor driving this invocation
 * @param function the function to run
 * @param args caller-supplied function arguments
 * @param prid unused here; kept for signature compatibility
 * @param rc caller-supplied result collector, may be wrapped
 * @param filter routing keys for the invocation
 * @param sender forwards results to a client, or {@code null} for peer execution
 * @return the local result collector for this execution
 */
ResultCollector executeLocally(final DistributedRegionFunctionExecutor execution, final Function function, final Object args, int prid, final ResultCollector rc, final Set filter, final ServerToClientFunctionResultSender sender) {
  final DM distributionManager = getDistributionManager();
  final LocalResultCollector<?, ?> collector = execution.getLocalResultCollector(function, rc);
  final DistributedRegionFunctionResultSender functionSender =
      new DistributedRegionFunctionResultSender(distributionManager, collector, function, sender);
  final RegionFunctionContextImpl functionContext = new RegionFunctionContextImpl(
      function.getId(), DistributedRegion.this, args, filter, null, null, functionSender,
      execution.isReExecute());
  execution.executeFunctionOnLocalNode(function, functionContext, functionSender,
      distributionManager, isTX());
  return collector;
}
Example usage of org.apache.geode.internal.cache.execute.RegionFunctionContextImpl in the Apache Geode project: class PartitionedRegion, method executeOnSingleNode.
/**
 * Single key execution on single node.
 *
 * Resolves the bucket for the single routing key (or treats the key itself as a bucket
 * id when {@code isBucketSetAsFilter} is set), selects a member hosting that bucket,
 * retries selection while previously failed nodes are pending, and finally executes the
 * function locally or forwards it to the chosen remote member.
 *
 * @since GemFire 6.0
 */
private ResultCollector executeOnSingleNode(final Function function, final PartitionedRegionFunctionExecutor execution, ResultCollector rc, boolean isPRSingleHop, boolean isBucketSetAsFilter) {
final Set routingKeys = execution.getFilter();
// Exactly one routing key is expected for single-node execution.
final Object key = routingKeys.iterator().next();
final Integer bucketId;
if (isBucketSetAsFilter) {
// The filter already carries a bucket id rather than a region key.
bucketId = (Integer) key;
} else {
// Hash the key to its bucket the same way a regular region operation would.
bucketId = PartitionedRegionHelper.getHashKey(this, Operation.FUNCTION_EXECUTION, key, null, null);
}
InternalDistributedMember targetNode = null;
if (function.optimizeForWrite()) {
// Write-optimized functions must run where the bucket's primary lives; create the
// bucket if it does not exist yet.
targetNode = createBucket(bucketId, 0, null);
HeapMemoryMonitor hmm = ((InternalResourceManager) cache.getResourceManager()).getHeapMonitor();
// Refuse to target a heap-critical member unless the low-memory safeguard is disabled.
if (hmm.isMemberHeapCritical(targetNode) && !MemoryThresholds.isLowMemoryExceptionDisabled()) {
Set<DistributedMember> sm = Collections.singleton((DistributedMember) targetNode);
throw new LowMemoryException(LocalizedStrings.ResourceManager_LOW_MEMORY_FOR_0_FUNCEXEC_MEMBERS_1.toLocalizedString(function.getId(), sm), sm);
}
} else {
// Reads may be served by any member hosting a copy of the bucket.
targetNode = getOrCreateNodeForBucketRead(bucketId);
}
final DistributedMember localVm = getMyId();
// Single-hop clients route directly to the data host; if the client reached the wrong
// member for this bucket, tell it to refresh its metadata by throwing an internal
// invocation-target exception rather than forwarding the call.
if (targetNode != null && isPRSingleHop && !localVm.equals(targetNode)) {
Set<ServerBucketProfile> profiles = this.getRegionAdvisor().getClientBucketProfiles(bucketId);
if (profiles != null) {
for (ServerBucketProfile profile : profiles) {
if (profile.getDistributedMember().equals(targetNode)) {
if (logger.isDebugEnabled()) {
logger.debug("FunctionServiceSingleHop: Found remote node.{}", localVm);
}
throw new InternalFunctionInvocationTargetException(LocalizedStrings.PartitionedRegion_MULTIPLE_TARGET_NODE_FOUND_FOR.toLocalizedString());
}
}
}
}
if (targetNode == null) {
throw new FunctionException(LocalizedStrings.PartitionedRegion_NO_TARGET_NODE_FOUND_FOR_KEY_0.toLocalizedString(key));
}
if (logger.isDebugEnabled()) {
logger.debug("Executing Function: {} setArguments={} on {}", function.getId(), execution.getArguments(), targetNode);
}
// Retry loop: if the chosen target previously failed, re-select a node for the bucket
// until one is found or the retry budget is exhausted; otherwise clear the failed set
// and fall through to execution.
while (!execution.getFailedNodes().isEmpty()) {
RetryTimeKeeper retryTime = new RetryTimeKeeper(this.retryTimeout);
if (execution.getFailedNodes().contains(targetNode.getId())) {
/*
 * if (retryTime.overMaximum()) { PRHARedundancyProvider.timedOut(this, null, null,
 * "doing function execution", this.retryTimeout); // NOTREACHED }
 */
// Fix for Bug # 40083
targetNode = null;
while (targetNode == null) {
if (retryTime.overMaximum()) {
// timedOut() throws; control never returns from it.
PRHARedundancyProvider.timedOut(this, null, null, "doing function execution", this.retryTimeout);
// NOTREACHED
}
// Back off before re-resolving the bucket's host.
retryTime.waitToRetryNode();
if (function.optimizeForWrite()) {
targetNode = getOrCreateNodeForBucketWrite(bucketId, retryTime);
} else {
targetNode = getOrCreateNodeForBucketRead(bucketId);
}
}
if (targetNode == null) {
throw new FunctionException(LocalizedStrings.PartitionedRegion_NO_TARGET_NODE_FOUND_FOR_KEY_0.toLocalizedString(key));
}
} else {
// The current target is healthy; forget the failures so the loop exits.
execution.clearFailedNodes();
}
}
final HashSet<Integer> buckets = new HashSet<Integer>();
buckets.add(bucketId);
final Set<InternalDistributedMember> singleMember = Collections.singleton(targetNode);
execution.validateExecution(function, singleMember);
execution.setExecutionNodes(singleMember);
LocalResultCollector<?, ?> localRC = execution.getLocalResultCollector(function, rc);
if (targetNode.equals(localVm)) {
// The bucket is hosted here: execute in-process with local colocated data.
final DM dm = getDistributionManager();
PartitionedRegionFunctionResultSender resultSender = new PartitionedRegionFunctionResultSender(dm, PartitionedRegion.this, 0, localRC, execution.getServerResultSender(), true, false, execution.isForwardExceptions(), function, buckets);
final FunctionContext context = new RegionFunctionContextImpl(function.getId(), PartitionedRegion.this, execution.getArgumentsForMember(localVm.getId()), routingKeys, ColocationHelper.constructAndGetAllColocatedLocalDataSet(PartitionedRegion.this, buckets), buckets, resultSender, execution.isReExecute());
execution.executeFunctionOnLocalPRNode(function, context, resultSender, dm, isTX());
return localRC;
} else {
// Remote execution: for HA functions pass the raw collector so re-execution can
// restart collection; otherwise use the wrapped local collector.
return executeFunctionOnRemoteNode(targetNode, function, execution.getArgumentsForMember(targetNode.getId()), routingKeys, function.isHA() ? rc : localRC, buckets, execution.getServerResultSender(), execution);
}
}
Example usage of org.apache.geode.internal.cache.execute.RegionFunctionContextImpl in the Apache Geode project: class LocalRegion, method executeFunction.
/**
 * Executes the named function on this member for the given routing keys.
 *
 * <p>Write-optimized functions are rejected up front with a {@link LowMemoryException}
 * when this member has crossed its critical heap threshold (unless that safeguard is
 * disabled). Otherwise the function runs locally via the executor with a context that
 * carries no colocated data or bucket set (both {@code null} for a non-partitioned
 * region).
 *
 * @param execution the executor driving this invocation
 * @param function the function to run
 * @param args caller-supplied function arguments
 * @param rc caller-supplied result collector, may be wrapped
 * @param filter routing keys for the invocation
 * @param sender forwards results to a client, or {@code null} for peer execution
 * @return the collector accumulating this execution's results
 * @since GemFire 5.8Beta
 */
public ResultCollector executeFunction(final DistributedRegionFunctionExecutor execution, final Function function, final Object args, final ResultCollector rc, final Set filter, final ServerToClientFunctionResultSender sender) {
  final boolean lowMemoryCheckEnabled = !MemoryThresholds.isLowMemoryExceptionDisabled();
  if (lowMemoryCheckEnabled && function.optimizeForWrite() && this.memoryThresholdReached.get()) {
    Set<DistributedMember> criticalMembers = getMemoryThresholdReachedMembers();
    throw new LowMemoryException(
        LocalizedStrings.ResourceManager_LOW_MEMORY_FOR_0_FUNCEXEC_MEMBERS_1
            .toLocalizedString(function.getId(), criticalMembers),
        criticalMembers);
  }
  final DM distributionManager = getDistributionManager();
  execution.setExecutionNodes(Collections.singleton(getMyId()));
  final LocalResultCollector<?, ?> collector = execution.getLocalResultCollector(function, rc);
  final DistributedRegionFunctionResultSender functionSender =
      new DistributedRegionFunctionResultSender(distributionManager, collector, function, sender);
  final RegionFunctionContextImpl functionContext = new RegionFunctionContextImpl(
      function.getId(), LocalRegion.this, args, filter, null, null, functionSender,
      execution.isReExecute());
  execution.executeFunctionOnLocalNode(function, functionContext, functionSender,
      distributionManager, isTX());
  return collector;
}
Example usage of org.apache.geode.internal.cache.execute.RegionFunctionContextImpl in the Apache Geode project: class TestFunction, method executeFunctionBucketFilter.
/**
 * Test function: verifies that every bucket id passed via the execution filter is
 * hosted on this node, then streams those ids back through the result sender.
 *
 * <p>Fix: the original sent results with {@code bucketIds[bucketIds.length - 1]},
 * which threw an opaque {@code ArrayIndexOutOfBoundsException} when the local bucket
 * set was empty (and an NPE when it was null). Both cases now fail with a descriptive
 * {@link AssertionError}, consistent with the method's other assertion.
 *
 * @param context the invocation context; must be a {@code RegionFunctionContextImpl}
 *        for a partitioned region
 */
public void executeFunctionBucketFilter(FunctionContext context) {
  // int bucketIDAsFilter = ((Integer)context.getArguments()).intValue();
  // check if the node contains the bucket passed as filter
  RegionFunctionContextImpl rfc = (RegionFunctionContextImpl) context;
  PartitionedRegion pr = (PartitionedRegion) rfc.getDataSet();
  Set<Integer> bucketIDs = rfc.getLocalBucketSet(pr);
  pr.getGemFireCache().getLogger().fine("LOCAL BUCKETSET =" + bucketIDs);
  if (bucketIDs == null || bucketIDs.isEmpty()) {
    // Guard: without this the last-result send below would index bucketIds[-1].
    throw new AssertionError("no local buckets resolved for filter; bucket IDs =" + bucketIDs);
  }
  if (!pr.getDataStore().areAllBucketsHosted(bucketIDs)) {
    throw new AssertionError("bucket IDs =" + bucketIDs + " not all hosted locally");
  }
  ResultSender<Integer> rs = context.<Integer>getResultSender();
  Integer[] bucketIds = bucketIDs.toArray(new Integer[0]);
  // All but the final id go through sendResult(); the final id must go through
  // lastResult() to signal completion to the caller's ResultCollector.
  for (int i = 0; i < bucketIds.length - 1; ++i) {
    rs.sendResult(bucketIds[i]);
  }
  rs.lastResult(bucketIds[bucketIds.length - 1]);
}
Aggregations