Use of org.apache.geode.internal.cache.execute.ServerRegionFunctionExecutor in project geode by apache.
From the class FunctionServiceManager, method onRegion:
/**
 * Returns an {@link Execution} object that can be used to execute a data dependent function on
 * the specified Region.<br>
 * When invoked from a GemFire client, the method returns an Execution instance that sends a
 * message to one of the connected servers as specified by the {@link Pool} for the region.<br>
 * Depending on the filters set up on the {@link Execution} (see
 * {@link Execution#withFilter(Set)}), the function is executed on all GemFire members that
 * define the data region, or on a subset of members.
 *
 * For DistributedRegions with DataPolicy.NORMAL, it throws UnsupportedOperationException. For
 * DistributedRegions with DataPolicy.EMPTY, the function is executed on a random member that
 * has DataPolicy.REPLICATE. For DistributedRegions with DataPolicy.REPLICATE, the function is
 * executed locally. For Regions with DataPolicy.PARTITION, it executes on the members where the
 * data resides, as specified by the filter.
 *
 * @return Execution
 * @throws FunctionException if the region passed in is null
 * @since GemFire 6.0
 */
public Execution onRegion(Region region) {
  if (region == null) {
    throw new FunctionException(
        LocalizedStrings.FunctionService_0_PASSED_IS_NULL.toLocalizedString("Region instance "));
  }
  ProxyCache proxyCache = null;
  String poolName = region.getAttributes().getPoolName();
  if (poolName != null) {
    Pool pool = PoolManager.find(poolName);
    if (pool.getMultiuserAuthentication()) {
      // With multi-user authentication the caller must pass a ProxyRegion, from which the
      // real region and the authenticated cache are unwrapped.
      if (region instanceof ProxyRegion) {
        ProxyRegion proxyRegion = (ProxyRegion) region;
        region = proxyRegion.getRealRegion();
        proxyCache = proxyRegion.getAuthenticatedCache();
      } else {
        throw new UnsupportedOperationException();
      }
    }
  }
  if (isClientRegion(region)) {
    // Client region: ship the execution to a server through the region's pool.
    return new ServerRegionFunctionExecutor(region, proxyCache);
  }
  if (PartitionRegionHelper.isPartitionedRegion(region)) {
    // Partitioned region: execute on the members that host the (filtered) data.
    return new PartitionedRegionFunctionExecutor(region);
  }
  // Replicated or other distributed region.
  return new DistributedRegionFunctionExecutor(region);
}
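
For context, here is a minimal, hypothetical client-side sketch of how the Execution returned by onRegion is typically consumed. The locator address, region name ("orders"), filter key, argument, and function id ("orderTotals") are placeholders and not part of the Geode source above; because the region is created as a client PROXY region, FunctionService.onRegion takes the ServerRegionFunctionExecutor branch shown in the method.

import java.util.Collections;
import java.util.List;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class OnRegionClientSketch {
  public static void main(String[] args) {
    // Hypothetical client setup: locator host/port and region name are placeholders.
    ClientCache cache = new ClientCacheFactory().addPoolLocator("localhost", 10334).create();
    Region<String, Object> orders = cache
        .<String, Object>createClientRegionFactory(ClientRegionShortcut.PROXY)
        .create("orders");

    // A client (PROXY) region has a pool, so onRegion routes the call to a server.
    Execution execution = FunctionService.onRegion(orders)
        .withFilter(Collections.singleton("order-42")) // optional key-based routing filter
        .setArguments("hypothetical-argument");

    // "orderTotals" is a placeholder id for a function registered on the servers.
    ResultCollector rc = execution.execute("orderTotals");
    // The default collector returns its results as a List and blocks until all results arrive.
    List<?> results = (List<?>) rc.getResult();
    System.out.println(results);

    cache.close();
  }
}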
Use of org.apache.geode.internal.cache.execute.ServerRegionFunctionExecutor in project geode by apache.
From the class ExecuteRegionFunctionSingleHopOp, method constructAndGetExecuteFunctionTasks:
static List<SingleHopOperationCallable> constructAndGetExecuteFunctionTasks(String region,
    ServerRegionFunctionExecutor serverRegionExecutor,
    final Map<ServerLocation, ? extends HashSet> serverToFilterMap, final PoolImpl pool,
    final String functionId, byte hasResult, ResultCollector rc, ClientMetadataService cms,
    boolean allBucket, boolean isHA, boolean optimizeForWrite) {
  final List<SingleHopOperationCallable> tasks = new ArrayList<SingleHopOperationCallable>();
  ArrayList<ServerLocation> servers = new ArrayList<ServerLocation>(serverToFilterMap.keySet());
  if (logger.isDebugEnabled()) {
    logger.debug("Constructing tasks for the servers {}", servers);
  }
  for (ServerLocation server : servers) {
    // Narrow the executor's filter to the keys hosted by this particular server.
    ServerRegionFunctionExecutor executor = (ServerRegionFunctionExecutor) serverRegionExecutor
        .withFilter(serverToFilterMap.get(server));
    AbstractOp op = new ExecuteRegionFunctionSingleHopOpImpl(region, functionId, executor, rc,
        hasResult, new HashSet<String>(), allBucket, isHA, optimizeForWrite);
    // One callable per server, carrying the op and the current user's credentials.
    SingleHopOperationCallable task = new SingleHopOperationCallable(
        new ServerLocation(server.getHostName(), server.getPort()), pool, op,
        UserAttributes.userAttributes.get());
    tasks.add(task);
  }
  return tasks;
}
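
The method above only constructs the per-server tasks; submitting them is left to the caller. As a rough illustration of that fan-out pattern (a sketch using plain java.util.concurrent types, not Geode's actual single-hop submission path), a list of callables keyed by server could be run in parallel like this; the server names and keys are hypothetical:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutSketch {
  // Stand-in for SingleHopOperationCallable: one unit of work bound to one server.
  static Callable<String> taskFor(String server, List<String> filterKeys) {
    return () -> "executed on " + server + " for keys " + filterKeys;
  }

  public static void main(String[] args) throws Exception {
    // Hypothetical server -> filter-key mapping, analogous to serverToFilterMap above.
    List<Callable<String>> tasks = new ArrayList<>();
    tasks.add(taskFor("server1:40404", Arrays.asList("k1", "k3")));
    tasks.add(taskFor("server2:40405", Arrays.asList("k2")));

    // One task per server, executed in parallel, mirroring the one-task-per-ServerLocation
    // construction in constructAndGetExecuteFunctionTasks.
    ExecutorService pool = Executors.newFixedThreadPool(tasks.size());
    try {
      for (Future<String> f : pool.invokeAll(tasks)) {
        System.out.println(f.get());
      }
    } finally {
      pool.shutdown();
    }
  }
}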
Use of org.apache.geode.internal.cache.execute.ServerRegionFunctionExecutor in project geode by apache.
From the class ExecuteRegionFunctionSingleHopOp, the overload of constructAndGetExecuteFunctionTasks that takes a Function:
static List<SingleHopOperationCallable> constructAndGetExecuteFunctionTasks(String region,
    ServerRegionFunctionExecutor serverRegionExecutor,
    final Map<ServerLocation, ? extends HashSet> serverToFilterMap, final PoolImpl pool,
    final Function function, byte hasResult, ResultCollector rc, ClientMetadataService cms,
    boolean allBucket) {
  final List<SingleHopOperationCallable> tasks = new ArrayList<SingleHopOperationCallable>();
  ArrayList<ServerLocation> servers = new ArrayList<ServerLocation>(serverToFilterMap.keySet());
  if (logger.isDebugEnabled()) {
    logger.debug("Constructing tasks for the servers {}", servers);
  }
  for (ServerLocation server : servers) {
    ServerRegionFunctionExecutor executor = (ServerRegionFunctionExecutor) serverRegionExecutor
        .withFilter(serverToFilterMap.get(server));
    AbstractOp op = new ExecuteRegionFunctionSingleHopOpImpl(region, function, executor, rc,
        hasResult, new HashSet<String>(), allBucket);
    SingleHopOperationCallable task = new SingleHopOperationCallable(
        new ServerLocation(server.getHostName(), server.getPort()), pool, op,
        UserAttributes.userAttributes.get());
    tasks.add(task);
  }
  return tasks;
}
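
Judging from the two signatures, the difference is that this overload receives the Function object itself, so ExecuteRegionFunctionSingleHopOpImpl can read attributes such as HA behavior and optimize-for-write from it, whereas the overload above receives only the function id and therefore has to be handed hasResult, isHA, and optimizeForWrite explicitly as extra parameters.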