Use of org.apache.geode.cache.execute.FunctionContext in project geode by apache.
The class ExecuteFunction66, method cmdExecute.
@Override
public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
Object function = null;
Object args = null;
MemberMappedArgument memberMappedArg = null;
String[] groups = null;
byte hasResult = 0;
byte functionState = 0;
boolean isReexecute = false;
boolean allMembers = false;
boolean ignoreFailedMembers = false;
int functionTimeout = ConnectionImpl.DEFAULT_CLIENT_FUNCTION_TIMEOUT;
try {
byte[] bytes = clientMessage.getPart(0).getSerializedForm();
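// Part 0: the first byte encodes the function state; bytes 1-4, when sent by clients at or above Version.GFE_8009, encode the client function timeout.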
functionState = bytes[0];
if (bytes.length >= 5 && servConn.getClientVersion().ordinal() >= Version.GFE_8009.ordinal()) {
functionTimeout = Part.decodeInt(bytes, 1);
}
if (functionState == AbstractExecution.HA_HASRESULT_NO_OPTIMIZEFORWRITE_REEXECUTE) {
functionState = AbstractExecution.HA_HASRESULT_NO_OPTIMIZEFORWRITE;
isReexecute = true;
} else if (functionState == AbstractExecution.HA_HASRESULT_OPTIMIZEFORWRITE_REEXECUTE) {
functionState = AbstractExecution.HA_HASRESULT_OPTIMIZEFORWRITE;
isReexecute = true;
}
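// Derive hasResult from the state byte: (functionState & 2) - 1 is 1 when the 0x2 bit is set and -1 otherwise; a functionState of 1 is passed through unchanged.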
if (functionState != 1) {
hasResult = (byte) ((functionState & 2) - 1);
} else {
hasResult = functionState;
}
if (hasResult == 1) {
servConn.setAsTrue(REQUIRES_RESPONSE);
servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
}
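// Part 1: function id (String) or serialized Function; part 2: arguments; part 3: optional MemberMappedArgument.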
function = clientMessage.getPart(1).getStringOrObject();
args = clientMessage.getPart(2).getObject();
Part part = clientMessage.getPart(3);
if (part != null) {
memberMappedArg = (MemberMappedArgument) part.getObject();
}
groups = getGroups(clientMessage);
allMembers = getAllMembers(clientMessage);
ignoreFailedMembers = getIgnoreFailedMembers(clientMessage);
} catch (ClassNotFoundException exception) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), exception);
if (hasResult == 1) {
writeChunkedException(clientMessage, exception, servConn);
} else {
writeException(clientMessage, exception, false, servConn);
}
servConn.setAsTrue(RESPONDED);
return;
}
if (function == null) {
final String message = LocalizedStrings.ExecuteFunction_THE_INPUT_FUNCTION_FOR_THE_EXECUTE_FUNCTION_REQUEST_IS_NULL.toLocalizedString();
logger.warn(LocalizedMessage.create(LocalizedStrings.TWO_ARG_COLON, new Object[] { servConn.getName(), message }));
sendError(hasResult, clientMessage, message, servConn);
return;
}
// Execute function on the cache
try {
Function functionObject = null;
if (function instanceof String) {
functionObject = FunctionService.getFunction((String) function);
if (functionObject == null) {
final String message = LocalizedStrings.ExecuteFunction_FUNCTION_NAMED_0_IS_NOT_REGISTERED.toLocalizedString(function);
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, clientMessage, message, servConn);
return;
} else {
byte functionStateOnServerSide = AbstractExecution.getFunctionState(functionObject.isHA(), functionObject.hasResult(), functionObject.optimizeForWrite());
if (logger.isDebugEnabled()) {
logger.debug("Function State on server side: {} on client: {}", functionStateOnServerSide, functionState);
}
if (functionStateOnServerSide != functionState) {
String message = LocalizedStrings.FunctionService_FUNCTION_ATTRIBUTE_MISMATCH_CLIENT_SERVER.toLocalizedString(function);
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, clientMessage, message, servConn);
return;
}
}
} else {
functionObject = (Function) function;
}
FunctionStats stats = FunctionStats.getFunctionStats(functionObject.getId());
this.securityService.authorizeDataWrite();
// check if the caller is authorized to do this operation on server
AuthorizeRequest authzRequest = servConn.getAuthzRequest();
ExecuteFunctionOperationContext executeContext = null;
if (authzRequest != null) {
executeContext = authzRequest.executeFunctionAuthorize(functionObject.getId(), null, null, args, functionObject.optimizeForWrite());
}
ChunkedMessage m = servConn.getFunctionResponseMessage();
m.setTransactionId(clientMessage.getTransactionId());
ServerToClientFunctionResultSender resultSender = new ServerToClientFunctionResultSender65(m, MessageType.EXECUTE_FUNCTION_RESULT, servConn, functionObject, executeContext);
InternalDistributedMember localVM = (InternalDistributedMember) servConn.getCache().getDistributedSystem().getDistributedMember();
FunctionContext context = null;
if (memberMappedArg != null) {
context = new FunctionContextImpl(functionObject.getId(), memberMappedArg.getArgumentsForMember(localVM.getId()), resultSender, isReexecute);
} else {
context = new FunctionContextImpl(functionObject.getId(), args, resultSender, isReexecute);
}
HandShake handShake = (HandShake) servConn.getHandshake();
int earlierClientReadTimeout = handShake.getClientReadTimeout();
handShake.setClientReadTimeout(functionTimeout);
try {
if (logger.isDebugEnabled()) {
logger.debug("Executing Function on Server: {} with context: {}", servConn, context);
}
InternalCache cache = servConn.getCache();
// Guard the cache == null case (see the comment below) before dereferencing it.
HeapMemoryMonitor hmm = cache == null ? null : ((InternalResourceManager) cache.getResourceManager()).getHeapMonitor();
if (functionObject.optimizeForWrite() && hmm != null && hmm.getState().isCritical() && !MemoryThresholds.isLowMemoryExceptionDisabled()) {
Set<DistributedMember> sm = Collections.singleton((DistributedMember) cache.getMyId());
Exception e = new LowMemoryException(LocalizedStrings.ResourceManager_LOW_MEMORY_FOR_0_FUNCEXEC_MEMBERS_1.toLocalizedString(new Object[] { functionObject.getId(), sm }), sm);
sendException(hasResult, clientMessage, e.getMessage(), servConn, e);
return;
}
/*
 * If cache is null, either the cache has not yet been created on this node or this is a
 * shutdown scenario.
 */
DM dm = null;
if (cache != null) {
dm = cache.getDistributionManager();
}
if (groups != null && groups.length > 0) {
executeFunctionOnGroups(function, args, groups, allMembers, functionObject, resultSender, ignoreFailedMembers);
} else {
executeFunctionaLocally(functionObject, context, (ServerToClientFunctionResultSender65) resultSender, dm, stats);
}
if (!functionObject.hasResult()) {
writeReply(clientMessage, servConn);
}
} catch (FunctionException functionException) {
stats.endFunctionExecutionWithException(functionObject.hasResult());
throw functionException;
} catch (Exception exception) {
stats.endFunctionExecutionWithException(functionObject.hasResult());
throw new FunctionException(exception);
} finally {
handShake.setClientReadTimeout(earlierClientReadTimeout);
}
} catch (IOException ioException) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), ioException);
String message = LocalizedStrings.ExecuteFunction_SERVER_COULD_NOT_SEND_THE_REPLY.toLocalizedString();
sendException(hasResult, clientMessage, message, servConn, ioException);
} catch (InternalFunctionInvocationTargetException internalfunctionException) {
// in case of an HA member departure
if (logger.isDebugEnabled()) {
logger.debug(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, new Object[] { function }), internalfunctionException);
}
final String message = internalfunctionException.getMessage();
sendException(hasResult, clientMessage, message, servConn, internalfunctionException);
} catch (Exception e) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), e);
final String message = e.getMessage();
sendException(hasResult, clientMessage, message, servConn, e);
}
}
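To ground the handler above, here is a minimal, hypothetical Function implementation whose execute() method receives the FunctionContextImpl that cmdExecute constructs. The name EchoFunction and its behavior are illustrative sketches, not part of the Geode sources shown here.
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;

public class EchoFunction implements Function {
  @Override
  public void execute(FunctionContext context) {
    // getArguments() yields the args decoded from part 2, or the per-member
    // arguments when the client supplied a MemberMappedArgument (part 3).
    Object args = context.getArguments();
    // The ResultSender here is the ServerToClientFunctionResultSender65 wired
    // up by cmdExecute; lastResult() flushes the final response chunk.
    context.getResultSender().lastResult(args);
  }

  @Override
  public String getId() {
    return "EchoFunction"; // the id looked up via FunctionService.getFunction()
  }

  @Override
  public boolean hasResult() {
    return true; // with isHA() and optimizeForWrite(), this determines functionState
  }

  @Override
  public boolean isHA() {
    return true;
  }

  @Override
  public boolean optimizeForWrite() {
    return false;
  }
}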
Use of org.apache.geode.cache.execute.FunctionContext in project geode by apache.
The class ExecuteFunction, method cmdExecute.
@Override
public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
Object function = null;
Object args = null;
MemberMappedArgument memberMappedArg = null;
byte hasResult = 0;
try {
hasResult = clientMessage.getPart(0).getSerializedForm()[0];
if (hasResult == 1) {
servConn.setAsTrue(REQUIRES_RESPONSE);
servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
}
function = clientMessage.getPart(1).getStringOrObject();
args = clientMessage.getPart(2).getObject();
Part part = clientMessage.getPart(3);
if (part != null) {
memberMappedArg = (MemberMappedArgument) part.getObject();
}
} catch (ClassNotFoundException exception) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), exception);
if (hasResult == 1) {
writeChunkedException(clientMessage, exception, servConn);
servConn.setAsTrue(RESPONDED);
return;
}
}
if (function == null) {
final String message = LocalizedStrings.ExecuteFunction_THE_INPUT_FUNCTION_FOR_THE_EXECUTE_FUNCTION_REQUEST_IS_NULL.toLocalizedString();
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, clientMessage, message, servConn);
return;
}
// Execute function on the cache
try {
Function functionObject = null;
if (function instanceof String) {
functionObject = FunctionService.getFunction((String) function);
if (functionObject == null) {
final String message = LocalizedStrings.ExecuteFunction_FUNCTION_NAMED_0_IS_NOT_REGISTERED.toLocalizedString(function);
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, clientMessage, message, servConn);
return;
}
} else {
functionObject = (Function) function;
}
FunctionStats stats = FunctionStats.getFunctionStats(functionObject.getId());
this.securityService.authorizeDataWrite();
// check if the caller is authorized to do this operation on server
AuthorizeRequest authzRequest = servConn.getAuthzRequest();
ExecuteFunctionOperationContext executeContext = null;
if (authzRequest != null) {
executeContext = authzRequest.executeFunctionAuthorize(functionObject.getId(), null, null, args, functionObject.optimizeForWrite());
}
ChunkedMessage m = servConn.getFunctionResponseMessage();
m.setTransactionId(clientMessage.getTransactionId());
ResultSender resultSender = new ServerToClientFunctionResultSender(m, MessageType.EXECUTE_FUNCTION_RESULT, servConn, functionObject, executeContext);
InternalDistributedMember localVM = (InternalDistributedMember) servConn.getCache().getDistributedSystem().getDistributedMember();
FunctionContext context = null;
if (memberMappedArg != null) {
context = new FunctionContextImpl(functionObject.getId(), memberMappedArg.getArgumentsForMember(localVM.getId()), resultSender);
} else {
context = new FunctionContextImpl(functionObject.getId(), args, resultSender);
}
HandShake handShake = (HandShake) servConn.getHandshake();
int earlierClientReadTimeout = handShake.getClientReadTimeout();
handShake.setClientReadTimeout(0);
try {
long startExecution = stats.startTime();
stats.startFunctionExecution(functionObject.hasResult());
if (logger.isDebugEnabled()) {
logger.debug("Executing Function on Server: " + servConn.toString() + "with context :" + context.toString());
}
InternalCache cache = servConn.getCache();
// As in ExecuteFunction66, guard against a null cache before dereferencing it.
HeapMemoryMonitor hmm = cache == null ? null : ((InternalResourceManager) cache.getResourceManager()).getHeapMonitor();
if (functionObject.optimizeForWrite() && hmm != null && hmm.getState().isCritical() && !MemoryThresholds.isLowMemoryExceptionDisabled()) {
Set<DistributedMember> sm = Collections.<DistributedMember>singleton(cache.getMyId());
throw new LowMemoryException(LocalizedStrings.ResourceManager_LOW_MEMORY_FOR_0_FUNCEXEC_MEMBERS_1.toLocalizedString(new Object[] { functionObject.getId(), sm }), sm);
}
functionObject.execute(context);
stats.endFunctionExecution(startExecution, functionObject.hasResult());
} catch (FunctionException functionException) {
stats.endFunctionExecutionWithException(functionObject.hasResult());
throw functionException;
} catch (Exception exception) {
stats.endFunctionExecutionWithException(functionObject.hasResult());
throw new FunctionException(exception);
} finally {
handShake.setClientReadTimeout(earlierClientReadTimeout);
}
} catch (IOException ioException) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), ioException);
String message = LocalizedStrings.ExecuteFunction_SERVER_COULD_NOT_SEND_THE_REPLY.toLocalizedString();
sendException(hasResult, clientMessage, message, servConn, ioException);
} catch (InternalFunctionInvocationTargetException internalfunctionException) {
// in case of an HA member departure
if (logger.isDebugEnabled()) {
logger.debug(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, new Object[] { function }), internalfunctionException);
}
final String message = internalfunctionException.getMessage();
sendException(hasResult, clientMessage, message, servConn, internalfunctionException);
} catch (Exception e) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), e);
final String message = e.getMessage();
sendException(hasResult, clientMessage, message, servConn, e);
}
}
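For context, a hedged sketch of the client-side call that produces the message this handler parses; the locator address, argument, and class name are placeholders. Part 0 carries the hasResult byte, part 1 the function id or object, and part 2 the arguments.
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class ClientInvoker {
  public static void main(String[] args) {
    ClientCache cache = new ClientCacheFactory()
        .addPoolLocator("localhost", 10334) // placeholder locator address
        .create();
    Execution execution = FunctionService.onServer(cache)
        .setArguments("hello"); // travels as part 2
    ResultCollector<?, ?> rc = execution.execute("EchoFunction"); // id travels as part 1
    System.out.println(rc.getResult()); // blocks until the server's lastResult() arrives
    cache.close();
  }
}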
Use of org.apache.geode.cache.execute.FunctionContext in project geode by apache.
The class PartitionedRegion, method executeOnSingleNode.
/**
* Single key execution on single node
*
* @since GemFire 6.0
*/
private ResultCollector executeOnSingleNode(final Function function, final PartitionedRegionFunctionExecutor execution, ResultCollector rc, boolean isPRSingleHop, boolean isBucketSetAsFilter) {
final Set routingKeys = execution.getFilter();
final Object key = routingKeys.iterator().next();
final Integer bucketId;
if (isBucketSetAsFilter) {
bucketId = (Integer) key;
} else {
bucketId = PartitionedRegionHelper.getHashKey(this, Operation.FUNCTION_EXECUTION, key, null, null);
}
InternalDistributedMember targetNode = null;
if (function.optimizeForWrite()) {
targetNode = createBucket(bucketId, 0, null);
HeapMemoryMonitor hmm = ((InternalResourceManager) cache.getResourceManager()).getHeapMonitor();
if (hmm.isMemberHeapCritical(targetNode) && !MemoryThresholds.isLowMemoryExceptionDisabled()) {
Set<DistributedMember> sm = Collections.singleton((DistributedMember) targetNode);
throw new LowMemoryException(LocalizedStrings.ResourceManager_LOW_MEMORY_FOR_0_FUNCEXEC_MEMBERS_1.toLocalizedString(function.getId(), sm), sm);
}
} else {
targetNode = getOrCreateNodeForBucketRead(bucketId);
}
final DistributedMember localVm = getMyId();
if (targetNode != null && isPRSingleHop && !localVm.equals(targetNode)) {
Set<ServerBucketProfile> profiles = this.getRegionAdvisor().getClientBucketProfiles(bucketId);
if (profiles != null) {
for (ServerBucketProfile profile : profiles) {
if (profile.getDistributedMember().equals(targetNode)) {
if (logger.isDebugEnabled()) {
logger.debug("FunctionServiceSingleHop: Found remote node.{}", localVm);
}
throw new InternalFunctionInvocationTargetException(LocalizedStrings.PartitionedRegion_MULTIPLE_TARGET_NODE_FOUND_FOR.toLocalizedString());
}
}
}
}
if (targetNode == null) {
throw new FunctionException(LocalizedStrings.PartitionedRegion_NO_TARGET_NODE_FOUND_FOR_KEY_0.toLocalizedString(key));
}
if (logger.isDebugEnabled()) {
logger.debug("Executing Function: {} setArguments={} on {}", function.getId(), execution.getArguments(), targetNode);
}
while (!execution.getFailedNodes().isEmpty()) {
RetryTimeKeeper retryTime = new RetryTimeKeeper(this.retryTimeout);
if (execution.getFailedNodes().contains(targetNode.getId())) {
/*
* if (retryTime.overMaximum()) { PRHARedundancyProvider.timedOut(this, null, null,
* "doing function execution", this.retryTimeout); // NOTREACHED }
*/
// Fix for Bug # 40083
targetNode = null;
while (targetNode == null) {
if (retryTime.overMaximum()) {
PRHARedundancyProvider.timedOut(this, null, null, "doing function execution", this.retryTimeout);
// NOTREACHED
}
retryTime.waitToRetryNode();
if (function.optimizeForWrite()) {
targetNode = getOrCreateNodeForBucketWrite(bucketId, retryTime);
} else {
targetNode = getOrCreateNodeForBucketRead(bucketId);
}
}
if (targetNode == null) {
throw new FunctionException(LocalizedStrings.PartitionedRegion_NO_TARGET_NODE_FOUND_FOR_KEY_0.toLocalizedString(key));
}
} else {
execution.clearFailedNodes();
}
}
final HashSet<Integer> buckets = new HashSet<Integer>();
buckets.add(bucketId);
final Set<InternalDistributedMember> singleMember = Collections.singleton(targetNode);
execution.validateExecution(function, singleMember);
execution.setExecutionNodes(singleMember);
LocalResultCollector<?, ?> localRC = execution.getLocalResultCollector(function, rc);
if (targetNode.equals(localVm)) {
final DM dm = getDistributionManager();
PartitionedRegionFunctionResultSender resultSender = new PartitionedRegionFunctionResultSender(dm, PartitionedRegion.this, 0, localRC, execution.getServerResultSender(), true, false, execution.isForwardExceptions(), function, buckets);
final FunctionContext context = new RegionFunctionContextImpl(function.getId(), PartitionedRegion.this, execution.getArgumentsForMember(localVm.getId()), routingKeys, ColocationHelper.constructAndGetAllColocatedLocalDataSet(PartitionedRegion.this, buckets), buckets, resultSender, execution.isReExecute());
execution.executeFunctionOnLocalPRNode(function, context, resultSender, dm, isTX());
return localRC;
} else {
return executeFunctionOnRemoteNode(targetNode, function, execution.getArgumentsForMember(targetNode.getId()), routingKeys, function.isHA() ? rc : localRC, buckets, execution.getServerResultSender(), execution);
}
}
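The public API path that reaches executeOnSingleNode is an onRegion() execution whose filter contains exactly one key. A hedged sketch (the region, key, argument, and function id are illustrative):
import java.util.Collections;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;

public class SingleKeyExecution {
  public static Object executeOnKey(Region<String, ?> region, String key) {
    // A one-element filter makes execution.getFilter() a singleton, so the
    // partitioned region resolves a single bucketId and a single target node.
    Execution execution = FunctionService.onRegion(region)
        .withFilter(Collections.singleton(key))
        .setArguments("payload"); // illustrative argument
    ResultCollector<?, ?> rc = execution.execute("EchoFunction");
    return rc.getResult();
  }
}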
Use of org.apache.geode.cache.execute.FunctionContext in project geode by apache.
The class PRColocatedEquiJoinDUnitTest, method testNonColocatedPRLocalQuerying.
@Test
public void testNonColocatedPRLocalQuerying() throws Exception {
IgnoredException.addIgnoredException("UnsupportedOperationException");
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
setCacheInVMs(vm0);
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
// Creating PR's on the participating VM's
// Creating DataStore node on the VM0.
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy, Portfolio.class));
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
// Creating Colocated Region DataStore node on the VM0.
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
// Create second PR which is not colocated.
vm0.invoke(new CacheSerializableRunnable(coloName) {
@Override
public void run2() throws CacheException {
Cache cache = getCache();
Region partitionedregion = null;
try {
AttributesFactory attr = new AttributesFactory();
attr.setValueConstraint(NewPortfolio.class);
PartitionAttributesFactory paf = new PartitionAttributesFactory();
PartitionAttributes prAttr = paf.setRedundantCopies(redundancy).create();
attr.setPartitionAttributes(prAttr);
partitionedregion = cache.createRegion(coloName, attr.create());
} catch (IllegalStateException ex) {
LogWriterUtils.getLogWriter().warning("PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException", ex);
}
assertNotNull("PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region " + coloName + " not in cache", cache.getRegion(coloName));
assertNotNull("PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref null", partitionedregion);
assertTrue("PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Partitioned Region ref claims to be destroyed", !partitionedregion.isDestroyed());
}
});
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
// Creating local region on vm0 to compare the results of query.
vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, Portfolio.class));
vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(coloLocalName, NewPortfolio.class));
// Generating portfolio object array to be populated across the PR's & Local
// Regions
final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, cnt, cntDest));
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloLocalName, newPortfolio, cnt, cntDest));
// Putting the data into the PR's created
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
// querying the VM for data and comparing the result with query result of
// local region.
// querying the VM for data
vm0.invoke(new CacheSerializableRunnable("PRQuery") {
@Override
public void run2() throws CacheException {
// Helper classes and function
class TestQueryFunction extends FunctionAdapter {
@Override
public boolean hasResult() {
return true;
}
@Override
public boolean isHA() {
return false;
}
private final String id;
public TestQueryFunction(String id) {
super();
this.id = id;
}
@Override
public void execute(FunctionContext context) {
Cache cache = CacheFactory.getAnyInstance();
QueryService queryService = cache.getQueryService();
ArrayList allQueryResults = new ArrayList();
String qstr = (String) context.getArguments();
try {
Query query = queryService.newQuery(qstr);
context.getResultSender().sendResult((ArrayList) ((SelectResults) query.execute((RegionFunctionContext) context)).asList());
context.getResultSender().lastResult(null);
} catch (Exception e) {
e.printStackTrace();
throw new FunctionException(e);
}
}
@Override
public String getId() {
return this.id;
}
}
Cache cache = getCache();
// Querying the PR region
String[] queries = new String[] { "r1.ID = r2.id" };
Object[][] r = new Object[queries.length][2];
Region region = null;
region = cache.getRegion(name);
assertNotNull(region);
region = cache.getRegion(coloName);
assertNotNull(region);
QueryService qs = getCache().getQueryService();
Object[] params;
try {
for (int j = 0; j < queries.length; j++) {
getCache().getLogger().info("About to execute local query: " + queries[j]);
Function func = new TestQueryFunction("testfunction");
Object funcResult = FunctionService.onRegion((getCache().getRegion(name) instanceof PartitionedRegion) ? getCache().getRegion(name) : getCache().getRegion(coloName)).setArguments("Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "") + " * from /" + name + " r1, /" + coloName + " r2 where " + queries[j]).execute(func).getResult();
r[j][0] = ((ArrayList) funcResult).get(0);
}
fail("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully with non-colocated region on one of the nodes");
} catch (FunctionException e) {
if (e.getCause() instanceof UnsupportedOperationException) {
LogWriterUtils.getLogWriter().info("Query received FunctionException successfully while using QueryService.");
} else {
fail("UnsupportedOperationException must be thrown here");
}
}
}
});
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
}
Use of org.apache.geode.cache.execute.FunctionContext in project geode by apache.
The class PRColocatedEquiJoinDUnitTest, method testPRRRNonLocalQueryingWithNoRROnOneNode.
/**
 * A basic dunit test that <br>
 * 1. Creates a PR data store on two VMs and a replicated region on only one of them. <br>
 * 2. Populates the regions with test data. <br>
 * 3. Fires an equi-join query via function execution and verifies that it fails with a
 * RegionNotFoundException, because the replicated region is absent on one node.
 *
 * @throws Exception
 */
@Test
public void testPRRRNonLocalQueryingWithNoRROnOneNode() throws Exception {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
setCacheInVMs(vm0, vm1);
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
// Creating PR's on the participating VM's
// Creating DataStore node on the VM0.
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, 0, Portfolio.class));
vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, 0, Portfolio.class));
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
// Creating Colocated Region DataStore node on the VM0.
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the RR");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(coloName, NewPortfolio.class));
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
// Putting the data into the PR's created
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
// querying the VM for data and comparing the result with query result of
// local region.
// querying the VM for data
vm0.invoke(new CacheSerializableRunnable("PRQuery") {
@Override
public void run2() throws CacheException {
// Helper classes and function
class TestQueryFunction extends FunctionAdapter {
@Override
public boolean hasResult() {
return true;
}
@Override
public boolean isHA() {
return false;
}
private final String id;
public TestQueryFunction(String id) {
super();
this.id = id;
}
@Override
public void execute(FunctionContext context) {
Cache cache = CacheFactory.getAnyInstance();
QueryService queryService = cache.getQueryService();
ArrayList allQueryResults = new ArrayList();
String qstr = (String) context.getArguments();
try {
Query query = queryService.newQuery(qstr);
context.getResultSender().sendResult((ArrayList) ((SelectResults) query.execute((RegionFunctionContext) context)).asList());
context.getResultSender().lastResult(null);
} catch (Exception e) {
e.printStackTrace();
throw new FunctionException(e);
}
}
@Override
public String getId() {
return this.id;
}
}
Cache cache = getCache();
// Querying the PR region
String[] queries = new String[] { "r1.ID = r2.id" };
Object[][] r = new Object[queries.length][2];
Region region = null;
region = cache.getRegion(name);
assertNotNull(region);
region = cache.getRegion(coloName);
assertNotNull(region);
QueryService qs = getCache().getQueryService();
Object[] params;
try {
for (int j = 0; j < queries.length; j++) {
getCache().getLogger().info("About to execute local query: " + queries[j]);
Function func = new TestQueryFunction("testfunction");
Object funcResult = FunctionService.onRegion((getCache().getRegion(name) instanceof PartitionedRegion) ? getCache().getRegion(name) : getCache().getRegion(coloName)).setArguments("Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "") + " * from /" + name + " r1, /" + coloName + " r2 where " + queries[j]).execute(func).getResult();
r[j][0] = ((ArrayList) funcResult).get(0);
}
fail("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully without RR region on one of the nodes");
} catch (FunctionException e) {
if (e.getCause() instanceof RegionNotFoundException) {
LogWriterUtils.getLogWriter().info("Query received FunctionException successfully while using QueryService.");
} else {
fail("RegionNotFoundException must be thrown here");
}
}
}
});
LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
}