Use of org.apache.geode.cache.query.QueryInvocationTargetException in project geode by apache.
The class ClientAuthorizationTestCase, method doOp.
protected static void doOp(OperationCode op, final int[] indices, final int flagsI, final int expectedResult) throws InterruptedException {
boolean operationOmitted = false;
final int flags = flagsI;
Region region = getRegion();
if ((flags & OpFlags.USE_SUBREGION) > 0) {
assertNotNull(region);
Region subregion = null;
if ((flags & OpFlags.NO_CREATE_SUBREGION) > 0) {
if ((flags & OpFlags.CHECK_NOREGION) > 0) {
// Wait for the DynamicRegionFactory (DRF) update to arrive
waitForCondition(() -> getSubregion() == null);
subregion = getSubregion();
assertNull(subregion);
return;
} else {
// Wait for the DynamicRegionFactory (DRF) update to arrive
waitForCondition(() -> getSubregion() != null);
subregion = getSubregion();
assertNotNull(subregion);
}
} else {
subregion = createSubregion(region);
}
assertNotNull(subregion);
region = subregion;
} else if ((flags & OpFlags.CHECK_NOREGION) > 0) {
// Wait for the region-destroy update to arrive
waitForCondition(() -> getRegion() == null);
region = getRegion();
assertNull(region);
return;
} else {
assertNotNull(region);
}
final String[] keys = KEYS;
final String[] vals;
if ((flags & OpFlags.USE_NEWVAL) > 0) {
vals = NVALUES;
} else {
vals = VALUES;
}
InterestResultPolicy policy = InterestResultPolicy.KEYS_VALUES;
if ((flags & OpFlags.REGISTER_POLICY_NONE) > 0) {
policy = InterestResultPolicy.NONE;
}
final int numOps = indices.length;
System.out.println("Got doOp for op: " + op.toString() + ", numOps: " + numOps + ", indices: " + indicesToString(indices) + ", expect: " + expectedResult);
boolean exceptionOccurred = false;
boolean breakLoop = false;
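// Read-style operations pause briefly so that earlier updates have had time to reach this client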
if (op.isGet() || op.isContainsKey() || op.isKeySet() || op.isQuery() || op.isExecuteCQ()) {
Thread.sleep(PAUSE);
}
for (int indexIndex = 0; indexIndex < numOps; ++indexIndex) {
if (breakLoop) {
break;
}
int index = indices[indexIndex];
try {
final Object key = keys[index];
final Object expectedVal = vals[index];
if (op.isGet()) {
Object value = null;
// this is the case for testing GET_ALL
if ((flags & OpFlags.USE_ALL_KEYS) > 0) {
breakLoop = true;
List keyList = new ArrayList(numOps);
Object searchKey;
for (int keyNumIndex = 0; keyNumIndex < numOps; ++keyNumIndex) {
int keyNum = indices[keyNumIndex];
searchKey = keys[keyNum];
keyList.add(searchKey);
// locally invalidate some keys to force a fetch of those keys from the server
if ((flags & OpFlags.CHECK_NOKEY) > 0) {
AbstractRegionEntry entry = (AbstractRegionEntry) ((LocalRegion) region).getRegionEntry(searchKey);
System.out.println("" + keyNum + ": key is " + searchKey + " and entry is " + entry);
assertFalse(region.containsKey(searchKey));
} else {
if (keyNumIndex % 2 == 1) {
assertTrue(region.containsKey(searchKey));
region.localInvalidate(searchKey);
}
}
}
Map entries = region.getAll(keyList);
for (int keyNumIndex = 0; keyNumIndex < numOps; ++keyNumIndex) {
int keyNum = indices[keyNumIndex];
searchKey = keys[keyNum];
if ((flags & OpFlags.CHECK_FAIL) > 0) {
assertFalse(entries.containsKey(searchKey));
} else {
assertTrue(entries.containsKey(searchKey));
value = entries.get(searchKey);
assertEquals(vals[keyNum], value);
}
}
break;
}
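// For a purely local get, wait until the locally cached value matches (or, for CHECK_FAIL, mismatches) the expected value before reading it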
if ((flags & OpFlags.LOCAL_OP) > 0) {
Callable<Boolean> condition = new Callable<Boolean>() {
private Region region;
@Override
public Boolean call() throws Exception {
Object value = getLocalValue(region, key);
return (flags & OpFlags.CHECK_FAIL) > 0 ? !expectedVal.equals(value) : expectedVal.equals(value);
}
public Callable<Boolean> init(Region region) {
this.region = region;
return this;
}
}.init(region);
waitForCondition(condition);
value = getLocalValue(region, key);
} else if ((flags & OpFlags.USE_GET_ENTRY_IN_TX) > 0) {
getCache().getCacheTransactionManager().begin();
Entry e = region.getEntry(key);
// Also, check getAll()
ArrayList a = new ArrayList();
// include the current key so the transactional getAll() actually fetches something
a.add(key);
region.getAll(a);
getCache().getCacheTransactionManager().commit();
value = e.getValue();
} else {
if ((flags & OpFlags.CHECK_NOKEY) > 0) {
assertFalse(region.containsKey(key));
} else {
assertTrue(region.containsKey(key) || ((LocalRegion) region).getRegionEntry(key).isTombstone());
region.localInvalidate(key);
}
value = region.get(key);
}
if ((flags & OpFlags.CHECK_FAIL) > 0) {
assertFalse(expectedVal.equals(value));
} else {
assertNotNull(value);
assertEquals(expectedVal, value);
}
} else if (op.isPut()) {
region.put(key, expectedVal);
} else if (op.isPutAll()) {
HashMap map = new HashMap();
for (int i = 0; i < indices.length; i++) {
map.put(keys[indices[i]], vals[indices[i]]);
}
region.putAll(map);
breakLoop = true;
} else if (op.isDestroy()) {
if ((flags & OpFlags.LOCAL_OP) > 0) {
region.localDestroy(key);
} else {
region.destroy(key);
}
} else if (op.isInvalidate()) {
if (region.containsKey(key)) {
if ((flags & OpFlags.LOCAL_OP) > 0) {
region.localInvalidate(key);
} else {
region.invalidate(key);
}
}
} else if (op.isContainsKey()) {
boolean result;
if ((flags & OpFlags.LOCAL_OP) > 0) {
result = region.containsKey(key);
} else {
result = region.containsKeyOnServer(key);
}
if ((flags & OpFlags.CHECK_FAIL) > 0) {
assertFalse(result);
} else {
assertTrue(result);
}
} else if (op.isRegisterInterest()) {
if ((flags & OpFlags.USE_LIST) > 0) {
breakLoop = true;
// Register interest list in this case
List keyList = new ArrayList(numOps);
for (int keyNumIndex = 0; keyNumIndex < numOps; ++keyNumIndex) {
int keyNum = indices[keyNumIndex];
keyList.add(keys[keyNum]);
}
region.registerInterest(keyList, policy);
} else if ((flags & OpFlags.USE_REGEX) > 0) {
breakLoop = true;
region.registerInterestRegex("key[1-" + numOps + ']', policy);
} else if ((flags & OpFlags.USE_ALL_KEYS) > 0) {
breakLoop = true;
region.registerInterest("ALL_KEYS", policy);
} else {
region.registerInterest(key, policy);
}
} else if (op.isUnregisterInterest()) {
if ((flags & OpFlags.USE_LIST) > 0) {
breakLoop = true;
// Unregister the interest list in this case
List keyList = new ArrayList(numOps);
for (int keyNumIndex = 0; keyNumIndex < numOps; ++keyNumIndex) {
int keyNum = indices[keyNumIndex];
keyList.add(keys[keyNum]);
}
region.unregisterInterest(keyList);
} else if ((flags & OpFlags.USE_REGEX) > 0) {
breakLoop = true;
region.unregisterInterestRegex("key[1-" + numOps + ']');
} else if ((flags & OpFlags.USE_ALL_KEYS) > 0) {
breakLoop = true;
region.unregisterInterest("ALL_KEYS");
} else {
region.unregisterInterest(key);
}
} else if (op.isKeySet()) {
breakLoop = true;
Set keySet;
if ((flags & OpFlags.LOCAL_OP) > 0) {
keySet = region.keySet();
} else {
keySet = region.keySetOnServer();
}
assertNotNull(keySet);
if ((flags & OpFlags.CHECK_FAIL) == 0) {
assertEquals(numOps, keySet.size());
}
for (int keyNumIndex = 0; keyNumIndex < numOps; ++keyNumIndex) {
int keyNum = indices[keyNumIndex];
if ((flags & OpFlags.CHECK_FAIL) > 0) {
assertFalse(keySet.contains(keys[keyNum]));
} else {
assertTrue(keySet.contains(keys[keyNum]));
}
}
} else if (op.isQuery()) {
breakLoop = true;
SelectResults queryResults = region.query("SELECT DISTINCT * FROM " + region.getFullPath());
assertNotNull(queryResults);
Set queryResultSet = queryResults.asSet();
if ((flags & OpFlags.CHECK_FAIL) == 0) {
assertEquals(numOps, queryResultSet.size());
}
for (int keyNumIndex = 0; keyNumIndex < numOps; ++keyNumIndex) {
int keyNum = indices[keyNumIndex];
if ((flags & OpFlags.CHECK_FAIL) > 0) {
assertFalse(queryResultSet.contains(vals[keyNum]));
} else {
assertTrue(queryResultSet.contains(vals[keyNum]));
}
}
} else if (op.isExecuteCQ()) {
breakLoop = true;
QueryService queryService = getCache().getQueryService();
CqQuery cqQuery;
if ((cqQuery = queryService.getCq("cq1")) == null) {
CqAttributesFactory cqFact = new CqAttributesFactory();
cqFact.addCqListener(new AuthzCqListener());
CqAttributes cqAttrs = cqFact.create();
cqQuery = queryService.newCq("cq1", "SELECT * FROM " + region.getFullPath(), cqAttrs);
}
if ((flags & OpFlags.LOCAL_OP) > 0) {
// Interpret this as testing results using CqListener
final AuthzCqListener listener = (AuthzCqListener) cqQuery.getCqAttributes().getCqListener();
WaitCriterion ev = new WaitCriterion() {
@Override
public boolean done() {
if ((flags & OpFlags.CHECK_FAIL) > 0) {
return 0 == listener.getNumUpdates();
} else {
return numOps == listener.getNumUpdates();
}
}
@Override
public String description() {
return null;
}
};
waitForCriterion(ev, 3 * 1000, 200, true);
if ((flags & OpFlags.CHECK_FAIL) > 0) {
assertEquals(0, listener.getNumUpdates());
} else {
assertEquals(numOps, listener.getNumUpdates());
listener.checkPuts(vals, indices);
}
assertEquals(0, listener.getNumCreates());
assertEquals(0, listener.getNumDestroys());
assertEquals(0, listener.getNumOtherOps());
assertEquals(0, listener.getNumErrors());
} else {
SelectResults cqResults = cqQuery.executeWithInitialResults();
assertNotNull(cqResults);
Set cqResultValues = new HashSet();
for (Object o : cqResults.asList()) {
Struct s = (Struct) o;
cqResultValues.add(s.get("value"));
}
Set cqResultSet = cqResults.asSet();
if ((flags & OpFlags.CHECK_FAIL) == 0) {
assertEquals(numOps, cqResultSet.size());
}
for (int keyNumIndex = 0; keyNumIndex < numOps; ++keyNumIndex) {
int keyNum = indices[keyNumIndex];
if ((flags & OpFlags.CHECK_FAIL) > 0) {
assertFalse(cqResultValues.contains(vals[keyNum]));
} else {
assertTrue(cqResultValues.contains(vals[keyNum]));
}
}
}
} else if (op.isStopCQ()) {
breakLoop = true;
CqQuery cqQuery = getCache().getQueryService().getCq("cq1");
((AuthzCqListener) cqQuery.getCqAttributes().getCqListener()).reset();
cqQuery.stop();
} else if (op.isCloseCQ()) {
breakLoop = true;
CqQuery cqQuery = getCache().getQueryService().getCq("cq1");
((AuthzCqListener) cqQuery.getCqAttributes().getCqListener()).reset();
cqQuery.close();
} else if (op.isRegionClear()) {
breakLoop = true;
if ((flags & OpFlags.LOCAL_OP) > 0) {
region.localClear();
} else {
region.clear();
}
} else if (op.isRegionCreate()) {
breakLoop = true;
// Region subregion = createSubregion(region);
// subregion.createRegionOnServer();
// Create region on server using the DynamicRegionFactory
// Assume it has already been initialized
DynamicRegionFactory drf = DynamicRegionFactory.get();
Region subregion = drf.createDynamicRegion(regionName, SUBREGION_NAME);
assertEquals('/' + regionName + '/' + SUBREGION_NAME, subregion.getFullPath());
} else if (op.isRegionDestroy()) {
breakLoop = true;
if ((flags & OpFlags.LOCAL_OP) > 0) {
region.localDestroyRegion();
} else {
if ((flags & OpFlags.USE_SUBREGION) > 0) {
try {
DynamicRegionFactory.get().destroyDynamicRegion(region.getFullPath());
} catch (RegionDestroyedException ex) {
// harmless to ignore this
System.out.println("doOp: sub-region " + region.getFullPath() + " already destroyed");
operationOmitted = true;
}
} else {
region.destroyRegion();
}
}
} else {
fail("doOp: Unhandled operation " + op);
}
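// The operation completed without an exception; fail if one was expected (omitted operations and unregisterInterest are exempt)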
if (expectedResult != NO_EXCEPTION) {
if (!operationOmitted && !op.isUnregisterInterest()) {
fail("Expected an exception while performing operation op =" + op + "flags = " + OpFlags.description(flags));
}
}
} catch (Exception ex) {
exceptionOccurred = true;
if ((ex instanceof ServerConnectivityException || ex instanceof QueryInvocationTargetException || ex instanceof CqException) && (expectedResult == NOTAUTHZ_EXCEPTION) && (ex.getCause() instanceof NotAuthorizedException)) {
System.out.println("doOp: Got expected NotAuthorizedException when doing operation [" + op + "] with flags " + OpFlags.description(flags) + ": " + ex.getCause());
continue;
} else if (expectedResult == OTHER_EXCEPTION) {
System.out.println("doOp: Got expected exception when doing operation: " + ex.toString());
continue;
} else {
fail("doOp: Got unexpected exception when doing operation. Policy = " + policy + " flags = " + OpFlags.description(flags), ex);
}
}
}
if (!exceptionOccurred && !operationOmitted && expectedResult != NO_EXCEPTION) {
fail("Expected an exception while performing operation: " + op + " flags = " + OpFlags.description(flags));
}
}
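For orientation, here is a hedged sketch of how a test might drive this helper; the operation, indices, and flag combination are illustrative and not taken from any particular caller of doOp.
// Hypothetical invocation from a dUnit test VM: perform a getAll() for the first three keys,
// expect the keys to be absent locally, and expect no authorization failure.
// (The calling test method would declare throws InterruptedException.)
doOp(OperationCode.GET, new int[] { 0, 1, 2 },
    OpFlags.USE_ALL_KEYS | OpFlags.CHECK_NOKEY, NO_EXCEPTION);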
Use of org.apache.geode.cache.query.QueryInvocationTargetException in project geode by apache.
The class QueryAccessController, method runNamedQuery.
/**
* Run the named, parameterized query identified by its ID.
*
* @param queryId id of the OQL query string
* @param arguments query bind params required while executing the query
* @return query result as a JSON document
*/
@RequestMapping(method = RequestMethod.POST, value = "/{query}", produces = { MediaType.APPLICATION_JSON_VALUE })
@ApiOperation(value = "run parameterized query", notes = "run the specified named query passing in scalar values for query parameters in the GemFire cluster", response = void.class)
@ApiResponses({ @ApiResponse(code = 200, message = "Query successfully executed."), @ApiResponse(code = 401, message = "Invalid Username or Password."), @ApiResponse(code = 403, message = "Insufficient privileges for operation."), @ApiResponse(code = 400, message = "Query bind params specified as a JSON document in the request body are invalid"), @ApiResponse(code = 500, message = "GemFire throws an error or exception") })
@ResponseBody
@ResponseStatus(HttpStatus.OK)
@PreAuthorize("@securityService.authorize('DATA', 'READ')")
public ResponseEntity<String> runNamedQuery(@PathVariable("query") String queryId, @RequestBody String arguments) {
logger.debug("Running named Query with ID ({})...", queryId);
queryId = decode(queryId);
if (arguments != null) {
// It's a compiled query.
// Convert arguments into Object[]
Object[] args = jsonToObjectArray(arguments);
Query compiledQuery = compiledQueries.get(queryId);
if (compiledQuery == null) {
// This is the first time the query has been seen by this server.
final String oql = getValue(PARAMETERIZED_QUERIES_REGION, queryId, false);
ValidationUtils.returnValueThrowOnNull(oql, new ResourceNotFoundException(String.format("No Query with ID (%1$s) was found!", queryId)));
try {
compiledQuery = getQueryService().newQuery(oql);
} catch (QueryInvalidException qie) {
throw new GemfireRestException("Syntax of the OQL queryString is invalid!", qie);
}
compiledQueries.putIfAbsent(queryId, (DefaultQuery) compiledQuery);
}
// Execute the compiled query and handle the exceptions appropriately (500 Server Error)
try {
Object queryResult = compiledQuery.execute(args);
return processQueryResponse(compiledQuery, args, queryResult);
} catch (FunctionDomainException fde) {
throw new GemfireRestException("A function was applied to a parameter that is improper for that function!", fde);
} catch (TypeMismatchException tme) {
throw new GemfireRestException("Bind parameter is not of the expected type!", tme);
} catch (NameResolutionException nre) {
throw new GemfireRestException("Name in the query cannot be resolved!", nre);
} catch (IllegalArgumentException iae) {
throw new GemfireRestException(" The number of bound parameters does not match the number of placeholders!", iae);
} catch (IllegalStateException ise) {
throw new GemfireRestException("Query is not permitted on this type of region!", ise);
} catch (QueryExecutionTimeoutException qete) {
throw new GemfireRestException("Query execution time is exceeded max query execution time (gemfire.Cache.MAX_QUERY_EXECUTION_TIME) configured!", qete);
} catch (QueryInvocationTargetException qite) {
throw new GemfireRestException("Data referenced in from clause is not available for querying!", qite);
} catch (QueryExecutionLowMemoryException qelme) {
throw new GemfireRestException("Query gets canceled due to low memory conditions and the resource manager critical heap percentage has been set!", qelme);
} catch (Exception e) {
throw new GemfireRestException("Error encountered while executing named query!", e);
}
} else {
throw new GemfireRestException(" Bind params either not specified or not processed properly by the server!");
}
}
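For context, a minimal client-side sketch of calling this endpoint with Spring's RestTemplate. The base URL, query ID, and bind-parameter payload are illustrative assumptions; the /gemfire-api/v1 prefix follows the Geode developer REST API convention, but verify it against your deployment.
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.client.RestTemplate;

public class NamedQueryClient {
  public static void main(String[] args) {
    RestTemplate restTemplate = new RestTemplate();
    HttpHeaders headers = new HttpHeaders();
    headers.setContentType(MediaType.APPLICATION_JSON);
    // Bind parameters go in the request body as a JSON array; the named query
    // with ID "selectOrders" is assumed to have been created beforehand.
    String bindParams = "[{\"@type\": \"int\", \"@value\": 2}]";
    ResponseEntity<String> response = restTemplate.postForEntity(
        "http://localhost:7070/gemfire-api/v1/queries/selectOrders",
        new HttpEntity<>(bindParams, headers), String.class);
    System.out.println(response.getStatusCode() + ": " + response.getBody());
  }
}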
Use of org.apache.geode.cache.query.QueryInvocationTargetException in project geode by apache.
The class ExecuteRegionFunction66, method cmdExecute.
@Override
public void cmdExecute(Message clientMessage, ServerConnection servConn, long start) throws IOException {
String regionName = null;
Object function = null;
Object args = null;
MemberMappedArgument memberMappedArg = null;
final boolean isBucketsAsFilter;
final byte isReExecute;
Set<Object> filter = null;
byte hasResult = 0;
int removedNodesSize = 0;
Set<Object> removedNodesSet = null;
int filterSize = 0, partNumber = 0;
CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
byte functionState = 0;
int functionTimeout = ConnectionImpl.DEFAULT_CLIENT_FUNCTION_TIMEOUT;
try {
byte[] bytes = clientMessage.getPart(0).getSerializedForm();
functionState = bytes[0];
if (bytes.length >= 5 && servConn.getClientVersion().ordinal() >= Version.GFE_8009.ordinal()) {
functionTimeout = Part.decodeInt(bytes, 1);
}
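// Decode from the encoded function state whether the function returns a result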
if (functionState != 1) {
hasResult = (byte) ((functionState & 2) - 1);
} else {
hasResult = functionState;
}
if (hasResult == 1) {
servConn.setAsTrue(REQUIRES_RESPONSE);
servConn.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
}
regionName = clientMessage.getPart(1).getString();
function = clientMessage.getPart(2).getStringOrObject();
args = clientMessage.getPart(3).getObject();
Part part = clientMessage.getPart(4);
if (part != null) {
Object obj = part.getObject();
if (obj instanceof MemberMappedArgument) {
memberMappedArg = (MemberMappedArgument) obj;
}
}
byte[] flags = clientMessage.getPart(5).getSerializedForm();
if (servConn.getClientVersion().ordinal() > Version.GFE_81.ordinal()) {
isBucketsAsFilter = (flags[0] & ExecuteFunctionHelper.BUCKETS_AS_FILTER_MASK) != 0;
isReExecute = (flags[0] & ExecuteFunctionHelper.IS_REXECUTE_MASK) != 0 ? (byte) 1 : 0;
} else {
isReExecute = flags[0];
isBucketsAsFilter = false;
}
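// Message parts 6 onward carry the filter size and keys, followed by the removed-nodes count and entries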
filterSize = clientMessage.getPart(6).getInt();
if (filterSize != 0) {
filter = new HashSet<Object>();
partNumber = 7;
for (int i = 0; i < filterSize; i++) {
filter.add(clientMessage.getPart(partNumber + i).getStringOrObject());
}
}
partNumber = 7 + filterSize;
removedNodesSize = clientMessage.getPart(partNumber).getInt();
if (removedNodesSize != 0) {
removedNodesSet = new HashSet<Object>();
partNumber = partNumber + 1;
for (int i = 0; i < removedNodesSize; i++) {
removedNodesSet.add(clientMessage.getPart(partNumber + i).getStringOrObject());
}
}
} catch (ClassNotFoundException exception) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), exception);
if (hasResult == 1) {
writeChunkedException(clientMessage, exception, servConn);
} else {
writeException(clientMessage, exception, false, servConn);
}
servConn.setAsTrue(RESPONDED);
return;
}
if (function == null || regionName == null) {
String message = null;
if (function == null) {
message = LocalizedStrings.ExecuteRegionFunction_THE_INPUT_0_FOR_THE_EXECUTE_FUNCTION_REQUEST_IS_NULL.toLocalizedString("function");
}
if (regionName == null) {
message = LocalizedStrings.ExecuteRegionFunction_THE_INPUT_0_FOR_THE_EXECUTE_FUNCTION_REQUEST_IS_NULL.toLocalizedString("region");
}
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, clientMessage, message, servConn);
return;
}
Region region = crHelper.getRegion(regionName);
if (region == null) {
String message = LocalizedStrings.ExecuteRegionFunction_THE_REGION_NAMED_0_WAS_NOT_FOUND_DURING_EXECUTE_FUNCTION_REQUEST.toLocalizedString(regionName);
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, clientMessage, message, servConn);
return;
}
HandShake handShake = (HandShake) servConn.getHandshake();
int earlierClientReadTimeout = handShake.getClientReadTimeout();
handShake.setClientReadTimeout(functionTimeout);
ServerToClientFunctionResultSender resultSender = null;
Function functionObject = null;
try {
if (function instanceof String) {
functionObject = FunctionService.getFunction((String) function);
if (functionObject == null) {
String message = LocalizedStrings.ExecuteRegionFunction_THE_FUNCTION_0_HAS_NOT_BEEN_REGISTERED.toLocalizedString(function);
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, clientMessage, message, servConn);
return;
} else {
byte functionStateOnServerSide = AbstractExecution.getFunctionState(functionObject.isHA(), functionObject.hasResult(), functionObject.optimizeForWrite());
if (logger.isDebugEnabled()) {
logger.debug("Function State on server side: {} on client: {}", functionStateOnServerSide, functionState);
}
if (functionStateOnServerSide != functionState) {
String message = LocalizedStrings.FunctionService_FUNCTION_ATTRIBUTE_MISMATCH_CLIENT_SERVER.toLocalizedString(function);
logger.warn("{}: {}", servConn.getName(), message);
sendError(hasResult, clientMessage, message, servConn);
return;
}
}
} else {
functionObject = (Function) function;
}
this.securityService.authorizeDataWrite();
// check if the caller is authorized to do this operation on the server
AuthorizeRequest authzRequest = servConn.getAuthzRequest();
final String functionName = functionObject.getId();
final String regionPath = region.getFullPath();
ExecuteFunctionOperationContext executeContext = null;
if (authzRequest != null) {
executeContext = authzRequest.executeFunctionAuthorize(functionName, regionPath, filter, args, functionObject.optimizeForWrite());
}
// Construct execution
AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(region);
ChunkedMessage m = servConn.getFunctionResponseMessage();
m.setTransactionId(clientMessage.getTransactionId());
resultSender = new ServerToClientFunctionResultSender65(m, MessageType.EXECUTE_REGION_FUNCTION_RESULT, servConn, functionObject, executeContext);
if (execution instanceof PartitionedRegionFunctionExecutor) {
if ((hasResult == 1) && filter != null && filter.size() == 1) {
ServerConnection.executeFunctionOnLocalNodeOnly((byte) 1);
}
execution = new PartitionedRegionFunctionExecutor((PartitionedRegion) region, filter, args, memberMappedArg, resultSender, removedNodesSet, isBucketsAsFilter);
} else {
execution = new DistributedRegionFunctionExecutor((DistributedRegion) region, filter, args, memberMappedArg, resultSender);
}
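// Propagate the client's re-execute flag (used on retries) to the execution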
if (isReExecute == 1) {
execution = execution.setIsReExecute();
}
if (logger.isDebugEnabled()) {
logger.debug("Executing Function: {} on Server: {} with Execution: {} functionState={} reExecute={} hasResult={}", functionObject.getId(), servConn, execution, functionState, isReExecute, hasResult);
}
if (hasResult == 1) {
if (function instanceof String) {
switch(functionState) {
case AbstractExecution.NO_HA_HASRESULT_NO_OPTIMIZEFORWRITE:
execution.execute((String) function, true, false, false).getResult();
break;
case AbstractExecution.HA_HASRESULT_NO_OPTIMIZEFORWRITE:
execution.execute((String) function, true, true, false).getResult();
break;
case AbstractExecution.HA_HASRESULT_OPTIMIZEFORWRITE:
execution.execute((String) function, true, true, true).getResult();
break;
case AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE:
execution.execute((String) function, true, false, true).getResult();
break;
}
} else {
execution.execute(functionObject).getResult();
}
} else {
if (function instanceof String) {
switch(functionState) {
case AbstractExecution.NO_HA_NO_HASRESULT_NO_OPTIMIZEFORWRITE:
execution.execute((String) function, false, false, false);
break;
case AbstractExecution.NO_HA_NO_HASRESULT_OPTIMIZEFORWRITE:
execution.execute((String) function, false, false, true);
break;
}
} else {
execution.execute(functionObject);
}
writeReply(clientMessage, servConn);
}
} catch (IOException ioe) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), ioe);
final String message = LocalizedStrings.ExecuteRegionFunction_SERVER_COULD_NOT_SEND_THE_REPLY.toLocalizedString();
sendException(hasResult, clientMessage, message, servConn, ioe);
} catch (FunctionException fe) {
String message = fe.getMessage();
Object cause = fe.getCause();
if (cause instanceof FunctionInvocationTargetException || cause instanceof QueryInvocationTargetException) {
if (cause instanceof InternalFunctionInvocationTargetException) {
// InternalFunctionInvocationTargetException (e.g. an HA member departed) is not actionable by the user, so it is only logged at debug level
if (logger.isDebugEnabled()) {
logger.debug(LocalizedMessage.create(LocalizedStrings.ExecuteFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, new Object[] { function }), fe);
}
} else if (functionObject.isHA()) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function + " :" + message));
} else {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), fe);
}
resultSender.setException(fe);
} else {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), fe);
sendException(hasResult, clientMessage, message, servConn, fe);
}
} catch (Exception e) {
logger.warn(LocalizedMessage.create(LocalizedStrings.ExecuteRegionFunction_EXCEPTION_ON_SERVER_WHILE_EXECUTIONG_FUNCTION_0, function), e);
String message = e.getMessage();
sendException(hasResult, clientMessage, message, servConn, e);
} finally {
handShake.setClientReadTimeout(earlierClientReadTimeout);
ServerConnection.executeFunctionOnLocalNodeOnly((byte) 0);
}
}
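As background, the FunctionService.getFunction((String) function) lookup above only succeeds when the function has been registered on the server beforehand. A minimal sketch of such a registration follows; the class name and function ID are illustrative, not part of the code above.
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.FunctionService;

public class ExampleRegionFunction implements Function {
  @Override
  public void execute(FunctionContext context) {
    // Do the region work here, then send a result back to the caller.
    context.getResultSender().lastResult("done");
  }

  @Override
  public String getId() {
    return "exampleRegionFunction";
  }

  @Override
  public boolean hasResult() {
    return true;
  }

  @Override
  public boolean optimizeForWrite() {
    return false;
  }

  @Override
  public boolean isHA() {
    return false;
  }

  // Typically called once during server startup so the string-based lookup can resolve it.
  public static void register() {
    FunctionService.registerFunction(new ExampleRegionFunction());
  }
}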
Use of org.apache.geode.cache.query.QueryInvocationTargetException in project geode by apache.
The class ResourceManagerWithQueryMonitorDUnitTest, method doCriticalMemoryHitDuringGatherTestWithMultipleServers.
// Tests a low-memory hit while gathering partitioned region results
private void doCriticalMemoryHitDuringGatherTestWithMultipleServers(final String regionName, boolean createPR, final int criticalThreshold, final boolean disabledQueryMonitorForLowMem, final int queryTimeout, final boolean hitCriticalThreshold) throws Exception {
// create region on the server
final Host host = Host.getHost(0);
final VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
final VM client = host.getVM(2);
final int numObjects = 200;
try {
final int[] port = AvailablePortHelper.getRandomAvailableTCPPorts(2);
startCacheServer(server1, port[0], criticalThreshold, disabledQueryMonitorForLowMem, queryTimeout, regionName, createPR, 0);
startCacheServer(server2, port[1], criticalThreshold, true, -1, regionName, createPR, 0);
startClient(client, server1, port[0], regionName);
populateData(server2, regionName, numObjects);
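// Install a test hook on server1 so the query is canceled while it is gathering results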
createCancelDuringGatherTestHook(server1);
client.invoke(new SerializableCallable("executing query to be canceled by gather") {
public Object call() {
QueryService qs = null;
try {
qs = getCache().getQueryService();
Query query = qs.newQuery("Select * From /" + regionName);
query.execute();
} catch (ServerOperationException soe) {
if (soe.getRootCause() instanceof QueryException) {
QueryException e = (QueryException) soe.getRootCause();
if (!isExceptionDueToLowMemory(e, CRITICAL_HEAP_USED)) {
throw new CacheException(soe) {
};
} else {
return 0;
}
}
} catch (Exception e) {
throw new CacheException(e) {
};
}
// assertTrue(((CancelDuringGatherHook)DefaultQuery.testHook).triggeredOOME);
throw new CacheException("should have hit low memory") {
};
}
});
verifyRejectedObjects(server1, disabledQueryMonitorForLowMem, queryTimeout, hitCriticalThreshold);
// Pause for a second and then let's recover
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
// Recover from critical heap
if (hitCriticalThreshold) {
vmRecoversFromCriticalHeap(server1);
}
// Check to see if query execution is ok under "normal" or "healthy" conditions
client.invoke(new CacheSerializableRunnable("Executing query when system is 'Normal'") {
public void run2() {
try {
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery("Select * From /" + regionName);
SelectResults results = (SelectResults) query.execute();
assertEquals(numObjects, results.size());
} catch (QueryInvocationTargetException | NameResolutionException | TypeMismatchException | FunctionDomainException e) {
throw new AssertionError("Unexpected exception while executing query", e);
}
}
});
// Recover from critical heap
if (hitCriticalThreshold) {
vmRecoversFromCriticalHeap(server1);
}
} finally {
stopServer(server1);
stopServer(server2);
}
}
Use of org.apache.geode.cache.query.QueryInvocationTargetException in project geode by apache.
The class ResourceManagerWithQueryMonitorDUnitTest, method doCriticalMemoryHitTestWithMultipleServers.
// Executes from a client cache against multiple configured servers
private void doCriticalMemoryHitTestWithMultipleServers(final String regionName, boolean createPR, final int criticalThreshold, final boolean disabledQueryMonitorForLowMem, final int queryTimeout, final boolean hitCriticalThreshold) throws Exception {
// create region on the server
final Host host = Host.getHost(0);
final VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
final VM client = host.getVM(2);
final int numObjects = 200;
try {
final int[] port = AvailablePortHelper.getRandomAvailableTCPPorts(2);
startCacheServer(server1, port[0], criticalThreshold, disabledQueryMonitorForLowMem, queryTimeout, regionName, createPR, 0);
startCacheServer(server2, port[1], criticalThreshold, true, -1, regionName, createPR, 0);
startClient(client, server1, port[0], regionName);
populateData(server2, regionName, numObjects);
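// Drive the critical-heap and query-timeout scenario on server1 while the client's query is in flight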
doTestCriticalHeapAndQueryTimeout(server1, client, regionName, disabledQueryMonitorForLowMem, queryTimeout, hitCriticalThreshold);
verifyRejectedObjects(server1, disabledQueryMonitorForLowMem, queryTimeout, hitCriticalThreshold);
// Pause for a second and then let's recover
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
// Recover from critical heap
if (hitCriticalThreshold) {
vmRecoversFromCriticalHeap(server1);
}
// Check to see if query execution is ok under "normal" or "healthy" conditions
client.invoke(new CacheSerializableRunnable("Executing query when system is 'Normal'") {
public void run2() {
try {
QueryService qs = getCache().getQueryService();
Query query = qs.newQuery("Select * From /" + regionName);
SelectResults results = (SelectResults) query.execute();
assertEquals(numObjects, results.size());
} catch (QueryInvocationTargetException | NameResolutionException | TypeMismatchException | FunctionDomainException e) {
throw new AssertionError("Unexpected exception while executing query", e);
}
}
});
// Execute a critical heap event/ query timeout test again
doTestCriticalHeapAndQueryTimeout(server1, client, regionName, disabledQueryMonitorForLowMem, queryTimeout, hitCriticalThreshold);
verifyRejectedObjects(server1, disabledQueryMonitorForLowMem, queryTimeout, hitCriticalThreshold);
// Recover from critical heap
if (hitCriticalThreshold) {
vmRecoversFromCriticalHeap(server1);
}
} finally {
stopServer(server1);
stopServer(server2);
}
}