Use of org.apache.geode.cache.query.TypeMismatchException in project geode by apache.
Class PartitionedRegion, method doExecuteQuery.
/**
 * If ForceReattemptException is thrown, then the caller must loop and call us again.
 *
 * @throws ForceReattemptException if one of the buckets moved out from under us
 */
private Object doExecuteQuery(DefaultQuery query, Object[] parameters, Set buckets)
    throws FunctionDomainException, TypeMismatchException, NameResolutionException,
    QueryInvocationTargetException, ForceReattemptException {
  if (logger.isDebugEnabled()) {
    logger.debug("Executing query :{}", query);
  }
  HashSet<Integer> allBuckets = new HashSet<Integer>();
  if (buckets == null) {
    // remote buckets
    final Iterator remoteIter = getRegionAdvisor().getBucketSet().iterator();
    try {
      while (remoteIter.hasNext()) {
        allBuckets.add((Integer) remoteIter.next());
      }
    } catch (NoSuchElementException ignore) {
    }
  } else {
    // local buckets
    Iterator localIter = null;
    if (this.dataStore != null) {
      localIter = buckets.iterator();
    } else {
      localIter = Collections.emptySet().iterator();
    }
    try {
      while (localIter.hasNext()) {
        allBuckets.add((Integer) localIter.next());
      }
    } catch (NoSuchElementException ignore) {
    }
  }
  if (allBuckets.size() == 0) {
    if (logger.isDebugEnabled()) {
      logger.debug("No bucket storage allocated. PR has no data yet.");
    }
    ResultsSet resSet = new ResultsSet();
    resSet.setElementType(new ObjectTypeImpl(
        this.getValueConstraint() == null ? Object.class : this.getValueConstraint()));
    return resSet;
  }
  CompiledSelect selectExpr = query.getSimpleSelect();
  if (selectExpr == null) {
    throw new IllegalArgumentException(
        LocalizedStrings.PartitionedRegion_QUERY_MUST_BE_A_SELECT_EXPRESSION_ONLY
            .toLocalizedString());
  }
  // this can return a BAG even if it's a DISTINCT select expression,
  // since the expectation is that the duplicates will be removed at the end
  SelectResults results = selectExpr.getEmptyResultSet(parameters, getCache(), query);
  PartitionedRegionQueryEvaluator prqe = new PartitionedRegionQueryEvaluator(
      this.getSystem(), this, query, parameters, results, allBuckets);
  for (;;) {
    this.getCancelCriterion().checkCancelInProgress(null);
    boolean interrupted = Thread.interrupted();
    try {
      results = prqe.queryBuckets(null);
      break;
    } catch (InterruptedException ignore) {
      interrupted = true;
    } catch (FunctionDomainException e) {
      throw e;
    } catch (TypeMismatchException e) {
      throw e;
    } catch (NameResolutionException e) {
      throw e;
    } catch (QueryInvocationTargetException e) {
      throw e;
    } catch (QueryException qe) {
      throw new QueryInvocationTargetException(
          LocalizedStrings.PartitionedRegion_UNEXPECTED_QUERY_EXCEPTION_OCCURRED_DURING_QUERY_EXECUTION_0
              .toLocalizedString(qe.getMessage()), qe);
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  } // for
  // Drop duplicates if this is a DISTINCT query
  boolean allowsDuplicates = results.getCollectionType().allowsDuplicates();
  // No need to apply the limit here; the results built above already satisfy it, so the
  // result size will be exactly matching the limit.
  if (selectExpr.isDistinct()) {
    // don't just convert to a ResultsSet (or StructSet), since
    // the bags can convert themselves to a Set more efficiently
    ObjectType elementType = results.getCollectionType().getElementType();
    if (selectExpr.getOrderByAttrs() != null) {
      // Set limit also; it's not applied while building the final result set as order by is
      // involved.
    } else if (allowsDuplicates) {
      results = new ResultsCollectionWrapper(elementType, results.asSet());
    }
    if (selectExpr.isCount() && (results.isEmpty() || selectExpr.isDistinct())) {
      // Constructor with elementType not visible.
      SelectResults resultCount = new ResultsBag(getCachePerfStats());
      resultCount.setElementType(new ObjectTypeImpl(Integer.class));
      ((Bag) resultCount).addAndGetOccurence(results.size());
      return resultCount;
    }
  }
  return results;
}
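The method above rethrows TypeMismatchException unchanged from the bucket evaluator, so the exception ultimately surfaces to whoever executed the OQL statement. A minimal caller-side sketch, assuming a hypothetical partitioned region named /portfolios with an integer id field (the region, field, and class name are not part of the snippet above):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.query.FunctionDomainException;
import org.apache.geode.cache.query.NameResolutionException;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryInvocationTargetException;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.TypeMismatchException;

public class PartitionedQueryCaller {
  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().create();
    QueryService queryService = cache.getQueryService();
    // $1 is an OQL bind placeholder; the region name and field are illustrative only.
    Query query = queryService.newQuery("SELECT DISTINCT * FROM /portfolios p WHERE p.id > $1");
    try {
      SelectResults<?> results = (SelectResults<?>) query.execute(new Object[] {10});
      System.out.println("rows: " + results.size());
    } catch (TypeMismatchException e) {
      // Raised, for example, when the bind value's type cannot be compared with p.id.
      System.err.println("type mismatch in OQL: " + e.getMessage());
    } catch (FunctionDomainException | NameResolutionException
        | QueryInvocationTargetException e) {
      System.err.println("query failed: " + e.getMessage());
    } finally {
      cache.close();
    }
  }
}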
Use of org.apache.geode.cache.query.TypeMismatchException in project geode by apache.
Class QueryAccessController, method runNamedQuery.
/**
 * Run a named, parametrized query by ID.
 *
 * @param queryId ID of the stored OQL query string
 * @param arguments query bind parameters required while executing the query
 * @return query result as a JSON document
 */
@RequestMapping(method = RequestMethod.POST, value = "/{query}",
    produces = {MediaType.APPLICATION_JSON_VALUE})
@ApiOperation(value = "run parametrized query",
    notes = "run the specified named query passing in scalar values for query parameters in the GemFire cluster",
    response = void.class)
@ApiResponses({
    @ApiResponse(code = 200, message = "Query successfully executed."),
    @ApiResponse(code = 401, message = "Invalid Username or Password."),
    @ApiResponse(code = 403, message = "Insufficient privileges for operation."),
    @ApiResponse(code = 400, message = "Query bind params specified as JSON document in the request body are invalid"),
    @ApiResponse(code = 500, message = "GemFire throws an error or exception")})
@ResponseBody
@ResponseStatus(HttpStatus.OK)
@PreAuthorize("@securityService.authorize('DATA', 'READ')")
public ResponseEntity<String> runNamedQuery(@PathVariable("query") String queryId,
    @RequestBody String arguments) {
  logger.debug("Running named Query with ID ({})...", queryId);
  queryId = decode(queryId);
  if (arguments != null) {
    // It's a compiled query.
    // Convert arguments into Object[]
    Object[] args = jsonToObjectArray(arguments);
    Query compiledQuery = compiledQueries.get(queryId);
    if (compiledQuery == null) {
      // This is the first time the query is seen by this server.
      final String oql = getValue(PARAMETERIZED_QUERIES_REGION, queryId, false);
      ValidationUtils.returnValueThrowOnNull(oql, new ResourceNotFoundException(
          String.format("No Query with ID (%1$s) was found!", queryId)));
      try {
        compiledQuery = getQueryService().newQuery(oql);
      } catch (QueryInvalidException qie) {
        throw new GemfireRestException("Syntax of the OQL queryString is invalid!", qie);
      }
      compiledQueries.putIfAbsent(queryId, (DefaultQuery) compiledQuery);
    }
    // Execute the compiled query and handle the Exceptions appropriately (500 Server Error)!
    try {
      Object queryResult = compiledQuery.execute(args);
      return processQueryResponse(compiledQuery, args, queryResult);
    } catch (FunctionDomainException fde) {
      throw new GemfireRestException(
          "A function was applied to a parameter that is improper for that function!", fde);
    } catch (TypeMismatchException tme) {
      throw new GemfireRestException("Bind parameter is not of the expected type!", tme);
    } catch (NameResolutionException nre) {
      throw new GemfireRestException("Name in the query cannot be resolved!", nre);
    } catch (IllegalArgumentException iae) {
      throw new GemfireRestException(
          "The number of bound parameters does not match the number of placeholders!", iae);
    } catch (IllegalStateException ise) {
      throw new GemfireRestException("Query is not permitted on this type of region!", ise);
    } catch (QueryExecutionTimeoutException qete) {
      throw new GemfireRestException(
          "Query execution time has exceeded the max query execution time (gemfire.Cache.MAX_QUERY_EXECUTION_TIME) configured!",
          qete);
    } catch (QueryInvocationTargetException qite) {
      throw new GemfireRestException(
          "Data referenced in the from clause is not available for querying!", qite);
    } catch (QueryExecutionLowMemoryException qelme) {
      throw new GemfireRestException(
          "Query was canceled due to low memory conditions; the resource manager critical heap percentage has been set!",
          qelme);
    } catch (Exception e) {
      throw new GemfireRestException("Error encountered while executing named query!", e);
    }
  } else {
    throw new GemfireRestException(
        "Bind params were either not specified or not processed properly by the server!");
  }
}
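Each query exception above, TypeMismatchException included, is translated into a GemfireRestException that the REST layer reports as an HTTP error. A rough client-side sketch of invoking such a named query over Geode's developer REST API; the port, endpoint path, query ID, and the @type/@value shape of the bind-argument JSON are assumptions to adapt to your deployment:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class NamedQueryClient {
  public static void main(String[] args) throws Exception {
    // Bind values for the placeholders ($1, $2, ...) of the stored OQL string.
    String bindArgs = "[{\"@type\": \"int\", \"@value\": 20}]";
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:7070/gemfire-api/v1/queries/selectHighValue"))
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(bindArgs))
        .build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    // A bind value of the wrong type surfaces as a server error whose message comes from the
    // "Bind parameter is not of the expected type!" mapping in runNamedQuery above.
    System.out.println(response.statusCode() + ": " + response.body());
  }
}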
Use of org.apache.geode.cache.query.TypeMismatchException in project geode by apache.
Class ResourceManagerWithQueryMonitorDUnitTest, method doCriticalMemoryHitDuringGatherTestWithMultipleServers.
// tests low memory hit while gathering partition region results
private void doCriticalMemoryHitDuringGatherTestWithMultipleServers(final String regionName,
    boolean createPR, final int criticalThreshold, final boolean disabledQueryMonitorForLowMem,
    final int queryTimeout, final boolean hitCriticalThreshold) throws Exception {
  // create region on the server
  final Host host = Host.getHost(0);
  final VM server1 = host.getVM(0);
  final VM server2 = host.getVM(1);
  final VM client = host.getVM(2);
  final int numObjects = 200;
  try {
    final int[] port = AvailablePortHelper.getRandomAvailableTCPPorts(2);
    startCacheServer(server1, port[0], criticalThreshold, disabledQueryMonitorForLowMem,
        queryTimeout, regionName, createPR, 0);
    startCacheServer(server2, port[1], criticalThreshold, true, -1, regionName, createPR, 0);
    startClient(client, server1, port[0], regionName);
    populateData(server2, regionName, numObjects);
    createCancelDuringGatherTestHook(server1);
    client.invoke(new SerializableCallable("executing query to be canceled by gather") {
      public Object call() {
        QueryService qs = null;
        try {
          qs = getCache().getQueryService();
          Query query = qs.newQuery("Select * From /" + regionName);
          query.execute();
        } catch (ServerOperationException soe) {
          if (soe.getRootCause() instanceof QueryException) {
            QueryException e = (QueryException) soe.getRootCause();
            if (!isExceptionDueToLowMemory(e, CRITICAL_HEAP_USED)) {
              throw new CacheException(soe) {};
            } else {
              return 0;
            }
          }
        } catch (Exception e) {
          throw new CacheException(e) {};
        }
        // assertTrue(((CancelDuringGatherHook) DefaultQuery.testHook).triggeredOOME);
        throw new CacheException("should have hit low memory") {};
      }
    });
    verifyRejectedObjects(server1, disabledQueryMonitorForLowMem, queryTimeout,
        hitCriticalThreshold);
    // Pause for a second and then let's recover
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    // Recover from critical heap
    if (hitCriticalThreshold) {
      vmRecoversFromCriticalHeap(server1);
    }
    // Check to see if query execution is ok under "normal" or "healthy" conditions
    client.invoke(new CacheSerializableRunnable("Executing query when system is 'Normal'") {
      public void run2() {
        try {
          QueryService qs = getCache().getQueryService();
          Query query = qs.newQuery("Select * From /" + regionName);
          SelectResults results = (SelectResults) query.execute();
          assertEquals(numObjects, results.size());
        } catch (QueryInvocationTargetException e) {
          assertFalse(true);
        } catch (NameResolutionException e) {
          assertFalse(true);
        } catch (TypeMismatchException e) {
          assertFalse(true);
        } catch (FunctionDomainException e) {
          assertFalse(true);
        }
      }
    });
    // Recover from critical heap
    if (hitCriticalThreshold) {
      vmRecoversFromCriticalHeap(server1);
    }
  } finally {
    stopServer(server1);
    stopServer(server2);
  }
}
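The client callable above only treats the failure as expected when the root cause points at the low-memory cancellation. A rough sketch of the kind of root-cause check a helper like isExceptionDueToLowMemory might perform; the real helper and the CRITICAL_HEAP_USED message it is given are private to the DUnit test, so this version is an assumption about its behavior:

import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryExecutionLowMemoryException;

final class LowMemoryCheck {
  static boolean isDueToLowMemory(QueryException e, String criticalHeapMessage) {
    // The server may either wrap a QueryExecutionLowMemoryException or cancel the
    // query with a message naming the critical-heap condition.
    if (e.getCause() instanceof QueryExecutionLowMemoryException) {
      return true;
    }
    String msg = e.getMessage();
    return msg != null && msg.contains(criticalHeapMessage);
  }
}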
Use of org.apache.geode.cache.query.TypeMismatchException in project geode by apache.
Class ResourceManagerWithQueryMonitorDUnitTest, method doCriticalMemoryHitTestWithMultipleServers.
// Executes on client cache with multiple configured servers
private void doCriticalMemoryHitTestWithMultipleServers(final String regionName,
    boolean createPR, final int criticalThreshold, final boolean disabledQueryMonitorForLowMem,
    final int queryTimeout, final boolean hitCriticalThreshold) throws Exception {
  // create region on the server
  final Host host = Host.getHost(0);
  final VM server1 = host.getVM(0);
  final VM server2 = host.getVM(1);
  final VM client = host.getVM(2);
  final int numObjects = 200;
  try {
    final int[] port = AvailablePortHelper.getRandomAvailableTCPPorts(2);
    startCacheServer(server1, port[0], criticalThreshold, disabledQueryMonitorForLowMem,
        queryTimeout, regionName, createPR, 0);
    startCacheServer(server2, port[1], criticalThreshold, true, -1, regionName, createPR, 0);
    startClient(client, server1, port[0], regionName);
    populateData(server2, regionName, numObjects);
    doTestCriticalHeapAndQueryTimeout(server1, client, regionName,
        disabledQueryMonitorForLowMem, queryTimeout, hitCriticalThreshold);
    verifyRejectedObjects(server1, disabledQueryMonitorForLowMem, queryTimeout,
        hitCriticalThreshold);
    // Pause for a second and then let's recover
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    // Recover from critical heap
    if (hitCriticalThreshold) {
      vmRecoversFromCriticalHeap(server1);
    }
    // Check to see if query execution is ok under "normal" or "healthy" conditions
    client.invoke(new CacheSerializableRunnable("Executing query when system is 'Normal'") {
      public void run2() {
        try {
          QueryService qs = getCache().getQueryService();
          Query query = qs.newQuery("Select * From /" + regionName);
          SelectResults results = (SelectResults) query.execute();
          assertEquals(numObjects, results.size());
        } catch (QueryInvocationTargetException e) {
          assertFalse(true);
        } catch (NameResolutionException e) {
          assertFalse(true);
        } catch (TypeMismatchException e) {
          assertFalse(true);
        } catch (FunctionDomainException e) {
          assertFalse(true);
        }
      }
    });
    // Execute a critical heap event / query timeout test again
    doTestCriticalHeapAndQueryTimeout(server1, client, regionName,
        disabledQueryMonitorForLowMem, queryTimeout, hitCriticalThreshold);
    verifyRejectedObjects(server1, disabledQueryMonitorForLowMem, queryTimeout,
        hitCriticalThreshold);
    // Recover from critical heap
    if (hitCriticalThreshold) {
      vmRecoversFromCriticalHeap(server1);
    }
  } finally {
    stopServer(server1);
    stopServer(server2);
  }
}
Use of org.apache.geode.cache.query.TypeMismatchException in project geode by apache.
Class CompactRangeIndex, method evaluate.
private void evaluate(Object key, int operator, Collection results, CompiledValue iterOps,
    RuntimeIterator runtimeItr, ExecutionContext context, Set keysToRemove, List projAttrib,
    SelectResults intermediateResults, boolean isIntersection, int limit, boolean applyOrderBy,
    List orderByAttribs) throws TypeMismatchException, FunctionDomainException,
    NameResolutionException, QueryInvocationTargetException {
  boolean multiColOrderBy = false;
  if (keysToRemove == null) {
    keysToRemove = new HashSet(0);
  }
  key = TypeUtils.indexKeyFor(key);
  if (key == null) {
    key = IndexManager.NULL;
  }
  boolean asc = true;
  if (applyOrderBy) {
    CompiledSortCriterion csc = (CompiledSortCriterion) orderByAttribs.get(0);
    asc = !csc.getCriterion();
    multiColOrderBy = orderByAttribs.size() > 1;
  }
  CloseableIterator<IndexStoreEntry> iterator = null;
  try {
    switch (operator) {
      case OQLLexerTokenTypes.TOK_EQ:
        assert keysToRemove.isEmpty();
        iterator = indexStore.get(key);
        addToResultsFromEntries(key, operator, iterator, results, iterOps, runtimeItr, context,
            projAttrib, intermediateResults, isIntersection, multiColOrderBy ? -1 : limit);
        break;
      case OQLLexerTokenTypes.TOK_LT: {
        if (asc) {
          iterator = indexStore.iterator(null, true, key, false, keysToRemove);
        } else {
          iterator = indexStore.descendingIterator(null, true, key, false, keysToRemove);
        }
        addToResultsFromEntries(key, operator, iterator, results, iterOps, runtimeItr, context,
            projAttrib, intermediateResults, isIntersection, multiColOrderBy ? -1 : limit);
      }
        break;
      case OQLLexerTokenTypes.TOK_LE: {
        if (asc) {
          iterator = indexStore.iterator(null, true, key, true, keysToRemove);
        } else {
          iterator = indexStore.descendingIterator(null, true, key, true, keysToRemove);
        }
        addToResultsFromEntries(key, operator, iterator, results, iterOps, runtimeItr, context,
            projAttrib, intermediateResults, isIntersection, multiColOrderBy ? -1 : limit);
      }
        break;
      case OQLLexerTokenTypes.TOK_GT: {
        if (asc) {
          iterator = indexStore.iterator(key, false, keysToRemove);
        } else {
          iterator = indexStore.descendingIterator(key, false, keysToRemove);
        }
        addToResultsFromEntries(key, operator, iterator, results, iterOps, runtimeItr, context,
            projAttrib, intermediateResults, isIntersection, multiColOrderBy ? -1 : limit);
      }
        break;
      case OQLLexerTokenTypes.TOK_GE: {
        if (asc) {
          iterator = indexStore.iterator(key, true, keysToRemove);
        } else {
          iterator = indexStore.descendingIterator(key, true, keysToRemove);
        }
        addToResultsFromEntries(key, operator, iterator, results, iterOps, runtimeItr, context,
            projAttrib, intermediateResults, isIntersection, multiColOrderBy ? -1 : limit);
      }
        break;
      case OQLLexerTokenTypes.TOK_NE_ALT:
      case OQLLexerTokenTypes.TOK_NE: {
        keysToRemove.add(key);
        if (asc) {
          iterator = indexStore.iterator(keysToRemove);
        } else {
          iterator = indexStore.descendingIterator(keysToRemove);
        }
        addToResultsFromEntries(key, operator, iterator, results, iterOps, runtimeItr, context,
            projAttrib, intermediateResults, isIntersection, multiColOrderBy ? -1 : limit);
        // If the key is not null, then add the nulls to the results as this is a not equals
        // query
        if (!IndexManager.NULL.equals(key)) {
          // we pass in the operator TOK_EQ because we want to add results where the key is
          // equal to NULL
          addToResultsFromEntries(IndexManager.NULL, OQLLexerTokenTypes.TOK_EQ,
              indexStore.get(IndexManager.NULL), results, iterOps, runtimeItr, context,
              projAttrib, intermediateResults, isIntersection, multiColOrderBy ? -1 : limit);
        }
        // If the key is not undefined, then add the undefined values to the results as this is
        // a not equals query
        if (!QueryService.UNDEFINED.equals(key)) {
          // we pass in the operator TOK_EQ because we want to add results where the key is
          // equal to UNDEFINED
          addToResultsFromEntries(QueryService.UNDEFINED, OQLLexerTokenTypes.TOK_EQ,
              indexStore.get(QueryService.UNDEFINED), results, iterOps, runtimeItr, context,
              projAttrib, intermediateResults, isIntersection, multiColOrderBy ? -1 : limit);
        }
      }
        break;
      default:
        throw new AssertionError("Operator = " + operator);
    } // end switch
  } catch (ClassCastException ex) {
    if (operator == OQLLexerTokenTypes.TOK_EQ) {
      // result is empty set
      return;
    } else if (operator == OQLLexerTokenTypes.TOK_NE
        || operator == OQLLexerTokenTypes.TOK_NE_ALT) {
      // put all in result
      keysToRemove.add(key);
      try {
        if (asc) {
          iterator = indexStore.iterator(keysToRemove);
        } else {
          iterator = indexStore.descendingIterator(keysToRemove);
        }
        addToResultsFromEntries(key, OQLLexerTokenTypes.TOK_NE, iterator, results, iterOps,
            runtimeItr, context, projAttrib, intermediateResults, isIntersection,
            multiColOrderBy ? -1 : limit);
      } finally {
        if (iterator != null) {
          iterator.close();
        }
      }
    } else {
      // otherwise throw exception
      throw new TypeMismatchException("", ex);
    }
  } finally {
    if (iterator != null) {
      iterator.close();
    }
  }
}
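The ClassCastException branch above is what turns a range comparison between incompatible key types into the TypeMismatchException that evaluate declares. An illustrative end-to-end sketch with a hypothetical region, index, and data; whether the mismatch is reported exactly through this path can depend on the Geode version and on OQL type coercion:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.*;

public class RangeIndexTypeMismatch {
  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().create();
    Region<String, String> codes =
        cache.<String, String>createRegionFactory(RegionShortcut.PARTITION).create("codes");
    codes.put("a", "100");
    QueryService qs = cache.getQueryService();
    // A functional index on the String entry values; range indexes are typically backed by
    // CompactRangeIndex.
    qs.createIndex("codeIdx", "c", "/codes c");
    Query q = qs.newQuery("SELECT * FROM /codes c WHERE c > $1");
    try {
      // Integer bind value compared against indexed String keys.
      q.execute(new Object[] {42});
    } catch (TypeMismatchException e) {
      System.err.println("range comparison across types: " + e.getMessage());
    } finally {
      cache.close();
    }
  }
}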