Use of org.apache.geode.cache.query.QueryException in project geode by apache.
The class CqServiceImpl, method stopCqs.
@Override
public synchronized void stopCqs(Collection<? extends InternalCqQuery> cqs) throws CqException {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  if (isDebugEnabled) {
    if (cqs == null) {
      logger.debug("CqService.stopCqs cqs : null");
    } else {
      logger.debug("CqService.stopCqs cqs : ({} queries)", cqs.size());
    }
  }
  if (cqs == null) {
    return;
  }
  String cqName = null;
  for (InternalCqQuery internalCqQuery : cqs) {
    CqQuery cq = internalCqQuery;
    if (!cq.isClosed() && cq.isRunning()) {
      try {
        cqName = cq.getName();
        cq.stop();
      } catch (QueryException | CqClosedException e) {
        if (isDebugEnabled) {
          logger.debug("Failed to stop the CQ, CqName : {} Error : {}", cqName, e.getMessage());
        }
      }
    }
  }
}
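The same stop-and-tolerate pattern applies when shutting down CQs through the public client API. The following is a minimal sketch, not taken from the Geode sources: the class and helper names are hypothetical, and CqClosedException is handled through its parent type CqException.

import org.apache.geode.cache.query.CqException;
import org.apache.geode.cache.query.CqQuery;
import org.apache.geode.cache.query.QueryService;

public class StopCqExample {

  // Hypothetical helper: stops every CQ registered with the given QueryService,
  // tolerating CQs that another thread has already stopped or closed.
  static void stopAllCqs(QueryService queryService) {
    for (CqQuery cq : queryService.getCqs()) {
      if (!cq.isClosed() && cq.isRunning()) {
        try {
          cq.stop();
        } catch (CqException e) {
          // Stopping is best-effort: log and continue with the remaining CQs.
          System.err.println("Failed to stop CQ " + cq.getName() + ": " + e.getMessage());
        }
      }
    }
  }
}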
Use of org.apache.geode.cache.query.QueryException in project geode by apache.
The class QueryIndexDUnitTest, method createIndex.
private static void createIndex() {
  Region region = basicGetCache().getRegion("portfolios");
  if (region == null) {
    logger.info("The region is not created properly");
  } else {
    if (!region.isDestroyed()) {
      logger.info("Obtained the region with name :" + "portfolios");
      QueryService qs = basicGetCache().getQueryService();
      if (qs != null) {
        try {
          qs.createIndex("statusIndex", "status", "/portfolios");
          logger.info("Index statusIndex Created successfully");
        } catch (IndexNameConflictException e) {
          Assert.fail("Caught IndexNameConflictException", e);
        } catch (IndexExistsException e) {
          Assert.fail("Caught IndexExistsException", e);
        } catch (QueryException e) {
          Assert.fail("Caught exception while trying to create index", e);
        }
      } else {
        fail("Could not obtain QueryService for the cache ");
      }
    } else {
      fail("Region.isDestroyed() returned true for region : " + "/portfolios");
    }
  }
}
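Outside the DUnit harness, the same index creation reduces to a single QueryService call whose checked QueryException subtypes must be handled. A minimal sketch, assuming a cache that already hosts a /portfolios region; the class name, helper name, and the choice to reuse a conflicting index are illustrative assumptions.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.IndexExistsException;
import org.apache.geode.cache.query.IndexNameConflictException;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.RegionNotFoundException;

public class CreateIndexExample {

  // Creates a functional index on Portfolio.status, treating an already-existing
  // equivalent index as a non-fatal condition.
  static Index createStatusIndex(Cache cache) throws RegionNotFoundException {
    QueryService qs = cache.getQueryService();
    try {
      return qs.createIndex("statusIndex", "status", "/portfolios");
    } catch (IndexNameConflictException | IndexExistsException e) {
      // The index (or an equivalent one) is already defined; look it up and reuse it.
      return qs.getIndex(cache.getRegion("portfolios"), "statusIndex");
    }
  }
}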
Use of org.apache.geode.cache.query.QueryException in project geode by apache.
The class PRColocatedEquiJoinDUnitTest, method testPRNonLocalQueryException.
/**
 * A very basic dunit test that:<br>
 * 1. Creates two PR data stores with redundantCopies = 1.<br>
 * 2. Populates the regions with test data.<br>
 * 3. Fires an equi-join query across the two partitioned regions through the cache-wide
 * QueryService on one data store VM and verifies that it fails with the expected
 * UnsupportedOperationException.
 *
 * @throws Exception
 */
@Test
public void testPRNonLocalQueryException() throws Exception {
  Host host = Host.getHost(0);
  VM vm0 = host.getVM(0);
  VM vm1 = host.getVM(1);
  setCacheInVMs(vm0, vm1);
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
  // Creating PRs on the participating VMs.
  // Creating the DataStore node on VM0.
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy, Portfolio.class));
  vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name, redundancy, Portfolio.class));
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
  // Creating the colocated region DataStore node on VM0.
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedCreate(coloName, redundancy, name));
  vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedCreate(coloName, redundancy, name));
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
  // Creating local regions on vm0 to compare the query results against.
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, Portfolio.class));
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(coloLocalName, NewPortfolio.class));
  // Generating portfolio object arrays to be populated across the PRs and local regions.
  final Portfolio[] portfolio = createPortfoliosAndPositions(cntDest);
  final NewPortfolio[] newPortfolio = createNewPortfoliosAndPositions(cntDest);
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, cnt, cntDest));
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloLocalName, newPortfolio, cnt, cntDest));
  // Putting the data into the PRs created above.
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
  vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio, cnt, cntDest));
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
  // Querying the VM for data and comparing the result with the query result of the local region.
  vm0.invoke(new CacheSerializableRunnable("PRQuery") {

    @Override
    public void run2() throws CacheException {
      Cache cache = getCache();
      // Querying the PR region.
      String[] queries = new String[] { "r1.ID = r2.id" };
      Object[][] r = new Object[queries.length][2];
      Region region = null;
      region = cache.getRegion(name);
      assertNotNull(region);
      region = cache.getRegion(coloName);
      assertNotNull(region);
      region = cache.getRegion(localName);
      assertNotNull(region);
      region = cache.getRegion(coloLocalName);
      assertNotNull(region);
      final String[] expectedExceptions =
          new String[] { RegionDestroyedException.class.getName(), ReplyException.class.getName(),
              CacheClosedException.class.getName(), ForceReattemptException.class.getName(),
              QueryInvocationTargetException.class.getName() };
      for (int i = 0; i < expectedExceptions.length; i++) {
        getCache().getLogger().info("<ExpectedException action=add>" + expectedExceptions[i] + "</ExpectedException>");
      }
      QueryService qs = getCache().getQueryService();
      Object[] params;
      try {
        for (int j = 0; j < queries.length; j++) {
          getCache().getLogger().info("About to execute local query: " + queries[j]);
          r[j][1] = qs.newQuery("Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "")
              + " * from /" + name + " r1, /" + coloName + " r2 where " + queries[j]).execute();
        }
        fail("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
      } catch (QueryInvocationTargetException e) {
        // A QueryInvocationTargetException is not expected here; fail the test.
        throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
      } catch (QueryException e) {
        LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying" + e, e);
        throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
      } catch (UnsupportedOperationException uso) {
        LogWriterUtils.getLogWriter().info(uso.getMessage());
        if (!uso.getMessage().equalsIgnoreCase(
            LocalizedStrings.DefaultQuery_A_QUERY_ON_A_PARTITIONED_REGION_0_MAY_NOT_REFERENCE_ANY_OTHER_REGION_1
                .toLocalizedString(new Object[] { name, "/" + coloName }))) {
          fail("Query did not throw UnsupportedOperationException while using QueryService instead of LocalQueryService");
        } else {
          LogWriterUtils.getLogWriter().info("Query received UnsupportedOperationException successfully while using QueryService.");
        }
      } finally {
        for (int i = 0; i < expectedExceptions.length; i++) {
          getCache().getLogger().info("<ExpectedException action=remove>" + expectedExceptions[i] + "</ExpectedException>");
        }
      }
    }
  });
  LogWriterUtils.getLogWriter().info("PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
}
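As the test demonstrates, a join across two partitioned regions is rejected when issued through the cache-wide QueryService; Geode expects such equi-joins on colocated regions to run where the data lives, typically from inside a function executed on the partitioned region. The sketch below illustrates that server-side pattern; the class name, region paths, and query text are assumptions rather than values from the test above.

import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.execute.RegionFunctionContext;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.SelectResults;

// Hypothetical server-side function that performs the equi-join on the colocated buckets.
public class ColocatedEquiJoinFunction implements Function {

  @Override
  public void execute(FunctionContext context) {
    RegionFunctionContext rfc = (RegionFunctionContext) context;
    Query query = CacheFactory.getAnyInstance().getQueryService()
        .newQuery("SELECT * FROM /Portfolios r1, /NewPortfolios r2 WHERE r1.ID = r2.id");
    try {
      // Executing with the RegionFunctionContext scopes the join to the local,
      // colocated buckets instead of routing it through the cache-wide QueryService.
      SelectResults<?> results = (SelectResults<?>) query.execute(rfc);
      context.getResultSender().lastResult(results.asList());
    } catch (Exception e) {
      throw new FunctionException(e);
    }
  }

  @Override
  public String getId() {
    return ColocatedEquiJoinFunction.class.getSimpleName();
  }
}

A client would then invoke it along the lines of FunctionService.onRegion(portfolios).withFilter(keys).execute(new ColocatedEquiJoinFunction()) and read the joined rows from the returned ResultCollector.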
Use of org.apache.geode.cache.query.QueryException in project geode by apache.
The class PartitionedRegion, method doExecuteQuery.
/**
 * If ForceReattemptException is thrown then the caller must loop and call us again.
 *
 * @throws ForceReattemptException if one of the buckets moved out from under us
 */
private Object doExecuteQuery(DefaultQuery query, Object[] parameters, Set buckets)
    throws FunctionDomainException, TypeMismatchException, NameResolutionException,
    QueryInvocationTargetException, ForceReattemptException {
  if (logger.isDebugEnabled()) {
    logger.debug("Executing query :{}", query);
  }
  HashSet<Integer> allBuckets = new HashSet<Integer>();
  if (buckets == null) {
    // remote buckets
    final Iterator remoteIter = getRegionAdvisor().getBucketSet().iterator();
    try {
      while (remoteIter.hasNext()) {
        allBuckets.add((Integer) remoteIter.next());
      }
    } catch (NoSuchElementException ignore) {
    }
  } else {
    // local buckets
    Iterator localIter = null;
    if (this.dataStore != null) {
      localIter = buckets.iterator();
    } else {
      localIter = Collections.emptySet().iterator();
    }
    try {
      while (localIter.hasNext()) {
        allBuckets.add((Integer) localIter.next());
      }
    } catch (NoSuchElementException ignore) {
    }
  }
  if (allBuckets.size() == 0) {
    if (logger.isDebugEnabled()) {
      logger.debug("No bucket storage allocated. PR has no data yet.");
    }
    ResultsSet resSet = new ResultsSet();
    resSet.setElementType(new ObjectTypeImpl(
        this.getValueConstraint() == null ? Object.class : this.getValueConstraint()));
    return resSet;
  }
  CompiledSelect selectExpr = query.getSimpleSelect();
  if (selectExpr == null) {
    throw new IllegalArgumentException(
        LocalizedStrings.PartitionedRegion_QUERY_MUST_BE_A_SELECT_EXPRESSION_ONLY.toLocalizedString());
  }
  // this can return a BAG even if it's a DISTINCT select expression,
  // since the expectation is that the duplicates will be removed at the end
  SelectResults results = selectExpr.getEmptyResultSet(parameters, getCache(), query);
  PartitionedRegionQueryEvaluator prqe = new PartitionedRegionQueryEvaluator(this.getSystem(),
      this, query, parameters, results, allBuckets);
  for (;;) {
    this.getCancelCriterion().checkCancelInProgress(null);
    boolean interrupted = Thread.interrupted();
    try {
      results = prqe.queryBuckets(null);
      break;
    } catch (InterruptedException ignore) {
      interrupted = true;
    } catch (FunctionDomainException e) {
      throw e;
    } catch (TypeMismatchException e) {
      throw e;
    } catch (NameResolutionException e) {
      throw e;
    } catch (QueryInvocationTargetException e) {
      throw e;
    } catch (QueryException qe) {
      throw new QueryInvocationTargetException(
          LocalizedStrings.PartitionedRegion_UNEXPECTED_QUERY_EXCEPTION_OCCURRED_DURING_QUERY_EXECUTION_0
              .toLocalizedString(qe.getMessage()),
          qe);
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  } // for
  // Drop Duplicates if this is a DISTINCT query
  boolean allowsDuplicates = results.getCollectionType().allowsDuplicates();
  // be exactly matching the limit
  if (selectExpr.isDistinct()) {
    // don't just convert to a ResultsSet (or StructSet), since
    // the bags can convert themselves to a Set more efficiently
    ObjectType elementType = results.getCollectionType().getElementType();
    if (selectExpr.getOrderByAttrs() != null) {
      // Set limit also, its not applied while building the final result set as order by is
      // involved.
    } else if (allowsDuplicates) {
      results = new ResultsCollectionWrapper(elementType, results.asSet());
    }
    if (selectExpr.isCount() && (results.isEmpty() || selectExpr.isDistinct())) {
      // Constructor with elementType not visible.
      SelectResults resultCount = new ResultsBag(getCachePerfStats());
      resultCount.setElementType(new ObjectTypeImpl(Integer.class));
      ((Bag) resultCount).addAndGetOccurence(results.size());
      return resultCount;
    }
  }
  return results;
}
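Per the Javadoc contract above, ForceReattemptException is the caller's signal to retry the whole query. A minimal caller-side sketch of that loop follows; the wrapping method name is hypothetical, and real code would normally bound the number of attempts.

private Object executeQueryWithRetry(DefaultQuery query, Object[] parameters)
    throws FunctionDomainException, TypeMismatchException, NameResolutionException,
    QueryInvocationTargetException {
  for (;;) {
    try {
      // A null bucket set selects all buckets, as in the remote-bucket branch above.
      return doExecuteQuery(query, parameters, null);
    } catch (ForceReattemptException retry) {
      // One of the buckets moved out from under us (e.g. during rebalancing); try again.
      if (logger.isDebugEnabled()) {
        logger.debug("Bucket moved during query execution, retrying", retry);
      }
    }
  }
}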
Use of org.apache.geode.cache.query.QueryException in project geode by apache.
The class RemoveIndexesMessage, method operateOnPartitionedRegion.
/**
 * This method is responsible for removing indexes on the given partitioned region.
 *
 * @param dm Distribution manager for the system
 * @param pr Partitioned region to remove indexes on.
 *
 * @throws CacheException indicates a cache level error
 * @throws ForceReattemptException if the peer is no longer available
 * @throws InterruptedException if the thread is interrupted during the operation, for example
 *         during shutdown.
 */
@Override
protected boolean operateOnPartitionedRegion(DistributionManager dm, PartitionedRegion pr, long startTime)
    throws CacheException, QueryException, ForceReattemptException, InterruptedException {
  // TODO Auto-generated method stub
  ReplyException replyEx = null;
  boolean result = true;
  // invalid
  int bucketIndexRemoved = 0;
  int numIndexesRemoved = 0;
  logger.info(LocalizedMessage.create(LocalizedStrings.RemoveIndexesMessage_WILL_REMOVE_THE_INDEXES_ON_THIS_PR___0, pr));
  try {
    if (this.removeSingleIndex) {
      bucketIndexRemoved = pr.removeIndex(this.indexName);
    } else {
      // remotely originated
      bucketIndexRemoved = pr.removeIndexes(true);
    }
    numIndexesRemoved = pr.getDataStore().getAllLocalBuckets().size();
  } catch (Exception ex) {
    result = false;
    replyEx = new ReplyException(ex);
  }
  // Send back the reply.
  sendReply(getSender(), getProcessorId(), dm, replyEx, result, bucketIndexRemoved, numIndexesRemoved);
  return false;
}
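Application code does not construct this message directly; it is sent between members when indexes are removed through the QueryService, roughly as in the sketch below. The class name, helper name, and the single-versus-all switch are illustrative assumptions.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.QueryService;

public class RemoveIndexExample {

  // Removes one named index, or all indexes, on the given region. For a partitioned
  // region the removal is propagated to the other data stores (internally via
  // RemoveIndexesMessage, as shown above).
  static void removeIndexes(Cache cache, String regionPath, String indexName) {
    QueryService qs = cache.getQueryService();
    Region<?, ?> region = cache.getRegion(regionPath);
    if (indexName != null) {
      Index index = qs.getIndex(region, indexName);
      if (index != null) {
        qs.removeIndex(index);
      }
    } else {
      qs.removeIndexes(region);
    }
  }
}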