Use of org.apache.geode.cache.query.QueryException in project geode by apache.
From the class EntryEventImpl, method setNewValueInRegion:
@Retained(ENTRY_EVENT_NEW_VALUE)
private void setNewValueInRegion(final LocalRegion owner, final RegionEntry reentry, Object oldValueForDelta) throws RegionClearedException {
boolean wasTombstone = reentry.isTombstone();
// If the event already carries the new value, the delta bytes should
// not be applied. This is possible if the event originated locally.
if (this.deltaBytes != null && this.newValue == null) {
processDeltaBytes(oldValueForDelta);
}
if (owner != null) {
owner.generateAndSetVersionTag(this, reentry);
} else {
this.region.generateAndSetVersionTag(this, reentry);
}
Object v = this.newValue;
if (v == null) {
v = isLocalInvalid() ? Token.LOCAL_INVALID : Token.INVALID;
} else {
this.region.regionInvalid = false;
}
reentry.setValueResultOfSearch(this.op.isNetSearch());
// Compute the size of the delta value before wrapping it; this code is only
// used when we do a put in the primary of a partitioned-region bucket.
if (v instanceof org.apache.geode.Delta && region.isUsedForPartitionedRegionBucket()) {
int vSize;
Object ov = basicGetOldValue();
if (ov instanceof CachedDeserializable && !GemFireCacheImpl.DELTAS_RECALCULATE_SIZE) {
vSize = ((CachedDeserializable) ov).getValueSizeInBytes();
} else {
vSize = CachedDeserializableFactory.calcMemSize(v, region.getObjectSizer(), false);
}
v = CachedDeserializableFactory.create(v, vSize);
basicSetNewValue(v);
}
Object preparedV = reentry.prepareValueForCache(this.region, v, this, false);
if (preparedV != v) {
v = preparedV;
if (v instanceof StoredObject) {
if (!((StoredObject) v).isCompressed()) {
// fix bug 52109
// If we put it off heap and it is not compressed then remember that value.
// Otherwise we want to remember the decompressed value in the event.
basicSetNewValue(v);
}
}
}
boolean isTombstone = (v == Token.TOMBSTONE);
boolean success = false;
boolean calledSetValue = false;
try {
setNewValueBucketSize(owner, v);
if ((this.op.isUpdate() && !reentry.isInvalid()) || this.op.isInvalidate()) {
IndexManager idxManager = IndexUtils.getIndexManager(this.region, false);
if (idxManager != null) {
try {
idxManager.updateIndexes(reentry, IndexManager.REMOVE_ENTRY, this.op.isUpdate() ? IndexProtocol.BEFORE_UPDATE_OP : IndexProtocol.OTHER_OP);
} catch (QueryException e) {
throw new IndexMaintenanceException(e);
}
}
}
calledSetValue = true;
// already called prepareValueForCache
reentry.setValueWithTombstoneCheck(v, this);
success = true;
} finally {
if (!success && reentry instanceof OffHeapRegionEntry && v instanceof StoredObject) {
OffHeapRegionEntryHelper.releaseEntry((OffHeapRegionEntry) reentry, (StoredObject) v);
}
}
if (logger.isTraceEnabled()) {
if (v instanceof CachedDeserializable) {
logger.trace("EntryEventImpl.setNewValueInRegion: put CachedDeserializable({},{})", this.getKey(), ((CachedDeserializable) v).getStringForm());
} else {
logger.trace("EntryEventImpl.setNewValueInRegion: put({},{})", this.getKey(), StringUtils.forceToString(v));
}
}
if (!isTombstone && wasTombstone) {
owner.unscheduleTombstone(reentry);
}
}
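Because QueryException is a checked exception, the index-maintenance path above converts it into the unchecked IndexMaintenanceException. A minimal sketch of that wrapping pattern in isolation, assuming an IndexManager and RegionEntry are already in hand (the helper name is illustrative, not part of Geode):

private static void removeEntryFromIndexes(IndexManager idxManager, RegionEntry entry) {
  try {
    // REMOVE_ENTRY with OTHER_OP mirrors the non-update branch above
    idxManager.updateIndexes(entry, IndexManager.REMOVE_ENTRY, IndexProtocol.OTHER_OP);
  } catch (QueryException e) {
    // Rethrow as unchecked so callers need not declare QueryException
    throw new IndexMaintenanceException(e);
  }
}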
Use of org.apache.geode.cache.query.QueryException in project geode by apache.
From the class BaseCommandQuery, method processQueryUsingParams:
/**
 * Processes the given query and sends the result set back to the client.
 *
 * @param msg the client message being processed
 * @param query the compiled query to execute
 * @param queryString the original query string
 * @param regionNames the names of the regions referenced by the query
 * @param start the start time of the request, used for statistics
 * @param cqQuery the CQ this query belongs to, or null for a plain query
 * @param queryContext the authorization context for the query
 * @param servConn the connection to the client
 * @param sendResults whether to stream results back to the client
 * @param params bind parameters for the query, or null
 * @return true on successful execution, false in case of failure
 * @throws IOException if writing the response to the client fails
 * @throws InterruptedException if the executing thread is interrupted
 */
protected boolean processQueryUsingParams(Message msg, Query query, String queryString, Set regionNames, long start, ServerCQ cqQuery, QueryOperationContext queryContext, ServerConnection servConn, boolean sendResults, Object[] params) throws IOException, InterruptedException {
ChunkedMessage queryResponseMsg = servConn.getQueryResponseMessage();
CacheServerStats stats = servConn.getCacheServerStats();
CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
{
long oldStart = start;
start = DistributionStats.getStatTime();
stats.incReadQueryRequestTime(start - oldStart);
}
// For clients at version GFE 7.0 or later, flag this as a remote query
// irrespective of the result object type.
if (servConn.getClientVersion().compareTo(Version.GFE_70) >= 0) {
((DefaultQuery) query).setRemoteQuery(true);
}
// Process the query request
try {
// Integrated security: authorize read access for every region in the query.
for (Object regionName : regionNames) {
this.securityService.authorizeRegionRead(regionName.toString());
}
// Execute query
// startTime = GenericStats.getTime();
// startTime = System.currentTimeMillis();
// For now we assume the results are a SelectResults
// which is the only possibility now, but this may change
// in the future if we support arbitrary queries
Object result = null;
if (params != null) {
result = query.execute(params);
} else {
result = query.execute();
}
// Asif : Before conditioning the results check if any
// of the regions involved in the query have been destroyed
// or not. If yes, throw an Exception.
// This is a workaround/fix for Bug 36969
Iterator itr = regionNames.iterator();
while (itr.hasNext()) {
String regionName = (String) itr.next();
if (crHelper.getRegion(regionName) == null) {
throw new RegionDestroyedException(LocalizedStrings.BaseCommand_REGION_DESTROYED_DURING_THE_EXECUTION_OF_THE_QUERY.toLocalizedString(), regionName);
}
}
AuthorizeRequestPP postAuthzRequest = servConn.getPostAuthzRequest();
if (postAuthzRequest != null) {
if (cqQuery == null) {
queryContext = postAuthzRequest.queryAuthorize(queryString, regionNames, result, queryContext, params);
} else {
queryContext = postAuthzRequest.executeCQAuthorize(cqQuery.getName(), queryString, regionNames, result, queryContext);
}
result = queryContext.getQueryResult();
}
if (result instanceof SelectResults) {
SelectResults selectResults = (SelectResults) result;
if (logger.isDebugEnabled()) {
logger.debug("Query Result size for : {} is {}", query.getQueryString(), selectResults.size());
}
CollectionType collectionType = null;
boolean sendCqResultsWithKey = true;
boolean isStructs = false;
// check if resultset has serialized objects, so that they could be sent
// as ObjectPartList
boolean hasSerializedObjects = ((DefaultQuery) query).isKeepSerialized();
if (logger.isDebugEnabled()) {
logger.debug("Query Result for :{} has serialized objects: {}", query.getQueryString(), hasSerializedObjects);
}
// Don't convert to a Set, there might be duplicates now
// The results in a StructSet are stored in Object[]s
// Get them as Object[]s for the objs[] in order to avoid duplicating
// the StructTypes
// Object[] objs = new Object[selectResults.size()];
// Get the collection type (which includes the element type)
// (used to generate the appropriate instance on the client)
collectionType = getCollectionType(selectResults);
isStructs = collectionType.getElementType().isStructType();
// Check if the Query is from CQ execution.
if (cqQuery != null) {
// Check if the key can be sent to the client based on its version.
sendCqResultsWithKey = sendCqResultsWithKey(servConn);
if (sendCqResultsWithKey) {
// Update the collection type to include key info.
collectionType = new CollectionTypeImpl(Collection.class, new StructTypeImpl(new String[] { "key", "value" }));
isStructs = collectionType.getElementType().isStructType();
}
}
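// One chunk per MAXIMUM_CHUNK_SIZE results, rounded up; an empty result
// yields zero chunks here and is handled below by sending a single empty chunk.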
int numberOfChunks = (int) Math.ceil(selectResults.size() * 1.0 / MAXIMUM_CHUNK_SIZE);
if (logger.isTraceEnabled()) {
logger.trace("{}: Query results size: {}: Entries in chunk: {}: Number of chunks: {}", servConn.getName(), selectResults.size(), MAXIMUM_CHUNK_SIZE, numberOfChunks);
}
long oldStart = start;
start = DistributionStats.getStatTime();
stats.incProcessQueryTime(start - oldStart);
if (sendResults) {
queryResponseMsg.setMessageType(MessageType.RESPONSE);
queryResponseMsg.setTransactionId(msg.getTransactionId());
queryResponseMsg.sendHeader();
}
if (sendResults && numberOfChunks == 0) {
// Send 1 empty chunk
if (logger.isTraceEnabled()) {
logger.trace("{}: Creating chunk: 0", servConn.getName());
}
writeQueryResponseChunk(new Object[0], collectionType, true, servConn);
if (logger.isDebugEnabled()) {
logger.debug("{}: Sent chunk (1 of 1) of query response for query {}", servConn.getName(), queryString);
}
} else {
// send it as a part of ObjectPartList
if (hasSerializedObjects) {
sendResultsAsObjectPartList(numberOfChunks, servConn, selectResults.asList(), isStructs, collectionType, queryString, cqQuery, sendCqResultsWithKey, sendResults);
} else {
sendResultsAsObjectArray(selectResults, numberOfChunks, servConn, isStructs, collectionType, queryString, cqQuery, sendCqResultsWithKey, sendResults);
}
}
if (cqQuery != null) {
// Set the CQ query result cache initialized flag.
cqQuery.setCqResultsCacheInitialized();
}
} else if (result instanceof Integer) {
if (sendResults) {
queryResponseMsg.setMessageType(MessageType.RESPONSE);
queryResponseMsg.setTransactionId(msg.getTransactionId());
queryResponseMsg.sendHeader();
writeQueryResponseChunk(result, null, true, servConn);
}
} else {
throw new QueryInvalidException(LocalizedStrings.BaseCommand_UNKNOWN_RESULT_TYPE_0.toLocalizedString(result.getClass()));
}
msg.clearParts();
} catch (QueryInvalidException e) {
// Handle this exception differently since it can contain
// non-serializable objects, e.g. java.io.NotSerializableException: antlr.CommonToken.
// Log a warning to show the stack trace and create a new
// QueryInvalidException from the original one's message (not its cause).
logger.warn(LocalizedMessage.create(LocalizedStrings.BaseCommand_UNEXPECTED_QUERYINVALIDEXCEPTION_WHILE_PROCESSING_QUERY_0, queryString), e);
QueryInvalidException qie = new QueryInvalidException(LocalizedStrings.BaseCommand_0_QUERYSTRING_IS_1.toLocalizedString(new Object[] { e.getLocalizedMessage(), queryString }));
writeQueryResponseException(msg, qie, servConn);
return false;
} catch (DistributedSystemDisconnectedException se) {
if (msg != null && logger.isDebugEnabled()) {
logger.debug("{}: ignoring message of type {} from client {} because shutdown occurred during message processing.", servConn.getName(), MessageType.getString(msg.getMessageType()), servConn.getProxyID());
}
servConn.setFlagProcessMessagesAsFalse();
servConn.setClientDisconnectedException(se);
return false;
} catch (Exception e) {
// If an interrupted exception is thrown, rethrow it
checkForInterrupt(servConn, e);
// Otherwise, write a query response and continue
// Check if query got canceled from QueryMonitor.
DefaultQuery defaultQuery = (DefaultQuery) query;
if (defaultQuery.isCanceled()) {
e = new QueryException(defaultQuery.getQueryCanceledException().getMessage(), e.getCause());
}
writeQueryResponseException(msg, e, servConn);
return false;
} finally {
// Since the query object is being shared in case of bind queries,
// resetting the flag may cause inconsistency.
// Also since this flag is only being set in code path executed by
// remote query execution, resetting it is not required.
// ((DefaultQuery)query).setRemoteQuery(false);
}
if (logger.isDebugEnabled()) {
logger.debug("{}: Sent query response for query {}", servConn.getName(), queryString);
}
stats.incWriteQueryResponseTime(DistributionStats.getStatTime() - start);
return true;
}
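For comparison, a hedged sketch of the same error-handling shape on the public API side; the cache variable, region path, and OQL string are illustrative:

// Illustrative only: the checked query failures (FunctionDomainException,
// TypeMismatchException, NameResolutionException, QueryInvocationTargetException)
// all extend QueryException, so one catch block covers them.
QueryService queryService = cache.getQueryService();
Query q = queryService.newQuery("SELECT DISTINCT * FROM /portfolios p WHERE p.status = 'active'");
try {
  SelectResults results = (SelectResults) q.execute();
  System.out.println("result size: " + results.size());
} catch (QueryException e) {
  // Surface the failure; a cache server would write an error response instead
  throw new RuntimeException("query failed: " + q.getQueryString(), e);
}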
Use of org.apache.geode.cache.query.QueryException in project geode by apache.
From the class PRQueryDUnitHelper, method getCacheSerializableRunnableForPRAndRRQueryAndCompareResults:
public SerializableRunnableIF getCacheSerializableRunnableForPRAndRRQueryAndCompareResults(final String name, final String coloName, final String localName, final String coloLocalName) {
SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
@Override
public void run2() throws CacheException {
Cache cache = getCache();
// Querying the PR region
String[] queries = new String[] { "r1.ID = r2.id", "r1.ID = r2.id AND r1.ID > 5", "r1.ID = r2.id AND r1.status = 'active'", // "r1.ID = r2.id LIMIT 10",
"r1.ID = r2.id ORDER BY r1.ID", "r1.ID = r2.id ORDER BY r2.id", "r1.ID = r2.id ORDER BY r2.status", "r1.ID = r2.id AND r1.status != r2.status", "r1.ID = r2.id AND r1.status = r2.status", "r1.ID = r2.id AND r1.positions.size = r2.positions.size", "r1.ID = r2.id AND r1.positions.size > r2.positions.size", "r1.ID = r2.id AND r1.positions.size < r2.positions.size", "r1.ID = r2.id AND r1.positions.size = r2.positions.size AND r2.positions.size > 0", "r1.ID = r2.id AND (r1.positions.size > r2.positions.size OR r2.positions.size > 0)", "r1.ID = r2.id AND (r1.positions.size < r2.positions.size OR r1.positions.size > 0)" };
Object[][] r = new Object[queries.length][2];
Region region = null;
region = cache.getRegion(name);
assertNotNull(region);
region = cache.getRegion(coloName);
assertNotNull(region);
region = cache.getRegion(localName);
assertNotNull(region);
region = cache.getRegion(coloLocalName);
assertNotNull(region);
final String[] expectedExceptions = new String[] { RegionDestroyedException.class.getName(), ReplyException.class.getName(), CacheClosedException.class.getName(), ForceReattemptException.class.getName(), QueryInvocationTargetException.class.getName() };
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
}
QueryService qs = getCache().getQueryService();
Object[] params;
try {
for (int j = 0; j < queries.length; j++) {
getCache().getLogger().info("About to execute local query: " + queries[j]);
Function func = new TestQueryFunction("testfunction");
Object funcResult = FunctionService
    .onRegion((getCache().getRegion(name) instanceof PartitionedRegion)
        ? getCache().getRegion(name) : getCache().getRegion(coloName))
    .setArguments("<trace> Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "")
        + " * from /" + name + " r1, /" + coloName + " r2, r2.positions.values pos2 where "
        + queries[j])
    .execute(func)
    .getResult();
r[j][0] = ((ArrayList) funcResult).get(0);
getCache().getLogger().info("About to execute local query: " + queries[j]);
SelectResults r2 = (SelectResults) qs.newQuery("Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "") + " * from /" + localName + " r1, /" + coloLocalName + " r2, r2.positions.values pos2 where " + queries[j]).execute();
r[j][1] = r2.asList();
}
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
// compareTwoQueryResults(r, queries.length);
StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
ssORrs.CompareQueryResultsAsListWithoutAndWithIndexes(r, queries.length, false, false, queries);
} catch (QueryInvocationTargetException e) {
// A QueryInvocationTargetException is unexpected here, so fail the test
// without examining the cause.
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
} catch (QueryException e) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying: " + e, e);
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
} catch (RegionDestroyedException rde) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
} catch (CancelException cce) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
} finally {
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
}
}
}
};
return (CacheSerializableRunnable) PrRegion;
}
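The add/remove ExpectedException bracketing that this helper (and the ones below) relies on can be isolated as a small pattern; the cache variable and exception list here are stand-ins:

// Tell the dUnit log scraper which exceptions are expected, run the work,
// then always remove the markers so later tests are unaffected.
final String[] expectedExceptions = { RegionDestroyedException.class.getName(),
    CacheClosedException.class.getName() };
for (String expected : expectedExceptions) {
  cache.getLogger().info("<ExpectedException action=add>" + expected + "</ExpectedException>");
}
try {
  // ... execute the queries under test ...
} finally {
  for (String expected : expectedExceptions) {
    cache.getLogger().info("<ExpectedException action=remove>" + expected + "</ExpectedException>");
  }
}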
Use of org.apache.geode.cache.query.QueryException in project geode by apache.
From the class PRQueryDUnitHelper, method getCacheSerializableRunnableForPRQueryAndCompareResults:
public CacheSerializableRunnable getCacheSerializableRunnableForPRQueryAndCompareResults(final String regionName, final String localRegion, final boolean fullQueryOnPortfolioPositions) {
SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
@Override
public void run2() throws CacheException {
Cache cache = getCache();
// Querying the localRegion and the PR region
String[] queries;
if (fullQueryOnPortfolioPositions) {
queries = new String[] { "import org.apache.geode.cache.\"query\".data.Position;" + "select distinct r.ID, status, mktValue " + "from $1 r, r.positions.values pVal TYPE Position " + "where r.status = 'active' AND pVal.mktValue >= 25.00", "import org.apache.geode.cache.\"query\".data.Position;" + "select distinct * " + "from $1 r, r.positions.values pVal TYPE Position " + "where r.status = 'active' AND pVal.mktValue >= 25.00", "import org.apache.geode.cache.\"query\".data.Position;" + "select distinct ID " + "from $1 r, r.positions.values pVal TYPE Position " + "where r.status = 'active' AND pVal.mktValue >= 25.00", "select distinct * " + "from $1 " + "where status = 'active'", "import org.apache.geode.cache.\"query\".data.Position;" + "select distinct r from $1 r, " + "r.positions.values pVal TYPE Position where pVal.mktValue < $2", "select p.positions.get('acc') from $1 p" };
} else {
queries = new String[] { "ID = 0 OR ID = 1", "ID > 4 AND ID < 9", "ID = 5", "ID < 5 ", "ID <= 5" };
}
Object[][] r = new Object[queries.length][2];
Region local = cache.getRegion(localRegion);
Region region = cache.getRegion(regionName);
assertNotNull(region);
final String[] expectedExceptions = new String[] { RegionDestroyedException.class.getName(), ReplyException.class.getName(), CacheClosedException.class.getName(), ForceReattemptException.class.getName(), QueryInvocationTargetException.class.getName() };
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
}
QueryService qs = getCache().getQueryService();
Object[] params;
try {
for (int j = 0; j < queries.length; j++) {
synchronized (region) {
if (fullQueryOnPortfolioPositions) {
params = new Object[] { local, new Double((j % 25) * 1.0 + 1) };
r[j][0] = qs.newQuery(queries[j]).execute(params);
} else {
r[j][0] = local.query(queries[j]);
}
if (fullQueryOnPortfolioPositions) {
params = new Object[] { region, new Double((j % 25) * 1.0 + 1) };
r[j][1] = qs.newQuery(queries[j]).execute(params);
} else {
r[j][1] = region.query(queries[j]);
}
}
}
compareTwoQueryResults(r, queries.length);
} catch (QueryInvocationTargetException e) {
// If the cause is a RegionDestroyedException then it's okay
Throwable cause = e.getCause();
if (!(cause instanceof RegionDestroyedException)) {
// Any other cause is not okay; fail the test
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
}
} catch (QueryException e) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying: " + e, e);
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
} catch (RegionDestroyedException rde) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
} catch (CancelException cce) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
} finally {
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
}
}
}
};
return (CacheSerializableRunnable) PrRegion;
}
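A short sketch of the two execution paths this helper alternates between, with illustrative variable values; Region.query takes only a WHERE-clause predicate, while QueryService.newQuery takes a full OQL statement and supports bind parameters:

// Predicate-only form: the region supplies the FROM clause implicitly.
SelectResults predicateResults = region.query("ID > 4 AND ID < 9");

// Full-statement form with bind parameters: $1 binds the region, $2 a bound.
Query boundQuery = qs.newQuery("import org.apache.geode.cache.\"query\".data.Position;"
    + "select distinct r from $1 r, "
    + "r.positions.values pVal TYPE Position where pVal.mktValue < $2");
Object boundResults = boundQuery.execute(new Object[] { region, 25.00 });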
Use of org.apache.geode.cache.query.QueryException in project geode by apache.
From the class PRQueryDUnitHelper, method getCacheSerializableRunnableForPROrderByQueryAndVerifyOrder:
public CacheSerializableRunnable getCacheSerializableRunnableForPROrderByQueryAndVerifyOrder(final String regionName, final String localRegion) {
SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {
@Override
public void run2() throws CacheException {
Cache cache = getCache();
// Querying the localRegion and the PR region
String[] queries = new String[] { "p.status from /REGION_NAME p order by p.status", "status, ID from /REGION_NAME order by status, ID", "p.status, p.ID from /REGION_NAME p order by p.status, p.ID", "key.ID from /REGION_NAME.keys key order by key.ID", "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID", "key.ID, key.status from /REGION_NAME.keys key order by key.status desc, key.ID", "key.ID, key.status from /REGION_NAME.keys key order by key.status, key.ID desc", "p.status, p.ID from /REGION_NAME p order by p.status asc, p.ID", "p.ID, p.status from /REGION_NAME p order by p.ID desc, p.status asc", "p.ID from /REGION_NAME p, p.positions.values order by p.ID", "p.ID, p.status from /REGION_NAME p, p.positions.values order by p.status, p.ID", "pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId", "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by pos.secId, p.ID", "p.iD from /REGION_NAME p order by p.iD", "p.iD, p.status from /REGION_NAME p order by p.iD", "iD, status from /REGION_NAME order by iD", "p.getID() from /REGION_NAME p order by p.getID()", "p.names[1] from /REGION_NAME p order by p.names[1]", "p.position1.secId, p.ID from /REGION_NAME p order by p.position1.secId desc, p.ID", "p.ID, p.position1.secId from /REGION_NAME p order by p.position1.secId, p.ID", "e.key.ID from /REGION_NAME.entries e order by e.key.ID", "e.key.ID, e.value.status from /REGION_NAME.entries e order by e.key.ID", "e.key.ID, e.value.status from /REGION_NAME.entrySet e order by e.key.ID desc , e.value.status desc", "e.key, e.value from /REGION_NAME.entrySet e order by e.key.ID, e.value.status desc", "e.key from /REGION_NAME.entrySet e order by e.key.ID desc, e.key.pkid desc", "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by p.ID, pos.secId", "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by p.ID desc, pos.secId desc", "p.ID, pos.secId from /REGION_NAME p, p.positions.values pos order by p.ID desc, pos.secId" };
Object[][] r = new Object[1][2];
Region local = cache.getRegion(localRegion);
Region region = cache.getRegion(regionName);
assertNotNull(region);
final String[] expectedExceptions = new String[] { RegionDestroyedException.class.getName(), ReplyException.class.getName(), CacheClosedException.class.getName(), ForceReattemptException.class.getName(), QueryInvocationTargetException.class.getName() };
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
}
String distinct = "SELECT DISTINCT ";
QueryService qs = getCache().getQueryService();
Object[] params;
StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
try {
for (final String query : queries) {
String qStr = null;
synchronized (region) {
// Execute on local region.
qStr = (distinct + query.replace("REGION_NAME", localRegion));
r[0][0] = qs.newQuery(qStr).execute();
// Execute on remote region.
qStr = (distinct + query.replace("REGION_NAME", regionName));
r[0][1] = qs.newQuery(qStr).execute();
ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, 1, true, queries);
}
}
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
} catch (QueryInvocationTargetException e) {
// A QueryInvocationTargetException is unexpected here, so fail the test.
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
} catch (QueryException e) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying: " + e, e);
throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
} catch (RegionDestroyedException rde) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
} catch (CancelException cce) {
org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
} finally {
for (final String expectedException : expectedExceptions) {
getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
}
}
}
};
return (CacheSerializableRunnable) PrRegion;
}
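The query construction in this helper reduces to a template substitution; a minimal sketch with illustrative names:

// Each template references REGION_NAME; the helper prefixes SELECT DISTINCT
// and substitutes the local or partitioned region name before executing.
String template = "p.status, p.ID from /REGION_NAME p order by p.status, p.ID";
String localQuery = "SELECT DISTINCT " + template.replace("REGION_NAME", localRegion);
String prQuery = "SELECT DISTINCT " + template.replace("REGION_NAME", regionName);
r[0][0] = qs.newQuery(localQuery).execute();
r[0][1] = qs.newQuery(prQuery).execute();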