Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class PRQueryDUnitTest, method testSimulatedDataLossBeforeQueryProcessor.
/**
* Simulates data loss (buckets 0 and 2) before the PartitionedRegionQueryEvaluator begins the
* query loop.
*/
@Test
public void testSimulatedDataLossBeforeQueryProcessor() throws Exception {
final String rName = getUniqueName();
Host host = Host.getHost(0);
VM accessor = host.getVM(1);
VM datastore1 = host.getVM(2);
VM datastore2 = host.getVM(3);
final int totalBuckets = 11;
CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
@Override
public void run2() throws CacheException {
AttributesFactory attr = new AttributesFactory();
attr.setValueConstraint(String.class);
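// One redundant copy across 11 total buckets; buckets are created lazily on first put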
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(totalBuckets).create();
attr.setPartitionAttributes(prAttr);
getCache().createRegion(rName, attr.create());
}
};
datastore1.invoke(createPR);
datastore2.invoke(createPR);
accessor.invoke(new CacheSerializableRunnable("Create accessor PR") {
@Override
public void run2() throws CacheException {
AttributesFactory attr = new AttributesFactory();
attr.setValueConstraint(String.class);
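// setLocalMaxMemory(0) makes this member a pure accessor: it hosts no buckets and only routes operations to the datastores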
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(totalBuckets).setLocalMaxMemory(0).create();
attr.setPartitionAttributes(prAttr);
getCache().createRegion(rName, attr.create());
}
});
// Register the expected exception string with the accessor VM
final IgnoredException ex = IgnoredException.addIgnoredException("Data loss detected", accessor);
accessor.invoke(new SerializableCallable("Create bucket and test dataloss query") {
public Object call() throws Exception {
PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
// Create bucket one
pr.put(new Integer(1), "one");
Object[] params = new Object[0];
final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery("select distinct * from " + pr.getFullPath());
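// Build an empty SelectResults of the right element type without executing the query; the evaluator below fills it in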
final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
// Fake data loss: require buckets 0-2 when only the bucket for key 1 exists, so buckets 0 and 2 appear lost
final HashSet<Integer> buckets = new HashSet<Integer>();
for (int i = 0; i < 3; i++) {
buckets.add(new Integer(i));
}
try {
PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
qe.queryBuckets(null);
fail("Expected a QueryException: buckets 0 and 2 were never created");
} catch (QueryException expected) {
// data loss detected, as intended
}
// getLogWriter().info("Select results are: " + results);
return Boolean.TRUE;
}
});
ex.remove();
}
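One caveat in the test above: if the invocation throws, the ex.remove() call just before the closing brace never runs. A minimal sketch of the try/finally shape that guarantees cleanup, using the same dunit API (the callable body is elided):

IgnoredException ex = IgnoredException.addIgnoredException("Data loss detected", accessor);
try {
  accessor.invoke(new SerializableCallable("query with simulated data loss") {
    public Object call() throws Exception {
      // ... body as in the test above ...
      return Boolean.TRUE;
    }
  });
} finally {
  ex.remove(); // runs even when the invocation throws, so the suppression never leaks
}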
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class PRQueryRemoteNodeExceptionDUnitTest, method testCacheCloseExceptionFromLocalAndRemote.
@Test
public void testCacheCloseExceptionFromLocalAndRemote() throws Exception {
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Querying with PR Local/Remote Exception test started");
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
setCacheInVMs(vm0, vm1);
List vmList = new LinkedList();
vmList.add(vm1);
vmList.add(vm0);
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Creating PRs across VM0 and VM1");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Successfully created PRs on VM0 and VM1");
// Create a local region on one of the JVMs
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Creating local region on VM0");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Successfully created local region on VM0");
// Generate the portfolio object array to be populated across the PR and local regions
final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
// Put the data through the accessor node
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Inserting portfolio data through the accessor node");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Successfully inserted portfolio data through the accessor node");
// Put the same data into the local region for result set comparison
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Inserting portfolio data on local node VM0 for result set comparison");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Successfully inserted portfolio data on local node VM0 for result set comparison");
// Install the test hooks on the local and remote nodes. The hook on the remote node
// destroys the region mid-query (surfacing as a CacheException), while the hook on the
// local node closes the cache (surfacing as a CacheClosedException or
// QueryInvocationTargetException).
vm1.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
class MyQueryObserver extends IndexTrackingQueryObserver {
private int noOfAccess = 0;
@Override
public void afterIterationEvaluation(Object result) {
LogWriterUtils.getLogWriter().info("Calling afterIterationEvaluation: " + noOfAccess);
if (noOfAccess > 2) {
PRQHelp.getCache().getRegion(name).destroyRegion();
}
++noOfAccess;
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
}
});
vm0.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
boolean gotException = false;
Cache cache = PRQHelp.getCache();
class MyQueryObserver extends QueryObserverAdapter {
private int noOfAccess = 0;
@Override
public void afterIterationEvaluation(Object result) {
// Object region = ((DefaultQuery)query).getRegionsInQuery(null).iterator().next();
LogWriterUtils.getLogWriter().info("Calling afterIterationEvaluation: " + noOfAccess);
if (noOfAccess > 2) {
PRQHelp.getCache().close();
}
++noOfAccess;
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
final DefaultQuery query = (DefaultQuery) cache.getQueryService().newQuery("Select * from /" + name + " p where p.ID > 0");
try {
query.execute();
} catch (Exception ex) {
gotException = true;
if (ex instanceof CacheClosedException || ex instanceof QueryInvocationTargetException) {
LogWriterUtils.getLogWriter().info(ex.getMessage());
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from local node successfully.");
} else {
Assert.fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive the expected exception from the local node; instead received", ex);
}
}
if (!gotException) {
fail("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Test did not receive Exception as expected from local as well as remote node");
}
}
});
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testCacheCloseExceptionFromLocalAndRemote: Querying with PR Local/Remote Exception test ended");
}
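Both VMs install essentially the same hook: a QueryObserver whose afterIterationEvaluation callback triggers a failure after a few evaluations. A hedged, reusable sketch of that pattern (FailAfterN is a hypothetical helper name; QueryObserverAdapter and QueryObserverHolder are the geode classes used above):

// Hypothetical helper mirroring the two MyQueryObserver classes above.
class FailAfterN extends QueryObserverAdapter {
  private final int threshold;
  private final Runnable failureAction;
  private int calls;

  FailAfterN(int threshold, Runnable failureAction) {
    this.threshold = threshold;
    this.failureAction = failureAction;
  }

  @Override
  public void afterIterationEvaluation(Object result) {
    if (calls++ > threshold) {
      failureAction.run(); // e.g. destroy the region or close the cache
    }
  }
}

// Usage: fail the local node after the third evaluation.
QueryObserverHolder.setInstance(new FailAfterN(2, () -> PRQHelp.getCache().close()));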
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class PRQueryDUnitTest, method testQueryResultsFromMembersWithAccessor.
@Test
public void testQueryResultsFromMembersWithAccessor() throws Exception {
final String rName = getUniqueName();
Host host = Host.getHost(0);
final VM datastore1 = host.getVM(2);
final VM datastore2 = host.getVM(3);
final int totalBuckets = 20;
final int redCop = 0;
CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
public void run2() throws CacheException {
AttributesFactory attr = new AttributesFactory();
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
attr.setPartitionAttributes(prAttr);
getCache().createRegion(rName, attr.create());
}
};
datastore1.invoke(createPR);
datastore2.invoke(createPR);
AttributesFactory attr = new AttributesFactory();
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).setLocalMaxMemory(0).create();
attr.setPartitionAttributes(prAttr);
PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());
// Populate the region: 100 entries hash across all 20 buckets
int numEntries = 100;
for (int i = 1; i <= numEntries; i++) {
pr.put(new Integer(i), new Portfolio(i));
}
int[] limit = new int[] { 10, 15, 30, 0, 1, 9 };
String[] queries = new String[] {
"select * from " + pr.getFullPath() + " LIMIT " + limit[0],
"select * from " + pr.getFullPath() + " LIMIT " + limit[1],
"select * from " + pr.getFullPath() + " LIMIT " + limit[2],
"select * from " + pr.getFullPath() + " LIMIT " + limit[3],
"select * from " + pr.getFullPath() + " LIMIT " + limit[4],
"select * from " + pr.getFullPath() + " where ID > 10 LIMIT " + limit[5] };
try {
for (int q = 0; q < queries.length; q++) {
Object[] params = new Object[0];
final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery(queries[q]);
final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
// TODO: assert this is the correct set of bucket IDs
final HashSet<Integer> buckets = new HashSet<Integer>();
for (int b = 0; b < totalBuckets; b++) {
buckets.add(b);
}
final PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {
public HashMap resultsPerMember = new HashMap();
public void hook(int spot) throws RuntimeException {
if (spot == 3) {
for (Object mr : qe.getResultsPerMember().entrySet()) {
Map.Entry e = (Map.Entry) mr;
Collection<Collection> results = (Collection<Collection>) e.getValue();
for (Collection<Object> r : results) {
if (this.resultsPerMember.containsKey(e.getKey())) {
this.resultsPerMember.put(e.getKey(), new Integer(r.size() + ((Integer) this.resultsPerMember.get(e.getKey())).intValue()));
} else {
this.resultsPerMember.put(e.getKey(), new Integer(r.size()));
}
}
}
}
}
}
final MyTestHook th = new MyTestHook();
qe.queryBuckets(th);
for (Object r : th.resultsPerMember.entrySet()) {
Map.Entry e = (Map.Entry) r;
Integer res = (Integer) e.getValue();
LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembersWithAccessor: \n" + "Query [" + queries[q] + "] Member : " + e.getKey() + " results size :" + res.intValue());
if (res.intValue() != 0 && /* the accessor member returns 0 */
res.intValue() != limit[q]) {
assertEquals("Query [" + queries[q] + "]: The results returned by the member do not match the query limit size. Member: " + e.getKey(), limit[q], res.intValue());
}
}
}
} finally {
getCache().close();
}
}
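As an aside, the raw-typed tallying inside MyTestHook can be written more compactly with generics and Map.merge; a minimal sketch, where memberResults stands in for the raw map returned by qe.getResultsPerMember() and the Object key type is an assumption:

@SuppressWarnings("unchecked")
Map<Object, Collection<Collection>> memberResults = qe.getResultsPerMember();
Map<Object, Integer> counts = new HashMap<>();
for (Map.Entry<Object, Collection<Collection>> e : memberResults.entrySet()) {
  int total = 0;
  for (Collection<?> perBucket : e.getValue()) {
    total += perBucket.size(); // sum the bucket-level result sets for this member
  }
  counts.merge(e.getKey(), total, Integer::sum);
}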
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class BaseCommandQuery, method processQueryUsingParams.
/**
* Processes the given query and sends the result set back to the client.
*
* @return true on successful execution, false in case of failure
*/
protected boolean processQueryUsingParams(Message msg, Query query, String queryString, Set regionNames, long start, ServerCQ cqQuery, QueryOperationContext queryContext, ServerConnection servConn, boolean sendResults, Object[] params) throws IOException, InterruptedException {
ChunkedMessage queryResponseMsg = servConn.getQueryResponseMessage();
CacheServerStats stats = servConn.getCacheServerStats();
CachedRegionHelper crHelper = servConn.getCachedRegionHelper();
{
long oldStart = start;
start = DistributionStats.getStatTime();
stats.incReadQueryRequestTime(start - oldStart);
}
// Mark the query as remote for clients at version GFE 7.0 or later; see the isKeepSerialized check below
if (servConn.getClientVersion().compareTo(Version.GFE_70) >= 0) {
((DefaultQuery) query).setRemoteQuery(true);
}
// Process the query request
try {
// integrated security
for (Object regionName : regionNames) {
this.securityService.authorizeRegionRead(regionName.toString());
}
// Execute the query. For now we assume the results are a SelectResults, which is the
// only possibility today, but this may change if arbitrary queries are supported in
// the future.
Object result = null;
if (params != null) {
result = query.execute(params);
} else {
result = query.execute();
}
// Before conditioning the results, check whether any of the regions involved in the
// query have been destroyed. If so, throw an exception. This is a workaround/fix for
// Bug 36969.
Iterator itr = regionNames.iterator();
while (itr.hasNext()) {
String regionName = (String) itr.next();
if (crHelper.getRegion(regionName) == null) {
throw new RegionDestroyedException(LocalizedStrings.BaseCommand_REGION_DESTROYED_DURING_THE_EXECUTION_OF_THE_QUERY.toLocalizedString(), regionName);
}
}
AuthorizeRequestPP postAuthzRequest = servConn.getPostAuthzRequest();
if (postAuthzRequest != null) {
if (cqQuery == null) {
queryContext = postAuthzRequest.queryAuthorize(queryString, regionNames, result, queryContext, params);
} else {
queryContext = postAuthzRequest.executeCQAuthorize(cqQuery.getName(), queryString, regionNames, result, queryContext);
}
result = queryContext.getQueryResult();
}
if (result instanceof SelectResults) {
SelectResults selectResults = (SelectResults) result;
if (logger.isDebugEnabled()) {
logger.debug("Query Result size for : {} is {}", query.getQueryString(), selectResults.size());
}
CollectionType collectionType = null;
boolean sendCqResultsWithKey = true;
boolean isStructs = false;
// Check whether the result set holds serialized objects, so that they can be sent
// as an ObjectPartList
boolean hasSerializedObjects = ((DefaultQuery) query).isKeepSerialized();
if (logger.isDebugEnabled()) {
logger.debug("Query Result for :{} has serialized objects: {}", query.getQueryString(), hasSerializedObjects);
}
// Don't convert to a Set; there might be duplicates now.
// The results in a StructSet are stored in Object[]s; get them as Object[]s to avoid
// duplicating the StructTypes.
// Get the collection type (which includes the element type), used to generate the
// appropriate instance on the client.
collectionType = getCollectionType(selectResults);
isStructs = collectionType.getElementType().isStructType();
// Check if the Query is from CQ execution.
if (cqQuery != null) {
// Check if the key can be sent to the client based on its version.
sendCqResultsWithKey = sendCqResultsWithKey(servConn);
if (sendCqResultsWithKey) {
// Update the collection type to include key info.
collectionType = new CollectionTypeImpl(Collection.class, new StructTypeImpl(new String[] { "key", "value" }));
isStructs = collectionType.getElementType().isStructType();
}
}
int numberOfChunks = (int) Math.ceil(selectResults.size() * 1.0 / MAXIMUM_CHUNK_SIZE);
if (logger.isTraceEnabled()) {
logger.trace("{}: Query results size: {}: Entries in chunk: {}: Number of chunks: {}", servConn.getName(), selectResults.size(), MAXIMUM_CHUNK_SIZE, numberOfChunks);
}
long oldStart = start;
start = DistributionStats.getStatTime();
stats.incProcessQueryTime(start - oldStart);
if (sendResults) {
queryResponseMsg.setMessageType(MessageType.RESPONSE);
queryResponseMsg.setTransactionId(msg.getTransactionId());
queryResponseMsg.sendHeader();
}
if (sendResults && numberOfChunks == 0) {
// Send 1 empty chunk
if (logger.isTraceEnabled()) {
logger.trace("{}: Creating chunk: 0", servConn.getName());
}
writeQueryResponseChunk(new Object[0], collectionType, true, servConn);
if (logger.isDebugEnabled()) {
logger.debug("{}: Sent chunk (1 of 1) of query response for query {}", servConn.getName(), queryString);
}
} else {
// send it as a part of ObjectPartList
if (hasSerializedObjects) {
sendResultsAsObjectPartList(numberOfChunks, servConn, selectResults.asList(), isStructs, collectionType, queryString, cqQuery, sendCqResultsWithKey, sendResults);
} else {
sendResultsAsObjectArray(selectResults, numberOfChunks, servConn, isStructs, collectionType, queryString, cqQuery, sendCqResultsWithKey, sendResults);
}
}
if (cqQuery != null) {
// Set the CQ query result cache initialized flag.
cqQuery.setCqResultsCacheInitialized();
}
} else if (result instanceof Integer) {
if (sendResults) {
queryResponseMsg.setMessageType(MessageType.RESPONSE);
queryResponseMsg.setTransactionId(msg.getTransactionId());
queryResponseMsg.sendHeader();
writeQueryResponseChunk(result, null, true, servConn);
}
} else {
throw new QueryInvalidException(LocalizedStrings.BaseCommand_UNKNOWN_RESULT_TYPE_0.toLocalizedString(result.getClass()));
}
msg.clearParts();
} catch (QueryInvalidException e) {
// Handle this exception differently since it can contain
// non-serializable objects.
// java.io.NotSerializableException: antlr.CommonToken
// Log a warning to show stack trace and create a new
// QueryInvalidEsception on the original one's message (not cause).
logger.warn(LocalizedMessage.create(LocalizedStrings.BaseCommand_UNEXPECTED_QUERYINVALIDEXCEPTION_WHILE_PROCESSING_QUERY_0, queryString), e);
QueryInvalidException qie = new QueryInvalidException(LocalizedStrings.BaseCommand_0_QUERYSTRING_IS_1.toLocalizedString(new Object[] { e.getLocalizedMessage(), queryString }));
writeQueryResponseException(msg, qie, servConn);
return false;
} catch (DistributedSystemDisconnectedException se) {
if (msg != null && logger.isDebugEnabled()) {
logger.debug("{}: ignoring message of type {} from client {} because shutdown occurred during message processing.", servConn.getName(), MessageType.getString(msg.getMessageType()), servConn.getProxyID());
}
servConn.setFlagProcessMessagesAsFalse();
servConn.setClientDisconnectedException(se);
return false;
} catch (Exception e) {
// If an interrupted exception was thrown, rethrow it.
checkForInterrupt(servConn, e);
// Otherwise, write a query response and continue.
// Check whether the query was canceled by the QueryMonitor.
DefaultQuery defaultQuery = (DefaultQuery) query;
if (defaultQuery.isCanceled()) {
e = new QueryException(defaultQuery.getQueryCanceledException().getMessage(), e.getCause());
}
writeQueryResponseException(msg, e, servConn);
return false;
} finally {
// Since the query object is being shared in case of bind queries,
// resetting the flag may cause inconsistency.
// Also since this flag is only being set in code path executed by
// remote query execution, resetting it is not required.
// ((DefaultQuery)query).setRemoteQuery(false);
}
if (logger.isDebugEnabled()) {
logger.debug("{}: Sent query response for query {}", servConn.getName(), queryString);
}
stats.incWriteQueryResponseTime(DistributionStats.getStatTime() - start);
return true;
}
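The chunking arithmetic above is worth a worked example. MAXIMUM_CHUNK_SIZE is the server-side constant used in the method; the value 100 below is an illustrative assumption, not necessarily the deployed setting:

// Worked example of numberOfChunks = ceil(resultSize / MAXIMUM_CHUNK_SIZE).
int maximumChunkSize = 100; // assumed value for illustration
for (int resultSize : new int[] { 0, 1, 100, 101, 250 }) {
  int numberOfChunks = (int) Math.ceil(resultSize * 1.0 / maximumChunkSize);
  // 0 -> 0 (the code above still sends one empty chunk), 1 -> 1, 100 -> 1, 101 -> 2, 250 -> 3
  System.out.println(resultSize + " results -> " + numberOfChunks + " chunk(s)");
}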
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class CacheClientNotifier, method startCompiledQueryCleanupThread.
/**
* Starts the cleanup task that periodically removes compiled queries that have not been used
* since the previous pass. The period is DefaultQuery.COMPILED_QUERY_CLEAR_TIME, or
* DefaultQuery.TEST_COMPILED_QUERY_CLEAR_TIME when that test override is set.
*/
private void startCompiledQueryCleanupThread() {
if (isCompiledQueryCleanupThreadStarted) {
return;
}
SystemTimer.SystemTimerTask task = new SystemTimer.SystemTimerTask() {
@Override
public void run2() {
final boolean isDebugEnabled = logger.isDebugEnabled();
for (Map.Entry<String, DefaultQuery> e : compiledQueries.entrySet()) {
DefaultQuery q = e.getValue();
// Check whether the query has been used since the last sweep.
if (q.getLastUsed()) {
q.setLastUsed(false);
} else {
if (compiledQueries.remove(e.getKey()) != null) {
// If successfully removed decrement the counter.
statistics.incCompiledQueryCount(-1);
if (isDebugEnabled) {
logger.debug("Removed compiled query from ccn.compliedQueries list. Query: " + q.getQueryString() + ". Total compiled queries are : " + statistics.getCompiledQueryCount());
}
}
}
}
}
};
synchronized (lockIsCompiledQueryCleanupThreadStarted) {
if (!isCompiledQueryCleanupThreadStarted) {
long period = DefaultQuery.TEST_COMPILED_QUERY_CLEAR_TIME > 0 ? DefaultQuery.TEST_COMPILED_QUERY_CLEAR_TIME : DefaultQuery.COMPILED_QUERY_CLEAR_TIME;
_cache.getCCPTimer().scheduleAtFixedRate(task, period, period);
}
isCompiledQueryCleanupThreadStarted = true;
}
}
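The task above is a classic two-phase mark-and-sweep: each pass clears the lastUsed mark, and an entry still unmarked on the next pass is evicted, so an idle query survives roughly two periods at most. A self-contained sketch of the same idiom (all names here are illustrative, not geode API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

class MarkSweepCache<K, V> {
  private static final class Entry<V> {
    final V value;
    final AtomicBoolean usedSinceLastSweep = new AtomicBoolean(true);
    Entry(V value) { this.value = value; }
  }

  private final Map<K, Entry<V>> entries = new ConcurrentHashMap<>();

  V get(K key) {
    Entry<V> e = entries.get(key);
    if (e == null) return null;
    e.usedSinceLastSweep.set(true); // mark: this entry survives the next sweep
    return e.value;
  }

  void put(K key, V value) { entries.put(key, new Entry<>(value)); }

  // Run periodically (e.g. from a timer): evicts entries unused since the previous sweep.
  void sweep() {
    entries.entrySet().removeIf(e -> !e.getValue().usedSinceLastSweep.getAndSet(false));
  }
}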