Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class QueryMessage, method operateOnPartitionedRegion.
@Override
protected boolean operateOnPartitionedRegion(DistributionManager dm, PartitionedRegion pr, long startTime) throws CacheException, QueryException, ForceReattemptException, InterruptedException {
// Calculate the trace start time if tracing is on. This is needed because the start time is
// only set when enable-clock-stats is on, and we still want to report trace time even when
// clock stats are disabled.
long traceStartTime = 0;
if (this.traceOn) {
traceStartTime = NanoTimer.getTime();
}
if (Thread.interrupted()) {
throw new InterruptedException();
}
if (logger.isTraceEnabled(LogMarker.DM)) {
logger.trace(LogMarker.DM, "QueryMessage operateOnPartitionedRegion: {} buckets {}", pr.getFullPath(), this.buckets);
}
pr.waitOnInitialization();
if (QueryMonitor.isLowMemory()) {
String reason = LocalizedStrings.QueryMonitor_LOW_MEMORY_CANCELED_QUERY.toLocalizedString(QueryMonitor.getMemoryUsedDuringLowMemory());
// Throw the same exception used when a query is canceled for low memory.
throw new QueryExecutionLowMemoryException(reason);
}
DefaultQuery query = new DefaultQuery(this.queryString, pr.getCache(), false);
// Remote query, use the PDX types in serialized form.
DefaultQuery.setPdxReadSerialized(pr.getCache(), true);
// In case of "select *" queries we can keep the results in serialized form and send
query.setRemoteQuery(true);
QueryObserver indexObserver = query.startTrace();
boolean isQueryTraced = false;
List queryTraceList = null;
try {
query.setIsCqQuery(this.cqQuery);
PRQueryProcessor qp = new PRQueryProcessor(pr, query, this.parameters, this.buckets);
if (logger.isDebugEnabled()) {
logger.debug("Started executing query from remote node: {}", query.getQueryString());
}
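// Trace information is shipped with the results only when the requesting member is at
// Version.GFE_81 or later; older members do not expect the extra trace-info element.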
isQueryTraced = query.isTraced() && this.sender.getVersionObject().compareTo(Version.GFE_81) >= 0;
// Adds a query trace info object to the results list for remote queries
PRQueryTraceInfo queryTraceInfo = null;
if (isQueryTraced) {
this.isTraceInfoIteration = true;
if (DefaultQuery.testHook != null) {
DefaultQuery.testHook.doTestHook("Create PR Query Trace Info for Remote Query");
}
queryTraceInfo = new PRQueryTraceInfo();
queryTraceList = Collections.singletonList(queryTraceInfo);
}
this.isStructType = qp.executeQuery(this.resultCollector);
// Add the trace info object after the query has executed, so that it is excluded
// from the sorted collection of NWayMergeResults.
if (isQueryTraced) {
this.resultCollector.add(0, queryTraceList);
}
this.currentSelectResultIterator = this.resultCollector.iterator();
// For traced queries, populate the trace information here rather than in the finally block,
// so that it reflects the completed result set.
if (isQueryTraced) {
if (DefaultQuery.testHook != null) {
DefaultQuery.testHook.doTestHook("Populating Trace Info for Remote Query");
}
// calculate the number of rows being sent
int traceSize = queryTraceInfo.calculateNumberOfResults(this.resultCollector);
// subtract the query trace info object
traceSize -= 1;
queryTraceInfo.setTimeInMillis((NanoTimer.getTime() - traceStartTime) / 1.0e6f);
queryTraceInfo.setNumResults(traceSize);
// Create the indexes-used string
if (indexObserver instanceof IndexTrackingQueryObserver) {
Map indexesUsed = ((IndexTrackingQueryObserver) indexObserver).getUsedIndexes();
StringBuilder sb = new StringBuilder();
sb.append(" indexesUsed(").append(indexesUsed.size()).append(")");
if (indexesUsed.size() > 0) {
sb.append(":");
for (Iterator itr = indexesUsed.entrySet().iterator(); itr.hasNext(); ) {
Map.Entry entry = (Map.Entry) itr.next();
sb.append(entry.getKey()).append(entry.getValue());
if (itr.hasNext()) {
sb.append(",");
}
}
}
queryTraceInfo.setIndexesUsed(sb.toString());
}
}
if (QueryMonitor.isLowMemory()) {
String reason = LocalizedStrings.QueryMonitor_LOW_MEMORY_CANCELED_QUERY.toLocalizedString(QueryMonitor.getMemoryUsedDuringLowMemory());
throw new QueryExecutionLowMemoryException(reason);
}
super.operateOnPartitionedRegion(dm, pr, startTime);
} finally {
// remove trace info so that it is not included in the num results when logged
if (isQueryTraced) {
this.resultCollector.remove(queryTraceList);
}
DefaultQuery.setPdxReadSerialized(pr.getCache(), false);
query.setRemoteQuery(false);
query.endTrace(indexObserver, traceStartTime, this.resultCollector);
}
// Unless there was an exception thrown, this message handles sending the response
return false;
}
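The method above brackets query execution with PDX and trace state that must be restored on every exit path. Below is a condensed sketch of that setup/teardown pattern, not the real message class: the InternalCache parameter and the results list are stand-ins, and the internal calls (the DefaultQuery constructor, setPdxReadSerialized, startTrace/endTrace) are used exactly as they appear in the snippet above; exact internal signatures may differ across Geode versions.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.geode.cache.query.internal.DefaultQuery;
import org.apache.geode.cache.query.internal.QueryObserver;
import org.apache.geode.internal.NanoTimer;
import org.apache.geode.internal.cache.InternalCache;

class RemoteQueryFlagsSketch {
  void runRemotely(InternalCache cache, String queryString) throws Exception {
    List<Collection> results = new ArrayList<>(); // stand-in for the message's resultCollector
    long traceStartTime = NanoTimer.getTime();
    DefaultQuery query = new DefaultQuery(queryString, cache, false); // boolean copied from above
    DefaultQuery.setPdxReadSerialized(cache, true); // keep PDX values serialized for the wire
    query.setRemoteQuery(true);
    QueryObserver observer = query.startTrace();
    try {
      // ... execute the query against the local buckets and fill 'results' ...
    } finally {
      // restore the flags unconditionally, mirroring the finally block above
      DefaultQuery.setPdxReadSerialized(cache, false);
      query.setRemoteQuery(false);
      query.endTrace(observer, traceStartTime, results);
    }
  }
}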
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class Query651, method cmdExecute.
@Override
public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long start) throws IOException, InterruptedException {
// Based on MessageType.DESTROY
// Added by gregp 10/18/05
serverConnection.setAsTrue(REQUIRES_RESPONSE);
serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
// Retrieve the data from the message parts
String queryString = clientMessage.getPart(0).getString();
long compiledQueryId = 0;
Object[] queryParams = null;
try {
if (clientMessage.getMessageType() == MessageType.QUERY_WITH_PARAMETERS) {
// Query with parameters supported from 6.6 onwards.
// Number of parameters.
int params = clientMessage.getPart(1).getInt();
// In the case of the native client, there will be two extra parameters at indexes 2 and 3.
int paramStartIndex = 2;
if (clientMessage.getNumberOfParts() > (1 /* type */ + 1 /* query string */ + 1 /* params length */ + params)) {
int timeout = clientMessage.getPart(3).getInt();
serverConnection.setRequestSpecificTimeout(timeout);
paramStartIndex = 4;
}
// Get the query execution parameters.
queryParams = new Object[params];
for (int i = 0; i < queryParams.length; i++) {
queryParams[i] = clientMessage.getPart(i + paramStartIndex).getObject();
}
} else {
// Take care with this part count when adding new message fields.
if (clientMessage.getNumberOfParts() == 3) {
int timeout = clientMessage.getPart(2).getInt();
serverConnection.setRequestSpecificTimeout(timeout);
}
}
} catch (ClassNotFoundException cne) {
throw new QueryInvalidException(cne.getMessage() + queryString);
}
if (logger.isDebugEnabled()) {
logger.debug("{}: Received query request from {} queryString: {}{}", serverConnection.getName(), serverConnection.getSocketString(), queryString, (queryParams != null ? (" with num query parameters :" + queryParams.length) : ""));
}
try {
// Create query
QueryService queryService = serverConnection.getCachedRegionHelper().getCache().getLocalQueryService();
org.apache.geode.cache.query.Query query = null;
if (queryParams != null) {
// It's a compiled query.
CacheClientNotifier ccn = serverConnection.getAcceptor().getCacheClientNotifier();
query = ccn.getCompiledQuery(queryString);
if (query == null) {
// This is the first time the query has been seen by this server.
query = queryService.newQuery(queryString);
ccn.addCompiledQuery((DefaultQuery) query);
}
ccn.getStats().incCompiledQueryUsedCount(1);
((DefaultQuery) query).setLastUsed(true);
} else {
query = queryService.newQuery(queryString);
}
Set regionNames = ((DefaultQuery) query).getRegionsInQuery(queryParams);
// Authorization check
QueryOperationContext queryContext = null;
AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
if (authzRequest != null) {
queryContext = authzRequest.queryAuthorize(queryString, regionNames, queryParams);
String newQueryString = queryContext.getQuery();
if (queryString != null && !queryString.equals(newQueryString)) {
query = queryService.newQuery(newQueryString);
queryString = newQueryString;
regionNames = queryContext.getRegionNames();
if (regionNames == null) {
regionNames = ((DefaultQuery) query).getRegionsInQuery(null);
}
}
}
processQueryUsingParams(clientMessage, query, queryString, regionNames, start, null, queryContext, serverConnection, true, queryParams);
} catch (QueryInvalidException e) {
throw new QueryInvalidException(e.getMessage() + queryString);
}
}
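Query651 above is the server-side handler; the parameterized path is triggered when a client sends MessageType.QUERY_WITH_PARAMETERS. For context, here is a minimal client-side counterpart using only the public API. The locator address, region name, and bind values are illustrative, not taken from the snippet:

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class ParameterizedQueryClient {
  public static void main(String[] args) throws Exception {
    ClientCache cache = new ClientCacheFactory()
        .addPoolLocator("localhost", 10334) // illustrative locator address
        .create();
    QueryService queryService = cache.getQueryService();
    // $1 and $2 are bind parameters, filled from the array passed to execute()
    Query query = queryService.newQuery(
        "SELECT * FROM /Portfolios p WHERE p.ID > $1 AND p.status = $2");
    SelectResults<?> results = (SelectResults<?>) query.execute(new Object[] { 10, "active" });
    System.out.println("matched " + results.size() + " entries");
    cache.close();
  }
}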
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class PRQueryRemoteNodeExceptionDUnitTest, method testForceReattemptExceptionFromLocal.
@Test
public void testForceReattemptExceptionFromLocal() throws Exception {
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
setCacheInVMs(vm0, vm1, vm2);
List vmList = new LinkedList();
vmList.add(vm1);
vmList.add(vm0);
vmList.add(vm2);
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, 1 /* redundancy */, numOfBuckets));
vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, 1 /* redundancy */, numOfBuckets));
vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, 1 /* redundancy */, numOfBuckets));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
// Create a local region on one of the JVMs
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
// Generate the portfolio object array to be populated across the PR and local regions
final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
// Putting the data into the accessor node
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
// Putting the same data in the local region created
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node VM0 for result Set Comparison");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node VM0 for result Set Comparison");
// Insert the test hooks on the local and remote node. Each hook destroys a primary bucket
// mid-query, so that the query is hit with a ForceReattemptException on both nodes.
vm1.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
class MyQueryObserver extends IndexTrackingQueryObserver {
private int noOfAccess = 0;
@Override
public void startQuery(Query query) {
Object region = ((DefaultQuery) query).getRegionsInQuery(null).iterator().next();
LogWriterUtils.getLogWriter().info("Region type on VM1:" + region);
if (noOfAccess == 1) {
PartitionedRegion pr = (PartitionedRegion) PRQHelp.getCache().getRegion(name);
List buks = pr.getLocalPrimaryBucketsListTestOnly();
LogWriterUtils.getLogWriter().info("Available buckets:" + buks);
int bukId = ((Integer) (buks.get(0))).intValue();
LogWriterUtils.getLogWriter().info("Destroying bucket id:" + bukId);
pr.getDataStore().getLocalBucketById(bukId).destroyRegion();
}
++noOfAccess;
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
}
});
vm0.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
boolean gotException = false;
Cache cache = PRQHelp.getCache();
class MyQueryObserver extends QueryObserverAdapter {
private int noOfAccess = 0;
@Override
public void startQuery(Query query) {
Object region = ((DefaultQuery) query).getRegionsInQuery(null).iterator().next();
LogWriterUtils.getLogWriter().info("Region type on VM0:" + region);
if (noOfAccess == 2) {
PartitionedRegion pr = (PartitionedRegion) PRQHelp.getCache().getRegion(name);
List buks = pr.getLocalPrimaryBucketsListTestOnly();
LogWriterUtils.getLogWriter().info("Available buckets:" + buks);
int bukId = ((Integer) (buks.get(0))).intValue();
LogWriterUtils.getLogWriter().info("Destroying bucket id:" + bukId);
pr.getDataStore().getLocalBucketById(bukId).destroyRegion();
}
++noOfAccess;
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
final DefaultQuery query = (DefaultQuery) cache.getQueryService().newQuery("Select * from /" + name);
try {
query.execute();
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Query executed successfully with ForceReattemptException on local and remote both.");
} catch (Exception ex) {
gotException = true;
Assert.fail("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Test received Exception", ex);
}
}
});
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
}
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class PRQueryRemoteNodeExceptionDUnitTest, method testPRWithLocalAndRemoteException.
/**
* This test <br>
* 1. Creates PR regions with scope = DACK across 2 data-stores <br>
* 2. Creates a Local region on one of the VMs <br>
* 3. Puts the same data in both the PR region & the Local region <br>
* 4. Queries the data both in local & PR <br>
* 5. Puts a QueryObserver on both the local and the remote data-store node, to throw test
* exceptions. <br>
* 6. Then re-executes the query on one of the data-store nodes. <br>
* 7. Verifies that the exception thrown is from the local node, not from the remote node <br>
*/
@Test
public void testPRWithLocalAndRemoteException() throws Exception {
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
setCacheInVMs(vm0, vm1);
List vmList = new LinkedList();
vmList.add(vm1);
vmList.add(vm0);
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
// Create a local region on one of the JVMs
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
// Generate the portfolio object array to be populated across the PR and local regions
final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
// Putting the data into the accessor node
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
// Putting the same data in the local region created
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node VM0 for result Set Comparison");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node VM0 for result Set Comparison");
// Execute the query a first time to make sure all the buckets are created
// (bucket creation is lazy).
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying on VM0 First time");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(name, localName));
// Insert the test hooks on the local and remote node. Each hook throws a RuntimeException
// tagged with the node it runs on, so the test can verify that the local node's exception is
// the one that surfaces.
vm1.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
class MyQueryObserver extends IndexTrackingQueryObserver {
@Override
public void startQuery(Query query) {
throw new RuntimeException("For testing purpose only from remote node");
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
}
});
vm0.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
boolean gotException = false;
Cache cache = PRQHelp.getCache();
class MyQueryObserver extends QueryObserverAdapter {
@Override
public void startQuery(Query query) {
throw new RuntimeException("For testing purpose only from local node");
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
final DefaultQuery query = (DefaultQuery) cache.getQueryService().newQuery("Select * from /" + name);
try {
query.execute();
} catch (Exception ex) {
gotException = true;
if (ex.getMessage().contains("local node")) {
// ex.printStackTrace();
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from local node successfully.");
} else {
Assert.fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from local node rather received", ex);
}
}
if (!gotException) {
fail("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Test did not receive Exception as expected from local as well as remote node");
}
}
});
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
}
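Both tests in this class inject failures by overriding startQuery. A condensed, self-contained version of that pattern, counting invocations the way the first test's MyQueryObserver does; the class name and message are illustrative:

import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.internal.QueryObserverAdapter;
import org.apache.geode.cache.query.internal.QueryObserverHolder;

class InjectedFailureObserver extends QueryObserverAdapter {
  private int invocations;

  @Override
  public void startQuery(Query query) {
    // let the first execution pass and fail on the second, mirroring the
    // noOfAccess counter used in testForceReattemptExceptionFromLocal
    if (++invocations == 2) {
      throw new RuntimeException("injected failure for testing");
    }
  }
}
// Installed before running the query:
// QueryObserverHolder.setInstance(new InjectedFailureObserver());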
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
The class PRQueryDUnitTest, method testQueryResultsFromMembers.
@Test
public void testQueryResultsFromMembers() throws Exception {
final String rName = getUniqueName();
Host host = Host.getHost(0);
final VM datastore1 = host.getVM(2);
final VM datastore2 = host.getVM(3);
final int totalBuckets = 10;
final int redCop = 0;
CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
public void run2() throws CacheException {
AttributesFactory attr = new AttributesFactory();
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
attr.setPartitionAttributes(prAttr);
getCache().createRegion(rName, attr.create());
}
};
datastore1.invoke(createPR);
datastore2.invoke(createPR);
AttributesFactory attr = new AttributesFactory();
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
attr.setPartitionAttributes(prAttr);
PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());
// Populate the region so that its buckets get created
int numEntries = 100;
for (int i = 1; i <= numEntries; i++) {
pr.put(new Integer(i), new Portfolio(i));
}
int[] limit = new int[] { 10, 15, 30, 0, 1, 9 };
String[] queries = new String[] {
"select * from " + pr.getFullPath() + " LIMIT " + limit[0],
"select * from " + pr.getFullPath() + " LIMIT " + limit[1],
"select * from " + pr.getFullPath() + " LIMIT " + limit[2],
"select * from " + pr.getFullPath() + " LIMIT " + limit[3],
"select * from " + pr.getFullPath() + " LIMIT " + limit[4],
"select * from " + pr.getFullPath() + " where ID > 10 LIMIT " + limit[5] };
try {
for (int q = 0; q < queries.length; q++) {
Object[] params = new Object[0];
final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery(queries[q]);
final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
// TODO assert this is the correct set of bucket Ids,
final HashSet<Integer> buckets = new HashSet<Integer>();
for (int i = 0; i < totalBuckets; i++) {
buckets.add(new Integer(i));
}
final PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {
public HashMap resultsPerMember = new HashMap();
public void hook(int spot) throws RuntimeException {
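// 'spot' identifies where inside queryBuckets() the hook fires; judging from the use of
// getResultsPerMember() below, spot 3 is reached after results from each member are in.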
int size = 0;
if (spot == 3) {
for (Object mr : qe.getResultsPerMember().entrySet()) {
Map.Entry e = (Map.Entry) mr;
Collection<Collection> results = (Collection<Collection>) e.getValue();
for (Collection<Object> r : results) {
if (this.resultsPerMember.containsKey(e.getKey())) {
this.resultsPerMember.put(e.getKey(), new Integer(r.size() + ((Integer) this.resultsPerMember.get(e.getKey())).intValue()));
} else {
this.resultsPerMember.put(e.getKey(), new Integer(r.size()));
}
}
}
}
}
}
final MyTestHook th = new MyTestHook();
qe.queryBuckets(th);
for (Object r : th.resultsPerMember.entrySet()) {
Map.Entry e = (Map.Entry) r;
Integer res = (Integer) e.getValue();
LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n" + "Query [" + queries[q] + "] Member : " + e.getKey() + " results size :" + res.intValue());
assertEquals("Query [" + queries[q] + "]: The results returned by the member does not match the query limit size : Member : " + e.getKey(), limit[q], res.intValue());
}
}
} finally {
getCache().close();
}
}
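The test drives PartitionedRegionQueryEvaluator directly through internal types; the same LIMIT guarantee on the merged result set can be checked with the public API. A minimal sketch, assuming an embedded cache and an already-populated region named /portfolios (both illustrative):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.SelectResults;

public class LimitQueryCheck {
  public static void main(String[] args) throws Exception {
    Cache cache = new CacheFactory().create();
    // the region is assumed to exist and contain more than 10 entries
    Query query = cache.getQueryService().newQuery("select * from /portfolios LIMIT 10");
    SelectResults<?> results = (SelectResults<?>) query.execute();
    // the merged result set must honor the LIMIT no matter how many members contributed
    System.out.println("results: " + results.size() + " (expected <= 10)");
    cache.close();
  }
}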