Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
In class PRQueryRemoteNodeExceptionDUnitTest, method testCacheCloseExceptionFromLocalAndRemote2.
@Test
public void testCacheCloseExceptionFromLocalAndRemote2() throws Exception {
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
setCacheInVMs(vm0, vm1);
List vmList = new LinkedList();
vmList.add(vm1);
vmList.add(vm0);
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
// creating a local region on one of the JVM's
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
// Generating portfolio object array to be populated across the PR's & Local
// Regions
final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
// Putting the data into the accessor node
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
// Putting the same data in the local region created
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node VM0 for result Set Comparison");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node VM0 for result Set Comparison");
// Insert the test hooks on the local and remote node. The hook on the remote
// node destroys the region mid-query; the hook on the local node waits until
// the region is destroyed, so the query fails with a remote exception.
vm1.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
class MyQueryObserver extends IndexTrackingQueryObserver {
private int noOfAccess = 0;
@Override
public void afterIterationEvaluation(Object result) {
LogWriterUtils.getLogWriter().info("Calling after IterationEvaluation :" + noOfAccess);
if (noOfAccess > 1) {
PRQHelp.getCache().getRegion(name).destroyRegion();
}
++noOfAccess;
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
}
});
vm0.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
boolean gotException = false;
Cache cache = PRQHelp.getCache();
class MyQueryObserver extends QueryObserverAdapter {
private int noOfAccess = 0;
@Override
public void afterIterationEvaluation(Object result) {
// Object region = ((DefaultQuery)query).getRegionsInQuery(null).iterator().next();
// getLogWriter().info("Region type:"+region);
int i = 0;
while (i <= 10) {
Region region = PRQHelp.getCache().getRegion(name);
if (region == null || region.isDestroyed()) {
break;
}
try {
Thread.sleep(10);
} catch (Exception ex) {
// ignored; the loop simply re-checks whether the region is gone
}
i++;
}
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
final DefaultQuery query = (DefaultQuery) cache.getQueryService().newQuery("Select * from /" + name + " p where p.ID > 0");
try {
query.execute();
} catch (Exception ex) {
gotException = true;
if (ex instanceof QueryInvocationTargetException) {
LogWriterUtils.getLogWriter().info(ex.getMessage());
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from remote node successfully as region.destroy happened before cache.close().");
} else {
Assert.fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from local node rather received", ex);
}
}
if (!gotException) {
fail("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Test did not receive Exception as expected from local as well as remote node");
}
}
});
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
}
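A note on the hook mechanism used above: QueryObserverHolder is a JVM-wide singleton, which is why the next test has to explicitly replace the observer this one leaves behind. Below is a minimal sketch of a scoped install/reset pattern that avoids such leakage; the class and method names are illustrative, not part of the Geode test utilities:

import org.apache.geode.cache.query.internal.QueryObserver;
import org.apache.geode.cache.query.internal.QueryObserverHolder;

public final class QueryObserverScope {

  // Installs the given observer, runs the body, and always resets the holder
  // to its default no-op observer, keeping tests isolated from each other.
  public static void withObserver(QueryObserver observer, Runnable body) {
    QueryObserverHolder.setInstance(observer);
    try {
      body.run();
    } finally {
      QueryObserverHolder.reset();
    }
  }
}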
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
In class PRQueryRemoteNodeExceptionDUnitTest, method testRemoteException.
@Test
public void testRemoteException() throws Exception {
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
setCacheInVMs(vm0, vm1);
List vmList = new LinkedList();
vmList.add(vm1);
vmList.add(vm0);
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
// creating a local region on one of the JVM's
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, PortfolioData.class));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
// Generating portfolio object array to be populated across the PR's & Local
// Regions
final PortfolioData[] portfolio = createPortfolioData(cnt, cntDest);
// Putting the data into the accessor node
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
// Putting the same data in the local region created
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node VM0 for result Set Comparison");
vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName, portfolio, cnt, cntDest));
LogWriterUtils.getLogWriter().info("PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node VM0 for result Set Comparison");
vm0.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
boolean gotException = false;
Cache cache = PRQHelp.getCache();
class MyQueryObserver extends QueryObserverAdapter {
@Override
public void startQuery(Query query) {
// Replacing QueryObserver of previous test.
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
}
});
// Insert the test hook on the remote node: its startQuery throws a
// RuntimeException, which must propagate back to the querying (local) node.
vm1.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
class MyQueryObserver extends IndexTrackingQueryObserver {
@Override
public void startQuery(Query query) {
throw new RuntimeException("For testing purpose only from remote node");
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
}
});
vm0.invoke(new CacheSerializableRunnable(name) {
@Override
public void run2() throws CacheException {
boolean gotException = false;
Cache cache = PRQHelp.getCache();
final DefaultQuery query = (DefaultQuery) cache.getQueryService().newQuery("Select * from /" + name);
try {
query.execute();
} catch (Exception ex) {
gotException = true;
if (ex.getMessage().contains("remote node")) {
ex.printStackTrace();
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from remote node successfully.");
} else {
Assert.fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from remote node rather received", ex);
}
}
if (!gotException) {
fail("PRQueryRemoteNodeExceptionDUnitTest#testRemoteException: Test did not receive Exception as expected from remote node");
}
}
});
LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
}
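The assertion pattern above (catch, inspect getMessage(), track a gotException flag) can be factored into a helper. Here is a minimal sketch using plain JUnit 4 asserts; assertQueryFailsWithMessage is a hypothetical helper, not part of this test class:

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.apache.geode.cache.Cache;

final class QueryFailureAssertions {

  // Hypothetical helper: execute an OQL string and assert that it fails with a
  // message containing the given fragment (here, one raised on a remote node).
  static void assertQueryFailsWithMessage(Cache cache, String oql, String fragment) {
    try {
      cache.getQueryService().newQuery(oql).execute();
      fail("expected query to fail: " + oql);
    } catch (Exception e) {
      // the RuntimeException message thrown in the remote observer's startQuery
      // is preserved in the exception that surfaces on the querying node
      assertTrue("unexpected message: " + e.getMessage(),
          e.getMessage() != null && e.getMessage().contains(fragment));
    }
  }
}

With such a helper, the body of the final vm0 runnable reduces to assertQueryFailsWithMessage(cache, "Select * from /" + name, "remote node").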
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
In class PRQueryDUnitTest, method testDataLossDuringQueryProcessor.
/**
* Tests data loss (bucket 0) while the PartitionedRegionQueryEvaluator is processing the query loop.
*
* @throws Exception
*/
@Test
public void testDataLossDuringQueryProcessor() throws Exception {
final String rName = getUniqueName();
Host host = Host.getHost(0);
final VM datastore1 = host.getVM(2);
final VM datastore2 = host.getVM(3);
final int totalBuckets = 11;
final int redCop = 0;
CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
@Override
public void run2() throws CacheException {
AttributesFactory attr = new AttributesFactory();
attr.setValueConstraint(String.class);
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
attr.setPartitionAttributes(prAttr);
getCache().createRegion(rName, attr.create());
}
};
datastore1.invoke(createPR);
datastore2.invoke(createPR);
AttributesFactory attr = new AttributesFactory();
attr.setValueConstraint(String.class);
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).setLocalMaxMemory(0).create();
attr.setPartitionAttributes(prAttr);
PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());
// Create buckets zero, one, and two
pr.put(new Integer(0), "zero");
pr.put(new Integer(1), "one");
pr.put(new Integer(2), "two");
class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {
public boolean done = false;
public void hook(int spot) throws RuntimeException {
if (spot == 4) {
synchronized (this) {
if (done) {
return;
}
this.done = true;
}
datastore1.invoke(disconnectVM());
datastore2.invoke(disconnectVM());
}
}
}
final MyTestHook th = new MyTestHook();
// add expected exception strings
final IgnoredException ex = IgnoredException.addIgnoredException("Data loss detected");
try {
Object[] params = new Object[0];
final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery("select distinct * from " + pr.getFullPath());
final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
// TODO assert this is the correct set of bucket Ids,
final HashSet<Integer> buckets = new HashSet<Integer>();
for (int i = 0; i < 3; i++) {
buckets.add(new Integer(i));
}
PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
qe.queryBuckets(th);
assertTrue(th.done);
// should not reach here: the induced data loss must make queryBuckets throw
fail("Expected QueryException was not thrown");
} catch (QueryException expected) {
assertTrue(th.done);
} finally {
ex.remove();
getCache().close();
}
}
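For comparison, the same query can be issued through the public QueryService rather than the internal PartitionedRegionQueryEvaluator. Below is a minimal sketch, assuming the caller wants to retry once on mid-flight data loss; note a retry can only succeed if redundant copies survive (the test above uses redundantCopies = 0, so there the data is simply gone). The region name is hypothetical:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryInvocationTargetException;
import org.apache.geode.cache.query.SelectResults;

final class PrQueryClient {

  // Minimal sketch: public-API counterpart of the internal evaluator call above.
  // "/portfolios" is a hypothetical region name.
  static SelectResults<?> queryWithOneRetry(Cache cache) throws Exception {
    Query query = cache.getQueryService().newQuery("select distinct * from /portfolios");
    try {
      return (SelectResults<?>) query.execute();
    } catch (QueryInvocationTargetException e) {
      // the data the query depends on became unavailable mid-flight (e.g. a
      // datastore departed); a single retry targets the surviving copies
      return (SelectResults<?>) query.execute();
    }
  }
}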
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
In class LocalRegion, method query.
@Override
public SelectResults query(String predicate) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException {
if (predicate == null) {
throw new IllegalArgumentException("The input query predicate is null. A null predicate is not allowed.");
}
predicate = predicate.trim();
SelectResults results;
if (hasServerProxy()) {
// Trim whitespace
String queryString = constructRegionQueryString(predicate.trim());
try {
results = getServerProxy().query(queryString, null);
} catch (Exception e) {
Throwable cause = e.getCause();
if (cause == null) {
cause = e;
}
throw new QueryInvocationTargetException(e.getMessage(), cause);
}
} else {
// TODO: params size is always zero so this whole block is wasted
Object[] params = new Object[0];
QueryService qs = getGemFireCache().getLocalQueryService();
String queryStr = constructRegionQueryString(predicate.trim());
DefaultQuery query = (DefaultQuery) qs.newQuery(queryStr);
if (query.getRegionsInQuery(params).size() != 1) {
throw new QueryInvalidException("Prevent multiple region query from being executed through region.query()");
}
results = (SelectResults) query.execute(params);
}
return results;
}
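Caller-side, the contract enforced here is that Region.query takes a predicate over exactly one region: the predicate is wrapped into a full query by constructRegionQueryString, and anything resolving to more than one region is rejected with QueryInvalidException. A minimal sketch follows; the field name is illustrative:

import org.apache.geode.cache.Region;
import org.apache.geode.cache.query.SelectResults;

final class RegionQueryExample {

  // Minimal sketch of the caller side of LocalRegion.query(): pass a predicate
  // over this region's entries, not a full multi-region OQL statement.
  static int countMatches(Region<Object, Object> region) throws Exception {
    SelectResults<?> results = region.query("ID > 0");
    return results.size();
  }
}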
Use of org.apache.geode.cache.query.internal.DefaultQuery in project geode by apache.
In class DataCommandFunction, method select.
@SuppressWarnings("rawtypes")
private DataCommandResult select(Object principal, String queryString) {
InternalCache cache = getCache();
AtomicInteger nestedObjectCount = new AtomicInteger(0);
if (StringUtils.isEmpty(queryString)) {
return DataCommandResult.createSelectInfoResult(null, null, -1, null, CliStrings.QUERY__MSG__QUERY_EMPTY, false);
}
QueryService qs = cache.getQueryService();
// TODO: determine whether this is optimal. Could parsed queries be cached under
// a name, so they can be retrieved by name and not re-parsed every time?
Query query = qs.newQuery(queryString);
DefaultQuery tracedQuery = (DefaultQuery) query;
WrappedIndexTrackingQueryObserver queryObserver = null;
String queryVerboseMsg = null;
long startTime = -1;
if (tracedQuery.isTraced()) {
startTime = NanoTimer.getTime();
queryObserver = new WrappedIndexTrackingQueryObserver();
QueryObserverHolder.setInstance(queryObserver);
}
List<SelectResultRow> list = new ArrayList<>();
try {
Object results = query.execute();
if (tracedQuery.isTraced()) {
queryVerboseMsg = getLogMessage(queryObserver, startTime, queryString);
queryObserver.reset2();
}
if (results instanceof SelectResults) {
select_SelectResults((SelectResults) results, principal, list, nestedObjectCount);
} else {
select_NonSelectResults(results, list);
}
return DataCommandResult.createSelectResult(queryString, list, queryVerboseMsg, null, null, true);
} catch (FunctionDomainException | GfJsonException | QueryInvocationTargetException | NameResolutionException | TypeMismatchException e) {
logger.warn(e.getMessage(), e);
return DataCommandResult.createSelectResult(queryString, null, queryVerboseMsg, e, e.getMessage(), false);
} finally {
if (queryObserver != null) {
QueryObserverHolder.reset();
}
}
}
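The isTraced() branch is driven by the OQL <trace> hint: prefixing the query string with <trace> makes Geode collect per-execution timing and index-usage details, which this method then formats via getLogMessage. Below is a minimal sketch of issuing such a query directly; the region name is hypothetical:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.SelectResults;

final class TracedQueryExample {

  // Minimal sketch: the <trace> prefix is what makes DefaultQuery.isTraced()
  // return true. "/portfolios" is a hypothetical region name.
  static SelectResults<?> runTraced(Cache cache) throws Exception {
    Query query = cache.getQueryService()
        .newQuery("<trace> select * from /portfolios p where p.ID > 0");
    return (SelectResults<?>) query.execute();
  }
}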