Usage of org.apache.geode.internal.cache.PartitionedRegionQueryEvaluator in the Apache Geode project: class PRQueryDUnitTest, method testQueryResultsFromMembers.
/**
 * Verifies that queries with a LIMIT clause, executed over a partitioned region hosted on
 * two datastore VMs plus this VM, return at most the limit number of results from each
 * member. A {@link PartitionedRegionQueryEvaluator.TestHook} captures the per-member
 * result sizes at hook spot 3 (after results have been gathered) and the test asserts
 * each member's total equals the query's LIMIT.
 */
@Test
public void testQueryResultsFromMembers() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore1 = host.getVM(2);
  final VM datastore2 = host.getVM(3);
  final int totalBuckets = 10;
  final int redCop = 0; // no redundant copies for this test

  // Region definition shared by both datastore VMs.
  CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
    @Override
    public void run2() throws CacheException {
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributes prAttr = new PartitionAttributesFactory()
          .setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
      attr.setPartitionAttributes(prAttr);
      getCache().createRegion(rName, attr.create());
    }
  };
  datastore1.invoke(createPR);
  datastore2.invoke(createPR);

  // Create the same PR in this VM so it also hosts data and can run the query locally.
  AttributesFactory attr = new AttributesFactory();
  PartitionAttributes prAttr = new PartitionAttributesFactory()
      .setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
  attr.setPartitionAttributes(prAttr);
  PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());

  // Populate enough entries that every bucket receives data.
  int numEntries = 100;
  for (int i = 1; i <= numEntries; i++) {
    pr.put(Integer.valueOf(i), new Portfolio(i));
  }

  // Each query q must yield exactly limit[q] results per member.
  int[] limit = new int[] { 10, 15, 30, 0, 1, 9 };
  String[] queries = new String[] {
      "select * from " + pr.getFullPath() + " LIMIT " + limit[0],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[1],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[2],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[3],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[4],
      "select * from " + pr.getFullPath() + " where ID > 10 LIMIT " + limit[5] };
  try {
    for (int q = 0; q < queries.length; q++) {
      Object[] params = new Object[0];
      final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery(queries[q]);
      final SelectResults results =
          query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
      // TODO assert this is the correct set of bucket Ids,
      final HashSet<Integer> buckets = new HashSet<Integer>();
      for (int i = 0; i < totalBuckets; i++) {
        buckets.add(Integer.valueOf(i));
      }
      final PartitionedRegionQueryEvaluator qe =
          new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);

      // Hook that sums, per member, the sizes of all result collections seen at spot 3.
      class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {
        public HashMap resultsPerMember = new HashMap();

        @Override
        public void hook(int spot) throws RuntimeException {
          if (spot == 3) {
            for (Object mr : qe.getResultsPerMember().entrySet()) {
              Map.Entry e = (Map.Entry) mr;
              Collection<Collection> results = (Collection<Collection>) e.getValue();
              for (Collection<Object> r : results) {
                // Accumulate: a member may contribute more than one collection.
                if (this.resultsPerMember.containsKey(e.getKey())) {
                  this.resultsPerMember.put(e.getKey(), Integer.valueOf(
                      r.size() + ((Integer) this.resultsPerMember.get(e.getKey())).intValue()));
                } else {
                  this.resultsPerMember.put(e.getKey(), Integer.valueOf(r.size()));
                }
              }
            }
          }
        }
      }

      final MyTestHook th = new MyTestHook();
      qe.queryBuckets(th);

      // Every member must have returned exactly limit[q] results.
      for (Object r : th.resultsPerMember.entrySet()) {
        Map.Entry e = (Map.Entry) r;
        Integer res = (Integer) e.getValue();
        LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n"
            + "Query [" + queries[q] + "] Member : " + e.getKey() + " results size :"
            + res.intValue());
        assertEquals("Query [" + queries[q]
            + "]: The results returned by the member does not match the query limit size : Member : "
            + e.getKey(), limit[q], res.intValue());
      }
    }
  } finally {
    getCache().close();
  }
}
Usage of org.apache.geode.internal.cache.PartitionedRegionQueryEvaluator in the Apache Geode project: class PRQueryDUnitTest, method testDataLossDuringQueryProcessor.
/**
 * Test data loss (bucket 0) while the PRQueryEvaluator is processing the query loop.
 *
 * A TestHook disconnects both datastore VMs at hook spot 4 (mid-query); the evaluator
 * is then expected to detect the data loss and throw a QueryException.
 *
 * @throws Exception
 */
@Test
public void testDataLossDuringQueryProcessor() throws Exception {
final String rName = getUniqueName();
Host host = Host.getHost(0);
final VM datastore1 = host.getVM(2);
final VM datastore2 = host.getVM(3);
final int totalBuckets = 11;
final int redCop = 0;
// Region definition shared by both datastore VMs (no redundancy, so a disconnect loses data).
CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
@Override
public void run2() throws CacheException {
AttributesFactory attr = new AttributesFactory();
attr.setValueConstraint(String.class);
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
attr.setPartitionAttributes(prAttr);
getCache().createRegion(rName, attr.create());
}
};
datastore1.invoke(createPR);
datastore2.invoke(createPR);
// Local region is an accessor (localMaxMemory = 0): this VM hosts no buckets itself,
// so all data lives in the two datastore VMs that will be disconnected.
AttributesFactory attr = new AttributesFactory();
attr.setValueConstraint(String.class);
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).setLocalMaxMemory(0).create();
attr.setPartitionAttributes(prAttr);
PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());
// Create bucket zero, one and two
pr.put(new Integer(0), "zero");
pr.put(new Integer(1), "one");
pr.put(new Integer(2), "two");
// Hook that disconnects both datastores exactly once when the evaluator reaches spot 4.
class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {
// Set to true once the disconnect has been triggered; also asserted by the test.
public boolean done = false;
public void hook(int spot) throws RuntimeException {
if (spot == 4) {
// Guard so the disconnect happens only once even if the hook fires again.
synchronized (this) {
if (done) {
return;
}
this.done = true;
}
datastore1.invoke(disconnectVM());
datastore2.invoke(disconnectVM());
}
}
}
;
final MyTestHook th = new MyTestHook();
// add expected exception strings
final IgnoredException ex = IgnoredException.addIgnoredException("Data loss detected");
try {
Object[] params = new Object[0];
final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery("select distinct * from " + pr.getFullPath());
final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
// TODO assert this is the correct set of bucket Ids,
final HashSet<Integer> buckets = new HashSet<Integer>();
for (int i = 0; i < 3; i++) {
buckets.add(new Integer(i));
}
PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
qe.queryBuckets(th);
// queryBuckets must not return normally: the hook fired and data was lost.
assertTrue(th.done);
assertTrue(false);
} catch (QueryException expected) {
// Expected path: the evaluator detected the loss of the disconnected datastores.
assertTrue(th.done);
} finally {
ex.remove();
getCache().close();
}
}
Usage of org.apache.geode.internal.cache.PartitionedRegionQueryEvaluator in the Apache Geode project: class PRQueryDUnitTest, method testSimulatedDataLossBeforeQueryProcessor.
/**
 * Simulate a data loss (buckets 0 and 2) before the PRQueryEvaluator begins the query loop.
 *
 * Only bucket 1 is actually created; the evaluator is handed a bucket set {0, 1, 2},
 * so buckets 0 and 2 appear lost and a QueryException is expected.
 *
 * @throws Exception
 */
@Test
public void testSimulatedDataLossBeforeQueryProcessor() throws Exception {
final String rName = getUniqueName();
Host host = Host.getHost(0);
VM accessor = host.getVM(1);
VM datastore1 = host.getVM(2);
VM datastore2 = host.getVM(3);
final int totalBuckets = 11;
// Datastore region definition: one redundant copy across the two datastore VMs.
CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
@Override
public void run2() throws CacheException {
AttributesFactory attr = new AttributesFactory();
attr.setValueConstraint(String.class);
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(totalBuckets).create();
attr.setPartitionAttributes(prAttr);
getCache().createRegion(rName, attr.create());
}
};
datastore1.invoke(createPR);
datastore2.invoke(createPR);
// The accessor VM hosts no data (localMaxMemory = 0) but runs the query.
accessor.invoke(new CacheSerializableRunnable("Create accessor PR") {
@Override
public void run2() throws CacheException {
AttributesFactory attr = new AttributesFactory();
attr.setValueConstraint(String.class);
PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(1).setTotalNumBuckets(totalBuckets).setLocalMaxMemory(0).create();
attr.setPartitionAttributes(prAttr);
getCache().createRegion(rName, attr.create());
}
});
// add expected exception strings
final IgnoredException ex = IgnoredException.addIgnoredException("Data loss detected", accessor);
accessor.invoke(new SerializableCallable("Create bucket and test dataloss query") {
public Object call() throws Exception {
PartitionedRegion pr = (PartitionedRegion) getCache().getRegion(rName);
// Create bucket one
pr.put(new Integer(1), "one");
Object[] params = new Object[0];
final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery("select distinct * from " + pr.getFullPath());
final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
// Fake data loss
final HashSet<Integer> buckets = new HashSet<Integer>();
for (int i = 0; i < 3; i++) {
buckets.add(new Integer(i));
}
try {
PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
qe.queryBuckets(null);
// Must not reach here: buckets 0 and 2 do not exist, so the query must fail.
assertTrue(false);
} catch (QueryException expected) {
// Expected: the evaluator detects that buckets 0 and 2 are missing.
}
// getLogWriter().info("Select results are: " + results);
return Boolean.TRUE;
}
});
ex.remove();
}
Usage of org.apache.geode.internal.cache.PartitionedRegionQueryEvaluator in the Apache Geode project: class PRQueryDUnitTest, method testQueryResultsFromMembersWithAccessor.
/**
 * Verifies that queries with a LIMIT clause, executed from an accessor member
 * (localMaxMemory = 0) over a partitioned region hosted on two datastore VMs, return
 * exactly the limit number of results per datastore member. The accessor itself holds
 * no data, so a per-member count of 0 (the accessor's entry) is allowed; every other
 * member's total must equal the query's LIMIT.
 *
 * Bug fix: the original guard used {@code res != 0 || res != limit[q]}, which is true
 * whenever {@code limit[q] != 0}, so the accessor's 0-result entry reached the
 * assertEquals and failed spuriously. The intended condition is {@code &&}: only flag
 * a member whose count is neither 0 (accessor) nor the expected limit.
 */
@Test
public void testQueryResultsFromMembersWithAccessor() throws Exception {
  final String rName = getUniqueName();
  Host host = Host.getHost(0);
  final VM datastore1 = host.getVM(2);
  final VM datastore2 = host.getVM(3);
  final int totalBuckets = 20;
  final int redCop = 0; // no redundant copies for this test

  // Region definition shared by both datastore VMs.
  CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {
    @Override
    public void run2() throws CacheException {
      AttributesFactory attr = new AttributesFactory();
      PartitionAttributes prAttr = new PartitionAttributesFactory()
          .setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
      attr.setPartitionAttributes(prAttr);
      getCache().createRegion(rName, attr.create());
    }
  };
  datastore1.invoke(createPR);
  datastore2.invoke(createPR);

  // This VM is an accessor (localMaxMemory = 0): it queries but hosts no buckets.
  AttributesFactory attr = new AttributesFactory();
  PartitionAttributes prAttr = new PartitionAttributesFactory()
      .setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).setLocalMaxMemory(0).create();
  attr.setPartitionAttributes(prAttr);
  PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());

  // Populate enough entries that every bucket receives data.
  int numEntries = 100;
  for (int i = 1; i <= numEntries; i++) {
    pr.put(Integer.valueOf(i), new Portfolio(i));
  }

  // Each query q must yield exactly limit[q] results per datastore member.
  int[] limit = new int[] { 10, 15, 30, 0, 1, 9 };
  String[] queries = new String[] {
      "select * from " + pr.getFullPath() + " LIMIT " + limit[0],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[1],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[2],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[3],
      "select * from " + pr.getFullPath() + " LIMIT " + limit[4],
      "select * from " + pr.getFullPath() + " where ID > 10 LIMIT " + limit[5] };
  try {
    for (int q = 0; q < queries.length; q++) {
      Object[] params = new Object[0];
      final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery(queries[q]);
      final SelectResults results =
          query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
      // TODO assert this is the correct set of bucket Ids,
      final HashSet<Integer> buckets = new HashSet<Integer>();
      for (int b = 0; b < totalBuckets; b++) {
        buckets.add(b);
      }
      final PartitionedRegionQueryEvaluator qe =
          new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);

      // Hook that sums, per member, the sizes of all result collections seen at spot 3.
      class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {
        public HashMap resultsPerMember = new HashMap();

        @Override
        public void hook(int spot) throws RuntimeException {
          if (spot == 3) {
            for (Object mr : qe.getResultsPerMember().entrySet()) {
              Map.Entry e = (Map.Entry) mr;
              Collection<Collection> results = (Collection<Collection>) e.getValue();
              for (Collection<Object> r : results) {
                // Accumulate: a member may contribute more than one collection.
                if (this.resultsPerMember.containsKey(e.getKey())) {
                  this.resultsPerMember.put(e.getKey(), Integer.valueOf(
                      r.size() + ((Integer) this.resultsPerMember.get(e.getKey())).intValue()));
                } else {
                  this.resultsPerMember.put(e.getKey(), Integer.valueOf(r.size()));
                }
              }
            }
          }
        }
      }

      final MyTestHook th = new MyTestHook();
      qe.queryBuckets(th);

      for (Object r : th.resultsPerMember.entrySet()) {
        Map.Entry e = (Map.Entry) r;
        Integer res = (Integer) e.getValue();
        LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n"
            + "Query [" + queries[q] + "] Member : " + e.getKey() + " results size :"
            + res.intValue());
        // Skip the accessor member (0 results); everyone else must match the limit.
        // Was `!= 0 || != limit[q]` (always true for non-zero limits) — fixed to `&&`.
        if (res.intValue() != 0 /* accessor member */
            && res.intValue() != limit[q]) {
          assertEquals("Query [" + queries[q]
              + "]: The results returned by the member does not match the query limit size : Member : "
              + e.getKey(), limit[q], res.intValue());
        }
      }
    }
  } finally {
    getCache().close();
  }
}
End of aggregated usage examples.