Use of org.apache.geode.cache.query.data.Portfolio in the Apache Geode project.
From class IteratorTypeDefJUnitTest, method setUp.
@Before
public void setUp() throws java.lang.Exception {
  // Boot the test cache and seed the "portfolios" region with four entries
  // keyed "0".."3", each holding a Portfolio built from the same index.
  CacheUtils.startCache();
  Region region = CacheUtils.createRegion("portfolios", Portfolio.class);
  int key = 0;
  while (key < 4) {
    region.put(String.valueOf(key), new Portfolio(key));
    key++;
  }
  // Dump the seeded region contents for debugging.
  CacheUtils.log(region);
}
Use of org.apache.geode.cache.query.data.Portfolio in the Apache Geode project.
From class CompactRangeIndexJUnitTest, method executeRangeQueryWithDistinct.
/**
 * Executes the canned range query (id "181") with DISTINCT semantics and checks
 * the result set size against {@code expResults}. Each Portfolio in the results
 * must have an ID from the expected set {10..2}; IDs are removed as they are
 * seen, so duplicate IDs in the results also fail the contains() assertion.
 *
 * @param expResults expected number of rows in the SELECT results
 * @return the observed result-set size (0 if no SelectResults was returned)
 * @throws Exception if query execution fails
 */
private int executeRangeQueryWithDistinct(int expResults) throws Exception {
  String[] queries = { "181" };
  int results = 0;
  for (Object result : utils.executeQueries(queries)) {
    if (result instanceof SelectResults) {
      Collection<?> collection = ((SelectResults<?>) result).asList();
      results = collection.size();
      assertEquals(expResults, results);
      // Generic list (original used a raw List and a dead `int[] ids` local).
      List<Integer> expectedIds = new ArrayList<>(Arrays.asList(10, 9, 8, 7, 6, 5, 4, 3, 2));
      for (Object e : collection) {
        if (e instanceof Portfolio) {
          assertTrue(expectedIds.contains(((Portfolio) e).getID()));
          // Cast to Integer so remove(Object) is chosen, not remove(int index).
          expectedIds.remove((Integer) ((Portfolio) e).getID());
        }
      }
    }
  }
  return results;
}
Use of org.apache.geode.cache.query.data.Portfolio in the Apache Geode project.
From class ConcurrentIndexInitOnOverflowRegionDUnitTest, method testIndexUpdateWithRegionClear.
/**
 * Verifies that index updates are blocked while region.clear() is called and the
 * indexes are being reinitialized. An updater thread is parked by a test hook at
 * its 51st put, clear() races it, and the region afterwards must hold at most 50
 * entries (only the puts completed before the hook fired).
 */
@Test
public void testIndexUpdateWithRegionClear() {
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
final String regionName = "portfolio";
// Shared flag: set by the test hook when the updater is parked; cleared by the
// clear-runnable below to release it.
hooked = false;
// Create region and an index on it
vm0.invoke(new CacheSerializableRunnable("Create region and index") {
@Override
public void run2() throws CacheException {
Cache cache = getCache();
Region region = cache.createRegionFactory(RegionShortcut.LOCAL).create(regionName);
QueryService qService = cache.getQueryService();
try {
// One single-field index plus an index over the positions map values.
qService.createIndex("idIndex", "ID", "/" + regionName);
qService.createIndex("secIdIndex", "pos.secId", "/" + regionName + " p, p.positions.values pos");
} catch (Exception e) {
fail("Index creation failed." + e);
}
}
});
// Hook installed into IndexManager: at spot 6 it raises `hooked` and spins
// until another thread lowers the flag again.
class LocalTestHook implements TestHook {
@Override
public void hook(int spot) throws RuntimeException {
switch(spot) {
case // processAction in IndexManager
6:
hooked = true;
// wait until some thread unhooks.
while (hooked) {
Wait.pause(20);
}
break;
default:
break;
}
}
}
// Asynch invocation for continuous index updates
AsyncInvocation indexUpdateAsysnch = vm0.invokeAsync(new CacheSerializableRunnable("index updates") {
@Override
public void run2() throws CacheException {
Region region = getCache().getRegion(regionName);
// Install the hook halfway through, so the 51st put gets parked inside
// IndexManager while the clear below races it.
for (int i = 0; i < 100; i++) {
if (i == 50)
IndexManager.testHook = new LocalTestHook();
region.put(i, new Portfolio(i));
if (i == 50)
Wait.pause(20);
}
}
});
// Region.clear() which should block other region updates.
vm0.invoke(new CacheSerializableRunnable("Clear the region") {
@Override
public void run2() throws CacheException {
Region region = getCache().getRegion(regionName);
// Busy-wait until the updater thread is parked inside the hook.
while (!hooked) {
Wait.pause(100);
}
if (hooked) {
// Release the updater, remove the hook, and clear while it is mid-update.
hooked = false;
IndexManager.testHook = null;
region.clear();
}
try {
QueryService qservice = getCache().getQueryService();
Index index = qservice.getIndex(region, "idIndex");
// NOTE(review): the failure message says the size should be zero, but the
// condition tolerates a storage size of 1 — confirm `> 1` vs `> 0` intent.
if (((CompactRangeIndex) index).getIndexStorage().size() > 1) {
fail("After clear region size is supposed to be zero as all index updates are blocked. Current region size is: " + region.size());
}
} finally {
// Always uninstall the hook so a failure cannot wedge later tests.
IndexManager.testHook = null;
}
}
});
// Kill asynch thread
ThreadUtils.join(indexUpdateAsysnch, 20000);
// Verify region size which must be 50
vm0.invoke(new CacheSerializableRunnable("Check region size") {
@Override
public void run2() throws CacheException {
Region region = getCache().getRegion(regionName);
if (region.size() > 50) {
fail("After clear region size is supposed to be 50 as all index updates are blocked " + region.size());
}
}
});
}
Use of org.apache.geode.cache.query.data.Portfolio in the Apache Geode project.
From class CopyOnReadIndexDUnitTest, method helpTestPRQueryOnLocalNode.
// The test sets up a partition region across 2 servers.
// It does puts in each server, checking instance counts of Portfolio objects.
// Querying the data will result in deserialization of Portfolio objects.
// In cases where an index is present, the objects will be deserialized in the cache.
public void helpTestPRQueryOnLocalNode(final String queryString, final int numPortfolios, final int numExpectedResults, final boolean hasIndex) throws Exception {
  // Integer division: pass an even numPortfolios, otherwise one entry of the
  // second VM's range is silently dropped.
  final int numPortfoliosPerVM = numPortfolios / 2;
  resetInstanceCount(vm0);
  resetInstanceCount(vm1);
  createPartitionRegion(vm0, "portfolios");
  createPartitionRegion(vm1, "portfolios");
  if (hasIndex) {
    vm0.invoke(new SerializableCallable() {
      public Object call() throws Exception {
        QueryTestUtils utils = new QueryTestUtils();
        utils.createIndex("idIndex", "p.ID", "/portfolios p");
        return null;
      }
    });
  }
  // vm0: put the first half of the entries and verify Portfolio instance counts.
  vm0.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      for (int i = 0; i < numPortfoliosPerVM; i++) {
        Portfolio p = new Portfolio(i);
        p.status = "testStatus";
        p.positions = new HashMap();
        p.positions.put("" + i, new Position("" + i, 20));
        region.put("key " + i, p);
      }
      if (hasIndex) {
        // operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for put operation
        // Due to index, we have deserialized all of the entries this vm currently hosts
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for put operation
        // We do not have an index, so we have not deserialized any values
        Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });
  // vm1: put the second half of the entries and verify instance counts.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      for (int i = numPortfoliosPerVM; i < numPortfolios; i++) {
        Portfolio p = new Portfolio(i);
        p.status = "testStatus";
        p.positions = new HashMap();
        p.positions.put("" + i, new Position("" + i, 20));
        region.put("key " + i, p);
      }
      // PR indexes are created across nodes unlike Replicated Region indexes
      if (hasIndex) {
        // operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for put operation
        // Due to index, we have deserialized all of the entries this vm currently hosts
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        if (index == null) {
          // Defensive: create the index locally if PR propagation has not done so.
          QueryTestUtils utils = new QueryTestUtils();
          index = utils.createIndex("idIndex", "p.ID", "/portfolios p");
        }
        Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for put operation
        // We do not have an index, so we have not deserialized any values
        Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });
  // vm0: first query execution; mutate the copies to prove copy-on-read isolates
  // the cached values from the returned results.
  vm0.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      QueryService qs = getCache().getQueryService();
      Query query = qs.newQuery(queryString);
      SelectResults results = (SelectResults) query.execute();
      assertEquals("Failed:" + queryString, numExpectedResults, results.size());
      for (Object o : results) {
        if (o instanceof Portfolio) {
          Portfolio p = (Portfolio) o;
          p.status = "discardStatus";
        } else {
          Struct struct = (Struct) o;
          Portfolio p = (Portfolio) struct.getFieldValues()[0];
          p.status = "discardStatus";
        }
      }
      if (hasIndex) {
        // operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for put operation
        // Due to index, we have deserialized all of the entries this vm currently hosts
        // Since we have deserialized and cached these values, we just need to add the number of
        // results we did a copy of due to copy on read
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numPortfoliosPerVM + numExpectedResults), 5000, 200, true);
      } else {
        // operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for put operation
        // Due to the query we deserialized the number of entries this vm currently hosts
        // We had to deserialize the results from the other data nodes when we iterated through
        // the results as well as our own
        Wait.waitForCriterion(verifyPortfolioCount((int) ((PartitionedRegion) region).getLocalSize() + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });
  // vm1: verify its instance counts after vm0's query.
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      if (hasIndex) {
        // After vm0 executed the query, we already had the values deserialized in our cache
        // So it's the same total as before
        Wait.waitForCriterion(verifyPortfolioCount((int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // After vm0 executed the query, we had to deserialize the values in our vm
        Wait.waitForCriterion(verifyPortfolioCount((int) ((PartitionedRegion) region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
      }
      return null;
    }
  });
  // vm0: second query execution; the cached values must still carry the original
  // status, proving the first query's mutations only touched copies.
  vm0.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Region region = getCache().getRegion("/portfolios");
      QueryService qs = getCache().getQueryService();
      Query query = qs.newQuery(queryString);
      SelectResults results = (SelectResults) query.execute();
      assertEquals(numExpectedResults, results.size());
      for (Object o : results) {
        if (o instanceof Portfolio) {
          Portfolio p = (Portfolio) o;
          assertEquals("status should not have been changed", "testStatus", p.status);
        } else {
          Struct struct = (Struct) o;
          Portfolio p = (Portfolio) struct.getFieldValues()[0];
          assertEquals("status should not have been changed", "testStatus", p.status);
        }
      }
      if (hasIndex) {
        // operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for put operation
        // Due to index, we have deserialized all of the entries this vm currently hosts
        // This is the second query; because we have deserialized and cached these values, we just
        // need to add the number of results a second time
        Index index = getCache().getQueryService().getIndex(region, "idIndex");
        Wait.waitForCriterion(verifyPortfolioCount((int) index.getStatistics().getNumberOfValues() + numExpectedResults + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
      } else {
        // operations we have done on this vm consist of:
        // numPortfoliosPerVM instances of Portfolio created for put operation
        // This is the second query; because we have no index, we have to again deserialize all
        // the values this vm is hosting, plus the copied results of both queries
        Wait.waitForCriterion(verifyPortfolioCount((int) (((PartitionedRegion) region).getLocalSize() + ((PartitionedRegion) region).getLocalSize() + numExpectedResults + numExpectedResults + numPortfoliosPerVM)), 5000, 200, true);
      }
      return null;
    }
  });
  // FIX: the region created above is named "portfolios"; the original passed
  // "portfolio" here, destroying a non-existent region and leaking "portfolios"
  // into subsequent tests.
  destroyRegion("portfolios", vm0);
}
Use of org.apache.geode.cache.query.data.Portfolio in the Apache Geode project.
From class CopyOnReadIndexJUnitTest, method createData.
/**
 * Seeds the region with {@code numObjects} distinct indexKey values, each
 * repeated {@code objectsAndResultsMultiplier} times under unique region keys,
 * so a query matching one indexKey is satisfied by multiple entries.
 *
 * @param region the region to populate
 * @param numObjects number of distinct indexKey values
 * @param objectsAndResultsMultiplier number of similar objects to put into the cache so that
 *        results from queries will be satisfied by the multiple
 */
private void createData(Region region, int numObjects, int objectsAndResultsMultiplier) {
  int regionKey = 0;
  for (int i = 0; i < numObjects; i++) {
    for (int j = 0; j < objectsAndResultsMultiplier; j++, regionKey++) {
      Portfolio portfolio = new Portfolio(regionKey);
      portfolio.indexKey = i;
      portfolio.status = "testStatus";
      portfolio.positions = new HashMap();
      // NOTE(review): the single position is stored under the constant key "1"
      // (the original wrote "" + 1, not "" + i) — confirm that is intentional.
      portfolio.positions.put("1", new Position("" + i, i));
      region.put("key-" + regionKey, portfolio);
    }
  }
}
Aggregations