Use of org.apache.geode.cache.CacheException in project geode by apache: class PartitionedRegionCqQueryDUnitTest, method createServer.
/**
 * Creates a cache (bridge) server backed by a partitioned region on the given VM.
 *
 * @param server the VM on which to start the cache server.
 * @param port the port the cache server listens on.
 * @param isAccessor when true the underlying partitioned region hosts no data on this VM.
 * @param redundantCopies number of redundant copies for the primary bucket.
 */
public void createServer(VM server, final int port, final boolean isAccessor, final int redundantCopies) {
  SerializableRunnable serverSetupTask = new CacheSerializableRunnable("Create Cache Server") {
    public void run2() throws CacheException {
      LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
      // Build partition attributes first; an accessor contributes no local storage.
      PartitionAttributesFactory paf = new PartitionAttributesFactory();
      if (isAccessor) {
        paf.setLocalMaxMemory(0);
      }
      paf.setTotalNumBuckets(197);
      paf.setRedundantCopies(redundantCopies);
      PartitionAttributes prAttr = paf.create();
      AttributesFactory attr = new AttributesFactory();
      attr.setPartitionAttributes(prAttr);
      // A partitioned region requires a real distributed system, not a loner.
      assertFalse(getSystem().isLoner());
      for (int idx = 0; idx < regions.length; idx++) {
        Region r = createRegion(regions[idx], attr.create());
        LogWriterUtils.getLogWriter().info("Server created the region: " + r);
      }
      try {
        startBridgeServer(port, true);
      } catch (Exception ex) {
        Assert.fail("While starting CacheServer", ex);
      }
    }
  };
  server.invoke(serverSetupTask);
}
Use of org.apache.geode.cache.CacheException in project geode by apache: class PartitionedRegionCqQueryOptimizedExecuteDUnitTest, method testCqExecuteWithoutQueryExecutionAndNoRSCaching.
@Test
public void testCqExecuteWithoutQueryExecutionAndNoRSCaching() throws Exception {
// Verifies that cq.execute() neither runs the query on the server nor caches
// result-set keys when MAINTAIN_KEYS is disabled, and that CQ events still flow.
final Host host = Host.getHost(0);
final VM server = host.getVM(0);
final VM client = host.getVM(1);
final int numOfEntries = 10;
final String cqName = "testCqExecuteWithoutQueryExecution_1";
// Disable server-side result-key caching BEFORE the server/CQ are created.
server.invoke(new CacheSerializableRunnable("execute cq") {
public void run2() throws CacheException {
CqServiceProvider.MAINTAIN_KEYS = false;
}
});
createServer(server);
// Create values.
createValues(server, regions[0], numOfEntries);
final int thePort = server.invoke(() -> PartitionedRegionCqQueryDUnitTest.getCacheServerPort());
final String host0 = NetworkUtils.getServerHostName(server.getHost());
// Create client.
createClient(client, thePort, host0);
/* Create CQs. */
createCQ(client, cqName, cqs[0]);
cqHelper.validateCQCount(client, 1);
// Execute without requesting initial results.
cqHelper.executeCQ(client, cqName, false, null);
// Server-side check: execute() must not have run the query (stat stays 0)
// and both optimization flags must be off.
server.invoke(new CacheSerializableRunnable("execute cq") {
public void run2() throws CacheException {
assertFalse("CqServiceImpl.EXECUTE_QUERY_DURING_INIT flag should be false ", CqServiceImpl.EXECUTE_QUERY_DURING_INIT);
assertFalse(DistributionConfig.GEMFIRE_PREFIX + "cq.MAINTAIN_KEYS flag should be false ", CqServiceProvider.MAINTAIN_KEYS);
int numOfQueryExecutions = (Integer) ((GemFireCacheImpl) getCache()).getCachePerfStats().getStats().get("queryExecutions");
assertEquals("Number of query executions for cq.execute should be 0 ", 0, numOfQueryExecutions);
}
});
// Create more values.
// Keys numOfEntries+1 .. 2*numOfEntries are new, so they arrive as CREATE events.
server.invoke(new CacheSerializableRunnable("Create values") {
public void run2() throws CacheException {
Region region1 = getRootRegion().getSubregion(regions[0]);
for (int i = numOfEntries + 1; i <= numOfEntries * 2; i++) {
region1.put(KEY + i, new Portfolio(i));
}
LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keySet().size());
}
});
cqHelper.waitForCreated(client, cqName, KEY + numOfEntries * 2);
// Only the 10 new puts are creates; the CQ was registered after the first batch.
cqHelper.validateCQ(client, cqName, /* resultSize: */
cqHelper.noTest, /* creates: */
numOfEntries, /* updates: */
0, /* deletes; */
0, /* queryInserts: */
numOfEntries, /* queryUpdates: */
0, /* queryDeletes: */
0, /* totalEvents: */
numOfEntries);
// Update values.
// Re-putting existing keys 1..5 and 1..10 yields 5 + 10 = 15 update events.
createValues(server, regions[0], 5);
createValues(server, regions[0], 10);
cqHelper.waitForUpdated(client, cqName, KEY + numOfEntries);
// validate Update events.
cqHelper.validateCQ(client, cqName, /* resultSize: */
cqHelper.noTest, /* creates: */
numOfEntries, /* updates: */
15, /* deletes; */
0, /* queryInserts: */
numOfEntries, /* queryUpdates: */
15, /* queryDeletes: */
0, /* totalEvents: */
numOfEntries + 15);
// Validate delete events.
cqHelper.deleteValues(server, regions[0], 5);
cqHelper.waitForDestroyed(client, cqName, KEY + 5);
cqHelper.validateCQ(client, cqName, /* resultSize: */
cqHelper.noTest, /* creates: */
numOfEntries, /* updates: */
15, /* deletes; */
5, /* queryInserts: */
numOfEntries, /* queryUpdates: */
15, /* queryDeletes: */
5, /* totalEvents: */
numOfEntries + 15 + 5);
cqHelper.closeClient(client);
cqHelper.closeServer(server);
}
Use of org.apache.geode.cache.CacheException in project geode by apache: class CqQueryUsingPoolDUnitTest, method executeCQ.
/**
 * Execute/register a previously-created CQ as running on the given VM.
 *
 * @param vm VM in which the CQ is executed.
 * @param cqName name of a CQ previously registered with the query service.
 * @param initialResults true if initialResults are requested
 * @param expectedResultsSize if >= 0, validate results against this size
 * @param expectedKeys if not null, keys that must be present in the initial results
 * @param expectedErr if not null, an error we expect (wrapped in ExpectedException markers)
 */
public void executeCQ(VM vm, final String cqName, final boolean initialResults, final int expectedResultsSize, final String[] expectedKeys, final String expectedErr) {
  vm.invoke(new CacheSerializableRunnable("Execute CQ :" + cqName) {
    // Performs the lookup/execute; run2() wraps it so the ExpectedException
    // markers are always added before and removed after the work.
    private void work() throws CacheException {
      LogWriterUtils.getLogWriter().info("### DEBUG EXECUTE CQ START ####");
      // Get CQ Service.
      QueryService cqService = null;
      CqQuery cq1 = null;
      cqService = getCache().getQueryService();
      // Get CqQuery object; it must exist and still be in the STOPPED state.
      try {
        cq1 = cqService.getCq(cqName);
        if (cq1 == null) {
          LogWriterUtils.getLogWriter().info("Failed to get CqQuery object for CQ name: " + cqName);
          fail("Failed to get CQ " + cqName);
        } else {
          LogWriterUtils.getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
          assertTrue("newCq() state mismatch", cq1.getState().isStopped());
        }
      } catch (Exception ex) {
        LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
        LogWriterUtils.getLogWriter().error(ex);
        Assert.fail("Failed to execute CQ " + cqName, ex);
      }
      if (initialResults) {
        SelectResults cqResults = null;
        try {
          cqResults = cq1.executeWithInitialResults();
        } catch (Exception ex) {
          // Use Assert.fail consistently (matches the other catch blocks here)
          // so the causing exception is preserved in the failure report.
          Assert.fail("Failed to execute CQ " + cqName, ex);
        }
        LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
        assertTrue("executeWithInitialResults() state mismatch", cq1.getState().isRunning());
        if (expectedResultsSize >= 0) {
          assertEquals("Unexpected results size for CQ: " + cqName + " CQ Query :" + cq1.getQueryString(), expectedResultsSize, cqResults.size());
        }
        if (expectedKeys != null) {
          // Collect the "key" field of every struct in the initial result set.
          HashSet<Object> resultKeys = new HashSet<>();
          for (Object o : cqResults.asList()) {
            Struct s = (Struct) o;
            resultKeys.add(s.get("key"));
          }
          for (int i = 0; i < expectedKeys.length; i++) {
            assertTrue("Expected key :" + expectedKeys[i] + " Not found in CqResults for CQ: " + cqName + " CQ Query :" + cq1.getQueryString() + " Keys in CqResults :" + resultKeys, resultKeys.contains(expectedKeys[i]));
          }
        }
      } else {
        try {
          cq1.execute();
        } catch (Exception ex) {
          // Only log when the failure was not anticipated by the caller.
          if (expectedErr == null) {
            LogWriterUtils.getLogWriter().info("CqService is :" + cqService, ex);
          }
          Assert.fail("Failed to execute CQ " + cqName, ex);
        }
        assertTrue("execute() state mismatch", cq1.getState().isRunning());
      }
    }
    @Override
    public void run2() throws CacheException {
      if (expectedErr != null) {
        getCache().getLogger().info("<ExpectedException action=add>" + expectedErr + "</ExpectedException>");
      }
      try {
        work();
      } finally {
        // Always remove the marker, even when work() throws.
        if (expectedErr != null) {
          getCache().getLogger().info("<ExpectedException action=remove>" + expectedErr + "</ExpectedException>");
        }
      }
    }
  });
}
Use of org.apache.geode.cache.CacheException in project geode by apache: class CqResultSetUsingPoolDUnitTest, method testCqResultsWithRangeIndexOnPR.
/**
 * Tests CQ result sets when only range indexes exist on the partitioned region.
 *
 * @throws Exception on test failure
 */
@Test
public void testCqResultsWithRangeIndexOnPR() throws Exception {
  final Host host = Host.getHost(0);
  VM server1 = host.getVM(0);
  VM server2 = host.getVM(1);
  VM client = host.getVM(2);
  cqDUnitTest.createServerWithPR(server1, 0, false, 0);
  cqDUnitTest.createServerWithPR(server2, 0, false, 0);
  final int port = server1.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server1.getHost());
  String poolName = "testCqResults";
  cqDUnitTest.createPool(client, poolName, host0, port);
  // Create client.
  cqDUnitTest.createClient(client, port, host0);
  // Force every index created below to be a range index.
  server1.invoke(new CacheSerializableRunnable("Set RangeIndex Flag") {
    public void run2() throws CacheException {
      IndexManager.TEST_RANGEINDEX_ONLY = true;
    }
  });
  server2.invoke(new CacheSerializableRunnable("Set RangeIndex Flag") {
    public void run2() throws CacheException {
      IndexManager.TEST_RANGEINDEX_ONLY = true;
    }
  });
  try {
    // Create indexes (range indexes, because of the flag set above).
    cqDUnitTest.createFunctionalIndex(server1, "IdIndex", "p.ID", "/root/regionA p");
    cqDUnitTest.createFunctionalIndex(server1, "statusIndex", "p.status", "/root/regionA p");
    cqDUnitTest.createFunctionalIndex(server1, "portfolioIdIndex", "p.position1.portfolioId", "/root/regionA p");
    // Put 5 entries into the region.
    cqDUnitTest.createValues(server1, "regionA", 5);
    // Test for supported queries.
    String cqQuery = "";
    for (int queryCnt = 0; queryCnt < condition.length; queryCnt++) {
      cqQuery = selStr + condition[queryCnt];
      cqDUnitTest.createCQ(client, poolName, "testCqResultsP_" + queryCnt, cqQuery);
      cqDUnitTest.executeCQ(client, "testCqResultsP_" + queryCnt, true, resultSize[queryCnt], expectedKeys[queryCnt], null);
    }
  } finally {
    // Always restore the static flag — even when an assertion above fails —
    // so index creation in later tests is not silently affected.
    server1.invoke(new CacheSerializableRunnable("Reset RangeIndex Flag") {
      public void run2() throws CacheException {
        IndexManager.TEST_RANGEINDEX_ONLY = false;
      }
    });
    server2.invoke(new CacheSerializableRunnable("Reset RangeIndex Flag") {
      public void run2() throws CacheException {
        IndexManager.TEST_RANGEINDEX_ONLY = false;
      }
    });
  }
  // Close.
  cqDUnitTest.closeClient(client);
  cqDUnitTest.closeServer(server1);
  cqDUnitTest.closeServer(server2);
}
Use of org.apache.geode.cache.CacheException in project geode by apache: class CqResultSetUsingPoolDUnitTest, method testCqResultsCachingForMultipleCQs.
/**
 * Tests that the server-side CQ result-key cache ends up complete for two clients
 * that register the same CQ while the region is being updated concurrently.
 *
 * @throws Exception on test failure
 */
@Test
public void testCqResultsCachingForMultipleCQs() throws Exception {
  final Host host = Host.getHost(0);
  VM server = host.getVM(0);
  VM client1 = host.getVM(1);
  VM client2 = host.getVM(2);
  cqDUnitTest.createServer(server);
  final int port = server.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server.getHost());
  String poolName = "testCqResults";
  final String cqName1 = "testCqResultsP_0";
  final String cqName2 = "testCqResultsP_1";
  cqDUnitTest.createPool(client1, poolName, host0, port);
  cqDUnitTest.createPool(client2, poolName, host0, port);
  // Create client.
  cqDUnitTest.createClient(client1, port, host0);
  cqDUnitTest.createClient(client2, port, host0);
  // create CQ.
  cqDUnitTest.createCQ(client1, poolName, cqName1, cqDUnitTest.cqs[0]);
  cqDUnitTest.createCQ(client2, poolName, cqName2, cqDUnitTest.cqs[0]);
  final int numObjects = 300;
  final int totalObjects = 500;
  // initialize Region.
  server.invoke(new CacheSerializableRunnable("Update Region") {
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      for (int i = 1; i <= numObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });
  // Keep updating region (async invocation) while the CQs execute below.
  server.invokeAsync(new CacheSerializableRunnable("Update Region") {
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      // Update (totalObjects - 1) entries.
      for (int i = 1; i < totalObjects; i++) {
        // Destroy entries.
        if (i > 25 && i < 201) {
          region.destroy("" + i);
          continue;
        }
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
      // recreate destroyed entries.
      for (int j = 26; j < 201; j++) {
        Portfolio p = new Portfolio(j);
        region.put("" + j, p);
      }
      // Add the last key; its presence signals that all updates are done.
      Portfolio p = new Portfolio(totalObjects);
      region.put("" + totalObjects, p);
    }
  });
  // Execute CQs while the region operations above are still in progress.
  cqDUnitTest.executeCQ(client1, cqName1, true, null);
  cqDUnitTest.executeCQ(client2, cqName2, true, null);
  // Verify CQ Cache results.
  server.invoke(new CacheSerializableRunnable("Verify CQ Cache results") {
    public void run2() throws CacheException {
      CqService cqService = null;
      try {
        cqService = ((DefaultQueryService) getCache().getQueryService()).getCqService();
      } catch (Exception ex) {
        LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
        Assert.fail("Failed to get the internal CqService.", ex);
      }
      // Wait (bounded — the original spun forever) until the async updater has
      // written the sentinel key, marking all region operations complete.
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      final long deadline = System.currentTimeMillis() + 60000L;
      while (region.get("" + totalObjects) == null) {
        if (System.currentTimeMillis() > deadline) {
          fail("Timed out waiting for key " + totalObjects + " to appear in the region.");
        }
        try {
          Thread.sleep(50);
        } catch (InterruptedException ie) {
          // Preserve interrupt status and abort instead of silently looping on.
          Thread.currentThread().interrupt();
          fail("Interrupted while waiting for region updates to complete.");
        }
      }
      Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
      for (InternalCqQuery cq : cqs) {
        ServerCQImpl cqQuery = (ServerCQImpl) cq;
        int size = cqQuery.getCqResultKeysSize();
        if (size != totalObjects) {
          // Log exactly which keys are missing to ease failure diagnosis.
          LogWriterUtils.getLogWriter().info("The number of Cached events " + size + " is not equal to the expected size " + totalObjects);
          HashSet<String> expectedKeys = new HashSet<>();
          // Include totalObjects itself (original loop stopped one short).
          for (int i = 1; i <= totalObjects; i++) {
            expectedKeys.add("" + i);
          }
          Set cachedKeys = cqQuery.getCqResultKeyCache();
          expectedKeys.removeAll(cachedKeys);
          LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
        }
        assertEquals("The number of keys cached for cq " + cqQuery.getName() + " is wrong.", totalObjects, cqQuery.getCqResultKeysSize());
      }
    }
  });
  // Close.
  cqDUnitTest.closeClient(client1);
  cqDUnitTest.closeClient(client2);
  cqDUnitTest.closeServer(server);
}
Aggregations