Use of org.apache.geode.cache.CacheException in project geode by apache.
From class PdxQueryCQTestBase, method resetTestObjectInstanceCount.
private void resetTestObjectInstanceCount() {
  final Host host = Host.getHost(0);
  for (int i = 0; i < 4; i++) {
    VM vm = host.getVM(i);
    vm.invoke(new CacheSerializableRunnable("Create Bridge Server") {
      public void run2() throws CacheException {
        TestObject.numInstance = 0;
        PortfolioPdx.numInstance = 0;
        PositionPdx.numInstance = 0;
        PositionPdx.cnt = 0;
        TestObject2.numInstance = 0;
      }
    });
  }
}
Use of org.apache.geode.cache.CacheException in project geode by apache.
From class PartitionedRegionCqQueryOptimizedExecuteDUnitTest, method testCqExecuteWithoutQueryExecution.
@Test
public void testCqExecuteWithoutQueryExecution() throws Exception {
  final Host host = Host.getHost(0);
  final VM server = host.getVM(0);
  final VM client = host.getVM(1);
  final int numOfEntries = 10;
  final String cqName = "testCqExecuteWithoutQueryExecution_1";
  createServer(server);

  // Create values.
  createValues(server, regions[0], numOfEntries);
  final int thePort =
      server.invoke(() -> PartitionedRegionCqQueryOptimizedExecuteDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server.getHost());

  // Create client.
  createClient(client, thePort, host0);

  // Create CQs.
  createCQ(client, cqName, cqs[0]);
  cqHelper.validateCQCount(client, 1);
  cqHelper.executeCQ(client, cqName, false, null);

  server.invoke(new CacheSerializableRunnable("execute cq") {
    public void run2() throws CacheException {
      assertFalse("CqServiceImpl.EXECUTE_QUERY_DURING_INIT flag should be false",
          CqServiceImpl.EXECUTE_QUERY_DURING_INIT);
      int numOfQueryExecutions = (Integer) ((GemFireCacheImpl) getCache()).getCachePerfStats()
          .getStats().get("queryExecutions");
      assertEquals("Number of query executions for cq.execute should be 0", 0,
          numOfQueryExecutions);
    }
  });

  // Create more values.
  server.invoke(new CacheSerializableRunnable("Create values") {
    public void run2() throws CacheException {
      Region region1 = getRootRegion().getSubregion(regions[0]);
      for (int i = numOfEntries + 1; i <= numOfEntries * 2; i++) {
        region1.put(KEY + i, new Portfolio(i));
      }
      LogWriterUtils.getLogWriter()
          .info("### Number of Entries in Region :" + region1.keySet().size());
    }
  });

  cqHelper.waitForCreated(client, cqName, KEY + numOfEntries * 2);
  cqHelper.validateCQ(client, cqName,
      /* resultSize: */ cqHelper.noTest,
      /* creates: */ numOfEntries,
      /* updates: */ 0,
      /* deletes: */ 0,
      /* queryInserts: */ numOfEntries,
      /* queryUpdates: */ 0,
      /* queryDeletes: */ 0,
      /* totalEvents: */ numOfEntries);

  // Update values.
  createValues(server, regions[0], 5);
  createValues(server, regions[0], 10);
  cqHelper.waitForUpdated(client, cqName, KEY + numOfEntries);

  // Validate update events.
  cqHelper.validateCQ(client, cqName,
      /* resultSize: */ cqHelper.noTest,
      /* creates: */ numOfEntries,
      /* updates: */ 15,
      /* deletes: */ 0,
      /* queryInserts: */ numOfEntries,
      /* queryUpdates: */ 15,
      /* queryDeletes: */ 0,
      /* totalEvents: */ numOfEntries + 15);

  // Validate delete events.
  cqHelper.deleteValues(server, regions[0], 5);
  cqHelper.waitForDestroyed(client, cqName, KEY + 5);
  cqHelper.validateCQ(client, cqName,
      /* resultSize: */ cqHelper.noTest,
      /* creates: */ numOfEntries,
      /* updates: */ 15,
      /* deletes: */ 5,
      /* queryInserts: */ numOfEntries,
      /* queryUpdates: */ 15,
      /* queryDeletes: */ 5,
      /* totalEvents: */ numOfEntries + 15 + 5);

  cqHelper.closeClient(client);
  cqHelper.closeServer(server);
}
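Below is a minimal client-side sketch of the pattern this test exercises: cq.execute() only registers the CQ for ongoing events and, unlike executeWithInitialResults(), does not run the query to build an initial result set on the server, which is what the test checks through the "queryExecutions" stat. The host name, port, region name, and CQ name here are illustrative placeholders, not values from the test.

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.CqAttributes;
import org.apache.geode.cache.query.CqAttributesFactory;
import org.apache.geode.cache.query.CqEvent;
import org.apache.geode.cache.query.CqQuery;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.util.CqListenerAdapter;

public class CqExecuteSketch {
  public static void main(String[] args) throws Exception {
    // Connect a client with the subscription channel enabled; CQ event delivery needs it.
    ClientCache cache = new ClientCacheFactory()
        .addPoolServer("exampleHost", 40404) // placeholder server address
        .setPoolSubscriptionEnabled(true)
        .create();
    QueryService queryService = cache.getQueryService();

    // Attach a listener that receives events for entries matching the query.
    CqAttributesFactory cqf = new CqAttributesFactory();
    cqf.addCqListener(new CqListenerAdapter() {
      @Override
      public void onEvent(CqEvent event) {
        System.out.println(event.getQueryOperation() + " -> " + event.getKey());
      }
    });
    CqAttributes cqAttributes = cqf.create();

    // execute() registers the CQ for ongoing events only; no initial result set is computed.
    CqQuery cq = queryService.newCq("exampleCq",
        "SELECT * FROM /exampleRegion p WHERE p.ID < 101", cqAttributes);
    cq.execute();

    // ... work with incoming events, then clean up.
    cq.close();
    cache.close();
  }
}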
Use of org.apache.geode.cache.CacheException in project geode by apache.
From class PrCqUsingPoolDUnitTest, method testCQsWithPutallsWithTx.
@Test
public void testCQsWithPutallsWithTx() throws Exception {
  final Host host = Host.getHost(0);
  VM server1 = host.getVM(0);
  VM server2 = host.getVM(1);
  VM client = host.getVM(2);

  // Create a bridge server with a data store; clients will connect to this bridge server.
  createServer(server1, false, 1);
  // Create another server with a data store.
  createServer(server2, true, 1);

  // Create values.
  final int size = 100;
  final int port = server1.invoke(() -> PrCqUsingPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server1.getHost());
  String poolName = "testCQsWithPutallsTx";
  createPool(client, poolName, new String[] {host0}, new int[] {port});
  createClient(client, new int[] {port}, host0, null);

  // Register CQ.
  String cqQueryString = "SELECT ALL * FROM /root/" + regions[0] + " p where p.ID < 101";
  createCQ(client, poolName, "testCQEvents_0", cqQueryString);
  cqHelper.executeCQ(client, "testCQEvents_0", false, null);
  cqHelper.registerInterestListCQ(client, regions[0], size, true);

  server1.invoke(new CacheSerializableRunnable("begin transaction") {
    public void run2() throws CacheException {
      getCache().getCacheTransactionManager().begin();
    }
  });
  createValuesPutall(server1, regions[0], size);
  server1.invoke(new CacheSerializableRunnable("commit transaction") {
    public void run2() throws CacheException {
      getCache().getCacheTransactionManager().commit();
    }
  });

  for (int i = 1; i <= size; i++) {
    cqHelper.waitForCreated(client, "testCQEvents_0", KEY + i);
  }

  // Validate CQ.
  cqHelper.validateCQ(client, "testCQEvents_0",
      /* resultSize: */ CqQueryUsingPoolDUnitTest.noTest,
      /* creates: */ size,
      /* updates: */ 0,
      /* deletes: */ 0,
      /* queryInserts: */ size,
      /* queryUpdates: */ 0,
      /* queryDeletes: */ 0,
      /* totalEvents: */ size);

  // Do updates.
  createValuesPutall(server1, regions[0], size);
  for (int i = 1; i <= size; i++) {
    cqHelper.waitForUpdated(client, "testCQEvents_0", KEY + i);
  }

  // Validate CQs again.
  cqHelper.validateCQ(client, "testCQEvents_0",
      /* resultSize: */ CqQueryUsingPoolDUnitTest.noTest,
      /* creates: */ size,
      /* updates: */ size,
      /* deletes: */ 0,
      /* queryInserts: */ size,
      /* queryUpdates: */ size,
      /* queryDeletes: */ 0,
      /* totalEvents: */ size + size);

  // Destroy all the values.
  int numInvalidates = size;
  server1.invoke(new CacheSerializableRunnable("begin transaction") {
    public void run2() throws CacheException {
      getCache().getCacheTransactionManager().begin();
    }
  });
  cqHelper.deleteValues(server1, regions[0], numInvalidates);
  server1.invoke(new CacheSerializableRunnable("commit transaction") {
    public void run2() throws CacheException {
      getCache().getCacheTransactionManager().commit();
    }
  });
  for (int i = 1; i <= numInvalidates; i++) {
    cqHelper.waitForDestroyed(client, "testCQEvents_0", KEY + i);
  }

  // Validate CQs after destroy.
  cqHelper.validateCQ(client, "testCQEvents_0",
      /* resultSize: */ CqQueryUsingPoolDUnitTest.noTest,
      /* creates: */ size,
      /* updates: */ size,
      /* deletes: */ numInvalidates,
      /* queryInserts: */ size,
      /* queryUpdates: */ size,
      /* queryDeletes: */ numInvalidates,
      /* totalEvents: */ size + size + numInvalidates);

  cqHelper.closeClient(client);
  cqHelper.closeServer(server2);
  cqHelper.closeServer(server1);
}
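For reference, here is a minimal server-side sketch of the transactional putAll pattern the test wraps in begin()/commit() above. It assumes createValuesPutall() (not shown) populates the region with a putAll of Portfolio values; the region path, key prefix, and batch size are illustrative. On commit, the batched entries reach the CQ client as a group of create (or update) events.

import java.util.HashMap;
import java.util.Map;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.CacheTransactionManager;
import org.apache.geode.cache.CommitConflictException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.query.data.Portfolio;

public class TxPutallSketch {
  public static void putAllInTx() {
    Cache cache = CacheFactory.getAnyInstance();
    Region<String, Portfolio> region = cache.getRegion("/root/exampleRegion"); // placeholder path
    CacheTransactionManager txManager = cache.getCacheTransactionManager();

    Map<String, Portfolio> batch = new HashMap<>();
    for (int i = 1; i <= 100; i++) {
      batch.put("key-" + i, new Portfolio(i));
    }

    txManager.begin();
    try {
      region.putAll(batch); // all entries commit (or roll back) together
      txManager.commit();
    } catch (CommitConflictException e) {
      // commit() has already rolled the transaction back; retry or report as appropriate.
    }
  }
}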
Use of org.apache.geode.cache.CacheException in project geode by apache.
From class PrCqUsingPoolDUnitTest, method testEventsDuringQueryExecution.
/**
 * Test for events created during the CQ query execution. When a CQ is executed using
 * executeWithInitialResults(), region changes made while the query is running may not be
 * reflected in the initial result set, which can leave the query data and the region data
 * inconsistent.
 *
 * @throws Exception
 */
// GEODE-1181, 1253: random ports, eats exceptions (fixed some), async behavior
@Category(FlakyTest.class)
@Test
public void testEventsDuringQueryExecution() throws Exception {
  final Host host = Host.getHost(0);
  VM server1 = host.getVM(0);
  VM server2 = host.getVM(1);
  VM client = host.getVM(2);
  final String cqName = "testEventsDuringQueryExecution_0";

  // Servers.
  createServer(server1);
  createServer(server2);
  final int port = server1.invoke(() -> PrCqUsingPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server1.getHost());
  String poolName1 = "testEventsDuringQueryExecution";
  createPool(client, poolName1, host0, port);

  // Create CQ.
  createCQ(client, poolName1, cqName, cqs[0]);

  final int numObjects = 200;
  final int totalObjects = 500;

  // Initialize the region.
  server1.invoke(new CacheSerializableRunnable("Update Region") {
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + regions[0]);
      for (int i = 1; i <= numObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });

  // Keep updating the region (async invocation).
  server1.invokeAsync(new CacheSerializableRunnable("Update Region") {
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + regions[0]);
      for (int i = numObjects + 1; i <= totalObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });

  // Execute the CQ while the updates are in progress.
  client.invoke(new CacheSerializableRunnable("Execute CQ") {
    public void run2() throws CacheException {
      QueryService cqService = getCache().getQueryService();
      // Get the CqQuery object.
      CqQuery cq1 = cqService.getCq(cqName);
      if (cq1 == null) {
        fail("Failed to get CQ " + cqName);
      }

      SelectResults cqResults = null;
      try {
        cqResults = cq1.executeWithInitialResults();
      } catch (Exception ex) {
        throw new AssertionError("Failed to execute CQ " + cqName, ex);
      }

      CqQueryTestListener cqListener =
          (CqQueryTestListener) cq1.getCqAttributes().getCqListener();
      // Wait for the last key to arrive.
      for (int i = 0; i < 4; i++) {
        try {
          cqListener.waitForCreated("" + totalObjects);
          // Found it; break out of the loop.
          break;
        } catch (CacheException ex) {
          if (i == 3) {
            throw ex;
          }
        }
      }

      // Check that the events from the CqListener are in order.
      int oldId = 0;
      for (Object cqEvent : cqListener.events.toArray()) {
        int newId = Integer.parseInt(cqEvent.toString());
        if (oldId > newId) {
          fail("Queued events for the CQ listener during execution with "
              + "initial results are not in the order in which they were created.");
        }
        oldId = newId;
      }

      // Check that all the IDs are present across the SelectResults and the CQ events.
      HashSet ids = new HashSet(cqListener.events);
      for (Object o : cqResults.asList()) {
        Struct s = (Struct) o;
        ids.add(s.get("key"));
      }

      HashSet missingIds = new HashSet();
      String key = "";
      for (int i = 1; i <= totalObjects; i++) {
        key = "" + i;
        if (!ids.contains(key)) {
          missingIds.add(key);
        }
      }
      if (!missingIds.isEmpty()) {
        fail("Missing keys in either the ResultSet or the CQ event list. Missing keys [size: "
            + missingIds.size() + "]: " + missingIds
            + ". IDs in ResultSet and CQ events: " + ids);
      }
    }
  });

  cqHelper.closeClient(client);
  cqHelper.closeServer(server2);
  cqHelper.closeServer(server1);
}
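To make the pattern described in the method comment concrete, here is a minimal client-side sketch under the same assumptions as the test: a CQ whose attributes already include a key-recording listener, and initial results delivered as key/value Structs. The listener type and method names below are illustrative, not part of the test harness; the point is that entries created while executeWithInitialResults() runs show up either in the initial result set or as listener events, so the union of the two gives the consistent view.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.apache.geode.cache.query.CqEvent;
import org.apache.geode.cache.query.CqQuery;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.query.Struct;
import org.apache.geode.cache.util.CqListenerAdapter;

public class InitialResultsSketch {
  // Collects keys delivered as CQ events while (and after) the initial query runs.
  static class KeyCollectingListener extends CqListenerAdapter {
    final ConcurrentLinkedQueue<Object> keys = new ConcurrentLinkedQueue<>();

    @Override
    public void onEvent(CqEvent event) {
      keys.add(event.getKey());
    }
  }

  // Assumes the listener was registered via the CQ's CqAttributes before this call.
  static Set<Object> executeAndMerge(CqQuery cq, KeyCollectingListener listener) throws Exception {
    SelectResults<?> initial = cq.executeWithInitialResults();

    Set<Object> seenKeys = new HashSet<>(listener.keys);
    for (Object row : initial.asList()) {
      // For a query whose initial results are Structs keyed by "key", as in the test above.
      seenKeys.add(((Struct) row).get("key"));
    }
    return seenKeys; // entries created during execution appear in at least one of the two sources
  }
}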
Use of org.apache.geode.cache.CacheException in project geode by apache.
From class PrCqUsingPoolDUnitTest, method createCQ.
public void createCQ(VM vm, final String poolName, final String cqName, final String queryStr) {
  vm.invoke(new CacheSerializableRunnable("Create CQ :" + cqName) {
    public void run2() throws CacheException {
      LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);

      // Get the CQ service from the pool.
      QueryService cqService = null;
      try {
        cqService = PoolManager.find(poolName).getQueryService();
      } catch (Exception cqe) {
        cqe.printStackTrace();
        Assert.fail("Failed to getCQService.", cqe);
      }

      // Create CQ attributes.
      CqAttributesFactory cqf = new CqAttributesFactory();
      CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
      ((CqQueryTestListener) cqListeners[0]).cqName = cqName;
      cqf.initCqListeners(cqListeners);
      CqAttributes cqa = cqf.create();

      // Create the CQ.
      try {
        CqQuery cq1 = cqService.newCq(cqName, queryStr, cqa);
        assertTrue("newCq() state mismatch", cq1.getState().isStopped());
        LogWriterUtils.getLogWriter().info("Created a new CqQuery : " + cq1);
      } catch (Exception ex) {
        LogWriterUtils.getLogWriter().info("CqService is :" + cqService, ex);
        throw new AssertionError("Failed to create CQ " + cqName + " . ", ex);
      }
    }
  });
}
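As a usage note, createCQ() above assumes a subscription-enabled pool has already been registered under poolName in the client VM (the test does this through its createPool() helper). A minimal sketch of that setup might look like the following; the host, port, and pool name are placeholders, and a client cache is assumed to exist in the VM.

import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolFactory;
import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.cache.query.QueryService;

public class PoolSetupSketch {
  static QueryService queryServiceForPool(String host, int port, String poolName) {
    PoolFactory factory = PoolManager.createFactory();
    factory.addServer(host, port);
    // CQ events are delivered over the subscription channel, so it must be enabled.
    factory.setSubscriptionEnabled(true);
    Pool pool = factory.create(poolName);

    // createCQ(vm, poolName, cqName, queryStr) resolves the same pool by name:
    return PoolManager.find(pool.getName()).getQueryService();
  }
}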