Use of org.apache.geode.cache.query.internal.cq.InternalCqQuery in project geode by apache.
The class CqResultSetUsingPoolOptimizedExecuteDUnitTest, method testCqResultsCachingWithFailOver.
/**
 * Tests CQ Result Caching with CQ Failover. When EXECUTE_QUERY_DURING_INIT is false and the new
 * server calls execute during HA, the results cache is not initialized, so after failover the
 * new server (server2) is expected to hold an empty CQ results cache.
 *
 * @throws Exception if any distributed invocation fails
 */
@Override
@Test
public void testCqResultsCachingWithFailOver() throws Exception {
  final Host host = Host.getHost(0);
  VM server1 = host.getVM(0);
  VM server2 = host.getVM(1);
  VM client = host.getVM(2);

  cqDUnitTest.createServer(server1);
  final int port1 = server1.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server1.getHost());
  // Reserve the port server2 will use later so the pool can list both endpoints up front.
  final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);

  String poolName = "testCQFailOver";
  final String cqName = "testCQFailOver_0";

  cqDUnitTest.createPool(client, poolName, new String[] { host0, host0 },
      new int[] { port1, ports[0] });

  // create CQ.
  cqDUnitTest.createCQ(client, poolName, cqName, cqDUnitTest.cqs[0]);

  final int numObjects = 300;
  final int totalObjects = 500;

  // initialize Region with the first numObjects entries.
  server1.invoke(new CacheSerializableRunnable("Update Region") {
    @Override
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      for (int i = 1; i <= numObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });

  // Keep updating region (async invocation) while the CQ executes.
  server1.invokeAsync(new CacheSerializableRunnable("Update Region") {
    @Override
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      // Update (totalObjects - 1) entries.
      for (int i = 1; i < totalObjects; i++) {
        // Destroy entries in the middle of the key range.
        if (i > 25 && i < 201) {
          region.destroy("" + i);
          continue;
        }
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
      // recreate destroyed entries.
      for (int j = 26; j < 201; j++) {
        Portfolio p = new Portfolio(j);
        region.put("" + j, p);
      }
      // Add the last key; its presence signals that all updates completed.
      Portfolio p = new Portfolio(totalObjects);
      region.put("" + totalObjects, p);
    }
  });

  // Execute CQ.
  // While region operation is in progress execute CQ.
  cqDUnitTest.executeCQ(client, cqName, true, null);

  // Verify CQ Cache results on the primary (server1): the cache must hold all keys.
  server1.invoke(new CacheSerializableRunnable("Verify CQ Cache results") {
    @Override
    public void run2() throws CacheException {
      CqServiceImpl cqServiceImpl = null;
      try {
        cqServiceImpl =
            (CqServiceImpl) ((DefaultQueryService) getCache().getQueryService()).getCqService();
      } catch (Exception ex) {
        LogWriterUtils.getLogWriter().info("Failed to get the internal CqServiceImpl.", ex);
        Assert.fail("Failed to get the internal CqServiceImpl.", ex);
      }
      // Wait till all the region updates are performed (last key written by the async task).
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      while (region.get("" + totalObjects) == null) {
        try {
          Thread.sleep(50);
        } catch (InterruptedException ex) {
          // Preserve the interrupt status instead of silently swallowing it.
          Thread.currentThread().interrupt();
        }
      }
      Collection<? extends InternalCqQuery> cqs = cqServiceImpl.getAllCqs();
      for (InternalCqQuery cq : cqs) {
        ServerCQImpl cqQuery = (ServerCQImpl) cq;
        if (cqQuery.getName().equals(cqName)) {
          int size = cqQuery.getCqResultKeysSize();
          if (size != totalObjects) {
            // Log which keys are missing to aid debugging before the assertion fails.
            LogWriterUtils.getLogWriter().info("The number of Cached events " + size
                + " is not equal to the expected size " + totalObjects);
            HashSet<String> expectedKeys = new HashSet<String>();
            // Include every expected key 1..totalObjects (the assertion expects totalObjects keys).
            for (int i = 1; i <= totalObjects; i++) {
              expectedKeys.add("" + i);
            }
            Set<?> cachedKeys = cqQuery.getCqResultKeyCache();
            expectedKeys.removeAll(cachedKeys);
            LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
          }
          assertEquals("The number of keys cached for cq " + cqName + " is wrong.", totalObjects,
              cqQuery.getCqResultKeysSize());
        }
      }
    }
  });

  cqDUnitTest.createServer(server2, ports[0]);
  final int thePort2 = server2.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
  System.out.println("### Port on which server1 running : " + port1 + " Server2 running : " + thePort2);
  Wait.pause(3 * 1000);

  // Close server1 for CQ fail over to server2.
  cqDUnitTest.closeServer(server1);
  Wait.pause(3 * 1000);

  // Verify CQ Cache results on server2: with optimized execute (EXECUTE_QUERY_DURING_INIT
  // false) the results cache is NOT initialized on the failover server, so it must be empty.
  server2.invoke(new CacheSerializableRunnable("Verify CQ Cache results") {
    @Override
    public void run2() throws CacheException {
      CqServiceImpl cqServiceImpl = null;
      try {
        cqServiceImpl =
            (CqServiceImpl) ((DefaultQueryService) getCache().getQueryService()).getCqService();
      } catch (Exception ex) {
        LogWriterUtils.getLogWriter().info("Failed to get the internal CqServiceImpl.", ex);
        Assert.fail("Failed to get the internal CqServiceImpl.", ex);
      }
      // Wait till all the region updates are performed (last key written by the async task).
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      while (region.get("" + totalObjects) == null) {
        try {
          Thread.sleep(50);
        } catch (InterruptedException ex) {
          Thread.currentThread().interrupt();
        }
      }
      Collection<? extends InternalCqQuery> cqs = cqServiceImpl.getAllCqs();
      for (InternalCqQuery cq : cqs) {
        ServerCQImpl cqQuery = (ServerCQImpl) cq;
        if (cqQuery.getName().equals(cqName)) {
          int size = cqQuery.getCqResultKeysSize();
          assertEquals("The number of keys cached for cq " + cqName + " is wrong.", 0, size);
        }
      }
    }
  });

  // Close.
  cqDUnitTest.closeClient(client);
  cqDUnitTest.closeServer(server2);
}
Use of org.apache.geode.cache.query.internal.cq.InternalCqQuery in project geode by apache.
The class CqStatsUsingPoolDUnitTest, method validateCQStats.
/**
 * Validates CQ statistics on the given VM against the expected event counts.
 * Passing {@code CqQueryUsingPoolDUnitTest.noTest} for any expected count skips that check.
 *
 * @param vm VM on which the CQ is registered
 * @param cqName name of the CQ being validated (used in failure messages)
 * @param creates expected number of create (insert) events, or noTest to skip
 * @param updates expected number of update events, or noTest to skip
 * @param deletes expected number of delete events, or noTest to skip
 * @param totalEvents expected total event count, or noTest to skip
 * @param cqListenerInvocations expected number of CQ listener invocations, or noTest to skip
 */
private void validateCQStats(VM vm, final String cqName, final int creates, final int updates,
    final int deletes, final int totalEvents, final int cqListenerInvocations) {
  vm.invoke(new CacheSerializableRunnable("Validate CQs") {
    @Override
    public void run2() throws CacheException {
      LogWriterUtils.getLogWriter().info("### Validating CQ Stats. ### " + cqName);
      // Get CQ Service.
      QueryService qService = null;
      try {
        qService = getCache().getQueryService();
      } catch (Exception cqe) {
        cqe.printStackTrace();
        fail("Failed to get query service.");
      }
      CqService cqService = null;
      try {
        cqService = ((DefaultQueryService) qService).getCqService();
      } catch (CqException e) {
        e.printStackTrace();
        fail("Failed to get CqService, CQ : " + cqName);
      }
      Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
      if (cqs.size() == 0) {
        fail("Failed to get CqQuery for CQ : " + cqName);
      }
      // NOTE(review): assumes the first registered CQ is the one named cqName; holds as long
      // as these tests register a single CQ per VM — confirm if more CQs are ever added.
      CqQueryImpl cQuery = (CqQueryImpl) cqs.iterator().next();
      CqStatistics cqStats = cQuery.getStatistics();
      CqQueryVsdStats cqVsdStats = cQuery.getVsdStats();
      if (cqStats == null || cqVsdStats == null) {
        fail("Failed to get CqQuery Stats for CQ : " + cqName);
      }
      getCache().getLogger().info("#### CQ stats for " + cQuery.getName() + ": "
          + " Events Total: " + cqStats.numEvents()
          + " Events Created: " + cqStats.numInserts()
          + " Events Updated: " + cqStats.numUpdates()
          + " Events Deleted: " + cqStats.numDeletes()
          + " CQ Listener invocations: " + cqVsdStats.getNumCqListenerInvocations()
          + " Initial results time (nano sec): " + cqVsdStats.getCqInitialResultsTime());
      // Check for totalEvents count.
      if (totalEvents != CqQueryUsingPoolDUnitTest.noTest) {
        // Result size validation.
        assertEquals("Total Event Count mismatch", totalEvents, cqStats.numEvents());
      }
      // Check for create count.
      if (creates != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Create Event mismatch", creates, cqStats.numInserts());
      }
      // Check for update count.
      if (updates != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Update Event mismatch", updates, cqStats.numUpdates());
      }
      // Check for delete count.
      if (deletes != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Delete Event mismatch", deletes, cqStats.numDeletes());
      }
      // Check for CQ listener invocations.
      if (cqListenerInvocations != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("CQ Listener invocations mismatch", cqListenerInvocations,
            cqVsdStats.getNumCqListenerInvocations());
      }
    }
  });
}
Use of org.apache.geode.cache.query.internal.cq.InternalCqQuery in project geode by apache.
The class ProxyQueryService, method getCqs.
/**
 * Returns the CQs registered on the given region that belong to this proxy,
 * i.e. those whose names are tracked in {@code cqNames}.
 *
 * @param regionName region whose CQs are returned
 * @return the matching client CQs; an empty array if none match
 * @throws CqException if the underlying CQ service cannot supply the CQs
 */
public ClientCQ[] getCqs(String regionName) throws CqException {
  preOp();
  try {
    // Type the list as ClientCQ so a non-client CQ fails fast here (ClassCastException)
    // instead of surfacing later as an ArrayStoreException during toArray.
    ArrayList<ClientCQ> cqList = new ArrayList<ClientCQ>();
    Collection<? extends InternalCqQuery> cqs =
        ((DefaultQueryService) realQueryService).getCqService().getAllCqs(regionName);
    for (InternalCqQuery cq : cqs) {
      // Only expose CQs that were created through this proxy.
      if (this.cqNames.contains(cq.getName())) {
        cqList.add((ClientCQ) cq);
      }
    }
    return cqList.toArray(new ClientCQ[cqList.size()]);
  } catch (CqException cqe) {
    if (logger.isDebugEnabled()) {
      logger.debug("Unable to get Cqs. Error: {}", cqe.getMessage(), cqe);
    }
    throw cqe;
  }
}
Use of org.apache.geode.cache.query.internal.cq.InternalCqQuery in project geode by apache.
The class CacheServerBridge, method getContinuousQueryList.
/**
 * Returns the names of all continuous queries registered with this cache's CQ service.
 *
 * @return an array of CQ names, or {@code ManagementConstants.NO_DATA_STRING} when the CQ
 *         service is unavailable or no CQs are registered
 */
public String[] getContinuousQueryList() {
  CqService cqService = cache.getCqService();
  if (cqService == null) {
    return ManagementConstants.NO_DATA_STRING;
  }
  Collection<? extends InternalCqQuery> registeredCqs = cqService.getAllCqs();
  if (registeredCqs == null || registeredCqs.isEmpty()) {
    return ManagementConstants.NO_DATA_STRING;
  }
  String[] cqNames = new String[registeredCqs.size()];
  int index = 0;
  for (InternalCqQuery query : registeredCqs) {
    cqNames[index++] = query.getName();
  }
  return cqNames;
}
Use of org.apache.geode.cache.query.internal.cq.InternalCqQuery in project geode by apache.
The class CqResultSetUsingPoolDUnitTest, method testCqResultsCachingWithFailOver.
/**
 * Tests CQ Result Caching with CQ Failover. Both the original primary (server1) and the
 * failover server (server2) are expected to hold the complete CQ results cache of
 * {@code totalObjects} keys.
 *
 * @throws Exception if any distributed invocation fails
 */
// GEODE-1251
@Category(FlakyTest.class)
@Test
public void testCqResultsCachingWithFailOver() throws Exception {
  final Host host = Host.getHost(0);
  VM server1 = host.getVM(0);
  VM server2 = host.getVM(1);
  VM client = host.getVM(2);

  cqDUnitTest.createServer(server1);
  final int port1 = server1.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server1.getHost());
  // Reserve the port server2 will use later so the pool can list both endpoints up front.
  final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);

  String poolName = "testCQFailOver";
  final String cqName = "testCQFailOver_0";

  cqDUnitTest.createPool(client, poolName, new String[] { host0, host0 },
      new int[] { port1, ports[0] });

  // create CQ.
  cqDUnitTest.createCQ(client, poolName, cqName, cqDUnitTest.cqs[0]);

  final int numObjects = 300;
  final int totalObjects = 500;

  // initialize Region with the first numObjects entries.
  server1.invoke(new CacheSerializableRunnable("Update Region") {
    @Override
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      for (int i = 1; i <= numObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });

  // Keep updating region (async invocation) while the CQ executes.
  server1.invokeAsync(new CacheSerializableRunnable("Update Region") {
    @Override
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      // Update (totalObjects - 1) entries.
      for (int i = 1; i < totalObjects; i++) {
        // Destroy entries in the middle of the key range.
        if (i > 25 && i < 201) {
          region.destroy("" + i);
          continue;
        }
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
      // recreate destroyed entries.
      for (int j = 26; j < 201; j++) {
        Portfolio p = new Portfolio(j);
        region.put("" + j, p);
      }
      // Add the last key; its presence signals that all updates completed.
      Portfolio p = new Portfolio(totalObjects);
      region.put("" + totalObjects, p);
    }
  });

  // Execute CQ.
  // While region operation is in progress execute CQ.
  cqDUnitTest.executeCQ(client, cqName, true, null);

  // Verify CQ Cache results on the primary (server1).
  server1.invoke(new CacheSerializableRunnable("Verify CQ Cache results") {
    @Override
    public void run2() throws CacheException {
      CqService cqService = null;
      try {
        cqService = ((DefaultQueryService) getCache().getQueryService()).getCqService();
      } catch (Exception ex) {
        LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
        Assert.fail("Failed to get the internal CqService.", ex);
      }
      // Wait till all the region updates are performed (last key written by the async task).
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      while (region.get("" + totalObjects) == null) {
        try {
          Thread.sleep(50);
        } catch (InterruptedException ex) {
          // Preserve the interrupt status instead of silently swallowing it.
          Thread.currentThread().interrupt();
        }
      }
      Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
      for (InternalCqQuery cq : cqs) {
        ServerCQImpl cqQuery = (ServerCQImpl) cq;
        if (cqQuery.getName().equals(cqName)) {
          int size = cqQuery.getCqResultKeysSize();
          if (size != totalObjects) {
            // Log which keys are missing to aid debugging before the assertion fails.
            LogWriterUtils.getLogWriter().info("The number of Cached events " + size
                + " is not equal to the expected size " + totalObjects);
            HashSet<String> expectedKeys = new HashSet<String>();
            // Include every expected key 1..totalObjects (the assertion expects totalObjects keys).
            for (int i = 1; i <= totalObjects; i++) {
              expectedKeys.add("" + i);
            }
            Set<?> cachedKeys = cqQuery.getCqResultKeyCache();
            expectedKeys.removeAll(cachedKeys);
            LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
          }
          assertEquals("The number of keys cached for cq " + cqName + " is wrong.", totalObjects,
              cqQuery.getCqResultKeysSize());
        }
      }
    }
  });

  cqDUnitTest.createServer(server2, ports[0]);
  final int thePort2 = server2.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
  System.out.println("### Port on which server1 running : " + port1 + " Server2 running : " + thePort2);
  Wait.pause(3 * 1000);

  // Close server1 for CQ fail over to server2.
  cqDUnitTest.closeServer(server1);
  Wait.pause(3 * 1000);

  // Verify CQ Cache results on the failover server (server2): the full cache is expected
  // because (unlike the optimized-execute variant) the CQ is re-executed during HA.
  server2.invoke(new CacheSerializableRunnable("Verify CQ Cache results") {
    @Override
    public void run2() throws CacheException {
      CqService cqService = null;
      try {
        cqService = ((DefaultQueryService) getCache().getQueryService()).getCqService();
      } catch (Exception ex) {
        LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
        Assert.fail("Failed to get the internal CqService.", ex);
      }
      // Wait till all the region updates are performed (last key written by the async task).
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      while (region.get("" + totalObjects) == null) {
        try {
          Thread.sleep(50);
        } catch (InterruptedException ex) {
          Thread.currentThread().interrupt();
        }
      }
      Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
      for (InternalCqQuery cq : cqs) {
        ServerCQImpl cqQuery = (ServerCQImpl) cq;
        if (cqQuery.getName().equals(cqName)) {
          int size = cqQuery.getCqResultKeysSize();
          if (size != totalObjects) {
            // Log which keys are missing to aid debugging before the assertion fails.
            LogWriterUtils.getLogWriter().info("The number of Cached events " + size
                + " is not equal to the expected size " + totalObjects);
            HashSet<String> expectedKeys = new HashSet<String>();
            // Include every expected key 1..totalObjects (the assertion expects totalObjects keys).
            for (int i = 1; i <= totalObjects; i++) {
              expectedKeys.add("" + i);
            }
            Set<?> cachedKeys = cqQuery.getCqResultKeyCache();
            expectedKeys.removeAll(cachedKeys);
            LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
          }
          assertEquals("The number of keys cached for cq " + cqName + " is wrong.", totalObjects,
              cqQuery.getCqResultKeysSize());
        }
      }
    }
  });

  // Close.
  cqDUnitTest.closeClient(client);
  cqDUnitTest.closeServer(server2);
}
Aggregations