use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.
the class CqStatsUsingPoolDUnitTest method validateCQServiceStats.
private void validateCQServiceStats(VM vm, final int created, final int activated,
    final int stopped, final int closed, final int cqsOnClient, final int cqsOnRegion,
    final int clientsWithCqs) {
  vm.invoke(new CacheSerializableRunnable("Validate CQ Service Stats") {
    @Override
    public void run2() throws CacheException {
      LogWriterUtils.getLogWriter().info("### Validating CQ Service Stats. ###");
      // Get the CQ Service.
      QueryService qService = null;
      try {
        qService = getCache().getQueryService();
      } catch (Exception cqe) {
        cqe.printStackTrace();
        fail("Failed to get the QueryService.");
      }
      CqServiceStatistics cqServiceStats = qService.getCqStatistics();
      CqServiceVsdStats cqServiceVsdStats = null;
      try {
        cqServiceVsdStats =
            ((CqServiceImpl) ((DefaultQueryService) qService).getCqService()).stats();
      } catch (CqException e) {
        // Log the failure; cqServiceVsdStats stays null.
        e.printStackTrace();
      }
      if (cqServiceStats == null) {
        fail("Failed to get CQ Service Stats");
      }
      getCache().getLogger()
          .info("#### CQ Service stats: " + " CQs created: " + cqServiceStats.numCqsCreated()
              + " CQs active: " + cqServiceStats.numCqsActive() + " CQs stopped: "
              + cqServiceStats.numCqsStopped() + " CQs closed: " + cqServiceStats.numCqsClosed()
              + " CQs on Client: " + cqServiceStats.numCqsOnClient()
              + " CQs on region /root/regionA : "
              + cqServiceVsdStats.numCqsOnRegion(GemFireCacheImpl.getInstance(), "/root/regionA")
              + " Clients with CQs: " + cqServiceVsdStats.getNumClientsWithCqs());
      // Check the created count.
      if (created != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Number of CQs created mismatch", created, cqServiceStats.numCqsCreated());
      }
      // Check the activated count.
      if (activated != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Number of CQs activated mismatch", activated,
            cqServiceStats.numCqsActive());
      }
      // Check the stopped count.
      if (stopped != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Number of CQs stopped mismatch", stopped, cqServiceStats.numCqsStopped());
      }
      // Check the closed count.
      if (closed != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Number of CQs closed mismatch", closed, cqServiceStats.numCqsClosed());
      }
      // Check the CQs-on-client count.
      if (cqsOnClient != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Number of CQs on client mismatch", cqsOnClient,
            cqServiceStats.numCqsOnClient());
      }
      // Check the CQs-on-region count.
      if (cqsOnRegion != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Number of CQs on region /root/regionA mismatch", cqsOnRegion,
            cqServiceVsdStats.numCqsOnRegion(GemFireCacheImpl.getInstance(), "/root/regionA"));
      }
      // Check the clients-with-CQs count.
      if (clientsWithCqs != CqQueryUsingPoolDUnitTest.noTest) {
        assertEquals("Clients with CQs mismatch", clientsWithCqs,
            cqServiceVsdStats.getNumClientsWithCqs());
      }
    }
  });
}
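For context, a test would typically call this helper after registering and executing CQs, passing the sentinel CqQueryUsingPoolDUnitTest.noTest for any counter it does not want to assert on. A hypothetical invocation (the client VM and expected counts are illustrative, not from the source):

  // Hypothetical usage inside a dUnit test method; `client` is a test VM and
  // the CQ is assumed to have been created and executed by other test helpers.
  final int noTest = CqQueryUsingPoolDUnitTest.noTest;
  validateCQServiceStats(client, 1 /* created */, 1 /* activated */, 0 /* stopped */,
      0 /* closed */, 1 /* cqsOnClient */, noTest /* cqsOnRegion */, noTest /* clientsWithCqs */);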
use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.
the class PoolImpl method getQueryService.
/**
 * Returns a QueryService that can be used to execute queries on the servers associated with
 * this pool.
 *
 * @return the QueryService
 */
public QueryService getQueryService() {
  Cache cache = CacheFactory.getInstance(InternalDistributedSystem.getAnyInstance());
  DefaultQueryService queryService = new DefaultQueryService((InternalCache) cache);
  queryService.setPool(this);
  return queryService;
}
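Note that this method constructs a new DefaultQueryService bound to the pool on each call, rather than reusing a cached instance. A minimal sketch of how a pool's QueryService is typically obtained and used from client code (host, port, pool and region names are assumptions, not from the Geode source above):

  // Illustrative client-side usage; a client cache must already exist.
  Pool pool = PoolManager.createFactory()
      .addServer("localhost", 40404)
      .create("examplePool");

  // Queries executed through this QueryService run on the pool's servers.
  QueryService queryService = pool.getQueryService();
  Query query = queryService.newQuery("SELECT * FROM /exampleRegion p WHERE p.ID > 10");
  SelectResults<?> results = (SelectResults<?>) query.execute();
  System.out.println("Result size: " + results.size());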
use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.
the class IndexUtils method findIndex.
public static IndexData findIndex(String regionpath, String[] definitions,
    CompiledValue indexedExpression, String projectionAttributes, InternalCache cache,
    boolean usePrimaryIndex, ExecutionContext context)
    throws AmbiguousNameException, TypeMismatchException, NameResolutionException {
  DefaultQueryService qs = (DefaultQueryService) cache.getLocalQueryService();
  IndexData indxData = null;
  if (usePrimaryIndex) {
    if (useOnlyExactIndexs) {
      indxData = qs.getIndex(regionpath, definitions, IndexType.PRIMARY_KEY, indexedExpression,
          context);
    } else {
      indxData = qs.getBestMatchIndex(regionpath, definitions, IndexType.PRIMARY_KEY,
          indexedExpression, context);
    }
    // The condition is an equality or not-equals condition, so a hash index is also
    // usable; fall back to it when no primary key index is found.
    if (indxData == null) {
      if (useOnlyExactIndexs) {
        indxData = qs.getIndex(regionpath, definitions, IndexType.HASH, indexedExpression,
            context);
      } else {
        indxData = qs.getBestMatchIndex(regionpath, definitions, IndexType.HASH,
            indexedExpression, context);
      }
    }
  }
  // Fall back to a functional index when no valid index has been found so far.
  if (indxData == null || !indxData._index.isValid()) {
    if (useOnlyExactIndexs) {
      indxData = qs.getIndex(regionpath, definitions, IndexType.FUNCTIONAL, indexedExpression,
          context);
    } else {
      indxData = qs.getBestMatchIndex(regionpath, definitions, IndexType.FUNCTIONAL,
          indexedExpression, context);
    }
  } else {
    // If an exact PRIMARY_KEY index was not found, try to find an exact FUNCTIONAL index.
    if (indxData._matchLevel != 0) {
      IndexData functionalIndxData = qs.getIndex(regionpath, definitions,
          IndexType.FUNCTIONAL /* do not use pk index */, indexedExpression, context);
      // If the FUNCTIONAL index is an exact match, use it; otherwise keep the
      // PRIMARY_KEY index.
      if (functionalIndxData != null && functionalIndxData._index.isValid()) {
        indxData = functionalIndxData;
      }
    }
  }
  return indxData;
}
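The lookup order above (PRIMARY_KEY, then HASH, then FUNCTIONAL) mirrors the three index kinds that can be created through the public QueryService API. A brief sketch of creating each kind (region and expression names are illustrative assumptions):

  // Illustrative index creation; region and field names are not from the source above.
  QueryService qs = cache.getQueryService();

  // PRIMARY_KEY index: maps region keys to entries, cheapest for key-equality predicates.
  qs.createKeyIndex("pkIdx", "ID", "/portfolios");

  // HASH index: usable for equality / not-equals predicates only.
  qs.createHashIndex("hashIdx", "p.status", "/portfolios p");

  // FUNCTIONAL (range) index: supports <, <=, >, >= as well as equality.
  qs.createIndex("rangeIdx", "p.ID", "/portfolios p");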
use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.
the class LocalRegion method createOQLIndexes.
void createOQLIndexes(InternalRegionArguments internalRegionArgs, boolean recoverFromDisk) {
  if (internalRegionArgs == null || internalRegionArgs.getIndexes() == null
      || internalRegionArgs.getIndexes().isEmpty()) {
    return;
  }
  if (logger.isDebugEnabled()) {
    logger.debug("LocalRegion.createOQLIndexes on region {}", this.getFullPath());
  }
  long start = getCachePerfStats().startIndexInitialization();
  List oqlIndexes = internalRegionArgs.getIndexes();
  if (this.indexManager == null) {
    this.indexManager = IndexUtils.getIndexManager(this, true);
  }
  DiskRegion dr = this.getDiskRegion();
  boolean isOverflowToDisk = false;
  if (dr != null) {
    isOverflowToDisk = dr.isOverflowEnabled();
    if (recoverFromDisk && !isOverflowToDisk) {
      // Refer bug #44119.
      // For disk regions, index creation should wait for async value creation to complete
      // before it starts its iteration.
      // For disk overflow regions the waitForAsyncRecovery is done in the
      // populateOQLIndexes method via getBestIterator().
      dr.waitForAsyncRecovery();
    }
  }
  Set<Index> indexes = new HashSet<>();
  Set<Index> prIndexes = new HashSet<>();
  int initLevel = 0;
  try {
    // Release the initialization latch for index creation.
    initLevel = LocalRegion.setThreadInitLevelRequirement(ANY_INIT);
    for (Object o : oqlIndexes) {
      IndexCreationData icd = (IndexCreationData) o;
      try {
        if (icd.getPartitionedIndex() != null) {
          ExecutionContext externalContext = new ExecutionContext(null, this.cache);
          if (internalRegionArgs.getPartitionedRegion() != null) {
            externalContext.setBucketRegion(internalRegionArgs.getPartitionedRegion(),
                (BucketRegion) this);
          }
          if (logger.isDebugEnabled()) {
            logger.debug("IndexManager Index creation process for {}", icd.getIndexName());
          }
          // Load entries during initialization only for non-overflow regions.
          indexes.add(this.indexManager.createIndex(icd.getIndexName(), icd.getIndexType(),
              icd.getIndexExpression(), icd.getIndexFromClause(), icd.getIndexImportString(),
              externalContext, icd.getPartitionedIndex(), !isOverflowToDisk));
          prIndexes.add(icd.getPartitionedIndex());
        } else {
          if (logger.isDebugEnabled()) {
            logger.debug("QueryService Index creation process for {}", icd.getIndexName());
          }
          DefaultQueryService qs =
              (DefaultQueryService) getGemFireCache().getLocalQueryService();
          String fromClause =
              icd.getIndexType() == IndexType.FUNCTIONAL || icd.getIndexType() == IndexType.HASH
                  ? icd.getIndexFromClause()
                  : this.getFullPath();
          // Load entries during initialization only for non-overflow regions.
          indexes.add(qs.createIndex(icd.getIndexName(), icd.getIndexType(),
              icd.getIndexExpression(), fromClause, icd.getIndexImportString(),
              !isOverflowToDisk));
        }
      } catch (Exception ex) {
        logger.info("Failed to create index {} on region {} with exception: {}",
            icd.getIndexName(), this.getFullPath(), ex);
        // For declarative (cache.xml) index creation, fail region creation instead of
        // swallowing the exception.
        if (internalRegionArgs.getDeclarativeIndexCreation()) {
          throw new InternalGemFireError(
              LocalizedStrings.GemFireCache_INDEX_CREATION_EXCEPTION_1
                  .toLocalizedString(icd.getIndexName(), this.getFullPath()),
              ex);
        }
      }
    }
  } finally {
    // Reset the initialization latch requirement.
    LocalRegion.setThreadInitLevelRequirement(initLevel);
  }
  // Load data into the OQL indexes in case of disk recovery and disk overflow.
  if (isOverflowToDisk) {
    if (recoverFromDisk) {
      populateOQLIndexes(indexes);
    } else {
      // Empty indexes are created for overflow regions but not populated at this stage,
      // since this is not recovery. Set the populate flag so the indexes can apply updates.
      this.indexManager.setPopulateFlagForIndexes(indexes);
    }
    // Due to bug #52096 the PR index populate flags were not being set;
    // we should revisit and clean up the index creation code paths.
    this.indexManager.setPopulateFlagForIndexes(prIndexes);
  }
  getCachePerfStats().endIndexInitialization(start);
}
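Indexes reach this method when they are declared before the region exists, for example through cache.xml. From application code, a comparable deferred pattern is the public define-then-create API, which records definitions first and builds all of them in one pass; a sketch (index and region names are illustrative assumptions):

  // Illustrative use of the public deferred-index API; names are not from the source above.
  QueryService qs = cache.getQueryService();

  // Definitions are only recorded here; no index is built yet.
  qs.defineIndex("idIdx", "p.ID", "/portfolios p");
  qs.defineIndex("statusIdx", "p.status", "/portfolios p");

  // All defined indexes are created (and populated) together.
  List<Index> created = qs.createDefinedIndexes();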
use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.
the class CqResultSetUsingPoolDUnitTest method testCqResultsCachingWithFailOver.
/**
 * Tests CQ result caching with CQ failover.
 */
// GEODE-1251
@Category(FlakyTest.class)
@Test
public void testCqResultsCachingWithFailOver() throws Exception {
  final Host host = Host.getHost(0);
  VM server1 = host.getVM(0);
  VM server2 = host.getVM(1);
  VM client = host.getVM(2);
  cqDUnitTest.createServer(server1);
  final int port1 = server1.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server1.getHost());
  final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
  String poolName = "testCQFailOver";
  final String cqName = "testCQFailOver_0";
  cqDUnitTest.createPool(client, poolName, new String[] { host0, host0 },
      new int[] { port1, ports[0] });
  // Create the CQ.
  cqDUnitTest.createCQ(client, poolName, cqName, cqDUnitTest.cqs[0]);
  final int numObjects = 300;
  final int totalObjects = 500;
  // Initialize the region.
  server1.invoke(new CacheSerializableRunnable("Update Region") {
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      for (int i = 1; i <= numObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });
  // Keep updating the region (async invocation).
  server1.invokeAsync(new CacheSerializableRunnable("Update Region") {
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      // Update (totalObjects - 1) entries.
      for (int i = 1; i < totalObjects; i++) {
        // Destroy some entries.
        if (i > 25 && i < 201) {
          region.destroy("" + i);
          continue;
        }
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
      // Recreate the destroyed entries.
      for (int j = 26; j < 201; j++) {
        Portfolio p = new Portfolio(j);
        region.put("" + j, p);
      }
      // Add the last key.
      Portfolio p = new Portfolio(totalObjects);
      region.put("" + totalObjects, p);
    }
  });
  // Execute the CQ while the region operations are in progress.
  cqDUnitTest.executeCQ(client, cqName, true, null);
  // Verify the CQ cache results.
  server1.invoke(new CacheSerializableRunnable("Verify CQ Cache results") {
    public void run2() throws CacheException {
      CqService cqService = null;
      try {
        cqService = ((DefaultQueryService) getCache().getQueryService()).getCqService();
      } catch (Exception ex) {
        LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
        Assert.fail("Failed to get the internal CqService.", ex);
      }
      // Wait till all the region updates are performed.
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      while (region.get("" + totalObjects) == null) {
        try {
          Thread.sleep(50);
        } catch (Exception ex) {
          // ignore
        }
      }
      Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
      for (InternalCqQuery cq : cqs) {
        ServerCQImpl cqQuery = (ServerCQImpl) cq;
        if (cqQuery.getName().equals(cqName)) {
          int size = cqQuery.getCqResultKeysSize();
          if (size != totalObjects) {
            LogWriterUtils.getLogWriter().info("The number of cached events " + size
                + " is not equal to the expected size " + totalObjects);
            HashSet expectedKeys = new HashSet();
            for (int i = 1; i < totalObjects; i++) {
              expectedKeys.add("" + i);
            }
            Set cachedKeys = cqQuery.getCqResultKeyCache();
            expectedKeys.removeAll(cachedKeys);
            LogWriterUtils.getLogWriter().info("Missing keys from the cache: " + expectedKeys);
          }
          assertEquals("The number of keys cached for cq " + cqName + " is wrong.",
              totalObjects, cqQuery.getCqResultKeysSize());
        }
      }
    }
  });
  cqDUnitTest.createServer(server2, ports[0]);
  final int thePort2 = server2.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
  System.out.println("### Port on which server1 running : " + port1
      + " Server2 running : " + thePort2);
  Wait.pause(3 * 1000);
  // Close server1 so the CQ fails over to server2.
  cqDUnitTest.closeServer(server1);
  Wait.pause(3 * 1000);
  // Verify the CQ cache results on server2.
  server2.invoke(new CacheSerializableRunnable("Verify CQ Cache results") {
    public void run2() throws CacheException {
      CqService cqService = null;
      try {
        cqService = ((DefaultQueryService) getCache().getQueryService()).getCqService();
      } catch (Exception ex) {
        LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
        Assert.fail("Failed to get the internal CqService.", ex);
      }
      // Wait till all the region updates are performed.
      Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
      while (region.get("" + totalObjects) == null) {
        try {
          Thread.sleep(50);
        } catch (Exception ex) {
          // ignore
        }
      }
      Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
      for (InternalCqQuery cq : cqs) {
        ServerCQImpl cqQuery = (ServerCQImpl) cq;
        if (cqQuery.getName().equals(cqName)) {
          int size = cqQuery.getCqResultKeysSize();
          if (size != totalObjects) {
            LogWriterUtils.getLogWriter().info("The number of cached events " + size
                + " is not equal to the expected size " + totalObjects);
            HashSet expectedKeys = new HashSet();
            for (int i = 1; i < totalObjects; i++) {
              expectedKeys.add("" + i);
            }
            Set cachedKeys = cqQuery.getCqResultKeyCache();
            expectedKeys.removeAll(cachedKeys);
            LogWriterUtils.getLogWriter().info("Missing keys from the cache: " + expectedKeys);
          }
          assertEquals("The number of keys cached for cq " + cqName + " is wrong.",
              totalObjects, cqQuery.getCqResultKeysSize());
        }
      }
    }
  });
  // Close.
  cqDUnitTest.closeClient(client);
  cqDUnitTest.closeServer(server2);
}
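The client-side half of such a test, creating and executing a CQ against the pool, follows a standard pattern with the public CQ API. A minimal sketch (the CQ name, query string, and listener body are illustrative assumptions):

  // Illustrative client-side CQ registration; names and query are not from the test above.
  QueryService queryService = pool.getQueryService();

  CqAttributesFactory cqf = new CqAttributesFactory();
  cqf.addCqListener(new CqListenerAdapter() {
    @Override
    public void onEvent(CqEvent event) {
      System.out.println("CQ event for key: " + event.getKey());
    }
  });

  // executeWithInitialResults() both registers the CQ and returns the rows that
  // currently satisfy it; the server caches the matching keys, which is what the
  // failover test above verifies on server1 and server2.
  CqQuery cq = queryService.newCq("exampleCq",
      "SELECT * FROM /root/regionA p WHERE p.ID > 0", cqf.create());
  SelectResults<?> initial = cq.executeWithInitialResults();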