Example 11 with DefaultQueryService

use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.

the class CqStatsUsingPoolDUnitTest method validateCQServiceStats.

private void validateCQServiceStats(VM vm, final int created, final int activated, final int stopped, final int closed, final int cqsOnClient, final int cqsOnRegion, final int clientsWithCqs) {
    vm.invoke(new CacheSerializableRunnable("Validate CQ Service Stats") {

        @Override
        public void run2() throws CacheException {
            LogWriterUtils.getLogWriter().info("### Validating CQ Service Stats. ### ");
            // Get CQ Service.
            QueryService qService = null;
            try {
                qService = getCache().getQueryService();
            } catch (Exception cqe) {
                cqe.printStackTrace();
                fail("Failed to getCQService.");
            }
            CqServiceStatistics cqServiceStats = null;
            cqServiceStats = qService.getCqStatistics();
            CqServiceVsdStats cqServiceVsdStats = null;
            try {
                cqServiceVsdStats = ((CqServiceImpl) ((DefaultQueryService) qService).getCqService()).stats();
            } catch (CqException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
            if (cqServiceStats == null) {
                fail("Failed to get CQ Service Stats");
            }
            getCache().getLogger().info("#### CQ Service stats: " + " CQs created: " + cqServiceStats.numCqsCreated() + " CQs active: " + cqServiceStats.numCqsActive() + " CQs stopped: " + cqServiceStats.numCqsStopped() + " CQs closed: " + cqServiceStats.numCqsClosed() + " CQs on Client: " + cqServiceStats.numCqsOnClient() + " CQs on region /root/regionA : " + cqServiceVsdStats.numCqsOnRegion(GemFireCacheImpl.getInstance(), "/root/regionA") + " Clients with CQs: " + cqServiceVsdStats.getNumClientsWithCqs());
            // Check for created count.
            if (created != CqQueryUsingPoolDUnitTest.noTest) {
                assertEquals("Number of CQs created mismatch", created, cqServiceStats.numCqsCreated());
            }
            // Check for activated count.
            if (activated != CqQueryUsingPoolDUnitTest.noTest) {
                assertEquals("Number of CQs activated mismatch", activated, cqServiceStats.numCqsActive());
            }
            // Check for stopped count.
            if (stopped != CqQueryUsingPoolDUnitTest.noTest) {
                assertEquals("Number of CQs stopped mismatch", stopped, cqServiceStats.numCqsStopped());
            }
            // Check for closed count.
            if (closed != CqQueryUsingPoolDUnitTest.noTest) {
                assertEquals("Number of CQs closed mismatch", closed, cqServiceStats.numCqsClosed());
            }
            // Check for CQs on client count.
            if (cqsOnClient != CqQueryUsingPoolDUnitTest.noTest) {
                assertEquals("Number of CQs on client mismatch", cqsOnClient, cqServiceStats.numCqsOnClient());
            }
            // Check for CQs on region.
            if (cqsOnRegion != CqQueryUsingPoolDUnitTest.noTest) {
                assertEquals("Number of CQs on region /root/regionA mismatch", cqsOnRegion, cqServiceVsdStats.numCqsOnRegion(GemFireCacheImpl.getInstance(), "/root/regionA"));
            }
            // Check for clients with CQs count.
            if (clientsWithCqs != CqQueryUsingPoolDUnitTest.noTest) {
                assertEquals("Clints with CQs mismatch", clientsWithCqs, cqServiceVsdStats.getNumClientsWithCqs());
            }
        }
    });
}
Also used : CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) CacheException(org.apache.geode.cache.CacheException) DefaultQueryService(org.apache.geode.cache.query.internal.DefaultQueryService) QueryService(org.apache.geode.cache.query.QueryService) CqServiceVsdStats(org.apache.geode.cache.query.internal.cq.CqServiceVsdStats) CqException(org.apache.geode.cache.query.CqException) CqServiceStatistics(org.apache.geode.cache.query.CqServiceStatistics) CqServiceImpl(org.apache.geode.cache.query.internal.cq.CqServiceImpl)
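The counters the test reads come from the public QueryService.getCqStatistics() API; only the per-region count (numCqsOnRegion) and the client count (getNumClientsWithCqs) require the cast down to the internal DefaultQueryService and its CqServiceVsdStats. A minimal client-side sketch of reading the public counters, assuming a hypothetical server endpoint (the host and port are placeholders, not taken from the test):

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.CqServiceStatistics;
import org.apache.geode.cache.query.QueryService;

public class CqStatsSketch {

    public static void main(String[] args) {
        // Hypothetical endpoint; point this at a running cache server.
        ClientCache cache = new ClientCacheFactory()
            .addPoolServer("localhost", 40404)
            // CQ events are delivered over the subscription channel.
            .setPoolSubscriptionEnabled(true)
            .create();

        QueryService qs = cache.getQueryService();

        // Same public statistics object the test validates above.
        CqServiceStatistics stats = qs.getCqStatistics();
        System.out.println("CQs created: " + stats.numCqsCreated()
            + ", active: " + stats.numCqsActive()
            + ", stopped: " + stats.numCqsStopped()
            + ", closed: " + stats.numCqsClosed()
            + ", on client: " + stats.numCqsOnClient());

        cache.close();
    }
}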

Example 12 with DefaultQueryService

use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.

the class PoolImpl method getQueryService.

/**
   * Returns the QueryService, that can be used to execute Query functions on the servers associated
   * with this pool.
   * 
   * @return the QueryService
   */
public QueryService getQueryService() {
    Cache cache = CacheFactory.getInstance(InternalDistributedSystem.getAnyInstance());
    DefaultQueryService queryService = new DefaultQueryService((InternalCache) cache);
    queryService.setPool(this);
    return queryService;
}
Also used : DefaultQueryService(org.apache.geode.cache.query.internal.DefaultQueryService) Cache(org.apache.geode.cache.Cache) InternalCache(org.apache.geode.internal.cache.InternalCache)
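getQueryService() is also exposed on the public Pool interface, so callers normally reach this method through a configured pool rather than by constructing DefaultQueryService themselves. A minimal sketch, assuming a hypothetical server endpoint and a server-side region named /portfolios (both placeholders):

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class PoolQuerySketch {

    public static void main(String[] args) throws Exception {
        ClientCache cache = new ClientCacheFactory().create();

        // Hypothetical pool; host, port and region name are placeholders.
        Pool pool = PoolManager.createFactory()
            .addServer("localhost", 40404)
            .create("queryPool");

        // Internally this is the DefaultQueryService created in the method above,
        // bound to the pool so queries are routed to the pool's servers.
        QueryService qs = pool.getQueryService();
        SelectResults<?> results =
            (SelectResults<?>) qs.newQuery("SELECT * FROM /portfolios").execute();
        System.out.println("Result size: " + results.size());

        pool.destroy();
        cache.close();
    }
}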

Example 13 with DefaultQueryService

use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.

the class IndexUtils method findIndex.

public static IndexData findIndex(String regionpath, String[] defintions, CompiledValue indexedExpression, String projectionAttributes, InternalCache cache, boolean usePrimaryIndex, ExecutionContext context) throws AmbiguousNameException, TypeMismatchException, NameResolutionException {
    DefaultQueryService qs = (DefaultQueryService) cache.getLocalQueryService();
    IndexData indxData = null;
    if (usePrimaryIndex) {
        if (useOnlyExactIndexs) {
            indxData = qs.getIndex(regionpath, defintions, IndexType.PRIMARY_KEY, indexedExpression, context);
        } else {
            indxData = qs.getBestMatchIndex(regionpath, defintions, IndexType.PRIMARY_KEY, indexedExpression, context);
        }
        // If no primary key index was found, fall back to a HASH index (usable when the
        // condition is an equality or not-equals condition).
        if (indxData == null) {
            if (useOnlyExactIndexs) {
                indxData = qs.getIndex(regionpath, defintions, IndexType.HASH, indexedExpression, context);
            } else {
                indxData = qs.getBestMatchIndex(regionpath, defintions, IndexType.HASH, indexedExpression, context);
            }
        }
    }
    // If no primary key or hash index was found, or the one found is not valid,
    // search for a FUNCTIONAL index.
    if (indxData == null || !indxData._index.isValid()) {
        if (useOnlyExactIndexs) {
            indxData = qs.getIndex(regionpath, defintions, IndexType.FUNCTIONAL, indexedExpression, context);
        } else {
            indxData = qs.getBestMatchIndex(regionpath, defintions, IndexType.FUNCTIONAL, indexedExpression, context);
        }
    } else {
        // if exact PRIMARY_KEY Index not found then try to find exact FUNCTIONAL Index
        if (indxData._matchLevel != 0) {
            IndexData functionalIndxData = qs.getIndex(regionpath, defintions, IndexType.FUNCTIONAL, /* do not use pk index */
            indexedExpression, context);
            // if FUNCTIONAL Index is exact match then use or else use PRIMARY_KEY Index
            if (functionalIndxData != null && functionalIndxData._index.isValid()) {
                indxData = functionalIndxData;
            }
        }
    }
    return indxData;
}
Also used : DefaultQueryService(org.apache.geode.cache.query.internal.DefaultQueryService)
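findIndex only returns indexes that were previously registered with the region's query service, preferring PRIMARY_KEY, then HASH, then FUNCTIONAL matches (exact matches when useOnlyExactIndexs is set, best matches otherwise). A minimal sketch of creating the three index types it can discover, using hypothetical region, field and index names:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.QueryService;

public class IndexCreationSketch {

    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().create();
        cache.createRegionFactory(RegionShortcut.REPLICATE).create("portfolios");

        QueryService qs = cache.getQueryService();

        // PRIMARY_KEY index: checked first when usePrimaryIndex is true.
        qs.createKeyIndex("pkIdx", "ID", "/portfolios");

        // HASH index: the next fallback, used for equality and not-equals conditions.
        qs.createHashIndex("hashIdx", "status", "/portfolios");

        // FUNCTIONAL (range) index: the general fallback.
        qs.createIndex("rangeIdx", "mktValue", "/portfolios");

        cache.close();
    }
}

Note that IndexUtils.findIndex itself is internal to the query engine; application code never calls it directly and only influences it through the indexes it creates.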

Example 14 with DefaultQueryService

use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.

the class LocalRegion method createOQLIndexes.

void createOQLIndexes(InternalRegionArguments internalRegionArgs, boolean recoverFromDisk) {
    if (internalRegionArgs == null || internalRegionArgs.getIndexes() == null || internalRegionArgs.getIndexes().isEmpty()) {
        return;
    }
    if (logger.isDebugEnabled()) {
        logger.debug("LocalRegion.createOQLIndexes on region {}", this.getFullPath());
    }
    long start = getCachePerfStats().startIndexInitialization();
    List oqlIndexes = internalRegionArgs.getIndexes();
    if (this.indexManager == null) {
        this.indexManager = IndexUtils.getIndexManager(this, true);
    }
    DiskRegion dr = this.getDiskRegion();
    boolean isOverflowToDisk = false;
    if (dr != null) {
        isOverflowToDisk = dr.isOverflowEnabled();
        if (recoverFromDisk && !isOverflowToDisk) {
            // Refer bug #44119
            // For disk regions, index creation should wait for async value creation to complete before
            // it starts its iteration
            // In case of disk overflow regions the waitForAsyncRecovery is done in populateOQLIndexes
            // method via getBestIterator()
            dr.waitForAsyncRecovery();
        }
    }
    Set<Index> indexes = new HashSet<Index>();
    Set<Index> prIndexes = new HashSet<>();
    int initLevel = 0;
    try {
        // Release the initialization latch for index creation.
        initLevel = LocalRegion.setThreadInitLevelRequirement(ANY_INIT);
        for (Object o : oqlIndexes) {
            IndexCreationData icd = (IndexCreationData) o;
            try {
                if (icd.getPartitionedIndex() != null) {
                    ExecutionContext externalContext = new ExecutionContext(null, this.cache);
                    if (internalRegionArgs.getPartitionedRegion() != null) {
                        externalContext.setBucketRegion(internalRegionArgs.getPartitionedRegion(), (BucketRegion) this);
                    }
                    if (logger.isDebugEnabled()) {
                        logger.debug("IndexManager Index creation process for {}", icd.getIndexName());
                    }
                    // load entries during initialization only for non overflow regions
                    indexes.add(this.indexManager.createIndex(icd.getIndexName(), icd.getIndexType(), icd.getIndexExpression(), icd.getIndexFromClause(), icd.getIndexImportString(), externalContext, icd.getPartitionedIndex(), !isOverflowToDisk));
                    prIndexes.add(icd.getPartitionedIndex());
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("QueryService Index creation process for {}" + icd.getIndexName());
                    }
                    DefaultQueryService qs = (DefaultQueryService) getGemFireCache().getLocalQueryService();
                    String fromClause = icd.getIndexType() == IndexType.FUNCTIONAL || icd.getIndexType() == IndexType.HASH ? icd.getIndexFromClause() : this.getFullPath();
                    // load entries during initialization only for non overflow regions
                    indexes.add(qs.createIndex(icd.getIndexName(), icd.getIndexType(), icd.getIndexExpression(), fromClause, icd.getIndexImportString(), !isOverflowToDisk));
                }
            } catch (Exception ex) {
                logger.info("Failed to create index {} on region {} with exception: {}", icd.getIndexName(), this.getFullPath(), ex);
                // If the index was declared declaratively (e.g. in cache.xml), propagate the
                // failure as an error instead of just logging it.
                if (internalRegionArgs.getDeclarativeIndexCreation()) {
                    throw new InternalGemFireError(LocalizedStrings.GemFireCache_INDEX_CREATION_EXCEPTION_1.toLocalizedString(icd.getIndexName(), this.getFullPath()), ex);
                }
            }
        }
    } finally {
        // Reset the initialization lock.
        LocalRegion.setThreadInitLevelRequirement(initLevel);
    }
    // Load data into OQL indexes in case of disk recovery and disk overflow
    if (isOverflowToDisk) {
        if (recoverFromDisk) {
            populateOQLIndexes(indexes);
        } else {
            // Empty indexes are created for overflow regions but not populated at this stage
            // since this is not recovery.
            // Setting the populate flag to true so that the indexes can apply updates.
            this.indexManager.setPopulateFlagForIndexes(indexes);
        }
        // due to bug #52096, the pr index populate flags were not being set
        // we should revisit and clean up the index creation code paths
        this.indexManager.setPopulateFlagForIndexes(prIndexes);
    }
    getCachePerfStats().endIndexInitialization(start);
}
Also used : Index(org.apache.geode.cache.query.Index) Endpoint(org.apache.geode.cache.client.internal.Endpoint) TimeoutException(org.apache.geode.cache.TimeoutException) NameResolutionException(org.apache.geode.cache.query.NameResolutionException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) InternalGemFireException(org.apache.geode.InternalGemFireException) ConflictingPersistentDataException(org.apache.geode.cache.persistence.ConflictingPersistentDataException) CacheRuntimeException(org.apache.geode.cache.CacheRuntimeException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) EntryDestroyedException(org.apache.geode.cache.EntryDestroyedException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) ExecutionException(java.util.concurrent.ExecutionException) TypeMismatchException(org.apache.geode.cache.query.TypeMismatchException) FunctionDomainException(org.apache.geode.cache.query.FunctionDomainException) EntryExistsException(org.apache.geode.cache.EntryExistsException) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) StatisticsDisabledException(org.apache.geode.cache.StatisticsDisabledException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) FailedSynchronizationException(org.apache.geode.cache.FailedSynchronizationException) NoSuchElementException(java.util.NoSuchElementException) QueryException(org.apache.geode.cache.query.QueryException) RedundancyAlreadyMetException(org.apache.geode.internal.cache.partitioned.RedundancyAlreadyMetException) QueryInvalidException(org.apache.geode.cache.query.QueryInvalidException) LowMemoryException(org.apache.geode.cache.LowMemoryException) ServerOperationException(org.apache.geode.cache.client.ServerOperationException) SystemException(javax.transaction.SystemException) SubscriptionNotEnabledException(org.apache.geode.cache.client.SubscriptionNotEnabledException) RegionExistsException(org.apache.geode.cache.RegionExistsException) RegionReinitializedException(org.apache.geode.cache.RegionReinitializedException) CancelException(org.apache.geode.CancelException) DiskAccessException(org.apache.geode.cache.DiskAccessException) CacheWriterException(org.apache.geode.cache.CacheWriterException) IndexMaintenanceException(org.apache.geode.cache.query.IndexMaintenanceException) TransactionException(org.apache.geode.cache.TransactionException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) CacheClosedException(org.apache.geode.cache.CacheClosedException) RollbackException(javax.transaction.RollbackException) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) MultiIndexCreationException(org.apache.geode.cache.query.MultiIndexCreationException) DeltaSerializationException(org.apache.geode.DeltaSerializationException) IndexCreationData(org.apache.geode.cache.query.internal.index.IndexCreationData) ExecutionContext(org.apache.geode.cache.query.internal.ExecutionContext) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) ArrayList(java.util.ArrayList) List(java.util.List) StoredObject(org.apache.geode.internal.offheap.StoredObject) DefaultQueryService(org.apache.geode.cache.query.internal.DefaultQueryService) HashSet(java.util.HashSet) InternalGemFireError(org.apache.geode.InternalGemFireError)
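The createIndex overload used in the non-partitioned branch (with the extra "load entries" flag) is internal to DefaultQueryService; this path is normally driven by indexes declared before region creation, for example in cache.xml. The public-API equivalent for an already-created region is a plain QueryService.createIndex call, sketched below with placeholder region, field and index names:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.QueryService;

public class RegionIndexSketch {

    public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().create();
        Region<String, Object> region =
            cache.<String, Object>createRegionFactory(RegionShortcut.PARTITION)
                .create("portfolios");

        // Public-API counterpart of the FUNCTIONAL branch above; entries already
        // present in the region are loaded into the index as part of creation.
        QueryService qs = cache.getQueryService();
        Index statusIdx = qs.createIndex("statusIdx", "p.status", "/portfolios p");
        System.out.println("Created index: " + statusIdx.getName());

        cache.close();
    }
}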

Example 15 with DefaultQueryService

use of org.apache.geode.cache.query.internal.DefaultQueryService in project geode by apache.

the class CqResultSetUsingPoolDUnitTest method testCqResultsCachingWithFailOver.

/**
   * Tests CQ Result Caching with CQ Failover.
   * 
   * @throws Exception
   */
// GEODE-1251
@Category(FlakyTest.class)
@Test
public void testCqResultsCachingWithFailOver() throws Exception {
    final Host host = Host.getHost(0);
    VM server1 = host.getVM(0);
    VM server2 = host.getVM(1);
    VM client = host.getVM(2);
    cqDUnitTest.createServer(server1);
    final int port1 = server1.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
    String poolName = "testCQFailOver";
    final String cqName = "testCQFailOver_0";
    cqDUnitTest.createPool(client, poolName, new String[] { host0, host0 }, new int[] { port1, ports[0] });
    // create CQ.
    cqDUnitTest.createCQ(client, poolName, cqName, cqDUnitTest.cqs[0]);
    final int numObjects = 300;
    final int totalObjects = 500;
    // initialize Region.
    server1.invoke(new CacheSerializableRunnable("Update Region") {

        public void run2() throws CacheException {
            Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
            for (int i = 1; i <= numObjects; i++) {
                Portfolio p = new Portfolio(i);
                region.put("" + i, p);
            }
        }
    });
    // Keep updating region (async invocation).
    server1.invokeAsync(new CacheSerializableRunnable("Update Region") {

        public void run2() throws CacheException {
            Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
            // Update (totalObjects - 1) entries.
            for (int i = 1; i < totalObjects; i++) {
                // Destroy entries.
                if (i > 25 && i < 201) {
                    region.destroy("" + i);
                    continue;
                }
                Portfolio p = new Portfolio(i);
                region.put("" + i, p);
            }
            // recreate destroyed entries.
            for (int j = 26; j < 201; j++) {
                Portfolio p = new Portfolio(j);
                region.put("" + j, p);
            }
            // Add the last key.
            Portfolio p = new Portfolio(totalObjects);
            region.put("" + totalObjects, p);
        }
    });
    // Execute CQ.
    // While region operation is in progress execute CQ.
    cqDUnitTest.executeCQ(client, cqName, true, null);
    // Verify CQ Cache results.
    server1.invoke(new CacheSerializableRunnable("Verify CQ Cache results") {

        public void run2() throws CacheException {
            CqService cqService = null;
            try {
                cqService = ((DefaultQueryService) getCache().getQueryService()).getCqService();
            } catch (Exception ex) {
                LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
                Assert.fail("Failed to get the internal CqService.", ex);
            }
            // Wait till all the region update is performed.
            Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
            while (true) {
                if (region.get("" + totalObjects) == null) {
                    try {
                        Thread.sleep(50);
                    } catch (Exception ex) {
                    // ignore.
                    }
                    continue;
                }
                break;
            }
            Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
            for (InternalCqQuery cq : cqs) {
                ServerCQImpl cqQuery = (ServerCQImpl) cq;
                if (cqQuery.getName().equals(cqName)) {
                    int size = cqQuery.getCqResultKeysSize();
                    if (size != totalObjects) {
                        LogWriterUtils.getLogWriter().info("The number of Cached events " + size + " is not equal to the expected size " + totalObjects);
                        HashSet expectedKeys = new HashSet();
                        for (int i = 1; i < totalObjects; i++) {
                            expectedKeys.add("" + i);
                        }
                        Set cachedKeys = cqQuery.getCqResultKeyCache();
                        expectedKeys.removeAll(cachedKeys);
                        LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
                    }
                    assertEquals("The number of keys cached for cq " + cqName + " is wrong.", totalObjects, cqQuery.getCqResultKeysSize());
                }
            }
        }
    });
    cqDUnitTest.createServer(server2, ports[0]);
    final int thePort2 = server2.invoke(() -> CqQueryUsingPoolDUnitTest.getCacheServerPort());
    System.out.println("### Port on which server1 running : " + port1 + " Server2 running : " + thePort2);
    Wait.pause(3 * 1000);
    // Close server1 for CQ fail over to server2.
    cqDUnitTest.closeServer(server1);
    Wait.pause(3 * 1000);
    // Verify CQ Cache results.
    server2.invoke(new CacheSerializableRunnable("Verify CQ Cache results") {

        public void run2() throws CacheException {
            CqService cqService = null;
            try {
                cqService = ((DefaultQueryService) getCache().getQueryService()).getCqService();
            } catch (Exception ex) {
                LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
                Assert.fail("Failed to get the internal CqService.", ex);
            }
            // Wait till all the region update is performed.
            Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
            while (true) {
                if (region.get("" + totalObjects) == null) {
                    try {
                        Thread.sleep(50);
                    } catch (Exception ex) {
                    // ignore.
                    }
                    continue;
                }
                break;
            }
            Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
            for (InternalCqQuery cq : cqs) {
                ServerCQImpl cqQuery = (ServerCQImpl) cq;
                if (cqQuery.getName().equals(cqName)) {
                    int size = cqQuery.getCqResultKeysSize();
                    if (size != totalObjects) {
                        LogWriterUtils.getLogWriter().info("The number of Cached events " + size + " is not equal to the expected size " + totalObjects);
                        HashSet expectedKeys = new HashSet();
                        for (int i = 1; i < totalObjects; i++) {
                            expectedKeys.add("" + i);
                        }
                        Set cachedKeys = cqQuery.getCqResultKeyCache();
                        expectedKeys.removeAll(cachedKeys);
                        LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
                    }
                    assertEquals("The number of keys cached for cq " + cqName + " is wrong.", totalObjects, cqQuery.getCqResultKeysSize());
                }
            }
        }
    });
    // Close.
    cqDUnitTest.closeClient(client);
    cqDUnitTest.closeServer(server2);
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) CacheException(org.apache.geode.cache.CacheException) Portfolio(org.apache.geode.cache.query.data.Portfolio) Host(org.apache.geode.test.dunit.Host) CqService(org.apache.geode.cache.query.internal.cq.CqService) ServerCQImpl(org.apache.geode.cache.query.internal.cq.ServerCQImpl) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) Collection(java.util.Collection) InternalCqQuery(org.apache.geode.cache.query.internal.cq.InternalCqQuery) DefaultQueryService(org.apache.geode.cache.query.internal.DefaultQueryService) Category(org.junit.experimental.categories.Category) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
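The cached result keys the test inspects live on the server side (ServerCQImpl); on the client, the corresponding public setup is a pool with subscription enabled (and redundancy, so the CQ can fail over between servers) plus a CQ executed with initial results. A minimal client-side sketch, assuming hypothetical server endpoints, region and query (all placeholders):

import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.query.CqAttributes;
import org.apache.geode.cache.query.CqAttributesFactory;
import org.apache.geode.cache.query.CqEvent;
import org.apache.geode.cache.query.CqQuery;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;
import org.apache.geode.cache.util.CqListenerAdapter;

public class CqFailoverClientSketch {

    public static void main(String[] args) throws Exception {
        // Two hypothetical servers so the subscription (and the CQ) can fail over.
        ClientCache cache = new ClientCacheFactory()
            .addPoolServer("server1", 40404)
            .addPoolServer("server2", 40405)
            .setPoolSubscriptionEnabled(true)
            .setPoolSubscriptionRedundancy(1)
            .create();

        QueryService qs = cache.getQueryService();

        CqAttributesFactory caf = new CqAttributesFactory();
        caf.addCqListener(new CqListenerAdapter() {
            @Override
            public void onEvent(CqEvent event) {
                System.out.println("CQ event for key " + event.getKey());
            }
        });
        CqAttributes cqa = caf.create();

        CqQuery cq = qs.newCq("portfolioCq",
            "SELECT * FROM /portfolios p WHERE p.ID > 0", cqa);
        // executeWithInitialResults registers the CQ and returns the current matches.
        SelectResults<?> initial = cq.executeWithInitialResults();
        System.out.println("Initial result size: " + initial.size());

        cq.close();
        cache.close();
    }
}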

Aggregations

DefaultQueryService (org.apache.geode.cache.query.internal.DefaultQueryService): 22 uses
CacheException (org.apache.geode.cache.CacheException): 15 uses
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 14 uses
CqService (org.apache.geode.cache.query.internal.cq.CqService): 11 uses
Collection (java.util.Collection): 10 uses
InternalCqQuery (org.apache.geode.cache.query.internal.cq.InternalCqQuery): 9 uses
Test (org.junit.Test): 9 uses
CqException (org.apache.geode.cache.query.CqException): 8 uses
ServerCQImpl (org.apache.geode.cache.query.internal.cq.ServerCQImpl): 8 uses
Host (org.apache.geode.test.dunit.Host): 8 uses
VM (org.apache.geode.test.dunit.VM): 8 uses
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 8 uses
IOException (java.io.IOException): 7 uses
Portfolio (org.apache.geode.cache.query.data.Portfolio): 6 uses
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 6 uses
Set (java.util.Set): 5 uses
Region (org.apache.geode.cache.Region): 5 uses
QueryService (org.apache.geode.cache.query.QueryService): 5 uses
HashSet (java.util.HashSet): 4 uses
ArrayList (java.util.ArrayList): 3 uses