
Example 31 with QueryException

Use of org.apache.geode.cache.query.QueryException in the Apache Geode project.

The class PRQueryDUnitHelper, method getCacheSerializableRunnableForRRAndPRQueryAndCompareResults:

public SerializableRunnableIF getCacheSerializableRunnableForRRAndPRQueryAndCompareResults(final String name, final String coloName, final String localName, final String coloLocalName) {
    SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Querying the PR region
            String[] queries = new String[] {
                "r1.ID = r2.id",
                "r1.ID = r2.id AND r1.ID > 5",
                "r1.ID = r2.id AND r1.status = 'active'",
                // "r1.ID = r2.id LIMIT 10",
                "r1.ID = r2.id ORDER BY r1.ID",
                "r1.ID = r2.id ORDER BY r2.id",
                "r1.ID = r2.id ORDER BY r2.status",
                "r1.ID = r2.id AND r1.status != r2.status",
                "r1.ID = r2.id AND r1.status = r2.status",
                "r1.ID = r2.id AND r1.positions.size = r2.positions.size",
                "r1.ID = r2.id AND r1.positions.size > r2.positions.size",
                "r1.ID = r2.id AND r1.positions.size < r2.positions.size",
                "r1.ID = r2.id AND r1.positions.size = r2.positions.size AND r2.positions.size > 0",
                "r1.ID = r2.id AND (r1.positions.size > r2.positions.size OR r2.positions.size > 0)",
                "r1.ID = r2.id AND (r1.positions.size < r2.positions.size OR r1.positions.size > 0)" };
            Object[][] r = new Object[queries.length][2];
            Region region = null;
            region = cache.getRegion(name);
            assertNotNull(region);
            region = cache.getRegion(coloName);
            assertNotNull(region);
            region = cache.getRegion(localName);
            assertNotNull(region);
            region = cache.getRegion(coloLocalName);
            assertNotNull(region);
            final String[] expectedExceptions = new String[] { RegionDestroyedException.class.getName(), ReplyException.class.getName(), CacheClosedException.class.getName(), ForceReattemptException.class.getName(), QueryInvocationTargetException.class.getName() };
            for (final String expectedException : expectedExceptions) {
                getCache().getLogger().info("<ExpectedException action=add>" + expectedException + "</ExpectedException>");
            }
            QueryService qs = getCache().getQueryService();
            Object[] params;
            try {
                for (int j = 0; j < queries.length; j++) {
                    getCache().getLogger().info("About to execute local query: " + queries[j]);
                    Function func = new TestQueryFunction("testfunction");
                    Object funcResult = FunctionService.onRegion((getCache().getRegion(name) instanceof PartitionedRegion) ? getCache().getRegion(name) : getCache().getRegion(coloName)).setArguments("<trace> Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "") + " * from /" + name + " r1, r1.positions.values pos1, /" + coloName + " r2 where " + queries[j]).execute(func).getResult();
                    r[j][0] = ((ArrayList) funcResult).get(0);
                    getCache().getLogger().info("About to execute local query: " + queries[j]);
                    SelectResults r2 = (SelectResults) qs.newQuery("Select " + (queries[j].contains("ORDER BY") ? "DISTINCT" : "") + " * from /" + localName + " r1, r1.positions.values pos1, /" + coloLocalName + " r2 where " + queries[j]).execute();
                    r[j][1] = r2.asList();
                }
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
                StructSetOrResultsSet ssORrs = new StructSetOrResultsSet();
                ssORrs.CompareQueryResultsAsListWithoutAndWithIndexes(r, queries.length, false, false, queries);
            } catch (QueryInvocationTargetException e) {
                // inspect the cause and see whether or not it's okay; here it is treated as unexpected
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (QueryException e) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying" + e, e);
                throw new TestException("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception", e);
            } catch (RegionDestroyedException rde) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ", rde);
            } catch (CancelException cce) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ", cce);
            } finally {
                for (final String expectedException : expectedExceptions) {
                    getCache().getLogger().info("<ExpectedException action=remove>" + expectedException + "</ExpectedException>");
                }
            }
        }
    };
    return (CacheSerializableRunnable) PrRegion;
}
Also used: StructSetOrResultsSet (org.apache.geode.cache.query.functional.StructSetOrResultsSet), TestException (util.TestException), RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable), QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException), Function (org.apache.geode.cache.execute.Function), QueryException (org.apache.geode.cache.query.QueryException), SelectResults (org.apache.geode.cache.query.SelectResults), QueryService (org.apache.geode.cache.query.QueryService), PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion), LocalRegion (org.apache.geode.internal.cache.LocalRegion), Region (org.apache.geode.cache.Region), CancelException (org.apache.geode.CancelException), Cache (org.apache.geode.cache.Cache)
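
Below is a minimal standalone sketch of the direct-query half of the comparison above: it runs one of the equi-join predicates through the QueryService and prints the result size. The region names portfolios1 and portfolios2 and the chosen predicate are assumptions for illustration, not part of the test fixture; the regions are expected to already exist and be populated.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class EquiJoinQuerySketch {

    public static void main(String[] args) throws QueryException {
        // connect to a cache; assumes this member already hosts the two regions
        Cache cache = new CacheFactory().create();
        QueryService qs = cache.getQueryService();
        String predicate = "r1.ID = r2.id AND r1.status = r2.status";
        // the checked exceptions thrown by execute() are all subtypes of QueryException
        SelectResults<?> results = (SelectResults<?>) qs
            .newQuery("SELECT DISTINCT * FROM /portfolios1 r1, /portfolios2 r2 WHERE " + predicate)
            .execute();
        System.out.println("join rows: " + results.size());
        cache.close();
    }
}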

Example 32 with QueryException

Use of org.apache.geode.cache.query.QueryException in the Apache Geode project.

The class LocalRegion, method clearRegionLocally:

/**
   * Common code used by both clear and localClear. Along the lines of destroyRegion, this
   * method will be invoked for clearing the local cache. The cmnClearRegion will be overridden
   * in the derived class DistributedRegion too. For the clear operation, no CacheWriter will be
   * invoked; it will only have the afterClear callback. Also, like destroyRegion and
   * invalidateRegion, the clear operation will not take a distributed lock. The clear operation
   * will also clear the local transactional entries and will have immediate committed state.
   */
void clearRegionLocally(RegionEventImpl regionEvent, boolean cacheWrite, RegionVersionVector vector) {
    final boolean isRvvDebugEnabled = logger.isTraceEnabled(LogMarker.RVV);
    RegionVersionVector rvv = vector;
    if (this.serverRegionProxy != null) {
        // clients and local regions do not maintain a full RVV. can't use it with clear()
        rvv = null;
    }
    if (rvv != null && this.dataPolicy.withStorage()) {
        if (isRvvDebugEnabled) {
            logger.trace(LogMarker.RVV, "waiting for my version vector to dominate{}mine={}{} other={}", getLineSeparator(), getLineSeparator(), this.versionVector.fullToString(), rvv);
        }
        boolean result = this.versionVector.waitToDominate(rvv, this);
        if (!result) {
            if (isRvvDebugEnabled) {
                logger.trace(LogMarker.RVV, "incrementing clearTimeouts for {} rvv={}", getName(), this.versionVector.fullToString());
            }
            getCachePerfStats().incClearTimeouts();
        }
    }
    // If the initial image operation (GII) is still in progress, then we will have to do the
    // clear operation at the end of the GII. For this we try to acquire the GII lock; if the
    // boolean returned is true, the lock was obtained, which also means that GII is still in
    // progress.
    boolean isGIIinProgress = lockGII();
    if (isGIIinProgress) {
        // We should also try to abort the GII
        try {
            getImageState().setClearRegionFlag(true, /* Clear region */
            rvv);
        } finally {
            unlockGII();
        }
    }
    if (cacheWrite && !isGIIinProgress) {
        this.cacheWriteBeforeRegionClear(regionEvent);
    }
    RegionVersionVector myVector = getVersionVector();
    if (myVector != null) {
        if (isRvvDebugEnabled) {
            logger.trace(LogMarker.RVV, "processing version information for {}", regionEvent);
        }
        if (!regionEvent.isOriginRemote() && !regionEvent.getOperation().isLocal()) {
            // generate a new version for the operation
            VersionTag tag = VersionTag.create(getVersionMember());
            tag.setVersionTimeStamp(cacheTimeMillis());
            tag.setRegionVersion(myVector.getNextVersionWhileLocked());
            if (isRvvDebugEnabled) {
                logger.trace(LogMarker.RVV, "generated version tag for clear: {}", tag);
            }
            regionEvent.setVersionTag(tag);
        } else {
            VersionTag tag = regionEvent.getVersionTag();
            if (tag != null) {
                if (isRvvDebugEnabled) {
                    logger.trace(LogMarker.RVV, "recording version tag for clear: {}", tag);
                }
                // clear() events always have the ID in the tag
                myVector.recordVersion(tag.getMemberID(), tag);
            }
        }
    }
    // Clear the expiration task for all the entries. It is possible that after clearing the
    // tasks some new entries may get added before clear is issued on the map, but that should
    // be OK, as the expiration thread will silently move ahead if the entry to be expired no
    // longer exists.
    this.cancelAllEntryExpiryTasks();
    if (this.entryUserAttributes != null) {
        this.entryUserAttributes.clear();
    }
    // for a local clear with no incoming RVV, the vector's garbage-collection versions can
    // be set to the current vector versions
    if (rvv == null && myVector != null) {
        myVector.removeOldVersions();
    }
    // clear the disk region if present
    if (this.diskRegion != null) {
        // persist current rvv and rvvgc which contained version for clear() itself
        if (this.getDataPolicy().withPersistence()) {
            // null means not to change dr.rvvTrust
            if (isRvvDebugEnabled) {
                logger.trace(LogMarker.RVV, "Clear: Saved current rvv: {}", this.diskRegion.getRegionVersionVector());
            }
            this.diskRegion.writeRVV(this, null);
            this.diskRegion.writeRVVGC(this);
        }
        // clear the entries in disk
        this.diskRegion.clear(this, rvv);
    } else {
        // this will be done in diskRegion.clear if it is not null, else it has to be done here
        // Now remove the tx entries for this region
        txClearRegion();
        // Now clear the map of committed entries
        Set<VersionSource> remainingIDs = clearEntries(rvv);
        if (!this.dataPolicy.withPersistence()) {
            // persistent regions do not reap IDs
            if (myVector != null) {
                myVector.removeOldMembers(remainingIDs);
            }
        }
    }
    if (!isProxy()) {
        // TODO: the indexManager variable was made volatile. Is that necessary?
        if (this.indexManager != null) {
            try {
                this.indexManager.rerunIndexCreationQuery();
            } catch (QueryException qe) {
                // TODO: never throw an anonymous class (and the outer class is not serializable)
                throw new CacheRuntimeException(LocalizedStrings.LocalRegion_EXCEPTION_OCCURRED_WHILE_RE_CREATING_INDEX_DATA_ON_CLEARED_REGION.toLocalizedString(), qe) {

                    private static final long serialVersionUID = 0L;
                };
            }
        }
    }
    if (ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
        CacheObserverHolder.getInstance().afterRegionClear(regionEvent);
    }
    if (isGIIinProgress) {
        return;
    }
    regionEvent.setEventType(EnumListenerEvent.AFTER_REGION_CLEAR);
    // Issue a callback to afterClear if the region is initialized
    boolean hasListener = hasListener();
    if (hasListener) {
        dispatchListenerEvent(EnumListenerEvent.AFTER_REGION_CLEAR, regionEvent);
    }
}
Also used: QueryException (org.apache.geode.cache.query.QueryException), CacheRuntimeException (org.apache.geode.cache.CacheRuntimeException), VersionSource (org.apache.geode.internal.cache.versions.VersionSource), VersionTag (org.apache.geode.internal.cache.versions.VersionTag), RegionVersionVector (org.apache.geode.internal.cache.versions.RegionVersionVector)
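
The catch block above has to convert the checked QueryException into an unchecked exception because clear() does not declare it, and it does so with an anonymous CacheRuntimeException subclass, which the TODO comment itself flags. Below is a small sketch of the same wrapping pattern using a named, serializable subtype instead; the class name RegionClearIndexException is hypothetical and not part of Geode.

import org.apache.geode.cache.CacheRuntimeException;

// hypothetical named replacement for the anonymous CacheRuntimeException subclass above
public class RegionClearIndexException extends CacheRuntimeException {

    private static final long serialVersionUID = 1L;

    public RegionClearIndexException(String message, Throwable cause) {
        super(message, cause);
    }
}

At the call site, the catch body would then read throw new RegionClearIndexException(message, qe) instead of instantiating an anonymous subclass.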

Example 33 with QueryException

Use of org.apache.geode.cache.query.QueryException in the Apache Geode project.

The class LocalRegion, method basicPutPart2:

protected long basicPutPart2(EntryEventImpl event, RegionEntry entry, boolean isInitialized, long lastModified, boolean clearConflict) {
    final boolean isNewKey = event.getOperation().isCreate();
    // Invoke callbacks only if we are not creating a tombstone
    final boolean invokeCallbacks = event.basicGetNewValue() != Token.TOMBSTONE;
    if (isNewKey) {
        updateStatsForCreate();
    }
    // fix for bug 31102
    final boolean lruRecentUse = event.isNetSearch() || event.isLoad();
    // the event may have a version timestamp that we need to use, so get the
    // event time to store in the entry
    long lastModifiedTime = event.getEventTime(lastModified);
    updateStatsForPut(entry, lastModifiedTime, lruRecentUse);
    if (!isProxy()) {
        if (!clearConflict && this.indexManager != null) {
            try {
                if (!entry.isInvalid()) {
                    this.indexManager.updateIndexes(entry, isNewKey ? IndexManager.ADD_ENTRY : IndexManager.UPDATE_ENTRY, isNewKey ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
                }
            } catch (QueryException e) {
                throw new IndexMaintenanceException(e);
            } finally {
                IndexManager.setIndexBufferTime(lastModifiedTime, cacheTimeMillis());
            }
        }
    }
    if (invokeCallbacks) {
        boolean doCallback = false;
        if (isInitialized) {
            // from invokePutCallbacks
            if (event.isGenerateCallbacks()) {
                doCallback = true;
            }
        } else if (this.isUsedForPartitionedRegionBucket) {
            // invokePutCallbacks in BucketRegion will be more discriminating
            doCallback = true;
        }
        if (doCallback) {
            if (event.isBulkOpInProgress() && this.isUsedForPartitionedRegionBucket) {
                if (logger.isDebugEnabled()) {
                    logger.debug("For bulk operation on bucket region, not to notify gateway sender earlier.");
                }
            } else {
                notifyGatewaySender(event.getOperation().isUpdate() ? EnumListenerEvent.AFTER_UPDATE : EnumListenerEvent.AFTER_CREATE, event);
            }
            // Notify listeners
            if (!event.isBulkOpInProgress()) {
                try {
                    entry.dispatchListenerEvents(event);
                } catch (InterruptedException ignore) {
                    Thread.currentThread().interrupt();
                    this.stopper.checkCancelInProgress(null);
                }
            }
        }
    }
    return lastModifiedTime;
}
Also used: QueryException (org.apache.geode.cache.query.QueryException), IndexMaintenanceException (org.apache.geode.cache.query.IndexMaintenanceException)
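
The put path above cannot surface a checked QueryException to callers of Region.put, so it translates index-update failures into the unchecked IndexMaintenanceException. Below is a small sketch of the same translation around an ordinary verification query; the helper class and method names are hypothetical, and only the IndexMaintenanceException(Throwable) constructor already used above is relied on.

import org.apache.geode.cache.query.IndexMaintenanceException;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public final class IndexCheckSketch {

    private IndexCheckSketch() {
    }

    // executes a verification query and converts the checked QueryException into the
    // unchecked IndexMaintenanceException, mirroring the translation in basicPutPart2
    public static SelectResults<?> verifyIndexedData(QueryService queryService, String oql) {
        Query query = queryService.newQuery(oql);
        try {
            return (SelectResults<?>) query.execute();
        } catch (QueryException e) {
            throw new IndexMaintenanceException(e);
        }
    }
}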

Example 34 with QueryException

Use of org.apache.geode.cache.query.QueryException in the Apache Geode project.

The class PersistentPartitionedRegionDUnitTest, method checkReadWriteOperationsWithOfflineMember:

private void checkReadWriteOperationsWithOfflineMember(VM vm0, final int aVM0Bucket, final int aVM1Bucket) {
    // This should work, because this bucket is still available.
    checkData(vm0, aVM0Bucket, aVM0Bucket + 1, "a");
    try {
        checkData(vm0, aVM1Bucket, aVM1Bucket + 1, null);
        fail("Should not have been able to read from missing buckets!");
    } catch (RMIException e) {
        // We expect a PartitionOfflineException
        if (!(e.getCause() instanceof PartitionOfflineException)) {
            throw e;
        }
    }
    IgnoredException expect = IgnoredException.addIgnoredException("PartitionOfflineException", vm0);
    // Try a function execution
    vm0.invoke(new SerializableRunnable("Test ways to read") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion(PR_REGION_NAME);
            try {
                FunctionService.onRegion(region).execute(new TestFunction());
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            }
            // This should work, because this bucket is still available.
            FunctionService.onRegion(region).withFilter(Collections.singleton(aVM0Bucket)).execute(new TestFunction());
            // This should fail, because this bucket is offline
            try {
                FunctionService.onRegion(region).withFilter(Collections.singleton(aVM1Bucket)).execute(new TestFunction());
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            }
            // This should fail, because a bucket is offline
            try {
                HashSet filter = new HashSet();
                filter.add(aVM0Bucket);
                filter.add(aVM1Bucket);
                FunctionService.onRegion(region).withFilter(filter).execute(new TestFunction());
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            }
            // This should fail, because a bucket is offline
            try {
                FunctionService.onRegion(region).execute(new TestFunction());
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                cache.getQueryService().newQuery("select * from /" + PR_REGION_NAME).execute();
                fail("Should not have been able to read from missing buckets!");
            } catch (PartitionOfflineException e) {
            // expected
            } catch (QueryException e) {
                throw new RuntimeException(e);
            }
            try {
                Set keys = region.keySet();
                // iterate over all of the keys
                for (Object key : keys) {
                }
                fail("Should not have been able to iterate over keyset");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                // iterate over all of the values
                for (Object key : region.values()) {
                }
                fail("Should not have been able to iterate over set");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                // iterate over all of the entries
                for (Object key : region.entrySet()) {
                }
                fail("Should not have been able to iterate over set");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.get(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.containsKey(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.getEntry(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.invalidate(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
            try {
                region.destroy(aVM1Bucket);
                fail("Should not have been able to get an offline key");
            } catch (PartitionOfflineException e) {
            // expected
            }
        }
    });
    try {
        createData(vm0, aVM1Bucket, aVM1Bucket + 1, "b");
        fail("Should not have been able to write to missing buckets!");
    } catch (RMIException e) {
        // We expect to see a partition offline exception here.
        if (!(e.getCause() instanceof PartitionOfflineException)) {
            throw e;
        }
    }
    expect.remove();
}
Also used: RMIException (org.apache.geode.test.dunit.RMIException), QueryException (org.apache.geode.cache.query.QueryException), Set (java.util.Set), HashSet (java.util.HashSet), PartitionOfflineException (org.apache.geode.cache.persistence.PartitionOfflineException), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), IgnoredException (org.apache.geode.test.dunit.IgnoredException), PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion), Region (org.apache.geode.cache.Region), Cache (org.apache.geode.cache.Cache)
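
Below is a condensed sketch of the query portion of the check above: it classifies the outcome of an OQL scan over a persistent partitioned region whose buckets may be offline. The utility class and method names are hypothetical; only exception types already used in the test appear.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.persistence.PartitionOfflineException;
import org.apache.geode.cache.query.QueryException;

public final class OfflineBucketQuerySketch {

    private OfflineBucketQuerySketch() {
    }

    // returns true when the scan fails because persistent buckets are offline, false when it
    // succeeds; any other query failure is rethrown so a caller (or test) still fails loudly
    public static boolean queryHitOfflineBucket(Cache cache, String regionName) {
        try {
            cache.getQueryService().newQuery("select * from /" + regionName).execute();
            return false;
        } catch (PartitionOfflineException expected) {
            return true;
        } catch (QueryException e) {
            throw new RuntimeException(e);
        }
    }
}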

Example 35 with QueryException

Use of org.apache.geode.cache.query.QueryException in the Apache Geode project.

The class CqServiceImpl, method executeCqs:

@Override
public synchronized void executeCqs(Collection<? extends InternalCqQuery> cqs) throws CqException {
    if (cqs == null) {
        return;
    }
    String cqName = null;
    for (InternalCqQuery internalCq : cqs) {
        CqQuery cq = internalCq;
        if (!cq.isClosed() && cq.isStopped()) {
            try {
                cqName = cq.getName();
                cq.execute();
            } catch (QueryException | CqClosedException e) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Failed to execute the CQ, CqName : {} Error : {}", cqName, e.getMessage());
                }
            }
        }
    }
}
Also used: QueryException (org.apache.geode.cache.query.QueryException), CqClosedException (org.apache.geode.cache.query.CqClosedException), CqQuery (org.apache.geode.cache.query.CqQuery)
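
Below is a standalone sketch of the same restart loop written against the public QueryService API rather than the internal CqServiceImpl; the utility class name is hypothetical. The multi-catch mirrors the one in the example: CqClosedException is not a subtype of QueryException, so both alternatives are needed.

import org.apache.geode.cache.query.CqClosedException;
import org.apache.geode.cache.query.CqQuery;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryService;

public final class CqRestartSketch {

    private CqRestartSketch() {
    }

    // re-executes every registered CQ that is stopped but not closed, tolerating the same
    // QueryException | CqClosedException pair that CqServiceImpl.executeCqs swallows
    public static void executeStoppedCqs(QueryService queryService) {
        CqQuery[] cqs = queryService.getCqs();
        if (cqs == null) {
            // defensive null check in case no CQs are registered
            return;
        }
        for (CqQuery cq : cqs) {
            if (!cq.isClosed() && cq.isStopped()) {
                try {
                    cq.execute();
                } catch (QueryException | CqClosedException e) {
                    // a CQ that became invalid or was closed concurrently is not fatal here
                    System.err.println("Failed to execute CQ " + cq.getName() + ": " + e.getMessage());
                }
            }
        }
    }
}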

Aggregations

QueryException (org.apache.geode.cache.query.QueryException): 35 uses
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 15 uses
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 15 uses
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 15 uses
Region (org.apache.geode.cache.Region): 14 uses
Cache (org.apache.geode.cache.Cache): 13 uses
QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException): 13 uses
SelectResults (org.apache.geode.cache.query.SelectResults): 13 uses
QueryService (org.apache.geode.cache.query.QueryService): 12 uses
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 12 uses
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 11 uses
CancelException (org.apache.geode.CancelException): 10 uses
TestException (util.TestException): 10 uses
StructSetOrResultsSet (org.apache.geode.cache.query.functional.StructSetOrResultsSet): 8 uses
IndexMaintenanceException (org.apache.geode.cache.query.IndexMaintenanceException): 6 uses
HashSet (java.util.HashSet): 4 uses
Iterator (java.util.Iterator): 4 uses
CacheException (org.apache.geode.cache.CacheException): 4 uses
Function (org.apache.geode.cache.execute.Function): 4 uses
ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException): 4 uses