
Example 6 with QueryException

Use of org.apache.geode.cache.query.QueryException in project geode by apache.

From class PRQueryDUnitHelper, method getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults.

/**
   * This function <br>
   * 1. Creates & executes a query with Constants on the given PR Region <br>
   * 2. Executes the same query on the local region <br>
   * 3. Compares the appropriate resultSet <br>
   */
public CacheSerializableRunnable getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults(final String regionName, final String localRegion) {
    SerializableRunnable PrRegion = new CacheSerializableRunnable("PRQuery") {

        @Override
        public void run2() throws CacheException {
            Cache cache = getCache();
            // Querying the localRegion and the PR region
            String[] query = { "TRUE", "FALSE", "UNDEFINED", "NULL" };
            Object[][] r = new Object[query.length][2];
            Region local = cache.getRegion(localRegion);
            Region region = cache.getRegion(regionName);
            try {
                for (int j = 0; j < query.length; j++) {
                    r[j][0] = local.query(query[j]);
                    r[j][1] = region.query(query[j]);
                }
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults: Queries Executed successfully on Local region & PR Region");
                compareTwoQueryResults(r, query.length);
            } catch (QueryException e) {
                org.apache.geode.test.dunit.LogWriterUtils.getLogWriter().error("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults: Caught an Exception while querying Constants" + e, e);
                fail("PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults: Caught Exception while querying Constants. Exception is " + e);
            }
        }
    };
    return (CacheSerializableRunnable) PrRegion;
}
Also used : QueryException(org.apache.geode.cache.query.QueryException) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) Cache(org.apache.geode.cache.Cache)
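
For comparison outside the DUnit harness, here is a minimal, self-contained sketch of the same API: Region.query declares four checked exceptions that all extend QueryException, so a single catch clause covers them. The embedded-cache setup and the region name "exampleRegion" are illustrative assumptions, not taken from the test above.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.SelectResults;

public class RegionQuerySketch {
    public static void main(String[] args) {
        // assumption: an embedded peer cache with default properties is acceptable here
        Cache cache = new CacheFactory().create();
        Region<Integer, String> region =
                cache.<Integer, String>createRegionFactory(RegionShortcut.LOCAL).create("exampleRegion");
        region.put(1, "one");
        try {
            // the same constant predicates as the test could be used here, e.g. "TRUE"
            SelectResults<?> results = region.query("TRUE");
            System.out.println("matched " + results.size() + " entries");
        } catch (QueryException e) {
            // FunctionDomainException, TypeMismatchException, NameResolutionException and
            // QueryInvocationTargetException all extend QueryException
            System.err.println("query failed: " + e);
        } finally {
            cache.close();
        }
    }
}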

Example 7 with QueryException

Use of org.apache.geode.cache.query.QueryException in project geode by apache.

From class PRQueryDUnitTest, method testDataLossDuringQueryProcessor.

/**
   * Test data loss (bucket 0) while the PRQueryEvaluator is processing the query loop
   * 
   * @throws Exception
   */
@Test
public void testDataLossDuringQueryProcessor() throws Exception {
    final String rName = getUniqueName();
    Host host = Host.getHost(0);
    final VM datastore1 = host.getVM(2);
    final VM datastore2 = host.getVM(3);
    final int totalBuckets = 11;
    final int redCop = 0;
    CacheSerializableRunnable createPR = new CacheSerializableRunnable("Create PR") {

        @Override
        public void run2() throws CacheException {
            AttributesFactory attr = new AttributesFactory();
            attr.setValueConstraint(String.class);
            PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).create();
            attr.setPartitionAttributes(prAttr);
            getCache().createRegion(rName, attr.create());
        }
    };
    datastore1.invoke(createPR);
    datastore2.invoke(createPR);
    AttributesFactory attr = new AttributesFactory();
    attr.setValueConstraint(String.class);
    PartitionAttributes prAttr = new PartitionAttributesFactory().setRedundantCopies(redCop).setTotalNumBuckets(totalBuckets).setLocalMaxMemory(0).create();
    attr.setPartitionAttributes(prAttr);
    PartitionedRegion pr = (PartitionedRegion) getCache().createRegion(rName, attr.create());
    // Create buckets zero, one, and two
    pr.put(new Integer(0), "zero");
    pr.put(new Integer(1), "one");
    pr.put(new Integer(2), "two");
    class MyTestHook implements PartitionedRegionQueryEvaluator.TestHook {

        public boolean done = false;

        public void hook(int spot) throws RuntimeException {
            if (spot == 4) {
                synchronized (this) {
                    if (done) {
                        return;
                    }
                    this.done = true;
                }
                datastore1.invoke(disconnectVM());
                datastore2.invoke(disconnectVM());
            }
        }
    }
    final MyTestHook th = new MyTestHook();
    // add expected exception strings
    final IgnoredException ex = IgnoredException.addIgnoredException("Data loss detected");
    try {
        Object[] params = new Object[0];
        final DefaultQuery query = (DefaultQuery) getCache().getQueryService().newQuery("select distinct * from " + pr.getFullPath());
        final SelectResults results = query.getSimpleSelect().getEmptyResultSet(params, getCache(), query);
        // TODO assert this is the correct set of bucket Ids,
        final HashSet<Integer> buckets = new HashSet<Integer>();
        for (int i = 0; i < 3; i++) {
            buckets.add(new Integer(i));
        }
        PartitionedRegionQueryEvaluator qe = new PartitionedRegionQueryEvaluator(pr.getSystem(), pr, query, params, results, buckets);
        qe.queryBuckets(th);
        assertTrue(th.done);
        // not reached: queryBuckets is expected to throw a QueryException once the buckets are lost
        assertTrue(false);
    } catch (QueryException expected) {
        assertTrue(th.done);
    } finally {
        ex.remove();
        getCache().close();
    }
}
Also used : DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) Host(org.apache.geode.test.dunit.Host) PartitionedRegionQueryEvaluator(org.apache.geode.internal.cache.PartitionedRegionQueryEvaluator) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) QueryException(org.apache.geode.cache.query.QueryException) AttributesFactory(org.apache.geode.cache.AttributesFactory) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) IgnoredException(org.apache.geode.test.dunit.IgnoredException) HashSet(java.util.HashSet) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
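
The test drives the internal PartitionedRegionQueryEvaluator directly so it can inject the data-loss hook. In application code the same failure surfaces through the public QueryService API, as in this hedged sketch; the region name "portfolio" and the single-member setup are assumptions for illustration only.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.query.Query;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryService;
import org.apache.geode.cache.query.SelectResults;

public class QueryServiceSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().create();
        Region<Integer, String> region =
                cache.<Integer, String>createRegionFactory(RegionShortcut.PARTITION).create("portfolio");
        region.put(0, "zero");
        QueryService queryService = cache.getQueryService();
        Query query = queryService.newQuery("select distinct * from /portfolio");
        try {
            SelectResults<?> results = (SelectResults<?>) query.execute();
            System.out.println("result size: " + results.size());
        } catch (QueryException e) {
            // data loss or a missing bucket during execution is reported as a
            // QueryException (often a QueryInvocationTargetException)
            System.err.println("query failed: " + e);
        } finally {
            cache.close();
        }
    }
}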

Example 8 with QueryException

Use of org.apache.geode.cache.query.QueryException in project geode by apache.

From class AbstractRegionEntry, method destroy.

/**
   * @throws EntryNotFoundException if expectedOldValue is not null and is not equal to current
   *         value
   */
@Override
@Released
public boolean destroy(LocalRegion region, EntryEventImpl event, boolean inTokenMode, boolean cacheWrite, @Unretained Object expectedOldValue, boolean forceDestroy, boolean removeRecoveredEntry) throws CacheWriterException, EntryNotFoundException, TimeoutException, RegionClearedException {
    // A design decision was made to not retrieve the old value from the disk
    // if the entry has been evicted to only have the CacheListener afterDestroy
    // method ignore it. We don't want to pay the performance penalty. The
    // getValueInVM method does not retrieve the value from disk if it has been
    // evicted. Instead, it uses the NotAvailable token.
    //
    // If the region is a WAN queue region, the old value is actually used by the
    // afterDestroy callback on a secondary. It is not needed on a primary.
    // Since the destroy that sets WAN_QUEUE_TOKEN always originates on the primary
    // we only pay attention to WAN_QUEUE_TOKEN if the event is originRemote.
    //
    // We also read old value from disk or buffer
    // in the case where there is a non-null expectedOldValue
    // see PartitionedRegion#remove(Object key, Object value)
    ReferenceCountHelper.skipRefCountTracking();
    @Retained @Released Object curValue = _getValueRetain(region, true);
    ReferenceCountHelper.unskipRefCountTracking();
    boolean proceed;
    try {
        if (curValue == null) {
            curValue = Token.NOT_AVAILABLE;
        }
        if (curValue == Token.NOT_AVAILABLE) {
            // an old value transmitted with the event reflects the state of the transmitting cache's entry and should be used here
            if (event.getCallbackArgument() != null && event.getCallbackArgument().equals(RegionQueue.WAN_QUEUE_TOKEN) && event.isOriginRemote()) {
                // check originRemote for bug 40508
                // curValue = getValue(region); can cause deadlock if GII is occurring
                curValue = getValueOnDiskOrBuffer(region);
            } else {
                FilterProfile fp = region.getFilterProfile();
                if (fp != null && (fp.getCqCount() > 0 || expectedOldValue != null)) {
                    // curValue = getValue(region); could cause a deadlock: it would fault in the value
                    // and confuse the LRU.
                    curValue = getValueOnDiskOrBuffer(region);
                }
            }
        }
        if (expectedOldValue != null) {
            if (!checkExpectedOldValue(expectedOldValue, curValue, region)) {
                throw new EntryNotFoundException(LocalizedStrings.AbstractRegionEntry_THE_CURRENT_VALUE_WAS_NOT_EQUAL_TO_EXPECTED_VALUE.toLocalizedString());
            }
        }
        if (inTokenMode && event.hasOldValue()) {
            proceed = true;
        } else {
            proceed = event.setOldValue(curValue, curValue instanceof GatewaySenderEventImpl) || removeRecoveredEntry || forceDestroy || region.getConcurrencyChecksEnabled() || (event.getOperation() == Operation.REMOVE && (curValue == null || curValue == Token.LOCAL_INVALID || curValue == Token.INVALID));
        }
    } finally {
        OffHeapHelper.releaseWithNoTracking(curValue);
    }
    if (proceed) {
        // generate the version tag only now that the entry is actually going to be destroyed,
        // i.e. after the entry-not-found check above.
        if (!removeRecoveredEntry) {
            region.generateAndSetVersionTag(event, this);
        }
        if (cacheWrite) {
            region.cacheWriteBeforeDestroy(event, expectedOldValue);
            if (event.getRegion().getServerProxy() != null) {
                // server will return a version tag
                // update version information (may throw ConcurrentCacheModificationException)
                VersionStamp stamp = getVersionStamp();
                if (stamp != null) {
                    stamp.processVersionTag(event);
                }
            }
        }
        region.recordEvent(event);
        // skip index maintenance if the region is a proxy or the
        // RegionEntry (the old value) is invalid
        if (!region.isProxy() && !isInvalid()) {
            IndexManager indexManager = region.getIndexManager();
            if (indexManager != null) {
                try {
                    if (isValueNull()) {
                        @Released Object value = getValueOffHeapOrDiskWithoutFaultIn(region);
                        try {
                            Object preparedValue = prepareValueForCache(region, value, false);
                            _setValue(preparedValue);
                            releaseOffHeapRefIfRegionBeingClosedOrDestroyed(region, preparedValue);
                        } finally {
                            OffHeapHelper.release(value);
                        }
                    }
                    indexManager.updateIndexes(this, IndexManager.REMOVE_ENTRY, IndexProtocol.OTHER_OP);
                } catch (QueryException e) {
                    throw new IndexMaintenanceException(e);
                }
            }
        }
        boolean removeEntry = false;
        VersionTag v = event.getVersionTag();
        if (region.concurrencyChecksEnabled && !removeRecoveredEntry && !event.isFromRILocalDestroy()) {
            // with concurrency checks enabled, a destroy writes a tombstone instead of removing the entry outright
            if (v == null || !v.hasValidVersion()) {
                // localDestroy and eviction and ops received with no version tag
                // should create a tombstone using the existing version stamp, as should
                // (bug #45245) responses from servers that do not have valid version information
                VersionStamp stamp = this.getVersionStamp();
                if (stamp != null) {
                    // proxy has no stamps
                    v = stamp.asVersionTag();
                    event.setVersionTag(v);
                }
            }
            removeEntry = v == null || !v.hasValidVersion();
        } else {
            removeEntry = true;
        }
        if (removeEntry) {
            boolean isThisTombstone = isTombstone();
            if (inTokenMode && !event.getOperation().isEviction()) {
                setValue(region, Token.DESTROYED);
            } else {
                removePhase1(region, false);
            }
            if (isThisTombstone) {
                region.unscheduleTombstone(this);
            }
        } else {
            makeTombstone(region, v);
        }
        return true;
    } else {
        return false;
    }
}
Also used : IndexManager(org.apache.geode.cache.query.internal.index.IndexManager) QueryException(org.apache.geode.cache.query.QueryException) Released(org.apache.geode.internal.offheap.annotations.Released) Retained(org.apache.geode.internal.offheap.annotations.Retained) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) GatewaySenderEventImpl(org.apache.geode.internal.cache.wan.GatewaySenderEventImpl) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) StoredObject(org.apache.geode.internal.offheap.StoredObject) VersionStamp(org.apache.geode.internal.cache.versions.VersionStamp) IndexMaintenanceException(org.apache.geode.cache.query.IndexMaintenanceException)
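
The pattern worth noting here is the conversion of the checked QueryException from index maintenance into the unchecked IndexMaintenanceException, with the original exception preserved as the cause. A small illustrative sketch of that wrap-and-unwrap follows; updateIndexes and its simulated QueryInvocationTargetException are hypothetical stand-ins, not Geode internals.

import org.apache.geode.cache.query.IndexMaintenanceException;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryInvocationTargetException;

public class IndexWrapSketch {

    // hypothetical stand-in for an index update step that can throw QueryException
    static void updateIndexes() throws QueryException {
        throw new QueryInvocationTargetException("simulated index maintenance failure");
    }

    // mirrors the pattern in AbstractRegionEntry.destroy: the checked QueryException
    // is rethrown as the unchecked IndexMaintenanceException with the cause attached
    static void destroyEntry() {
        try {
            updateIndexes();
        } catch (QueryException e) {
            throw new IndexMaintenanceException(e);
        }
    }

    public static void main(String[] args) {
        try {
            destroyEntry();
        } catch (IndexMaintenanceException e) {
            // the original QueryException is still available as the cause
            System.err.println("index maintenance failed: " + e.getCause());
        }
    }
}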

Example 9 with QueryException

Use of org.apache.geode.cache.query.QueryException in project geode by apache.

From class LocalRegion, method recursiveDestroyRegion.

/**
   * Removes entries and recursively destroys subregions.
   *
   * @param eventSet collects the events for all destroyed regions if null, then we're closing so
   *        don't send events to callbacks or destroy the disk region
   */
private void recursiveDestroyRegion(Set eventSet, RegionEventImpl regionEvent, boolean cacheWrite) throws CacheWriterException, TimeoutException {
    final boolean isClose = regionEvent.getOperation().isClose();
    // do the cacheWriter beforeRegionDestroy first to fix bug 47736
    if (eventSet != null && cacheWrite) {
        try {
            cacheWriteBeforeRegionDestroy(regionEvent);
        } catch (CancelException e) {
            // I don't think this should ever happen: bulletproofing for bug 39454
            if (!this.cache.forcedDisconnect()) {
                logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_PROBLEM_IN_CACHEWRITEBEFOREREGIONDESTROY), e);
            }
        }
    }
    if (this.eventTracker != null) {
        this.eventTracker.stop();
    }
    if (logger.isTraceEnabled(LogMarker.RVV) && getVersionVector() != null) {
        logger.trace(LogMarker.RVV, "version vector for {} is {}", getName(), getVersionVector().fullToString());
    }
    cancelTTLExpiryTask();
    cancelIdleExpiryTask();
    cancelAllEntryExpiryTasks();
    if (!isInternalRegion()) {
        getCachePerfStats().incRegions(-1);
    }
    this.cache.getInternalResourceManager(false).removeResourceListener(this);
    if (getMembershipAttributes().hasRequiredRoles()) {
        if (!isInternalRegion()) {
            getCachePerfStats().incReliableRegions(-1);
        }
    }
    // collect the event for callbacks; strictly, the add only needs to be done if hasListener || hasAdminListener
    if (eventSet != null) {
        eventSet.add(regionEvent);
    }
    try {
        // call recursiveDestroyRegion on each subregion and remove it
        // from this subregion map
        Collection values = this.subregions.values();
        for (Iterator itr = values.iterator(); itr.hasNext(); ) {
            // element is a LocalRegion
            Object element = itr.next();
            LocalRegion region;
            try {
                LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
                try {
                    // converts to a LocalRegion
                    region = toRegion(element);
                } finally {
                    LocalRegion.setThreadInitLevelRequirement(LocalRegion.AFTER_INITIAL_IMAGE);
                }
            } catch (CancelException ignore) {
                // ignore, keep going through the motions though
                region = (LocalRegion) element;
            } catch (RegionDestroyedException ignore) {
                // SharedRegionData was destroyed
                continue;
            }
            // a subregion already marked destroyed indicates a failed initialization that is
            // removing it from the parent's subregion map; skip it
            if (region.isDestroyed) {
                continue;
            }
            // BEGIN operating on subregion of this region (rgn)
            if (eventSet != null) {
                regionEvent = (RegionEventImpl) regionEvent.clone();
                regionEvent.region = region;
            }
            try {
                region.recursiveDestroyRegion(eventSet, regionEvent, cacheWrite);
                if (!region.isInternalRegion()) {
                    InternalDistributedSystem system = region.cache.getInternalDistributedSystem();
                    system.handleResourceEvent(ResourceEvent.REGION_REMOVE, region);
                }
            } catch (CancelException e) {
                // I don't think this should ever happen: bulletproofing for bug 39454
                if (!this.cache.forcedDisconnect()) {
                    logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_RECURSION_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, region.getFullPath()), e);
                }
            }
            // remove from this subregion map;
            itr.remove();
        // END operating on subregion of this region
        }
        try {
            if (this.indexManager != null) {
                try {
                    if (this instanceof BucketRegion) {
                        this.indexManager.removeBucketIndexes(getPartitionedRegion());
                    }
                    this.indexManager.destroy();
                } catch (QueryException e) {
                    throw new IndexMaintenanceException(e);
                }
            }
        } catch (CancelException e) {
            // I don't think this should ever happen: bulletproofing for bug 39454
            if (!this.cache.forcedDisconnect()) {
                logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_BASICDESTROYREGION_INDEX_REMOVAL_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, getFullPath()), e);
            }
        }
    } finally {
        // mark this region as destroyed.
        if (regionEvent.isReinitializing()) {
            this.reinitialized_old = true;
        }
        this.cache.setRegionByPath(getFullPath(), null);
        if (this.eventTracker != null) {
            this.eventTracker.stop();
        }
        if (this.diskRegion != null) {
            this.diskRegion.prepareForClose(this);
        }
        this.isDestroyed = true;
        // after isDestroyed is set to true call removeResourceListener to fix bug 49555
        this.cache.getInternalResourceManager(false).removeResourceListener(this);
        closeEntries();
        if (logger.isDebugEnabled()) {
            logger.debug("recursiveDestroyRegion: Region Destroyed: {}", getFullPath());
        }
        // otherwise, the listener will be closed after the destroy event
        try {
            postDestroyRegion(!isClose, regionEvent);
        } catch (CancelException e) {
            logger.warn(LocalizedMessage.create(LocalizedStrings.LocalRegion_RECURSIVEDESTROYREGION_POSTDESTROYREGION_FAILED_DUE_TO_CACHE_CLOSURE_REGION_0, getFullPath()), e);
        }
        // fix for bug #47061
        if (getServerProxy() == null) {
            closeCqs();
        }
        detachPool();
        if (eventSet != null) {
            closeCallbacksExceptListener();
        } else {
            closeAllCallbacks();
        }
        if (this.concurrencyChecksEnabled && this.dataPolicy.withReplication() && !this.cache.isClosed()) {
            this.cache.getTombstoneService().unscheduleTombstones(this);
        }
        if (this.hasOwnStats) {
            this.cachePerfStats.close();
        }
    }
}
Also used : QueryException(org.apache.geode.cache.query.QueryException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) Iterator(java.util.Iterator) Collection(java.util.Collection) StoredObject(org.apache.geode.internal.offheap.StoredObject) CancelException(org.apache.geode.CancelException) InternalDistributedSystem(org.apache.geode.distributed.internal.InternalDistributedSystem) IndexMaintenanceException(org.apache.geode.cache.query.IndexMaintenanceException)
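
As in the previous example, a QueryException raised while tearing down indexes is rethrown as IndexMaintenanceException, while the surrounding finally block still completes the rest of the region cleanup. A compact, hypothetical sketch of that shape; IndexManagerLike and the simulated failure are illustrative, not Geode API.

import org.apache.geode.cache.query.IndexMaintenanceException;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryInvocationTargetException;

public class DestroyCleanupSketch {

    // hypothetical single-method stand-in for the region's index manager
    interface IndexManagerLike {
        void destroy() throws QueryException;
    }

    static boolean regionDestroyed = false;

    static void recursiveDestroy(IndexManagerLike indexManager) {
        try {
            if (indexManager != null) {
                try {
                    indexManager.destroy();
                } catch (QueryException e) {
                    // same conversion as LocalRegion.recursiveDestroyRegion
                    throw new IndexMaintenanceException(e);
                }
            }
        } finally {
            // the rest of the teardown must happen even if index destruction failed
            regionDestroyed = true;
        }
    }

    public static void main(String[] args) {
        IndexManagerLike failing = () -> {
            throw new QueryInvocationTargetException("simulated index destroy failure");
        };
        try {
            recursiveDestroy(failing);
        } catch (IndexMaintenanceException e) {
            System.err.println("index destroy failed: " + e.getCause());
        }
        System.out.println("region marked destroyed: " + regionDestroyed);
    }
}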

Example 10 with QueryException

Use of org.apache.geode.cache.query.QueryException in project geode by apache.

From class MemberFunctionResultSender, method sendResult.

public void sendResult(Object oneResult) {
    if (!this.function.hasResult()) {
        throw new IllegalStateException(LocalizedStrings.ExecuteFunction_CANNOT_0_RESULTS_HASRESULT_FALSE.toLocalizedString("send"));
    }
    if (this.serverSender != null) {
        // Client-Server
        if (logger.isDebugEnabled()) {
            logger.debug("MemberFunctionResultSender sending result from local node to client {}", oneResult);
        }
        this.serverSender.sendResult(oneResult);
    } else {
        // P2P
        if (this.msg != null) {
            try {
                this.msg.sendReplyForOneResult(dm, oneResult, false, enableOrderedResultStreming);
            } catch (QueryException e) {
                throw new FunctionException(e);
            } catch (ForceReattemptException e) {
                throw new FunctionException(e);
            } catch (InterruptedException e) {
                throw new FunctionException(e);
            }
        } else {
            this.rc.addResult(this.dm.getDistributionManagerId(), oneResult);
            FunctionStats.getFunctionStats(function.getId(), this.dm.getSystem()).incResultsReceived();
        }
        // incrementing result sent stats.
        FunctionStats.getFunctionStats(function.getId(), this.dm.getSystem()).incResultsReturned();
    }
}
Also used : QueryException(org.apache.geode.cache.query.QueryException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) FunctionException(org.apache.geode.cache.execute.FunctionException)
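
Since QueryException, ForceReattemptException and InterruptedException all receive the same treatment, a Java 7 multi-catch expresses the wrapping more compactly. A self-contained sketch follows, with a hypothetical sendReplyForOneResult standing in for the message send above.

import org.apache.geode.cache.execute.FunctionException;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryInvocationTargetException;
import org.apache.geode.internal.cache.ForceReattemptException;

public class ResultSendSketch {

    // hypothetical stand-in for this.msg.sendReplyForOneResult in the sender above
    static void sendReplyForOneResult(Object result)
            throws QueryException, ForceReattemptException, InterruptedException {
        throw new QueryInvocationTargetException("simulated reply failure");
    }

    // all three checked exceptions are wrapped identically, so a multi-catch
    // collapses the chain of catch blocks used in MemberFunctionResultSender
    static void sendResult(Object oneResult) {
        try {
            sendReplyForOneResult(oneResult);
        } catch (QueryException | ForceReattemptException | InterruptedException e) {
            throw new FunctionException(e);
        }
    }

    public static void main(String[] args) {
        try {
            sendResult("one result");
        } catch (FunctionException e) {
            System.err.println("result send failed: " + e.getCause());
        }
    }
}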

Aggregations

QueryException (org.apache.geode.cache.query.QueryException) 35
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException) 15
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable) 15
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion) 15
Region (org.apache.geode.cache.Region) 14
Cache (org.apache.geode.cache.Cache) 13
QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException) 13
SelectResults (org.apache.geode.cache.query.SelectResults) 13
QueryService (org.apache.geode.cache.query.QueryService) 12
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable) 12
LocalRegion (org.apache.geode.internal.cache.LocalRegion) 11
CancelException (org.apache.geode.CancelException) 10
TestException (util.TestException) 10
StructSetOrResultsSet (org.apache.geode.cache.query.functional.StructSetOrResultsSet) 8
IndexMaintenanceException (org.apache.geode.cache.query.IndexMaintenanceException) 6
HashSet (java.util.HashSet) 4
Iterator (java.util.Iterator) 4
CacheException (org.apache.geode.cache.CacheException) 4
Function (org.apache.geode.cache.execute.Function) 4
ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException) 4