Example 16 with QueryException

use of org.apache.geode.cache.query.QueryException in project geode by apache.

the class RemoveIndexesMessage method operateOnPartitionedRegion.

/**
   * Removes the indexes on the given partitioned region.
   * 
   * @param dm Distribution manager for the system
   * @param pr Partitioned region to remove indexes on.
   * 
   * @throws CacheException indicates a cache level error
   * @throws ForceReattemptException if the peer is no longer available
   * @throws InterruptedException if the thread is interrupted in the operation for example during
   *         shutdown.
   */
@Override
protected boolean operateOnPartitionedRegion(DistributionManager dm, PartitionedRegion pr, long startTime) throws CacheException, QueryException, ForceReattemptException, InterruptedException {
    ReplyException replyEx = null;
    boolean result = true;
    // 0 is an invalid placeholder; the removal below sets the real count
    int bucketIndexRemoved = 0;
    int numIndexesRemoved = 0;
    logger.info(LocalizedMessage.create(LocalizedStrings.RemoveIndexesMessage_WILL_REMOVE_THE_INDEXES_ON_THIS_PR___0, pr));
    try {
        if (this.removeSingleIndex) {
            bucketIndexRemoved = pr.removeIndex(this.indexName);
        } else {
            // remotely originated
            bucketIndexRemoved = pr.removeIndexes(true);
        }
        numIndexesRemoved = pr.getDataStore().getAllLocalBuckets().size();
    } catch (Exception ex) {
        result = false;
        replyEx = new ReplyException(ex);
    }
    // send back the reply.
    sendReply(getSender(), getProcessorId(), dm, replyEx, result, bucketIndexRemoved, numIndexesRemoved);
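    // false: a reply has already been sent above, so the caller must not send another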
    return false;
}
Also used : ReplyException(org.apache.geode.distributed.internal.ReplyException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) PartitionedRegionException(org.apache.geode.internal.cache.PartitionedRegionException) QueryException(org.apache.geode.cache.query.QueryException)
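
For context, RemoveIndexesMessage is Geode-internal plumbing; applications reach it through the public QueryService API. Below is a minimal, hypothetical sketch of that path (the region path, index name, and indexed expression are illustrative). The checked exceptions of createIndex, such as IndexExistsException, IndexNameConflictException, and RegionNotFoundException, all extend QueryException, so a single throws clause covers them:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.query.Index;
import org.apache.geode.cache.query.QueryException;
import org.apache.geode.cache.query.QueryService;

public class IndexRemovalSketch {

    // Create an index on a (possibly partitioned) region and later remove it.
    // On a partitioned region the removal fans out to peers via internal
    // messages such as RemoveIndexesMessage above.
    static void createAndRemove(Cache cache) throws QueryException {
        QueryService qs = cache.getQueryService();
        Index idx = qs.createIndex("idIndex", "p.id", "/portfolios p");
        // ... queries can now use the index ...
        qs.removeIndex(idx);
    }
}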

Example 17 with QueryException

use of org.apache.geode.cache.query.QueryException in project geode by apache.

the class CqServiceImpl method stopCqs.

@Override
public synchronized void stopCqs(Collection<? extends InternalCqQuery> cqs) throws CqException {
    final boolean isDebugEnabled = logger.isDebugEnabled();
    if (isDebugEnabled) {
        if (cqs == null) {
            logger.debug("CqService.stopCqs cqs : null");
        } else {
            logger.debug("CqService.stopCqs cqs : ({} queries)", cqs.size());
        }
    }
    if (cqs == null) {
        return;
    }
    String cqName = null;
    for (InternalCqQuery internalCqQuery : cqs) {
        CqQuery cq = internalCqQuery;
        if (!cq.isClosed() && cq.isRunning()) {
            try {
                cqName = cq.getName();
                cq.stop();
            } catch (QueryException | CqClosedException e) {
                if (isDebugEnabled) {
                    logger.debug("Failed to stop the CQ, CqName : {} Error : {}", cqName, e.getMessage());
                }
            }
        }
    }
}
Also used : QueryException(org.apache.geode.cache.query.QueryException) CqClosedException(org.apache.geode.cache.query.CqClosedException) CqQuery(org.apache.geode.cache.query.CqQuery)
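
The same stop-and-swallow pattern works for a single CQ outside CqServiceImpl; the sketch below is an illustrative helper, not Geode API. Note that CqException (checked) extends QueryException while CqClosedException is a runtime exception, so both arms of the multi-catch are required:

import org.apache.geode.cache.query.CqClosedException;
import org.apache.geode.cache.query.CqQuery;
import org.apache.geode.cache.query.QueryException;

public class StopSingleCq {

    // Stop one running CQ, tolerating the same failures CqServiceImpl.stopCqs does.
    static void stopQuietly(CqQuery cq) {
        if (!cq.isClosed() && cq.isRunning()) {
            try {
                cq.stop();
            } catch (QueryException | CqClosedException e) {
                System.err.println("Failed to stop CQ " + cq.getName() + ": " + e.getMessage());
            }
        }
    }
}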

Example 18 with QueryException

use of org.apache.geode.cache.query.QueryException in project geode by apache.

the class AbstractRegionMap method initialImagePut.

public boolean initialImagePut(final Object key, final long lastModified, Object newValue, final boolean wasRecovered, boolean deferLRUCallback, VersionTag entryVersion, InternalDistributedMember sender, boolean isSynchronizing) {
    boolean result = false;
    boolean done = false;
    boolean cleared = false;
    final LocalRegion owner = _getOwner();
    if (newValue == Token.TOMBSTONE && !owner.getConcurrencyChecksEnabled()) {
        return false;
    }
    if (owner instanceof HARegion && newValue instanceof CachedDeserializable) {
        Object actualVal = ((CachedDeserializable) newValue).getDeserializedValue(null, null);
        if (actualVal instanceof HAEventWrapper) {
            HAEventWrapper haEventWrapper = (HAEventWrapper) actualVal;
            // Key was removed at sender side so not putting it into the HARegion
            if (haEventWrapper.getClientUpdateMessage() == null) {
                return false;
            }
            // Getting the instance from the singleton CCN. This assumes only one bridge
            // server in the VM
            HAContainerWrapper haContainer = (HAContainerWrapper) CacheClientNotifier.getInstance().getHaContainer();
            if (haContainer == null) {
                return false;
            }
            HAEventWrapper original = null;
            do {
                ClientUpdateMessageImpl oldMsg = (ClientUpdateMessageImpl) haContainer.putIfAbsent(haEventWrapper, haEventWrapper.getClientUpdateMessage());
                if (oldMsg != null) {
                    original = (HAEventWrapper) haContainer.getKey(haEventWrapper);
                    if (original == null) {
                        continue;
                    }
                    synchronized (original) {
                        if ((HAEventWrapper) haContainer.getKey(original) != null) {
                            original.incAndGetReferenceCount();
                            HARegionQueue.addClientCQsAndInterestList(oldMsg, haEventWrapper, haContainer, owner.getName());
                            haEventWrapper.setClientUpdateMessage(null);
                            newValue = CachedDeserializableFactory.create(original, ((CachedDeserializable) newValue).getSizeInBytes());
                        } else {
                            original = null;
                        }
                    }
                } else {
                    // putIfAbsent successful
                    synchronized (haEventWrapper) {
                        haEventWrapper.incAndGetReferenceCount();
                        haEventWrapper.setHAContainer(haContainer);
                        haEventWrapper.setClientUpdateMessage(null);
                        haEventWrapper.setIsRefFromHAContainer(true);
                    }
                    break;
                }
            // try until we either get a reference to HAEventWrapper from
            // HAContainer or successfully put one into it.
            } while (original == null);
        }
    }
    try {
        RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.REMOVED_PHASE1);
        EntryEventImpl event = null;
        @Retained @Released Object oldValue = null;
        try {
            RegionEntry oldRe = null;
            synchronized (newRe) {
                try {
                    oldRe = putEntryIfAbsent(key, newRe);
                    while (!done && oldRe != null) {
                        synchronized (oldRe) {
                            if (oldRe.isRemovedPhase2()) {
                                owner.getCachePerfStats().incRetries();
                                _getMap().remove(key, oldRe);
                                oldRe = putEntryIfAbsent(key, newRe);
                            } else {
                                boolean acceptedVersionTag = false;
                                if (entryVersion != null && owner.concurrencyChecksEnabled) {
                                    Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
                                    try {
                                        boolean isTombstone = (newValue == Token.TOMBSTONE);
                                        // don't reschedule the tombstone if it hasn't changed
                                        boolean isSameTombstone = oldRe.isTombstone() && isTombstone && oldRe.getVersionStamp().asVersionTag().equals(entryVersion);
                                        if (isSameTombstone) {
                                            return true;
                                        }
                                        processVersionTagForGII(oldRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
                                        acceptedVersionTag = true;
                                    } catch (ConcurrentCacheModificationException e) {
                                        return false;
                                    }
                                }
                                final boolean oldIsTombstone = oldRe.isTombstone();
                                final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
                                try {
                                    result = oldRe.initialImagePut(owner, lastModified, newValue, wasRecovered, acceptedVersionTag);
                                    if (result) {
                                        if (oldIsTombstone) {
                                            owner.unscheduleTombstone(oldRe);
                                            if (newValue != Token.TOMBSTONE) {
                                                lruEntryCreate(oldRe);
                                            } else {
                                                lruEntryUpdate(oldRe);
                                            }
                                        }
                                        if (newValue == Token.TOMBSTONE) {
                                            owner.updateSizeOnRemove(key, oldSize);
                                            if (owner.getServerProxy() == null && owner.getVersionVector().isTombstoneTooOld(entryVersion.getMemberID(), entryVersion.getRegionVersion())) {
                                                // the received tombstone has already been reaped, so don't retain it
                                                removeTombstone(oldRe, entryVersion, false, false);
                                                return false;
                                            } else {
                                                owner.scheduleTombstone(oldRe, entryVersion);
                                                lruEntryDestroy(oldRe);
                                            }
                                        } else {
                                            int newSize = owner.calculateRegionEntryValueSize(oldRe);
                                            if (!oldIsTombstone) {
                                                owner.updateSizeOnPut(key, oldSize, newSize);
                                            } else {
                                                owner.updateSizeOnCreate(key, newSize);
                                            }
                                            EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
                                        }
                                    }
                                    if (owner.getIndexManager() != null) {
                                        // remove the old entry from the indexes first, since the index update could not locate the old key
                                        if (!oldRe.isRemoved()) {
                                            owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
                                        }
                                        owner.getIndexManager().updateIndexes(oldRe, oldRe.isRemoved() ? IndexManager.ADD_ENTRY : IndexManager.UPDATE_ENTRY, oldRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
                                    }
                                    done = true;
                                } finally {
                                    if (event != null) {
                                        event.release();
                                        event = null;
                                    }
                                }
                            }
                        }
                    }
                    if (!done) {
                        boolean versionTagAccepted = false;
                        if (entryVersion != null && owner.concurrencyChecksEnabled) {
                            Assert.assertTrue(entryVersion.getMemberID() != null, "GII entry versions must have identifiers");
                            try {
                                boolean isTombstone = (newValue == Token.TOMBSTONE);
                                processVersionTagForGII(newRe, owner, entryVersion, isTombstone, sender, !wasRecovered || isSynchronizing);
                                versionTagAccepted = true;
                            } catch (ConcurrentCacheModificationException e) {
                                return false;
                            }
                        }
                        result = newRe.initialImageInit(owner, lastModified, newValue, true, wasRecovered, versionTagAccepted);
                        try {
                            if (result) {
                                if (newValue == Token.TOMBSTONE) {
                                    owner.scheduleTombstone(newRe, entryVersion);
                                } else {
                                    owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(newRe));
                                    EntryLogger.logInitialImagePut(_getOwnerObject(), key, newValue);
                                    lruEntryCreate(newRe);
                                }
                                incEntryCount(1);
                            }
                            // Update local indexes
                            if (owner.getIndexManager() != null) {
                                // remove the old entry from the indexes first, since the index update could not locate the old key
                                if (oldRe != null && !oldRe.isRemoved()) {
                                    owner.getIndexManager().updateIndexes(oldRe, IndexManager.REMOVE_ENTRY, IndexProtocol.BEFORE_UPDATE_OP);
                                }
                                owner.getIndexManager().updateIndexes(newRe, newRe.isRemoved() ? IndexManager.REMOVE_ENTRY : IndexManager.UPDATE_ENTRY, newRe.isRemoved() ? IndexProtocol.OTHER_OP : IndexProtocol.AFTER_UPDATE_OP);
                            }
                            done = true;
                        } finally {
                            if (event != null) {
                                event.release();
                                event = null;
                            }
                        }
                    }
                } finally {
                    if (done && result) {
                        initialImagePutEntry(newRe);
                    }
                    if (!done) {
                        removeEntry(key, newRe, false);
                        if (owner.getIndexManager() != null) {
                            owner.getIndexManager().updateIndexes(newRe, IndexManager.REMOVE_ENTRY, IndexProtocol.OTHER_OP);
                        }
                    }
                }
            }
        // synchronized
        } finally {
            if (event != null)
                event.release();
            OffHeapHelper.release(oldValue);
        }
    } catch (RegionClearedException rce) {
        // Asif: do not issue any sort of callbacks
        done = false;
        cleared = true;
    } catch (QueryException qe) {
        done = false;
        cleared = true;
    } finally {
        if (done && !deferLRUCallback) {
            lruUpdateCallback();
        } else if (!cleared) {
            resetThreadLocals();
        }
    }
    return result;
}
Also used : ClientUpdateMessageImpl(org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessageImpl) HAContainerWrapper(org.apache.geode.internal.cache.ha.HAContainerWrapper) Released(org.apache.geode.internal.offheap.annotations.Released) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) QueryException(org.apache.geode.cache.query.QueryException) Retained(org.apache.geode.internal.offheap.annotations.Retained) StoredObject(org.apache.geode.internal.offheap.StoredObject) HAEventWrapper(org.apache.geode.internal.cache.tier.sockets.HAEventWrapper)
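
The do/while loop over haContainer.putIfAbsent above follows a general concurrent-map idiom: either install your value or obtain a reference to the one already present, retrying if the existing entry disappears between the putIfAbsent and the follow-up lookup. A stripped-down sketch of that shape using plain JDK types (Geode's reference counting and CQ bookkeeping are omitted):

import java.util.concurrent.ConcurrentMap;

public class InstallOrShare {

    // Returns the value that ends up mapped to key: ours if we won the race,
    // otherwise the one already present. Retries when the competing entry is
    // removed between putIfAbsent and get.
    static <K, V> V installOrShare(ConcurrentMap<K, V> map, K key, V mine) {
        for (;;) {
            V prev = map.putIfAbsent(key, mine);
            if (prev == null) {
                return mine; // our value was installed
            }
            V current = map.get(key);
            if (current != null) {
                return current; // share the existing value
            }
            // the existing entry was removed concurrently; try again
        }
    }
}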

Example 19 with QueryException

use of org.apache.geode.cache.query.QueryException in project geode by apache.

the class ResourceManagerWithQueryMonitorDUnitTest method doCriticalMemoryHitDuringGatherTestWithMultipleServers.

// tests a low-memory hit while gathering partitioned region results
private void doCriticalMemoryHitDuringGatherTestWithMultipleServers(final String regionName, boolean createPR, final int criticalThreshold, final boolean disabledQueryMonitorForLowMem, final int queryTimeout, final boolean hitCriticalThreshold) throws Exception {
    // create region on the server
    final Host host = Host.getHost(0);
    final VM server1 = host.getVM(0);
    final VM server2 = host.getVM(1);
    final VM client = host.getVM(2);
    final int numObjects = 200;
    try {
        final int[] port = AvailablePortHelper.getRandomAvailableTCPPorts(2);
        startCacheServer(server1, port[0], criticalThreshold, disabledQueryMonitorForLowMem, queryTimeout, regionName, createPR, 0);
        startCacheServer(server2, port[1], criticalThreshold, true, -1, regionName, createPR, 0);
        startClient(client, server1, port[0], regionName);
        populateData(server2, regionName, numObjects);
        createCancelDuringGatherTestHook(server1);
        client.invoke(new SerializableCallable("executing query to be canceled by gather") {

            public Object call() {
                QueryService qs = null;
                try {
                    qs = getCache().getQueryService();
                    Query query = qs.newQuery("Select * From /" + regionName);
                    query.execute();
                } catch (ServerOperationException soe) {
                    if (soe.getRootCause() instanceof QueryException) {
                        QueryException e = (QueryException) soe.getRootCause();
                        if (!isExceptionDueToLowMemory(e, CRITICAL_HEAP_USED)) {
                            throw new CacheException(soe) {
                            };
                        } else {
                            return 0;
                        }
                    }
                } catch (Exception e) {
                    throw new CacheException(e) {
                    };
                }
                // assertTrue(((CancelDuringGatherHook)DefaultQuery.testHook).triggeredOOME);
                throw new CacheException("should have hit low memory") {
                };
            }
        });
        verifyRejectedObjects(server1, disabledQueryMonitorForLowMem, queryTimeout, hitCriticalThreshold);
        // Pause for a second and then recover
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        // Recover from critical heap
        if (hitCriticalThreshold) {
            vmRecoversFromCriticalHeap(server1);
        }
        // Check to see if query execution is ok under "normal" or "healthy" conditions
        client.invoke(new CacheSerializableRunnable("Executing query when system is 'Normal'") {

            public void run2() {
                try {
                    QueryService qs = getCache().getQueryService();
                    Query query = qs.newQuery("Select * From /" + regionName);
                    SelectResults results = (SelectResults) query.execute();
                    assertEquals(numObjects, results.size());
                } catch (QueryException e) {
                    // QueryInvocationTarget, NameResolution, TypeMismatch and
                    // FunctionDomain exceptions all extend QueryException
                    fail("query should not fail under normal conditions: " + e);
                }
            }
        });
        // Recover from critical heap
        if (hitCriticalThreshold) {
            vmRecoversFromCriticalHeap(server1);
        }
    } finally {
        stopServer(server1);
        stopServer(server2);
    }
}
Also used : DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) Query(org.apache.geode.cache.query.Query) CacheException(org.apache.geode.cache.CacheException) TypeMismatchException(org.apache.geode.cache.query.TypeMismatchException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) Host(org.apache.geode.test.dunit.Host) NameResolutionException(org.apache.geode.cache.query.NameResolutionException) FunctionDomainException(org.apache.geode.cache.query.FunctionDomainException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) QueryExecutionTimeoutException(org.apache.geode.cache.query.QueryExecutionTimeoutException) ServerOperationException(org.apache.geode.cache.client.ServerOperationException) IndexInvalidException(org.apache.geode.cache.query.IndexInvalidException) QueryExecutionLowMemoryException(org.apache.geode.cache.query.QueryExecutionLowMemoryException) QueryException(org.apache.geode.cache.query.QueryException) SelectResults(org.apache.geode.cache.query.SelectResults) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) QueryService(org.apache.geode.cache.query.QueryService) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable)
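
The first callable's triage step, unwrapping a server-side QueryException from the ServerOperationException thrown by the client proxy, is reusable on its own. A minimal sketch follows; isExceptionDueToLowMemory is a private test helper, so the low-memory decision is left to the caller here:

import org.apache.geode.cache.client.ServerOperationException;
import org.apache.geode.cache.query.QueryException;

public class QueryFailureTriage {

    // Returns the server-side QueryException wrapped inside a client-side
    // ServerOperationException, or null if the root cause was something else.
    static QueryException unwrapQueryFailure(ServerOperationException soe) {
        Throwable root = soe.getRootCause();
        return root instanceof QueryException ? (QueryException) root : null;
    }
}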

Example 20 with QueryException

use of org.apache.geode.cache.query.QueryException in project geode by apache.

the class PRQueryProcessor method doBucketQuery.

/**
   * @throws ForceReattemptException if bucket was moved so caller should try query again
   */
private void doBucketQuery(final Integer bId, final PartitionedRegionDataStore prds, final DefaultQuery query, final Object[] params, final PRQueryResultCollector rq) throws QueryException, ForceReattemptException, InterruptedException {
    final BucketRegion bukRegion = (BucketRegion) prds.localBucket2RegionMap.get(bId);
    final PartitionedRegion pr = prds.getPartitionedRegion();
    try {
        pr.checkReadiness();
        if (bukRegion == null) {
            if (pr.isLocallyDestroyed || pr.isClosed) {
                throw new RegionDestroyedException("PR destroyed during query", pr.getFullPath());
            } else {
                throw new ForceReattemptException("Bucket id " + pr.bucketStringForLogs(bId) + " not found on VM " + pr.getMyId());
            }
        }
        bukRegion.waitForData();
        SelectResults results = null;
        // If the query has a LIMIT and no ORDER BY, apply the limit while building the result set.
        int limit = -1;
        if (query.getSimpleSelect().getOrderByAttrs() == null) {
            limit = query.getLimit(params);
        }
        if (!bukRegion.isBucketDestroyed()) {
            // If the result queue has reached the limit, no need to
            // execute the query. Handle the bucket destroy condition
            // and add the end bucket token.
            int numBucketsProcessed = getNumBucketsProcessed();
            if (limit < 0 || (rq.size() - numBucketsProcessed) < limit) {
                results = (SelectResults) query.prExecuteOnBucket(params, pr, bukRegion);
                this.resultType = results.getCollectionType().getElementType();
            }
            if (!bukRegion.isBucketDestroyed()) {
                // the bucket is still present, so it is safe to start adding to the results queue
                if (results != null) {
                    for (Object r : results) {
                        if (r == null) {
                            // Blocking queue does not support adding null.
                            rq.put(DefaultQuery.NULL_RESULT);
                        } else {
                            // Skip zero counts only for non-distinct COUNT queries; with DISTINCT an Integer result could be an actual region value.
                            if (!query.getSimpleSelect().isDistinct() && query.getSimpleSelect().isCount() && r instanceof Integer) {
                                if ((Integer) r != 0) {
                                    rq.put(r);
                                }
                            } else {
                                rq.put(r);
                            }
                        }
                        // Check if limit is satisfied.
                        if (limit >= 0 && (rq.size() - numBucketsProcessed) >= limit) {
                            break;
                        }
                    }
                }
                rq.put(new EndOfBucket(bId));
                this.incNumBucketsProcessed();
                // success
                return;
            }
        }
        // if we get here then the bucket must have been moved
        checkForBucketMoved(bId, bukRegion, pr);
        Assert.assertTrue(false, "checkForBucketMoved should have thrown ForceReattemptException");
    } catch (RegionDestroyedException rde) {
        checkForBucketMoved(bId, bukRegion, pr);
        throw rde;
    } catch (QueryException qe) {
        checkForBucketMoved(bId, bukRegion, pr);
        throw qe;
    }
}
Also used : Integer(java.lang.Integer) QueryException(org.apache.geode.cache.query.QueryException) SelectResults(org.apache.geode.cache.query.SelectResults) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException)
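
The limit arithmetic above subtracts getNumBucketsProcessed() from rq.size() because every finished bucket leaves one EndOfBucket token in the queue, so the raw queue size overstates the live result count by one per processed bucket. A simplified sketch of that bookkeeping with plain JDK types (the sentinel stands in for EndOfBucket):

import java.util.List;
import java.util.concurrent.BlockingQueue;

public class BoundedBucketDrain {

    static final Object END_OF_BUCKET = new Object(); // stands in for EndOfBucket

    // Add one bucket's results, stopping once the queue holds `limit` live
    // results: queue size minus one sentinel per already-finished bucket.
    static void drainBucket(List<Object> results, BlockingQueue<Object> rq,
                            int limit, int bucketsProcessed) throws InterruptedException {
        for (Object r : results) {
            rq.put(r);
            if (limit >= 0 && (rq.size() - bucketsProcessed) >= limit) {
                break; // limit satisfied
            }
        }
        rq.put(END_OF_BUCKET); // mark this bucket as finished
    }
}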

Aggregations

QueryException (org.apache.geode.cache.query.QueryException) 35
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException) 15
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable) 15
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion) 15
Region (org.apache.geode.cache.Region) 14
Cache (org.apache.geode.cache.Cache) 13
QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException) 13
SelectResults (org.apache.geode.cache.query.SelectResults) 13
QueryService (org.apache.geode.cache.query.QueryService) 12
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable) 12
LocalRegion (org.apache.geode.internal.cache.LocalRegion) 11
CancelException (org.apache.geode.CancelException) 10
TestException (util.TestException) 10
StructSetOrResultsSet (org.apache.geode.cache.query.functional.StructSetOrResultsSet) 8
IndexMaintenanceException (org.apache.geode.cache.query.IndexMaintenanceException) 6
HashSet (java.util.HashSet) 4
Iterator (java.util.Iterator) 4
CacheException (org.apache.geode.cache.CacheException) 4
Function (org.apache.geode.cache.execute.Function) 4
ForceReattemptException (org.apache.geode.internal.cache.ForceReattemptException) 4