Example 86 with CancelException

use of org.apache.geode.CancelException in project geode by apache.

the class PartitionedRegionHelper method cleanUpMetaDataOnNodeFailure.

// TODO rebalancing - this code was added here in the merge of -r22804:23093 from trunk
// because of changes made on trunk that require this method, which was removed on
// prRebalancing. It probably needs refactoring.
// The idea here is to remove meta data from the partitioned region for a node that
// has left the cache.
// A couple of options that didn't work:
// - Remove metadata in the region advisor for the PR instead - this doesn't work because
// a member can close its cache and then recreate the same region. Another member
// might end up removing metadata after the region is recreated, leading to inconsistent
// metadata.
// - Remove metadata on cache closure in the member that is closing - this didn't work
// because we can't do region operations after isClosing is set to true (to remove
// metadata), and removing metadata before isClosing is set to true results in operations
// being silently ignored because of inconsistent metadata and regions.
/**
   * Clean the config meta data for a DistributedMember which has left the DistributedSystem, one
   * PartitionedRegion at a time.
   */
public static void cleanUpMetaDataOnNodeFailure(DistributedMember failedMemId) {
    try {
        final InternalCache cache = GemFireCacheImpl.getInstance();
        if (cache == null || cache.getCancelCriterion().isCancelInProgress()) {
            return;
        }
        DM dm = cache.getInternalDistributedSystem().getDistributionManager();
        if (logger.isDebugEnabled()) {
            logger.debug("Cleaning PartitionedRegion meta data for memberId={}", failedMemId);
        }
        Region rootReg = PartitionedRegionHelper.getPRRoot(cache, false);
        if (rootReg == null) {
            return;
        }
        final ArrayList<String> ks = new ArrayList<String>(rootReg.keySet());
        if (ks.size() > 1) {
            Collections.shuffle(ks, PartitionedRegion.RANDOM);
        }
        for (String prName : ks) {
            try {
                cleanUpMetaDataForRegion(cache, prName, failedMemId, null);
            } catch (CancelException ignore) {
                // okay to ignore this - metadata will be cleaned up by cache close operation
            } catch (Exception e) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Got exception in cleaning up metadata. {}", e.getMessage(), e);
                }
            }
        }
    } catch (CancelException ignore) {
        // ignore
    }
}
Also used : ArrayList(java.util.ArrayList) DM(org.apache.geode.distributed.internal.DM) Region(org.apache.geode.cache.Region) CancelException(org.apache.geode.CancelException) RegionExistsException(org.apache.geode.cache.RegionExistsException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) EntryDestroyedException(org.apache.geode.cache.EntryDestroyedException) IOException(java.io.IOException) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) PartitionNotAvailableException(org.apache.geode.cache.partition.PartitionNotAvailableException) CacheWriterException(org.apache.geode.cache.CacheWriterException)
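
The shuffle-then-clean loop above is the reusable part of this method. Below is a minimal self-contained sketch of that pattern, with java.util.concurrent.CancellationException and a Consumer callback standing in for Geode's CancelException and cleanUpMetaDataForRegion; MetaDataCleaner is an illustrative name, not a Geode class.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CancellationException;
import java.util.function.Consumer;

final class MetaDataCleaner {

    private static final Random RANDOM = new Random();

    // Mirrors the loop above: shuffle the region names (presumably so that
    // concurrent cleaners on surviving members don't all start with the same
    // region), then clean one region at a time, swallowing cancellation.
    static void cleanAll(List<String> prNames, Consumer<String> cleanOneRegion) {
        List<String> names = new ArrayList<>(prNames);
        if (names.size() > 1) {
            Collections.shuffle(names, RANDOM);
        }
        for (String prName : names) {
            try {
                cleanOneRegion.accept(prName);
            } catch (CancellationException ignore) {
                // shutdown in progress; the cache close will clean up instead
            } catch (RuntimeException e) {
                // a failure on one region must not stop cleanup of the rest
            }
        }
    }
}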

Example 87 with CancelException

use of org.apache.geode.CancelException in project geode by apache.

the class PartitionedTXRegionStub method postPutAll.

/**
   * Create PutAllPRMsgs for each bucket, and send them.
   * 
   * @param putallO DistributedPutAllOperation object.
   */
public void postPutAll(DistributedPutAllOperation putallO, VersionedObjectList successfulPuts, LocalRegion r) throws TransactionException {
    if (r.getCache().isCacheAtShutdownAll()) {
        throw new CacheClosedException("Cache is shutting down");
    }
    PartitionedRegion pr = (PartitionedRegion) r;
    final long startTime = PartitionedRegionStats.startTime();
    // build all the msgs by bucketid
    HashMap prMsgMap = putallO.createPRMessages();
    PutAllPartialResult partialKeys = new PutAllPartialResult(putallO.putAllDataSize);
    // this is rebuilt by this method
    successfulPuts.clear();
    Iterator itor = prMsgMap.entrySet().iterator();
    while (itor.hasNext()) {
        Map.Entry mapEntry = (Map.Entry) itor.next();
        Integer bucketId = (Integer) mapEntry.getKey();
        PutAllPRMessage prMsg = (PutAllPRMessage) mapEntry.getValue();
        pr.checkReadiness();
        try {
            VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg, pr);
            // prMsg.saveKeySet(partialKeys);
            partialKeys.addKeysAndVersions(versions);
            successfulPuts.addAll(versions);
        } catch (PutAllPartialResultException pre) {
            // sendMsgByBucket applied partial keys
            partialKeys.consolidate(pre.getResult());
        } catch (Exception ex) {
            // failed with some other exception; record this bucket's first key as failed
            @Released EntryEventImpl firstEvent = prMsg.getFirstEvent(pr);
            try {
                partialKeys.saveFailedKey(firstEvent.getKey(), ex);
            } finally {
                firstEvent.release();
            }
        }
    }
    pr.prStats.endPutAll(startTime);
    if (partialKeys.hasFailure()) {
        pr.getCache().getLoggerI18n().info(LocalizedStrings.Region_PutAll_Applied_PartialKeys_0_1, new Object[] { pr.getFullPath(), partialKeys });
        if (putallO.isBridgeOperation()) {
            if (partialKeys.getFailure() instanceof CancelException) {
                throw (CancelException) partialKeys.getFailure();
            } else {
                throw new PutAllPartialResultException(partialKeys);
            }
        } else {
            if (partialKeys.getFailure() instanceof RuntimeException) {
                throw (RuntimeException) partialKeys.getFailure();
            } else {
                throw new RuntimeException(partialKeys.getFailure());
            }
        }
    }
}
Also used : HashMap(java.util.HashMap) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) CacheClosedException(org.apache.geode.cache.CacheClosedException) PrimaryBucketException(org.apache.geode.internal.cache.PrimaryBucketException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) TransactionDataRebalancedException(org.apache.geode.cache.TransactionDataRebalancedException) TransactionException(org.apache.geode.cache.TransactionException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) TransactionDataNodeHasDepartedException(org.apache.geode.cache.TransactionDataNodeHasDepartedException) DataLocationException(org.apache.geode.internal.cache.DataLocationException) CancelException(org.apache.geode.CancelException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) TransactionDataNotColocatedException(org.apache.geode.cache.TransactionDataNotColocatedException) BucketNotFoundException(org.apache.geode.internal.cache.BucketNotFoundException) Entry(org.apache.geode.cache.Region.Entry) PutAllPRMessage(org.apache.geode.internal.cache.partitioned.PutAllPRMessage) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Iterator(java.util.Iterator) Map(java.util.Map)
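
The structure of postPutAll - one message per bucket, merge whatever partially succeeded, and only surface a failure after every bucket has been tried - can be shown in isolation. Below is a minimal sketch of that pattern; PerBucketSender, BucketSend, and PartialFailure are illustrative stand-ins, not Geode classes.

import java.util.LinkedHashMap;
import java.util.Map;

final class PerBucketSender {

    // Stand-in for PutAllPartialResultException: carries whatever did succeed.
    static final class PartialFailure extends Exception {
        final Map<String, Object> applied;
        PartialFailure(Map<String, Object> applied) {
            this.applied = applied;
        }
    }

    interface BucketSend {
        // returns the versions/keys applied for one bucket
        Map<String, Object> send(int bucketId) throws Exception;
    }

    // Mirrors postPutAll: send per bucket, merge partial successes, remember
    // the first failure, and only throw after every bucket has been tried.
    static Map<String, Object> sendAll(Map<Integer, BucketSend> msgsByBucket)
            throws Exception {
        Map<String, Object> successful = new LinkedHashMap<>();
        Exception firstFailure = null;
        for (Map.Entry<Integer, BucketSend> entry : msgsByBucket.entrySet()) {
            try {
                successful.putAll(entry.getValue().send(entry.getKey()));
            } catch (PartialFailure pf) {
                // some keys in this bucket applied before the failure; keep them
                successful.putAll(pf.applied);
                if (firstFailure == null) {
                    firstFailure = pf;
                }
            } catch (Exception ex) {
                if (firstFailure == null) {
                    firstFailure = ex;
                }
            }
        }
        if (firstFailure != null) {
            throw firstFailure;
        }
        return successful;
    }
}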

Example 88 with CancelException

use of org.apache.geode.CancelException in project geode by apache.

the class PartitionedTXRegionStub method postRemoveAll.

@Override
public void postRemoveAll(DistributedRemoveAllOperation op, VersionedObjectList successfulOps, LocalRegion r) {
    if (r.getCache().isCacheAtShutdownAll()) {
        throw new CacheClosedException("Cache is shutting down");
    }
    PartitionedRegion pr = (PartitionedRegion) r;
    final long startTime = PartitionedRegionStats.startTime();
    // build all the msgs by bucketid
    HashMap<Integer, RemoveAllPRMessage> prMsgMap = op.createPRMessages();
    PutAllPartialResult partialKeys = new PutAllPartialResult(op.removeAllDataSize);
    // this is rebuilt by this method
    successfulOps.clear();
    Iterator<Map.Entry<Integer, RemoveAllPRMessage>> itor = prMsgMap.entrySet().iterator();
    while (itor.hasNext()) {
        Map.Entry<Integer, RemoveAllPRMessage> mapEntry = itor.next();
        Integer bucketId = mapEntry.getKey();
        RemoveAllPRMessage prMsg = mapEntry.getValue();
        pr.checkReadiness();
        try {
            VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg, pr);
            // prMsg.saveKeySet(partialKeys);
            partialKeys.addKeysAndVersions(versions);
            successfulOps.addAll(versions);
        } catch (PutAllPartialResultException pre) {
            // sendMsgByBucket applied partial keys
            partialKeys.consolidate(pre.getResult());
        } catch (Exception ex) {
            // failed with some other exception; record this bucket's first key as failed
            @Released EntryEventImpl firstEvent = prMsg.getFirstEvent(pr);
            try {
                partialKeys.saveFailedKey(firstEvent.getKey(), ex);
            } finally {
                firstEvent.release();
            }
        }
    }
    pr.prStats.endRemoveAll(startTime);
    if (partialKeys.hasFailure()) {
        pr.getCache().getLoggerI18n().info(LocalizedStrings.Region_RemoveAll_Applied_PartialKeys_0_1, new Object[] { pr.getFullPath(), partialKeys });
        if (op.isBridgeOperation()) {
            if (partialKeys.getFailure() instanceof CancelException) {
                throw (CancelException) partialKeys.getFailure();
            } else {
                throw new PutAllPartialResultException(partialKeys);
            }
        } else {
            if (partialKeys.getFailure() instanceof RuntimeException) {
                throw (RuntimeException) partialKeys.getFailure();
            } else {
                throw new RuntimeException(partialKeys.getFailure());
            }
        }
    }
}
Also used : EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) RemoveAllPRMessage(org.apache.geode.internal.cache.partitioned.RemoveAllPRMessage) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) CacheClosedException(org.apache.geode.cache.CacheClosedException) PrimaryBucketException(org.apache.geode.internal.cache.PrimaryBucketException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) TransactionDataRebalancedException(org.apache.geode.cache.TransactionDataRebalancedException) TransactionException(org.apache.geode.cache.TransactionException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) TransactionDataNodeHasDepartedException(org.apache.geode.cache.TransactionDataNodeHasDepartedException) DataLocationException(org.apache.geode.internal.cache.DataLocationException) CancelException(org.apache.geode.CancelException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) TransactionDataNotColocatedException(org.apache.geode.cache.TransactionDataNotColocatedException) BucketNotFoundException(org.apache.geode.internal.cache.BucketNotFoundException) Entry(org.apache.geode.cache.Region.Entry) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) HashMap(java.util.HashMap) Map(java.util.Map)
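
postRemoveAll mirrors postPutAll's exception-translation policy: for client (bridge) operations a CancelException is rethrown as-is and everything else is wrapped in PutAllPartialResultException, which carries the keys that did succeed; for peer callers the failure surfaces as a plain RuntimeException. The CancelException special case presumably lets client-side retry logic distinguish a server shutdown from a genuine partial failure.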

Example 89 with CancelException

use of org.apache.geode.CancelException in project geode by apache.

the class ConnectionPoolDUnitTest method basicTestLifetimeExpire.

private void basicTestLifetimeExpire(final boolean threadLocal) throws CacheException {
    final String name = this.getName();
    final Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    AsyncInvocation putAI = null;
    AsyncInvocation putAI2 = null;
    try {
        // Create two bridge servers
        SerializableRunnable createCacheServer = new CacheSerializableRunnable("Create Cache Server") {

            public void run2() throws CacheException {
                AttributesFactory factory = getBridgeServerRegionAttributes(null, null);
                factory.setCacheListener(new DelayListener(25));
                createRegion(name, factory.create());
                try {
                    startBridgeServer(0);
                } catch (Exception ex) {
                    org.apache.geode.test.dunit.Assert.fail("While starting CacheServer", ex);
                }
            }
        };
        vm0.invoke(createCacheServer);
        final int port0 = vm0.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
        final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
        vm1.invoke(createCacheServer);
        final int port1 = vm1.invoke(() -> ConnectionPoolDUnitTest.getCacheServerPort());
        SerializableRunnable stopCacheServer = new SerializableRunnable("Stop CacheServer") {

            public void run() {
                stopBridgeServer(getCache());
            }
        };
        // we only had to stop it to reserve a port
        vm1.invoke(stopCacheServer);
        // Create one bridge client in this VM
        SerializableRunnable create = new CacheSerializableRunnable("Create region") {

            public void run2() throws CacheException {
                getLonerSystem();
                getCache();
                AttributesFactory factory = new AttributesFactory();
                factory.setScope(Scope.LOCAL);
                factory.setConcurrencyChecksEnabled(false);
                ClientServerTestCase.configureConnectionPool(factory, host0, port0, port1,
                    false, /* queue */ -1, 0, null, 100, 500, threadLocal, 500);
                Region region = createRegion(name, factory.create());
                // force connections to form
                region.put("keyInit", new Integer(0));
                region.put("keyInit2", new Integer(0));
            }
        };
        vm2.invoke(create);
        // Launch async thread that puts objects into cache. This thread will execute until
        // the test has ended.
        SerializableRunnable putter1 = new CacheSerializableRunnable("Put objects") {

            public void run2() throws CacheException {
                Region region = getRootRegion().getSubregion(name);
                PoolImpl pool = getPool(region);
                PoolStats stats = pool.getStats();
                baselineLifetimeCheck = stats.getLoadConditioningCheck();
                baselineLifetimeExtensions = stats.getLoadConditioningExtensions();
                baselineLifetimeConnect = stats.getLoadConditioningConnect();
                baselineLifetimeDisconnect = stats.getLoadConditioningDisconnect();
                try {
                    int count = 0;
                    while (!stopTestLifetimeExpire) {
                        count++;
                        region.put("keyAI1", new Integer(count));
                    }
                } catch (NoAvailableServersException ex) {
                    if (stopTestLifetimeExpire) {
                        return;
                    } else {
                        throw ex;
                    }
                // } catch (RegionDestroyedException e) { //will be thrown when the test ends
                // /*ignore*/
                // } catch (CancelException e) { //will be thrown when the test ends
                // /*ignore*/
                }
            }
        };
        SerializableRunnable putter2 = new CacheSerializableRunnable("Put objects") {

            public void run2() throws CacheException {
                Region region = getRootRegion().getSubregion(name);
                try {
                    int count = 0;
                    while (!stopTestLifetimeExpire) {
                        count++;
                        region.put("keyAI2", new Integer(count));
                    }
                } catch (NoAvailableServersException ex) {
                    if (stopTestLifetimeExpire) {
                        return;
                    } else {
                        throw ex;
                    }
                // } catch (RegionDestroyedException e) { //will be thrown when the test ends
                // /*ignore*/
                // } catch (CancelException e) { //will be thrown when the test ends
                // /*ignore*/
                }
            }
        };
        putAI = vm2.invokeAsync(putter1);
        putAI2 = vm2.invokeAsync(putter2);
        SerializableRunnable verify1Server = new CacheSerializableRunnable("verify1Server") {

            public void run2() throws CacheException {
                Region region = getRootRegion().getSubregion(name);
                PoolImpl pool = getPool(region);
                final PoolStats stats = pool.getStats();
                verifyServerCount(pool, 1);
                WaitCriterion ev = new WaitCriterion() {

                    public boolean done() {
                        return stats.getLoadConditioningCheck() >= (10 + baselineLifetimeCheck);
                    }

                    public String description() {
                        return null;
                    }
                };
                Wait.waitForCriterion(ev, 30 * 1000, 200, true);
                // make sure no replacements are happening.
                // since we have 2 threads and 2 cnxs and 1 server
                // when lifetimes are up we should only want to connect back to the
                // server we are already connected to and thus just extend our lifetime
                assertTrue("baselineLifetimeCheck=" + baselineLifetimeCheck + " but stats.getLoadConditioningCheck()=" + stats.getLoadConditioningCheck(), stats.getLoadConditioningCheck() >= (10 + baselineLifetimeCheck));
                baselineLifetimeCheck = stats.getLoadConditioningCheck();
                assertTrue(stats.getLoadConditioningExtensions() > baselineLifetimeExtensions);
                assertTrue(stats.getLoadConditioningConnect() == baselineLifetimeConnect);
                assertTrue(stats.getLoadConditioningDisconnect() == baselineLifetimeDisconnect);
            }
        };
        SerializableRunnable verify2Servers = new CacheSerializableRunnable("verify2Servers") {

            public void run2() throws CacheException {
                Region region = getRootRegion().getSubregion(name);
                PoolImpl pool = getPool(region);
                final PoolStats stats = pool.getStats();
                verifyServerCount(pool, 2);
                // make sure some replacements are happening.
                // since we have 2 threads and 2 cnxs and 2 servers
                // when lifetimes are up we should connect to the other server sometimes.
                // int retry = 300;
                // while ((retry-- > 0)
                // && (stats.getLoadConditioningCheck() < (10+baselineLifetimeCheck))) {
                // pause(100);
                // }
                // assertTrue("Bug 39209 expected "
                // + stats.getLoadConditioningCheck()
                // + " to be >= "
                // + (10+baselineLifetimeCheck),
                // stats.getLoadConditioningCheck() >= (10+baselineLifetimeCheck));
                // TODO: does this WaitCriterion actually help?
                WaitCriterion wc = new WaitCriterion() {

                    String excuse;

                    public boolean done() {
                        int actual = stats.getLoadConditioningCheck();
                        int expected = 10 + baselineLifetimeCheck;
                        if (actual >= expected) {
                            return true;
                        }
                        excuse = "Bug 39209 expected " + actual + " to be >= " + expected;
                        return false;
                    }

                    public String description() {
                        return excuse;
                    }
                };
                try {
                    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
                } catch (AssertionError e) {
                    // dumpStack();
                    throw e;
                }
                assertTrue(stats.getLoadConditioningConnect() > baselineLifetimeConnect);
                assertTrue(stats.getLoadConditioningDisconnect() > baselineLifetimeDisconnect);
            }
        };
        vm2.invoke(verify1Server);
        assertEquals(true, putAI.isAlive());
        assertEquals(true, putAI2.isAlive());
    } finally {
        vm2.invoke(new SerializableRunnable("Stop Putters") {

            public void run() {
                stopTestLifetimeExpire = true;
            }
        });
        try {
            if (putAI != null) {
                // Verify that no exception has occurred in the putter thread
                ThreadUtils.join(putAI, 30 * 1000);
                if (putAI.exceptionOccurred()) {
                    org.apache.geode.test.dunit.Assert.fail("While putting entries: ", putAI.getException());
                }
            }
            if (putAI2 != null) {
                // Verify that no exception has occurred in the putter thread
                ThreadUtils.join(putAI, 30 * 1000);
                // FIXME this thread does not terminate
                // if (putAI2.exceptionOccurred()) {
                // fail("While putting entries: ", putAI.getException());
                // }
            }
        } finally {
            vm2.invoke(new SerializableRunnable("Stop Putters") {

                public void run() {
                    stopTestLifetimeExpire = false;
                }
            });
            // Close Pool
            vm2.invoke(new CacheSerializableRunnable("Close Pool") {

                public void run2() throws CacheException {
                    Region region = getRootRegion().getSubregion(name);
                    String poolName = region.getAttributes().getPoolName();
                    region.localDestroyRegion();
                    PoolManager.find(poolName).destroy();
                }
            });
            SerializableRunnable stopCacheServer = new SerializableRunnable("Stop CacheServer") {

                public void run() {
                    stopBridgeServer(getCache());
                }
            };
            vm1.invoke(stopCacheServer);
            vm0.invoke(stopCacheServer);
        }
    }
}
Also used : NoAvailableServersException(org.apache.geode.cache.client.NoAvailableServersException) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) Host(org.apache.geode.test.dunit.Host) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) PoolImpl(org.apache.geode.cache.client.internal.PoolImpl) CancelException(org.apache.geode.CancelException) IOException(java.io.IOException) Endpoint(org.apache.geode.cache.client.internal.Endpoint) PoolStats(org.apache.geode.internal.cache.PoolStats) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) VM(org.apache.geode.test.dunit.VM) LocalRegion(org.apache.geode.internal.cache.LocalRegion)
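
The anonymous WaitCriterion classes in this test all follow one idiom: poll a condition, remember a human-readable excuse for the most recent failure, and fail loudly on timeout. Below is a self-contained sketch of that idiom; Awaits.awaitCondition is a hypothetical helper, not part of the dunit framework.

import java.util.function.BooleanSupplier;
import java.util.function.Supplier;

final class Awaits {

    // Poll `done` every pollMs; on timeout, fail with the most recent excuse.
    static void awaitCondition(BooleanSupplier done, Supplier<String> excuse,
            long timeoutMs, long pollMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!done.getAsBoolean()) {
            if (System.currentTimeMillis() >= deadline) {
                throw new AssertionError("timed out waiting: " + excuse.get());
            }
            Thread.sleep(pollMs);
        }
    }
}

The verify2Servers wait above then amounts to awaitCondition(() -> stats.getLoadConditioningCheck() >= 10 + baselineLifetimeCheck, () -> excuse, 60 * 1000, 1000).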

Example 90 with CancelException

use of org.apache.geode.CancelException in project geode by apache.

the class TcpServer method processRequest.

/**
   * fix for bug 33711 - client requests are spun off to another thread for processing. Requests are
   * synchronized in processGossip.
   */
private void processRequest(final Socket sock) {
    executor.execute(() -> {
        long startTime = DistributionStats.getStatTime();
        DataInputStream input = null;
        Object request, response;
        try {
            socketCreator.configureServerSSLSocket(sock);
            sock.setSoTimeout(READ_TIMEOUT);
            try {
                input = new DataInputStream(sock.getInputStream());
            } catch (StreamCorruptedException e) {
                // Some garbage can be left on the socket stream
                // if a peer disappears at exactly the wrong moment.
                log.debug("Discarding illegal request from " + (sock.getInetAddress().getHostAddress() + ":" + sock.getPort()), e);
                return;
            }
            int gossipVersion = readGossipVersion(sock, input);
            short versionOrdinal;
            if (gossipVersion <= getCurrentGossipVersion() && GOSSIP_TO_GEMFIRE_VERSION_MAP.containsKey(gossipVersion)) {
                // Create a versioned stream to remember sender's GemFire version
                versionOrdinal = (short) GOSSIP_TO_GEMFIRE_VERSION_MAP.get(gossipVersion);
            } else {
                // Close the socket. We can not accept requests from a newer version
                try {
                    sock.getOutputStream().write("unknown protocol version".getBytes());
                    sock.getOutputStream().flush();
                } catch (IOException e) {
                    log.debug("exception in sending reply to process using unknown protocol " + gossipVersion, e);
                }
                sock.close();
                return;
            }
            if (Version.GFE_71.compareTo(versionOrdinal) <= 0) {
                // Recent versions of TcpClient will send the version ordinal
                versionOrdinal = input.readShort();
            }
            if (log.isDebugEnabled() && versionOrdinal != Version.CURRENT_ORDINAL) {
                log.debug("Locator reading request from " + sock.getInetAddress() + " with version " + Version.fromOrdinal(versionOrdinal, false));
            }
            input = new VersionedDataInputStream(input, Version.fromOrdinal(versionOrdinal, false));
            request = DataSerializer.readObject(input);
            if (log.isDebugEnabled()) {
                log.debug("Locator received request " + request + " from " + sock.getInetAddress());
            }
            if (request instanceof ShutdownRequest) {
                shuttingDown = true;
                // Don't call shutdown from within the worker thread, see java bug #6576792.
                // Closing the socket will cause our acceptor thread to shutdown the executor
                this.serverSocketPortAtClose = srv_sock.getLocalPort();
                srv_sock.close();
                response = new ShutdownResponse();
            } else if (request instanceof InfoRequest) {
                response = handleInfoRequest(request);
            } else if (request instanceof VersionRequest) {
                response = handleVersionRequest(request);
            } else {
                response = handler.processRequest(request);
            }
            handler.endRequest(request, startTime);
            startTime = DistributionStats.getStatTime();
            if (response != null) {
                DataOutputStream output = new DataOutputStream(sock.getOutputStream());
                if (versionOrdinal != Version.CURRENT_ORDINAL) {
                    output = new VersionedDataOutputStream(output, Version.fromOrdinal(versionOrdinal, false));
                }
                DataSerializer.writeObject(response, output);
                output.flush();
            }
            handler.endResponse(request, startTime);
        } catch (EOFException ignore) {
            // client went away - ignore
        } catch (CancelException ignore) {
            // ignore
        } catch (ClassNotFoundException ex) {
            String sender = null;
            if (sock != null) {
                sender = sock.getInetAddress().getHostAddress();
            }
            log.info("Unable to process request from " + sender + " exception=" + ex.getMessage());
        } catch (Exception ex) {
            String sender = null;
            if (sock != null) {
                sender = sock.getInetAddress().getHostAddress();
            }
            if (ex instanceof IOException) {
                // an IOException usually just means the client went away; don't log it with severe.
                if (!sock.isClosed()) {
                    log.info("Exception in processing request from " + sender, ex);
                }
            } else {
                log.fatal("Exception in processing request from " + sender, ex);
            }
        } catch (VirtualMachineError err) {
            SystemFailure.initiateFailure(err);
            throw err;
        } catch (Throwable ex) {
            SystemFailure.checkFailure();
            String sender = null;
            if (sock != null) {
                sender = sock.getInetAddress().getHostAddress();
            }
            try {
                log.fatal("Exception in processing request from " + sender, ex);
            } catch (VirtualMachineError err) {
                SystemFailure.initiateFailure(err);
                throw err;
            } catch (Throwable t) {
                SystemFailure.checkFailure();
                t.printStackTrace();
            }
        } finally {
            try {
                sock.close();
            } catch (IOException ignore) {
                // ignore
            }
        }
    });
}
Also used : DataOutputStream(java.io.DataOutputStream) VersionedDataOutputStream(org.apache.geode.internal.VersionedDataOutputStream) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) VersionedDataInputStream(org.apache.geode.internal.VersionedDataInputStream) CancelException(org.apache.geode.CancelException) StreamCorruptedException(java.io.StreamCorruptedException) EOFException(java.io.EOFException) SSLException(javax.net.ssl.SSLException)
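
The layered catch blocks at the end of processRequest implement Geode's fatal-error discipline: a VirtualMachineError is reported to SystemFailure and rethrown immediately, and every broad Throwable handler calls SystemFailure.checkFailure() first so it cannot accidentally swallow a JVM-level failure. Below is a condensed sketch of the idiom; FatalErrorIdiom and runGuarded are illustrative names.

import org.apache.geode.SystemFailure;

final class FatalErrorIdiom {

    // Run a task under Geode's VirtualMachineError discipline.
    static void runGuarded(Runnable task) {
        try {
            task.run();
        } catch (VirtualMachineError err) {
            // record the fatal error first, then rethrow so the JVM can exit
            SystemFailure.initiateFailure(err);
            throw err;
        } catch (Throwable t) {
            // checkFailure() rethrows if a VirtualMachineError was seen elsewhere;
            // only after it returns is it safe to treat t as non-fatal
            SystemFailure.checkFailure();
            System.err.println("non-fatal problem: " + t);
        }
    }
}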

Aggregations

CancelException (org.apache.geode.CancelException): 135
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 46
IOException (java.io.IOException): 40
ReplyException (org.apache.geode.distributed.internal.ReplyException): 30
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 25
CacheClosedException (org.apache.geode.cache.CacheClosedException): 23
Region (org.apache.geode.cache.Region): 22
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 21
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 18
Set (java.util.Set): 16
Cache (org.apache.geode.cache.Cache): 16
CacheException (org.apache.geode.cache.CacheException): 16
HashSet (java.util.HashSet): 15
Iterator (java.util.Iterator): 15
QueryException (org.apache.geode.cache.query.QueryException): 15
ArrayList (java.util.ArrayList): 13
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException): 13
QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException): 13
DistributedSystemDisconnectedException (org.apache.geode.distributed.DistributedSystemDisconnectedException): 13
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 13