
Example 61 with ClusterTopologyCheckedException

Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.

The class GridDhtLockFuture, method map.

/**
 * @param entries Entries.
 */
private void map(Iterable<GridDhtCacheEntry> entries) {
    synchronized (this) {
        if (mapped)
            return;
        mapped = true;
    }
    try {
        if (log.isDebugEnabled())
            log.debug("Mapping entry for DHT lock future: " + this);
        // Assign keys to primary nodes.
        for (GridDhtCacheEntry entry : entries) {
            try {
                while (true) {
                    try {
                        cctx.dhtMap(nearNodeId, topVer, entry, tx == null ? lockVer : null, log, dhtMap, null);
                        GridCacheMvccCandidate cand = entry.candidate(lockVer);
                        // Possible in case of lock cancellation.
                        if (cand == null) {
                            onFailed(false);
                            // Will mark initialized in finally block.
                            return;
                        }
                        break;
                    } catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry when mapping DHT lock future (will retry): " + entry);
                        entry = cctx.dht().entryExx(entry.key(), topVer);
                    }
                }
            } catch (GridDhtInvalidPartitionException e) {
                assert false : "DHT lock should never get invalid partition [err=" + e + ", fut=" + this + ']';
            }
        }
        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Mapping won't proceed because future is done: " + this);
            return;
        }
        if (log.isDebugEnabled())
            log.debug("Mapped DHT lock future [dhtMap=" + F.nodeIds(dhtMap.keySet()) + ", dhtLockFut=" + this + ']');
        long timeout = inTx() ? tx.remainingTime() : this.timeout;
        // Create mini futures.
        for (Map.Entry<ClusterNode, List<GridDhtCacheEntry>> mapped : dhtMap.entrySet()) {
            ClusterNode n = mapped.getKey();
            List<GridDhtCacheEntry> dhtMapping = mapped.getValue();
            int cnt = F.size(dhtMapping);
            if (cnt > 0) {
                assert !n.id().equals(cctx.localNodeId());
                if (inTx() && tx.remainingTime() == -1)
                    return;
                MiniFuture fut = new MiniFuture(n, dhtMapping);
                GridDhtLockRequest req = new GridDhtLockRequest(
                    cctx.cacheId(),
                    nearNodeId,
                    inTx() ? tx.nearXidVersion() : null,
                    threadId,
                    futId,
                    fut.futureId(),
                    lockVer,
                    topVer,
                    inTx(),
                    read,
                    isolation(),
                    isInvalidate(),
                    timeout,
                    cnt,
                    0,
                    inTx() ? tx.size() : cnt,
                    inTx() ? tx.subjectId() : null,
                    inTx() ? tx.taskNameHash() : 0,
                    read ? accessTtl : -1L,
                    skipStore,
                    cctx.store().configured(),
                    keepBinary,
                    cctx.deploymentEnabled());
                try {
                    for (ListIterator<GridDhtCacheEntry> it = dhtMapping.listIterator(); it.hasNext(); ) {
                        GridDhtCacheEntry e = it.next();
                        boolean needVal = false;
                        try {
                            // Must unswap entry so that isNewLocked returns correct value.
                            e.unswap(false);
                            needVal = e.isNewLocked();
                            if (needVal) {
                                List<ClusterNode> owners = cctx.topology().owners(e.partition(), tx != null ? tx.topologyVersion() : cctx.affinity().affinityTopologyVersion());
                                // Do not preload if local node is partition owner.
                                if (owners.contains(cctx.localNode()))
                                    needVal = false;
                            }
                        } catch (GridCacheEntryRemovedException ex) {
                            assert false : "Entry cannot become obsolete when DHT local candidate is added " + "[e=" + e + ", ex=" + ex + ']';
                        }
                        // Skip entry if it is not new and is not present in updated mapping.
                        if (tx != null && !needVal)
                            continue;
                        boolean invalidateRdr = e.readerId(n.id()) != null;
                        req.addDhtKey(e.key(), invalidateRdr, cctx);
                        if (needVal) {
                            // Mark last added key as needed to be preloaded.
                            req.markLastKeyForPreload();
                            if (tx != null) {
                                IgniteTxEntry txEntry = tx.entry(e.txKey());
                                // NOOP entries will be sent to backups on prepare step.
                                if (txEntry.op() == GridCacheOperation.READ)
                                    txEntry.op(GridCacheOperation.NOOP);
                            }
                        }
                        it.set(addOwned(req, e));
                    }
                    if (!F.isEmpty(req.keys())) {
                        if (tx != null)
                            tx.addLockTransactionNode(n);
                        // Append new future.
                        add(fut);
                        cctx.io().send(n, req, cctx.ioPolicy());
                        if (msgLog.isDebugEnabled()) {
                            msgLog.debug("DHT lock fut, sent request [txId=" + nearLockVer + ", dhtTxId=" + lockVer + ", inTx=" + inTx() + ", nodeId=" + n.id() + ']');
                        }
                    }
                } catch (IgniteCheckedException e) {
                    // Fail the whole thing.
                    if (e instanceof ClusterTopologyCheckedException)
                        fut.onResult();
                    else {
                        if (msgLog.isDebugEnabled()) {
                            msgLog.debug("DHT lock fut, failed to send request [txId=" + nearLockVer + ", dhtTxId=" + lockVer + ", inTx=" + inTx() + ", node=" + n.id() + ", err=" + e + ']');
                        }
                        fut.onResult(e);
                    }
                }
            }
        }
    } finally {
        markInitialized();
    }
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteTxEntry (org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException), List (java.util.List), ArrayList (java.util.ArrayList), Map (java.util.Map), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), LinkedHashMap (java.util.LinkedHashMap), GridCacheMvccCandidate (org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
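
Below is a minimal, self-contained sketch of the send-failure dispatch used in map() above. MessageSender, NodeFuture, and TopologyException are hypothetical stand-ins, not Ignite APIs: a topology failure means the target node left and is handled as node-left, while any other send failure fails the whole future with the error.

import java.util.UUID;

public class SendDispatchSketch {
    /** Hypothetical stand-in for ClusterTopologyCheckedException. */
    static class TopologyException extends Exception {
    }

    /** Hypothetical per-node mini future, mirroring MiniFuture above. */
    interface NodeFuture {
        void onNodeLeft();

        void onError(Exception e);
    }

    /** Hypothetical transport that may fail on send. */
    interface MessageSender {
        void send(UUID nodeId, Object msg) throws Exception;
    }

    static void sendToNode(MessageSender io, UUID nodeId, Object req, NodeFuture fut) {
        try {
            io.send(nodeId, req);
        }
        catch (TopologyException ignored) {
            // Target node left between mapping and send: benign, handled as node-left.
            fut.onNodeLeft();
        }
        catch (Exception e) {
            // Any other failure fails the whole operation.
            fut.onError(e);
        }
    }
}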

Example 62 with ClusterTopologyCheckedException

Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.

The class GridDhtTransactionalCacheAdapter, method processDhtLockRequest0.

/**
 * @param nodeId Node ID.
 * @param req Request.
 */
private void processDhtLockRequest0(UUID nodeId, GridDhtLockRequest req) {
    assert nodeId != null;
    assert req != null;
    assert !nodeId.equals(locNodeId);
    int cnt = F.size(req.keys());
    GridDhtLockResponse res;
    GridDhtTxRemote dhtTx = null;
    GridNearTxRemote nearTx = null;
    boolean fail = false;
    boolean cancelled = false;
    try {
        res = new GridDhtLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), cnt, ctx.deploymentEnabled());
        dhtTx = startRemoteTx(nodeId, req, res);
        nearTx = isNearEnabled(cacheCfg) ? near().startRemoteTx(nodeId, req) : null;
        if (nearTx != null && !nearTx.empty())
            res.nearEvicted(nearTx.evicted());
        else {
            if (!F.isEmpty(req.nearKeys())) {
                Collection<IgniteTxKey> nearEvicted = new ArrayList<>(req.nearKeys().size());
                nearEvicted.addAll(F.viewReadOnly(req.nearKeys(), new C1<KeyCacheObject, IgniteTxKey>() {

                    @Override
                    public IgniteTxKey apply(KeyCacheObject k) {
                        return ctx.txKey(k);
                    }
                }));
                res.nearEvicted(nearEvicted);
            }
        }
    } catch (IgniteTxRollbackCheckedException e) {
        String err = "Failed processing DHT lock request (transaction has been completed): " + req;
        U.error(log, err, e);
        res = new GridDhtLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), new IgniteTxRollbackCheckedException(err, e), ctx.deploymentEnabled());
        fail = true;
    } catch (IgniteCheckedException e) {
        String err = "Failed processing DHT lock request: " + req;
        U.error(log, err, e);
        res = new GridDhtLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), new IgniteCheckedException(err, e), ctx.deploymentEnabled());
        fail = true;
    } catch (GridDistributedLockCancelledException ignored) {
        // Received lock request for cancelled lock.
        if (log.isDebugEnabled())
            log.debug("Received lock request for canceled lock (will ignore): " + req);
        res = null;
        fail = true;
        cancelled = true;
    }
    boolean releaseAll = false;
    if (res != null) {
        try {
            // Reply back to sender.
            ctx.io().send(nodeId, res, ctx.ioPolicy());
            if (txLockMsgLog.isDebugEnabled()) {
                txLockMsgLog.debug("Sent dht lock response [txId=" + req.nearXidVersion() + ", dhtTxId=" + req.version() + ", inTx=" + req.inTx() + ", node=" + nodeId + ']');
            }
        } catch (ClusterTopologyCheckedException ignored) {
            U.warn(txLockMsgLog, "Failed to send dht lock response, node failed [" + "txId=" + req.nearXidVersion() + ", dhtTxId=" + req.version() + ", inTx=" + req.inTx() + ", node=" + nodeId + ']');
            fail = true;
            releaseAll = true;
        } catch (IgniteCheckedException e) {
            U.error(txLockMsgLog, "Failed to send dht lock response (lock will not be acquired) " + "txId=" + req.nearXidVersion() + ", dhtTxId=" + req.version() + ", inTx=" + req.inTx() + ", node=" + nodeId + ']', e);
            fail = true;
        }
    }
    if (fail) {
        if (dhtTx != null)
            dhtTx.rollbackRemoteTx();
        // Even though this should never happen, we leave this check for consistency.
        if (nearTx != null)
            nearTx.rollbackRemoteTx();
        List<KeyCacheObject> keys = req.keys();
        if (keys != null) {
            for (KeyCacheObject key : keys) {
                while (true) {
                    GridDistributedCacheEntry entry = peekExx(key);
                    try {
                        if (entry != null) {
                            // Release all locks because sender node left grid.
                            if (releaseAll)
                                entry.removeExplicitNodeLocks(req.nodeId());
                            else
                                entry.removeLock(req.version());
                        }
                        break;
                    } catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Attempted to remove lock on removed entity during during failure " + "handling for dht lock request (will retry): " + entry);
                    }
                }
            }
        }
        if (releaseAll && !cancelled)
            U.warn(log, "Sender node left grid in the midst of lock acquisition (locks have been released).");
    }
}
Also used: GridDistributedCacheEntry (org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry), GridDistributedLockCancelledException (org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException), ArrayList (java.util.ArrayList), IgniteTxRollbackCheckedException (org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException), GridNearTxRemote (org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxRemote), C1 (org.apache.ignite.internal.util.typedef.C1), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException), IgniteTxKey (org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey), KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
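
This example also shows the retry loop used whenever an entry is concurrently removed (GridCacheEntryRemovedException). The sketch below distills that loop under assumed types; Entry, EntryRemovedException, and the resolve function are hypothetical stand-ins, not Ignite APIs: the operation is retried against a freshly resolved entry until it succeeds.

import java.util.function.Function;

public class RetryOnRemovedSketch {
    /** Hypothetical analogue of GridCacheEntryRemovedException. */
    static class EntryRemovedException extends Exception {
    }

    /** Hypothetical cache entry exposing only the operation we retry. */
    interface Entry {
        void removeLock(long ver) throws EntryRemovedException;
    }

    /**
     * Retries the unlock until it is applied to a live entry, re-resolving
     * the entry on every removed-entry failure (cf. peekExx above).
     */
    static void removeLockWithRetry(Function<String, Entry> resolve, String key, long ver) {
        while (true) {
            Entry entry = resolve.apply(key);
            try {
                entry.removeLock(ver);
                return;
            }
            catch (EntryRemovedException ignored) {
                // Entry was concurrently removed: loop and resolve a fresh entry.
            }
        }
    }
}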

Example 63 with ClusterTopologyCheckedException

Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.

The class GridDhtTransactionalCacheAdapter, method removeLocks.

/**
 * @param nodeId Node ID.
 * @param ver Version.
 * @param keys Keys.
 * @param unmap Flag for un-mapping version.
 */
public void removeLocks(UUID nodeId, GridCacheVersion ver, Iterable<KeyCacheObject> keys, boolean unmap) {
    assert nodeId != null;
    assert ver != null;
    if (F.isEmpty(keys))
        return;
    // Remove mapped versions.
    GridCacheVersion dhtVer = unmap ? ctx.mvcc().unmapVersion(ver) : ver;
    Map<ClusterNode, List<KeyCacheObject>> dhtMap = new HashMap<>();
    Map<ClusterNode, List<KeyCacheObject>> nearMap = new HashMap<>();
    GridCacheVersion obsoleteVer = null;
    for (KeyCacheObject key : keys) {
        while (true) {
            boolean created = false;
            GridDhtCacheEntry entry = peekExx(key);
            if (entry == null) {
                entry = entryExx(key);
                created = true;
            }
            try {
                GridCacheMvccCandidate cand = null;
                if (dhtVer == null) {
                    cand = entry.localCandidateByNearVersion(ver, true);
                    if (cand != null)
                        dhtVer = cand.version();
                    else {
                        if (log.isDebugEnabled())
                            log.debug("Failed to locate lock candidate based on dht or near versions [nodeId=" + nodeId + ", ver=" + ver + ", unmap=" + unmap + ", keys=" + keys + ']');
                        entry.removeLock(ver);
                        if (created) {
                            if (obsoleteVer == null)
                                obsoleteVer = ctx.versions().next();
                            if (entry.markObsolete(obsoleteVer))
                                removeEntry(entry);
                        }
                        break;
                    }
                }
                if (cand == null)
                    cand = entry.candidate(dhtVer);
                AffinityTopologyVersion topVer = cand == null ? AffinityTopologyVersion.NONE : cand.topologyVersion();
                // Note that we obtain readers before lock is removed.
                // Even in case if entry would be removed just after lock is removed,
                // we must send release messages to backups and readers.
                Collection<UUID> readers = entry.readers();
                // Remove the lock for the resolved DHT version.
                if (entry.removeLock(dhtVer)) {
                    // Map to backups and near readers.
                    map(nodeId, topVer, entry, readers, dhtMap, nearMap);
                    if (log.isDebugEnabled())
                        log.debug("Removed lock [lockId=" + ver + ", key=" + key + ']');
                } else if (log.isDebugEnabled())
                    log.debug("Received unlock request for unknown candidate " + "(added to cancelled locks set) [ver=" + ver + ", entry=" + entry + ']');
                if (created && entry.markObsolete(dhtVer))
                    removeEntry(entry);
                ctx.evicts().touch(entry, topVer);
                break;
            } catch (GridCacheEntryRemovedException ignored) {
                if (log.isDebugEnabled())
                    log.debug("Received remove lock request for removed entry (will retry): " + entry);
            } catch (IgniteCheckedException e) {
                U.error(log, "Failed to remove locks for keys: " + keys, e);
            }
        }
    }
    IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(ver);
    Collection<GridCacheVersion> committed = versPair.get1();
    Collection<GridCacheVersion> rolledback = versPair.get2();
    // Backups.
    for (Map.Entry<ClusterNode, List<KeyCacheObject>> entry : dhtMap.entrySet()) {
        ClusterNode n = entry.getKey();
        List<KeyCacheObject> keyBytes = entry.getValue();
        GridDhtUnlockRequest req = new GridDhtUnlockRequest(ctx.cacheId(), keyBytes.size(), ctx.deploymentEnabled());
        req.version(dhtVer);
        try {
            for (KeyCacheObject key : keyBytes) req.addKey(key, ctx);
            keyBytes = nearMap.get(n);
            if (keyBytes != null)
                for (KeyCacheObject key : keyBytes) req.addNearKey(key);
            req.completedVersions(committed, rolledback);
            ctx.io().send(n, req, ctx.ioPolicy());
        } catch (ClusterTopologyCheckedException ignore) {
            if (log.isDebugEnabled())
                log.debug("Node left while sending unlock request: " + n);
        } catch (IgniteCheckedException e) {
            U.error(log, "Failed to send unlock request to node (will make best effort to complete): " + n, e);
        }
    }
    // Readers.
    for (Map.Entry<ClusterNode, List<KeyCacheObject>> entry : nearMap.entrySet()) {
        ClusterNode n = entry.getKey();
        if (!dhtMap.containsKey(n)) {
            List<KeyCacheObject> keyBytes = entry.getValue();
            GridDhtUnlockRequest req = new GridDhtUnlockRequest(ctx.cacheId(), keyBytes.size(), ctx.deploymentEnabled());
            req.version(dhtVer);
            try {
                for (KeyCacheObject key : keyBytes) req.addNearKey(key);
                req.completedVersions(committed, rolledback);
                ctx.io().send(n, req, ctx.ioPolicy());
            } catch (ClusterTopologyCheckedException ignore) {
                if (log.isDebugEnabled())
                    log.debug("Node left while sending unlock request: " + n);
            } catch (IgniteCheckedException e) {
                U.error(log, "Failed to send unlock request to node (will make best effort to complete): " + n, e);
            }
        }
    }
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), HashMap (java.util.HashMap), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), Collection (java.util.Collection), List (java.util.List), ArrayList (java.util.ArrayList), LinkedList (java.util.LinkedList), GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException), UUID (java.util.UUID), Map (java.util.Map), GridCacheConcurrentMap (org.apache.ignite.internal.processors.cache.GridCacheConcurrentMap), KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject), GridCacheMvccCandidate (org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
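
A compact sketch of the per-node batching and best-effort sends in removeLocks() above. Transport and TopologyException are hypothetical stand-ins for Ignite's IO layer: keys are grouped per target node, one request is sent per node, and a node that left during the send is simply skipped (the real code logs it at debug level).

import java.util.List;
import java.util.Map;

public class BatchUnlockSketch {
    /** Hypothetical analogue of ClusterTopologyCheckedException. */
    static class TopologyException extends Exception {
    }

    /** Hypothetical transport sending one batched request per node. */
    interface Transport {
        void send(String nodeId, List<String> keys) throws Exception;
    }

    static void sendUnlocks(Transport io, Map<String, List<String>> keysByNode) {
        for (Map.Entry<String, List<String>> e : keysByNode.entrySet()) {
            try {
                io.send(e.getKey(), e.getValue());
            }
            catch (TopologyException ignored) {
                // Node already left: nothing to unlock there, skip it.
            }
            catch (Exception ex) {
                // Best effort: the real code logs the error and continues.
            }
        }
    }
}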

Example 64 with ClusterTopologyCheckedException

Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.

The class GridDhtTxFinishFuture, method rollbackLockTransactions.

/**
 * @param nodes Nodes.
 * @return {@code True} in case there is at least one synchronous {@code MiniFuture} to wait for.
 */
private boolean rollbackLockTransactions(Collection<ClusterNode> nodes) {
    assert !F.isEmpty(nodes);
    if (tx.onePhaseCommit())
        return false;
    boolean sync = tx.syncMode() == FULL_SYNC;
    if (tx.explicitLock())
        sync = true;
    boolean res = false;
    int miniId = 0;
    for (ClusterNode n : nodes) {
        assert !n.isLocal();
        MiniFuture fut = new MiniFuture(++miniId, n);
        // Append new future.
        add(fut);
        GridDhtTxFinishRequest req = new GridDhtTxFinishRequest(
            tx.nearNodeId(),
            futId,
            fut.futureId(),
            tx.topologyVersion(),
            tx.xidVersion(),
            tx.commitVersion(),
            tx.threadId(),
            tx.isolation(),
            false,
            tx.isInvalidate(),
            tx.system(),
            tx.ioPolicy(),
            tx.isSystemInvalidate(),
            sync ? FULL_SYNC : tx.syncMode(),
            tx.completedBase(),
            tx.committedVersions(),
            tx.rolledbackVersions(),
            tx.pendingVersions(),
            tx.size(),
            tx.subjectId(),
            tx.taskNameHash(),
            tx.activeCachesDeploymentEnabled(),
            false,
            false);
        try {
            cctx.io().send(n, req, tx.ioPolicy());
            if (msgLog.isDebugEnabled()) {
                msgLog.debug("DHT finish fut, sent request lock tx [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ']');
            }
            if (sync)
                res = true;
            else
                fut.onDone();
        } catch (IgniteCheckedException e) {
            // Fail the whole thing.
            if (e instanceof ClusterTopologyCheckedException)
                fut.onNodeLeft((ClusterTopologyCheckedException) e);
            else {
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT finish fut, failed to send request lock tx [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + n.id() + ", err=" + e + ']');
                }
                fut.onResult(e);
            }
        }
    }
    return res;
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
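
A short sketch of the sync/async completion choice above, using hypothetical Transport and MiniFut types rather than Ignite APIs: in synchronous mode the caller must await the remote acknowledgment, otherwise each per-node future completes as soon as the request is handed to the transport.

import java.util.List;

public class SyncModeSketch {
    /** Hypothetical transport for the finish request. */
    interface Transport {
        void send(String nodeId, Object req) throws Exception;
    }

    /** Hypothetical per-node mini future. */
    static class MiniFut {
        final String nodeId;
        volatile boolean done;

        MiniFut(String nodeId) {
            this.nodeId = nodeId;
        }

        void onDone() {
            done = true;
        }
    }

    /** Returns true if at least one synchronous mini future must be awaited. */
    static boolean sendFinishRequests(Transport io, List<String> nodes, boolean sync) {
        boolean res = false;

        for (String n : nodes) {
            MiniFut fut = new MiniFut(n);

            try {
                io.send(n, "finish-request");

                if (sync)
                    res = true;   // Remote ack will complete the future later.
                else
                    fut.onDone(); // Fire-and-forget: complete immediately.
            }
            catch (Exception e) {
                fut.onDone();     // Simplified: a send failure completes the future too.
            }
        }

        return res;
    }
}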

Example 65 with ClusterTopologyCheckedException

Use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.

The class GridDhtAssignmentFetchFuture, method requestFromNextNode.

/**
 * Requests affinity from next node in the list.
 */
private void requestFromNextNode() {
    boolean complete;
    // Avoid 'protected field is accessed in synchronized context' warning.
    IgniteLogger log0 = log;
    synchronized (this) {
        while (!availableNodes.isEmpty()) {
            ClusterNode node = availableNodes.poll();
            try {
                if (log0.isDebugEnabled())
                    log0.debug("Sending affinity fetch request to remote node [locNodeId=" + ctx.localNodeId() + ", node=" + node + ']');
                ctx.io().send(node, new GridDhtAffinityAssignmentRequest(id, grpId, topVer, needPartState), AFFINITY_POOL);
                // Close window for listener notification.
                if (ctx.discovery().node(node.id()) == null) {
                    U.warn(log0, "Failed to request affinity assignment from remote node (node left grid, will " + "continue to another node): " + node);
                    continue;
                }
                pendingNode = node;
                break;
            } catch (ClusterTopologyCheckedException ignored) {
                U.warn(log0, "Failed to request affinity assignment from remote node (node left grid, will " + "continue to another node): " + node);
            } catch (IgniteCheckedException e) {
                if (ctx.discovery().reconnectSupported() && X.hasCause(e, IOException.class)) {
                    onDone(new IgniteNeedReconnectException(ctx.localNode(), e));
                    return;
                }
                U.warn(log0, "Failed to request affinity assignment from remote node (will " + "continue to another node): " + node);
            }
        }
        complete = pendingNode == null;
    }
    // Affinity should be calculated from scratch.
    if (complete)
        onDone((GridDhtAffinityAssignmentResponse) null);
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), IgniteLogger (org.apache.ignite.IgniteLogger), IgniteNeedReconnectException (org.apache.ignite.internal.IgniteNeedReconnectException), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
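
A minimal sketch of the fallback iteration in requestFromNextNode() above, with hypothetical Transport and TopologyException types standing in for Ignite's IO layer: a node that left the grid is skipped and the next candidate is tried; if no candidate accepts the request, the caller falls back to computing the assignment from scratch.

import java.util.Queue;

public class NextNodeSketch {
    /** Hypothetical analogue of ClusterTopologyCheckedException. */
    static class TopologyException extends Exception {
    }

    /** Hypothetical transport for the affinity fetch request. */
    interface Transport {
        void request(String nodeId) throws Exception;
    }

    /** Returns the node now pending a response, or null if every candidate failed. */
    static String requestFromNextNode(Transport io, Queue<String> candidates) {
        while (!candidates.isEmpty()) {
            String node = candidates.poll();

            try {
                io.request(node);

                return node; // Pending node: wait for its response.
            }
            catch (TopologyException ignored) {
                // Node left the grid: continue to the next candidate.
            }
            catch (Exception e) {
                // Simplified: other failures also fall through to the next candidate.
            }
        }

        return null; // No candidate accepted; compute the assignment from scratch.
    }
}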

Aggregations

ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 79 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 59 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 49 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 19 usages
UUID (java.util.UUID): 17 usages
Map (java.util.Map): 16 usages
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 16 usages
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture): 14 usages
GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException): 13 usages
HashMap (java.util.HashMap): 12 usages
GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion): 12 usages
ArrayList (java.util.ArrayList): 11 usages
ClusterTopologyServerNotFoundException (org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException): 10 usages
Collection (java.util.Collection): 8 usages
IgniteException (org.apache.ignite.IgniteException): 8 usages
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 8 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 7 usages
IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException): 7 usages
Event (org.apache.ignite.events.Event): 6 usages
IgniteClientDisconnectedCheckedException (org.apache.ignite.internal.IgniteClientDisconnectedCheckedException): 6 usages