Search in sources:

Example 1 with GridCacheMvccCandidate

Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.

The class GridDhtPartitionsExchangeFuture, method waitPartitionRelease.

/**
 * The main purpose of this method is to wait for all ongoing updates (transactional and atomic), initiated on
 * the previous topology version, to finish to prevent inconsistencies during rebalancing and to prevent two
 * different simultaneous owners of the same lock.
 * Also, in the case of a PME-free switch, this method can be used to wait for tx recovery only.
 *
 * @param latchId Distributed latch Id.
 * @param distributed If {@code true} then node should wait for partition release completion on all other nodes.
 * @param doRollback If {@code true}, tries to roll back transactions that lock partitions; if {@code false},
 *      avoids unnecessary calls of {@link org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager#rollbackOnTopologyChange}.
 *
 * @throws IgniteCheckedException If failed.
 */
private void waitPartitionRelease(String latchId, boolean distributed, boolean doRollback) throws IgniteCheckedException {
    Latch releaseLatch = null;
    IgniteInternalFuture<?> partReleaseFut;
    cctx.exchange().exchangerBlockingSectionBegin();
    try {
        // Wait for other nodes only on first phase.
        if (distributed)
            releaseLatch = cctx.exchange().latch().getOrCreate(latchId, initialVersion());
        partReleaseFut = context().exchangeFreeSwitch() && isBaselineNodeFailed()
            ? cctx.partitionRecoveryFuture(initialVersion(), firstDiscoEvt.eventNode())
            : cctx.partitionReleaseFuture(initialVersion());
        // Assign to a class field so it is included in the toString() output.
        this.partReleaseFut = partReleaseFut;
    } finally {
        cctx.exchange().exchangerBlockingSectionEnd();
    }
    if (log.isTraceEnabled())
        log.trace("Before waiting for partition release future: " + this);
    int dumpCnt = 0;
    long nextDumpTime = 0;
    IgniteConfiguration cfg = cctx.gridConfig();
    long waitStartNanos = System.nanoTime();
    long waitTimeout = 2 * cfg.getNetworkTimeout();
    boolean txRolledBack = !doRollback;
    while (true) {
        // Read txTimeoutOnPME from configuration after every iteration.
        long curTimeout = cfg.getTransactionConfiguration().getTxTimeoutOnPartitionMapExchange();
        cctx.exchange().exchangerBlockingSectionBegin();
        try {
            // This avoids unnecessary waiting for rollback.
            partReleaseFut.get(curTimeout > 0 && !txRolledBack ? Math.min(curTimeout, waitTimeout) : waitTimeout, TimeUnit.MILLISECONDS);
            break;
        } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to a hang.
            if (nextDumpTime <= U.currentTimeMillis()) {
                dumpPendingObjects(partReleaseFut, curTimeout <= 0 && !txRolledBack);
                nextDumpTime = U.currentTimeMillis() + nextDumpTimeout(dumpCnt++, waitTimeout);
            }
            long passedMillis = U.millisSinceNanos(waitStartNanos);
            if (!txRolledBack && curTimeout > 0 && passedMillis >= curTimeout) {
                txRolledBack = true;
                cctx.tm().rollbackOnTopologyChange(initialVersion());
            }
        } catch (IgniteCheckedException e) {
            U.warn(log, "Unable to await partitions release future", e);
            throw e;
        } finally {
            cctx.exchange().exchangerBlockingSectionEnd();
        }
    }
    long waitEndNanos = System.nanoTime();
    if (log.isInfoEnabled()) {
        long waitTime = U.nanosToMillis(waitEndNanos - waitStartNanos);
        String futInfo = RELEASE_FUTURE_DUMP_THRESHOLD > 0 && waitTime > RELEASE_FUTURE_DUMP_THRESHOLD ? partReleaseFut.toString() : "NA";
        String mode = distributed ? "DISTRIBUTED" : "LOCAL";
        log.info("Finished waiting for partition release future [topVer=" + exchangeId().topologyVersion() + ", waitTime=" + waitTime + "ms, futInfo=" + futInfo + ", mode=" + mode + "]");
    }
    if (!context().exchangeFreeSwitch()) {
        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());
        nextDumpTime = 0;
        dumpCnt = 0;
        while (true) {
            cctx.exchange().exchangerBlockingSectionBegin();
            try {
                locksFut.get(50, TimeUnit.MILLISECONDS);
                break;
            } catch (IgniteFutureTimeoutCheckedException ignored) {
                if (nextDumpTime <= U.currentTimeMillis()) {
                    U.warn(log, "Failed to wait for locks release future. " + "Dumping pending objects that might be the cause: " + cctx.localNodeId());
                    U.warn(log, "Locked keys:");
                    for (IgniteTxKey key : cctx.mvcc().lockedKeys())
                        U.warn(log, "Locked key: " + key);
                    for (IgniteTxKey key : cctx.mvcc().nearLockedKeys())
                        U.warn(log, "Locked near key: " + key);
                    Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks = cctx.mvcc().unfinishedLocks(exchId.topologyVersion());
                    for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
                        U.warn(log, "Awaited locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
                    nextDumpTime = U.currentTimeMillis() + nextDumpTimeout(dumpCnt++, waitTimeout);
                    if (getBoolean(IGNITE_THREAD_DUMP_ON_EXCHANGE_TIMEOUT, false))
                        U.dumpThreads(log);
                }
                // Sometimes FinishLockFuture is not rechecked, causing a frozen PME.
                // Recheck every 50 milliseconds.
                cctx.mvcc().recheckPendingLocks();
            } finally {
                cctx.exchange().exchangerBlockingSectionEnd();
            }
        }
        timeBag.finishGlobalStage("Wait partitions release [latch=" + latchId + "]");
    }
    if (releaseLatch == null) {
        assert !distributed : "Partitions release latch must be initialized in distributed mode.";
        return;
    }
    releaseLatch.countDown();
    // For compatibility with old versions where joining nodes do not wait for the latch.
    if (localJoinExchange() && !cctx.exchange().latch().canSkipJoiningNodes(initialVersion()))
        return;
    try {
        String troubleshootingHint;
        if (crd.isLocal())
        troubleshootingHint = "Some nodes have not sent acknowledgement for latch completion. " + "It's possible due to unfinished atomic updates, transactions " + "or unreleased explicit locks on those nodes. " + "Please check logs for errors on nodes with ids reported in the latch `pendingAcks` collection";
        else
            troubleshootingHint = "For more details please check coordinator node logs [crdNode=" + crd.toString() + "]";
        while (true) {
            try {
                cctx.exchange().exchangerBlockingSectionBegin();
                try {
                    releaseLatch.await(waitTimeout, TimeUnit.MILLISECONDS);
                } finally {
                    cctx.exchange().exchangerBlockingSectionEnd();
                }
                if (log.isInfoEnabled())
                    log.info("Finished waiting for partitions release latch: " + releaseLatch);
                break;
            } catch (IgniteFutureTimeoutCheckedException ignored) {
                U.warn(log, "Unable to await partitions release latch within timeout. " + troubleshootingHint + " [latch=" + releaseLatch + "]");
                // Try to resend ack.
                releaseLatch.countDown();
            }
        }
    } catch (IgniteCheckedException e) {
        U.warn(log, "Stop waiting for partitions release latch: " + e.getMessage());
    }
    timeBag.finishGlobalStage("Wait partitions release latch [latch=" + latchId + "]");
}
Also used: IgniteCheckedException (org.apache.ignite.IgniteCheckedException), IgniteConfiguration (org.apache.ignite.configuration.IgniteConfiguration), Latch (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.latch.Latch), CountDownLatch (java.util.concurrent.CountDownLatch), IgniteFutureTimeoutCheckedException (org.apache.ignite.internal.IgniteFutureTimeoutCheckedException), IgniteTxKey (org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey), Map (java.util.Map), LinkedHashMap (java.util.LinkedHashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), ConcurrentMap (java.util.concurrent.ConcurrentMap), GridCacheMvccCandidate (org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate)
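
The core of waitPartitionRelease is a wait-dump-escalate loop: poll the release future with a bounded timeout, periodically dump what is still pending, and once txTimeoutOnPartitionMapExchange elapses, roll back the transactions blocking the switch. Below is a minimal JDK-only sketch of that loop; ReleaseWaitLoop and its parameters are illustrative stand-ins, not Ignite API.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/** Minimal sketch of the wait-dump-escalate loop above; all names are illustrative, not Ignite API. */
public class ReleaseWaitLoop {
    /**
     * @param partReleaseFut Stand-in for the partition release future.
     * @param waitTimeoutMs Stand-in for 2 * networkTimeout.
     * @param txTimeoutOnPmeMs Stand-in for txTimeoutOnPartitionMapExchange; 0 disables escalation.
     */
    public static void awaitRelease(CompletableFuture<Void> partReleaseFut, long waitTimeoutMs,
        long txTimeoutOnPmeMs) throws Exception {
        // If escalation is disabled, behave as if transactions were already rolled back.
        boolean txRolledBack = txTimeoutOnPmeMs <= 0;
        long startNanos = System.nanoTime();
        long nextDumpTime = 0;

        while (true) {
            // Wait no longer than the escalation deadline while it is still pending.
            long curTimeout = txRolledBack ? waitTimeoutMs : Math.min(txTimeoutOnPmeMs, waitTimeoutMs);

            try {
                partReleaseFut.get(curTimeout, TimeUnit.MILLISECONDS);

                return; // All ongoing updates on the old topology finished.
            }
            catch (TimeoutException ignored) {
                long now = System.currentTimeMillis();

                // Periodically dump what we are still waiting for.
                if (nextDumpTime <= now) {
                    System.err.println("Still waiting for partition release: " + partReleaseFut);

                    nextDumpTime = now + waitTimeoutMs;
                }

                long passedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);

                // Escalate once: the real code calls IgniteTxManager#rollbackOnTopologyChange here.
                if (!txRolledBack && passedMs >= txTimeoutOnPmeMs) {
                    txRolledBack = true;

                    System.err.println("Rolling back transactions that block the topology switch");
                }
            }
        }
    }
}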

Example 2 with GridCacheMvccCandidate

Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.

The class IgniteTxLocalAdapter, method updateExplicitVersion.

/**
 * Updates explicit version for tx entry based on current entry lock owner.
 *
 * @param txEntry Tx entry to update.
 * @param entry Entry.
 * @throws GridCacheEntryRemovedException If entry was concurrently removed.
 */
protected void updateExplicitVersion(IgniteTxEntry txEntry, GridCacheEntryEx entry) throws GridCacheEntryRemovedException {
    if (!entry.context().isDht()) {
        // All put operations must wait for async locks to complete,
        // so it is safe to get acquired locks.
        GridCacheMvccCandidate explicitCand = entry.localOwner();
        if (explicitCand == null)
            explicitCand = cctx.mvcc().explicitLock(threadId(), entry.txKey());
        if (explicitCand != null) {
            GridCacheVersion explicitVer = explicitCand.version();
            boolean locCand = false;
            if (explicitCand.nearLocal() || explicitCand.local())
                locCand = cctx.localNodeId().equals(explicitCand.nodeId());
            else if (explicitCand.dhtLocal())
                locCand = cctx.localNodeId().equals(explicitCand.otherNodeId());
            if (!explicitVer.equals(xidVer) && explicitCand.isHeldByThread(threadId) && !explicitCand.tx() && locCand) {
                txEntry.explicitVersion(explicitVer);
                if (explicitVer.isLess(minVer))
                    minVer = explicitVer;
            }
        }
    }
}
Also used: GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion), GridCacheMvccCandidate (org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate)
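
updateExplicitVersion consults the entry's local lock owner first and only falls back to the thread-bound explicit lock on a miss. The sketch below isolates that two-step lookup; LockCandidate and both maps are hypothetical stand-ins for GridCacheMvccCandidate and Ignite's MVCC manager, invented for illustration.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Sketch of the two-step owner lookup; every name here is an illustrative stand-in. */
public class ExplicitOwnerLookup {
    /** Simplified candidate: the real GridCacheMvccCandidate also tracks near/dht locality and tx flags. */
    record LockCandidate(long threadId, String nodeId, boolean local) { }

    /** Per-key local lock owners (stand-in for entry.localOwner()). */
    private final Map<String, LockCandidate> entryLocalOwners = new ConcurrentHashMap<>();

    /** Explicit locks held per thread (stand-in for cctx.mvcc().explicitLock(threadId, key)). */
    private final Map<Long, Map<String, LockCandidate>> explicitLocks = new ConcurrentHashMap<>();

    /** Entry-local owner first; fall back to the thread's explicit lock on a miss. */
    LockCandidate explicitCandidate(long threadId, String key) {
        LockCandidate cand = entryLocalOwners.get(key);

        if (cand == null)
            cand = explicitLocks.getOrDefault(threadId, Map.of()).get(key);

        // The real code then verifies the candidate is local, held by this thread,
        // and not created by a transaction before adopting its version.
        return cand;
    }
}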

Example 3 with GridCacheMvccCandidate

Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.

The class GridDhtColocatedLockFuture, method map0.

/**
 * @param keys Keys to map.
 * @param remap Remap flag.
 * @param topLocked Topology locked flag.
 * @throws IgniteCheckedException If mapping failed.
 */
private synchronized void map0(Collection<KeyCacheObject> keys, boolean remap, boolean topLocked) throws IgniteCheckedException {
    try {
        AffinityTopologyVersion topVer = this.topVer;
        assert topVer != null;
        assert topVer.topologyVersion() > 0;
        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " + "(all partition nodes left the grid): " + cctx.name()));
            return;
        }
        boolean clientNode = cctx.kernalContext().clientNode();
        assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
        // First assume this node is primary for all keys passed in.
        if (!clientNode && mapAsPrimary(keys, topVer))
            return;
        mappings = new ArrayDeque<>();
        // Assign keys to primary nodes.
        GridNearLockMapping map = null;
        for (KeyCacheObject key : keys) {
            GridNearLockMapping updated = map(key, map, topVer);
            // If new mapping was created, add to collection.
            if (updated != map) {
                mappings.add(updated);
                if (tx != null && updated.node().isLocal())
                    tx.colocatedLocallyMapped(true);
            }
            map = updated;
        }
        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);
            return;
        }
        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
        boolean hasRmtNodes = false;
        boolean first = true;
        // Create mini futures.
        for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
            GridNearLockMapping mapping = iter.next();
            ClusterNode node = mapping.node();
            Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
            boolean loc = node.equals(cctx.localNode());
            assert !mappedKeys.isEmpty();
            GridNearLockRequest req = null;
            Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
            for (KeyCacheObject key : mappedKeys) {
                IgniteTxKey txKey = cctx.txKey(key);
                GridDistributedCacheEntry entry = null;
                if (tx != null) {
                    IgniteTxEntry txEntry = tx.entry(txKey);
                    if (txEntry != null) {
                        entry = (GridDistributedCacheEntry) txEntry.cached();
                        if (entry != null && loc == entry.detached()) {
                            entry = cctx.colocated().entryExx(key, topVer, true);
                            txEntry.cached(entry);
                        }
                    }
                }
                boolean explicit;
                while (true) {
                    try {
                        if (entry == null)
                            entry = cctx.colocated().entryExx(key, topVer, true);
                        if (!cctx.isAll(entry, filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);
                            onComplete(false, false);
                            return;
                        }
                        assert loc ^ entry.detached() : "Invalid entry [loc=" + loc + ", entry=" + entry + ']';
                        GridCacheMvccCandidate cand = addEntry(entry);
                        // Will either return value from dht cache or null if this is a miss.
                        IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.detached() ? null : ((GridDhtCacheEntry) entry).versionedValue(topVer);
                        GridCacheVersion dhtVer = null;
                        if (val != null) {
                            dhtVer = val.get1();
                            valMap.put(key, val);
                        }
                        if (cand != null && !cand.reentry()) {
                            if (req == null) {
                                boolean clientFirst = false;
                                if (first) {
                                    clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());
                                    first = false;
                                }
                                assert !implicitTx() && !implicitSingleTx() : tx;
                                req = new GridNearLockRequest(
                                    cctx.cacheId(),
                                    topVer,
                                    cctx.nodeId(),
                                    threadId,
                                    futId,
                                    lockVer,
                                    inTx(),
                                    read,
                                    retval,
                                    isolation(),
                                    isInvalidate(),
                                    timeout,
                                    mappedKeys.size(),
                                    inTx() ? tx.size() : mappedKeys.size(),
                                    inTx() && tx.syncMode() == FULL_SYNC,
                                    inTx() ? tx.taskNameHash() : 0,
                                    read ? createTtl : -1L,
                                    read ? accessTtl : -1L,
                                    skipStore,
                                    keepBinary,
                                    clientFirst,
                                    false,
                                    cctx.deploymentEnabled(),
                                    inTx() ? tx.label() : null);
                                mapping.request(req);
                            }
                            distributedKeys.add(key);
                            if (tx != null)
                                tx.addKeyMapping(txKey, mapping.node());
                            // Include DHT version to match remote DHT entry.
                            req.addKeyBytes(key, retval, dhtVer, cctx);
                        }
                        explicit = inTx() && cand == null;
                        if (explicit)
                            tx.addKeyMapping(txKey, mapping.node());
                        break;
                    } catch (GridCacheEntryRemovedException ignored) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                        entry = null;
                    }
                }
                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());
                    assert tx == null || marked;
                }
            }
            if (!distributedKeys.isEmpty()) {
                mapping.distributedKeys(distributedKeys);
                hasRmtNodes |= !mapping.node().isLocal();
            } else {
                assert mapping.request() == null;
                iter.remove();
            }
        }
    } finally {
        // Notify waiters that mappings are ready. See cancel().
        if (tx != null) {
            mappingsReady = true;
            notifyAll();
        }
    }
    proceedMapping();
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteTxEntry (org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry), GridDistributedCacheEntry (org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry), GridNearLockMapping (org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockMapping), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), GridNearLockRequest (org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockRequest), ArrayList (java.util.ArrayList), GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion), ClusterTopologyServerNotFoundException (org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException), GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException), IgniteTxKey (org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey), CacheObject (org.apache.ignite.internal.processors.cache.CacheObject), KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject), GridCacheMvccCandidate (org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate)
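
The mapping stage of map0 reduces to: walk the keys in order and open a new per-node batch whenever the primary changes, reusing the current batch otherwise. A self-contained sketch of that batching follows, with NodeBatch and primaryOf as hypothetical stand-ins for GridNearLockMapping and the affinity lookup.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.function.Function;

/** Sketch of the key-to-primary batching in map0; NodeBatch and primaryOf are stand-ins. */
public class LockMappingSketch {
    /** Simplified per-node mapping (stand-in for GridNearLockMapping). */
    static final class NodeBatch {
        final String nodeId;
        final List<String> keys = new ArrayList<>();

        NodeBatch(String nodeId) {
            this.nodeId = nodeId;
        }
    }

    /** Reuses the current batch while the primary stays the same, mirroring map(key, map, topVer). */
    static Deque<NodeBatch> mapKeys(Iterable<String> keys, Function<String, String> primaryOf) {
        Deque<NodeBatch> mappings = new ArrayDeque<>();

        NodeBatch cur = null;

        for (String key : keys) {
            String primary = primaryOf.apply(key);

            // If a new mapping is needed, create it and add to the collection.
            if (cur == null || !cur.nodeId.equals(primary)) {
                cur = new NodeBatch(primary);

                mappings.add(cur);
            }

            cur.keys.add(key);
        }

        return mappings;
    }
}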

Example 4 with GridCacheMvccCandidate

Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.

The class GridDhtColocatedCache, method unlockAll.

/**
 * {@inheritDoc}
 */
@Override
public void unlockAll(Collection<? extends K> keys) {
    if (keys.isEmpty())
        return;
    try {
        GridCacheVersion ver = null;
        int keyCnt = -1;
        Map<ClusterNode, GridNearUnlockRequest> map = null;
        Collection<KeyCacheObject> locKeys = new ArrayList<>();
        for (K key : keys) {
            KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);
            IgniteTxKey txKey = ctx.txKey(cacheKey);
            GridDistributedCacheEntry entry = peekExx(cacheKey);
            GridCacheMvccCandidate lock = ctx.mvcc().removeExplicitLock(Thread.currentThread().getId(), txKey, null);
            if (lock != null) {
                final AffinityTopologyVersion topVer = lock.topologyVersion();
                assert topVer.compareTo(AffinityTopologyVersion.ZERO) > 0;
                // Send request to remove from remote nodes.
                ClusterNode primary = ctx.affinity().primaryByKey(key, topVer);
                if (primary == null) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to unlock keys (all partition nodes left the grid).");
                    continue;
                }
                if (map == null) {
                    Collection<ClusterNode> affNodes = CU.affinityNodes(ctx, topVer);
                    keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());
                    map = U.newHashMap(affNodes.size());
                }
                if (ver == null)
                    ver = lock.version();
                if (!lock.reentry()) {
                    if (!ver.equals(lock.version()))
                        throw new IgniteCheckedException("Failed to unlock (if keys were locked separately, " + "then they need to be unlocked separately): " + keys);
                    if (!primary.isLocal()) {
                        GridNearUnlockRequest req = map.get(primary);
                        if (req == null) {
                            map.put(primary, req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt, ctx.deploymentEnabled()));
                            req.version(ver);
                        }
                        KeyCacheObject key0 = entry != null ? entry.key() : cacheKey;
                        req.addKey(key0, ctx);
                    } else
                        locKeys.add(cacheKey);
                    if (log.isDebugEnabled())
                        log.debug("Removed lock (will distribute): " + lock);
                } else if (log.isDebugEnabled())
                    log.debug("Current thread still owns lock (or there are no other nodes)" + " [lock=" + lock + ", curThreadId=" + Thread.currentThread().getId() + ']');
            }
        }
        if (ver == null)
            return;
        if (!locKeys.isEmpty())
            removeLocks(ctx.localNodeId(), ver, locKeys, true);
        for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
            ClusterNode n = mapping.getKey();
            GridDistributedUnlockRequest req = mapping.getValue();
            assert !n.isLocal();
            if (!F.isEmpty(req.keys())) {
                try {
                    // We don't wait for reply to this message.
                    ctx.io().send(n, req, ctx.ioPolicy());
                } catch (ClusterTopologyCheckedException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to send unlock request (node has left the grid) [keys=" + req.keys() + ", n=" + n + ", e=" + e + ']');
                } catch (IgniteCheckedException e) {
                    U.error(log, "Failed to send unlock request [keys=" + req.keys() + ", n=" + n + ']', e);
                }
            }
        }
    } catch (IgniteCheckedException ex) {
        U.error(log, "Failed to unlock keys: " + keys, ex);
    }
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), GridDistributedCacheEntry (org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), ArrayList (java.util.ArrayList), GridNearUnlockRequest (org.apache.ignite.internal.processors.cache.distributed.near.GridNearUnlockRequest), GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridDistributedUnlockRequest (org.apache.ignite.internal.processors.cache.distributed.GridDistributedUnlockRequest), IgniteTxKey (org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey), Map (java.util.Map), GridCacheConcurrentMap (org.apache.ignite.internal.processors.cache.GridCacheConcurrentMap), KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject), GridCacheMvccCandidate (org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
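
unlockAll follows a common pattern: unlock locally owned keys directly and batch one request per remote primary instead of sending a message per key. A hedged sketch of just that batching step, where primaryOf stands in for ctx.affinity().primaryByKey and the returned map stands in for the per-node GridNearUnlockRequest map:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

/** Sketch of per-node unlock batching; primaryOf and the string node ids are stand-ins. */
public class UnlockBatcher {
    /**
     * @param keys Keys to unlock.
     * @param primaryOf Resolves a key to its primary node id, or null if the primary left the grid.
     * @param localNodeId This node's id.
     * @param locKeys Output: keys owned locally, unlocked without a network round-trip.
     * @return Keys grouped by remote primary; the caller sends one unlock request per node.
     */
    static Map<String, List<String>> batchUnlocks(Iterable<String> keys,
        Function<String, String> primaryOf, String localNodeId, List<String> locKeys) {
        Map<String, List<String>> perNode = new HashMap<>();

        for (String key : keys) {
            String primary = primaryOf.apply(key);

            // Mirrors the debug-and-skip branch: all partition nodes left the grid.
            if (primary == null)
                continue;

            if (primary.equals(localNodeId))
                locKeys.add(key);
            else
                perNode.computeIfAbsent(primary, n -> new ArrayList<>()).add(key);
        }

        return perNode;
    }
}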

Example 5 with GridCacheMvccCandidate

Use of org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate in project ignite by apache.

The class ContentionClosure, method call.

/**
 * {@inheritDoc}
 */
@Override
public ContentionInfo call() throws Exception {
    final IgniteTxManager tm = ignite.context().cache().context().tm();
    final Collection<IgniteInternalTx> activeTxs = tm.activeTransactions();
    ContentionInfo ci = new ContentionInfo();
    ci.setNode(ignite.localNode());
    ci.setEntries(new ArrayList<>());
    for (IgniteInternalTx tx : activeTxs) {
        if (ci.getEntries().size() == maxPrint)
            break;
        // Show only primary txs.
        if (tx.local()) {
            IgniteTxLocalAdapter tx0 = (IgniteTxLocalAdapter) tx;
            final IgniteTxLocalState state0 = tx0.txState();
            if (!(state0 instanceof IgniteTxStateImpl))
                continue;
            final IgniteTxStateImpl state = (IgniteTxStateImpl) state0;
            final Collection<IgniteTxEntry> entries = state.allEntriesCopy();
            IgniteTxEntry bad = null;
            int qSize = 0;
            for (IgniteTxEntry entry : entries) {
                Collection<GridCacheMvccCandidate> locs;
                GridCacheEntryEx cached = entry.cached();
                while (true) {
                    try {
                        locs = cached.localCandidates();
                        break;
                    } catch (GridCacheEntryRemovedException ignored) {
                        cached = entry.context().cache().entryEx(entry.key());
                    }
                }
                if (locs != null)
                    qSize += locs.size();
                final Collection<GridCacheMvccCandidate> rmts = cached.remoteMvccSnapshot();
                if (rmts != null)
                    qSize += rmts.size();
                if (qSize >= minQueueSize) {
                    bad = entry;
                    break;
                } else
                    qSize = 0;
            }
            if (bad != null) {
                StringBuilder b = new StringBuilder();
                b.append("TxEntry [cacheId=").append(bad.cacheId())
                    .append(", key=").append(bad.key())
                    .append(", queue=").append(qSize)
                    .append(", op=").append(bad.op())
                    .append(", val=").append(bad.value())
                    .append(", tx=").append(CU.txString(tx))
                    .append(", other=[");
                final IgniteTxState st = tx.txState();
                if (st instanceof IgniteTxStateImpl) {
                    IgniteTxStateImpl st0 = (IgniteTxStateImpl) st;
                    final Collection<IgniteTxEntry> cp = st0.allEntriesCopy();
                    for (IgniteTxEntry entry : cp) {
                        if (entry == bad)
                            continue;
                        b.append(entry.toString()).append('\n');
                    }
                }
                b.append("]]");
                ci.getEntries().add(b.toString());
            }
        }
    }
    return ci;
}
Also used: IgniteTxEntry (org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry), IgniteTxStateImpl (org.apache.ignite.internal.processors.cache.transactions.IgniteTxStateImpl), IgniteTxManager (org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager), IgniteTxState (org.apache.ignite.internal.processors.cache.transactions.IgniteTxState), GridCacheEntryEx (org.apache.ignite.internal.processors.cache.GridCacheEntryEx), IgniteInternalTx (org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx), IgniteTxLocalAdapter (org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalAdapter), GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException), IgniteTxLocalState (org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalState), GridCacheMvccCandidate (org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate)
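
The detection logic in ContentionClosure is a per-entry threshold test: sum the local and remote candidate queues for each tx entry and report the first entry whose queue reaches minQueueSize. A reduced sketch, with TxEntryView as a hypothetical stand-in for an IgniteTxEntry together with its candidate counts:

import java.util.List;
import java.util.Optional;

/** Sketch of the contention check; TxEntryView is a stand-in for IgniteTxEntry plus its lock queues. */
public class ContentionScan {
    /** Snapshot of one tx entry's lock queues (local + remote MVCC candidates). */
    record TxEntryView(String key, int localCandidates, int remoteCandidates) { }

    /** Returns the first entry whose combined lock queue reaches the threshold, mirroring the "bad" entry above. */
    static Optional<TxEntryView> findContended(List<TxEntryView> entries, int minQueueSize) {
        for (TxEntryView e : entries) {
            // The queue size is computed per entry; below-threshold entries are skipped.
            int qSize = e.localCandidates() + e.remoteCandidates();

            if (qSize >= minQueueSize)
                return Optional.of(e);
        }

        return Optional.empty();
    }
}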

Aggregations

GridCacheMvccCandidate (org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate): 41 usages
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 17 usages
GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion): 15 usages
Nullable (org.jetbrains.annotations.Nullable): 14 usages
GridCacheMvcc (org.apache.ignite.internal.processors.cache.GridCacheMvcc): 13 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 11 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 11 usages
CacheObject (org.apache.ignite.internal.processors.cache.CacheObject): 11 usages
GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException): 10 usages
ArrayList (java.util.ArrayList): 9 usages
Map (java.util.Map): 9 usages
CacheLockCandidates (org.apache.ignite.internal.processors.cache.CacheLockCandidates): 9 usages
IgniteTxEntry (org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry): 8 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 6 usages
IgniteTxKey (org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey): 6 usages
UUID (java.util.UUID): 5 usages
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 5 usages
GridCacheEntryEx (org.apache.ignite.internal.processors.cache.GridCacheEntryEx): 5 usages
GridDistributedCacheEntry (org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry): 5 usages
Collection (java.util.Collection): 4 usages