Example 1 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in the Apache Ignite project.

From class GridClusterStateProcessor, method start().

/** {@inheritDoc} */
@Override
public void start(boolean activeOnStart) throws IgniteCheckedException {
    super.start(activeOnStart);
    globalState = activeOnStart ? ACTIVE : INACTIVE;
    cacheProc = ctx.cache();
    sharedCtx = cacheProc.context();
    sharedCtx.io().addHandler(0, GridChangeGlobalStateMessageResponse.class, new CI2<UUID, GridChangeGlobalStateMessageResponse>() {

        @Override
        public void apply(UUID nodeId, GridChangeGlobalStateMessageResponse msg) {
            processChangeGlobalStateResponse(nodeId, msg);
        }
    });
    ctx.discovery().setCustomEventListener(ChangeGlobalStateMessage.class, new CustomEventListener<ChangeGlobalStateMessage>() {

        @Override
        public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, ChangeGlobalStateMessage msg) {
            assert topVer != null;
            assert snd != null;
            assert msg != null;
            boolean activate = msg.activate();
            ChangeGlobalStateContext actx = lastCgsCtx;
            if (actx != null && globalState == TRANSITION) {
                GridChangeGlobalStateFuture f = cgsLocFut.get();
                if (log.isDebugEnabled())
                    log.debug("Concurrent " + prettyStr(activate) + " [id=" + ctx.localNodeId() + " topVer=" + topVer + " actx=" + actx + ", msg=" + msg + "]");
                if (f != null && f.requestId.equals(msg.requestId()))
                    f.onDone(new IgniteCheckedException("Concurrent change state, now in progress=" + (activate) + ", initiatingNodeId=" + actx.initiatingNodeId + ", you try=" + (prettyStr(activate)) + ", locNodeId=" + ctx.localNodeId()));
                msg.concurrentChangeState();
            } else {
                if (log.isInfoEnabled())
                    log.info("Create " + prettyStr(activate) + " context [id=" + ctx.localNodeId() + " topVer=" + topVer + ", reqId=" + msg.requestId() + ", initiatingNodeId=" + msg.initiatorNodeId() + "]");
                lastCgsCtx = new ChangeGlobalStateContext(msg.requestId(), msg.initiatorNodeId(), msg.getDynamicCacheChangeBatch(), msg.activate());
                globalState = TRANSITION;
            }
        }
    });
    ctx.event().addLocalEventListener(lsr, EVT_NODE_LEFT, EVT_NODE_FAILED);
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), ChangeGlobalStateMessage (org.apache.ignite.internal.processors.cache.ChangeGlobalStateMessage), UUID (java.util.UUID), GridChangeGlobalStateMessageResponse (org.apache.ignite.internal.processors.cache.GridChangeGlobalStateMessageResponse)
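
A ClusterNode also surfaces in the public API whenever discovery events are consumed, which is the user-facing counterpart of the internal listener above. A minimal sketch, assuming a freshly started node and remembering that discovery event types must be enabled explicitly in the configuration before they can be listened to:

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.EventType;

public class DiscoveryListenerSketch {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // Discovery events are not recorded by default and must be listed here to be delivered.
        cfg.setIncludeEventTypes(EventType.EVT_NODE_JOINED, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);

        Ignite ignite = Ignition.start(cfg);

        // Each discovery event carries the ClusterNode that joined, left or failed.
        ignite.events().localListen(evt -> {
            ClusterNode node = ((DiscoveryEvent) evt).eventNode();

            System.out.println("Topology change: node=" + node.id() + ", addrs=" + node.addresses());

            return true; // Keep the listener registered.
        }, EventType.EVT_NODE_JOINED, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);
    }
}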

Example 2 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in the Apache Ignite project.

From class TcpCommunicationSpi, method safeHandshake().

/**
     * Performs a handshake in a timeout-safe way.
     *
     * @param client Client.
     * @param recovery Recovery descriptor if recovery handshake is used, otherwise {@code null}.
     * @param rmtNodeId Remote node ID.
     * @param timeout Timeout for handshake.
     * @param sslMeta Session meta.
     * @param handshakeConnIdx Non-null connection index if it needs to be sent in the handshake.
     * @throws IgniteCheckedException If handshake failed or wasn't completed within the timeout.
     * @return Handshake response.
     */
@SuppressWarnings("ThrowFromFinallyBlock")
private <T> long safeHandshake(T client, @Nullable GridNioRecoveryDescriptor recovery, UUID rmtNodeId, long timeout, GridSslMeta sslMeta, @Nullable Integer handshakeConnIdx) throws IgniteCheckedException {
    HandshakeTimeoutObject<T> obj = new HandshakeTimeoutObject<>(client, U.currentTimeMillis() + timeout);
    addTimeoutObject(obj);
    long rcvCnt = 0;
    try {
        if (client instanceof GridCommunicationClient)
            ((GridCommunicationClient) client).doHandshake(new HandshakeClosure(rmtNodeId));
        else {
            SocketChannel ch = (SocketChannel) client;
            boolean success = false;
            try {
                BlockingSslHandler sslHnd = null;
                ByteBuffer buf;
                if (isSslEnabled()) {
                    assert sslMeta != null;
                    sslHnd = new BlockingSslHandler(sslMeta.sslEngine(), ch, directBuf, ByteOrder.nativeOrder(), log);
                    if (!sslHnd.handshake())
                        throw new IgniteCheckedException("SSL handshake is not completed.");
                    ByteBuffer handBuff = sslHnd.applicationBuffer();
                    if (handBuff.remaining() < NodeIdMessage.MESSAGE_FULL_SIZE) {
                        buf = ByteBuffer.allocate(1000);
                        int read = ch.read(buf);
                        if (read == -1)
                            throw new IgniteCheckedException("Failed to read remote node ID (connection closed).");
                        buf.flip();
                        buf = sslHnd.decode(buf);
                    } else
                        buf = handBuff;
                } else {
                    buf = ByteBuffer.allocate(NodeIdMessage.MESSAGE_FULL_SIZE);
                    for (int i = 0; i < NodeIdMessage.MESSAGE_FULL_SIZE; ) {
                        int read = ch.read(buf);
                        if (read == -1)
                            throw new IgniteCheckedException("Failed to read remote node ID (connection closed).");
                        i += read;
                    }
                }
                UUID rmtNodeId0 = U.bytesToUuid(buf.array(), Message.DIRECT_TYPE_SIZE);
                if (!rmtNodeId.equals(rmtNodeId0))
                    throw new IgniteCheckedException("Remote node ID is not as expected [expected=" + rmtNodeId + ", rcvd=" + rmtNodeId0 + ']');
                else if (log.isDebugEnabled())
                    log.debug("Received remote node ID: " + rmtNodeId0);
                if (isSslEnabled()) {
                    assert sslHnd != null;
                    ch.write(sslHnd.encrypt(ByteBuffer.wrap(U.IGNITE_HEADER)));
                } else
                    ch.write(ByteBuffer.wrap(U.IGNITE_HEADER));
                ClusterNode locNode = getLocalNode();
                if (locNode == null)
                    throw new IgniteCheckedException("Local node has not been started or " + "fully initialized [isStopping=" + getSpiContext().isStopping() + ']');
                if (recovery != null) {
                    HandshakeMessage msg;
                    int msgSize = HandshakeMessage.MESSAGE_FULL_SIZE;
                    if (handshakeConnIdx != null) {
                        msg = new HandshakeMessage2(locNode.id(), recovery.incrementConnectCount(), recovery.received(), handshakeConnIdx);
                        msgSize += 4;
                    } else {
                        msg = new HandshakeMessage(locNode.id(), recovery.incrementConnectCount(), recovery.received());
                    }
                    if (log.isDebugEnabled())
                        log.debug("Writing handshake message [locNodeId=" + locNode.id() + ", rmtNode=" + rmtNodeId + ", msg=" + msg + ']');
                    buf = ByteBuffer.allocate(msgSize);
                    buf.order(ByteOrder.nativeOrder());
                    boolean written = msg.writeTo(buf, null);
                    assert written;
                    buf.flip();
                    if (isSslEnabled()) {
                        assert sslHnd != null;
                        ch.write(sslHnd.encrypt(buf));
                    } else
                        ch.write(buf);
                } else {
                    if (isSslEnabled()) {
                        assert sslHnd != null;
                        ch.write(sslHnd.encrypt(ByteBuffer.wrap(nodeIdMessage().nodeIdBytesWithType)));
                    } else
                        ch.write(ByteBuffer.wrap(nodeIdMessage().nodeIdBytesWithType));
                }
                if (recovery != null) {
                    if (log.isDebugEnabled())
                        log.debug("Waiting for handshake [rmtNode=" + rmtNodeId + ']');
                    if (isSslEnabled()) {
                        assert sslHnd != null;
                        buf = ByteBuffer.allocate(1000);
                        buf.order(ByteOrder.nativeOrder());
                        ByteBuffer decode = ByteBuffer.allocate(2 * buf.capacity());
                        decode.order(ByteOrder.nativeOrder());
                        for (int i = 0; i < RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE; ) {
                            int read = ch.read(buf);
                            if (read == -1)
                                throw new IgniteCheckedException("Failed to read remote node recovery handshake " + "(connection closed).");
                            buf.flip();
                            ByteBuffer decode0 = sslHnd.decode(buf);
                            i += decode0.remaining();
                            decode = appendAndResizeIfNeeded(decode, decode0);
                            buf.clear();
                        }
                        decode.flip();
                        rcvCnt = decode.getLong(Message.DIRECT_TYPE_SIZE);
                        if (decode.limit() > RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE) {
                            decode.position(RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE);
                            sslMeta.decodedBuffer(decode);
                        }
                        ByteBuffer inBuf = sslHnd.inputBuffer();
                        if (inBuf.position() > 0)
                            sslMeta.encodedBuffer(inBuf);
                    } else {
                        buf = ByteBuffer.allocate(RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE);
                        buf.order(ByteOrder.nativeOrder());
                        for (int i = 0; i < RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE; ) {
                            int read = ch.read(buf);
                            if (read == -1)
                                throw new IgniteCheckedException("Failed to read remote node recovery handshake " + "(connection closed).");
                            i += read;
                        }
                        rcvCnt = buf.getLong(Message.DIRECT_TYPE_SIZE);
                    }
                    if (log.isDebugEnabled())
                        log.debug("Received handshake message [rmtNode=" + rmtNodeId + ", rcvCnt=" + rcvCnt + ']');
                    if (rcvCnt == -1) {
                        if (log.isDebugEnabled())
                            log.debug("Connection rejected, will retry client creation [rmtNode=" + rmtNodeId + ']');
                    } else
                        success = true;
                } else
                    success = true;
            } catch (IOException e) {
                if (log.isDebugEnabled())
                    log.debug("Failed to read from channel: " + e);
                throw new IgniteCheckedException("Failed to read from channel.", e);
            } finally {
                if (!success)
                    U.closeQuiet(ch);
            }
        }
    } finally {
        boolean cancelled = obj.cancel();
        if (cancelled)
            removeTimeoutObject(obj);
        // Ignoring whatever happened after timeout - reporting only timeout event.
        if (!cancelled)
            throw new HandshakeTimeoutException("Failed to perform handshake due to timeout (consider increasing " + "'connectionTimeout' configuration property).");
    }
    return rcvCnt;
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), SocketChannel (java.nio.channels.SocketChannel), BlockingSslHandler (org.apache.ignite.internal.util.nio.ssl.BlockingSslHandler), IOException (java.io.IOException), GridCommunicationClient (org.apache.ignite.internal.util.nio.GridCommunicationClient), ByteBuffer (java.nio.ByteBuffer), IpcEndpoint (org.apache.ignite.internal.util.ipc.IpcEndpoint), IpcSharedMemoryServerEndpoint (org.apache.ignite.internal.util.ipc.shmem.IpcSharedMemoryServerEndpoint), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), EVT_NODE_LEFT (org.apache.ignite.events.EventType.EVT_NODE_LEFT), LT (org.apache.ignite.internal.util.typedef.internal.LT), UUID (java.util.UUID)
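
The handshake above verifies the remote ClusterNode identity by reading its UUID off the wire and comparing it with the expected one. At the public API level the analogous check is a lookup in the cluster group; the helper below is a hedged sketch (its name and signature are illustrative, not Ignite API), and ClusterGroup#node(UUID) can be used as a built-in shortcut for the same lookup:

import java.util.UUID;

import org.apache.ignite.Ignite;
import org.apache.ignite.cluster.ClusterNode;

public class NodeIdCheckSketch {
    /** Returns the remote node with the given ID, or {@code null} if it is no longer in the topology. */
    static ClusterNode findRemoteNode(Ignite ignite, UUID expectedId) {
        for (ClusterNode node : ignite.cluster().forRemotes().nodes()) {
            if (node.id().equals(expectedId))
                return node;
        }

        return null;
    }
}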

Example 3 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in the Apache Ignite project.

From class TxOptimisticDeadlockDetectionTest, method doTestDeadlock().

/**
     * @throws Exception If failed.
     */
private void doTestDeadlock(final int txCnt, final boolean loc, boolean lockPrimaryFirst, final boolean clientTx, final IgniteClosure<Integer, Object> transformer) throws Exception {
    log.info(">>> Test deadlock [txCnt=" + txCnt + ", loc=" + loc + ", lockPrimaryFirst=" + lockPrimaryFirst + ", clientTx=" + clientTx + ", transformer=" + transformer.getClass().getName() + ']');
    TestCommunicationSpi.init(txCnt);
    final AtomicInteger threadCnt = new AtomicInteger();
    final CyclicBarrier barrier = new CyclicBarrier(txCnt);
    final AtomicReference<TransactionDeadlockException> deadlockErr = new AtomicReference<>();
    final List<List<Integer>> keySets = generateKeys(txCnt, loc, !lockPrimaryFirst);
    final Set<Integer> involvedKeys = new GridConcurrentHashSet<>();
    final Set<Integer> involvedLockedKeys = new GridConcurrentHashSet<>();
    final Set<IgniteInternalTx> involvedTxs = new GridConcurrentHashSet<>();
    IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {

        @Override
        public void run() {
            int threadNum = threadCnt.incrementAndGet();
            Ignite ignite = loc ? ignite(0) : ignite(clientTx ? threadNum - 1 + txCnt : threadNum - 1);
            IgniteCache<Object, Integer> cache = ignite.cache(CACHE_NAME);
            List<Integer> keys = keySets.get(threadNum - 1);
            int txTimeout = 500 + txCnt * 100;
            try (Transaction tx = ignite.transactions().txStart(OPTIMISTIC, REPEATABLE_READ, txTimeout, 0)) {
                IgniteInternalTx tx0 = ((TransactionProxyImpl) tx).tx();
                involvedTxs.add(tx0);
                Integer key = keys.get(0);
                involvedKeys.add(key);
                Object k;
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode().id() + ", tx=" + tx.xid() + ", key=" + transformer.apply(key) + ']');
                cache.put(transformer.apply(key), 0);
                involvedLockedKeys.add(key);
                barrier.await();
                key = keys.get(1);
                ClusterNode primaryNode = ((IgniteCacheProxy) cache).context().affinity().primaryByKey(key, NONE);
                List<Integer> primaryKeys = primaryKeys(grid(primaryNode).cache(CACHE_NAME), 5, key + (100 * threadNum));
                Map<Object, Integer> entries = new HashMap<>();
                involvedKeys.add(key);
                entries.put(transformer.apply(key), 0);
                for (Integer i : primaryKeys) {
                    involvedKeys.add(i);
                    entries.put(transformer.apply(i), 1);
                    k = transformer.apply(i + 13);
                    involvedKeys.add(i + 13);
                    entries.put(k, 2);
                }
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode().id() + ", tx=" + tx.xid() + ", entries=" + entries + ']');
                cache.putAll(entries);
                tx.commit();
            } catch (Throwable e) {
                log.info("Expected exception: " + e);
                e.printStackTrace(System.out);
                // At least one stack trace should contain TransactionDeadlockException.
                if (hasCause(e, TransactionTimeoutException.class) && hasCause(e, TransactionDeadlockException.class)) {
                    if (deadlockErr.compareAndSet(null, cause(e, TransactionDeadlockException.class))) {
                        log.info("At least one stack trace should contain " + TransactionDeadlockException.class.getSimpleName());
                        e.printStackTrace(System.out);
                    }
                }
            }
        }
    }, loc ? 2 : txCnt, "tx-thread");
    try {
        fut.get();
    } catch (IgniteCheckedException e) {
        U.error(null, "Unexpected exception", e);
        fail();
    }
    U.sleep(1000);
    TransactionDeadlockException deadlockE = deadlockErr.get();
    assertNotNull("Failed to detect deadlock", deadlockE);
    boolean fail = false;
    // Check transactions, futures and entry locks state.
    for (int i = 0; i < NODES_CNT * 2; i++) {
        Ignite ignite = ignite(i);
        int cacheId = ((IgniteCacheProxy) ignite.cache(CACHE_NAME)).context().cacheId();
        GridCacheSharedContext<Object, Object> cctx = ((IgniteKernal) ignite).context().cache().context();
        IgniteTxManager txMgr = cctx.tm();
        Collection<IgniteInternalTx> activeTxs = txMgr.activeTransactions();
        for (IgniteInternalTx tx : activeTxs) {
            Collection<IgniteTxEntry> entries = tx.allEntries();
            for (IgniteTxEntry entry : entries) {
                if (entry.cacheId() == cacheId) {
                    fail = true;
                    U.error(log, "Transaction still exists: " + "\n" + tx.xidVersion() + "\n" + tx.nearXidVersion() + "\n nodeId=" + cctx.localNodeId() + "\n tx=" + tx);
                }
            }
        }
        Collection<IgniteInternalFuture<?>> futs = txMgr.deadlockDetectionFutures();
        assertTrue(futs.isEmpty());
        GridCacheAdapter<Object, Integer> intCache = internalCache(i, CACHE_NAME);
        GridCacheConcurrentMap map = intCache.map();
        for (Integer key : involvedKeys) {
            Object key0 = transformer.apply(key);
            KeyCacheObject keyCacheObj = intCache.context().toCacheKeyObject(key0);
            GridCacheMapEntry entry = map.getEntry(keyCacheObj);
            if (entry != null)
                assertNull("Entry still has locks " + entry, entry.mvccAllLocal());
        }
    }
    if (fail)
        fail("Some transactions still exist");
    // Check deadlock report
    String msg = deadlockE.getMessage();
    for (IgniteInternalTx tx : involvedTxs) assertTrue(msg.contains("[txId=" + tx.xidVersion() + ", nodeId=" + tx.nodeId() + ", threadId=" + tx.threadId() + ']'));
    for (Integer key : involvedKeys) {
        if (involvedLockedKeys.contains(key))
            assertTrue(msg.contains("[key=" + transformer.apply(key) + ", cache=" + CACHE_NAME + ']'));
        else
            assertFalse(msg.contains("[key=" + transformer.apply(key)));
    }
}
Also used: TransactionDeadlockException (org.apache.ignite.transactions.TransactionDeadlockException), IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture), GridConcurrentHashSet (org.apache.ignite.internal.util.GridConcurrentHashSet), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), List (java.util.List), ArrayList (java.util.ArrayList), Ignite (org.apache.ignite.Ignite), KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject), ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteKernal (org.apache.ignite.internal.IgniteKernal), GridCacheConcurrentMap (org.apache.ignite.internal.processors.cache.GridCacheConcurrentMap), IgniteCache (org.apache.ignite.IgniteCache), AtomicReference (java.util.concurrent.atomic.AtomicReference), CyclicBarrier (java.util.concurrent.CyclicBarrier), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Transaction (org.apache.ignite.transactions.Transaction), TransactionTimeoutException (org.apache.ignite.transactions.TransactionTimeoutException), GridCacheMapEntry (org.apache.ignite.internal.processors.cache.GridCacheMapEntry), Map (java.util.Map), HashMap (java.util.HashMap)
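
The test resolves the primary ClusterNode for a key through internal cache-context APIs (context().affinity().primaryByKey(...)). With the public Affinity API the same lookup looks roughly like the sketch below; the cache name and method name are assumptions for illustration:

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class PrimaryNodeSketch {
    /** Prints which node is currently primary for the given key. */
    static void printPrimary(Ignite ignite, Integer key) {
        Affinity<Integer> aff = ignite.affinity("myCache"); // Hypothetical cache name.

        ClusterNode primary = aff.mapKeyToNode(key);

        System.out.println("Key " + key + " is primary on " + (primary == null ? "n/a" : primary.id()));
    }
}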

Example 4 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in the Apache Ignite project.

From class TxPessimisticDeadlockDetectionTest, method doTestDeadlock().

/**
     * @throws Exception If failed.
     */
private void doTestDeadlock(final int txCnt, final boolean loc, boolean lockPrimaryFirst, final boolean clientTx, final IgniteClosure<Integer, Object> transformer) throws Exception {
    log.info(">>> Test deadlock [txCnt=" + txCnt + ", loc=" + loc + ", lockPrimaryFirst=" + lockPrimaryFirst + ", clientTx=" + clientTx + ", transformer=" + transformer.getClass().getName() + ']');
    final AtomicInteger threadCnt = new AtomicInteger();
    final CyclicBarrier barrier = new CyclicBarrier(txCnt);
    final AtomicReference<TransactionDeadlockException> deadlockErr = new AtomicReference<>();
    final List<List<Integer>> keySets = generateKeys(txCnt, loc, !lockPrimaryFirst);
    final Set<Integer> involvedKeys = new GridConcurrentHashSet<>();
    final Set<Integer> involvedLockedKeys = new GridConcurrentHashSet<>();
    final Set<IgniteInternalTx> involvedTxs = new GridConcurrentHashSet<>();
    IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {

        @Override
        public void run() {
            int threadNum = threadCnt.incrementAndGet();
            Ignite ignite = loc ? ignite(0) : ignite(clientTx ? threadNum - 1 + txCnt : threadNum - 1);
            IgniteCache<Object, Integer> cache = ignite.cache(CACHE_NAME);
            List<Integer> keys = keySets.get(threadNum - 1);
            int txTimeout = 500 + txCnt * 100;
            try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, txTimeout, 0)) {
                involvedTxs.add(((TransactionProxyImpl) tx).tx());
                Integer key = keys.get(0);
                involvedKeys.add(key);
                Object k;
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode() + ", tx=" + tx + ", key=" + transformer.apply(key) + ']');
                cache.put(transformer.apply(key), 0);
                involvedLockedKeys.add(key);
                barrier.await();
                key = keys.get(1);
                ClusterNode primaryNode = ((IgniteCacheProxy) cache).context().affinity().primaryByKey(key, NONE);
                List<Integer> primaryKeys = primaryKeys(grid(primaryNode).cache(CACHE_NAME), 5, key + (100 * threadNum));
                Map<Object, Integer> entries = new HashMap<>();
                involvedKeys.add(key);
                entries.put(transformer.apply(key), 0);
                for (Integer i : primaryKeys) {
                    involvedKeys.add(i);
                    entries.put(transformer.apply(i), 1);
                    k = transformer.apply(i + 13);
                    involvedKeys.add(i + 13);
                    entries.put(k, 2);
                }
                log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode() + ", tx=" + tx + ", entries=" + entries + ']');
                cache.putAll(entries);
                tx.commit();
            } catch (Throwable e) {
                // At least one stack trace should contain TransactionDeadlockException.
                if (hasCause(e, TransactionTimeoutException.class) && hasCause(e, TransactionDeadlockException.class)) {
                    if (deadlockErr.compareAndSet(null, cause(e, TransactionDeadlockException.class)))
                        U.error(log, "At least one stack trace should contain " + TransactionDeadlockException.class.getSimpleName(), e);
                }
            }
        }
    }, loc ? 2 : txCnt, "tx-thread");
    try {
        fut.get();
    } catch (IgniteCheckedException e) {
        U.error(null, "Unexpected exception", e);
        fail();
    }
    U.sleep(1000);
    TransactionDeadlockException deadlockE = deadlockErr.get();
    assertNotNull(deadlockE);
    boolean fail = false;
    // Check transactions, futures and entry locks state.
    for (int i = 0; i < NODES_CNT * 2; i++) {
        Ignite ignite = ignite(i);
        int cacheId = ((IgniteCacheProxy) ignite.cache(CACHE_NAME)).context().cacheId();
        GridCacheSharedContext<Object, Object> cctx = ((IgniteKernal) ignite).context().cache().context();
        IgniteTxManager txMgr = cctx.tm();
        Collection<IgniteInternalTx> activeTxs = txMgr.activeTransactions();
        for (IgniteInternalTx tx : activeTxs) {
            Collection<IgniteTxEntry> entries = tx.allEntries();
            for (IgniteTxEntry entry : entries) {
                if (entry.cacheId() == cacheId) {
                    fail = true;
                    U.error(log, "Transaction still exists: " + "\n" + tx.xidVersion() + "\n" + tx.nearXidVersion() + "\n nodeId=" + cctx.localNodeId() + "\n tx=" + tx);
                }
            }
        }
        Collection<IgniteInternalFuture<?>> futs = txMgr.deadlockDetectionFutures();
        assertTrue(futs.isEmpty());
        GridCacheAdapter<Object, Integer> intCache = internalCache(i, CACHE_NAME);
        GridCacheConcurrentMap map = intCache.map();
        for (Integer key : involvedKeys) {
            Object key0 = transformer.apply(key);
            KeyCacheObject keyCacheObj = intCache.context().toCacheKeyObject(key0);
            GridCacheMapEntry entry = map.getEntry(keyCacheObj);
            if (entry != null)
                assertNull("Entry still has locks " + entry, entry.mvccAllLocal());
        }
    }
    if (fail)
        fail("Some transactions still exist");
    // Check deadlock report
    String msg = deadlockE.getMessage();
    for (IgniteInternalTx tx : involvedTxs) assertTrue(msg.contains("[txId=" + tx.xidVersion() + ", nodeId=" + tx.nodeId() + ", threadId=" + tx.threadId() + ']'));
    for (Integer key : involvedKeys) {
        if (involvedLockedKeys.contains(key))
            assertTrue(msg.contains("[key=" + transformer.apply(key) + ", cache=" + CACHE_NAME + ']'));
        else
            assertFalse(msg.contains("[key=" + transformer.apply(key)));
    }
}
Also used: TransactionDeadlockException (org.apache.ignite.transactions.TransactionDeadlockException), IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture), GridConcurrentHashSet (org.apache.ignite.internal.util.GridConcurrentHashSet), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), ArrayList (java.util.ArrayList), List (java.util.List), Ignite (org.apache.ignite.Ignite), KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject), ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteKernal (org.apache.ignite.internal.IgniteKernal), GridCacheConcurrentMap (org.apache.ignite.internal.processors.cache.GridCacheConcurrentMap), IgniteCache (org.apache.ignite.IgniteCache), AtomicReference (java.util.concurrent.atomic.AtomicReference), CyclicBarrier (java.util.concurrent.CyclicBarrier), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Transaction (org.apache.ignite.transactions.Transaction), TransactionTimeoutException (org.apache.ignite.transactions.TransactionTimeoutException), GridCacheMapEntry (org.apache.ignite.internal.processors.cache.GridCacheMapEntry), HashMap (java.util.HashMap), Map (java.util.Map)
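
The pessimistic variant follows the same pattern, again turning a key into its primary ClusterNode and then generating more keys owned by that node via the test framework's primaryKeys(...) helper. A hedged approximation of that helper with the public Affinity API, assuming the node owns at least one primary partition (names and parameters are illustrative):

import java.util.ArrayList;
import java.util.List;

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class PrimaryKeysSketch {
    /** Collects {@code cnt} integer keys for which {@code node} is primary, starting the search at {@code from}. */
    static List<Integer> primaryKeys(Ignite ignite, String cacheName, ClusterNode node, int cnt, int from) {
        Affinity<Integer> aff = ignite.affinity(cacheName);

        List<Integer> keys = new ArrayList<>(cnt);

        // Walk the integer key space until enough keys mapping to the given node are found.
        for (int key = from; keys.size() < cnt; key++) {
            if (aff.isPrimary(node, key))
                keys.add(key);
        }

        return keys;
    }
}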

Example 5 with ClusterNode

Use of org.apache.ignite.cluster.ClusterNode in the Apache Ignite project.

From class CacheUtils, method foreach().

/**
     * @param cacheName Cache name.
     * @param fun An operation that accepts a cache entry and processes it.
     * @param keyFilter Cache keys filter.
     * @param <K> Cache key object type.
     * @param <V> Cache value object type.
     */
public static <K, V> void foreach(String cacheName, IgniteConsumer<CacheEntry<K, V>> fun, IgnitePredicate<K> keyFilter) {
    bcast(cacheName, () -> {
        Ignite ignite = Ignition.localIgnite();
        IgniteCache<K, V> cache = ignite.getOrCreateCache(cacheName);
        int partsCnt = ignite.affinity(cacheName).partitions();
        // Use affinity in the filter of the scan query; otherwise the consumer would be invoked on every node, which is wrong.
        Affinity affinity = ignite.affinity(cacheName);
        ClusterNode locNode = ignite.cluster().localNode();
        // Iterate over all partitions. Some of them will be stored on this local node.
        for (int part = 0; part < partsCnt; part++) {
            int p = part;
            // Query returns an empty cursor if this partition is not stored on this node.
            for (Cache.Entry<K, V> entry : cache.query(new ScanQuery<K, V>(part, (k, v) -> affinity.mapPartitionToNode(p) == locNode && (keyFilter == null || keyFilter.apply(k))))) fun.accept(new CacheEntry<>(entry, cache));
        }
    });
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), IgniteConsumer (org.apache.ignite.ml.math.functions.IgniteConsumer), IgniteFunction (org.apache.ignite.ml.math.functions.IgniteFunction), Affinity (org.apache.ignite.cache.affinity.Affinity), SparseDistributedMatrix (org.apache.ignite.ml.math.impls.matrix.SparseDistributedMatrix), SparseDistributedMatrixStorage (org.apache.ignite.ml.math.impls.storage.matrix.SparseDistributedMatrixStorage), IgniteCallable (org.apache.ignite.lang.IgniteCallable), IgnitePredicate (org.apache.ignite.lang.IgnitePredicate), ValueMapper (org.apache.ignite.ml.math.ValueMapper), Map (java.util.Map), Cache (javax.cache.Cache), ClusterGroup (org.apache.ignite.cluster.ClusterGroup), KeyMapper (org.apache.ignite.ml.math.KeyMapper), CacheEntryImpl (org.apache.ignite.internal.processors.cache.CacheEntryImpl), Collection (java.util.Collection), IgniteRunnable (org.apache.ignite.lang.IgniteRunnable), Ignite (org.apache.ignite.Ignite), BinaryOperator (java.util.function.BinaryOperator), IgniteCache (org.apache.ignite.IgniteCache), IgniteBiTuple (org.apache.ignite.lang.IgniteBiTuple), Ignition (org.apache.ignite.Ignition), IgniteBiFunction (org.apache.ignite.ml.math.functions.IgniteBiFunction), Collections (java.util.Collections), ScanQuery (org.apache.ignite.cache.query.ScanQuery), IgniteUuid (org.apache.ignite.lang.IgniteUuid)
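
The scan-query filter above compares the ClusterNode returned by mapPartitionToNode(p) with the local node so that every entry is processed on exactly one node (comparing node IDs with equals() would be a slightly more defensive version of the same check). A hedged alternative sketch of the same idea asks the Affinity API for the partitions the local node owns as primary and scans only those; the class and method names are illustrative:

import javax.cache.Cache;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.lang.IgniteRunnable;

public class LocalPartitionScanSketch {
    /** Broadcasts a closure to the data nodes of the cache and scans only partitions owned as primary locally. */
    static void forEachPrimaryEntry(Ignite ignite, String cacheName) {
        ignite.compute(ignite.cluster().forDataNodes(cacheName)).broadcast((IgniteRunnable) () -> {
            Ignite locIgnite = Ignition.localIgnite();

            IgniteCache<Object, Object> cache = locIgnite.cache(cacheName);
            Affinity<Object> aff = locIgnite.affinity(cacheName);
            ClusterNode locNode = locIgnite.cluster().localNode();

            // Only the partitions this node currently holds as primary, so each entry is visited once cluster-wide.
            for (int part : aff.primaryPartitions(locNode)) {
                try (QueryCursor<Cache.Entry<Object, Object>> cur = cache.query(new ScanQuery<Object, Object>(part))) {
                    for (Cache.Entry<Object, Object> e : cur)
                        System.out.println("part=" + part + ", key=" + e.getKey() + ", val=" + e.getValue());
                }
            }
        });
    }
}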

Aggregations

ClusterNode (org.apache.ignite.cluster.ClusterNode): 1104
UUID (java.util.UUID): 281
ArrayList (java.util.ArrayList): 280
Test (org.junit.Test): 276
Ignite (org.apache.ignite.Ignite): 239
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 239
HashMap (java.util.HashMap): 184
Map (java.util.Map): 182
List (java.util.List): 165
IgniteException (org.apache.ignite.IgniteException): 147
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 147
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 143
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 126
Collection (java.util.Collection): 113
Message (org.apache.ignite.plugin.extensions.communication.Message): 106
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 87
HashSet (java.util.HashSet): 85
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 82
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration): 81
IgniteEx (org.apache.ignite.internal.IgniteEx): 81