
Example 46 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class GridDhtPartitionTopologyImpl method nodes.

/**
 * {@inheritDoc}
 */
@Override
public List<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    AffinityAssignment affAssignment = grp.affinity().cachedAffinity(topVer);
    List<ClusterNode> affNodes = affAssignment.get(p);
    List<ClusterNode> nodes = nodes0(p, affAssignment, affNodes);
    return nodes != null ? nodes : affNodes;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) AffinityAssignment(org.apache.ignite.internal.processors.affinity.AffinityAssignment)
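
For context, here is a minimal sketch of reaching the same partition-to-nodes mapping through the public Affinity API instead of the internal topology; the cache name "myCache" and the partition number are illustrative assumptions.

import java.util.Collection;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class PartitionNodesExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Assumes a cache named "myCache" already exists on the node.
            Affinity<Object> aff = ignite.affinity("myCache");

            int part = 0;

            // Primary node first, then backups, for the given partition.
            Collection<ClusterNode> nodes = aff.mapPartitionToPrimaryAndBackups(part);

            System.out.println("Partition " + part + " is owned by: " + nodes);
        }
    }
}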

Example 47 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class DataStreamerImpl method load0.

/**
 * @param entries Entries.
 * @param resFut Result future.
 * @param activeKeys Active keys.
 * @param remaps Remaps count.
 */
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut, @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps) {
    try {
        assert entries != null;
        final boolean remap = remaps > 0;
        if (!remap) {
            // Failed data should be processed prior to new data.
            acquireRemapSemaphore();
        }
        if (!isWarningPrinted) {
            synchronized (this) {
                if (!allowOverwrite() && !isWarningPrinted) {
                    U.warn(log, "Data streamer will not overwrite existing cache entries for better performance " + "(to change, set allowOverwrite to true)");
                }
                isWarningPrinted = true;
            }
        }
        Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();
        boolean initPda = ctx.deploy().enabled() && jobPda == null;
        GridCacheAdapter cache = ctx.cache().internalCache(cacheName);
        if (cache == null)
            throw new IgniteCheckedException("Cache not created or already destroyed.");
        GridCacheContext cctx = cache.context();
        GridCacheGateway gate = null;
        AffinityTopologyVersion topVer;
        if (!cctx.isLocal())
            topVer = ctx.cache().context().exchange().lastTopologyFuture().get();
        else
            topVer = ctx.cache().context().exchange().readyAffinityVersion();
        List<List<ClusterNode>> assignments = cctx.affinity().assignments(topVer);
        if (!allowOverwrite() && !cctx.isLocal()) {
            // Cases where cctx required.
            gate = cctx.gate();
            gate.enter();
        }
        try {
            for (DataStreamerEntry entry : entries) {
                List<ClusterNode> nodes;
                try {
                    KeyCacheObject key = entry.getKey();
                    assert key != null;
                    if (initPda) {
                        if (cacheObjCtx.addDeploymentInfo())
                            jobPda = new DataStreamerPda(key.value(cacheObjCtx, false), entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null, rcvr);
                        else if (rcvr != null)
                            jobPda = new DataStreamerPda(rcvr);
                        initPda = false;
                    }
                    if (key.partition() == -1)
                        key.partition(cctx.affinity().partition(key, false));
                    nodes = nodes(key, topVer, cctx);
                } catch (IgniteCheckedException e) {
                    resFut.onDone(e);
                    return;
                }
                if (F.isEmpty(nodes)) {
                    resFut.onDone(new ClusterTopologyException("Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));
                    return;
                }
                for (ClusterNode node : nodes) {
                    Collection<DataStreamerEntry> col = mappings.get(node);
                    if (col == null)
                        mappings.put(node, col = new ArrayList<>());
                    col.add(entry);
                }
            }
            for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
                final UUID nodeId = e.getKey().id();
                Buffer buf = bufMappings.get(nodeId);
                if (buf == null) {
                    Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));
                    if (old != null)
                        buf = old;
                }
                final Collection<DataStreamerEntry> entriesForNode = e.getValue();
                IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {

                    @Override
                    public void apply(IgniteInternalFuture<?> t) {
                        try {
                            t.get();
                            if (activeKeys != null) {
                                for (DataStreamerEntry e : entriesForNode) activeKeys.remove(new KeyCacheObjectWrapper(e.getKey()));
                                if (activeKeys.isEmpty())
                                    resFut.onDone();
                            } else {
                                assert entriesForNode.size() == 1;
                                // That has been a single key,
                                // so complete result future right away.
                                resFut.onDone();
                            }
                        } catch (IgniteClientDisconnectedCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');
                            resFut.onDone(e1);
                        } catch (IgniteCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');
                            if (cancelled) {
                                resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " + DataStreamerImpl.this, e1));
                            } else if (remaps + 1 > maxRemapCnt) {
                                resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " + remaps, e1));
                            } else {
                                try {
                                    remapSem.acquire();
                                    final Runnable r = new Runnable() {

                                        @Override
                                        public void run() {
                                            try {
                                                if (cancelled)
                                                    closedException();
                                                load0(entriesForNode, resFut, activeKeys, remaps + 1);
                                            } catch (Throwable ex) {
                                                resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed. ", ex));
                                            } finally {
                                                remapSem.release();
                                            }
                                        }
                                    };
                                    dataToRemap.add(r);
                                    if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
                                        ctx.closure().callLocalSafe(new GPC<Boolean>() {

                                            @Override
                                            public Boolean call() {
                                                boolean locked = true;
                                                while (locked || !dataToRemap.isEmpty()) {
                                                    if (!locked && !remapOwning.compareAndSet(false, true))
                                                        return false;
                                                    try {
                                                        Runnable r = dataToRemap.poll();
                                                        if (r != null)
                                                            r.run();
                                                    } finally {
                                                        if (!dataToRemap.isEmpty())
                                                            locked = true;
                                                        else {
                                                            remapOwning.set(false);
                                                            locked = false;
                                                        }
                                                    }
                                                }
                                                return true;
                                            }
                                        }, true);
                                    }
                                } catch (InterruptedException e2) {
                                    resFut.onDone(e2);
                                }
                            }
                        }
                    }
                };
                GridCompoundFuture opFut = new SilentCompoundFuture();
                opFut.listen(lsnr);
                final List<GridFutureAdapter<?>> futs;
                try {
                    futs = buf.update(entriesForNode, topVer, assignments, opFut, remap);
                    opFut.markInitialized();
                } catch (IgniteInterruptedCheckedException e1) {
                    resFut.onDone(e1);
                    return;
                }
                if (ctx.discovery().node(nodeId) == null) {
                    if (bufMappings.remove(nodeId, buf)) {
                        final Buffer buf0 = buf;
                        waitAffinityAndRun(new Runnable() {

                            @Override
                            public void run() {
                                buf0.onNodeLeft();
                                if (futs != null) {
                                    Throwable ex = new ClusterTopologyCheckedException("Failed to wait for request completion (node has left): " + nodeId);
                                    for (int i = 0; i < futs.size(); i++) futs.get(i).onDone(ex);
                                }
                            }
                        }, ctx.discovery().topologyVersion(), false);
                    }
                }
            }
        } finally {
            if (gate != null)
                gate.leave();
        }
    } catch (Exception ex) {
        resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
    }
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) IgniteInterruptedException(org.apache.ignite.IgniteInterruptedException) GridCompoundFuture(org.apache.ignite.internal.util.future.GridCompoundFuture) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheAdapter(org.apache.ignite.internal.processors.cache.GridCacheAdapter) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) ArrayList(java.util.ArrayList) List(java.util.List) GridCacheGateway(org.apache.ignite.internal.processors.cache.GridCacheGateway) UUID(java.util.UUID) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) ClusterNode(org.apache.ignite.cluster.ClusterNode) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) IgniteDataStreamerTimeoutException(org.apache.ignite.IgniteDataStreamerTimeoutException) ClusterTopologyServerNotFoundException(org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException) IgniteClientDisconnectedCheckedException(org.apache.ignite.internal.IgniteClientDisconnectedCheckedException) IgniteException(org.apache.ignite.IgniteException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) IgniteFutureTimeoutCheckedException(org.apache.ignite.internal.IgniteFutureTimeoutCheckedException) ClusterTopologyException(org.apache.ignite.cluster.ClusterTopologyException) CacheException(javax.cache.CacheException) ClusterTopologyCheckedException(org.apache.ignite.internal.cluster.ClusterTopologyCheckedException) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) Collection(java.util.Collection) IgniteInClosure(org.apache.ignite.lang.IgniteInClosure) Map(java.util.Map) ConcurrentMap(java.util.concurrent.ConcurrentMap)
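
For context, a minimal sketch of driving this internal loading path through the public IgniteDataStreamer API; the cache name "myCache" and the key range are illustrative assumptions, and the cache is assumed to already exist.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class StreamerExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("myCache")) {
                // Default is false, which triggers the one-time warning seen in load0().
                streamer.allowOverwrite(true);

                for (int i = 0; i < 10_000; i++)
                    streamer.addData(i, "value-" + i);

                // Flush remaining buffered entries; close() would also do this implicitly.
                streamer.flush();
            }
        }
    }
}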

Example 48 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class GridClientPartitionTopology method updateLocal.

/**
 * Updates value for single partition.
 *
 * @param p Partition.
 * @param nodeId Node ID.
 * @param state State.
 * @param updateSeq Update sequence.
 */
private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.localNodeId());
    // If a node joins, get the topology as of the time the node joined.
    ClusterNode oldest = discoCache.oldestAliveServerNode();
    // If this node became the oldest node.
    if (cctx.localNode().equals(oldest)) {
        long seq = node2part.updateSequence();
        if (seq != updateSeq) {
            if (seq > updateSeq) {
                if (this.updateSeq.get() < seq) {
                    // Update global counter if necessary.
                    boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);
                    assert b : "Invalid update sequence [updateSeq=" + updateSeq + ", seq=" + seq + ", curUpdateSeq=" + this.updateSeq.get() + ", node2part=" + node2part.toFullString() + ']';
                    updateSeq = seq + 1;
                } else
                    updateSeq = seq;
            }
            node2part.updateSequence(updateSeq);
        }
    }
    GridDhtPartitionMap map = node2part.get(nodeId);
    if (map == null)
        node2part.put(nodeId, map = new GridDhtPartitionMap(nodeId, updateSeq, topVer, GridPartitionStateMap.EMPTY, false));
    map.updateSequence(updateSeq, topVer);
    map.put(p, state);
    Set<UUID> ids = part2node.get(p);
    if (ids == null)
        part2node.put(p, ids = U.newHashSet(3));
    ids.add(nodeId);
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridDhtPartitionMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap) UUID(java.util.UUID)
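
The update-sequence handling above hinges on a compare-and-set bump of a shared AtomicLong. Below is a standalone sketch of that pattern, detached from Ignite; the advanceTo helper is hypothetical and only illustrates the technique.

import java.util.concurrent.atomic.AtomicLong;

public class UpdateSeqExample {
    private final AtomicLong updateSeq = new AtomicLong();

    /** Advances the shared sequence to at least {@code seq + 1} and returns the value to use. */
    long advanceTo(long seq) {
        while (true) {
            long cur = updateSeq.get();

            if (cur >= seq + 1)
                return cur; // Another thread already advanced past us.

            if (updateSeq.compareAndSet(cur, seq + 1))
                return seq + 1;
        }
    }

    public static void main(String[] args) {
        UpdateSeqExample ex = new UpdateSeqExample();

        System.out.println(ex.advanceTo(41)); // 42
        System.out.println(ex.advanceTo(10)); // Still 42: never moves backwards.
    }
}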

Example 49 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class DataStructuresProcessor method compatibleCache.

/**
 * @param cfg Collection configuration.
 * @param grpName Group name.
 * @return Compatible internal cache for the given collection configuration.
 * @throws IgniteCheckedException If failed.
 */
@Nullable
private IgniteInternalCache compatibleCache(CollectionConfiguration cfg, String grpName) throws IgniteCheckedException {
    String cacheName = DS_CACHE_NAME_PREFIX + cfg.getAtomicityMode() + "_" + cfg.getCacheMode() + "_" + cfg.getBackups() + "@" + grpName;
    IgniteInternalCache cache = ctx.cache().cache(cacheName);
    if (cache == null) {
        ctx.cache().dynamicStartCache(cacheConfiguration(cfg, cacheName, grpName), cacheName, null, CacheType.DATA_STRUCTURES, false, false, true, true).get();
    } else {
        IgnitePredicate<ClusterNode> cacheNodeFilter = cache.context().group().nodeFilter();
        String clsName1 = cacheNodeFilter != null ? cacheNodeFilter.getClass().getName() : CacheConfiguration.IgniteAllNodesPredicate.class.getName();
        String clsName2 = cfg.getNodeFilter() != null ? cfg.getNodeFilter().getClass().getName() : CacheConfiguration.IgniteAllNodesPredicate.class.getName();
        if (!clsName1.equals(clsName2))
            throw new IgniteCheckedException("Could not add collection to group " + grpName + " because of different node filters [existing=" + clsName1 + ", new=" + clsName2 + "]");
    }
    cache = ctx.cache().getOrStartCache(cacheName);
    assert cache != null;
    return cache;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteInternalCache(org.apache.ignite.internal.processors.cache.IgniteInternalCache) Nullable(org.jetbrains.annotations.Nullable)
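
For context, a minimal public-API sketch showing the CollectionConfiguration settings that compatibleCache() folds into the data-structures cache name; the queue name, group name and concrete settings are illustrative assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteQueue;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.configuration.CollectionConfiguration;

public class CollectionConfigExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CollectionConfiguration colCfg = new CollectionConfiguration();

            colCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
            colCfg.setCacheMode(CacheMode.PARTITIONED);
            colCfg.setBackups(1);
            colCfg.setGroupName("myGroup");

            // Collections with the same atomicity mode, cache mode, backups and group
            // share one underlying cache, as resolved by compatibleCache().
            IgniteQueue<String> queue = ignite.queue("myQueue", 0, colCfg);

            queue.add("hello");
            System.out.println(queue.poll());
        }
    }
}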

Example 50 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class GridClientPartitionTopology method beforeExchange0.

/**
 * @param loc Local node.
 * @param exchFut Exchange future.
 */
private void beforeExchange0(ClusterNode loc, GridDhtPartitionsExchangeFuture exchFut) {
    GridDhtPartitionExchangeId exchId = exchFut.exchangeId();
    if (exchFut.context().events().hasServerLeft()) {
        List<DiscoveryEvent> evts0 = exchFut.context().events().events();
        for (int i = 0; i < evts0.size(); i++) {
            DiscoveryEvent evt = evts0.get(i);
            if (ExchangeDiscoveryEvents.serverLeftEvent(evt))
                removeNode(evt.eventNode().id());
        }
    }
    // If a node joins, get the topology as of the time the node joined.
    ClusterNode oldest = discoCache.oldestAliveServerNode();
    assert oldest != null;
    if (log.isDebugEnabled())
        log.debug("Partition map beforeExchange [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
    long updateSeq = this.updateSeq.incrementAndGet();
    // If this is the oldest node.
    if (oldest.id().equals(loc.id()) || exchFut.dynamicCacheGroupStarted(grpId)) {
        if (node2part == null) {
            node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq);
            if (log.isDebugEnabled())
                log.debug("Created brand new full topology map on oldest node [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
        } else if (!node2part.valid()) {
            node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq, node2part, false);
            if (log.isDebugEnabled())
                log.debug("Created new full topology map on oldest node [exchId=" + exchId + ", fullMap=" + node2part + ']');
        } else if (!node2part.nodeId().equals(loc.id())) {
            node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq, node2part, false);
            if (log.isDebugEnabled())
                log.debug("Copied old map into new map on oldest node (previous oldest node left) [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
        }
    }
    consistencyCheck();
    if (log.isDebugEnabled())
        log.debug("Partition map after beforeExchange [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) DiscoveryEvent(org.apache.ignite.events.DiscoveryEvent) GridDhtPartitionExchangeId(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionExchangeId) GridDhtPartitionFullMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap)
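
For context, a sketch of observing server-left events through the public IgniteEvents API, loosely mirroring the server-left handling above; the listener body and configuration are illustrative assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgnitePredicate;

public class NodeLeftListenerExample {
    public static void main(String[] args) throws Exception {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // Event recording is disabled by default; enable the discovery events we need.
        cfg.setIncludeEventTypes(EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);

        try (Ignite ignite = Ignition.start(cfg)) {
            IgnitePredicate<DiscoveryEvent> lsnr = evt -> {
                System.out.println("Node left topology: " + evt.eventNode().id());
                return true; // Keep listening.
            };

            ignite.events().localListen(lsnr, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);

            Thread.sleep(60_000); // Keep the node up long enough to observe departures.
        }
    }
}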

Aggregations

ClusterNode (org.apache.ignite.cluster.ClusterNode): 1104
UUID (java.util.UUID): 281
ArrayList (java.util.ArrayList): 280
Test (org.junit.Test): 276
Ignite (org.apache.ignite.Ignite): 239
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 239
HashMap (java.util.HashMap): 184
Map (java.util.Map): 182
List (java.util.List): 165
IgniteException (org.apache.ignite.IgniteException): 147
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 147
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 143
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 126
Collection (java.util.Collection): 113
Message (org.apache.ignite.plugin.extensions.communication.Message): 106
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 87
HashSet (java.util.HashSet): 85
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 82
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration): 81
IgniteEx (org.apache.ignite.internal.IgniteEx): 81