Example 16 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class GridDhtAffinityAssignmentResponse method nodes.

/**
 * @param disco Discovery manager.
 * @param assignmentIds Assignment node IDs.
 * @return Assignment nodes.
 */
private List<List<ClusterNode>> nodes(GridDiscoveryManager disco, List<List<UUID>> assignmentIds) {
    if (assignmentIds != null) {
        List<List<ClusterNode>> assignment = new ArrayList<>(assignmentIds.size());
        for (int i = 0; i < assignmentIds.size(); i++) {
            List<UUID> ids = assignmentIds.get(i);
            List<ClusterNode> nodes = new ArrayList<>(ids.size());
            for (int j = 0; j < ids.size(); j++) {
                ClusterNode node = disco.node(topVer, ids.get(j));
                assert node != null;
                nodes.add(node);
            }
            assignment.add(nodes);
        }
        return assignment;
    }
    return null;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) ArrayList(java.util.ArrayList) List(java.util.List) UUID(java.util.UUID)
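
The helper above resolves node IDs through the internal GridDiscoveryManager for a specific topology version. Below is a minimal sketch of the same ID-to-node lookup done against the public cluster API; the class name NodeLookupSketch and the method resolve() are illustrative, not part of Ignite, and ClusterGroup#node(UUID) has no notion of topology history, so departed or unknown nodes simply come back as null.

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import org.apache.ignite.Ignite;
import org.apache.ignite.cluster.ClusterNode;

public class NodeLookupSketch {
    /** Resolves each ID to a ClusterNode via the public cluster API; IDs of departed nodes map to null. */
    public static List<ClusterNode> resolve(Ignite ignite, List<UUID> ids) {
        List<ClusterNode> nodes = new ArrayList<>(ids.size());

        for (UUID id : ids)
            nodes.add(ignite.cluster().node(id));

        return nodes;
    }
}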

Example 17 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class GridDhtPreloader method assign.

/** {@inheritDoc} */
@Override
public GridDhtPreloaderAssignments assign(GridDhtPartitionsExchangeFuture exchFut) {
    // No assignments for disabled preloader.
    GridDhtPartitionTopology top = cctx.dht().topology();
    if (!cctx.rebalanceEnabled() || !cctx.shared().kernalContext().state().active())
        return new GridDhtPreloaderAssignments(exchFut, top.topologyVersion());
    int partCnt = cctx.affinity().partitions();
    assert exchFut.forcePreload() || exchFut.dummyReassign() || exchFut.exchangeId().topologyVersion().equals(top.topologyVersion()) : "Topology version mismatch [exchId=" + exchFut.exchangeId() + ", cache=" + cctx.name() + ", topVer=" + top.topologyVersion() + ']';
    GridDhtPreloaderAssignments assigns = new GridDhtPreloaderAssignments(exchFut, top.topologyVersion());
    AffinityTopologyVersion topVer = assigns.topologyVersion();
    for (int p = 0; p < partCnt; p++) {
        if (cctx.shared().exchange().hasPendingExchange()) {
            if (log.isDebugEnabled())
                log.debug("Skipping assignments creation, exchange worker has pending assignments: " + exchFut.exchangeId());
            assigns.cancelled(true);
            return assigns;
        }
        // If partition belongs to local node.
        if (cctx.affinity().partitionLocalNode(p, topVer)) {
            GridDhtLocalPartition part = top.localPartition(p, topVer, true);
            assert part != null;
            assert part.id() == p;
            if (part.state() != MOVING) {
                if (log.isDebugEnabled())
                    log.debug("Skipping partition assignment (state is not MOVING): " + part);
                // Skip to the next partition; only MOVING partitions get rebalance assignments.
                continue;
            }
            Collection<ClusterNode> picked = pickedOwners(p, topVer);
            if (picked.isEmpty()) {
                top.own(part);
                if (cctx.events().isRecordable(EVT_CACHE_REBALANCE_PART_DATA_LOST)) {
                    DiscoveryEvent discoEvt = exchFut.discoveryEvent();
                    cctx.events().addPreloadEvent(p, EVT_CACHE_REBALANCE_PART_DATA_LOST, discoEvt.eventNode(), discoEvt.type(), discoEvt.timestamp());
                }
                if (log.isDebugEnabled())
                    log.debug("Owning partition as there are no other owners: " + part);
            } else {
                ClusterNode n = F.rand(picked);
                GridDhtPartitionDemandMessage msg = assigns.get(n);
                if (msg == null) {
                    assigns.put(n, msg = new GridDhtPartitionDemandMessage(top.updateSequence(), exchFut.exchangeId().topologyVersion(), cctx.cacheId()));
                }
                msg.addPartition(p);
            }
        }
    }
    return assigns;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) DiscoveryEvent(org.apache.ignite.events.DiscoveryEvent) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition)
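
assign() above iterates all partitions, picks a random owner among the candidates and batches partitions into one demand message per node. The grouping step can be sketched with the public Affinity API as follows; PartitionGroupingSketch and byPrimary() are hypothetical names, the cache name is a placeholder, and the primary-node mapping stands in for the internal pickedOwners() logic.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class PartitionGroupingSketch {
    /** Groups partition IDs of the given cache by their current primary node. */
    public static Map<ClusterNode, List<Integer>> byPrimary(Ignite ignite, String cacheName) {
        Affinity<Object> aff = ignite.affinity(cacheName);
        Map<ClusterNode, List<Integer>> res = new HashMap<>();

        for (int p = 0; p < aff.partitions(); p++) {
            ClusterNode primary = aff.mapPartitionToNode(p);

            if (primary != null)
                res.computeIfAbsent(primary, n -> new ArrayList<>()).add(p);
        }

        return res;
    }
}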

Example 18 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class GridDhtPreloader method processForceKeysRequest0.

/**
 * @param node Node that originated the request.
 * @param msg Force keys message.
 */
private void processForceKeysRequest0(ClusterNode node, GridDhtForceKeysRequest msg) {
    if (!enterBusy())
        return;
    try {
        ClusterNode loc = cctx.localNode();
        GridDhtForceKeysResponse res = new GridDhtForceKeysResponse(cctx.cacheId(), msg.futureId(), msg.miniId(), cctx.deploymentEnabled());
        for (KeyCacheObject k : msg.keys()) {
            int p = cctx.affinity().partition(k);
            GridDhtLocalPartition locPart = top.localPartition(p, AffinityTopologyVersion.NONE, false);
            // If this node is no longer an owner.
            if (locPart == null && !top.owners(p).contains(loc)) {
                res.addMissed(k);
                continue;
            }
            GridCacheEntryEx entry = null;
            while (true) {
                try {
                    entry = cctx.dht().entryEx(k);
                    entry.unswap();
                    GridCacheEntryInfo info = entry.info();
                    if (info == null) {
                        assert entry.obsolete() : entry;
                        continue;
                    }
                    if (!info.isNew())
                        res.addInfo(info);
                    cctx.evicts().touch(entry, msg.topologyVersion());
                    break;
                } catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry: " + k);
                } catch (GridDhtInvalidPartitionException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Local node is no longer an owner: " + p);
                    res.addMissed(k);
                    break;
                }
            }
        }
        if (log.isDebugEnabled())
            log.debug("Sending force key response [node=" + node.id() + ", res=" + res + ']');
        cctx.io().send(node, res, cctx.ioPolicy());
    } catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received force key request form failed node (will ignore) [nodeId=" + node.id() + ", req=" + msg + ']');
    } catch (IgniteCheckedException e) {
        U.error(log, "Failed to reply to force key request [nodeId=" + node.id() + ", req=" + msg + ']', e);
    } finally {
        leaveBusy();
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridCacheEntryInfo(org.apache.ignite.internal.processors.cache.GridCacheEntryInfo) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) GridCacheEntryEx(org.apache.ignite.internal.processors.cache.GridCacheEntryEx) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) ClusterTopologyCheckedException(org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
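
processForceKeysRequest0() maps each requested key to a partition and reports the key as missed when the local node no longer owns it. Expressed through the public Affinity API, the ownership check itself looks roughly like the sketch below; OwnershipCheckSketch is a made-up name and the cache name is a placeholder.

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class OwnershipCheckSketch {
    /** Returns true if the local node is primary or backup for the given key. */
    public static boolean locallyOwned(Ignite ignite, String cacheName, Object key) {
        Affinity<Object> aff = ignite.affinity(cacheName);
        ClusterNode loc = ignite.cluster().localNode();

        // aff.partition(key) would give the raw partition ID, as in the snippet above.
        return aff.isPrimaryOrBackup(loc, key);
    }
}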

Example 19 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class GroupTrainerBaseProcessorTask method map.

/**
 * {@inheritDoc}
 */
@Nullable
@Override
public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, @Nullable Void arg) throws IgniteException {
    Map<ComputeJob, ClusterNode> res = new HashMap<>();
    for (ClusterNode node : subgrid) {
        BaseLocalProcessorJob<K, V, T, R> job = createJob();
        res.put(job, node);
    }
    return res;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) ComputeJob(org.apache.ignite.compute.ComputeJob) HashMap(java.util.HashMap) Nullable(org.jetbrains.annotations.Nullable)
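
This map() implementation is the standard ComputeTask pattern of one job per node in the subgrid. A self-contained sketch of the same pattern with the public compute API follows; PerNodeTaskSketch is an illustrative task that does no real per-node work, and its reduce() step simply counts completed jobs.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.ignite.IgniteException;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.compute.ComputeTaskAdapter;
import org.jetbrains.annotations.Nullable;

/** One job per node in the subgrid; reduce() counts how many jobs completed. */
public class PerNodeTaskSketch extends ComputeTaskAdapter<Void, Integer> {
    @Nullable @Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid,
        @Nullable Void arg) throws IgniteException {
        Map<ComputeJob, ClusterNode> res = new HashMap<>();

        for (ClusterNode node : subgrid) {
            res.put(new ComputeJobAdapter() {
                @Override public Object execute() {
                    return null; // Per-node work would go here.
                }
            }, node);
        }

        return res;
    }

    @Nullable @Override public Integer reduce(List<ComputeJobResult> results) throws IgniteException {
        return results.size();
    }
}

Such a task would be launched with ignite.compute().execute(new PerNodeTaskSketch(), null).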

Example 20 with ClusterNode

use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.

the class GridReduceQueryExecutor method update.

/**
 * @param schemaName Schema name.
 * @param cacheIds Cache ids.
 * @param selectQry Select query.
 * @param params SQL parameters.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param pageSize Page size.
 * @param timeoutMillis Timeout.
 * @param parts Partitions.
 * @param isReplicatedOnly Whether query uses only replicated caches.
 * @param cancel Cancel state.
 * @return Update result, or {@code null} when some map node doesn't support distributed DML.
 */
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params,
    boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly,
    GridQueryCancel cancel) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();
    NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
    final long reqId = qryIdGen.incrementAndGet();
    final GridRunningQueryInfo qryInfo = new GridRunningQueryInfo(reqId, selectQry, GridCacheQueryType.SQL_FIELDS, schemaName, U.currentTimeMillis(), cancel, false);
    Collection<ClusterNode> nodes = nodesParts.nodes();
    if (nodes == null)
        throw new CacheException("Failed to determine nodes participating in the update. " + "Explanation (Retry update once topology recovers).");
    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();
        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }
    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");
            return null;
        }
    }
    final DistributedUpdateRun r = new DistributedUpdateRun(nodes.size(), qryInfo);
    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;
    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;
    GridH2DmlRequest req = new GridH2DmlRequest()
        .requestId(reqId)
        .topologyVersion(topVer)
        .caches(cacheIds)
        .schemaName(schemaName)
        .query(selectQry)
        .pageSize(pageSize)
        .parameters(params)
        .timeout(timeoutMillis)
        .flags(flags);
    updRuns.put(reqId, r);
    boolean release = false;
    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ? nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();
        ExplicitPartitionsSpecializer partsSpec = (parts == null) ? null : new ExplicitPartitionsSpecializer(partsMap);
        final Collection<ClusterNode> finalNodes = nodes;
        cancel.set(new Runnable() {

            @Override
            public void run() {
                r.future().onCancelled();
                send(finalNodes, new GridQueryCancelRequest(reqId), null, false);
            }
        });
        // send() logs the debug message
        if (send(nodes, req, partsSpec, false))
            return r.future().get();
        throw new CacheException("Failed to send update request to participating nodes.");
    } catch (IgniteCheckedException | RuntimeException e) {
        release = true;
        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);
        throw new CacheException("Failed to run update. " + e.getMessage(), e);
    } finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);
        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridH2DmlRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest) GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) CacheException(javax.cache.CacheException) GridRunningQueryInfo(org.apache.ignite.internal.processors.query.GridRunningQueryInfo) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IntArray(org.h2.util.IntArray)
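
The loop over nodes above falls back to normal DML as soon as any map node is older than 2.3.0. That kind of version guard can also be written against the whole cluster up front, as in the sketch below; VersionGuardSketch is an illustrative name and the 2.3.0 threshold is simply the one used in the snippet.

import org.apache.ignite.Ignite;
import org.apache.ignite.cluster.ClusterNode;

public class VersionGuardSketch {
    /** Returns true if every server node in the cluster reports version 2.3.0 or later. */
    public static boolean allServersAtLeast230(Ignite ignite) {
        for (ClusterNode n : ignite.cluster().forServers().nodes()) {
            if (!n.version().greaterThanEqual(2, 3, 0))
                return false;
        }

        return true;
    }
}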

Aggregations

ClusterNode (org.apache.ignite.cluster.ClusterNode): 738
UUID (java.util.UUID): 185
ArrayList (java.util.ArrayList): 180
Ignite (org.apache.ignite.Ignite): 151
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 150
HashMap (java.util.HashMap): 104
Map (java.util.Map): 89
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 86
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 80
List (java.util.List): 79
IgniteException (org.apache.ignite.IgniteException): 68
Collection (java.util.Collection): 67
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 58
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 52
HashSet (java.util.HashSet): 39
CountDownLatch (java.util.concurrent.CountDownLatch): 38
Event (org.apache.ignite.events.Event): 38
DiscoveryEvent (org.apache.ignite.events.DiscoveryEvent): 36
IgniteKernal (org.apache.ignite.internal.IgniteKernal): 35
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration): 34