Search in sources:

Example 1 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class GridCacheMapEntry, method nextPartCounter.

/**
     * @param topVer Topology version.
     * @return Update counter.
     */
private long nextPartCounter(AffinityTopologyVersion topVer) {
    long updateCntr;
    if (!cctx.isLocal() && !isNear()) {
        GridDhtLocalPartition locPart = cctx.topology().localPartition(partition(), topVer, false);
        if (locPart == null)
            return 0;
        updateCntr = locPart.nextUpdateCounter();
    } else
        updateCntr = 0;
    return updateCntr;
}
Also used: GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition)
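For context, nextPartCounter() only produces a real counter for distributed (DHT) caches; LOCAL and near caches fall through to 0. Below is a minimal caller-side sketch, not taken from the Ignite sources: it assumes the same internal cctx and log fields as the example above and that GridDhtLocalPartition exposes id() and updateCounter() in this Ignite version.

/**
 * Sketch only: logs the current update counter of every local partition.
 */
private void dumpPartitionCounters() {
    if (cctx.isLocal())
        return; // LOCAL caches have no per-partition update counters.

    for (GridDhtLocalPartition locPart : cctx.topology().currentLocalPartitions()) {
        if (log.isInfoEnabled())
            log.info("Partition counter [part=" + locPart.id() + ", cntr=" + locPart.updateCounter() + ']');
    }
}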

Example 2 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class IgniteCacheOffheapManagerImpl, method entriesCount.

/** {@inheritDoc} */
@Override
public long entriesCount(boolean primary, boolean backup, AffinityTopologyVersion topVer) throws IgniteCheckedException {
    if (cctx.isLocal())
        return entriesCount(0);
    else {
        ClusterNode locNode = cctx.localNode();
        long cnt = 0;
        for (GridDhtLocalPartition locPart : cctx.topology().currentLocalPartitions()) {
            if (primary) {
                if (cctx.affinity().primaryByPartition(locNode, locPart.id(), topVer)) {
                    cnt += locPart.dataStore().size();
                    continue;
                }
            }
            if (backup) {
                if (cctx.affinity().backupByPartition(locNode, locPart.id(), topVer))
                    cnt += locPart.dataStore().size();
            }
        }
        return cnt;
    }
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition)
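The same primary/backup split is reachable from user code without internal APIs. A hedged sketch using the public IgniteCache.localSizeLong(CachePeekMode...) method; the cache name "myCache" and the ignite instance are placeholders:

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CachePeekMode;

// Sketch: count the entries stored on the local node, split by partition role.
Ignite ignite = Ignition.ignite();
IgniteCache<Integer, String> cache = ignite.cache("myCache");

long primaryCnt = cache.localSizeLong(CachePeekMode.PRIMARY); // entries this node owns as primary
long backupCnt = cache.localSizeLong(CachePeekMode.BACKUP);   // entries this node keeps as backup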

Example 3 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class GridDhtPreloader, method assign.

/** {@inheritDoc} */
@Override
public GridDhtPreloaderAssignments assign(GridDhtPartitionsExchangeFuture exchFut) {
    // No assignments for disabled preloader.
    GridDhtPartitionTopology top = cctx.dht().topology();
    if (!cctx.rebalanceEnabled() || !cctx.shared().kernalContext().state().active())
        return new GridDhtPreloaderAssignments(exchFut, top.topologyVersion());
    int partCnt = cctx.affinity().partitions();
    assert exchFut.forcePreload() || exchFut.dummyReassign() || exchFut.exchangeId().topologyVersion().equals(top.topologyVersion()) : "Topology version mismatch [exchId=" + exchFut.exchangeId() + ", cache=" + cctx.name() + ", topVer=" + top.topologyVersion() + ']';
    GridDhtPreloaderAssignments assigns = new GridDhtPreloaderAssignments(exchFut, top.topologyVersion());
    AffinityTopologyVersion topVer = assigns.topologyVersion();
    for (int p = 0; p < partCnt; p++) {
        if (cctx.shared().exchange().hasPendingExchange()) {
            if (log.isDebugEnabled())
                log.debug("Skipping assignments creation, exchange worker has pending assignments: " + exchFut.exchangeId());
            assigns.cancelled(true);
            return assigns;
        }
        // If partition belongs to local node.
        if (cctx.affinity().partitionLocalNode(p, topVer)) {
            GridDhtLocalPartition part = top.localPartition(p, topVer, true);
            assert part != null;
            assert part.id() == p;
            if (part.state() != MOVING) {
                if (log.isDebugEnabled())
                    log.debug("Skipping partition assignment (state is not MOVING): " + part);
                // Move on to the next partition in the 'for' loop.
                continue;
            }
            Collection<ClusterNode> picked = pickedOwners(p, topVer);
            if (picked.isEmpty()) {
                top.own(part);
                if (cctx.events().isRecordable(EVT_CACHE_REBALANCE_PART_DATA_LOST)) {
                    DiscoveryEvent discoEvt = exchFut.discoveryEvent();
                    cctx.events().addPreloadEvent(p, EVT_CACHE_REBALANCE_PART_DATA_LOST, discoEvt.eventNode(), discoEvt.type(), discoEvt.timestamp());
                }
                if (log.isDebugEnabled())
                    log.debug("Owning partition as there are no other owners: " + part);
            } else {
                ClusterNode n = F.rand(picked);
                GridDhtPartitionDemandMessage msg = assigns.get(n);
                if (msg == null) {
                    assigns.put(n, msg = new GridDhtPartitionDemandMessage(top.updateSequence(), exchFut.exchangeId().topologyVersion(), cctx.cacheId()));
                }
                msg.addPartition(p);
            }
        }
    }
    return assigns;
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), DiscoveryEvent (org.apache.ignite.events.DiscoveryEvent), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition)
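assign() only builds demand messages for partitions that affinity maps to the local node and that are still in the MOVING state. Which partitions affinity assigns to a node can be inspected from user code through the public Affinity API; a minimal sketch with a placeholder cache name:

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

// Sketch: list the partitions the affinity function assigns to the local node.
Ignite ignite = Ignition.ignite();
Affinity<Object> aff = ignite.affinity("myCache");
ClusterNode locNode = ignite.cluster().localNode();

int[] primaryParts = aff.primaryPartitions(locNode); // partitions this node should own as primary
int[] backupParts = aff.backupPartitions(locNode);   // partitions this node should keep as backup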

Example 4 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class GridDhtPreloader, method processForceKeysRequest0.

/**
     * @param node Node that originated the request.
     * @param msg Force keys message.
     */
private void processForceKeysRequest0(ClusterNode node, GridDhtForceKeysRequest msg) {
    if (!enterBusy())
        return;
    try {
        ClusterNode loc = cctx.localNode();
        GridDhtForceKeysResponse res = new GridDhtForceKeysResponse(cctx.cacheId(), msg.futureId(), msg.miniId(), cctx.deploymentEnabled());
        for (KeyCacheObject k : msg.keys()) {
            int p = cctx.affinity().partition(k);
            GridDhtLocalPartition locPart = top.localPartition(p, AffinityTopologyVersion.NONE, false);
            // If this node is no longer an owner.
            if (locPart == null && !top.owners(p).contains(loc)) {
                res.addMissed(k);
                continue;
            }
            GridCacheEntryEx entry = null;
            while (true) {
                try {
                    entry = cctx.dht().entryEx(k);
                    entry.unswap();
                    GridCacheEntryInfo info = entry.info();
                    if (info == null) {
                        assert entry.obsolete() : entry;
                        continue;
                    }
                    if (!info.isNew())
                        res.addInfo(info);
                    cctx.evicts().touch(entry, msg.topologyVersion());
                    break;
                } catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry: " + k);
                } catch (GridDhtInvalidPartitionException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Local node is no longer an owner: " + p);
                    res.addMissed(k);
                    break;
                }
            }
        }
        if (log.isDebugEnabled())
            log.debug("Sending force key response [node=" + node.id() + ", res=" + res + ']');
        cctx.io().send(node, res, cctx.ioPolicy());
    } catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received force key request form failed node (will ignore) [nodeId=" + node.id() + ", req=" + msg + ']');
    } catch (IgniteCheckedException e) {
        U.error(log, "Failed to reply to force key request [nodeId=" + node.id() + ", req=" + msg + ']', e);
    } finally {
        leaveBusy();
    }
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), GridCacheEntryInfo (org.apache.ignite.internal.processors.cache.GridCacheEntryInfo), GridDhtInvalidPartitionException (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException), GridCacheEntryEx (org.apache.ignite.internal.processors.cache.GridCacheEntryEx), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition), KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject), ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
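The per-key routing used above (key to partition, partition to owners) mirrors what the public Affinity API exposes. A hedged sketch of the same lookup from user code; the cache name and key are placeholders:

import java.util.Collection;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

// Sketch: resolve a key to its partition and to the nodes that currently own that partition.
Ignite ignite = Ignition.ignite();
Affinity<Object> aff = ignite.affinity("myCache");

Object key = 42;
int part = aff.partition(key); // partition the key hashes to
Collection<ClusterNode> owners = aff.mapPartitionToPrimaryAndBackups(part); // primary first, then backups

boolean ownedLocally = owners.contains(ignite.cluster().localNode());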

Example 5 with GridDhtLocalPartition

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.

The class GridDhtPartitionSupplier, method handleDemandMessage.

/**
     * @param idx Index.
     * @param id Node UUID.
     * @param d Demand message.
     */
@SuppressWarnings("unchecked")
public void handleDemandMessage(int idx, UUID id, GridDhtPartitionDemandMessage d) {
    assert d != null;
    assert id != null;
    AffinityTopologyVersion cutTop = cctx.affinity().affinityTopologyVersion();
    AffinityTopologyVersion demTop = d.topologyVersion();
    T3<UUID, Integer, AffinityTopologyVersion> scId = new T3<>(id, idx, demTop);
    if (d.updateSequence() == -1) {
        // Demand node requested context cleanup.
        synchronized (scMap) {
            clearContext(scMap.remove(scId), log);
            return;
        }
    }
    if (cutTop.compareTo(demTop) > 0) {
        if (log.isDebugEnabled())
            log.debug("Demand request cancelled [current=" + cutTop + ", demanded=" + demTop + ", from=" + id + ", idx=" + idx + "]");
        return;
    }
    if (log.isDebugEnabled())
        log.debug("Demand request accepted [current=" + cutTop + ", demanded=" + demTop + ", from=" + id + ", idx=" + idx + "]");
    GridDhtPartitionSupplyMessage s = new GridDhtPartitionSupplyMessage(d.updateSequence(), cctx.cacheId(), d.topologyVersion(), cctx.deploymentEnabled());
    ClusterNode node = cctx.discovery().node(id);
    if (node == null)
        // Context will be cleaned at topology change.
        return;
    try {
        SupplyContext sctx;
        synchronized (scMap) {
            sctx = scMap.remove(scId);
            assert sctx == null || d.updateSequence() == sctx.updateSeq;
        }
        // Initial demand request should contain partitions list.
        if (sctx == null && d.partitions() == null)
            return;
        assert !(sctx != null && d.partitions() != null);
        long bCnt = 0;
        SupplyContextPhase phase = SupplyContextPhase.NEW;
        boolean newReq = true;
        long maxBatchesCnt = cctx.config().getRebalanceBatchesPrefetchCount();
        if (sctx != null) {
            phase = sctx.phase;
            maxBatchesCnt = 1;
        } else {
            if (log.isDebugEnabled())
                log.debug("Starting supplying rebalancing [cache=" + cctx.name() + ", fromNode=" + node.id() + ", partitionsCount=" + d.partitions().size() + ", topology=" + d.topologyVersion() + ", updateSeq=" + d.updateSequence() + ", idx=" + idx + "]");
        }
        Iterator<Integer> partIt = sctx != null ? sctx.partIt : d.partitions().iterator();
        while ((sctx != null && newReq) || partIt.hasNext()) {
            int part = sctx != null && newReq ? sctx.part : partIt.next();
            newReq = false;
            GridDhtLocalPartition loc;
            if (sctx != null && sctx.loc != null) {
                loc = sctx.loc;
                assert loc.reservations() > 0;
            } else {
                loc = top.localPartition(part, d.topologyVersion(), false);
                if (loc == null || loc.state() != OWNING || !loc.reserve()) {
                    // Reply with partition of "-1" to let sender know that
                    // this node is no longer an owner.
                    s.missed(part);
                    if (log.isDebugEnabled())
                        log.debug("Requested partition is not owned by local node [part=" + part + ", demander=" + id + ']');
                    continue;
                }
            }
            try {
                boolean partMissing = false;
                if (phase == SupplyContextPhase.NEW)
                    phase = SupplyContextPhase.OFFHEAP;
                if (phase == SupplyContextPhase.OFFHEAP) {
                    IgniteRebalanceIterator iter;
                    if (sctx == null || sctx.entryIt == null) {
                        iter = cctx.offheap().rebalanceIterator(part, d.topologyVersion(), d.partitionCounter(part));
                        if (!iter.historical())
                            s.clean(part);
                    } else
                        iter = (IgniteRebalanceIterator) sctx.entryIt;
                    while (iter.hasNext()) {
                        if (!cctx.affinity().partitionBelongs(node, part, d.topologyVersion())) {
                            // Demander no longer needs this partition,
                            // so we send '-1' partition and move on.
                            s.missed(part);
                            if (log.isDebugEnabled())
                                log.debug("Demanding node does not need requested partition " + "[part=" + part + ", nodeId=" + id + ']');
                            partMissing = true;
                            if (sctx != null) {
                                sctx = new SupplyContext(phase, partIt, null, part, loc, d.updateSequence());
                            }
                            break;
                        }
                        if (s.messageSize() >= cctx.config().getRebalanceBatchSize()) {
                            if (++bCnt >= maxBatchesCnt) {
                                saveSupplyContext(scId, phase, partIt, part, iter, loc, d.topologyVersion(), d.updateSequence());
                                loc = null;
                                reply(node, d, s, scId);
                                return;
                            } else {
                                if (!reply(node, d, s, scId))
                                    return;
                                s = new GridDhtPartitionSupplyMessage(d.updateSequence(), cctx.cacheId(), d.topologyVersion(), cctx.deploymentEnabled());
                            }
                        }
                        CacheDataRow row = iter.next();
                        GridCacheEntryInfo info = new GridCacheEntryInfo();
                        info.key(row.key());
                        info.expireTime(row.expireTime());
                        info.version(row.version());
                        info.value(row.value());
                        if (preloadPred == null || preloadPred.apply(info))
                            s.addEntry0(part, info, cctx);
                        else {
                            if (log.isDebugEnabled())
                                log.debug("Rebalance predicate evaluated to false (will not send " + "cache entry): " + info);
                            continue;
                        }
                    // Need to manually prepare cache message.
                    // TODO GG-11141.
                    //                                if (depEnabled && !prepared) {
                    //                                    ClassLoader ldr = swapEntry.keyClassLoaderId() != null ?
                    //                                        cctx.deploy().getClassLoader(swapEntry.keyClassLoaderId()) :
                    //                                        swapEntry.valueClassLoaderId() != null ?
                    //                                            cctx.deploy().getClassLoader(swapEntry.valueClassLoaderId()) :
                    //                                            null;
                    //
                    //                                    if (ldr == null)
                    //                                        continue;
                    //
                    //                                    if (ldr instanceof GridDeploymentInfo) {
                    //                                        s.prepare((GridDeploymentInfo)ldr);
                    //
                    //                                        prepared = true;
                    //                                    }
                    //                                }
                    }
                    if (partMissing)
                        continue;
                }
                // Mark as last supply message.
                s.last(part);
                phase = SupplyContextPhase.NEW;
                sctx = null;
            } finally {
                if (loc != null)
                    loc.release();
            }
        }
        reply(node, d, s, scId);
        if (log.isDebugEnabled())
            log.debug("Finished supplying rebalancing [cache=" + cctx.name() + ", fromNode=" + node.id() + ", topology=" + d.topologyVersion() + ", updateSeq=" + d.updateSequence() + ", idx=" + idx + "]");
    } catch (IgniteCheckedException e) {
        U.error(log, "Failed to send partition supply message to node: " + id, e);
    } catch (IgniteSpiException e) {
        if (log.isDebugEnabled())
            log.debug("Failed to send message to node (current node is stopping?) [node=" + node.id() + ", msg=" + e.getMessage() + ']');
    }
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), CacheDataRow (org.apache.ignite.internal.processors.cache.database.CacheDataRow), GridCacheEntryInfo (org.apache.ignite.internal.processors.cache.GridCacheEntryInfo), AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion), IgniteRebalanceIterator (org.apache.ignite.internal.processors.cache.IgniteRebalanceIterator), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition), IgniteSpiException (org.apache.ignite.spi.IgniteSpiException), UUID (java.util.UUID), T3 (org.apache.ignite.internal.util.typedef.T3)
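The supplier consults two rebalance tuning knobs that appear above: getRebalanceBatchSize() caps the size of a single supply message, and getRebalanceBatchesPrefetchCount() limits how many batches are generated ahead of demander acknowledgements. A configuration sketch with illustrative, not recommended, values:

import org.apache.ignite.configuration.CacheConfiguration;

// Sketch: rebalance batching settings read by GridDhtPartitionSupplier.handleDemandMessage().
CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("myCache");

ccfg.setRebalanceBatchSize(512 * 1024);   // max supply message size, in bytes (example value)
ccfg.setRebalanceBatchesPrefetchCount(4); // batches sent ahead of acknowledgements (example value)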

Aggregations

GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition): 25 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 8 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 8 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 6 usages
GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology): 6 usages
CacheDataRow (org.apache.ignite.internal.processors.cache.database.CacheDataRow): 5 usages
Map (java.util.Map): 4 usages
GridCacheEntryInfo (org.apache.ignite.internal.processors.cache.GridCacheEntryInfo): 4 usages
Collection (java.util.Collection): 3 usages
HashMap (java.util.HashMap): 3 usages
UUID (java.util.UUID): 3 usages
Ignite (org.apache.ignite.Ignite): 3 usages
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration): 3 usages
IgniteKernal (org.apache.ignite.internal.IgniteKernal): 3 usages
GridCacheEntryEx (org.apache.ignite.internal.processors.cache.GridCacheEntryEx): 3 usages
GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException): 3 usages
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 3 usages
GridDhtInvalidPartitionException (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException): 3 usages
T2 (org.apache.ignite.internal.util.typedef.T2): 3 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2 usages