
Example 21 with GridDhtPartitionTopology

use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

the class GridDhtCacheAdapter method processForceKeysRequest0.

/**
 * @param node Node that originated the request.
 * @param msg Force keys message.
 */
private void processForceKeysRequest0(ClusterNode node, GridDhtForceKeysRequest msg) {
    try {
        ClusterNode loc = ctx.localNode();
        GridDhtForceKeysResponse res = new GridDhtForceKeysResponse(ctx.cacheId(), msg.futureId(), msg.miniId(), ctx.deploymentEnabled());
        GridDhtPartitionTopology top = ctx.topology();
        for (KeyCacheObject k : msg.keys()) {
            int p = ctx.affinity().partition(k);
            GridDhtLocalPartition locPart = top.localPartition(p, AffinityTopologyVersion.NONE, false);
            // If this node is no longer an owner.
            if (locPart == null && !top.owners(p).contains(loc)) {
                res.addMissed(k);
                continue;
            }
            GridCacheEntryEx entry;
            while (true) {
                ctx.shared().database().checkpointReadLock();
                try {
                    entry = ctx.dht().entryEx(k);
                    entry.unswap();
                    if (ctx.mvccEnabled()) {
                        List<GridCacheEntryInfo> infos = entry.allVersionsInfo();
                        if (infos == null) {
                            assert entry.obsolete() : entry;
                            continue;
                        }
                        for (int i = 0; i < infos.size(); i++) res.addInfo(infos.get(i));
                    } else {
                        GridCacheEntryInfo info = entry.info();
                        if (info == null) {
                            assert entry.obsolete() : entry;
                            continue;
                        }
                        if (!info.isNew())
                            res.addInfo(info);
                    }
                    entry.touch();
                    break;
                } catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry: " + k);
                } catch (GridDhtInvalidPartitionException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Local node is no longer an owner: " + p);
                    res.addMissed(k);
                    break;
                } finally {
                    ctx.shared().database().checkpointReadUnlock();
                }
            }
        }
        if (log.isDebugEnabled())
            log.debug("Sending force key response [node=" + node.id() + ", res=" + res + ']');
        ctx.io().send(node, res, ctx.ioPolicy());
    } catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received force key request form failed node (will ignore) [nodeId=" + node.id() + ", req=" + msg + ']');
    } catch (IgniteCheckedException e) {
        U.error(log, "Failed to reply to force key request [nodeId=" + node.id() + ", req=" + msg + ']', e);
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridDhtForceKeysResponse(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtForceKeysResponse) GridCacheEntryInfo(org.apache.ignite.internal.processors.cache.GridCacheEntryInfo) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology) GridCacheEntryEx(org.apache.ignite.internal.processors.cache.GridCacheEntryEx) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) ClusterTopologyCheckedException(org.apache.ignite.internal.cluster.ClusterTopologyCheckedException)
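
A note on the pattern above: entry reads may race with concurrent eviction, so every access sits in a while (true) loop that retries on GridCacheEntryRemovedException, and each attempt is bracketed by checkpointReadLock()/checkpointReadUnlock() so the read cannot overlap a checkpoint. A minimal self-contained sketch of that retry-under-lock shape (the Entry and EntryRemovedException types below are illustrative stand-ins, not Ignite API):

import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;

/** Sketch of the retry-on-removed-entry loop used in processForceKeysRequest0. */
public class RetryLoopSketch {
    /** Stand-in for GridCacheEntryRemovedException. */
    static class EntryRemovedException extends Exception {}

    /** Stand-in for a cache entry whose read may observe concurrent removal. */
    interface Entry { String info() throws EntryRemovedException; }

    private final ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock();

    String readInfo(Supplier<Entry> lookup) {
        while (true) {
            checkpointLock.readLock().lock(); // analogous to checkpointReadLock()
            try {
                return lookup.get().info(); // may race with a concurrent remove
            } catch (EntryRemovedException ignore) {
                // Entry vanished between lookup and read: loop and fetch a fresh one.
            } finally {
                checkpointLock.readLock().unlock(); // analogous to checkpointReadUnlock()
            }
        }
    }
}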

Example 22 with GridDhtPartitionTopology

use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

the class GridCachePartitionExchangeManager method processFullPartitionUpdate.

/**
 * @param node Sender cluster node.
 * @param msg Message.
 */
public void processFullPartitionUpdate(ClusterNode node, GridDhtPartitionsFullMessage msg) {
    if (!enterBusy())
        return;
    try {
        if (msg.exchangeId() == null) {
            if (log.isDebugEnabled())
                log.debug("Received full partition update [node=" + node.id() + ", msg=" + msg + ']');
            boolean updated = false;
            Map<Integer, Map<Integer, Long>> partsSizes = msg.partitionSizes(cctx);
            for (Map.Entry<Integer, GridDhtPartitionFullMap> entry : msg.partitions().entrySet()) {
                Integer grpId = entry.getKey();
                CacheGroupContext grp = cctx.cache().cacheGroup(grpId);
                GridDhtPartitionTopology top = null;
                if (grp == null)
                    top = clientTops.get(grpId);
                else if (!grp.isLocal())
                    top = grp.topology();
                if (top != null) {
                    updated |= top.update(null, entry.getValue(), null, msg.partsToReload(cctx.localNodeId(), grpId), partsSizes.getOrDefault(grpId, Collections.emptyMap()), msg.topologyVersion(), null, null);
                }
            }
            if (!cctx.kernalContext().clientNode() && updated) {
                if (log.isDebugEnabled())
                    log.debug("Refresh partitions due to topology update");
                refreshPartitions();
            }
            boolean hasMovingParts = false;
            for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
                if (!grp.isLocal() && grp.topology().hasMovingPartitions()) {
                    hasMovingParts = true;
                    break;
                }
            }
            if (!hasMovingParts)
                cctx.database().releaseHistoryForPreloading();
        } else
            exchangeFuture(msg.exchangeId(), null, null, null, null).onReceiveFullMessage(node, msg);
    } finally {
        leaveBusy();
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology) GridDhtPartitionFullMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap) GridPartitionStateMap(org.apache.ignite.internal.util.GridPartitionStateMap) CachePartitionFullCountersMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionFullCountersMap) Map(java.util.Map) ConcurrentNavigableMap(java.util.concurrent.ConcurrentNavigableMap) IgniteDhtPartitionsToReloadMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtPartitionsToReloadMap) GridDhtPartitionFullMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap) CachePartitionPartialCountersMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionPartialCountersMap) GridDhtPartitionMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap) IgniteDhtPartitionHistorySuppliersMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtPartitionHistorySuppliersMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap)
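
Two details of this handler are easy to miss: a null msg.exchangeId() marks an unsolicited periodic update that is applied directly to each group's topology (falling back to clientTops for groups not started locally), and the updated flag is OR-accumulated across all groups so refreshPartitions() fires at most once per message. A small self-contained sketch of that accumulate-then-act shape (types and names are illustrative, not Ignite API):

import java.util.Map;

/** Sketch: apply per-group updates, then refresh once if anything changed. */
public class AccumulateUpdates {
    /** Stand-in for GridDhtPartitionTopology.update(...). */
    interface Topology { boolean update(Map<Integer, Long> partSizes); }

    static void applyAll(Map<Integer, Topology> topsByGrp, Map<Integer, Map<Integer, Long>> sizesByGrp) {
        boolean updated = false;
        for (Map.Entry<Integer, Topology> e : topsByGrp.entrySet())
            updated |= e.getValue().update(sizesByGrp.getOrDefault(e.getKey(), Map.of()));
        if (updated)
            System.out.println("refreshPartitions()"); // triggered at most once per message
    }
}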

Example 23 with GridDhtPartitionTopology

use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

the class GridCachePartitionExchangeManager method refreshPartitions.

/**
 * Partition refresh callback for selected cache groups.
 * On the coordinator this sends {@link GridDhtPartitionsFullMessage FullMessages};
 * on non-coordinator nodes it sends {@link GridDhtPartitionsSingleMessage SingleMessages}.
 *
 * @param grps Cache groups for partitions refresh.
 */
public void refreshPartitions(@NotNull Collection<CacheGroupContext> grps) {
    // TODO https://issues.apache.org/jira/browse/IGNITE-6857
    if (cctx.snapshot().snapshotOperationInProgress()) {
        if (log.isDebugEnabled())
            log.debug("Schedule resend parititions due to snapshot in progress");
        scheduleResendPartitions();
        return;
    }
    if (grps.isEmpty()) {
        if (log.isDebugEnabled())
            log.debug("Skip partitions refresh, there are no cache groups for partition refresh.");
        return;
    }
    ClusterNode oldest = cctx.discovery().oldestAliveServerNode(NONE);
    if (oldest == null) {
        if (log.isDebugEnabled())
            log.debug("Skip partitions refresh, there are no server nodes [loc=" + cctx.localNodeId() + ']');
        return;
    }
    if (log.isDebugEnabled()) {
        log.debug("Refreshing partitions [oldest=" + oldest.id() + ", loc=" + cctx.localNodeId() + ", cacheGroups= " + grps + ']');
    }
    // If this is the oldest node.
    if (oldest.id().equals(cctx.localNodeId())) {
        // Check rebalance state & send CacheAffinityChangeMessage if needed.
        for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
            if (!grp.isLocal()) {
                GridDhtPartitionTopology top = grp.topology();
                if (top != null)
                    cctx.affinity().checkRebalanceState(top, grp.groupId());
            }
        }
        GridDhtPartitionsExchangeFuture lastFut = lastInitializedFut;
        // No need to send to nodes which did not finish their first exchange.
        AffinityTopologyVersion rmtTopVer = lastFut != null ? (lastFut.isDone() && lastFut.error() == null ? lastFut.topologyVersion() : lastFut.initialVersion()) : AffinityTopologyVersion.NONE;
        Collection<ClusterNode> rmts = cctx.discovery().remoteAliveNodesWithCaches(rmtTopVer);
        if (log.isDebugEnabled())
            log.debug("Refreshing partitions from oldest node: " + cctx.localNodeId());
        sendAllPartitions(rmts, rmtTopVer, grps);
    } else {
        if (log.isDebugEnabled())
            log.debug("Refreshing local partitions from non-oldest node: " + cctx.localNodeId());
        sendLocalPartitions(oldest, null, grps);
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridDhtPartitionsExchangeFuture(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion)
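
The branch at the end encodes the coordinator rule: the oldest alive server node broadcasts full partition maps to all remotes, while every other node sends only its local map to that oldest node. A compact sketch of the dispatch (the Node record and join-order field are illustrative; Ignite derives the oldest node from discovery data):

import java.util.Comparator;
import java.util.List;
import java.util.UUID;

/** Sketch of the coordinator/non-coordinator split in refreshPartitions. */
public class RefreshDispatch {
    /** Stand-in for ClusterNode: id plus discovery join order. */
    record Node(UUID id, long order) {}

    static void refresh(List<Node> aliveServers, UUID locId) {
        // The coordinator is the oldest alive server node (smallest join order).
        Node oldest = aliveServers.stream().min(Comparator.comparingLong(Node::order)).orElse(null);
        if (oldest == null)
            return; // no server nodes: nothing to refresh
        if (oldest.id().equals(locId))
            System.out.println("sendAllPartitions(remotes)"); // full maps from coordinator
        else
            System.out.println("sendLocalPartitions(oldest)"); // single map to coordinator
    }
}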

Example 24 with GridDhtPartitionTopology

use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

the class GridCachePartitionExchangeManager method processSinglePartitionUpdate.

/**
 * @param node Sender cluster node.
 * @param msg Message.
 */
private void processSinglePartitionUpdate(final ClusterNode node, final GridDhtPartitionsSingleMessage msg) {
    if (!enterBusy())
        return;
    try {
        if (msg.exchangeId() == null) {
            if (log.isDebugEnabled())
                log.debug("Received local partition update [nodeId=" + node.id() + ", parts=" + msg + ']');
            boolean updated = false;
            for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg.partitions().entrySet()) {
                Integer grpId = entry.getKey();
                CacheGroupContext grp = cctx.cache().cacheGroup(grpId);
                if (grp != null && !grp.topology().initialized())
                    continue;
                GridDhtPartitionTopology top = null;
                if (grp == null)
                    top = clientTops.get(grpId);
                else if (!grp.isLocal())
                    top = grp.topology();
                if (top != null) {
                    updated |= top.update(null, entry.getValue(), false);
                    cctx.affinity().checkRebalanceState(top, grpId);
                }
            }
            if (updated) {
                if (log.isDebugEnabled())
                    log.debug("Partitions have been scheduled to resend [reason=Single update from " + node.id() + "]");
                scheduleResendPartitions();
            }
        } else {
            GridDhtPartitionsExchangeFuture exchFut = exchangeFuture(msg.exchangeId());
            if (log.isTraceEnabled())
                log.trace("Notifying exchange future about single message: " + exchFut);
            if (msg.client()) {
                AffinityTopologyVersion initVer = exchFut.initialVersion();
                AffinityTopologyVersion readyVer = readyAffinityVersion();
                if (initVer.compareTo(readyVer) < 0 && !exchFut.isDone()) {
                    U.warn(log, "Client node tries to connect but its exchange " + "info is cleaned up from exchange history. " + "Consider increasing 'IGNITE_EXCHANGE_HISTORY_SIZE' property " + "or start clients in smaller batches. " + "Current settings and versions: " + "[IGNITE_EXCHANGE_HISTORY_SIZE=" + EXCHANGE_HISTORY_SIZE + ", " + "initVer=" + initVer + ", " + "readyVer=" + readyVer + "].");
                    exchFut.forceClientReconnect(node, msg);
                    return;
                }
            }
            exchFut.onReceiveSingleMessage(node, msg);
        }
    } finally {
        leaveBusy();
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) GridDhtPartitionMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap) GridDhtPartitionsExchangeFuture(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) GridPartitionStateMap(org.apache.ignite.internal.util.GridPartitionStateMap) CachePartitionFullCountersMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionFullCountersMap) Map(java.util.Map) ConcurrentNavigableMap(java.util.concurrent.ConcurrentNavigableMap) IgniteDhtPartitionsToReloadMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtPartitionsToReloadMap) GridDhtPartitionFullMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap) CachePartitionPartialCountersMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionPartialCountersMap) GridDhtPartitionMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap) IgniteDhtPartitionHistorySuppliersMap(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtPartitionHistorySuppliersMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap)
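
The client branch is a guard against exchange-history truncation: when a client's initial exchange version is already older than the ready affinity version while its exchange future never completed, the corresponding record has been evicted from history (bounded by IGNITE_EXCHANGE_HISTORY_SIZE) and the client is forced to reconnect. A self-contained sketch of that version-window check (Ver is an illustrative stand-in for AffinityTopologyVersion):

/** Sketch: detect a client whose exchange fell out of the retained history window. */
public class HistoryGuard {
    /** Simplified topology version: major counter only. */
    record Ver(long topVer) implements Comparable<Ver> {
        public int compareTo(Ver o) { return Long.compare(topVer, o.topVer); }
    }

    static boolean mustReconnect(Ver initVer, Ver readyVer, boolean exchangeDone) {
        // initVer < readyVer while the exchange never finished => history was trimmed.
        return initVer.compareTo(readyVer) < 0 && !exchangeDone;
    }

    public static void main(String[] args) {
        System.out.println(mustReconnect(new Ver(5), new Ver(9), false)); // true: reconnect
        System.out.println(mustReconnect(new Ver(9), new Ver(9), false)); // false: in history
    }
}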

Example 25 with GridDhtPartitionTopology

use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.

the class IgniteClientCacheStartFailoverTest method testRebalanceStateConcurrentStart.

/**
 * @throws Exception If failed.
 */
@Test
public void testRebalanceStateConcurrentStart() throws Exception {
    final int SRVS1 = 3;
    final int CLIENTS = 5;
    final int SRVS2 = 5;
    startGrids(SRVS1);
    Ignite srv0 = ignite(0);
    final int KEYS = 1000;
    final List<String> cacheNames = startCaches(srv0, KEYS);
    final List<Ignite> clients = new ArrayList<>();
    for (int i = 0; i < CLIENTS; i++) clients.add(startClientGrid(SRVS1 + i));
    final CyclicBarrier barrier = new CyclicBarrier(clients.size() + SRVS2);
    final AtomicInteger clientIdx = new AtomicInteger();
    final Set<Integer> keys = new HashSet<>();
    for (int i = 0; i < KEYS; i++) keys.add(i);
    IgniteInternalFuture<?> fut1 = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {

        @Override
        public Void call() throws Exception {
            barrier.await();
            Ignite client = clients.get(clientIdx.getAndIncrement());
            for (String cacheName : cacheNames) client.cache(cacheName);
            ThreadLocalRandom rnd = ThreadLocalRandom.current();
            for (int i = 0; i < 10; i++) {
                for (String cacheName : cacheNames) {
                    IgniteCache<Object, Object> cache = client.cache(cacheName);
                    Map<Object, Object> map0 = cache.getAll(keys);
                    assertEquals("[cache=" + cacheName + ", expected=" + KEYS + ", actual=" + map0.size() + ']', KEYS, map0.size());
                    int key = rnd.nextInt(KEYS);
                    try {
                        cache.put(key, i);
                    } catch (CacheException e) {
                        log.error("It couldn't put a value [cache=" + cacheName + ", key=" + key + ", val=" + i + ']', e);
                        CacheConfiguration ccfg = cache.getConfiguration(CacheConfiguration.class);
                        TransactionSerializationException txEx = X.cause(e, TransactionSerializationException.class);
                        // Check txEx for null before dereferencing it; only a write-conflict
                        // rollback on a TRANSACTIONAL_SNAPSHOT cache is an expected failure here.
                        if (txEx == null || ccfg.getAtomicityMode() != TRANSACTIONAL_SNAPSHOT
                            || !txEx.getMessage().contains("Cannot serialize transaction due to write conflict (transaction is marked for rollback)"))
                            fail("Unexpected exception was thrown [e=" + e.getMessage() + ']');
                    }
                }
            }
            return null;
        }
    }, clients.size(), "client-cache-start");
    final AtomicInteger srvIdx = new AtomicInteger(SRVS1 + CLIENTS);
    IgniteInternalFuture<?> fut2 = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {

        @Override
        public Void call() throws Exception {
            barrier.await();
            startGrid(srvIdx.incrementAndGet());
            return null;
        }
    }, SRVS2, "node-start");
    fut1.get();
    fut2.get();
    final AffinityTopologyVersion topVer = new AffinityTopologyVersion(SRVS1 + SRVS2 + CLIENTS, 1);
    for (Ignite client : clients) {
        for (String cacheName : cacheNames) {
            final GridDhtPartitionTopology top = ((IgniteKernal) client).context().cache().internalCache(cacheName).context().topology();
            GridTestUtils.waitForCondition(new GridAbsPredicate() {

                @Override
                public boolean apply() {
                    return top.rebalanceFinished(topVer);
                }
            }, 5000);
            assertTrue(top.rebalanceFinished(topVer));
        }
    }
}
Also used : TransactionSerializationException(org.apache.ignite.transactions.TransactionSerializationException) CacheException(javax.cache.CacheException) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology) ArrayList(java.util.ArrayList) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) Ignite(org.apache.ignite.Ignite) CacheConfiguration(org.apache.ignite.configuration.CacheConfiguration) HashSet(java.util.HashSet) IgniteKernal(org.apache.ignite.internal.IgniteKernal) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) IgniteCache(org.apache.ignite.IgniteCache) CacheServerNotFoundException(org.apache.ignite.cache.CacheServerNotFoundException) CacheException(javax.cache.CacheException) TransactionSerializationException(org.apache.ignite.transactions.TransactionSerializationException) CyclicBarrier(java.util.concurrent.CyclicBarrier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) TreeMap(java.util.TreeMap) GridCommonAbstractTest(org.apache.ignite.testframework.junits.common.GridCommonAbstractTest) Test(org.junit.Test)
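
The final assertion relies on GridTestUtils.waitForCondition, which polls a predicate until it returns true or the timeout elapses. A plain-Java sketch of that idiom (the poll interval and final re-check are assumptions, not the exact Ignite implementation):

/** Sketch of the poll-until-true idiom behind waitForCondition. */
public final class WaitFor {
    interface Cond { boolean apply(); }

    static boolean waitForCondition(Cond c, long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (c.apply())
                return true;
            Thread.sleep(50); // poll interval (assumption)
        }
        return c.apply(); // one last check at the deadline
    }
}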

Aggregations

GridDhtPartitionTopology (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology): 64 usages
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 24 usages
GridDhtLocalPartition (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition): 21 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 20 usages
Map (java.util.Map): 18 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 18 usages
CacheGroupContext (org.apache.ignite.internal.processors.cache.CacheGroupContext): 17 usages
HashMap (java.util.HashMap): 15 usages
ArrayList (java.util.ArrayList): 14 usages
Ignite (org.apache.ignite.Ignite): 14 usages
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 12 usages
Test (org.junit.Test): 12 usages
IgniteEx (org.apache.ignite.internal.IgniteEx): 11 usages
UUID (java.util.UUID): 10 usages
IgniteKernal (org.apache.ignite.internal.IgniteKernal): 10 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 9 usages
IgniteException (org.apache.ignite.IgniteException): 9 usages
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 9 usages
GridDhtPartitionMap (org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap): 9 usages
HashSet (java.util.HashSet): 8 usages