Example 56 with GridFutureAdapter

use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

the class IgnitePdsDataRegionMetricsTest method testUsedCheckpointBuffer.

/**
 * Tests the used checkpoint buffer size metric.
 *
 * @throws Exception If failed.
 */
@Test
public void testUsedCheckpointBuffer() throws Exception {
    IgniteEx ig = startGrid(0);
    ig.cluster().active(true);
    final DataRegionMetricsImpl regionMetrics = ig.cachex(DEFAULT_CACHE_NAME).context().group().dataRegion().metrics();
    Assert.assertEquals(0, regionMetrics.getUsedCheckpointBufferPages());
    Assert.assertEquals(0, regionMetrics.getUsedCheckpointBufferSize());
    load(ig);
    GridCacheDatabaseSharedManager psMgr = (GridCacheDatabaseSharedManager) ig.context().cache().context().database();
    GridFutureAdapter<T2<Long, Long>> metricsResult = new GridFutureAdapter<>();
    IgniteInternalFuture chpBeginFut = psMgr.wakeupForCheckpoint(null);
    chpBeginFut.listen((f) -> {
        load(ig);
        metricsResult.onDone(new T2<>(regionMetrics.getUsedCheckpointBufferPages(), regionMetrics.getUsedCheckpointBufferSize()));
    });
    T2<Long, Long> res = metricsResult.get();
    Assert.assertTrue(res.get1() > 0);
    Assert.assertTrue(res.get2() > 0);
}
Also used : GridCacheDatabaseSharedManager(org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager) IgniteEx(org.apache.ignite.internal.IgniteEx) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) DataRegionMetricsImpl(org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) T2(org.apache.ignite.internal.util.typedef.T2) GridCommonAbstractTest(org.apache.ignite.testframework.junits.common.GridCommonAbstractTest) Test(org.junit.Test)
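All of these examples share the same basic contract: one thread creates a GridFutureAdapter, another completes it with onDone, and waiters either block on get() or register a listen callback. As a minimal standalone sketch of that pattern, using only the API calls seen in the examples above (the worker thread and class name are illustrative):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class GridFutureAdapterSketch {
    public static void main(String[] args) throws IgniteCheckedException {
        GridFutureAdapter<String> fut = new GridFutureAdapter<>();
        // Completion callbacks fire once, when the future finishes.
        fut.listen(f -> System.out.println("Future completed."));
        // Some other thread eventually supplies the result.
        new Thread(() -> fut.onDone("result")).start();
        // get() blocks until a value (or an error) is set.
        System.out.println(fut.get());
    }
}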

Example 57 with GridFutureAdapter

use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

the class ZkCommunicationErrorProcessFuture method run.

/**
 * {@inheritDoc}
 */
@Override
public void run() {
    // Run from zk discovery worker pool after timeout.
    if (needProcessTimeout()) {
        try {
            UUID reqId = UUID.randomUUID();
            if (log.isInfoEnabled()) {
                log.info("Initiate cluster-wide communication error resolve process [reqId=" + reqId + ", errNodes=" + nodeFuts.size() + ']');
            }
            impl.sendCustomMessage(new ZkCommunicationErrorResolveStartMessage(reqId));
        } catch (Exception e) {
            Collection<GridFutureAdapter<Boolean>> futs;
            synchronized (this) {
                if (state != State.WAIT_TIMEOUT)
                    return;
                state = State.DONE;
                resErr = e;
                // nodeFuts should not be modified after state changed to DONE.
                futs = nodeFuts.values();
            }
            for (GridFutureAdapter<Boolean> fut : futs) fut.onDone(e);
            onDone(e);
        }
    }
}
Also used : GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) Collection(java.util.Collection) UUID(java.util.UUID)
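The catch block above shows the error half of the same contract: onDone(Throwable) completes a future exceptionally, and every waiter's get() then rethrows the error. A hedged sketch of that fan-out, with a per-node map mirroring nodeFuts in the example (class and method names are illustrative):

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class ErrorFanOutSketch {
    // Per-node futures, analogous to nodeFuts in the example above.
    private final Map<UUID, GridFutureAdapter<Boolean>> nodeFuts = new ConcurrentHashMap<>();

    /** Completes every pending future with the same error, as the catch block above does. */
    void failAll(Exception e) {
        for (GridFutureAdapter<Boolean> fut : nodeFuts.values())
            fut.onDone(e); // get() on this future now rethrows e (wrapped in IgniteCheckedException).
    }
}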

Example 58 with GridFutureAdapter

use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

the class PreloadingRestartWhileClearingPartitionTest method testPreloadingRestart.

/**
 * @throws Exception If failed.
 */
@Test
public void testPreloadingRestart() throws Exception {
    IgniteEx crd = startGrids(3);
    crd.cluster().state(ClusterState.ACTIVE);
    final int clearingPart = 0;
    final int cnt = 1_100;
    final int delta = 2_000;
    final int rmv = 1_500;
    loadDataToPartition(clearingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, cnt, 0);
    forceCheckpoint();
    stopGrid(2);
    loadDataToPartition(clearingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, delta, cnt);
    // Removal required for triggering full rebalancing.
    List<Integer> clearKeys = partitionKeys(grid(0).cache(DEFAULT_CACHE_NAME), clearingPart, rmv, cnt);
    for (Integer clearKey : clearKeys) grid(0).cache(DEFAULT_CACHE_NAME).remove(clearKey);
    CountDownLatch lock = new CountDownLatch(1);
    CountDownLatch unlock = new CountDownLatch(1);
    // Start node and delay preloading in the middle of partition clearing.
    IgniteEx g2 = startGrid(2, new DependencyResolver() {

        @Override
        public <T> T resolve(T instance) {
            if (instance instanceof GridDhtPartitionTopologyImpl) {
                GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl) instance;
                top.partitionFactory(new GridDhtPartitionTopologyImpl.PartitionFactory() {

                    @Override
                    public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
                        return id == clearingPart ? new GridDhtLocalPartitionSyncEviction(ctx, grp, id, recovery, 1, lock, unlock) : new GridDhtLocalPartition(ctx, grp, id, recovery);
                    }
                });
            }
            return instance;
        }
    });
    assertTrue(U.await(lock, GridDhtLocalPartitionSyncEviction.TIMEOUT, TimeUnit.MILLISECONDS));
    // Stop supplier for clearingPart.
    GridCacheContext<Object, Object> ctx = g2.cachex(DEFAULT_CACHE_NAME).context();
    GridDhtPartitionDemander.RebalanceFuture rebFut = (GridDhtPartitionDemander.RebalanceFuture) ctx.preloader().rebalanceFuture();
    GridDhtPreloaderAssignments assignments = U.field(rebFut, "assignments");
    ClusterNode supplier = assignments.supplier(clearingPart);
    AtomicReference<GridFutureAdapter<?>> ref = U.field(ctx.topology().localPartition(clearingPart), "finishFutRef");
    GridFutureAdapter<?> clearFut = ref.get();
    assertFalse(clearFut.isDone());
    grid(supplier).close();
    doSleep(1000);
    unlock.countDown();
    awaitPartitionMapExchange(true, true, null);
    assertPartitionsSame(idleVerify(grid(2), DEFAULT_CACHE_NAME));
    for (Ignite grid : G.allGrids()) assertEquals(cnt + delta - rmv, grid.cache(DEFAULT_CACHE_NAME).size());
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) CountDownLatch(java.util.concurrent.CountDownLatch) GridCacheSharedContext(org.apache.ignite.internal.processors.cache.GridCacheSharedContext) GridDhtPartitionDemander(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemander) DependencyResolver(org.apache.ignite.internal.processors.resource.DependencyResolver) GridDhtPreloaderAssignments(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPreloaderAssignments) IgniteEx(org.apache.ignite.internal.IgniteEx) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) Ignite(org.apache.ignite.Ignite) CacheGroupContext(org.apache.ignite.internal.processors.cache.CacheGroupContext) GridCommonAbstractTest(org.apache.ignite.testframework.junits.common.GridCommonAbstractTest) Test(org.junit.Test)
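Both this test and the next one read the partition's private finishFutRef field through U.field to observe the internal clearing future. A minimal sketch of that reflective access (the field name mirrors the tests; the Holder class is purely illustrative):

import java.util.concurrent.atomic.AtomicReference;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.typedef.internal.U;

public class FieldPeekSketch {
    /** Illustrative holder with a private future reference, standing in for GridDhtLocalPartition. */
    static class Holder {
        private final AtomicReference<GridFutureAdapter<?>> finishFutRef =
            new AtomicReference<>(new GridFutureAdapter<>());
    }

    public static void main(String[] args) {
        Holder holder = new Holder();
        // U.field reflectively reads a private field by name, as the tests above do.
        AtomicReference<GridFutureAdapter<?>> ref = U.field(holder, "finishFutRef");
        System.out.println("Future done: " + ref.get().isDone());
    }
}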

Example 59 with GridFutureAdapter

use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

the class BlockedEvictionsTest method testStopNodeDuringEviction_2.

/**
 * @throws Exception If failed.
 */
@Test
public void testStopNodeDuringEviction_2() throws Exception {
    AtomicInteger holder = new AtomicInteger();
    CountDownLatch l1 = new CountDownLatch(1);
    CountDownLatch l2 = new CountDownLatch(1);
    IgniteEx g0 = startGrid(0, new DependencyResolver() {

        @Override
        public <T> T resolve(T instance) {
            if (instance instanceof GridDhtPartitionTopologyImpl) {
                GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl) instance;
                top.partitionFactory(new GridDhtPartitionTopologyImpl.PartitionFactory() {

                    @Override
                    public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
                        return new GridDhtLocalPartitionSyncEviction(ctx, grp, id, recovery, 3, l1, l2) {

                            /** Synchronize eviction only for the partition under test. */
                            @Override
                            protected void sync() {
                                if (holder.get() == id)
                                    super.sync();
                            }
                        };
                    }
                });
            } else if (instance instanceof IgniteCacheOffheapManager) {
                IgniteCacheOffheapManager mgr = (IgniteCacheOffheapManager) instance;
                IgniteCacheOffheapManager spied = Mockito.spy(mgr);
                Mockito.doAnswer(new Answer() {

                    @Override
                    public Object answer(InvocationOnMock invocation) throws Throwable {
                        Object ret = invocation.callRealMethod();
                        // Wait is necessary here to guarantee test progress.
                        doSleep(2_000);
                        return ret;
                    }
                }).when(spied).stop();
                return (T) spied;
            }
            return instance;
        }
    });
    startGrid(1);
    awaitPartitionMapExchange();
    IgniteCache<Object, Object> cache = g0.getOrCreateCache(cacheConfiguration());
    int p0 = evictingPartitionsAfterJoin(g0, cache, 1).get(0);
    holder.set(p0);
    loadDataToPartition(p0, g0.name(), DEFAULT_CACHE_NAME, 5_000, 0, 3);
    startGrid(2);
    U.awaitQuiet(l1);
    GridDhtLocalPartition part = g0.cachex(DEFAULT_CACHE_NAME).context().topology().localPartition(p0);
    AtomicReference<GridFutureAdapter<?>> ref = U.field(part, "finishFutRef");
    GridFutureAdapter<?> finishFut = ref.get();
    IgniteInternalFuture fut = runAsync(g0::close);
    // Give some time to execute cache store destroy.
    doSleep(500);
    l2.countDown();
    fut.get();
    // Partition clearing future should be finished with NodeStoppingException.
    // Read the error once; error().getMessage() would NPE if the future completed normally.
    Throwable err = finishFut.error();
    assertTrue(String.valueOf(err), err != null && X.hasCause(err, NodeStoppingException.class));
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) GridCacheSharedContext(org.apache.ignite.internal.processors.cache.GridCacheSharedContext) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) DependencyResolver(org.apache.ignite.internal.processors.resource.DependencyResolver) Answer(org.mockito.stubbing.Answer) IgniteCacheOffheapManager(org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InvocationOnMock(org.mockito.invocation.InvocationOnMock) IgniteEx(org.apache.ignite.internal.IgniteEx) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) CacheGroupContext(org.apache.ignite.internal.processors.cache.CacheGroupContext) GridCommonAbstractTest(org.apache.ignite.testframework.junits.common.GridCommonAbstractTest) Test(org.junit.Test)
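The final assertion relies on GridFutureAdapter exposing its failure through error() without rethrowing; X.hasCause then walks the cause chain. A small sketch of that inspection (the exception type comes from the test above; the class name is illustrative):

import org.apache.ignite.internal.NodeStoppingException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.internal.util.typedef.X;

public class ErrorInspectionSketch {
    public static void main(String[] args) {
        GridFutureAdapter<Void> fut = new GridFutureAdapter<>();
        // Complete the future exceptionally, as node stop does for the clearing future.
        fut.onDone(new NodeStoppingException("Node is stopping."));
        // error() returns the failure without rethrowing; X.hasCause searches the cause chain.
        System.out.println(fut.isDone() && X.hasCause(fut.error(), NodeStoppingException.class));
    }
}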

Example 60 with GridFutureAdapter

use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

the class IgniteWalRebalanceTest method testRebalanceReassignAndOwnPartitions.

/**
 * Tests that owning partitions (triggered by a rebalance future) cannot be mapped to a new rebalance future
 * created by RebalanceReassignExchangeTask.
 *
 * @throws Exception If failed.
 */
@Test
public void testRebalanceReassignAndOwnPartitions() throws Exception {
    backups = 3;
    IgniteEx supplier1 = startGrid(0);
    IgniteEx supplier2 = startGrid(1);
    IgniteEx demander = startGrid(2);
    supplier1.cluster().state(ACTIVE);
    String cacheName1 = "test-cache-1";
    String cacheName2 = "test-cache-2";
    IgniteCache<Integer, IndexedObject> c1 = supplier1.getOrCreateCache(new CacheConfiguration<Integer, IndexedObject>(cacheName1).setBackups(backups).setAffinity(new RendezvousAffinityFunction(false, PARTS_CNT)).setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC).setRebalanceOrder(10));
    IgniteCache<Integer, IndexedObject> c2 = supplier1.getOrCreateCache(new CacheConfiguration<Integer, IndexedObject>(cacheName2).setBackups(backups).setAffinity(new RendezvousAffinityFunction(false, PARTS_CNT)).setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC).setRebalanceOrder(20));
    // Fill initial data.
    final int entryCnt = PARTS_CNT * 200;
    final int preloadEntryCnt = PARTS_CNT * 400;
    int val = 0;
    for (int k = 0; k < preloadEntryCnt; k++) {
        c1.put(k, new IndexedObject(val++));
        c2.put(k, new IndexedObject(val++));
    }
    forceCheckpoint();
    stopGrid(2);
    // This is an easy way to emulate missing partitions on the first rebalance.
    for (int i = 0; i < entryCnt; i++) c1.put(i, new IndexedObject(val++));
    // Full rebalance for the cacheName2.
    for (int i = 0; i < preloadEntryCnt; i++) c2.put(i, new IndexedObject(val++));
    // Delay rebalance process for specified groups.
    blockMsgPred = (node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage) msg;
            return msg0.groupId() == CU.cacheId(cacheName1) || msg0.groupId() == CU.cacheId(cacheName2);
        }
        return false;
    };
    // Emulate missing partitions and trigger RebalanceReassignExchangeTask which should re-trigger a new rebalance.
    FailingIOFactory ioFactory = injectFailingIOFactory(supplier1);
    demander = startGrid(2);
    TestRecordingCommunicationSpi demanderSpi = TestRecordingCommunicationSpi.spi(grid(2));
    // Wait until the demander starts rebalancing.
    demanderSpi.waitForBlocked();
    // Start a client node in order to block RebalanceReassignExchangeTask (without changing the affinity)
    // until cacheName2 triggers a checkpoint after rebalancing.
    CountDownLatch blockClientJoin = new CountDownLatch(1);
    CountDownLatch unblockClientJoin = new CountDownLatch(1);
    demander.context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {

        @Override
        public void onInitBeforeTopologyLock(GridDhtPartitionsExchangeFuture fut) {
            blockClientJoin.countDown();
            try {
                if (!unblockClientJoin.await(getTestTimeout(), MILLISECONDS))
                    throw new IgniteException("Failed to wait for client node joinning the cluster.");
            } catch (InterruptedException e) {
                throw new IgniteException("Unexpected exception.", e);
            }
        }
    });
    startClientGrid(4);
    // Wait for a checkpoint after rebalancing cacheName2.
    CountDownLatch blockCheckpoint = new CountDownLatch(1);
    CountDownLatch unblockCheckpoint = new CountDownLatch(1);
    ((GridCacheDatabaseSharedManager) demander.context().cache().context().database()).addCheckpointListener(new CheckpointListener() {

        /**
         * {@inheritDoc}
         */
        @Override
        public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
            if (!ctx.progress().reason().contains(String.valueOf(CU.cacheId(cacheName2))))
                return;
            blockCheckpoint.countDown();
            try {
                if (!unblockCheckpoint.await(getTestTimeout(), MILLISECONDS))
                    throw new IgniteCheckedException("Failed to wait for unblocking checkpointer.");
            } catch (InterruptedException e) {
                throw new IgniteCheckedException("Unexpected exception", e);
            }
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException {
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
        }
    });
    // Unblock the first rebalance.
    demanderSpi.stopBlock();
    // Wait for start of the checkpoint after rebalancing cacheName2.
    assertTrue("Failed to wait for checkpoint.", blockCheckpoint.await(getTestTimeout(), MILLISECONDS));
    // Block the second rebalancing.
    demanderSpi.blockMessages((node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage) msg;
            return msg0.groupId() == CU.cacheId(cacheName1);
        }
        return false;
    });
    ioFactory.reset();
    // Let's unblock client exchange and, therefore, handling of RebalanceReassignExchangeTask,
    // which is already scheduled.
    unblockClientJoin.countDown();
    // Wait for starting the second rebalance (new chain of rebalance futures should be created at this point).
    demanderSpi.waitForBlocked();
    GridFutureAdapter checkpointFut = ((GridCacheDatabaseSharedManager) demander.context().cache().context().database()).getCheckpointer().currentProgress().futureFor(FINISHED);
    // Unblock checkpointer.
    unblockCheckpoint.countDown();
    assertTrue("Failed to wait for a checkpoint.", GridTestUtils.waitForCondition(() -> checkpointFut.isDone(), getTestTimeout()));
    // Note: there is a race between unblocking the rebalance and the current checkpoint executing all of its listeners.
    demanderSpi.stopBlock();
    awaitPartitionMapExchange(false, true, null);
}
Also used : GridDhtPartitionsExchangeFuture(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture) GridCacheDatabaseSharedManager(org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager) CheckpointListener(org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener) CountDownLatch(java.util.concurrent.CountDownLatch) PartitionsExchangeAware(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.PartitionsExchangeAware) TestRecordingCommunicationSpi(org.apache.ignite.internal.TestRecordingCommunicationSpi) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) IgniteEx(org.apache.ignite.internal.IgniteEx) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) RendezvousAffinityFunction(org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction) GridDhtPartitionDemandMessage(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemandMessage) GridCommonAbstractTest(org.apache.ignite.testframework.junits.common.GridCommonAbstractTest) Test(org.junit.Test)
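Here the checkpoint progress future is polled with GridTestUtils.waitForCondition rather than a blocking get(), which keeps the wait bounded by an explicit timeout. A minimal sketch of that polling idiom (the completing thread and timeout value are illustrative):

import org.apache.ignite.internal.util.future.GridFutureAdapter;
import org.apache.ignite.testframework.GridTestUtils;

public class PollingSketch {
    public static void main(String[] args) throws Exception {
        GridFutureAdapter<Void> fut = new GridFutureAdapter<>();
        // Something completes the future later, like the checkpointer above.
        new Thread(fut::onDone).start();
        // Poll instead of blocking, so the wait honors an explicit timeout.
        boolean done = GridTestUtils.waitForCondition(fut::isDone, 10_000);
        System.out.println("Checkpoint future finished: " + done);
    }
}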

Aggregations

GridFutureAdapter (org.apache.ignite.internal.util.future.GridFutureAdapter): 110
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 59
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture): 34
IgniteException (org.apache.ignite.IgniteException): 23
ArrayList (java.util.ArrayList): 21
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 21
List (java.util.List): 20
UUID (java.util.UUID): 20
ClusterNode (org.apache.ignite.cluster.ClusterNode): 20
IgniteEx (org.apache.ignite.internal.IgniteEx): 20
HashMap (java.util.HashMap): 19
Map (java.util.Map): 19
IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException): 18
Test (org.junit.Test): 18
Nullable (org.jetbrains.annotations.Nullable): 17
Ignite (org.apache.ignite.Ignite): 15
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 15
HashSet (java.util.HashSet): 14
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 14
T2 (org.apache.ignite.internal.util.typedef.T2): 13