Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in the Apache Ignite project.
From the class NotOptimizedRebalanceTest, method testRebalance.
/**
 * Trigger rebalance when node left topology.
 * <p>
 * Starts a cluster, stops one node, loads data, then restarts the stopped node with
 * rebalance supply blocked so its rebalance cannot finish. While rebalance is stuck,
 * an extra node repeatedly leaves and re-joins. The test then verifies that the
 * in-flight rebalance futures were cancelled when a server node bounced (topology
 * change affects affinity) but kept running when a client node bounced.
 *
 * @param persistence Persistent flag.
 * @param serverJoin {@code true} to bounce a server node, {@code false} to bounce a client node.
 * @throws Exception If failed.
 */
public void testRebalance(boolean persistence, boolean serverJoin) throws Exception {
    persistenceEnabled = persistence;

    IgniteEx ignite0 = startGrids(NODES_CNT);

    ignite0.cluster().active(true);

    // Manual baseline control so the node restarts below do not auto-adjust the baseline.
    ignite0.cluster().baselineAutoAdjustEnabled(false);

    IgniteEx newNode = serverJoin ? startGrid(NODES_CNT) : startClientGrid(NODES_CNT);

    // Stop node 1 so that restarting it later triggers rebalance towards it.
    grid(1).close();

    for (String cache : ignite0.cacheNames())
        loadData(ignite0, cache);

    awaitPartitionMapExchange();

    // Restart node 1 with supply messages blocked: rebalance starts but cannot complete.
    TestRecordingCommunicationSpi commSpi1 = startNodeWithBlockingRebalance(getTestIgniteInstanceName(1));

    commSpi1.waitForBlocked();

    Map<CacheGroupContext, IgniteInternalFuture<Boolean>> futs = getAllRebalanceFuturesByGroup(grid(1));

    checkAllFuturesProcessing(futs);

    // Repeatedly bounce the extra node while rebalance is stuck.
    for (int i = 0; i < 3; i++) {
        newNode.close();

        checkTopology(NODES_CNT);

        newNode = serverJoin ? startGrid(NODES_CNT) : startClientGrid(NODES_CNT);

        checkTopology(NODES_CNT + 1);
    }

    // Server topology changes must cancel the in-flight rebalance; client ones must not.
    if (serverJoin)
        checkAllFuturesCancelled(futs);
    else
        checkAllFuturesProcessing(futs);

    commSpi1.stopBlock();

    awaitPartitionMapExchange();

    Map<CacheGroupContext, IgniteInternalFuture<Boolean>> newFuts = getAllRebalanceFuturesByGroup(grid(1));

    for (Map.Entry<CacheGroupContext, IgniteInternalFuture<Boolean>> grpFut : futs.entrySet()) {
        IgniteInternalFuture<Boolean> fut = grpFut.getValue();
        IgniteInternalFuture<Boolean> newFut = newFuts.get(grpFut.getKey());

        if (serverJoin)
            // Cancelled rebalance: the old future completed with 'false'.
            assertTrue(futureInfoString(fut), fut.isDone() && !fut.get());
        else
            // Non-cancelled rebalance: the very same future instance finished the work.
            assertSame(fut, newFut);

        assertTrue(futureInfoString(newFut), newFut.isDone() && newFut.get());
    }
}
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in the Apache Ignite project.
From the class ResetLostPartitionTest, method getPartitionsStates.
/**
 * Collects the states of all local partitions of the given cache on the given grid.
 *
 * @param gridNumber Grid number.
 * @param cacheName Cache name.
 * @return Partitions states for given cache name.
 */
private List<GridDhtPartitionState> getPartitionsStates(int gridNumber, String cacheName) {
    CacheGroupContext grpCtx = grid(gridNumber).context().cache().cacheGroup(CU.cacheId(cacheName));

    GridDhtPartitionTopologyImpl topology = (GridDhtPartitionTopologyImpl)grpCtx.topology();

    return topology.localPartitions()
        .stream()
        .map(part -> part.state())
        .collect(Collectors.toList());
}
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in the Apache Ignite project.
From the class GracefulShutdownTest, method testNotIdealOwners.
/**
 * Try to stop node when not all backups are matching of ideal assignment.
 * <p>
 * Blocks rebalance supply to node 1 so that its backup copies never arrive, then
 * verifies that a graceful shutdown of node 2 waits (logging that it is waiting for
 * backups of local partitions) until rebalance is unblocked.
 *
 * @throws Exception If failed.
 */
@Test
public void testNotIdealOwners() throws Exception {
    backups = 1;

    Ignite ignite0 = startGrid(0);

    ignite0.cluster().active(true);

    // Populate the three preconfigured caches.
    for (int i = 1; i <= 3; i++) {
        IgniteCache cache = ignite0.cache("cache" + i);

        assertNotNull(cache);

        try (IgniteDataStreamer streamer = ignite0.dataStreamer("cache" + i)) {
            for (int j = 0; j < 100; j++)
                streamer.addData(j, j);
        }
    }

    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(ignite0);

    // Block rebalance supply messages addressed to node 1 for all user cache groups
    // (the utility cache is let through), so node 1 never receives its backups.
    spi.blockMessages((node, msg) -> {
        String nodeName = (String) node.attributes().get(ATTR_IGNITE_INSTANCE_NAME);

        if (msg instanceof GridDhtPartitionSupplyMessage) {
            GridDhtPartitionSupplyMessage supplyMsg = (GridDhtPartitionSupplyMessage) msg;

            if (supplyMsg.groupId() != CU.cacheId(GridCacheUtils.UTILITY_CACHE_NAME) && getTestIgniteInstanceName(1).equals(nodeName))
                return true;
        }

        return false;
    });

    startGrid(1);

    Ignite ignite2 = startGrid(2);

    resetBaselineTopology();

    spi.waitForBlocked();

    // Wait until node 2 itself has no moving partitions — only node 1's rebalance stays stuck.
    for (CacheGroupContext grp : ((IgniteEx) ignite2).context().cache().cacheGroups()) {
        GridTestUtils.waitForCondition(() -> !grp.topology().partitionMap(false).get(((IgniteEx) ignite2).localNode().id()).hasMovingPartitions(), 30_000);
    }

    LogListener lnsr = LogListener.matches("This node is waiting for backups of local partitions for group").build();

    listeningLog.registerListener(lnsr);

    // Graceful shutdown of node 2 must block while backups are incomplete elsewhere.
    IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
        ignite2.close();
    });

    assertTrue(GridTestUtils.waitForCondition(lnsr::check, 30_000));
    assertFalse(fut.isDone());

    // Unblock rebalance: shutdown can now complete.
    spi.stopBlock();

    assertTrue(GridTestUtils.waitForCondition(fut::isDone, 30_000));
}
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in the Apache Ignite project.
From the class GracefulShutdownTest, method tesStartCacheWhenNodeStopping.
/**
 * Stopping node and start cache which does not allow it.
 * <p>
 * With rebalance to node 1 blocked, node 2 is stopped, then node 1's graceful shutdown
 * is started — it first waits for its own rebalance to finish. A new cache is created
 * while the node is stopping; after rebalance is unblocked the stopping node moves on
 * to waiting for backups of local partitions.
 *
 * @throws Exception If failed.
 */
@Test
public void tesStartCacheWhenNodeStopping() throws Exception {
    // NOTE(review): method name has a typo ("tes" -> "test"); renaming would change the
    // externally visible test id, so it is left as-is here.
    backups = 2;

    Ignite ignite0 = startGrid(0);

    ignite0.cluster().active(true);

    // Populate the three preconfigured caches.
    for (int i = 1; i <= 3; i++) {
        IgniteCache cache = ignite0.cache("cache" + i);

        assertNotNull(cache);

        try (IgniteDataStreamer streamer = ignite0.dataStreamer("cache" + i)) {
            for (int j = 0; j < 100; j++)
                streamer.addData(j, j);
        }
    }

    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(ignite0);

    // Block rebalance supply messages addressed to node 1 for all user cache groups
    // (the utility cache is let through), so node 1's rebalance never completes.
    spi.blockMessages((node, msg) -> {
        String nodeName = (String) node.attributes().get(ATTR_IGNITE_INSTANCE_NAME);

        if (msg instanceof GridDhtPartitionSupplyMessage) {
            GridDhtPartitionSupplyMessage supplyMsg = (GridDhtPartitionSupplyMessage) msg;

            if (supplyMsg.groupId() != CU.cacheId(GridCacheUtils.UTILITY_CACHE_NAME) && getTestIgniteInstanceName(1).equals(nodeName))
                return true;
        }

        return false;
    });

    Ignite ignite1 = startGrid(1);

    Ignite ignite2 = startGrid(2);

    resetBaselineTopology();

    spi.waitForBlocked();

    // Node 2's own rebalance must be fully done before it is stopped.
    for (CacheGroupContext grp : ((IgniteEx) ignite2).context().cache().cacheGroups()) {
        grp.preloader().rebalanceFuture().get();
    }

    ignite2.close();

    LogListener lnsr = LogListener.matches("This node is waiting for completion of rebalance for group").build();

    listeningLog.registerListener(lnsr);

    // Graceful shutdown of node 1 blocks on its unfinished rebalance.
    IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
        ignite1.close();
    });

    assertTrue(GridTestUtils.waitForCondition(lnsr::check, 30_000));

    listeningLog.unregisterListener(lnsr);

    assertFalse(fut.isDone());

    // Create a new cache while node 1 is in the middle of stopping.
    ignite0.getOrCreateCache(new CacheConfiguration(DEFAULT_CACHE_NAME).setBackups(1));

    spi.stopBlock();

    // After rebalance completes the stopping node proceeds to wait for backups.
    lnsr = LogListener.matches("This node is waiting for backups of local partitions for group").build();

    listeningLog.registerListener(lnsr);

    assertTrue(GridTestUtils.waitForCondition(lnsr::check, 30_000));
}
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in the Apache Ignite project.
From the class LocalWalModeChangeDuringRebalancingSelfTest, method testWithExchangesMerge.
/**
 * Test that local WAL mode changing works well with exchanges merge.
 * <p>
 * Stops two nodes, rewrites the data, then restarts both nodes in parallel so their
 * join exchanges merge. WAL must be disabled on the joiners while they rebalance and
 * re-enabled once rebalance completes, with no data loss.
 *
 * @throws Exception If failed.
 */
@Test
public void testWithExchangesMerge() throws Exception {
    final int nodeCnt = 4;
    final int keyCnt = getKeysCount();

    Ignite ignite = startGrids(nodeCnt);

    ignite.cluster().state(ACTIVE);

    IgniteCache<Integer, Integer> cache = ignite.cache(REPL_CACHE);

    for (int k = 0; k < keyCnt; k++)
        cache.put(k, k);

    stopGrid(2);
    stopGrid(3);

    // Rewrite data to trigger further rebalance.
    for (int k = 0; k < keyCnt; k++)
        cache.put(k, k * 2);

    // Start several grids in parallel to trigger exchanges merge.
    startGridsMultiThreaded(2, 2);

    // WAL must be switched off on each joiner while it rebalances.
    for (int idx = 2; idx < nodeCnt; idx++) {
        CacheGroupContext grp = grid(idx).cachex(REPL_CACHE).context().group();

        assertTrue(GridTestUtils.waitForCondition(() -> !grp.walEnabled(), 5_000));
    }

    // Invoke rebalance manually.
    for (Ignite g : G.allGrids())
        g.cache(REPL_CACHE).rebalance();

    awaitPartitionMapExchange(false, false, null, false, Collections.singleton(REPL_CACHE));

    // WAL must be back on once rebalance is done.
    for (int idx = 2; idx < nodeCnt; idx++)
        assertTrue(grid(idx).cachex(REPL_CACHE).context().group().walEnabled());

    // Check no data loss.
    for (int idx = 2; idx < nodeCnt; idx++) {
        IgniteCache<Integer, Integer> nodeCache = grid(idx).cache(REPL_CACHE);

        for (int k = 0; k < keyCnt; k++)
            Assert.assertEquals("nodeIdx=" + idx + ", key=" + k, (Integer)(2 * k), nodeCache.get(k));
    }
}
Aggregations