Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class LocalWalModeChangeDuringRebalancingSelfTest, method testWalDisabledDuringRebalancingWithPendingTxTracker:
/**
 * @throws Exception If failed.
 */
@Test
public void testWalDisabledDuringRebalancingWithPendingTxTracker() throws Exception {
    enablePendingTxTracker = true;
    dfltCacheBackupCnt = 2;

    Ignite ignite = startGrids(3);

    ignite.cluster().baselineAutoAdjustEnabled(false);
    ignite.cluster().state(ACTIVE);
    ignite.cluster().setBaselineTopology(3);

    IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);

    stopGrid(2);

    awaitExchange((IgniteEx)ignite);

    // Ensure each partition has received an update.
    for (int k = 0; k < RendezvousAffinityFunction.DFLT_PARTITION_COUNT; k++)
        cache.put(k, k);

    IgniteEx newIgnite = startGrid(2);

    awaitExchange(newIgnite);

    CacheGroupContext grpCtx = newIgnite.cachex(DEFAULT_CACHE_NAME).context().group();

    assertFalse(grpCtx.walEnabled());

    long rebalanceStartedTs = System.currentTimeMillis();

    for (Ignite g : G.allGrids())
        g.cache(DEFAULT_CACHE_NAME).rebalance();

    awaitPartitionMapExchange();

    assertTrue(grpCtx.walEnabled());

    long rebalanceFinishedTs = System.currentTimeMillis();

    CheckpointHistory cpHist = ((GridCacheDatabaseSharedManager)newIgnite.context().cache().context().database()).checkpointHistory();

    assertNotNull(cpHist);

    // Ensure there was a checkpoint on WAL re-activation.
    assertEquals(1, cpHist.checkpoints().stream().filter(ts -> rebalanceStartedTs <= ts && ts <= rebalanceFinishedTs).count());
}
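For orientation, the WAL state this test observes through the internal CacheGroupContext has public counterparts on IgniteCluster. A minimal sketch, assuming a persistent single-node cluster; the class name WalToggleSketch and the cache name "demo" are illustrative, not taken from the test:

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cluster.ClusterState;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;

public class WalToggleSketch {
    public static void main(String[] args) {
        // WAL can only be toggled for caches backed by a persistent data region.
        IgniteConfiguration cfg = new IgniteConfiguration()
            .setDataStorageConfiguration(new DataStorageConfiguration()
                .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
                    .setPersistenceEnabled(true)));

        try (Ignite ignite = Ignition.start(cfg)) {
            ignite.cluster().state(ClusterState.ACTIVE);

            ignite.getOrCreateCache(new CacheConfiguration<Integer, Integer>("demo"));

            // Public counterparts of the CacheGroupContext#walEnabled() flag above.
            ignite.cluster().disableWal("demo");
            assert !ignite.cluster().isWalEnabled("demo");

            ignite.cluster().enableWal("demo");
            assert ignite.cluster().isWalEnabled("demo");
        }
    }
}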
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class LocalWalModeChangeDuringRebalancingSelfTest, method testLocalAndGlobalWalStateInterdependence:
/**
 * @throws Exception If failed.
 */
@Test
public void testLocalAndGlobalWalStateInterdependence() throws Exception {
    Ignite ignite = startGrids(3);

    ignite.cluster().baselineAutoAdjustEnabled(false);
    ignite.cluster().state(ACTIVE);

    IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);

    for (int k = 0; k < getKeysCount(); k++)
        cache.put(k, k);

    IgniteEx newIgnite = startGrid(3);

    newIgnite.cluster().setBaselineTopology(ignite.cluster().nodes());

    awaitExchange(newIgnite);

    CacheGroupContext grpCtx = newIgnite.cachex(DEFAULT_CACHE_NAME).context().group();

    assertFalse(grpCtx.walEnabled());

    ignite.cluster().disableWal(DEFAULT_CACHE_NAME);

    for (Ignite g : G.allGrids())
        g.cache(DEFAULT_CACHE_NAME).rebalance();

    awaitPartitionMapExchange();

    // WAL is globally disabled.
    assertFalse(grpCtx.walEnabled());

    ignite.cluster().enableWal(DEFAULT_CACHE_NAME);

    assertTrue(grpCtx.walEnabled());
}
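The assertions above capture the interdependence: the local WAL flag is switched off automatically while a joining node rebalances, and the global disableWal/enableWal state overrides it. A hedged sketch of reading the same internal flag, assuming the internal cachex() API of the version under test; WalStateProbe is an illustrative name:

import org.apache.ignite.Ignite;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;

public class WalStateProbe {
    /** Reads the per-group WAL flag that both tests above assert on. */
    public static boolean walEnabled(Ignite ignite, String cacheName) {
        // cachex() and CacheGroupContext are internal API and may change between versions.
        CacheGroupContext grp = ((IgniteEx)ignite).cachex(cacheName).context().group();

        return grp.walEnabled();
    }
}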
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class ZkCommunicationFailureContext, method cachePartitionOwners:
/** {@inheritDoc} */
@Override public List<List<ClusterNode>> cachePartitionOwners(String cacheName) {
    if (cacheName == null)
        throw new NullPointerException("Null cache name.");

    DynamicCacheDescriptor cacheDesc = ctx.affinity().caches().get(CU.cacheId(cacheName));

    if (cacheDesc == null)
        throw new IllegalArgumentException("Invalid cache name: " + cacheName);

    if (cacheDesc.cacheConfiguration().getCacheMode() == CacheMode.LOCAL)
        return Collections.emptyList();

    CacheGroupContext grp = ctx.cache().cacheGroup(cacheDesc.groupId());

    GridDhtPartitionTopology top;

    if (grp == null) {
        top = ctx.exchange().clientTopologyIfExists(cacheDesc.groupId());

        assert top != null : cacheName;
    }
    else
        top = grp.topology();

    return top.allOwners();
}
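Outside the internal topology API, a similar per-partition owners view is available through the public Affinity interface. A minimal sketch; PartitionOwnersSketch is an illustrative name, and note that affinity returns the ideal assignment, while allOwners() above reflects actual current ownership, which can differ during rebalancing:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class PartitionOwnersSketch {
    /** Collects primary and backup nodes per partition via the public Affinity API. */
    public static List<Collection<ClusterNode>> partitionOwners(Ignite ignite, String cacheName) {
        Affinity<Object> aff = ignite.affinity(cacheName);

        List<Collection<ClusterNode>> owners = new ArrayList<>(aff.partitions());

        for (int p = 0; p < aff.partitions(); p++)
            owners.add(aff.mapPartitionToPrimaryAndBackups(p));

        return owners;
    }
}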
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class MovingPartitionIsEvictedDuringClearingTest, method testMovingToEvicted:
/**
 * @throws Exception If failed.
 */
@Test
public void testMovingToEvicted() throws Exception {
    IgniteEx crd = startGrids(3);

    crd.cluster().state(ClusterState.ACTIVE);

    final int evictingPart = evictingPartitionsAfterJoin(grid(2), grid(2).cache(DEFAULT_CACHE_NAME), 1).get(0);
    final int cnt = 1_100;
    final int delta = 2_000;
    final int rmv = 1_500;

    loadDataToPartition(evictingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, cnt, 0, 3);

    forceCheckpoint();

    stopGrid(2);

    loadDataToPartition(evictingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, delta, cnt, 3);

    // Removal required for triggering full rebalancing.
    List<Integer> clearKeys = partitionKeys(grid(0).cache(DEFAULT_CACHE_NAME), evictingPart, rmv, cnt);

    for (Integer clearKey : clearKeys)
        grid(0).cache(DEFAULT_CACHE_NAME).remove(clearKey);

    CountDownLatch lock = new CountDownLatch(1);
    CountDownLatch unlock = new CountDownLatch(1);

    // Start the node and delay preloading in the middle of partition clearing.
    IgniteEx g2 = startGrid(2, new DependencyResolver() {
        @Override public <T> T resolve(T instance) {
            if (instance instanceof GridDhtPartitionTopologyImpl) {
                GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl)instance;

                top.partitionFactory(new GridDhtPartitionTopologyImpl.PartitionFactory() {
                    @Override public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
                        // Substitute a latch-controlled partition for the evicted one.
                        return id == evictingPart
                            ? new GridDhtLocalPartitionSyncEviction(ctx, grp, id, recovery, 2, lock, unlock)
                            : new GridDhtLocalPartition(ctx, grp, id, recovery);
                    }
                });
            }

            return instance;
        }
    });

    assertTrue(U.await(lock, GridDhtLocalPartitionSyncEviction.TIMEOUT, TimeUnit.MILLISECONDS));

    startGrid(4);

    resetBaselineTopology();

    awaitPartitionMapExchange();

    // Give some time for partition state messages to be processed.
    doSleep(3_000);

    // Finish clearing.
    unlock.countDown();

    awaitPartitionMapExchange();

    // The partition will remain in the RENTING state until the next exchange.
    assertEquals(RENTING, g2.cachex(DEFAULT_CACHE_NAME).context().topology().localPartition(evictingPart).state());

    validadate(cnt + delta - rmv);

    stopGrid(2);
    startGrid(2);

    awaitPartitionMapExchange(true, true, null);

    validadate(cnt + delta - rmv);
}
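The DependencyResolver above swaps in GridDhtLocalPartitionSyncEviction, a test helper partition that parks eviction on the lock latch until unlock is released. A minimal sketch of the PartitionFactory hook itself, with an illustrative logging body in place of the latch logic; this is internal API and subject to change:

import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopologyImpl;

class TracingPartitionFactory implements GridDhtPartitionTopologyImpl.PartitionFactory {
    /** Installed via GridDhtPartitionTopologyImpl#partitionFactory(...), as in the test. */
    @Override public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
        // Illustrative hook body: observe partition creation, then delegate to the default partition.
        System.out.println("Creating partition " + id + " for group " + grp.cacheOrGroupName());

        return new GridDhtLocalPartition(ctx, grp, id, recovery);
    }
}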
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class PreloadingRestartWhileClearingPartitionTest, method testPreloadingRestart:
/**
 * @throws Exception If failed.
 */
@Test
public void testPreloadingRestart() throws Exception {
    IgniteEx crd = startGrids(3);

    crd.cluster().state(ClusterState.ACTIVE);

    final int clearingPart = 0;
    final int cnt = 1_100;
    final int delta = 2_000;
    final int rmv = 1_500;

    loadDataToPartition(clearingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, cnt, 0);

    forceCheckpoint();

    stopGrid(2);

    loadDataToPartition(clearingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, delta, cnt);

    // Removal required for triggering full rebalancing.
    List<Integer> clearKeys = partitionKeys(grid(0).cache(DEFAULT_CACHE_NAME), clearingPart, rmv, cnt);

    for (Integer clearKey : clearKeys)
        grid(0).cache(DEFAULT_CACHE_NAME).remove(clearKey);

    CountDownLatch lock = new CountDownLatch(1);
    CountDownLatch unlock = new CountDownLatch(1);

    // Start the node and delay preloading in the middle of partition clearing.
    IgniteEx g2 = startGrid(2, new DependencyResolver() {
        @Override public <T> T resolve(T instance) {
            if (instance instanceof GridDhtPartitionTopologyImpl) {
                GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl)instance;

                top.partitionFactory(new GridDhtPartitionTopologyImpl.PartitionFactory() {
                    @Override public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
                        return id == clearingPart
                            ? new GridDhtLocalPartitionSyncEviction(ctx, grp, id, recovery, 1, lock, unlock)
                            : new GridDhtLocalPartition(ctx, grp, id, recovery);
                    }
                });
            }

            return instance;
        }
    });

    assertTrue(U.await(lock, GridDhtLocalPartitionSyncEviction.TIMEOUT, TimeUnit.MILLISECONDS));

    // Find the supplier node for clearingPart.
    GridCacheContext<Object, Object> ctx = g2.cachex(DEFAULT_CACHE_NAME).context();

    GridDhtPartitionDemander.RebalanceFuture rebFut = (GridDhtPartitionDemander.RebalanceFuture)ctx.preloader().rebalanceFuture();

    GridDhtPreloaderAssignments assignments = U.field(rebFut, "assignments");

    ClusterNode supplier = assignments.supplier(clearingPart);

    AtomicReference<GridFutureAdapter<?>> ref = U.field(ctx.topology().localPartition(clearingPart), "finishFutRef");

    GridFutureAdapter clearFut = ref.get();

    assertFalse(clearFut.isDone());

    // Stop the supplier while clearing is still in progress.
    grid(supplier).close();

    doSleep(1000);

    // Finish clearing.
    unlock.countDown();

    awaitPartitionMapExchange(true, true, null);

    assertPartitionsSame(idleVerify(grid(2), DEFAULT_CACHE_NAME));

    for (Ignite grid : G.allGrids())
        assertEquals(cnt + delta - rmv, grid.cache(DEFAULT_CACHE_NAME).size());
}
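Both of these restart tests reach rebalance internals reflectively: U.field(rebFut, "assignments") and U.field(..., "finishFutRef") read private fields by name. A minimal sketch of that helper's use; peek() is an illustrative wrapper, and the field names must match the internals of the Ignite version at hand:

import org.apache.ignite.internal.util.typedef.internal.U;

public class FieldPeekSketch {
    /** Reads a private field by name, as the test does for rebalance internals. */
    static <T> T peek(Object target, String fieldName) {
        // U.field(...) resolves the field reflectively and fails if it is absent,
        // which ties such tests to a specific Ignite version's internals.
        return U.field(target, fieldName);
    }
}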