use of org.apache.ignite.internal.processors.resource.DependencyResolver in project ignite by apache.
the class TcpCommunicationHandshakeTimeoutTest method testSocketForcedClosedBecauseSlowReadFromSocket.
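Every snippet on this page plugs a custom DependencyResolver into startGrid(...) to intercept an internal component at creation time and substitute a wrapped instance. The contract, as inferred from the overrides below, amounts to a single generic method; this is a minimal sketch, not the full Ignite source:
// Minimal sketch of the resolver contract exercised by the tests on this page:
// return the instance unchanged, or a wrapped/substituted one.
public interface DependencyResolver {
    <T> T resolve(T instance);
}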
/**
 * 1. Start a cluster of three nodes.
 * 2. Wait until the communication connections go idle.
 * 3. Configure a delay during the communication connection handshake.
 * 4. Force establishing of a new connection from node2 to node1 from the timeout object processor thread
 * (this test uses compute for that).
 * 5. Expected: the frozen handshake attempt is handled successfully and a new connection
 * is established.
 *
 * @throws Exception If failed.
 */
@Test
public void testSocketForcedClosedBecauseSlowReadFromSocket() throws Exception {
// given: Two ordinary nodes.
startGrid(0);
IgniteEx g1 = startGrid(1);
// and: One more node whose communication handshake can be delayed on demand.
AtomicBoolean delayHandshakeUntilSocketClosed = new AtomicBoolean();
IgniteEx g2 = startGrid(2, new DependencyResolver() {
@Override
public <T> T resolve(T instance) {
if (instance instanceof TcpHandshakeExecutor) {
TcpHandshakeExecutor executor = (TcpHandshakeExecutor) instance;
return (T) new DelaydTcpHandshakeExecutor(executor, delayHandshakeUntilSocketClosed);
}
return instance;
}
});
awaitPartitionMapExchange();
AtomicBoolean result = new AtomicBoolean(false);
// Wait for connections to go idle.
doSleep(1000);
// when: Initiate communication connection from timeout object processor thread.
g2.context().timeout().addTimeoutObject(new GridTimeoutObjectAdapter(0) {
@Override
public void onTimeout() {
delayHandshakeUntilSocketClosed.set(true);
g2.compute(g2.cluster().forNodes(Arrays.asList(g1.localNode()))).withNoFailover().call(() -> true);
result.set(true);
}
});
// then: Even though the first handshake attempt freezes, the compute call should eventually complete successfully.
assertTrue("Compute should be successfully handled.", waitForCondition(result::get, 20_000));
}
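The DelaydTcpHandshakeExecutor used above is defined elsewhere in the test class and is not shown here. Below is a hypothetical, self-contained illustration of the delay technique, built on a stand-in Handshaker interface (it is not the actual TcpHandshakeExecutor API, and the usual java.util.concurrent imports are assumed):
// Hypothetical sketch, NOT the real Ignite class: hold only the first handshake
// attempt long enough for the idle socket to be force-closed, then delegate.
interface Handshaker {
    long handshake();
}

class DelayingHandshaker implements Handshaker {
    private final Handshaker delegate;
    private final AtomicBoolean delayFirstAttempt;

    DelayingHandshaker(Handshaker delegate, AtomicBoolean delayFirstAttempt) {
        this.delegate = delegate;
        this.delayFirstAttempt = delayFirstAttempt;
    }

    @Override public long handshake() {
        // Only the first attempt after the flag is raised gets parked.
        if (delayFirstAttempt.compareAndSet(true, false))
            LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(5)); // simulate a slow read from the socket

        return delegate.handshake();
    }
}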
use of org.apache.ignite.internal.processors.resource.DependencyResolver in project ignite by apache.
the class MovingPartitionIsEvictedDuringClearingTest method testMovingToEvicted.
/**
* @throws Exception If failed.
*/
@Test
public void testMovingToEvicted() throws Exception {
IgniteEx crd = startGrids(3);
crd.cluster().state(ClusterState.ACTIVE);
final int evictingPart = evictingPartitionsAfterJoin(grid(2), grid(2).cache(DEFAULT_CACHE_NAME), 1).get(0);
final int cnt = 1_100;
final int delta = 2_000;
final int rmv = 1_500;
loadDataToPartition(evictingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, cnt, 0, 3);
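// Checkpoint, so the node being stopped keeps an outdated partition copy on disk.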
forceCheckpoint();
stopGrid(2);
loadDataToPartition(evictingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, delta, cnt, 3);
// Removal required for triggering full rebalancing.
List<Integer> clearKeys = partitionKeys(grid(0).cache(DEFAULT_CACHE_NAME), evictingPart, rmv, cnt);
for (Integer clearKey : clearKeys) grid(0).cache(DEFAULT_CACHE_NAME).remove(clearKey);
CountDownLatch lock = new CountDownLatch(1);
CountDownLatch unlock = new CountDownLatch(1);
// Start node and delay preloading in the middle of partition clearing.
IgniteEx g2 = startGrid(2, new DependencyResolver() {
@Override
public <T> T resolve(T instance) {
if (instance instanceof GridDhtPartitionTopologyImpl) {
GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl) instance;
top.partitionFactory(new GridDhtPartitionTopologyImpl.PartitionFactory() {
@Override
public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
return id == evictingPart ? new GridDhtLocalPartitionSyncEviction(ctx, grp, id, recovery, 2, lock, unlock) : new GridDhtLocalPartition(ctx, grp, id, recovery);
}
});
}
return instance;
}
});
assertTrue(U.await(lock, GridDhtLocalPartitionSyncEviction.TIMEOUT, TimeUnit.MILLISECONDS));
startGrid(4);
resetBaselineTopology();
awaitPartitionMapExchange();
// Give some time for partition state messages to process.
doSleep(3_000);
// Finish clearing.
unlock.countDown();
awaitPartitionMapExchange();
// Partition will remain in the RENTING state until the next exchange.
assertEquals(RENTING, g2.cachex(DEFAULT_CACHE_NAME).context().topology().localPartition(evictingPart).state());
validadate(cnt + delta - rmv);
stopGrid(2);
startGrid(2);
awaitPartitionMapExchange(true, true, null);
validadate(cnt + delta - rmv);
}
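The validadate(int) helper called twice above is defined elsewhere in MovingPartitionIsEvictedDuringClearingTest. Judging by the equivalent inline loop in the next snippet, a plausible, purely illustrative shape is:
// Hypothetical sketch of the validadate(int) helper (the actual body is not shown
// on this page): every node should observe the same number of entries.
private void validadate(int expSize) {
    for (Ignite node : G.allGrids())
        assertEquals(expSize, node.cache(DEFAULT_CACHE_NAME).size());
}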
use of org.apache.ignite.internal.processors.resource.DependencyResolver in project ignite by apache.
the class PreloadingRestartWhileClearingPartitionTest method testPreloadingRestart.
/**
* @throws Exception If failed.
*/
@Test
public void testPreloadingRestart() throws Exception {
IgniteEx crd = startGrids(3);
crd.cluster().state(ClusterState.ACTIVE);
final int clearingPart = 0;
final int cnt = 1_100;
final int delta = 2_000;
final int rmv = 1_500;
loadDataToPartition(clearingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, cnt, 0);
forceCheckpoint();
stopGrid(2);
loadDataToPartition(clearingPart, getTestIgniteInstanceName(0), DEFAULT_CACHE_NAME, delta, cnt);
// Removal required for triggering full rebalancing.
List<Integer> clearKeys = partitionKeys(grid(0).cache(DEFAULT_CACHE_NAME), clearingPart, rmv, cnt);
for (Integer clearKey : clearKeys) grid(0).cache(DEFAULT_CACHE_NAME).remove(clearKey);
CountDownLatch lock = new CountDownLatch(1);
CountDownLatch unlock = new CountDownLatch(1);
// Start node and delay preloading in the middle of partition clearing.
IgniteEx g2 = startGrid(2, new DependencyResolver() {
@Override
public <T> T resolve(T instance) {
if (instance instanceof GridDhtPartitionTopologyImpl) {
GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl) instance;
top.partitionFactory(new GridDhtPartitionTopologyImpl.PartitionFactory() {
@Override
public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
return id == clearingPart ? new GridDhtLocalPartitionSyncEviction(ctx, grp, id, recovery, 1, lock, unlock) : new GridDhtLocalPartition(ctx, grp, id, recovery);
}
});
}
return instance;
}
});
assertTrue(U.await(lock, GridDhtLocalPartitionSyncEviction.TIMEOUT, TimeUnit.MILLISECONDS));
// Stop supplier for clearingPart.
GridCacheContext<Object, Object> ctx = g2.cachex(DEFAULT_CACHE_NAME).context();
GridDhtPartitionDemander.RebalanceFuture rebFut = (GridDhtPartitionDemander.RebalanceFuture) ctx.preloader().rebalanceFuture();
GridDhtPreloaderAssignments assignments = U.field(rebFut, "assignments");
ClusterNode supplier = assignments.supplier(clearingPart);
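// Grab the internal future that completes when the partition finishes clearing.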
AtomicReference<GridFutureAdapter<?>> ref = U.field(ctx.topology().localPartition(clearingPart), "finishFutRef");
GridFutureAdapter clearFut = ref.get();
assertFalse(clearFut.isDone());
grid(supplier).close();
doSleep(1000);
unlock.countDown();
awaitPartitionMapExchange(true, true, null);
assertPartitionsSame(idleVerify(grid(2), DEFAULT_CACHE_NAME));
for (Ignite grid : G.allGrids()) assertEquals(cnt + delta - rmv, grid.cache(DEFAULT_CACHE_NAME).size());
}
use of org.apache.ignite.internal.processors.resource.DependencyResolver in project ignite by apache.
the class PartitionEvictionOrderTest method testSyncCachesEvictedAtFirst.
/**
* Tests that {@link CacheRebalanceMode#SYNC} caches are evicted first.
*/
@Test
@WithSystemProperty(key = IgniteSystemProperties.IGNITE_EVICTION_PERMITS, value = "1")
@WithSystemProperty(key = IGNITE_PDS_WAL_REBALANCE_THRESHOLD, value = "500000")
public void testSyncCachesEvictedAtFirst() throws Exception {
IgniteEx node0 = startGrid(0);
node0.cluster().state(ACTIVE);
IgniteEx node1 = startGrid(1);
node0.cluster().setBaselineTopology(node1.cluster().topologyVersion());
GridCacheAdapter<Object, Object> utilCache0 = grid(0).context().cache().internalCache(CU.UTILITY_CACHE_NAME);
IgniteCache<Object, Object> cache = node0.getOrCreateCache(DEFAULT_CACHE_NAME);
for (int i = 0; i < 1000; i++) {
utilCache0.put(i, i);
cache.put(i, i);
}
awaitPartitionMapExchange();
stopGrid(0);
GridCacheAdapter<Object, Object> utilCache1 = grid(1).context().cache().internalCache(CU.UTILITY_CACHE_NAME);
IgniteInternalCache<Object, Object> cache2 = grid(1).context().cache().cache(DEFAULT_CACHE_NAME);
for (int i = 0; i < 2000; i++) {
try {
cache2.put(i, i + 1);
utilCache1.put(i, i + 1);
} catch (IgniteCheckedException e) {
e.printStackTrace();
}
}
List<T2<Integer, Integer>> evictionOrder = Collections.synchronizedList(new ArrayList<>());
TestDependencyResolver rslvr = new TestDependencyResolver(new DependencyResolver() {
@Override
public <T> T resolve(T instance) {
if (instance instanceof GridDhtPartitionTopologyImpl) {
GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl) instance;
top.partitionFactory((ctx, grp, id, recovery) -> new GridDhtLocalPartition(ctx, grp, id, recovery) {
@Override
public long clearAll(EvictionContext evictionCtx) throws NodeStoppingException {
evictionOrder.add(new T2<>(grp.groupId(), id));
return super.clearAll(evictionCtx);
}
});
}
return instance;
}
});
startGrid(0, rslvr);
awaitPartitionMapExchange(true, true, null);
assertEquals(utilCache0.affinity().partitions() + grid(0).cachex(DEFAULT_CACHE_NAME).affinity().partitions(), evictionOrder.size());
for (int i = 0; i < utilCache0.affinity().partitions(); i++) assertEquals(CU.UTILITY_CACHE_GROUP_ID, evictionOrder.get(i).get1().intValue());
}
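A complementary assertion could be appended at the end of the test above (illustrative, not part of the original): the trailing entries of evictionOrder should belong to the default cache's group, confirming that all utility (SYNC) cache partitions were cleared before any others.
// Illustrative continuation, not in the original test.
int dfltGrpId = grid(0).cachex(DEFAULT_CACHE_NAME).context().groupId();

for (int i = utilCache0.affinity().partitions(); i < evictionOrder.size(); i++)
    assertEquals(dfltGrpId, evictionOrder.get(i).get1().intValue());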
use of org.apache.ignite.internal.processors.resource.DependencyResolver in project ignite by apache.
the class BlockedEvictionsTest method testOperationDuringEviction.
/**
* @param persistence {@code True} to use persistence.
* @param mode Mode: <ul><li>0 - block before clearing start</li>
* <li>1 - block in the middle of clearing</li></ul>
* @param r A runnable to run while eviction is blocked.
* @throws Exception If failed.
*/
protected void testOperationDuringEviction(boolean persistence, int mode, Runnable r) throws Exception {
this.persistence = persistence;
AtomicInteger holder = new AtomicInteger();
CountDownLatch l1 = new CountDownLatch(1);
CountDownLatch l2 = new CountDownLatch(1);
IgniteEx g0 = startGrid(0, new DependencyResolver() {
@Override
public <T> T resolve(T instance) {
if (instance instanceof GridDhtPartitionTopologyImpl) {
GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl) instance;
top.partitionFactory(new GridDhtPartitionTopologyImpl.PartitionFactory() {
@Override
public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
return new GridDhtLocalPartitionSyncEviction(ctx, grp, id, recovery, mode, l1, l2) {
/** {@inheritDoc} */
@Override
protected void sync() {
if (holder.get() == id)
super.sync();
}
};
}
});
}
return instance;
}
});
startGrid(1);
if (persistence)
g0.cluster().state(ClusterState.ACTIVE);
awaitPartitionMapExchange(true, true, null);
IgniteCache<Object, Object> cache = g0.getOrCreateCache(cacheConfiguration());
List<Integer> allEvicting = evictingPartitionsAfterJoin(g0, cache, 1024);
int p0 = allEvicting.get(0);
holder.set(p0);
final int cnt = 5_000;
List<Integer> keys = partitionKeys(g0.cache(DEFAULT_CACHE_NAME), p0, cnt, 0);
try (IgniteDataStreamer<Object, Object> ds = g0.dataStreamer(DEFAULT_CACHE_NAME)) {
for (Integer key : keys) ds.addData(key, key);
}
IgniteEx joining = startGrid(2);
if (persistence)
resetBaselineTopology();
assertTrue(U.await(l1, 30_000, TimeUnit.MILLISECONDS));
r.run();
l2.countDown();
}
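Concrete tests in BlockedEvictionsTest supply their own runnables to this helper; a hypothetical caller might look like this:
// Hypothetical usage of the helper above (mode 0 = block before clearing starts):
// the supplied operation must complete even while eviction is blocked.
@Test
public void testCacheAccessDuringEviction() throws Exception {
    testOperationDuringEviction(true, 0, () -> assertNotNull(grid(0).cache(DEFAULT_CACHE_NAME)));
}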