use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.PartitionsExchangeAware in project ignite by apache.
the class EagerTtlTest method testOneNodeRestartWithTtlCache.
/**
* Checks restart of a node with a TTL cache.
*
* @throws Exception If failed.
*/
@Test
public void testOneNodeRestartWithTtlCache() throws Exception {
eagerTtl = true;
IgniteEx ignite = startGrid(0);
ignite.cluster().state(ClusterState.ACTIVE);
IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME);
for (int i = 0; i < ENTRIES; i++) cache.put(i, i);
ignite.close();
LogListener assertListener = LogListener.matches(ASSERTION_ERR).build();
listeningLog.registerListener(assertListener);
ignite = startGrid(0);
CountDownLatch exchangeHangLatch = new CountDownLatch(1);
ignite.context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {
@Override
public void onInitBeforeTopologyLock(GridDhtPartitionsExchangeFuture fut) {
try {
exchangeHangLatch.await();
} catch (InterruptedException e) {
log.error("Interrupted of waiting latch", e);
fail(e.getMessage());
}
}
});
IgniteInternalFuture<?> activeFut = GridTestUtils.runAsync(() -> ignite(0).cluster().state(ClusterState.ACTIVE));
assertFalse(activeFut.isDone());
assertFalse(GridTestUtils.waitForCondition(assertListener::check, 2_000));
exchangeHangLatch.countDown();
activeFut.get();
awaitPartitionMapExchange();
}
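All five examples on this page hook the same extension point. For orientation, here is a minimal sketch of the listener interface as defined in the Ignite sources (four no-op default callbacks; the tests on this page override the two init-phase hooks):
public interface PartitionsExchangeAware {
    /** Called on exchange init, before the exchange acquires the topology lock. */
    default void onInitBeforeTopologyLock(GridDhtPartitionsExchangeFuture fut) {}
    /** Called on exchange init, after the topology lock is acquired. */
    default void onInitAfterTopologyLock(GridDhtPartitionsExchangeFuture fut) {}
    /** Called on exchange completion, before the topology lock is released. */
    default void onDoneBeforeTopologyUnlock(GridDhtPartitionsExchangeFuture fut) {}
    /** Called on exchange completion, after the topology lock is released. */
    default void onDoneAfterTopologyUnlock(GridDhtPartitionsExchangeFuture fut) {}
}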
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.PartitionsExchangeAware in project ignite by apache.
the class IgniteClusterSnapshotSelfTest method testConsistentClusterSnapshotLoadNewTopology.
/**
* Take snapshot from the whole cluster and check snapshot consistency when the
* cluster tx load starts on a new topology version.
* Note: Client nodes and server nodes not in baseline topology must not be affected.
*
* @throws Exception If fails.
*/
@Test
public void testConsistentClusterSnapshotLoadNewTopology() throws Exception {
int grids = 3;
String snpName = "backup23012020";
AtomicInteger atKey = new AtomicInteger(CACHE_KEYS_RANGE);
AtomicInteger txKey = new AtomicInteger(CACHE_KEYS_RANGE);
IgniteEx ignite = startGrids(grids);
startClientGrid();
ignite.cluster().baselineAutoAdjustEnabled(false);
ignite.cluster().state(ACTIVE);
ignite.events().localListen(e -> locEvts.add(e.type()), EVTS_CLUSTER_SNAPSHOT);
// Start node not in baseline.
IgniteEx notBltIgnite = startGrid(grids);
File locSnpDir = snp(notBltIgnite).snapshotLocalDir(SNAPSHOT_NAME);
String notBltDirName = folderName(notBltIgnite);
IgniteCache<Integer, Integer> atCache = ignite.createCache(atomicCcfg);
for (int idx = 0; idx < CACHE_KEYS_RANGE; idx++) {
atCache.put(atKey.incrementAndGet(), -1);
ignite.cache(DEFAULT_CACHE_NAME).put(txKey.incrementAndGet(), -1);
}
forceCheckpoint();
CountDownLatch loadLatch = new CountDownLatch(1);
ignite.context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {
/**
* {@inheritDoc}
*/
@Override
public void onInitBeforeTopologyLock(GridDhtPartitionsExchangeFuture fut) {
if (fut.firstEvent().type() != EVT_DISCOVERY_CUSTOM_EVT)
return;
// First discovery custom event will be a snapshot operation.
assertTrue(isSnapshotOperation(fut.firstEvent()));
assertTrue("Snapshot must use pme-free exchange", fut.context().exchangeFreeSwitch());
}
/**
* {@inheritDoc}
*/
@Override
public void onInitAfterTopologyLock(GridDhtPartitionsExchangeFuture fut) {
if (fut.firstEvent().type() != EVT_DISCOVERY_CUSTOM_EVT)
return;
DiscoveryCustomMessage msg = ((DiscoveryCustomEvent) fut.firstEvent()).customMessage();
assertNotNull(msg);
if (msg instanceof SnapshotDiscoveryMessage)
loadLatch.countDown();
}
});
// Start cache load.
IgniteInternalFuture<Long> loadFut = GridTestUtils.runMultiThreadedAsync(() -> {
try {
U.await(loadLatch);
while (!Thread.currentThread().isInterrupted()) {
ThreadLocalRandom rnd = ThreadLocalRandom.current();
int atIdx = rnd.nextInt(grids);
// Zero out the sign bit.
grid(atIdx).cache(atomicCcfg.getName()).put(atKey.incrementAndGet(), rnd.nextInt() & Integer.MAX_VALUE);
int txIdx = rnd.nextInt(grids);
grid(txIdx).cache(DEFAULT_CACHE_NAME).put(txKey.incrementAndGet(), rnd.nextInt() & Integer.MAX_VALUE);
}
} catch (IgniteInterruptedCheckedException e) {
throw new RuntimeException(e);
}
}, 3, "cache-put-");
try {
IgniteFuture<Void> fut = ignite.snapshot().createSnapshot(snpName);
U.await(loadLatch, 10, TimeUnit.SECONDS);
fut.get();
waitForEvents(EVT_CLUSTER_SNAPSHOT_STARTED, EVT_CLUSTER_SNAPSHOT_FINISHED);
} finally {
loadFut.cancel();
}
// The cluster could be deactivated, but we must also test snapshot restore when binary recovery has occurred.
stopAllGrids();
assertTrue("Snapshot directory must be empty for node not in baseline topology: " + notBltDirName, !searchDirectoryRecursively(locSnpDir.toPath(), notBltDirName).isPresent());
IgniteEx snpIg0 = startGridsFromSnapshot(grids, snpName);
assertEquals("The number of all (primary + backup) cache keys mismatch for cache: " + DEFAULT_CACHE_NAME, CACHE_KEYS_RANGE, snpIg0.cache(DEFAULT_CACHE_NAME).size());
assertEquals("The number of all (primary + backup) cache keys mismatch for cache: " + atomicCcfg.getName(), CACHE_KEYS_RANGE, snpIg0.cache(atomicCcfg.getName()).size());
snpIg0.cache(DEFAULT_CACHE_NAME).query(new ScanQuery<>(null)).forEach(e -> assertTrue("Snapshot must contain only negative values " + "[cache=" + DEFAULT_CACHE_NAME + ", entry=" + e + ']', (Integer) e.getValue() < 0));
snpIg0.cache(atomicCcfg.getName()).query(new ScanQuery<>(null)).forEach(e -> assertTrue("Snapshot must contain only negative values " + "[cache=" + atomicCcfg.getName() + ", entry=" + e + ']', (Integer) e.getValue() < 0));
}
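A note on why the restored caches may contain only negative values: the key counters partition the key space, and the snapshot's consistency cut excludes the concurrent writes. A worked illustration, assuming for concreteness that CACHE_KEYS_RANGE = 1000 (the constant is defined elsewhere in the test class):
// Initial load, before the snapshot (keys 1001..2000 in each cache):
//   atCache.put(atKey.incrementAndGet(), -1);     // atomic cache, value -1
//   txCache.put(txKey.incrementAndGet(), -1);     // tx cache, value -1
// Concurrent load, started only once the snapshot exchange begins (keys 2001, 2002, ...):
//   put(key, rnd.nextInt() & Integer.MAX_VALUE);  // always >= 0 (sign bit cleared)
// Writes applied after the snapshot's pme-free switch are not captured, so each restored
// cache must hold exactly CACHE_KEYS_RANGE entries, all with negative values.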
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.PartitionsExchangeAware in project ignite by apache.
the class IgniteClusterSnapshotSelfTest method testClusterSnapshotCoordinatorStopped.
/**
* @throws Exception If fails.
*/
@Test
public void testClusterSnapshotCoordinatorStopped() throws Exception {
CountDownLatch block = new CountDownLatch(1);
startGridsWithCache(3, dfltCacheCfg, CACHE_KEYS_RANGE);
startClientGrid(3);
awaitPartitionMapExchange();
for (IgniteEx grid : Arrays.asList(grid(1), grid(2))) {
grid.context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {
/**
* {@inheritDoc}
*/
@Override
public void onInitBeforeTopologyLock(GridDhtPartitionsExchangeFuture fut) {
try {
block.await(TIMEOUT, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
fail("Must not catch exception here: " + e.getMessage());
}
}
});
}
for (Ignite grid : G.allGrids()) {
TestRecordingCommunicationSpi.spi(grid).blockMessages((node, msg) -> {
if (msg instanceof GridDhtPartitionsSingleMessage)
return ((GridDhtPartitionsAbstractMessage) msg).exchangeId() != null;
return false;
});
}
IgniteFuture<Void> fut = grid(1).snapshot().createSnapshot(SNAPSHOT_NAME);
stopGrid(0);
block.countDown();
// Two exchanges happen here: the snapshot operation and the node-left (pme-free) exchange.
// Neither of them requires sending partition messages between nodes.
assertFalse("Pme-free switch doesn't expect messaging exchanging between nodes", GridTestUtils.waitForCondition(() -> {
boolean hasMsgs = false;
for (Ignite g : G.allGrids()) hasMsgs |= TestRecordingCommunicationSpi.spi(g).hasBlockedMessages();
return hasMsgs;
}, 5_000));
assertThrowsWithCause((Callable<Object>) fut::get, IgniteException.class);
List<GridDhtPartitionsExchangeFuture> exchFuts = grid(1).context().cache().context().exchange().exchangeFutures();
assertFalse("Exchanges cannot be empty due to snapshot and node left happened", exchFuts.isEmpty());
for (GridDhtPartitionsExchangeFuture exch : exchFuts) {
assertTrue("Snapshot and node left events must keep `rebalanced` state" + exch, exch.rebalanced());
}
}
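The blockMessages idiom above follows the standard TestRecordingCommunicationSpi lifecycle. A minimal sketch of that lifecycle for reference (method names taken from the Ignite test framework; this particular test instead asserts that nothing gets captured at all):
TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(grid);
// Capture PME single messages instead of delivering them to the coordinator.
spi.blockMessages((node, msg) -> msg instanceof GridDhtPartitionsSingleMessage);
// ... trigger an exchange here ...
spi.waitForBlocked(); // parks until at least one matching message has been captured
spi.stopBlock();      // releases the captured messages so the exchange can finish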
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.PartitionsExchangeAware in project ignite by apache.
the class GridCacheVersionGenerationWithCacheStorageTest method checkGridCacheVersionsGenerationOrder.
/**
* <ol>
* <li>Start a node.</li>
* <li>Start one more node asynchronously.</li>
* <li>With the help of {@link PartitionsExchangeAware}, slow down the exchange in order to run some cache operations
* after discovery message processing but before the exchange takes the topology lock.</li>
* </ol>
*
* Ensures that the topology version of each entry and the current topology version are equal after the exchange
* and the operations have finished.
*
* @param actions Actions to check: get, put, etc.
* @param keySetToCheck Keys whose entry versions are verified.
* @throws Exception If failed.
*/
private void checkGridCacheVersionsGenerationOrder(Consumer<IgniteEx> actions, Set<Integer> keySetToCheck) throws Exception {
IgniteEx ign = startGrid(0);
ign.context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {
@Override
public void onInitBeforeTopologyLock(GridDhtPartitionsExchangeFuture fut) {
try {
latch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
});
IgniteInternalFuture<?> newNodeJoinFut = GridTestUtils.runAsync(() -> startGrid(1));
waitForCondition(() -> (ignite(0).context().discovery().topologyVersion() == 2), 10_000);
assertEquals(2, ignite(0).context().discovery().topologyVersion());
assertEquals(1, ignite(0).cachex(DEFAULT_CACHE_NAME).context().topology().readyTopologyVersion().topologyVersion());
actions.accept(ign);
latch.countDown();
newNodeJoinFut.get();
// GridCacheVersion encodes its topology version as the cluster topology version plus an offset
// derived from the grid start time (seconds since TOP_VER_BASE_TIME); the "+ 1" below therefore
// expects the entries to be versioned on topology version 1, i.e. before the join exchange completed.
long expTop = (ign.context().cache().cache(DEFAULT_CACHE_NAME).context().kernalContext().discovery().gridStartTime() - TOP_VER_BASE_TIME) / 1000 + 1;
ign.cache(DEFAULT_CACHE_NAME).getEntries(keySetToCheck).stream().map(CacheEntry::version).forEach(v -> assertEquals(expTop, ((GridCacheVersion) v).topologyVersion()));
}
use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.PartitionsExchangeAware in project ignite by apache.
the class FullHistRebalanceOnClientStopTest method testFullRebalanceNotTriggeredWhenClientNodeStopsDuringPme.
/**
* @throws Exception if failed.
*/
@Test
@WithSystemProperty(key = IGNITE_PDS_WAL_REBALANCE_THRESHOLD, value = "0")
public void testFullRebalanceNotTriggeredWhenClientNodeStopsDuringPme() throws Exception {
startGrids(2);
IgniteEx ig0 = grid(0);
ig0.cluster().active(true);
IgniteCache<Object, Object> cache = ig0.cache(CACHE_NAME);
startClientGrid(5);
final int entryCnt = PARTS_CNT * 1000;
final int preloadEntryCnt = PARTS_CNT * 1001;
for (int i = 0; i < preloadEntryCnt; i++) cache.put(i, i);
forceCheckpoint();
stopGrid(1);
for (int i = 0; i < entryCnt; i++) cache.put(i, i + 100);
forceCheckpoint();
final CountDownLatch exchangeLatch = new CountDownLatch(1);
final CountDownLatch hangingPmeStartedLatch = new CountDownLatch(1);
ig0.context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {
@Override
public void onInitAfterTopologyLock(GridDhtPartitionsExchangeFuture fut) {
try {
hangingPmeStartedLatch.countDown();
exchangeLatch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
});
IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
try {
startGrid(1);
awaitPartitionMapExchange();
} catch (Exception e) {
e.printStackTrace();
}
});
IgniteInternalFuture clientStopFut = GridTestUtils.runAsync(() -> {
try {
hangingPmeStartedLatch.await();
stopGrid(5);
} catch (Exception e) {
e.printStackTrace();
}
}, "client-starter");
assertFalse(fut.isDone());
exchangeLatch.countDown();
clientStopFut.get();
fut.get();
awaitPartitionMapExchange();
boolean histRebalanceInvoked = RebalanceCheckingCommunicationSpi.histRebalances();
boolean fullRebalanceInvoked = RebalanceCheckingCommunicationSpi.fullRebalances();
RebalanceCheckingCommunicationSpi.cleanup();
assertTrue("Historical rebalance hasn't been invoked.", histRebalanceInvoked);
assertFalse("Full rebalance has been invoked.", fullRebalanceInvoked);
}
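RebalanceCheckingCommunicationSpi is a helper defined inside this test class. A plausible sketch of how such an SPI can tell historical from full rebalance, assuming the hasHistorical()/hasFull() accessors of the demanded-partitions map; the flag names are illustrative (consult the actual test class for the real implementation):
// Inside sendMessage(...) of a TcpCommunicationSpi subclass, after unwrapping the GridIoMessage envelope:
Message payload = ((GridIoMessage)msg).message();
if (payload instanceof GridDhtPartitionDemandMessage) {
    GridDhtPartitionDemandMessage demandMsg = (GridDhtPartitionDemandMessage)payload;
    if (demandMsg.partitions().hasHistorical())
        histRebalanceTriggered = true; // some partitions are demanded from WAL history
    if (demandMsg.partitions().hasFull())
        fullRebalanceTriggered = true; // some partitions are demanded in full
}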