Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
The class CacheRentingStateRepairTest, method testRentingStateRepairAfterRestart.
/**
 * Tests that a partition is properly evicted when the node is restarted in the middle of the eviction.
 */
@Test
public void testRentingStateRepairAfterRestart() throws Exception {
    try {
        IgniteEx g0 = startGrid(0);

        g0.cluster().baselineAutoAdjustEnabled(false);

        startGrid(1);

        g0.cluster().active(true);

        awaitPartitionMapExchange();

        List<Integer> parts = evictingPartitionsAfterJoin(g0, g0.cache(DEFAULT_CACHE_NAME), 20);

        int delayEvictPart = parts.get(0);

        // Find a key that maps to the partition whose eviction will be delayed.
        int k = 0;

        while (g0.affinity(DEFAULT_CACHE_NAME).partition(k) != delayEvictPart)
            k++;

        g0.cache(DEFAULT_CACHE_NAME).put(k, k);

        GridDhtPartitionTopology top = dht(g0.cache(DEFAULT_CACHE_NAME)).topology();

        GridDhtLocalPartition part = top.localPartition(delayEvictPart);

        assertNotNull(part);

        // Prevent eviction.
        part.reserve();

        startGrid(2);

        g0.cluster().setBaselineTopology(3);
        // Wait until all partitions except the reserved one are evicted.
        assertTrue("Failed to wait for partition eviction: reservedPart=" + part.id() +
            ", otherParts=" + top.localPartitions().stream()
                .map(p -> "[id=" + p.id() + ", state=" + p.state() + ']')
                .collect(Collectors.toList()),
            waitForCondition(() -> {
                for (int i = 0; i < parts.size(); i++) {
                    if (delayEvictPart == i)
                        // Skip the reserved partition.
                        continue;

                    Integer p = parts.get(i);

                    @Nullable GridDhtLocalPartition locPart = top.localPartition(p);

                    assertNotNull(locPart);

                    if (locPart.state() != GridDhtPartitionState.EVICTED)
                        return false;
                }

                return true;
            }, 5000));
        /*
         * Force the RENTING state before the node stops.
         * The same could also be achieved by stopping the node right after the RENTING state is set.
         */
        part.setState(GridDhtPartitionState.RENTING);

        assertEquals(GridDhtPartitionState.RENTING, part.state());
        stopGrid(0);

        g0 = startGrid(0);

        awaitPartitionMapExchange();

        part = dht(g0.cache(DEFAULT_CACHE_NAME)).topology().localPartition(delayEvictPart);

        assertNotNull(part);

        final GridDhtLocalPartition finalPart = part;

        CountDownLatch evictLatch = new CountDownLatch(1);

        part.rent().listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
            @Override public void apply(IgniteInternalFuture<?> fut) {
                assertEquals(GridDhtPartitionState.EVICTED, finalPart.state());

                evictLatch.countDown();
            }
        });

        assertTrue("Failed to wait for partition eviction after restart",
            evictLatch.await(5_000, TimeUnit.MILLISECONDS));

        awaitPartitionMapExchange(true, true, null);
    }
    finally {
        stopAllGrids();
    }
}
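Since IgniteInClosure declares a single method, apply(E), the anonymous listener above can equivalently be registered as a lambda. A minimal, behavior-equivalent sketch of the same registration, assuming the same part, finalPart and evictLatch variables are in scope:

// Sketch only: lambda form of the anonymous IgniteInClosure listener above.
part.rent().listen(fut -> {
    // The partition is expected to be EVICTED once the rent future completes.
    assertEquals(GridDhtPartitionState.EVICTED, finalPart.state());

    evictLatch.countDown();
});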
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
The class IgniteWalRebalanceTest, method testSwitchHistoricalRebalanceToFull.
/**
 * Tests that the demander switches to full rebalance if the previously chosen supplier for a group has failed
 * to perform historical rebalance due to an unexpected error.
 *
 * @param corruptWalClo Closure that corrupts WAL iteration on the supplier node.
 * @param clientClo Closure that is called after the demand message is sent and before the supply message is
 *      received. Returns {@code true} if it is assumed that the rebalancing from the second supplier should be
 *      reassigned.
 * @throws Exception If failed.
 */
public void testSwitchHistoricalRebalanceToFull(
    IgniteInClosure<IgniteEx> corruptWalClo,
    IgniteCallable<Boolean> clientClo
) throws Exception {
    backups = 3;

    IgniteEx supplier1 = startGrid(0);
    IgniteEx supplier2 = startGrid(1);
    IgniteEx demander = startGrid(2);

    supplier1.cluster().state(ACTIVE);

    String supplier1Name = supplier1.localNode().consistentId().toString();
    String supplier2Name = supplier2.localNode().consistentId().toString();
    String demanderName = demander.localNode().consistentId().toString();

    String cacheName1 = "test-cache-1";
    String cacheName2 = "test-cache-2";

    // Cache resides on the supplier1 and demander nodes.
    IgniteCache<Integer, IndexedObject> c1 = supplier1.getOrCreateCache(
        new CacheConfiguration<Integer, IndexedObject>(cacheName1)
            .setBackups(backups)
            .setAffinity(new RendezvousAffinityFunction(false, PARTS_CNT))
            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
            .setRebalanceOrder(10)
            .setNodeFilter(n -> n.consistentId().equals(supplier1Name) || n.consistentId().equals(demanderName)));

    // Cache resides on the supplier2 and demander nodes.
    IgniteCache<Integer, IndexedObject> c2 = supplier1.getOrCreateCache(
        new CacheConfiguration<Integer, IndexedObject>(cacheName2)
            .setBackups(backups)
            .setAffinity(new RendezvousAffinityFunction(false, PARTS_CNT))
            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
            .setRebalanceOrder(20)
            .setNodeFilter(n -> n.consistentId().equals(supplier2Name) || n.consistentId().equals(demanderName)));
    // Fill initial data.
    final int entryCnt = PARTS_CNT * 200;
    final int preloadEntryCnt = PARTS_CNT * 400;

    int val = 0;

    for (int k = 0; k < preloadEntryCnt; k++) {
        c1.put(k, new IndexedObject(val++));
        c2.put(k, new IndexedObject(val++));
    }

    forceCheckpoint();

    stopGrid(2);

    // Rewrite the data to trigger a further rebalance.
    for (int i = 0; i < entryCnt; i++) {
        c1.put(i, new IndexedObject(val++));
        c2.put(i, new IndexedObject(val++));
    }
    // Delay the rebalance process for the specified groups.
    blockMsgPred = (node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage)msg;

            return msg0.groupId() == CU.cacheId(cacheName1) || msg0.groupId() == CU.cacheId(cacheName2);
        }

        return false;
    };
    Queue<RecordedDemandMessage> recordedMsgs = new ConcurrentLinkedQueue<>();

    // Record demand messages for the specified groups.
    recordMsgPred = (node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage)msg;

            if (msg0.groupId() == CU.cacheId(cacheName1) || msg0.groupId() == CU.cacheId(cacheName2)) {
                recordedMsgs.add(new RecordedDemandMessage(node.id(), msg0.groupId(),
                    msg0.partitions().hasFull(), msg0.partitions().hasHistorical()));
            }
        }

        return false;
    };
    // Delay the rebalance process for the specified group from supplier2.
    TestRecordingCommunicationSpi supplierSpi2 = TestRecordingCommunicationSpi.spi(supplier2);

    supplierSpi2.blockMessages((node, msg) -> {
        if (msg instanceof GridDhtPartitionSupplyMessage) {
            GridDhtPartitionSupplyMessage msg0 = (GridDhtPartitionSupplyMessage)msg;

            return node.consistentId().equals(demanderName) && msg0.groupId() == CU.cacheId(cacheName2);
        }

        return false;
    });

    // Corrupt the WAL on supplier1.
    corruptWalClo.apply(supplier1);
    // Trigger the rebalance process from the suppliers.
    IgniteEx restartedDemander = startGrid(2);

    recordMsgPred = null;
    blockMsgPred = null;

    TestRecordingCommunicationSpi demanderSpi = TestRecordingCommunicationSpi.spi(grid(2));

    // Wait until the demander starts historical rebalancing.
    demanderSpi.waitForBlocked();

    final IgniteInternalFuture<Boolean> preloadFut1 =
        restartedDemander.cachex(cacheName1).context().group().preloader().rebalanceFuture();
    final IgniteInternalFuture<Boolean> preloadFut2 =
        restartedDemander.cachex(cacheName2).context().group().preloader().rebalanceFuture();

    boolean rebalanceReassigned = clientClo.call();

    // Unblock the messages and start tracking demand and supply messages.
    demanderSpi.stopBlock();

    // Wait until the rebalancing is cancelled.
    GridTestUtils.waitForCondition(
        () -> preloadFut1.isDone() && (!rebalanceReassigned || preloadFut2.isDone()),
        getTestTimeout());

    Assert.assertEquals("Rebalance should be cancelled on demander node: " + preloadFut1,
        false, preloadFut1.get());
    Assert.assertEquals("Rebalance should be cancelled on demander node: " + preloadFut2,
        false, rebalanceReassigned && preloadFut2.get());
    // Unblock supply messages from supplier2.
    supplierSpi2.stopBlock();

    awaitPartitionMapExchange(true, true, null);

    // Check data consistency.
    assertPartitionsSame(idleVerify(restartedDemander, cacheName2, cacheName1));

    // Check that the historical rebalance switched to full for supplier1 and stayed historical for supplier2.
    IgnitePredicate<RecordedDemandMessage> histPred = (msg) -> msg.hasHistorical() && !msg.hasFull();
    IgnitePredicate<RecordedDemandMessage> fullPred = (msg) -> !msg.hasHistorical() && msg.hasFull();
    // Supplier1.
    List<RecordedDemandMessage> demandMsgsForSupplier1 = recordedMsgs.stream()
        .filter(msg -> msg.groupId() == CU.cacheId(cacheName1))
        .filter(msg -> msg.hasFull() || msg.hasHistorical())
        .collect(toList());

    assertEquals("There should be only two demand messages.", 2, demandMsgsForSupplier1.size());
    assertTrue("The first message should require historical rebalance [msg=" + demandMsgsForSupplier1.get(0) + ']',
        histPred.apply(demandMsgsForSupplier1.get(0)));
    assertTrue("The second message should require full rebalance [msg=" + demandMsgsForSupplier1.get(1) + ']',
        fullPred.apply(demandMsgsForSupplier1.get(1)));
    // Supplier2.
    List<RecordedDemandMessage> demandMsgsForSupplier2 = recordedMsgs.stream()
        .filter(msg -> msg.groupId() == CU.cacheId(cacheName2))
        .filter(msg -> msg.hasFull() || msg.hasHistorical())
        .collect(toList());

    if (rebalanceReassigned) {
        assertEquals("There should be only two demand messages.", 2, demandMsgsForSupplier2.size());
        assertTrue("Both messages should require historical rebalance [msg=" + demandMsgsForSupplier2.get(0) +
                ", msg=" + demandMsgsForSupplier2.get(1) + ']',
            histPred.apply(demandMsgsForSupplier2.get(0)) && histPred.apply(demandMsgsForSupplier2.get(1)));
    }
    else {
        assertEquals("There should be only one demand message.", 1, demandMsgsForSupplier2.size());
        assertTrue("Message should require historical rebalance [msg=" + demandMsgsForSupplier2.get(0) + ']',
            histPred.apply(demandMsgsForSupplier2.get(0)));
    }
}
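Both parameters of this helper are functional interfaces, so call sites can pass lambdas. A hedged sketch of a hypothetical caller; the test name and the closure bodies are illustrative placeholders, not the project's actual tests:

// Sketch only: a hypothetical test delegating to testSwitchHistoricalRebalanceToFull.
@Test
public void testSwitchToFullExample() throws Exception {
    testSwitchHistoricalRebalanceToFull(
        // corruptWalClo: a real test would break WAL iteration on the supplier here,
        // for example by damaging WAL segment files on that node.
        supplier -> {
            // Placeholder body.
        },
        // clientClo: runs after the demand message is sent; returning false means
        // rebalancing from the second supplier is not expected to be reassigned.
        () -> false);
}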
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
The class GridNioFilterChainSelfTest, method testChainEvents.
/**
* @throws Exception If failed.
*/
@Test
public void testChainEvents() throws Exception {
final AtomicReference<String> connectedEvt = new AtomicReference<>();
final AtomicReference<String> disconnectedEvt = new AtomicReference<>();
final AtomicReference<String> msgEvt = new AtomicReference<>();
final AtomicReference<String> idleEvt = new AtomicReference<>();
final AtomicReference<String> writeTimeoutEvt = new AtomicReference<>();
final AtomicReference<String> sndEvt = new AtomicReference<>();
final AtomicReference<String> closeEvt = new AtomicReference<>();
final AtomicReference<ByteBuffer> rcvdMsgObj = new AtomicReference<>();
final AtomicReference<Object> sndMsgObj = new AtomicReference<>();
GridNioServerListener<Object> testLsnr = new GridNioServerListenerAdapter<Object>() {
@Override
public void onConnected(GridNioSession ses) {
connectedEvt.compareAndSet(null, ses.<String>meta(OPENED_META_NAME));
}
@Override
public void onDisconnected(GridNioSession ses, @Nullable Exception e) {
disconnectedEvt.compareAndSet(null, ses.<String>meta(CLOSED_META_NAME));
}
@Override
public void onMessage(GridNioSession ses, Object msg) {
msgEvt.compareAndSet(null, ses.<String>meta(MESSAGE_RECEIVED_META_NAME));
rcvdMsgObj.compareAndSet(null, (ByteBuffer) msg);
}
@Override
public void onSessionWriteTimeout(GridNioSession ses) {
writeTimeoutEvt.compareAndSet(null, ses.<String>meta(WRITE_TIMEOUT_META_NAME));
}
@Override
public void onSessionIdleTimeout(GridNioSession ses) {
idleEvt.compareAndSet(null, ses.<String>meta(IDLE_META_NAME));
}
};
    GridNioFilterAdapter testHead = new GridNioFilterAdapter("TestHead") {
        @Override public void onSessionOpened(GridNioSession ses) throws IgniteCheckedException {
            proceedSessionOpened(ses);
        }

        @Override public void onSessionClosed(GridNioSession ses) throws IgniteCheckedException {
            proceedSessionClosed(ses);
        }

        @Override public void onExceptionCaught(GridNioSession ses, IgniteCheckedException ex) throws IgniteCheckedException {
            proceedExceptionCaught(ses, ex);
        }

        @Override public GridNioFuture<?> onSessionWrite(GridNioSession ses, Object msg, boolean fut,
            IgniteInClosure<IgniteException> ackC) {
            sndEvt.compareAndSet(null, ses.<String>meta(MESSAGE_WRITE_META_NAME));

            sndMsgObj.compareAndSet(null, msg);

            return null;
        }

        @Override public void onMessageReceived(GridNioSession ses, Object msg) throws IgniteCheckedException {
            proceedMessageReceived(ses, msg);
        }

        @Override public GridNioFuture<Boolean> onSessionClose(GridNioSession ses) {
            closeEvt.compareAndSet(null, ses.<String>meta(CLOSE_META_NAME));

            return null;
        }

        @Override public void onSessionIdleTimeout(GridNioSession ses) throws IgniteCheckedException {
            proceedSessionIdleTimeout(ses);
        }

        @Override public void onSessionWriteTimeout(GridNioSession ses) throws IgniteCheckedException {
            proceedSessionWriteTimeout(ses);
        }
    };
    GridNioFilterChain<Object> chain = new GridNioFilterChain<>(log, testLsnr, testHead,
        new AppendingFilter("A"), new AppendingFilter("B"), new AppendingFilter("C"), new AppendingFilter("D"));

    GridNioSession ses = new MockNioSession();

    ByteBuffer snd = ByteBuffer.wrap(new byte[1]);
    ByteBuffer rcvd = ByteBuffer.wrap(new byte[1]);

    chain.onSessionOpened(ses);
    chain.onSessionClosed(ses);
    chain.onMessageReceived(ses, rcvd);
    chain.onSessionIdleTimeout(ses);
    chain.onSessionWriteTimeout(ses);

    assertNull(chain.onSessionClose(ses));
    assertNull(chain.onSessionWrite(ses, snd, true, null));

    // Inbound events and outbound requests traverse the appending filters in opposite orders.
    assertEquals("DCBA", connectedEvt.get());
    assertEquals("DCBA", disconnectedEvt.get());
    assertEquals("DCBA", msgEvt.get());
    assertEquals("DCBA", idleEvt.get());
    assertEquals("DCBA", writeTimeoutEvt.get());
    assertEquals("ABCD", sndEvt.get());
    assertEquals("ABCD", closeEvt.get());

    assertSame(snd, sndMsgObj.get());
    assertSame(rcvd, rcvdMsgObj.get());
}
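The onSessionWrite hook carries an IgniteInClosure<IgniteException> acknowledgment callback as its last argument, passed as null in the test above. A minimal sketch of supplying one as a lambda; the null-on-success convention is an assumption here, not something the test asserts:

// Sketch only: an ack closure for a session write.
// Assumption: the closure is invoked with null on success and with an
// IgniteException describing the failure otherwise.
IgniteInClosure<IgniteException> ackC = e -> {
    if (e != null)
        log.error("Write failed: " + e.getMessage());
};

chain.onSessionWrite(ses, snd, true, ackC);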
Use of org.apache.ignite.lang.IgniteInClosure in project ignite by apache.
The class CacheMvccTransactionsTest, method txReadsSnapshot.
/**
* @param srvs Number of server nodes.
* @param clients Number of client nodes.
* @param cacheBackups Number of cache backups.
* @param cacheParts Number of cache partitions.
* @param readMode Read mode.
* @throws Exception If failed.
*/
private void txReadsSnapshot(
    final int srvs,
    final int clients,
    int cacheBackups,
    int cacheParts,
    ReadMode readMode
) throws Exception {
    final int ACCOUNTS = 20;
    final int ACCOUNT_START_VAL = 1000;

    final int writers = 4;
    final int readers = 4;

    final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
        @Override public void apply(IgniteCache<Object, Object> cache) {
            final IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();

            Map<Integer, MvccTestAccount> accounts = new HashMap<>();

            for (int i = 0; i < ACCOUNTS; i++)
                accounts.put(i, new MvccTestAccount(ACCOUNT_START_VAL, 1));

            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
                cache.putAll(accounts);

                tx.commit();
            }
        }
    };
    GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
        new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
            @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
                ThreadLocalRandom rnd = ThreadLocalRandom.current();

                int cnt = 0;

                while (!stop.get()) {
                    TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);

                    try {
                        IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();

                        cnt++;

                        // Pick two distinct accounts and order their ids.
                        Integer id1 = rnd.nextInt(ACCOUNTS);
                        Integer id2 = rnd.nextInt(ACCOUNTS);

                        while (id1.equals(id2))
                            id2 = rnd.nextInt(ACCOUNTS);

                        if (id1 > id2) {
                            int tmp = id1;
                            id1 = id2;
                            id2 = tmp;
                        }

                        Set<Integer> keys = new HashSet<>();

                        keys.add(id1);
                        keys.add(id2);

                        try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
                            MvccTestAccount a1;
                            MvccTestAccount a2;

                            Map<Integer, MvccTestAccount> accounts = checkAndGetAll(false, cache.cache, keys, readMode);

                            a1 = accounts.get(id1);
                            a2 = accounts.get(id2);

                            assertNotNull(a1);
                            assertNotNull(a2);

                            // Transfer one unit between the two accounts; the total sum is invariant.
                            cache.cache.put(id1, new MvccTestAccount(a1.val + 1, 1));
                            cache.cache.put(id2, new MvccTestAccount(a2.val - 1, 1));

                            tx.commit();
                        }
                        catch (CacheException ex) {
                            MvccFeatureChecker.assertMvccWriteConflict(ex);
                        }
                    }
                    finally {
                        cache.readUnlock();
                    }
                }

                info("Writer finished, updates: " + cnt);
            }
        };
    GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
        new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
            @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
                ThreadLocalRandom rnd = ThreadLocalRandom.current();

                int cnt = 0;

                while (!stop.get()) {
                    TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);

                    IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();

                    Map<Integer, MvccTestAccount> accounts = new HashMap<>();

                    try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
                        int remaining = ACCOUNTS;

                        // Read all accounts in random-sized chunks inside a single transaction.
                        do {
                            int readCnt = rnd.nextInt(remaining) + 1;

                            Set<Integer> readKeys = new TreeSet<>();

                            for (int i = 0; i < readCnt; i++)
                                readKeys.add(accounts.size() + i);

                            Map<Integer, MvccTestAccount> readRes = checkAndGetAll(false, cache.cache, readKeys, readMode);

                            assertEquals(readCnt, readRes.size());

                            accounts.putAll(readRes);

                            remaining = ACCOUNTS - accounts.size();
                        }
                        while (remaining > 0);

                        validateSum(accounts);

                        tx.commit();

                        cnt++;
                    }
                    finally {
                        cache.readUnlock();
                    }
                }

                info("Reader finished, txs: " + cnt);
            }

            /**
             * @param accounts Read accounts.
             */
            private void validateSum(Map<Integer, MvccTestAccount> accounts) {
                int sum = 0;

                for (int i = 0; i < ACCOUNTS; i++) {
                    MvccTestAccount account = accounts.get(i);

                    assertNotNull(account);

                    sum += account.val;
                }

                // The total balance must be preserved by concurrent transfers.
                assertEquals(ACCOUNTS * ACCOUNT_START_VAL, sum);
            }
        };
    readWriteTest(null, srvs, clients, cacheBackups, cacheParts, writers, readers, DFLT_TEST_TIME, null, init, writer, reader);
}
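For comparison, the anonymous init closure above collapses to a lambda with no change in behavior. A minimal sketch, assuming ACCOUNTS, ACCOUNT_START_VAL and MvccTestAccount are in scope as in the test:

// Sketch only: lambda form of the init closure above.
IgniteInClosure<IgniteCache<Object, Object>> init = cache -> {
    Map<Integer, MvccTestAccount> accounts = new HashMap<>();

    for (int i = 0; i < ACCOUNTS; i++)
        accounts.put(i, new MvccTestAccount(ACCOUNT_START_VAL, 1));

    // Seed all accounts atomically in a single pessimistic transaction.
    try (Transaction tx = cache.unwrap(Ignite.class).transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        cache.putAll(accounts);

        tx.commit();
    }
};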