Usage example of org.apache.ignite.spi.discovery.tcp.BlockTcpDiscoverySpi in the Apache Ignite project: class TxPartitionCounterStateConsistencyTest, method testPartitionConsistencyDuringRebalanceAndConcurrentUpdates_LateAffinitySwitch.
/**
 * Tests tx load concurrently with PME for switching late affinity.
 * <p>
 * Scenario: two keys tx mapped locally on late affinity topology and when mapped and prepared remotely on ideal
 * topology, first key is mapped to non-moving partition, second is mapped on moving partition.
 * <p>
 * Success: key over moving partition is prepared on new owner (chosen after late affinity switch),
 * otherwise it's possible txs are prepared on different primaries after late affinity switch.
 *
 * @throws Exception If failed.
 */
@Test
public void testPartitionConsistencyDuringRebalanceAndConcurrentUpdates_LateAffinitySwitch() throws Exception {
backups = 1;
// Discovery SPI able to block custom discovery messages. Address randomization is
// disabled via reflection (the field is not public) so the ring order stays deterministic.
customDiscoSpi = new BlockTcpDiscoverySpi().setIpFinder(IP_FINDER);
Field rndAddrsField = U.findField(BlockTcpDiscoverySpi.class, "skipAddrsRandomization");
assertNotNull(rndAddrsField);
rndAddrsField.set(customDiscoSpi, true);
// Start coordinator with custom discovery SPI.
IgniteEx crd = startGrid(0);
IgniteEx g1 = startGrid(1);
startGrid(2);
crd.cluster().baselineAutoAdjustEnabled(false);
crd.cluster().active(true);
// Same name pattern as in test configuration.
String consistentId = "node" + getTestIgniteInstanceName(3);
List<Integer> g1Keys = primaryKeys(g1.cache(DEFAULT_CACHE_NAME), 10);
List<Integer> movingFromG1 = movingKeysAfterJoin(g1, DEFAULT_CACHE_NAME, 10, null, consistentId);
// Retain only stable keys;
g1Keys.removeAll(movingFromG1);
// The key will move from grid0 to grid3.
Integer key = movingKeysAfterJoin(crd, DEFAULT_CACHE_NAME, 1, null, consistentId).get(0);
IgniteEx g3 = startGrid(3);
assertEquals(consistentId, g3.localNode().consistentId());
resetBaselineTopology();
awaitPartitionMapExchange();
// g1 must remain primary for the stable key after the baseline change.
assertTrue(crd.affinity(DEFAULT_CACHE_NAME).isPrimary(g1.localNode(), g1Keys.get(0)));
// Stop grid3 so that restarting it below (as node SERVER_NODES) triggers rebalance
// and a subsequent late affinity switch back to it.
stopGrid(3);
Ignite client = startClientGrid(CLIENT_GRID_NAME);
IgniteCache<Object, Object> cache = client.cache(DEFAULT_CACHE_NAME);
IgniteCache<Object, Object> cache2 = client.getOrCreateCache(cacheConfiguration(DEFAULT_CACHE_NAME + "2"));
// Put one key per partition.
for (int k = 0; k < partitions(); k++) {
cache.put(k, 0);
cache2.put(k, 0);
}
// Block the late-affinity-switch message (CacheAffinityChangeMessage) on the coordinator:
// the closure first rendezvouses with the test thread on the barrier, then parks until
// resumeDiscoSndLatch is released, freezing the switch PME mid-flight.
CountDownLatch resumeDiscoSndLatch = new CountDownLatch(1);
BlockTcpDiscoverySpi crdDiscoSpi = (BlockTcpDiscoverySpi) grid(0).configuration().getDiscoverySpi();
CyclicBarrier sync = new CyclicBarrier(2);
crdDiscoSpi.setClosure((node, msg) -> {
if (msg instanceof CacheAffinityChangeMessage) {
U.awaitQuiet(sync);
U.awaitQuiet(resumeDiscoSndLatch);
}
return null;
});
// Locks mapped wait.
IgniteInternalFuture fut = GridTestUtils.runAsync(() -> {
try {
startGrid(SERVER_NODES);
awaitPartitionMapExchange();
} catch (Exception e) {
fail(X.getFullStackTrace(e));
}
});
// Wait until the affinity change message is actually blocked on the coordinator.
sync.await();
// Hold near lock requests from the client so the tx maps on the pre-switch topology.
TestRecordingCommunicationSpi clientSpi = TestRecordingCommunicationSpi.spi(client);
clientSpi.blockMessages((node, msg) -> msg instanceof GridNearLockRequest);
IgniteInternalFuture txFut = GridTestUtils.runAsync(() -> {
try (Transaction tx = client.transactions().txStart()) {
Map<Integer, Integer> map = new LinkedHashMap<>();
// clientFirst=true in lockAll mapped to stable part.
map.put(g1Keys.get(0), g1Keys.get(0));
// clientFirst=false in lockAll mapped to moving part.
map.put(key, key);
cache.putAll(map);
cache2.putAll(new LinkedHashMap<>(map));
// Will start preparing in the middle of PME.
tx.commit();
}
});
IgniteInternalFuture lockFut = GridTestUtils.runAsync(() -> {
try {
// Wait for first lock request sent on local (late) topology.
clientSpi.waitForBlocked();
// Continue late switch PME.
resumeDiscoSndLatch.countDown();
crdDiscoSpi.setClosure(null);
// Wait late affinity switch.
awaitPartitionMapExchange();
// Continue tx mapping and preparing.
clientSpi.stopBlock();
} catch (InterruptedException e) {
fail(X.getFullStackTrace(e));
}
});
fut.get();
txFut.get();
lockFut.get();
// Partitions must be consistent across all owners after the race.
assertPartitionsSame(idleVerify(crd, DEFAULT_CACHE_NAME));
// TX must be prepared over new owner.
PartitionUpdateCounter cntr = counter(key, grid(3).name());
assertNotNull(cntr);
// NOTE(review): 2 reserved updates presumably correspond to the two putAll batches of
// the single tx prepared on the new owner — confirm against PartitionUpdateCounter semantics.
assertEquals(cntr.toString(), 2, cntr.reserved());
PartitionUpdateCounter cntr2 = counter(key, DEFAULT_CACHE_NAME + "2", grid(3).name());
assertNotNull(cntr2);
assertEquals(cntr2.toString(), 2, cntr2.reserved());
}
Usage example of org.apache.ignite.spi.discovery.tcp.BlockTcpDiscoverySpi in the Apache Ignite project: class BinaryMetadataConcurrentUpdateWithIndexesTest, method testMissingSchemaUpdate.
/**
 * Reproduces a race between two concurrent binary metadata updates for an indexed type:
 * discovery-level metadata propose messages are selectively delayed on their way to the
 * client and to the next server in the ring, while two transactions from the client
 * trigger overlapping metadata registrations.
 * <p>
 * NOTE(review): the expected outcome — both transactions complete despite one node
 * observing a stale schema version — is inferred from the choreography below; confirm
 * against the original test in the Ignite repository.
 *
 * @throws Exception If failed.
 */
@Test
public void testMissingSchemaUpdate() throws Exception {
// Start order is important.
Ignite node0 = startGrid("node0");
Ignite node1 = startGrid("node1");
IgniteEx client0 = startClientGrid("client0");
CacheObjectBinaryProcessorImpl.TestBinaryContext clientCtx = (CacheObjectBinaryProcessorImpl.TestBinaryContext) ((CacheObjectBinaryProcessorImpl) client0.context().cacheObjects()).binaryContext();
clientCtx.addListener(new CacheObjectBinaryProcessorImpl.TestBinaryContext.TestBinaryContextListener() {
@Override
public void onAfterMetadataRequest(int typeId, BinaryType type) {
// Once syncMeta is set, rendezvous all metadata requesters on initMetaReq so
// the test can observe the initial metadata round-trip before proceeding.
if (syncMeta) {
try {
initMetaReq.countDown();
initMetaReq.await();
} catch (Exception e) {
throw new BinaryObjectException(e);
}
}
}
@Override
public void onBeforeMetadataUpdate(int typeId, BinaryMetadata metadata) {
// Delay one of updates until schema is locally updated on propose message.
if (delayMetadataUpdateThreadLoc.get() != null)
await(localMetaUpdatedLatch, 5000);
}
});
Ignite node2 = startGrid("node2");
Ignite node3 = startGrid("node3");
startGrid("node4");
node0.cluster().active(true);
awaitPartitionMapExchange();
syncMeta = true;
CountDownLatch clientProposeMsgBlockedLatch = new CountDownLatch(1);
AtomicBoolean clientWait = new AtomicBoolean();
final Object clientMux = new Object();
AtomicBoolean srvWait = new AtomicBoolean();
final Object srvMux = new Object();
// node1 forwards discovery messages to the client: park the metadata propose message
// destined for client0 until the test thread releases it via clientWait/clientMux.
((BlockTcpDiscoverySpi) node1.configuration().getDiscoverySpi()).setClosure((snd, msg) -> {
if (msg instanceof MetadataUpdateProposedMessage) {
if (Thread.currentThread().getName().contains("client")) {
log.info("Block custom message to client0: [locNode=" + snd + ", msg=" + msg + ']');
clientProposeMsgBlockedLatch.countDown();
// Message to client
synchronized (clientMux) {
while (!clientWait.get()) try {
clientMux.wait();
} catch (InterruptedException e) {
fail();
}
}
}
}
return null;
});
// node2: park the propose message travelling along the server ring (but only after it
// has passed the coordinator, i.e. pendingVer > 0) until released via srvWait/srvMux.
((BlockTcpDiscoverySpi) node2.configuration().getDiscoverySpi()).setClosure((snd, msg) -> {
if (msg instanceof MetadataUpdateProposedMessage) {
MetadataUpdateProposedMessage msg0 = (MetadataUpdateProposedMessage) msg;
int pendingVer = U.field(msg0, "pendingVer");
// Should not block propose messages until they reach coordinator.
if (pendingVer == 0)
return null;
log.info("Block custom message to next server: [locNode=" + snd + ", msg=" + msg + ']');
// Block message to next server until explicitly released.
synchronized (srvMux) {
while (!srvWait.get()) try {
srvMux.wait();
} catch (InterruptedException e) {
fail();
}
}
}
return null;
});
Integer key = primaryKey(node3.cache(DEFAULT_CACHE_NAME));
// First tx: triggers the initial metadata registration; errors are logged, not rethrown.
IgniteInternalFuture fut0 = runAsync(() -> {
try (Transaction tx = client0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
client0.cache(DEFAULT_CACHE_NAME).put(key, build(client0, "val", 0));
tx.commit();
} catch (Throwable t) {
log.error("err", t);
}
});
// Implements test logic.
IgniteInternalFuture fut1 = runAsync(() -> {
// Wait for initial metadata received. It should be initial version: pending=0, accepted=0
await(initMetaReq, 5000);
// Wait for blocking proposal message to client node.
await(clientProposeMsgBlockedLatch, 5000);
// Unblock proposal message to client.
clientWait.set(true);
synchronized (clientMux) {
clientMux.notify();
}
// Give some time to apply update.
doSleep(3000);
// Unblock second metadata update.
localMetaUpdatedLatch.countDown();
// Give some time for tx to complete (success or fail). fut2 will throw an error if tx has failed on commit.
doSleep(3000);
// Unblock metadata message and allow for correct version acceptance.
srvWait.set(true);
synchronized (srvMux) {
srvMux.notify();
}
});
// Second tx: runs with the thread-local delay enabled, so its metadata update is held
// until localMetaUpdatedLatch is released; a commit failure propagates via fut2.get().
IgniteInternalFuture fut2 = runAsync(() -> {
delayMetadataUpdateThreadLoc.set(true);
try (Transaction tx = client0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 0, 1)) {
client0.cache(DEFAULT_CACHE_NAME).put(key, build(client0, "val", 0));
tx.commit();
}
});
fut0.get();
fut1.get();
fut2.get();
}
Usage example of org.apache.ignite.spi.discovery.tcp.BlockTcpDiscoverySpi in the Apache Ignite project: class BinaryMetadataConcurrentUpdateWithIndexesTest, method getConfiguration.
/**
 * {@inheritDoc}
 * <p>
 * Configures every node with a {@link BlockTcpDiscoverySpi} (address randomization
 * disabled via reflection), discovery events enabled, a small data region and a single
 * transactional, fully-synced partitioned cache whose query entity carries {@code FIELDS}
 * sorted-indexed string columns.
 */
@Override
protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
    IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);

    cfg.setIncludeEventTypes(EventType.EVTS_DISCOVERY);

    // Discovery SPI able to block custom messages; keep the ring order deterministic
    // by disabling address randomization (the flag is not public, set via reflection).
    BlockTcpDiscoverySpi discoSpi = new BlockTcpDiscoverySpi();

    Field skipRndField = U.findField(BlockTcpDiscoverySpi.class, "skipAddrsRandomization");
    assertNotNull(skipRndField);
    skipRndField.set(discoSpi, true);

    cfg.setDiscoverySpi(discoSpi.setIpFinder(sharedStaticIpFinder));

    // Build the indexed schema: string fields s0..s{FIELDS-1}, each with a sorted index.
    LinkedHashMap<String, String> entityFields = new LinkedHashMap<>();
    Collection<QueryIndex> entityIdxs = new ArrayList<>(FIELDS);

    for (int i = 0; i < FIELDS; i++) {
        String fld = "s" + i;

        entityFields.put(fld, "java.lang.String");
        entityIdxs.add(new QueryIndex(fld, QueryIndexType.SORTED));
    }

    QueryEntity entity = new QueryEntity("java.lang.Integer", "Value");
    entity.setFields(entityFields);
    entity.setIndexes(entityIdxs);

    cfg.setDataStorageConfiguration(new DataStorageConfiguration()
        .setDefaultDataRegionConfiguration(new DataRegionConfiguration().setMaxSize(50 * MB)));

    cfg.setCacheConfiguration(new CacheConfiguration(DEFAULT_CACHE_NAME)
        .setBackups(0)
        .setQueryEntities(Collections.singleton(entity))
        .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
        .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
        .setCacheMode(CacheMode.PARTITIONED));

    return cfg;
}
Aggregations