Use of org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC in project ignite by apache.
The class BinaryMetadataRegistrationInsideEntryProcessorTest, method testContinuousQueryAndBinaryObjectBuilder.
/**
 * Continuously executes multiple EntryProcessors while continuous queries run in parallel.
 * This used to lead to several deadlocks.
 *
 * @throws Exception If failed.
 */
@Test
public void testContinuousQueryAndBinaryObjectBuilder() throws Exception {
    startGrids(3).cluster().active(true);

    grid(0).createCache(new CacheConfiguration<>()
        .setName(CACHE_NAME)
        .setAtomicityMode(ATOMIC)
        .setBackups(2)
        .setCacheMode(PARTITIONED)
        .setWriteSynchronizationMode(FULL_SYNC)
        .setPartitionLossPolicy(READ_WRITE_SAFE));

    IgniteEx client1 = startClientGrid(getConfiguration().setIgniteInstanceName("client1"));
    IgniteEx client2 = startClientGrid(getConfiguration().setIgniteInstanceName("client2"));

    AtomicBoolean stop = new AtomicBoolean();
    AtomicInteger keyCntr = new AtomicInteger();
    AtomicInteger binaryTypeCntr = new AtomicInteger();

    /** */
    class MyEntryProcessor implements CacheEntryProcessor<Object, Object, Object> {
        /** Cached int value retrieved from {@code binaryTypeCntr} variable. */
        private int i;

        /** */
        public MyEntryProcessor(int i) {
            this.i = i;
        }

        /** */
        @IgniteInstanceResource
        Ignite ignite;

        /** {@inheritDoc} */
        @Override public Object process(MutableEntry<Object, Object> entry, Object... arguments) throws EntryProcessorException {
            BinaryObjectBuilder builder = ignite.binary().builder("my_type");

            builder.setField("new_field" + i, i);

            entry.setValue(builder.build());

            return null;
        }
    }

    IgniteInternalFuture fut1 = GridTestUtils.runMultiThreadedAsync(() -> {
        IgniteCache<Object, Object> cache = client1.cache(CACHE_NAME).withKeepBinary();

        while (!stop.get()) {
            Integer key = keyCntr.getAndIncrement();

            cache.put(key, key);

            cache.invoke(key, new MyEntryProcessor(binaryTypeCntr.get()));

            binaryTypeCntr.incrementAndGet();
        }
    }, 8, "writer-thread");

    IgniteInternalFuture fut2 = GridTestUtils.runAsync(() -> {
        IgniteCache<Object, Object> cache = client2.cache(CACHE_NAME).withKeepBinary();

        while (!stop.get()) {
            ContinuousQuery<Object, Object> qry = new ContinuousQuery<>();

            qry.setInitialQuery(new ScanQuery<>((key, val) -> true));
            qry.setLocalListener(evts -> {});

            // noinspection EmptyTryBlock
            try (QueryCursor<Cache.Entry<Object, Object>> cursor = cache.query(qry)) {
                // No-op.
            }
        }
    });

    doSleep(10_000);

    stop.set(true);

    fut1.get(10, TimeUnit.SECONDS);
    fut2.get(10, TimeUnit.SECONDS);
}
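Each processor invocation above adds a previously unseen field ("new_field" + i) to the "my_type" binary type, so every iteration forces cluster-wide binary metadata registration while continuous query listeners are being installed and torn down; that combination is what used to deadlock. The metadata side of this can be reproduced in isolation with the public IgniteBinary API. The helper below is only an illustrative sketch (the method name is made up; it assumes a reference to any started node of the cluster):

import org.apache.ignite.Ignite;
import org.apache.ignite.binary.BinaryObject;
import org.apache.ignite.binary.BinaryType;

/** Illustration only: the binary metadata registration the processors above trigger. */
static BinaryObject buildWithNewField(Ignite ignite, int i) {
    // Setting a field that the type has never seen before registers new binary metadata cluster-wide.
    BinaryObject obj = ignite.binary().builder("my_type")
        .setField("new_field" + i, i)
        .build();

    // The accumulated metadata is visible through the public binary API.
    BinaryType type = ignite.binary().type("my_type");
    System.out.println("my_type fields: " + type.fieldNames());

    return obj;
}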
Use of org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC in project ignite by apache.
The class CacheMvccAbstractBasicCoordinatorFailoverTest, method checkCoordinatorsLeft.
/**
 * @param num Number of coordinators to stop.
 * @param stopCrdFirst Whether the coordinator nodes should be stopped first.
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
private void checkCoordinatorsLeft(int num, boolean stopCrdFirst) throws Exception {
    disableScheduledVacuum = true;

    final int DATA_NODES = 3;
    final int NODES = num + DATA_NODES;

    nodeAttr = CRD_ATTR;

    // Do not use startMultithreaded here.
    startGrids(num);

    nodeAttr = null;

    startGridsMultiThreaded(num, DATA_NODES);

    List<Ignite> victims = new ArrayList<>(num);
    List<Ignite> survivors = new ArrayList<>(DATA_NODES);

    for (int i = 0; i < NODES; i++) {
        if (i < num)
            victims.add(grid(i));
        else
            survivors.add(grid(i));
    }

    if (log.isInfoEnabled()) {
        log.info("Nodes to be stopped [" + victims.stream()
            .map(n -> n.cluster().localNode().id().toString())
            .collect(Collectors.joining(", ")) + ']');

        log.info("Nodes not to be stopped [" + survivors.stream()
            .map(n -> n.cluster().localNode().id().toString())
            .collect(Collectors.joining(", ")) + ']');
    }

    Ignite nearNode = survivors.get(0);

    if (persistence)
        nearNode.cluster().active(true);

    CacheConfiguration ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, DATA_NODES - 1, DFLT_PARTITION_COUNT)
        .setNodeFilter(new CoordinatorNodeFilter());

    IgniteCache cache = nearNode.createCache(ccfg);

    try (Transaction tx = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        for (int key = 0; key < 10; key++)
            cache.put(key, 1);

        tx.commit();
    }

    List<Thread> stopThreads = victims.stream()
        .map(v -> new Thread(() -> stopGrid(v.name())))
        .collect(Collectors.toList());

    ScanQuery<Object, Object> scan = new ScanQuery<>();

    QueryCursor<Cache.Entry<Object, Object>> cur = survivors.get(0).cache(DEFAULT_CACHE_NAME).query(scan);

    Iterator<Cache.Entry<Object, Object>> it = cur.iterator();

    assertTrue(it.hasNext());
    assertEquals(1, it.next().getValue());

    if (log.isInfoEnabled())
        log.info("Start stopping nodes.");

    // Stop nodes and join threads.
    if (stopCrdFirst) {
        for (Thread t : stopThreads)
            t.start();
    }
    else {
        // We should stop the oldest node last.
        GridCachePartitionExchangeManager exch = ((IgniteEx) survivors.get(1)).context().cache().context().exchange();

        GridDhtTopologyFuture lastFinished = exch.lastFinishedFuture();

        for (int i = 1; i < stopThreads.size(); i++)
            stopThreads.get(i).start();

        while (lastFinished == exch.lastTopologyFuture())
            doSleep(1);

        stopThreads.get(0).start();
    }

    for (Thread t : stopThreads)
        t.join();

    if (log.isInfoEnabled())
        log.info("All nodes stopped.");

    assertTrue(it.hasNext());
    assertEquals(1, it.next().getValue());

    for (Ignite node : survivors) {
        for (int key = 0; key < 10; key++)
            assertEquals(1, node.cache(DEFAULT_CACHE_NAME).get(key));
    }

    try (Transaction tx = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        for (int key = 0; key < 10; key++)
            cache.put(key, 2);

        tx.commit();
    }
    catch (Exception e) {
        stopAllGrids(true);

        fail(X.getFullStackTrace(e));
    }

    for (Ignite node : survivors) {
        for (int key = 0; key < 10; key++)
            assertEquals(2, node.cache(DEFAULT_CACHE_NAME).get(key));
    }

    try (Transaction tx = nearNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        for (int key = 0; key < 10; key++)
            cache.put(key, 3);

        tx.commit();
    }
    catch (Exception e) {
        stopAllGrids(true);

        fail(X.getFullStackTrace(e));
    }

    for (Ignite node : survivors) {
        for (int key = 0; key < 10; key++)
            assertEquals(3, node.cache(DEFAULT_CACHE_NAME).get(key));
    }

    while (it.hasNext())
        assertEquals(1, (int) it.next().getValue());

    cur.close();
}
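The cacheConfiguration(cacheMode(), FULL_SYNC, DATA_NODES - 1, DFLT_PARTITION_COUNT) call relies on a helper defined elsewhere in the MVCC test hierarchy, which is not part of this excerpt. Below is a rough sketch of what such a helper typically builds for these MVCC tests; the method name, cache name and the TRANSACTIONAL_SNAPSHOT atomicity mode are assumptions based on the surrounding test family, not the helper's actual code:

import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.cache.CacheWriteSynchronizationMode;
import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
import org.apache.ignite.configuration.CacheConfiguration;

/** Hypothetical stand-in for the test's cacheConfiguration(...) helper. */
static CacheConfiguration<Object, Object> mvccCacheConfiguration(CacheMode cacheMode, int backups, int parts) {
    return new CacheConfiguration<Object, Object>()
        .setName("default")
        .setCacheMode(cacheMode)
        .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT) // MVCC caches in this test family.
        .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
        .setBackups(backups)
        .setAffinity(new RendezvousAffinityFunction(false, parts));
}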
Use of org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC in project ignite by apache.
The class ServicePredicateAccessCacheTest, method testPredicateAccessCache.
/**
* @throws Exception If failed.
*/
@Test
public void testPredicateAccessCache() throws Exception {
    final IgniteEx ignite0 = startGrid(0);

    CacheConfiguration<String, String> cacheCfg = new CacheConfiguration<>();

    cacheCfg.setName("testCache");
    cacheCfg.setAtomicityMode(ATOMIC);
    cacheCfg.setCacheMode(REPLICATED);
    cacheCfg.setWriteSynchronizationMode(FULL_SYNC);

    IgniteCache<String, String> cache = ignite0.getOrCreateCache(cacheCfg);

    if (ignite0.context().service() instanceof IgniteServiceProcessor)
        cache.put(ignite0.cluster().localNode().id().toString(), "val");

    latch = new CountDownLatch(1);

    final ClusterGroup grp = ignite0.cluster().forPredicate((IgnitePredicate<ClusterNode>) node -> {
        System.out.println("Predicate started [thread=" + Thread.currentThread().getName() + ']');

        latch.countDown();

        try {
            Thread.sleep(3000);
        }
        catch (InterruptedException ignore) {
            // No-op.
        }

        System.out.println("Call contains key [thread=" + Thread.currentThread().getName() + ']');

        boolean ret = Ignition.localIgnite().cache("testCache").containsKey(node.id().toString());

        System.out.println("After contains key [ret=" + ret + ", thread=" + Thread.currentThread().getName() + ']');

        return ret;
    });

    IgniteInternalFuture<?> fut = GridTestUtils.runAsync(new Callable<Void>() {
        @Override public Void call() throws Exception {
            info("Start deploy service.");

            ignite0.services(grp).deployNodeSingleton("testService", new TestService());

            info("Service deployed.");

            return null;
        }
    }, "deploy-thread");

    latch.await();

    startGrid(1);

    fut.get();
}
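The deployed TestService is declared elsewhere in the test class and is not included in this excerpt. For reference, a minimal no-op implementation of Ignite's Service interface looks roughly like the sketch below (an assumption for illustration, not the actual TestService):

import org.apache.ignite.services.Service;
import org.apache.ignite.services.ServiceContext;

/** Minimal no-op service sketch; the real TestService is defined in the test class. */
static class TestService implements Service {
    /** {@inheritDoc} */
    @Override public void init(ServiceContext ctx) throws Exception {
        // No-op.
    }

    /** {@inheritDoc} */
    @Override public void execute(ServiceContext ctx) throws Exception {
        // No-op.
    }

    /** {@inheritDoc} */
    @Override public void cancel(ServiceContext ctx) {
        // No-op.
    }
}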
Use of org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC in project ignite by apache.
The class CacheMvccSqlTxQueriesAbstractTest, method testUpdateExplicitPartitionsWithReducer.
/**
* @throws Exception If failed.
*/
@Test
public void testUpdateExplicitPartitionsWithReducer() throws Exception {
    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, 10).setIndexedTypes(Integer.class, Integer.class);

    Ignite ignite = startGridsMultiThreaded(4);

    awaitPartitionMapExchange();

    IgniteCache<Object, Object> cache = ignite.cache(DEFAULT_CACHE_NAME);

    Affinity<Object> affinity = internalCache0(cache).affinity();

    int keysCnt = 10, retryCnt = 0;

    Integer test = 0;

    Map<Integer, Integer> vals = new LinkedHashMap<>();

    while (vals.size() < keysCnt) {
        int partition = affinity.partition(test);

        if (partition == 1 || partition == 2)
            vals.put(test, 0);
        else
            assertTrue("Maximum retry number exceeded", ++retryCnt < 1000);

        test++;
    }

    cache.putAll(vals);

    SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer set _val=(SELECT 2 FROM DUAL)").setPartitions(1, 2);

    List<List<?>> all = cache.query(qry).getAll();

    assertEquals(Long.valueOf(keysCnt), all.stream().findFirst().orElseThrow(AssertionError::new).get(0));

    List<List<?>> rows = cache.query(new SqlFieldsQuery("SELECT _val FROM Integer")).getAll();

    assertEquals(keysCnt, rows.size());

    assertTrue(rows.stream().map(r -> r.get(0)).map(Integer.class::cast).allMatch(v -> v == 2));
}
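The key-selection loop above keeps only keys that map to partitions 1 and 2, so the UPDATE issued with setPartitions(1, 2) is guaranteed to cover every inserted row. Roughly the same partition lookup is available through the public affinity API; the helper below is an illustrative sketch (the method name and the simplified UPDATE statement are assumptions, and it presumes the same Integer/Integer indexed types as the test):

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cache.query.SqlFieldsQuery;

/** Illustration only: restrict a SQL statement to the partitions that own the given keys. */
static SqlFieldsQuery updateForKeyPartitions(Ignite ignite, String cacheName, int key1, int key2) {
    // Public affinity API; gives the same key-to-partition mapping the test reads via internalCache0(cache).affinity().
    Affinity<Object> aff = ignite.affinity(cacheName);

    int p1 = aff.partition(key1);
    int p2 = aff.partition(key2);

    // Only these partitions are scanned and updated by the query.
    return new SqlFieldsQuery("UPDATE Integer SET _val = 2").setPartitions(p1, p2);
}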
Use of org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC in project ignite by apache.
The class CacheMvccBasicContinuousQueryTest, method checkUpdateCountersGapsClosed.
/**
 * @param cacheMode Cache mode.
 * @throws Exception If failed.
 */
private void checkUpdateCountersGapsClosed(CacheMode cacheMode) throws Exception {
    testSpi = true;

    int srvCnt = 4;

    startGridsMultiThreaded(srvCnt);

    IgniteEx nearNode = grid(srvCnt - 1);

    IgniteCache<Object, Object> cache = nearNode.createCache(
        cacheConfiguration(cacheMode, FULL_SYNC, srvCnt - 1, srvCnt).setIndexedTypes(Integer.class, Integer.class));

    IgniteEx primary = grid(0);

    Affinity<Object> aff = nearNode.affinity(cache.getName());

    int[] nearBackupParts = aff.backupPartitions(nearNode.localNode());
    int[] primaryParts = aff.primaryPartitions(primary.localNode());

    Collection<Integer> nearSet = new HashSet<>();

    for (int part : nearBackupParts)
        nearSet.add(part);

    Collection<Integer> primarySet = new HashSet<>();

    for (int part : primaryParts)
        primarySet.add(part);

    // We need backup partitions on the near node.
    nearSet.retainAll(primarySet);

    List<Integer> keys = singlePartKeys(primary.cache(DEFAULT_CACHE_NAME), 20, nearSet.iterator().next());

    int range = 3;

    ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

    List<CacheEntryEvent> arrivedEvts = new ArrayList<>();

    CountDownLatch latch = new CountDownLatch(range * 2);

    qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent e : evts) {
                arrivedEvts.add(e);

                latch.countDown();
            }
        }
    });

    QueryCursor<Cache.Entry<Integer, Integer>> cur = nearNode.cache(DEFAULT_CACHE_NAME).query(qry);

    // Prevent the first transaction from preparing on backups.
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);

    spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        private final AtomicInteger limiter = new AtomicInteger();

        @Override public boolean apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxPrepareRequest)
                return limiter.getAndIncrement() < srvCnt - 1;

            return false;
        }
    });

    Transaction txA = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);

    for (int i = 0; i < range; i++)
        primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 2);

    txA.commitAsync();

    GridTestUtils.runAsync(() -> {
        try (Transaction tx = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range; i < range * 2; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 1);

            tx.commit();
        }
    }).get();

    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return primary.context().cache().context().tm().activeTransactions().stream()
                .allMatch(tx -> tx.state() == PREPARING);
        }
    }, 3_000);

    GridTestUtils.runAsync(() -> {
        try (Transaction txB = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range * 2; i < range * 3; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 3);

            txB.commit();
        }
    }).get();

    long primaryUpdCntr = getUpdateCounter(primary, keys.get(0));

    assertEquals(range * 3, primaryUpdCntr);

    // Drop the primary node.
    stopGrid(primary.name());

    // Wait until all txs are rolled back.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            boolean allRolledBack = true;

            for (int i = 1; i < srvCnt; i++) {
                boolean rolledBack = grid(i).context().cache().context().tm().activeTransactions().stream()
                    .allMatch(tx -> tx.state() == ROLLED_BACK);

                allRolledBack &= rolledBack;
            }

            return allRolledBack;
        }
    }, 3_000);

    for (int i = 1; i < srvCnt; i++) {
        IgniteCache backupCache = grid(i).cache(DEFAULT_CACHE_NAME);

        int size = backupCache.query(new SqlFieldsQuery("select * from Integer")).getAll().size();

        long backupCntr = getUpdateCounter(grid(i), keys.get(0));

        assertEquals(range * 2, size);
        assertEquals(primaryUpdCntr, backupCntr);
    }

    assertTrue(latch.await(5, SECONDS));
    assertEquals(range * 2, arrivedEvts.size());

    cur.close();
}
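The anonymous IgniteBiPredicate above holds back the first srvCnt - 1 GridDhtTxPrepareRequest messages (one per backup), leaving transaction txA stuck in PREPARING; that is what creates the update-counter gap the test then verifies is closed after the primary is stopped. A more common block/await/release variant of the same TestRecordingCommunicationSpi pattern is sketched below; waitForBlocked and stopBlock are not used by this particular test, and the helper name is made up:

import org.apache.ignite.Ignite;
import org.apache.ignite.internal.TestRecordingCommunicationSpi;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareRequest;

/** Sketch of the usual block/await/release cycle with TestRecordingCommunicationSpi. */
static void blockAndReleasePrepare(Ignite primary) throws InterruptedException {
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);

    // Hold back prepare requests sent from the primary to the backups.
    spi.blockMessages((node, msg) -> msg instanceof GridDhtTxPrepareRequest);

    // ... start the transaction that should get stuck in PREPARING ...

    spi.waitForBlocked(); // Wait until at least one prepare request is actually captured.

    // ... perform the checks that rely on the transaction being stuck ...

    spi.stopBlock();      // Release the captured messages and let the prepare finish.
}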