use of org.apache.ignite.cache.CacheMode.REPLICATED in project ignite by apache.
the class ServicePredicateAccessCacheTest method testPredicateAccessCache.
/**
 * @throws Exception If failed.
 */
@Test
public void testPredicateAccessCache() throws Exception {
    final IgniteEx ignite0 = startGrid(0);

    CacheConfiguration<String, String> cacheCfg = new CacheConfiguration<>();

    cacheCfg.setName("testCache");
    cacheCfg.setAtomicityMode(ATOMIC);
    cacheCfg.setCacheMode(REPLICATED);
    cacheCfg.setWriteSynchronizationMode(FULL_SYNC);

    IgniteCache<String, String> cache = ignite0.getOrCreateCache(cacheCfg);

    if (ignite0.context().service() instanceof IgniteServiceProcessor)
        cache.put(ignite0.cluster().localNode().id().toString(), "val");

    latch = new CountDownLatch(1);

    final ClusterGroup grp = ignite0.cluster().forPredicate((IgnitePredicate<ClusterNode>)node -> {
        System.out.println("Predicate started [thread=" + Thread.currentThread().getName() + ']');

        latch.countDown();

        try {
            Thread.sleep(3000);
        }
        catch (InterruptedException ignore) {
            // No-op.
        }

        System.out.println("Call contains key [thread=" + Thread.currentThread().getName() + ']');

        boolean ret = Ignition.localIgnite().cache("testCache").containsKey(node.id().toString());

        System.out.println("After contains key [ret=" + ret + ", thread=" + Thread.currentThread().getName() + ']');

        return ret;
    });

    IgniteInternalFuture<?> fut = GridTestUtils.runAsync(new Callable<Void>() {
        @Override public Void call() throws Exception {
            info("Start deploy service.");

            ignite0.services(grp).deployNodeSingleton("testService", new TestService());

            info("Service deployed.");

            return null;
        }
    }, "deploy-thread");

    latch.await();

    startGrid(1);

    fut.get();
}
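For context, here is a minimal standalone sketch of the same REPLICATED/ATOMIC/FULL_SYNC cache setup outside the test harness. The class name and node lifecycle handling are illustrative assumptions, not part of the Ignite test suite; only the public Ignite API is used.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.cache.CacheWriteSynchronizationMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class ReplicatedCacheSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<String, String> cacheCfg = new CacheConfiguration<>("testCache");

            cacheCfg.setAtomicityMode(CacheAtomicityMode.ATOMIC);
            cacheCfg.setCacheMode(CacheMode.REPLICATED);
            cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);

            // With REPLICATED mode every server node keeps a full copy of the data;
            // FULL_SYNC makes a write wait until all copies are updated.
            IgniteCache<String, String> cache = ignite.getOrCreateCache(cacheCfg);

            cache.put(ignite.cluster().localNode().id().toString(), "val");
        }
    }
}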
use of org.apache.ignite.cache.CacheMode.REPLICATED in project ignite by apache.
the class EventsRemoteSecurityContextCheckTest method checkRemoteListenWithNullFilter.
/**
 */
private void checkRemoteListenWithNullFilter(boolean async) throws Exception {
    for (IgniteEx initiator : initiators()) {
        IgniteCache<Object, Object> cache = initiator.createCache(
            new CacheConfiguration<>("test_cache_" + INDEX.incrementAndGet()).setCacheMode(REPLICATED));

        CountDownLatch srvNodesListenedLatch = new CountDownLatch(initiator.cluster().forServers().nodes().size());

        IgniteBiPredicate<UUID, ? extends Event> locLsnr = (uuid, e) -> {
            srvNodesListenedLatch.countDown();

            return true;
        };

        UUID lsnrId = async
            ? initiator.events().remoteListenAsync(locLsnr, null, EVT_CACHE_OBJECT_PUT).get()
            : initiator.events().remoteListen(locLsnr, null, EVT_CACHE_OBJECT_PUT);

        try {
            cache.put("key", "val");

            assertTrue(srvNodesListenedLatch.await(getTestTimeout(), MILLISECONDS));
        }
        finally {
            initiator.events().stopRemoteListen(lsnrId);
        }
    }
}
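The same remote-listen pattern can be used outside the security test harness. Below is a hedged sketch for a single locally started node; the cache name, listener body, and class name are illustrative, and note that cache events are disabled by default and must be enabled via setIncludeEventTypes.

import java.util.UUID;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.CacheEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgniteBiPredicate;

public class RemoteListenSketch {
    public static void main(String[] args) {
        // Cache events are not recorded by default; enable the type we want to listen for.
        IgniteConfiguration cfg = new IgniteConfiguration()
            .setIncludeEventTypes(EventType.EVT_CACHE_OBJECT_PUT);

        try (Ignite ignite = Ignition.start(cfg)) {
            IgniteCache<String, String> cache = ignite.createCache(
                new CacheConfiguration<String, String>("events_cache").setCacheMode(CacheMode.REPLICATED));

            // Local listener: called on this node for every notification forwarded by remote nodes.
            IgniteBiPredicate<UUID, CacheEvent> locLsnr = (nodeId, evt) -> {
                System.out.println("Put observed [node=" + nodeId + ", key=" + evt.key() + ']');

                return true; // Keep listening.
            };

            // Null remote filter: every EVT_CACHE_OBJECT_PUT event is forwarded, as in the test above.
            UUID lsnrId = ignite.events().remoteListen(locLsnr, null, EventType.EVT_CACHE_OBJECT_PUT);

            cache.put("key", "val");

            ignite.events().stopRemoteListen(lsnrId);
        }
    }
}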
use of org.apache.ignite.cache.CacheMode.REPLICATED in project ignite by apache.
the class OpenCensusSqlJdbcTracingTest method testSelectLocal.
/**
 * Tests tracing of local SQL SELECT query.
 *
 * @throws Exception If failed.
 */
@Test
public void testSelectLocal() throws Exception {
    String orgTable = createTableAndPopulate(Organization.class, REPLICATED, 1);

    SpanId rootSpan = executeAndCheckRootSpan("SELECT orgVal FROM " + orgTable, TEST_SCHEMA, false, false, true);

    String qryId = getAttribute(rootSpan, SQL_QRY_ID);
    assertTrue(Long.parseLong(qryId.substring(qryId.indexOf('_') + 1)) > 0);
    UUID.fromString(qryId.substring(0, qryId.indexOf('_')));

    checkChildSpan(SQL_QRY_PARSE, rootSpan);
    checkChildSpan(SQL_CURSOR_OPEN, rootSpan);
    checkChildSpan(SQL_ITER_OPEN, rootSpan);

    SpanId iterSpan = checkChildSpan(SQL_ITER_OPEN, rootSpan);

    checkChildSpan(SQL_QRY_EXECUTE, iterSpan);

    int fetchedRows = findChildSpans(SQL_PAGE_FETCH, rootSpan).stream()
        .mapToInt(span -> Integer.parseInt(getAttribute(span, SQL_PAGE_ROWS)))
        .sum();

    assertEquals(TEST_TABLE_POPULATION, fetchedRows);

    checkChildSpan(SQL_ITER_CLOSE, rootSpan);

    assertFalse(findChildSpans(SQL_CURSOR_CLOSE, rootSpan).isEmpty());
}
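The test delegates table creation to the helper createTableAndPopulate(...). A rough equivalent over the thin JDBC driver is sketched below, with the OpenCensus tracing setup omitted; the table definition, data, and connection URL are illustrative assumptions.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ReplicatedSqlSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1");
             Statement stmt = conn.createStatement()) {
            // TEMPLATE=REPLICATED maps the table onto a REPLICATED cache.
            stmt.executeUpdate("CREATE TABLE Organization (id INT PRIMARY KEY, orgVal VARCHAR) " +
                "WITH \"TEMPLATE=REPLICATED\"");

            stmt.executeUpdate("INSERT INTO Organization (id, orgVal) VALUES (1, 'org1')");

            try (ResultSet rs = stmt.executeQuery("SELECT orgVal FROM Organization")) {
                while (rs.next())
                    System.out.println(rs.getString(1));
            }
        }
    }
}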
use of org.apache.ignite.cache.CacheMode.REPLICATED in project ignite by apache.
the class CacheBlockOnReadAbstractTest method testStopBaselineTransactionalReplicated.
/**
 * @throws Exception If failed.
 */
@Params(baseline = 9, atomicityMode = TRANSACTIONAL, cacheMode = REPLICATED)
@Test
public void testStopBaselineTransactionalReplicated() throws Exception {
    AtomicInteger cntDownCntr = new AtomicInteger(0);

    doTest(asMessagePredicate(discoEvt -> discoEvt.type() == EventType.EVT_NODE_LEFT), () -> {
        IgniteEx node = baseline.get(baseline.size() - cntDownCntr.get() - 1);

        TestRecordingCommunicationSpi.spi(node).stopBlock();

        cntDownCntr.incrementAndGet();

        for (int i = 0; i < cntDownCntr.get(); i++) // This node and previously stopped nodes as well.
            cntFinishedReadOperations.countDown();

        stopGrid(node.name());
    });
}
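As a point of reference, here is a hedged sketch of the kind of TRANSACTIONAL REPLICATED cache this parameterized test exercises, reduced to a single node and one explicit transaction; the class and cache names are illustrative.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;

public class TxReplicatedSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, Integer>("txReplicated")
                    .setCacheMode(CacheMode.REPLICATED)
                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));

            // Updates are applied to every server node's copy within the transaction scope.
            try (Transaction tx = ignite.transactions().txStart()) {
                cache.put(1, 42);

                tx.commit();
            }

            System.out.println(cache.get(1));
        }
    }
}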
use of org.apache.ignite.cache.CacheMode.REPLICATED in project ignite by apache.
the class AbstractReadRepairTest method setDifferentValuesForSameKey.
/**
 */
private InconsistentMapping setDifferentValuesForSameKey(int key, boolean misses, boolean nulls,
    ReadRepairStrategy strategy) throws Exception {
    List<Ignite> nodes = new ArrayList<>();
    Map<Ignite, T2<Integer, GridCacheVersion>> mapping = new HashMap<>();

    Ignite primary = primaryNode(key, DEFAULT_CACHE_NAME);

    ThreadLocalRandom rnd = ThreadLocalRandom.current();

    if (rnd.nextBoolean()) {
        // Reversed order.
        nodes.addAll(backupNodes(key, DEFAULT_CACHE_NAME));
        nodes.add(primary);
    }
    else {
        nodes.add(primary);
        nodes.addAll(backupNodes(key, DEFAULT_CACHE_NAME));
    }

    if (rnd.nextBoolean()) // Random order.
        Collections.shuffle(nodes);

    IgniteInternalCache<Integer, Integer> internalCache = grid(1).cachex(DEFAULT_CACHE_NAME);

    GridCacheVersionManager mgr = ((GridCacheAdapter) internalCache.cache()).context().shared().versions();

    int incVal = 0;
    Integer primVal = null;
    Collection<T2<Integer, GridCacheVersion>> vals = new ArrayList<>();

    if (misses) {
        List<Ignite> keeped = nodes.subList(0, rnd.nextInt(1, nodes.size()));

        nodes.stream().filter(node -> !keeped.contains(node)).forEach(node -> {
            T2<Integer, GridCacheVersion> nullT2 = new T2<>(null, null);

            vals.add(nullT2);
            mapping.put(node, nullT2);
        });

        // Recording nulls (missed values).
        nodes = keeped;
    }

    boolean rmvd = false;

    boolean incVer = rnd.nextBoolean();

    GridCacheVersion ver = null;

    for (Ignite node : nodes) {
        IgniteInternalCache<Integer, Integer> cache = ((IgniteEx) node).cachex(DEFAULT_CACHE_NAME);

        GridCacheAdapter<Integer, Integer> adapter = (GridCacheAdapter) cache.cache();

        GridCacheEntryEx entry = adapter.entryEx(key);

        if (ver == null || incVer)
            ver = mgr.next(entry.context().kernalContext().discovery().topologyVersion()); // Incremental version.

        boolean rmv = nulls && (!rmvd || rnd.nextBoolean());

        Integer val = rmv ? null : rnd.nextBoolean() ? /*increment or same as previously*/ ++incVal : incVal;

        T2<Integer, GridCacheVersion> valVer = new T2<>(val, val != null ? ver : null);

        vals.add(valVer);
        mapping.put(node, valVer);

        GridKernalContext kctx = ((IgniteEx) node).context();

        // Incremental value.
        byte[] bytes = kctx.cacheObjects().marshal(entry.context().cacheObjectContext(), rmv ? -1 : val);

        try {
            kctx.cache().context().database().checkpointReadLock();

            boolean init = entry.initialValue(new CacheObjectImpl(null, bytes), ver, 0, 0, false,
                AffinityTopologyVersion.NONE, GridDrType.DR_NONE, false, false);

            if (rmv) {
                if (cache.configuration().getAtomicityMode() == ATOMIC)
                    entry.innerUpdate(ver, ((IgniteEx) node).localNode().id(), ((IgniteEx) node).localNode().id(),
                        GridCacheOperation.DELETE, null, null, false, false, false, false, null, false, false, false,
                        false, AffinityTopologyVersion.NONE, null, GridDrType.DR_NONE, 0, 0, null, false, false,
                        null, null, null, null, false);
                else
                    entry.innerRemove(null, ((IgniteEx) node).localNode().id(), ((IgniteEx) node).localNode().id(),
                        false, false, false, false, false, null, AffinityTopologyVersion.NONE, CU.empty0(),
                        GridDrType.DR_NONE, null, null, null, 1L);

                rmvd = true;

                assertFalse(entry.hasValue());
            }
            else
                assertTrue(entry.hasValue());

            assertTrue("iterableKey " + key + " already inited", init);

            if (node.equals(primary))
                primVal = val;
        }
        finally {
            ((IgniteEx) node).context().cache().context().database().checkpointReadUnlock();
        }
    }

    assertEquals(vals.size(), mapping.size());
    assertEquals(vals.size(),
        internalCache.configuration().getCacheMode() == REPLICATED ? serverNodesCount() : backupsCount() + 1);

    if (!misses && !nulls)
        assertTrue(primVal != null); // Primary value set.

    Integer fixed;

    boolean consistent;
    boolean repairable;

    if (vals.stream().distinct().count() == 1) {
        // Consistent value.
        consistent = true;
        repairable = true;

        fixed = vals.iterator().next().getKey();
    }
    else {
        consistent = false;

        // Currently, Atomic caches can not be repaired.
        repairable = atomicityMode() != ATOMIC;

        switch (strategy) {
            case LWW:
                if (misses || rmvd || !incVer) {
                    repairable = false;

                    fixed = Integer.MIN_VALUE; // Should never be returned.
                }
                else
                    fixed = incVal;

                break;

            case PRIMARY:
                fixed = primVal;

                break;

            case RELATIVE_MAJORITY:
                fixed = Integer.MIN_VALUE; // Should never be returned.

                Map<T2<Integer, GridCacheVersion>, Integer> counts = new HashMap<>();

                for (T2<Integer, GridCacheVersion> val : vals) {
                    counts.putIfAbsent(val, 0);

                    counts.compute(val, (k, v) -> v + 1);
                }

                int[] sorted = counts.values().stream().sorted(Comparator.reverseOrder()).mapToInt(v -> v).toArray();

                int max = sorted[0];

                if (sorted.length > 1 && sorted[1] == max)
                    repairable = false;

                if (repairable)
                    for (Map.Entry<T2<Integer, GridCacheVersion>, Integer> count : counts.entrySet())
                        if (count.getValue().equals(max)) {
                            fixed = count.getKey().getKey();

                            break;
                        }

                break;

            case REMOVE:
                fixed = null;

                break;

            case CHECK_ONLY:
                repairable = false;

                fixed = Integer.MIN_VALUE; // Should never be returned.

                break;

            default:
                throw new UnsupportedOperationException(strategy.toString());
        }
    }

    return new InconsistentMapping(mapping, primVal, fixed, repairable, consistent);
}
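The method above seeds inconsistent copies through internal APIs so that read repair has something to fix. For comparison, here is a hedged sketch of the public read-repair entry point, assuming the IgniteCache#withReadRepair(ReadRepairStrategy) decorator available in the Ignite versions that ship ReadRepairStrategy; the class name, cache name, and values are illustrative.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.cache.ReadRepairStrategy;
import org.apache.ignite.configuration.CacheConfiguration;

public class ReadRepairSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // TRANSACTIONAL because, as the test notes, atomic caches currently can not be repaired.
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, Integer>("repairDemo")
                    .setCacheMode(CacheMode.REPLICATED)
                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));

            cache.put(1, 1);

            // The read is validated against all owning nodes; if copies diverge, they are
            // repaired according to the chosen strategy (here: last-write-wins).
            Integer val = cache.withReadRepair(ReadRepairStrategy.LWW).get(1);

            System.out.println(val);
        }
    }
}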