Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class CacheMvccSqlUpdateCountersTest, method testDeleteOwnKey.
/**
 * @throws Exception If failed.
 */
@Test
public void testDeleteOwnKey() throws Exception {
    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, 1)
        .setCacheMode(CacheMode.REPLICATED)
        .setIndexedTypes(Integer.class, Integer.class);

    Ignite node = startGridsMultiThreaded(1);

    IgniteCache cache = node.cache(DEFAULT_CACHE_NAME);

    Affinity aff = affinity(cache);

    int key1 = 1;
    int part1 = aff.partition(key1);

    // Insert, merge and delete the same key inside one transaction: the key
    // ends up removed, so the commit must not move the partition counter.
    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (" + key1 + ",1)");
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",2)");
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",3)");
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key=" + key1);
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",4)");
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",5)");
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key=" + key1);
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 0);

    // A plain merge of a new key increments the counter.
    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",1)");
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 1);

    // Deleting an existing key increments the counter.
    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key=" + key1);
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 2);

    // Deleting an absent key is a no-op: the counter stays put.
    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key=" + key1);
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 2);

    // Insert, delete and merge in one transaction: the net effect is a single
    // write, so the counter advances by one.
    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (" + key1 + ", 2)");
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key=" + key1);
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ", 3)");
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 3);

    // Delete, merge, delete: the key ends up removed, again one net change.
    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key=" + key1);
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",5)");
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key=" + key1);
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 4);

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",6)");
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 5);

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key=" + key1);
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",7)");
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 6);
}
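The counter assertions above hinge on one Affinity call: partition(key), which maps a key to the partition whose update counter is then checked after each commit. A minimal standalone sketch of that mapping, assuming a locally started node and a hypothetical cache name "myCache":

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;

public class PartitionLookup {
    public static void main(String[] args) {
        // Starts a single local node; "myCache" is an assumed name for this sketch.
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("myCache");

            Affinity<Integer> aff = ignite.affinity("myCache");

            int key = 1;

            // Same call the tests use to pick the partition whose
            // update counter they assert after each commit.
            int part = aff.partition(key);

            System.out.println("Key " + key + " -> partition " + part);
        }
    }
}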
Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class CacheMvccSqlUpdateCountersTest, method testUpdateCountersDoubleUpdate.
/**
 * @throws Exception If failed.
 */
@Test
public void testUpdateCountersDoubleUpdate() throws Exception {
    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
        .setIndexedTypes(Integer.class, Integer.class);

    Ignite node = startGridsMultiThreaded(3);

    IgniteCache cache = node.cache(DEFAULT_CACHE_NAME);

    Affinity aff = affinity(cache);

    int key1 = 1;
    int part1 = aff.partition(key1);

    // Two writes to the same key within one transaction count as a single
    // update from the counter's point of view.
    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (" + key1 + ",1)");
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("UPDATE Integer SET _val=2 WHERE _key=" + key1);
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 1);

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",1)");
        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("UPDATE Integer SET _val=2 WHERE _key=" + key1);
        cache.query(qry).getAll();

        tx.commit();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 2);
}
Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class CacheMvccBackupsAbstractTest, method testBackupsCoherenceWithInFlightBatchesOverflow.
/**
 * Checks cache backups consistency with in-flight batches overflow.
 *
 * @throws Exception If failed.
 */
@Test
public void testBackupsCoherenceWithInFlightBatchesOverflow() throws Exception {
    testSpi = true;
    disableScheduledVacuum = true;

    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 1, DFLT_PARTITION_COUNT)
        .setIndexedTypes(Integer.class, Integer.class);

    final int KEYS_CNT = 30_000;

    assert KEYS_CNT % 2 == 0;

    startGrids(2);

    Ignite node1 = grid(0);
    Ignite node2 = grid(1);

    client = true;

    Ignite client = startGrid();

    awaitPartitionMapExchange();

    IgniteCache<?, ?> clientCache = client.cache(DEFAULT_CACHE_NAME);
    IgniteCache<?, ?> cache1 = node1.cache(DEFAULT_CACHE_NAME);
    IgniteCache<?, ?> cache2 = node2.cache(DEFAULT_CACHE_NAME);

    AtomicInteger keyGen = new AtomicInteger();
    Affinity affinity = affinity(clientCache);

    ClusterNode cNode1 = ((IgniteEx)node1).localNode();
    ClusterNode cNode2 = ((IgniteEx)node2).localNode();

    StringBuilder insert = new StringBuilder("INSERT INTO Integer (_key, _val) values ");

    for (int i = 0; i < KEYS_CNT; i++) {
        if (i > 0)
            insert.append(',');

        // To make big batches in the near results future, route the first half
        // of the keys to node1 and the second half to node2.
        Integer key = i < KEYS_CNT / 2 ? keyForNode(affinity, keyGen, cNode1) : keyForNode(affinity, keyGen, cNode2);

        assert key != null;

        insert.append('(').append(key).append(',').append(key * 10).append(')');
    }

    String qryStr = insert.toString();

    try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        tx.timeout(txLongTimeout);

        SqlFieldsQuery qry = new SqlFieldsQuery(qryStr);

        clientCache.query(qry).getAll();

        tx.commit();
    }

    // Add a delay to the enlist responses to simulate batches overflow.
    TestRecordingCommunicationSpi spi1 = TestRecordingCommunicationSpi.spi(node1);
    TestRecordingCommunicationSpi spi2 = TestRecordingCommunicationSpi.spi(node2);

    spi1.closure(new IgniteBiInClosure<ClusterNode, Message>() {
        @Override public void apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxQueryEnlistResponse)
                doSleep(100);
        }
    });

    spi2.closure(new IgniteBiInClosure<ClusterNode, Message>() {
        @Override public void apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxQueryEnlistResponse)
                doSleep(100);
        }
    });

    qryStr = "DELETE FROM Integer WHERE _key >= " + 10;

    try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        tx.timeout(txLongTimeout);

        SqlFieldsQuery qry = new SqlFieldsQuery(qryStr);

        clientCache.query(qry).getAll();

        tx.commit();
    }

    // Stop one node and verify the surviving copy holds the same row versions
    // and the same data as the stopped one did.
    Map<KeyCacheObject, List<CacheDataRow>> cache1Vers = allVersions(cache1);

    List res1 = getAll(cache1, "Integer");

    stopGrid(0);

    awaitPartitionMapExchange();

    Map<KeyCacheObject, List<CacheDataRow>> cache2Vers = allVersions(cache2);

    assertVersionsEquals(cache1Vers, cache2Vers);

    List res2 = getAll(cache2, "Integer");

    assertEqualsCollections(res1, res2);
}
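The bulk insert above relies on the keyForNode(...) test helper to pick keys whose primary copies land on a chosen node. The same effect can be had with the public Affinity API alone; a sketch under that assumption, with hypothetical names (AffinityKeyPicker, findKeyFor, the scan bound):

import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

final class AffinityKeyPicker {
    /** Scans candidate keys until one is primary on the target node; returns null if none is found. */
    static Integer findKeyFor(Affinity<Integer> aff, ClusterNode target, int startFrom) {
        for (int key = startFrom; key < startFrom + 100_000; key++) {
            // isPrimary(...) consults the current affinity assignment.
            if (aff.isPrimary(target, key))
                return key;
        }

        return null;
    }
}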
Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class CacheMvccBasicContinuousQueryTest, method checkUpdateCountersGapsClosed.
/**
 * @throws Exception If failed.
 */
private void checkUpdateCountersGapsClosed(CacheMode cacheMode) throws Exception {
    testSpi = true;

    int srvCnt = 4;

    startGridsMultiThreaded(srvCnt);

    IgniteEx nearNode = grid(srvCnt - 1);

    IgniteCache<Object, Object> cache = nearNode.createCache(
        cacheConfiguration(cacheMode, FULL_SYNC, srvCnt - 1, srvCnt)
            .setIndexedTypes(Integer.class, Integer.class));

    IgniteEx primary = grid(0);

    Affinity<Object> aff = nearNode.affinity(cache.getName());

    int[] nearBackupParts = aff.backupPartitions(nearNode.localNode());
    int[] primaryParts = aff.primaryPartitions(primary.localNode());

    Collection<Integer> nearSet = new HashSet<>();

    for (int part : nearBackupParts)
        nearSet.add(part);

    Collection<Integer> primarySet = new HashSet<>();

    for (int part : primaryParts)
        primarySet.add(part);

    // We need partitions that are primary on the first node and have a backup
    // on the near node.
    nearSet.retainAll(primarySet);

    List<Integer> keys = singlePartKeys(primary.cache(DEFAULT_CACHE_NAME), 20, nearSet.iterator().next());

    int range = 3;

    ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

    List<CacheEntryEvent> arrivedEvts = new ArrayList<>();

    CountDownLatch latch = new CountDownLatch(range * 2);

    qry.setLocalListener(new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent e : evts) {
                arrivedEvts.add(e);

                latch.countDown();
            }
        }
    });

    QueryCursor<Cache.Entry<Integer, Integer>> cur = nearNode.cache(DEFAULT_CACHE_NAME).query(qry);

    // Prevent the first transaction from preparing on the backups.
    TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(primary);

    spi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        private final AtomicInteger limiter = new AtomicInteger();

        @Override public boolean apply(ClusterNode node, Message msg) {
            if (msg instanceof GridDhtTxPrepareRequest)
                return limiter.getAndIncrement() < srvCnt - 1;

            return false;
        }
    });

    Transaction txA = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ);

    for (int i = 0; i < range; i++)
        primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 2);

    txA.commitAsync();

    GridTestUtils.runAsync(() -> {
        try (Transaction tx = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range; i < range * 2; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 1);

            tx.commit();
        }
    }).get();

    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return primary.context().cache().context().tm().activeTransactions().stream()
                .allMatch(tx -> tx.state() == PREPARING);
        }
    }, 3_000);

    GridTestUtils.runAsync(() -> {
        try (Transaction txB = primary.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            for (int i = range * 2; i < range * 3; i++)
                primary.cache(DEFAULT_CACHE_NAME).put(keys.get(i), 3);

            txB.commit();
        }
    }).get();

    long primaryUpdCntr = getUpdateCounter(primary, keys.get(0));

    assertEquals(range * 3, primaryUpdCntr);

    // Drop the primary while the first transaction is still stuck in PREPARING.
    stopGrid(primary.name());

    // Wait until all txs are rolled back.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            boolean allRolledBack = true;

            for (int i = 1; i < srvCnt; i++) {
                boolean rolledBack = grid(i).context().cache().context().tm().activeTransactions().stream()
                    .allMatch(tx -> tx.state() == ROLLED_BACK);

                allRolledBack &= rolledBack;
            }

            return allRolledBack;
        }
    }, 3_000);

    // The counter gap left by the rolled-back transaction must be closed: the
    // backups converge on the primary's counter while holding only the
    // committed rows.
    for (int i = 1; i < srvCnt; i++) {
        IgniteCache backupCache = grid(i).cache(DEFAULT_CACHE_NAME);

        int size = backupCache.query(new SqlFieldsQuery("select * from Integer")).getAll().size();

        long backupCntr = getUpdateCounter(grid(i), keys.get(0));

        assertEquals(range * 2, size);
        assertEquals(primaryUpdCntr, backupCntr);
    }

    assertTrue(latch.await(5, SECONDS));

    assertEquals(range * 2, arrivedEvts.size());

    cur.close();
}
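The partition-set arithmetic at the top of this test is the Affinity usage worth isolating: backupPartitions(...) for the near node intersected with primaryPartitions(...) for the primary yields partitions that are primary on one node and backed up on the other. A compact sketch of that intersection (class and method names are hypothetical):

import java.util.HashSet;
import java.util.Set;

import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

final class PartitionSets {
    /** Partitions that are primary on 'primary' and have a backup on 'backup'. */
    static Set<Integer> primaryWithBackupOn(Affinity<Object> aff, ClusterNode primary, ClusterNode backup) {
        Set<Integer> res = new HashSet<>();

        for (int part : aff.primaryPartitions(primary))
            res.add(part);

        Set<Integer> backups = new HashSet<>();

        for (int part : aff.backupPartitions(backup))
            backups.add(part);

        // Keep only partitions present in both sets.
        res.retainAll(backups);

        return res;
    }
}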
Use of org.apache.ignite.cache.affinity.Affinity in project ignite by apache.
The class CellularTxStreamer, method run.
/**
 * {@inheritDoc}
 */
@Override public void run(JsonNode jsonNode) throws Exception {
    String cacheName = jsonNode.get("cacheName").asText();
    int warmup = jsonNode.get("warmup").asInt();
    String cell = jsonNode.get("cell").asText();
    String attr = jsonNode.get("attr").asText();

    markInitialized();

    waitForActivation();

    IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(cacheName);

    int precision = 5;

    // Top-N worst latencies and the start times of the corresponding operations.
    long[] latencies = new long[precision];
    LocalDateTime[] opStartTimes = new LocalDateTime[precision];

    Arrays.fill(latencies, -1);

    int cnt = 0;
    long initTime = 0;

    boolean record = false;

    Affinity<Integer> aff = ignite.affinity(cacheName);

    List<Integer> cellKeys = new ArrayList<>();

    int candidate = 0;

    // Collect 100 keys whose owners (primary and backups) all reside in the
    // given cell.
    while (cellKeys.size() < 100) {
        Collection<ClusterNode> nodes = aff.mapKeyToPrimaryAndBackups(++candidate);

        Set<ClusterNode> stat = nodes.stream().filter(n -> n.attributes().get(attr).equals(cell)).collect(Collectors.toSet());

        if (stat.isEmpty())
            continue;

        assert nodes.size() == stat.size();

        cellKeys.add(candidate);
    }

    while (!terminated()) {
        cnt++;

        LocalDateTime start = LocalDateTime.now();

        long from = System.nanoTime();

        // Cycled update.
        cache.put(cellKeys.get(cnt % cellKeys.size()), cnt);

        long latency = System.nanoTime() - from;

        if (!record && cnt > warmup) {
            record = true;

            initTime = System.currentTimeMillis();

            log.info("WARMUP_FINISHED");
        }

        if (record) {
            // Keep 'latencies' sorted descending: shift smaller entries right
            // and insert the new latency at its position.
            for (int i = 0; i < latencies.length; i++) {
                if (latencies[i] <= latency) {
                    System.arraycopy(latencies, i, latencies, i + 1, latencies.length - i - 1);
                    System.arraycopy(opStartTimes, i, opStartTimes, i + 1, opStartTimes.length - i - 1);

                    latencies[i] = latency;
                    opStartTimes[i] = start;

                    break;
                }
            }
        }

        if (cnt % 1000 == 0)
            log.info("APPLICATION_STREAMED " + cnt + " transactions [worst_latency=" + Arrays.toString(latencies) + "]");
    }

    List<String> result = new ArrayList<>();

    DateTimeFormatter formatter = DateTimeFormatter.ofPattern("HH:mm:ss.SSS");

    for (int i = 0; i < precision; i++)
        result.add(Duration.ofNanos(latencies[i]).toMillis() + " ms at " + formatter.format(opStartTimes[i]));

    recordResult("WORST_LATENCY", result.toString());
    recordResult("STREAMED", cnt - warmup);
    recordResult("MEASURE_DURATION", System.currentTimeMillis() - initTime);

    markFinished();
}
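The key-selection loop above uses mapKeyToPrimaryAndBackups(...), which returns all owners of the key's partition with the primary node first. Its cell-membership check can be condensed as below; class and method names are hypothetical, and the attribute key is whatever the deployment passes (the streamer reads it from the "attr" parameter):

import java.util.Collection;

import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

final class CellCheck {
    /** True when every owner of the key (primary and backups) carries the cell's attribute value. */
    static boolean keyOwnedByCell(Affinity<Integer> aff, int key, String attr, String cell) {
        Collection<ClusterNode> owners = aff.mapKeyToPrimaryAndBackups(key);

        return owners.stream().allMatch(n -> cell.equals(n.attribute(attr)));
    }
}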