Use of org.apache.ignite.cache.affinity.Affinity in the Apache Ignite project.
Source: class CacheMvccSqlTxQueriesAbstractTest, method testUpdateExplicitPartitionsWithoutReducer.
/**
 * Verifies that an UPDATE restricted to explicit partitions (1 and 2) touches exactly
 * the keys stored in those partitions.
 *
 * @throws Exception If failed.
 */
@Test
public void testUpdateExplicitPartitionsWithoutReducer() throws Exception {
    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, 10).setIndexedTypes(Integer.class, Integer.class);

    Ignite node = startGridsMultiThreaded(4);

    awaitPartitionMapExchange();

    IgniteCache<Object, Object> cache = node.cache(DEFAULT_CACHE_NAME);

    Affinity<Object> aff = internalCache0(cache).affinity();

    final int keysCnt = 10;

    int attempts = 0;

    Map<Integer, Integer> vals = new LinkedHashMap<>();

    // Probe candidate keys 0, 1, 2, ... until keysCnt keys mapping to partition 1 or 2 are found.
    for (Integer candidate = 0; vals.size() < keysCnt; candidate++) {
        int p = aff.partition(candidate);

        if (p == 1 || p == 2)
            vals.put(candidate, 0);
        else
            assertTrue("Maximum retry number exceeded", ++attempts < 1000);
    }

    cache.putAll(vals);

    SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer set _val=2").setPartitions(1, 2);

    List<List<?>> all = cache.query(qry).getAll();

    // The update count reported by the query must equal the number of keys placed in partitions 1/2.
    assertEquals(Long.valueOf(keysCnt), all.stream().findFirst().orElseThrow(AssertionError::new).get(0));

    List<List<?>> rows = cache.query(new SqlFieldsQuery("SELECT _val FROM Integer")).getAll();

    assertEquals(keysCnt, rows.size());
    assertTrue(rows.stream().map(r -> r.get(0)).map(Integer.class::cast).allMatch(v -> v == 2));
}
Use of org.apache.ignite.cache.affinity.Affinity in the Apache Ignite project.
Source: class CacheMvccSqlUpdateCountersTest, method testUpdateCountersRollback.
/**
 * Verifies that a rolled-back transaction leaves the partition update counter untouched,
 * both for an INSERT+UPDATE sequence and for a MERGE+UPDATE sequence.
 *
 * @throws Exception If failed.
 */
@Test
public void testUpdateCountersRollback() throws Exception {
    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT).setIndexedTypes(Integer.class, Integer.class);

    Ignite node = startGridsMultiThreaded(3);

    // Parameterized cache/affinity types instead of raw types (avoids unchecked warnings).
    IgniteCache<Integer, Integer> cache = node.cache(DEFAULT_CACHE_NAME);

    Affinity<Integer> aff = affinity(cache);

    int key1 = 1;

    int part1 = aff.partition(key1);

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (" + key1 + ",1)");

        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("UPDATE Integer SET _val=2 WHERE _key=" + key1);

        cache.query(qry).getAll();

        tx.rollback();
    }

    // Rollback must not advance the counter of the partition owning key1.
    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 0);

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("MERGE INTO Integer (_key, _val) values (" + key1 + ",1)");

        cache.query(qry).getAll();

        qry = new SqlFieldsQuery("UPDATE Integer SET _val=2 WHERE _key=" + key1);

        cache.query(qry).getAll();

        tx.rollback();
    }

    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 0);
}
Use of org.apache.ignite.cache.affinity.Affinity in the Apache Ignite project.
Source: class CacheMvccSqlUpdateCountersTest, method testUpdateCountersMultithreaded.
/**
 * Runs concurrent SQL DML writers (MERGE/DELETE in pessimistic REPEATABLE_READ transactions)
 * against an MVCC cache while tracking the expected per-key update count, then verifies that
 * the aggregated per-partition update counters match the tracked totals.
 *
 * @throws Exception If failed.
 */
@Test
public void testUpdateCountersMultithreaded() throws Exception {
    final int writers = 4;
    final int readers = 0;

    int parts = 8;

    int keys = 20;

    // Expected update count per key; seeded with 1 to account for the initial INSERT done by `init`.
    final Map<Integer, AtomicLong> tracker = new ConcurrentHashMap<>();

    for (int i = 0; i < keys; i++) tracker.put(i, new AtomicLong(1));

    // Initial population: inserts `keys` accounts via SQL inside a pessimistic transaction.
    final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
        @Override
        public void apply(IgniteCache<Object, Object> cache) {
            final IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();

            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
                SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO MvccTestAccount(_key, val, updateCnt) VALUES " + "(?, 0, 1)");

                for (int i = 0; i < keys; i++) {
                    try (FieldsQueryCursor<List<?>> cur = cache.query(qry.setArgs(i))) {
                        // Each INSERT must report exactly one affected row.
                        assertEquals(1L, cur.iterator().next().get(0));
                    }

                    // NOTE(review): commit() is called on every loop iteration rather than once after
                    // the loop — confirm this repeated commit of the same tx object is intentional.
                    tx.commit();
                }
            }
        }
    };

    // Writer: repeatedly picks a random key set, applies random MERGE/DELETE pairs per key in one
    // transaction, and records the expected update-counter delta per key in `tracker` on success.
    GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
        @Override
        public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
            ThreadLocalRandom rnd = ThreadLocalRandom.current();

            // Per-transaction accumulator: key -> expected counter delta for this tx.
            Map<Integer, AtomicLong> acc = new HashMap<>();

            int v = 0;

            while (!stop.get()) {
                int cnt = rnd.nextInt(keys / 3);

                if (cnt == 0)
                    cnt = 2;

                // Generate key set to be changed in tx.
                // NOTE(review): candidate keys are drawn from [0, cnt), not [0, keys) — verify this
                // narrow range is intended.
                while (acc.size() < cnt) acc.put(rnd.nextInt(cnt), new AtomicLong());

                TestCache<Integer, Integer> cache = randomCache(caches, rnd);

                boolean success = true;

                try {
                    IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();

                    try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
                        // Snapshot of all currently existing accounts, read via SQL inside the tx.
                        Map<Integer, MvccTestAccount> allVals = readAllByMode(cache.cache, tracker.keySet(), SQL, ACCOUNT_CODEC);

                        // Prefer removals once more than 2/3 of the keys exist.
                        boolean rmv = allVals.size() > keys * 2 / 3;

                        for (Map.Entry<Integer, AtomicLong> e : acc.entrySet()) {
                            int key = e.getKey();

                            AtomicLong accCntr = e.getValue();

                            boolean exists = allVals.containsKey(key);

                            // Expected counter delta for this key; overwritten (not accumulated) by
                            // the second operation below, so it reflects the net effect of the pair.
                            int delta = 0;

                            boolean createdInTx = false;

                            // First operation: DELETE (counts only if the key existed) or MERGE.
                            if (rmv && rnd.nextBoolean()) {
                                if (exists)
                                    delta = 1;

                                SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM MvccTestAccount WHERE _key=" + key);

                                cache.cache.query(qry).getAll();
                            } else {
                                delta = 1;

                                if (!exists)
                                    createdInTx = true;

                                SqlFieldsQuery qry = new SqlFieldsQuery("MERGE INTO MvccTestAccount " + "(_key, val, updateCnt) VALUES (" + key + ", " + rnd.nextInt(100) + ", 1)");

                                cache.cache.query(qry).getAll();
                            }

                            // Second operation: randomly DELETE or MERGE the same key again.
                            if (rnd.nextBoolean()) {
                                if (createdInTx)
                                    // Do not count cases when key created and removed in the same tx.
                                    delta = 0;

                                SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM MvccTestAccount WHERE _key=" + key);

                                cache.cache.query(qry).getAll();
                            } else {
                                delta = 1;

                                SqlFieldsQuery qry = new SqlFieldsQuery("MERGE INTO MvccTestAccount " + "(_key, val, updateCnt) VALUES (" + key + ", " + rnd.nextInt(100) + ", 1)");

                                cache.cache.query(qry).getAll();
                            }

                            accCntr.addAndGet(delta);
                        }

                        tx.commit();
                    }
                } catch (Exception e) {
                    handleTxException(e);

                    success = false;

                    // NOTE(review): `r` is computed but never used — looks like leftover debugging
                    // code (sum of deltas for keys in partition 0); consider removing.
                    int r = 0;

                    for (Map.Entry<Integer, AtomicLong> en : acc.entrySet()) {
                        if (((IgniteCacheProxy) cache.cache).context().affinity().partition(en.getKey()) == 0)
                            r += en.getValue().intValue();
                    }
                } finally {
                    cache.readUnlock();

                    if (success) {
                        v++;

                        // Fold this tx's per-key deltas into the global tracker.
                        for (Map.Entry<Integer, AtomicLong> e : acc.entrySet()) {
                            int k = e.getKey();

                            long updCntr = e.getValue().get();

                            tracker.get(k).addAndGet(updCntr);
                        }

                        // NOTE(review): same unused `r` computation as in the catch block above.
                        int r = 0;

                        for (Map.Entry<Integer, AtomicLong> en : acc.entrySet()) {
                            if (((IgniteCacheProxy) cache.cache).context().affinity().partition(en.getKey()) == 0)
                                r += en.getValue().intValue();
                        }
                    }

                    acc.clear();
                }
            }

            info("Writer done, updates: " + v);
        }
    };

    // No readers in this scenario (readers == 0), so the reader closure is a no-op.
    GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader = new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
        @Override
        public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
            // No-op.
        }
    };

    readWriteTest(null, 4, 1, 2, parts, writers, readers, DFLT_TEST_TIME, new InitIndexing(Integer.class, MvccTestAccount.class), init, writer, reader);

    // Aggregate the tracked per-key update counts into expected per-partition totals.
    Map<Integer, AtomicLong> updPerParts = new HashMap<>(parts);

    Affinity aff = grid(1).cachex(DEFAULT_CACHE_NAME).affinity();

    for (Map.Entry<Integer, AtomicLong> e : tracker.entrySet()) {
        int k = e.getKey();

        long updCntr = e.getValue().get();

        int p = aff.partition(k);

        AtomicLong cntr = updPerParts.get(p);

        if (cntr == null) {
            cntr = new AtomicLong();

            updPerParts.putIfAbsent(p, cntr);
        }

        cntr.addAndGet(updCntr);
    }

    // Every partition's actual update counter must equal the tracked expectation.
    for (Map.Entry<Integer, AtomicLong> e : updPerParts.entrySet()) checkUpdateCounters(DEFAULT_CACHE_NAME, e.getKey(), e.getValue().get());
}
Use of org.apache.ignite.cache.affinity.Affinity in the Apache Ignite project.
Source: class CacheMvccSqlUpdateCountersTest, method testUpdateCountersInsertSimple.
/**
 * Verifies that committed single-key DML advances the partition update counter:
 * one INSERT brings the counter to 1, a subsequent UPDATE brings it to 2.
 *
 * @throws Exception If failed.
 */
@Test
public void testUpdateCountersInsertSimple() throws Exception {
    ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT).setIndexedTypes(Integer.class, Integer.class);

    Ignite node = startGridsMultiThreaded(3);

    // Parameterized cache/affinity types instead of raw types (avoids unchecked warnings).
    IgniteCache<Integer, Integer> cache = node.cache(DEFAULT_CACHE_NAME);

    Affinity<Integer> aff = affinity(cache);

    Integer key1 = 1;

    int part1 = aff.partition(key1);

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (" + key1 + ",1)");

        cache.query(qry).getAll();

        tx.commit();
    }

    // Committed INSERT advances the owning partition's counter to 1.
    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 1);

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer SET _val=2 WHERE _key=" + key1);

        cache.query(qry).getAll();

        tx.commit();
    }

    // Committed UPDATE advances it to 2.
    checkUpdateCounters(DEFAULT_CACHE_NAME, part1, 2);
}
Aggregations