Use of org.apache.ignite.IgniteTransactions in the Apache Ignite project.
Example from the class CacheSerializableTransactionsTest, method testMultipleOptimisticRead.
/**
 * Verifies that many concurrent OPTIMISTIC/SERIALIZABLE transactions that only
 * read the same key all commit successfully — pure reads must not conflict.
 *
 * @throws Exception If failed.
 */
public void testMultipleOptimisticRead() throws Exception {
    final Ignite node = ignite(0);
    final Integer key = 1;
    final Integer val = 1;
    final int threadCnt = 50;
    final String cacheName = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 1, false, false)).getName();
    try {
        final IgniteCache<Integer, Integer> cache = node.cache(cacheName);
        // Seed the key inside an optimistic/serializable transaction.
        try (Transaction tx = node.transactions().txStart(OPTIMISTIC, SERIALIZABLE)) {
            cache.put(key, val);

            tx.commit();
        }

        assertTrue(cache.get(key).equals(val));

        // Run several rounds of concurrent read-only transactions.
        int round = 0;

        while (round < 10) {
            GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();

                    // Read-only tx: must always commit without serialization conflicts.
                    try (Transaction tx = txs.txStart(OPTIMISTIC, SERIALIZABLE)) {
                        assertTrue(cache.get(key).equals(val));

                        tx.commit();
                    }

                    return null;
                }
            }, threadCnt, "multiple-reads-thread").get();

            round++;
        }
    } finally {
        destroyCache(cacheName);
    }
}
Use of org.apache.ignite.IgniteTransactions in the Apache Ignite project.
Example from the class IgniteCacheConfigVariationsFullApiTest, method checkSkipStoreWithTransaction.
/**
 * Verifies cache-store bypass semantics of the skip-store projection inside
 * explicit transactions: operations through {@code cacheSkipStore} must be
 * visible in-memory (through both projections) but must never read from or
 * write to the underlying store, for the given concurrency/isolation mode.
 *
 * @param cache Cache instance.
 * @param cacheSkipStore Cache skip store projection (presumably {@code cache.withSkipStore()} — confirm with caller).
 * @param data Data set.
 * @param keys Keys list.
 * @param txConcurrency Concurrency mode.
 * @param txIsolation Isolation mode.
 * @throws Exception If failed.
 */
private void checkSkipStoreWithTransaction(IgniteCache<String, Integer> cache, IgniteCache<String, Integer> cacheSkipStore, Map<String, Integer> data, List<String> keys, TransactionConcurrency txConcurrency, TransactionIsolation txIsolation) throws Exception {
    info("Test tx skip store [concurrency=" + txConcurrency + ", isolation=" + txIsolation + ']');
    // Start each scenario from an empty cache.
    cache.removeAll(data.keySet());
    checkEmpty(cache, cacheSkipStore);
    IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
    Integer val = -1;
    // Several put check: puts via the skip-store projection are visible through
    // both projections inside the tx, but nothing reaches the store.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        for (String key : keys) cacheSkipStore.put(key, val);
        for (String key : keys) {
            assertEquals(val, cacheSkipStore.get(key));
            assertEquals(val, cache.get(key));
            assertFalse(storeStgy.isInStore(key));
        }
        tx.commit();
    }
    // After commit: values remain in-memory only, store stays empty.
    for (String key : keys) {
        assertEquals(val, cacheSkipStore.get(key));
        assertEquals(val, cache.get(key));
        assertFalse(storeStgy.isInStore(key));
    }
    assertEquals(0, storeStgy.getStoreSize());
    // cacheSkipStore putAll(..)/removeAll(..) check.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        cacheSkipStore.putAll(data);
        tx.commit();
    }
    for (String key : keys) {
        val = data.get(key);
        assertEquals(val, cacheSkipStore.get(key));
        assertEquals(val, cache.get(key));
        assertFalse(storeStgy.isInStore(key));
    }
    // Pre-populate the store: removeAll via skip-store removes in-memory
    // entries but must leave the store entries intact.
    storeStgy.putAllToStore(data);
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        cacheSkipStore.removeAll(data.keySet());
        tx.commit();
    }
    for (String key : keys) {
        // Skip-store read sees no entry (no read-through)...
        assertNull(cacheSkipStore.get(key));
        // ...while the plain cache reloads the value from the store.
        assertNotNull(cache.get(key));
        assertTrue(storeStgy.isInStore(key));
        cache.remove(key);
    }
    assertTrue(storeStgy.getStoreSize() == 0);
    // cache putAll(..)/removeAll(..) check: uncommitted tx state is visible
    // through both projections; the store is untouched before commit.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        cache.putAll(data);
        for (String key : keys) {
            assertNotNull(cacheSkipStore.get(key));
            assertNotNull(cache.get(key));
            assertFalse(storeStgy.isInStore(key));
        }
        cache.removeAll(data.keySet());
        for (String key : keys) {
            assertNull(cacheSkipStore.get(key));
            assertNull(cache.get(key));
            assertFalse(storeStgy.isInStore(key));
        }
        tx.commit();
    }
    assertTrue(storeStgy.getStoreSize() == 0);
    // putAll(..) from both cacheSkipStore and cache: first half of the keys is
    // written via skip-store, second half via the plain cache.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        Map<String, Integer> subMap = new HashMap<>();
        for (int i = 0; i < keys.size() / 2; i++) subMap.put(keys.get(i), i);
        cacheSkipStore.putAll(subMap);
        subMap.clear();
        for (int i = keys.size() / 2; i < keys.size(); i++) subMap.put(keys.get(i), i);
        cache.putAll(subMap);
        for (String key : keys) {
            assertNotNull(cacheSkipStore.get(key));
            assertNotNull(cache.get(key));
            // Nothing is written through to the store before commit.
            assertFalse(storeStgy.isInStore(key));
        }
        tx.commit();
    }
    // First half (skip-store writes): in-memory only after commit.
    for (int i = 0; i < keys.size() / 2; i++) {
        String key = keys.get(i);
        assertNotNull(cacheSkipStore.get(key));
        assertNotNull(cache.get(key));
        assertFalse(storeStgy.isInStore(key));
    }
    // Second half (plain cache writes): written through to the store on commit.
    for (int i = keys.size() / 2; i < keys.size(); i++) {
        String key = keys.get(i);
        assertNotNull(cacheSkipStore.get(key));
        assertNotNull(cache.get(key));
        assertTrue(storeStgy.isInStore(key));
    }
    cache.removeAll(data.keySet());
    for (String key : keys) {
        assertNull(cacheSkipStore.get(key));
        assertNull(cache.get(key));
        assertFalse(storeStgy.isInStore(key));
    }
    // Check that read-through is disabled when cacheSkipStore is used.
    // NOTE(review): bare putToStore(...) here vs storeStgy.putToStore(...) elsewhere —
    // presumably a delegating test helper hitting the same store; confirm.
    for (int i = 0; i < keys.size(); i++) putToStore(keys.get(i), i);
    assertTrue(cacheSkipStore.size(ALL) == 0);
    assertTrue(cache.size(ALL) == 0);
    assertTrue(storeStgy.getStoreSize() != 0);
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        assertTrue(cacheSkipStore.getAll(data.keySet()).size() == 0);
        for (String key : keys) {
            assertNull(cacheSkipStore.get(key));
            if (txIsolation == READ_COMMITTED) {
                // Under READ_COMMITTED the plain cache read loads the value from
                // the store, after which the skip-store projection sees it too.
                assertNotNull(cache.get(key));
                assertNotNull(cacheSkipStore.get(key));
            }
        }
        tx.commit();
    }
    cache.removeAll(data.keySet());
    val = -1;
    // invoke(..) via skip-store: the entry processor sees no prior value
    // (returns null) and the pre-existing store value (0) stays untouched.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        for (String key : data.keySet()) {
            storeStgy.putToStore(key, 0);
            assertNull(cacheSkipStore.invoke(key, new SetValueProcessor(val)));
        }
        tx.commit();
    }
    for (String key : data.keySet()) {
        assertEquals(0, storeStgy.getFromStore(key));
        assertEquals(val, cacheSkipStore.get(key));
        assertEquals(val, cache.get(key));
    }
    cache.removeAll(data.keySet());
    // putIfAbsent(..) via skip-store: the store value is not loaded, so the
    // put succeeds; the store keeps its old value.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        for (String key : data.keySet()) {
            storeStgy.putToStore(key, 0);
            assertTrue(cacheSkipStore.putIfAbsent(key, val));
        }
        tx.commit();
    }
    for (String key : data.keySet()) {
        assertEquals(0, storeStgy.getFromStore(key));
        assertEquals(val, cacheSkipStore.get(key));
        assertEquals(val, cache.get(key));
    }
    cache.removeAll(data.keySet());
    // getAndPut(..) via skip-store: returns null (no read-through) and leaves
    // the store value unchanged.
    try (Transaction tx = txs.txStart(txConcurrency, txIsolation)) {
        for (String key : data.keySet()) {
            storeStgy.putToStore(key, 0);
            assertNull(cacheSkipStore.getAndPut(key, val));
        }
        tx.commit();
    }
    for (String key : data.keySet()) {
        assertEquals(0, storeStgy.getFromStore(key));
        assertEquals(val, cacheSkipStore.get(key));
        assertEquals(val, cache.get(key));
    }
    // Leave the cache empty for the next scenario.
    cache.removeAll(data.keySet());
    checkEmpty(cache, cacheSkipStore);
}
Use of org.apache.ignite.IgniteTransactions in the Apache Ignite project.
Example from the class IgniteCachePrimaryNodeFailureRecoveryAbstractTest, method primaryAndOriginatingNodeFailure.
/**
 * Tests transaction recovery when both the primary node for one key (node 1)
 * and the transaction-originating node (node 0) are stopped while the
 * transaction is in the prepare phase. The tx handle is intentionally never
 * committed or closed: node 0 dies first, so the surviving nodes must resolve
 * the transaction via recovery.
 *
 * @param locBackupKey If {@code true} uses one key which is backup for originating node.
 * @param rollback If {@code true} tests rollback after primary node failure.
 * @param optimistic If {@code true} tests optimistic transaction.
 * @throws Exception If failed.
 */
private void primaryAndOriginatingNodeFailure(final boolean locBackupKey, final boolean rollback, boolean optimistic) throws Exception {
    IgniteCache<Integer, Integer> cache0 = jcache(0);
    IgniteCache<Integer, Integer> cache2 = jcache(2);
    Affinity<Integer> aff = ignite(0).affinity(DEFAULT_CACHE_NAME);
    Integer key0 = null;
    // Pick a key that is primary on node 1 and whose backup placement on
    // node 0 matches the requested 'locBackupKey' flag.
    for (int key = 0; key < 10_000; key++) {
        if (aff.isPrimary(ignite(1).cluster().localNode(), key)) {
            if (locBackupKey == aff.isBackup(ignite(0).cluster().localNode(), key)) {
                key0 = key;
                break;
            }
        }
    }
    assertNotNull(key0);
    final Integer key1 = key0;
    final Integer key2 = primaryKey(cache2);
    int backups = cache0.getConfiguration(CacheConfiguration.class).getBackups();
    // With the only backup on the soon-to-die originating node (< 2 backups),
    // pass null — presumably checkKey treats null as "no surviving owners to
    // verify"; confirm against checkKey().
    final Collection<ClusterNode> key1Nodes = (locBackupKey && backups < 2) ? null : aff.mapKeyToPrimaryAndBackups(key1);
    final Collection<ClusterNode> key2Nodes = aff.mapKeyToPrimaryAndBackups(key2);
    TestCommunicationSpi commSpi = (TestCommunicationSpi) ignite(0).configuration().getCommunicationSpi();
    IgniteTransactions txs = ignite(0).transactions();
    // Deliberately NOT try-with-resources: the originating node is stopped
    // below while the tx is still open.
    Transaction tx = txs.txStart(optimistic ? OPTIMISTIC : PESSIMISTIC, REPEATABLE_READ);
    log.info("Put key1: " + key1);
    cache0.put(key1, key1);
    log.info("Put key2: " + key2);
    cache0.put(key2, key2);
    log.info("Start prepare.");
    // Drop to the internal near-tx to drive the prepare phase manually.
    GridNearTxLocal txEx = ((TransactionProxyImpl) tx).tx();
    // Do not allow to finish prepare for key2.
    commSpi.blockMessages(ignite(2).cluster().localNode().id());
    IgniteInternalFuture<?> prepFut = txEx.prepareNearTxLocal();
    waitPrepared(ignite(1));
    log.info("Stop one primary node.");
    stopGrid(1);
    // Wait some time to catch possible issues in tx recovery.
    U.sleep(1000);
    if (!rollback) {
        // Let prepare finish so that recovery commits the tx after node 0 dies.
        commSpi.stopBlock();
        prepFut.get(10_000);
    }
    log.info("Stop originating node.");
    stopGrid(0);
    // Recovery is asynchronous: poll until the surviving nodes converge.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            try {
                checkKey(key1, rollback ? null : key1Nodes);
                checkKey(key2, rollback ? null : key2Nodes);
                return true;
            } catch (AssertionError e) {
                log.info("Check failed: " + e);
                return false;
            }
        }
    }, 5000);
    // Final check: throws if the condition never held within the timeout.
    checkKey(key1, rollback ? null : key1Nodes);
    checkKey(key2, rollback ? null : key2Nodes);
}
Use of org.apache.ignite.IgniteTransactions in the Apache Ignite project.
Example from the class IgniteCachePrimaryNodeFailureRecoveryAbstractTest, method primaryNodeFailure.
/**
 * Tests explicit commit/rollback of a transaction after the primary node for
 * one of its keys (node 1) fails during the prepare phase. Unlike
 * {@code primaryAndOriginatingNodeFailure}, the originating node survives and
 * finishes the transaction itself.
 *
 * @param locBackupKey If {@code true} uses one key which is backup for originating node.
 * @param rollback If {@code true} tests rollback after primary node failure.
 * @param optimistic If {@code true} tests optimistic transaction.
 * @throws Exception If failed.
 */
private void primaryNodeFailure(boolean locBackupKey, final boolean rollback, boolean optimistic) throws Exception {
    IgniteCache<Integer, Integer> cache0 = jcache(0);
    IgniteCache<Integer, Integer> cache2 = jcache(2);
    Affinity<Integer> aff = ignite(0).affinity(DEFAULT_CACHE_NAME);
    Integer key0 = null;
    // Pick a key that is primary on node 1 and whose backup placement on
    // node 0 matches the requested 'locBackupKey' flag.
    for (int key = 0; key < 10_000; key++) {
        if (aff.isPrimary(ignite(1).cluster().localNode(), key)) {
            if (locBackupKey == aff.isBackup(ignite(0).cluster().localNode(), key)) {
                key0 = key;
                break;
            }
        }
    }
    assertNotNull(key0);
    final Integer key1 = key0;
    final Integer key2 = primaryKey(cache2);
    final Collection<ClusterNode> key1Nodes = aff.mapKeyToPrimaryAndBackups(key1);
    final Collection<ClusterNode> key2Nodes = aff.mapKeyToPrimaryAndBackups(key2);
    TestCommunicationSpi commSpi = (TestCommunicationSpi) ignite(0).configuration().getCommunicationSpi();
    IgniteTransactions txs = ignite(0).transactions();
    try (Transaction tx = txs.txStart(optimistic ? OPTIMISTIC : PESSIMISTIC, REPEATABLE_READ)) {
        log.info("Put key1: " + key1);
        cache0.put(key1, key1);
        log.info("Put key2: " + key2);
        cache0.put(key2, key2);
        log.info("Start prepare.");
        // Drop to the internal near-tx to drive the prepare phase manually.
        GridNearTxLocal txEx = ((TransactionProxyImpl) tx).tx();
        // Do not allow to finish prepare for key2.
        commSpi.blockMessages(ignite(2).cluster().localNode().id());
        IgniteInternalFuture<?> prepFut = txEx.prepareNearTxLocal();
        waitPrepared(ignite(1));
        log.info("Stop one primary node.");
        stopGrid(1);
        // Wait some time to catch possible issues in tx recovery.
        U.sleep(1000);
        // Unblock messages so prepare can complete on the surviving nodes.
        commSpi.stopBlock();
        prepFut.get(10_000);
        if (rollback) {
            log.info("Rollback.");
            tx.rollback();
        } else {
            log.info("Commit.");
            tx.commit();
        }
    }
    // State may take time to converge after node stop: poll, then assert.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            try {
                checkKey(key1, rollback ? null : key1Nodes);
                checkKey(key2, rollback ? null : key2Nodes);
                return true;
            } catch (AssertionError e) {
                log.info("Check failed: " + e);
                return false;
            }
        }
    }, 5000);
    // Final check: throws if the condition never held within the timeout.
    checkKey(key1, rollback ? null : key1Nodes);
    checkKey(key2, rollback ? null : key2Nodes);
}
Use of org.apache.ignite.IgniteTransactions in the Apache Ignite project.
Example from the class IgniteCacheCrossCacheTxFailoverTest, method crossCacheTxFailover.
/**
 * Runs concurrent cross-cache transactions (each tx spans CACHE1 and CACHE2)
 * while repeatedly starting and stopping an extra grid node, verifying that
 * topology changes do not break cross-cache transactions.
 *
 * @param cacheMode Cache mode.
 * @param sameAff If {@code false} uses different number of partitions for caches.
 * @param concurrency Transaction concurrency.
 * @param isolation Transaction isolation.
 * @throws Exception If failed.
 */
private void crossCacheTxFailover(CacheMode cacheMode, boolean sameAff, final TransactionConcurrency concurrency, final TransactionIsolation isolation) throws Exception {
    IgniteKernal ignite0 = (IgniteKernal) ignite(0);
    final AtomicBoolean stop = new AtomicBoolean();
    try {
        // Different partition counts when !sameAff => different affinity mappings.
        ignite0.createCache(cacheConfiguration(CACHE1, cacheMode, 256));
        ignite0.createCache(cacheConfiguration(CACHE2, cacheMode, sameAff ? 256 : 128));
        final AtomicInteger threadIdx = new AtomicInteger();
        // Updater threads: each loops running a tx touching both caches until 'stop' is set.
        IgniteInternalFuture<?> fut = runMultiThreadedAsync(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                // Spread threads across the existing grid nodes.
                int idx = threadIdx.getAndIncrement();
                Ignite ignite = ignite(idx % GRID_CNT);
                log.info("Started update thread [node=" + ignite.name() + ", client=" + ignite.configuration().isClientMode() + ']');
                IgniteCache<TestKey, TestValue> cache1 = ignite.cache(CACHE1);
                IgniteCache<TestKey, TestValue> cache2 = ignite.cache(CACHE2);
                assertNotSame(cache1, cache2);
                IgniteTransactions txs = ignite.transactions();
                ThreadLocalRandom rnd = ThreadLocalRandom.current();
                long iter = 0;
                while (!stop.get()) {
                    // Randomly use the same key in both caches or two adjacent keys.
                    boolean sameKey = rnd.nextBoolean();
                    try {
                        try (Transaction tx = txs.txStart(concurrency, isolation)) {
                            if (sameKey) {
                                TestKey key = new TestKey(rnd.nextLong(KEY_RANGE));
                                cacheOperation(rnd, cache1, key);
                                cacheOperation(rnd, cache2, key);
                            } else {
                                TestKey key1 = new TestKey(rnd.nextLong(KEY_RANGE));
                                TestKey key2 = new TestKey(key1.key() + 1);
                                cacheOperation(rnd, cache1, key1);
                                cacheOperation(rnd, cache2, key2);
                            }
                            tx.commit();
                        }
                    } catch (CacheException | IgniteException e) {
                        // Failures are expected while topology changes; log and keep going.
                        log.info("Update error: " + e);
                    }
                    if (iter++ % 500 == 0)
                        log.info("Iteration: " + iter);
                }
                return null;
            }
            /**
             * Performs one random cache operation: put, remove, invoke or get.
             *
             * @param rnd Random.
             * @param cache Cache.
             * @param key Key.
             */
            private void cacheOperation(ThreadLocalRandom rnd, IgniteCache<TestKey, TestValue> cache, TestKey key) {
                switch(rnd.nextInt(4)) {
                    case 0:
                        cache.put(key, new TestValue(rnd.nextLong()));
                        break;
                    case 1:
                        cache.remove(key);
                        break;
                    case 2:
                        cache.invoke(key, new TestEntryProcessor(rnd.nextBoolean() ? 1L : null));
                        break;
                    case 3:
                        cache.get(key);
                        break;
                    default:
                        // Unreachable: rnd.nextInt(4) yields 0..3.
                        assert false;
                }
            }
        }, 10, "tx-thread");
        // Topology churn loop: for 3 minutes, add then remove one node, waiting
        // for affinity to settle after each change.
        long stopTime = System.currentTimeMillis() + 3 * 60_000;
        long topVer = ignite0.cluster().topologyVersion();
        boolean failed = false;
        while (System.currentTimeMillis() < stopTime) {
            log.info("Start node.");
            IgniteKernal ignite = (IgniteKernal) startGrid(GRID_CNT);
            assertFalse(ignite.configuration().isClientMode());
            topVer++;
            // Wait until affinity is ready on the new topology version.
            IgniteInternalFuture<?> affFut = ignite.context().cache().context().exchange().affinityReadyFuture(new AffinityTopologyVersion(topVer));
            try {
                if (affFut != null)
                    affFut.get(30_000);
            } catch (IgniteFutureTimeoutCheckedException ignored) {
                log.error("Failed to wait for affinity future after start: " + topVer);
                failed = true;
                break;
            }
            Thread.sleep(500);
            log.info("Stop node.");
            stopGrid(GRID_CNT);
            topVer++;
            affFut = ignite0.context().cache().context().exchange().affinityReadyFuture(new AffinityTopologyVersion(topVer));
            try {
                if (affFut != null)
                    affFut.get(30_000);
            } catch (IgniteFutureTimeoutCheckedException ignored) {
                log.error("Failed to wait for affinity future after stop: " + topVer);
                failed = true;
                break;
            }
        }
        // Stop updater threads and surface any exception they threw.
        stop.set(true);
        fut.get();
        assertFalse("Test failed, see log for details.", failed);
    } finally {
        // Always stop updaters and clean up caches/topology state, even on failure.
        stop.set(true);
        ignite0.destroyCache(CACHE1);
        ignite0.destroyCache(CACHE2);
        AffinityTopologyVersion topVer = ignite0.context().cache().context().exchange().lastTopologyFuture().get();
        for (Ignite ignite : G.allGrids()) ((IgniteKernal) ignite).context().cache().context().exchange().affinityReadyFuture(topVer).get();
        awaitPartitionMapExchange();
    }
}
Aggregations