Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache: class IgniteTxManager, method lockMultiple.
/**
 * @param tx Transaction.
 * @param entries Entries to lock.
 * @return {@code True} if all keys were locked.
 * @throws IgniteCheckedException If lock has been cancelled.
 */
private boolean lockMultiple(IgniteInternalTx tx, Iterable<IgniteTxEntry> entries) throws IgniteCheckedException {
    assert tx.optimistic() || !tx.local();
    long remainingTime = tx.remainingTime();
    // For serializable transactions, failure to acquire lock means
    // that there is a serializable conflict. For all other isolation levels,
    // we wait for the lock.
    long timeout = remainingTime < 0 ? 0 : remainingTime;
    GridCacheVersion serOrder = (tx.serializable() && tx.optimistic()) ? tx.nearXidVersion() : null;
    for (IgniteTxEntry txEntry1 : entries) {
        // Check if this entry was prepared before.
        if (!txEntry1.markPrepared() || txEntry1.explicitVersion() != null)
            continue;
        GridCacheContext cacheCtx = txEntry1.context();
        while (true) {
            cctx.database().checkpointReadLock();
            try {
                GridCacheEntryEx entry1 = txEntry1.cached();
                assert entry1 != null : txEntry1;
                assert !entry1.detached() : "Expected non-detached entry for near transaction " + "[locNodeId=" + cctx.localNodeId() + ", entry=" + entry1 + ']';
                GridCacheVersion serReadVer = txEntry1.entryReadVersion();
                assert serReadVer == null || (tx.optimistic() && tx.serializable()) : txEntry1;
                boolean read = serOrder != null && txEntry1.op() == READ;
                entry1.unswap();
                if (!entry1.tmLock(tx, timeout, serOrder, serReadVer, read)) {
                    // Unlock locks locked so far.
                    for (IgniteTxEntry txEntry2 : entries) {
                        if (txEntry2 == txEntry1)
                            break;
                        txUnlock(tx, txEntry2);
                    }
                    return false;
                }
                break;
            } catch (GridCacheEntryRemovedException ignored) {
                if (log.isDebugEnabled())
                    log.debug("Got removed entry in TM lockMultiple(..) method (will retry): " + txEntry1);
                try {
                    // Renew cache entry.
                    txEntry1.cached(cacheCtx.cache().entryEx(txEntry1.key(), tx.topologyVersion()));
                } catch (GridDhtInvalidPartitionException e) {
                    assert tx.dht() : "Received invalid partition for non DHT transaction [tx=" + tx + ", invalidPart=" + e.partition() + ']';
                    // If partition is invalid, we ignore this entry.
                    tx.addInvalidPartition(cacheCtx, e.partition());
                    break;
                }
            } catch (GridDistributedLockCancelledException ignore) {
                tx.setRollbackOnly();
                throw new IgniteCheckedException("Entry lock has been cancelled for transaction: " + tx);
            } finally {
                cctx.database().checkpointReadUnlock();
            }
        }
    }
    return true;
}
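A note on the pattern above: any operation on a GridCacheEntryEx may fail with GridCacheEntryRemovedException if the entry is concurrently removed or evicted, so lockMultiple(..) runs inside a while (true) loop, renews the cached entry and retries. A minimal sketch of that retry idiom, built only from the calls visible in the method (doLockWork is a hypothetical placeholder for the unswap/tmLock step, not an Ignite API):

    while (true) {
        GridCacheEntryEx entry = txEntry.cached();

        try {
            doLockWork(entry); // Placeholder for entry.unswap() + entry.tmLock(..) in the real method.

            break; // Success: leave the retry loop.
        } catch (GridCacheEntryRemovedException ignored) {
            // The entry was removed concurrently: obtain a fresh handle and retry.
            txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), tx.topologyVersion()));
        }
    }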
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache: class GridCacheAtomicInvalidPartitionHandlingSelfTest, method checkRestarts.
/**
 * @param writeSync Write synchronization mode to check.
 * @throws Exception If failed.
 */
private void checkRestarts(CacheWriteSynchronizationMode writeSync) throws Exception {
    this.writeSync = writeSync;
    final int gridCnt = 6;
    startGrids(gridCnt);
    awaitPartitionMapExchange();
    try {
        assertEquals(testClientNode(), (boolean) grid(0).configuration().isClientMode());
        final IgniteCache<Object, Object> cache = grid(0).cache(DEFAULT_CACHE_NAME);
        final int range = 100_000;
        final Set<Integer> keys = new LinkedHashSet<>();
        try (IgniteDataStreamer<Integer, Integer> streamer = grid(0).dataStreamer(DEFAULT_CACHE_NAME)) {
            streamer.allowOverwrite(true);
            for (int i = 0; i < range; i++) {
                streamer.addData(i, 0);
                keys.add(i);
                if (i > 0 && i % 10_000 == 0)
                    System.err.println("Put: " + i);
            }
        }
        final Affinity<Integer> aff = grid(0).affinity(DEFAULT_CACHE_NAME);
        boolean putDone = GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override
            public boolean apply() {
                Iterator<Integer> it = keys.iterator();
                while (it.hasNext()) {
                    Integer key = it.next();
                    Collection<ClusterNode> affNodes = aff.mapKeyToPrimaryAndBackups(key);
                    for (int i = 0; i < gridCnt; i++) {
                        ClusterNode locNode = grid(i).localNode();
                        IgniteCache<Object, Object> cache = grid(i).cache(DEFAULT_CACHE_NAME);
                        Object val = cache.localPeek(key);
                        if (affNodes.contains(locNode)) {
                            if (val == null)
                                return false;
                        } else
                            assertNull(val);
                    }
                    it.remove();
                }
                return true;
            }
        }, 30_000);
        assertTrue(putDone);
        assertTrue(keys.isEmpty());
        final AtomicBoolean done = new AtomicBoolean();
        delay = true;
        System.err.println("FINISHED PUTS");
        // Start put threads.
        IgniteInternalFuture<?> fut = multithreadedAsync(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                Random rnd = new Random();
                while (!done.get()) {
                    try {
                        int cnt = rnd.nextInt(5);
                        if (cnt < 2) {
                            int key = rnd.nextInt(range);
                            int val = rnd.nextInt();
                            cache.put(key, val);
                        } else {
                            Map<Integer, Integer> upd = new TreeMap<>();
                            for (int i = 0; i < cnt; i++) upd.put(rnd.nextInt(range), rnd.nextInt());
                            cache.putAll(upd);
                        }
                    } catch (CachePartialUpdateException ignored) {
                        // No-op.
                    }
                }
                return null;
            }
        }, 4, "putAll-thread");
        Random rnd = new Random();
        // Restart random nodes.
        for (int r = 0; r < 20; r++) {
            int idx0 = rnd.nextInt(gridCnt - 1) + 1;
            stopGrid(idx0);
            U.sleep(200);
            startGrid(idx0);
        }
        done.set(true);
        awaitPartitionMapExchange();
        fut.get();
        for (int k = 0; k < range; k++) {
            Collection<ClusterNode> affNodes = affinity(cache).mapKeyToPrimaryAndBackups(k);
            // Test is valid with at least one backup.
            assert affNodes.size() >= 2;
            Object val = null;
            GridCacheVersion ver = null;
            UUID nodeId = null;
            for (int i = 0; i < gridCnt; i++) {
                ClusterNode locNode = grid(i).localNode();
                GridCacheAdapter<Object, Object> c = ((IgniteKernal) grid(i)).internalCache(DEFAULT_CACHE_NAME);
                GridCacheEntryEx entry = null;
                try {
                    entry = c.entryEx(k);
                    entry.unswap();
                } catch (GridDhtInvalidPartitionException ignored) {
                    // Skip key.
                }
                for (int r = 0; r < 10; r++) {
                    try {
                        if (affNodes.contains(locNode)) {
                            assert c.affinity().isPrimaryOrBackup(locNode, k);
                            boolean primary = c.affinity().isPrimary(locNode, k);
                            assertNotNull("Failed to find entry on node for key [locNode=" + locNode.id() + ", key=" + k + ']', entry);
                            if (val == null) {
                                assertNull(ver);
                                val = CU.value(entry.rawGet(), entry.context(), false);
                                ver = entry.version();
                                nodeId = locNode.id();
                            } else {
                                assertNotNull(ver);
                                assertEquals("Failed to check value for key [key=" + k + ", node=" + locNode.id() + ", primary=" + primary + ", recNodeId=" + nodeId + ']', val, CU.value(entry.rawGet(), entry.context(), false));
                                assertEquals("Failed to check version for key [key=" + k + ", node=" + locNode.id() + ", primary=" + primary + ", recNodeId=" + nodeId + ']', ver, entry.version());
                            }
                        } else
                            assertTrue("Invalid entry: " + entry, entry == null || !entry.partitionValid());
                    } catch (AssertionError e) {
                        if (r == 9) {
                            info("Failed to verify cache contents: " + e.getMessage());
                            throw e;
                        }
                        info("Failed to verify cache contents, will retry: " + e.getMessage());
                        // Give some time to finish async updates.
                        U.sleep(1000);
                    }
                }
            }
        }
    } finally {
        stopAllGrids();
    }
}
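The per-key verification at the end of the test reduces to reading the raw entry from each node's internal cache and comparing value and version across the owning nodes. A condensed sketch of that read path, using only the internal calls that already appear in the test (it assumes the same test harness, where grid(i), the key k and DEFAULT_CACHE_NAME come from the surrounding loops):

    // Sketch: read the raw value and version of key k on node i via the internal API.
    GridCacheAdapter<Object, Object> c = ((IgniteKernal) grid(i)).internalCache(DEFAULT_CACHE_NAME);

    GridCacheEntryEx entry = null;

    try {
        entry = c.entryEx(k);

        entry.unswap(); // Load the value into the entry if it is not on-heap yet.
    } catch (GridDhtInvalidPartitionException ignored) {
        // The node no longer owns this partition: skip the key.
    }

    if (entry != null) {
        Object val = CU.value(entry.rawGet(), entry.context(), false);
        GridCacheVersion ver = entry.version();

        // Compare val and ver with the values read on the other primary/backup nodes.
    }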
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache: class GridCachePartitionedMultiNodeCounterSelfTest, method checkNearAndPrimary.
/**
 * @param gridCnt Grid count.
 * @param priThreads Primary threads.
 * @param nearThreads Near threads.
 * @throws Exception If failed.
 */
private void checkNearAndPrimary(int gridCnt, int priThreads, int nearThreads) throws Exception {
    assert gridCnt > 0;
    assert priThreads >= 0;
    assert nearThreads >= 0;
    X.println("*** Retries: " + RETRIES);
    X.println("*** Log frequency: " + LOG_FREQ);
    Affinity<String> aff = affinity(grid(0).<String, Integer>cache(DEFAULT_CACHE_NAME));
    Collection<ClusterNode> affNodes = aff.mapKeyToPrimaryAndBackups(CNTR_KEY);
    X.println("*** Affinity nodes [key=" + CNTR_KEY + ", nodes=" + U.nodeIds(affNodes) + ", igniteInstanceNames=" + igniteInstanceNames(U.nodeIds(affNodes)) + ']');
    assertEquals(1 + backups, affNodes.size());
    ClusterNode first = F.first(affNodes);
    assert first != null;
    final Ignite pri = G.ignite(first.id());
    List<Ignite> nears = grids(gridCnt, pri);
    final UUID priId = pri.cluster().localNode().id();
    // Initialize.
    pri.cache(DEFAULT_CACHE_NAME).put(CNTR_KEY, 0);
    // nears.get(0).cache(DEFAULT_CACHE_NAME).put(CNTR_KEY, 0);
    assertNull(near(pri).peekEx(CNTR_KEY));
    final GridCacheEntryEx dhtEntry = dht(pri).entryEx(CNTR_KEY);
    assertNotNull(dhtEntry);
    dhtEntry.unswap();
    assertEquals(Integer.valueOf(0), dhtEntry.rawGet().value(dhtEntry.context().cacheObjectContext(), false));
    final AtomicInteger globalCntr = new AtomicInteger(0);
    Collection<Thread> threads = new LinkedList<>();
    final CountDownLatch startLatch = new CountDownLatch(gridCnt);
    final AtomicBoolean locked = new AtomicBoolean(false);
    if (priThreads > 0) {
        final AtomicInteger logCntr = new AtomicInteger();
        for (int i = 0; i < priThreads; i++) {
            info("*** Starting primary thread: " + i);
            threads.add(new Thread(new Runnable() {
                @Override
                public void run() {
                    info("*** Started primary thread ***");
                    try {
                        startLatch.countDown();
                        startLatch.await();
                        for (int i = 0; i < RETRIES; i++) {
                            if (DEBUG)
                                info("***");
                            int cntr = logCntr.getAndIncrement();
                            if (DEBUG || cntr % LOG_FREQ == 0)
                                info("*** Primary Iteration #" + i + ": " + cntr + " ***");
                            if (DEBUG)
                                info("***");
                            IgniteCache<String, Integer> c = pri.cache(DEFAULT_CACHE_NAME);
                            Integer oldCntr = c.localPeek(CNTR_KEY, CachePeekMode.ONHEAP);
                            GridCacheEntryEx dhtNear = near(pri).peekEx(CNTR_KEY);
                            try (Transaction tx = pri.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
                                if (DEBUG)
                                    info("Started tx [igniteInstanceName=" + pri.name() + ", primary=true, xid=" + tx.xid() + ", oldCntr=" + oldCntr + ", node=" + priId + ", dhtEntry=" + dhtEntry + ", dhtNear=" + dhtNear + ']');
                                // Initial lock.
                                int curCntr = c.get(CNTR_KEY);
                                assertTrue("Lock violation: " + tx, locked.compareAndSet(false, true));
                                if (dhtNear == null)
                                    dhtNear = near(pri).peekEx(CNTR_KEY);
                                if (DEBUG)
                                    info("Read counter [igniteInstanceName=" + pri.name() + ", primary=true, curCntr=" + curCntr + ", oldCntr=" + oldCntr + ", node=" + priId + ", dhtEntry=" + dhtEntry + ", dhtNear=" + dhtNear + ']');
                                int global = globalCntr.get();
                                assert curCntr >= global : invalid("Counter mismatch", pri, true, curCntr, global);
                                int newCntr = curCntr + 1;
                                if (DEBUG)
                                    info("Setting global counter [old=" + global + ", new=" + newCntr + ']');
                                assert globalCntr.compareAndSet(global, newCntr) : invalid("Invalid global counter", pri, true, newCntr, global);
                                int prev = c.getAndPut(CNTR_KEY, newCntr);
                                if (DEBUG)
                                    info("Put new value [igniteInstanceName=" + pri.name() + ", primary=true, prev=" + prev + ", newCntr=" + newCntr + ']');
                                assert curCntr == prev : invalid("Counter mismatch", pri, true, curCntr, prev);
                                assertTrue("Lock violation: " + tx, locked.compareAndSet(true, false));
                                tx.commit();
                                if (DEBUG)
                                    info("Committed tx: " + tx);
                            }
                        }
                    } catch (Throwable e) {
                        error(e.getMessage(), e);
                        fail(e.getMessage());
                    }
                }
            }, "primary-t#" + i));
        }
    }
    if (nearThreads > 0) {
        int tid = 0;
        final AtomicInteger logCntr = new AtomicInteger();
        for (final Ignite near : nears) {
            for (int i = 0; i < nearThreads; i++) {
                info("*** Starting near thread: " + i);
                threads.add(new Thread(new Runnable() {
                    @Override
                    public void run() {
                        info("*** Started near thread ***");
                        UUID nearId = near.cluster().localNode().id();
                        GridCacheEntryEx nearEntry = near(near).peekEx(CNTR_KEY);
                        try {
                            startLatch.countDown();
                            startLatch.await();
                            for (int i = 0; i < RETRIES; i++) {
                                if (DEBUG)
                                    info("***");
                                int cntr = logCntr.getAndIncrement();
                                if (DEBUG || cntr % LOG_FREQ == 0)
                                    info("*** Near Iteration #" + i + ": " + cntr + " ***");
                                if (DEBUG)
                                    info("***");
                                IgniteCache<String, Integer> c = near.cache(DEFAULT_CACHE_NAME);
                                Integer oldCntr = c.localPeek(CNTR_KEY, CachePeekMode.ONHEAP);
                                try (Transaction tx = near.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
                                    if (DEBUG)
                                        info("Started tx [igniteInstanceName=" + near.name() + ", primary=false, xid=" + tx.xid() + ", oldCntr=" + oldCntr + ", node=" + nearId + ", nearEntry=" + nearEntry + ']');
                                    // Initial lock.
                                    Integer curCntr = c.get(CNTR_KEY);
                                    nearEntry = near(near).peekEx(CNTR_KEY);
                                    assert curCntr != null : "Counter is null [nearEntry=" + nearEntry + ", dhtEntry=" + dht(near).peekEx(CNTR_KEY) + ']';
                                    if (DEBUG)
                                        info("Read counter [igniteInstanceName=" + near.name() + ", primary=false, curCntr=" + curCntr + ", oldCntr=" + oldCntr + ", node=" + nearId + ", nearEntry=" + nearEntry + ']');
                                    assert locked.compareAndSet(false, true) : "Lock violation: " + tx;
                                    int global = globalCntr.get();
                                    assert curCntr >= global : invalid("Counter mismatch", near, false, curCntr, global);
                                    int newCntr = curCntr + 1;
                                    if (DEBUG)
                                        info("Setting global counter [old=" + global + ", new=" + newCntr + ']');
                                    assert globalCntr.compareAndSet(global, newCntr) : invalid("Invalid global counter", near, false, newCntr, global);
                                    int prev = c.getAndPut(CNTR_KEY, newCntr);
                                    if (DEBUG)
                                        info("Put new value [igniteInstanceName=" + near.name() + ", primary=false, prev=" + prev + ", newCntr=" + newCntr + ']');
                                    assert curCntr == prev : invalid("Counter mismatch", near, false, curCntr, prev);
                                    assertTrue("Lock violation: " + tx, locked.compareAndSet(true, false));
                                    tx.commit();
                                    if (DEBUG)
                                        info("Committed tx: " + tx);
                                }
                            }
                        } catch (Throwable t) {
                            error(t.getMessage(), t);
                            fail(t.getMessage());
                        }
                    }
                }, "near-#" + tid + "-t#" + i));
            }
            tid++;
        }
    }
    for (Thread t : threads) t.start();
    for (Thread t : threads) t.join();
    X.println("*** ");
    Map<String, Integer> cntrs = new HashMap<>();
    for (int i = 0; i < gridCnt; i++) {
        Ignite g = grid(i);
        dht(g).context().tm().printMemoryStats();
        near(g).context().tm().printMemoryStats();
        IgniteCache<String, Integer> cache = grid(i).cache(DEFAULT_CACHE_NAME);
        int cntr = nearThreads > 0 && nears.contains(g) ? cache.get(CNTR_KEY) : cache.localPeek(CNTR_KEY);
        X.println("*** Cache counter [igniteInstanceName=" + g.name() + ", cntr=" + cntr + ']');
        cntrs.put(g.name(), cntr);
    }
    int updateCnt = priThreads + nears.size() * nearThreads;
    int exp = RETRIES * updateCnt;
    for (Map.Entry<String, Integer> e : cntrs.entrySet()) assertEquals("Counter check failed on grid [igniteInstanceName=" + e.getKey() + ", dhtEntry=" + dht(G.ignite(e.getKey())).peekEx(CNTR_KEY) + ", nearEntry=" + near(G.ignite(e.getKey())).peekEx(CNTR_KEY) + ']', exp, e.getValue().intValue());
    X.println("*** ");
}
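Stripped of the logging and the lock-violation bookkeeping, every worker thread in this test performs the same transactional read-increment-write on the shared counter key, so the final counter must equal RETRIES times the total number of threads. A minimal sketch of one such increment using the public transaction API shown above (node stands for either the primary or a near node; CNTR_KEY is the test's counter key):

    // Sketch: one counter increment under PESSIMISTIC/REPEATABLE_READ, as each worker thread does it.
    IgniteCache<String, Integer> c = node.cache(DEFAULT_CACHE_NAME);

    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
        int cur = c.get(CNTR_KEY); // Acquires the key lock for the rest of the transaction.

        int prev = c.getAndPut(CNTR_KEY, cur + 1);

        assert prev == cur; // No other thread may update the key while the lock is held.

        tx.commit();
    }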
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache: class IgniteCacheLockFailoverSelfTest, method testUnlockPrimaryLeft.
/**
 * @throws Exception If failed.
 */
public void testUnlockPrimaryLeft() throws Exception {
    GridCacheAdapter<Integer, Integer> cache = ((IgniteKernal) grid(0)).internalCache(DEFAULT_CACHE_NAME);
    Integer key = backupKey(grid(0).cache(DEFAULT_CACHE_NAME));
    cache.lock(key, 0);
    stopGrid(1);
    cache.unlock(key);
    GridCacheEntryEx entry = cache.peekEx(key);
    assertTrue("Remote MVCC is not empty: " + entry, entry == null || entry.remoteMvccSnapshot().isEmpty());
    startGrid(1);
}
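The check relies on GridCacheAdapter.peekEx(..), which returns the cached entry without creating one, so a null result simply means the key is no longer resident on that node. If the same verification is wanted on every node rather than only on the lock requester, a hedged sketch built from the calls in the tests above would look like this (gridCount is a hypothetical placeholder for the number of started nodes):

    // Sketch: assert that no node keeps remote MVCC lock candidates for the key.
    for (int i = 0; i < gridCount; i++) {
        GridCacheAdapter<Integer, Integer> c = ((IgniteKernal) grid(i)).internalCache(DEFAULT_CACHE_NAME);

        GridCacheEntryEx e = c.peekEx(key);

        assertTrue("Remote MVCC is not empty: " + e, e == null || e.remoteMvccSnapshot().isEmpty());
    }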
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache: class IgniteCacheExpiryPolicyWithStoreAbstractTest, method checkTtl.
/**
 * @param key Key.
 * @param ttl TTL.
 * @param primaryOnly If {@code true} expect entries only on primary node.
 * @throws Exception If failed.
 */
private void checkTtl(Object key, final long ttl, boolean primaryOnly) throws Exception {
    boolean found = false;
    for (int i = 0; i < gridCount(); i++) {
        IgniteKernal grid = (IgniteKernal) grid(i);
        GridCacheAdapter<Object, Object> cache = grid.context().cache().internalCache(DEFAULT_CACHE_NAME);
        GridCacheEntryEx e = null;
        try {
            e = cache.entryEx(key);
            e.unswap();
        } catch (GridDhtInvalidPartitionException ignore) {
            // No-op.
        }
        if ((e == null || e.rawGet() == null) && cache.context().isNear())
            e = cache.context().near().dht().peekEx(key);
        if (e == null || e.rawGet() == null)
            e = null;
        if (e == null) {
            if (primaryOnly)
                assertTrue("Not found " + key, !grid.affinity(DEFAULT_CACHE_NAME).isPrimary(grid.localNode(), key));
            else
                assertTrue("Not found " + key, !grid.affinity(DEFAULT_CACHE_NAME).isPrimaryOrBackup(grid.localNode(), key));
        } else {
            found = true;
            if (ttl > 0)
                assertTrue(e.expireTime() > 0);
            else
                assertEquals(0, e.expireTime());
        }
    }
    assertTrue(found);
}
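For a nonzero expireTime() to show up in this check, the writes performed elsewhere in the test must go through an expiry policy. A minimal sketch of such a write using the standard JCache expiry classes (javax.cache.expiry.ModifiedExpiryPolicy and Duration together with IgniteCache.withExpiryPolicy); this is an assumption about typical usage, and the concrete policy used by the abstract test is configured in the test class itself, not shown here. The variables cache, key, value and ttl stand for the caller's context:

    // Sketch: write through a per-operation expiry policy so that expireTime() > 0 on owning nodes.
    IgniteCache<Object, Object> expiring =
        cache.withExpiryPolicy(new ModifiedExpiryPolicy(new Duration(TimeUnit.MILLISECONDS, ttl)));

    expiring.put(key, value); // checkTtl(key, ttl, ...) should then see a positive expire time.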