Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
Class GridCacheNearReadersSelfTest, method testBackupEntryReaders.
/** @throws Exception If failed. */
public void testBackupEntryReaders() throws Exception {
aff.backups(1);
grids = 2;
aff.partitions(grids);
startGrids();
Collection<ClusterNode> nodes = new ArrayList<>(aff.nodes(aff.partition(1), grid(0).cluster().nodes()));
ClusterNode primary = F.first(nodes);
assert primary != null;
nodes.remove(primary);
ClusterNode backup = F.first(nodes);
assert backup != null;
assertNotSame(primary, backup);
assertFalse("Nodes cannot be equal: " + primary, primary.equals(backup));
IgniteCache<Integer, String> cache1 = grid(primary.id()).cache(DEFAULT_CACHE_NAME);
IgniteCache<Integer, String> cache2 = grid(backup.id()).cache(DEFAULT_CACHE_NAME);
// Store a value in cache.
assertNull(cache1.getAndPut(1, "v1"));
GridDhtCacheEntry e1 = (GridDhtCacheEntry) dht(cache1).peekEx(1);
GridDhtCacheEntry e2 = (GridDhtCacheEntry) dht(cache2).peekEx(1);
assertNull(e1);
assertNull(e2);
}
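Note that dht(), peekEx() and the aff/grids fields above are internals of the Ignite test framework. For context, the following is a minimal sketch, using only the public Ignite API, of the kind of cache these tests exercise: a PARTITIONED cache with one backup and a near cache enabled. The cache name "default" and the single-node start are illustration-only assumptions, not part of the test above.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.NearCacheConfiguration;

public class NearReadersSetupSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("default");

            ccfg.setCacheMode(CacheMode.PARTITIONED);

            // One backup: on a two-node cluster every node is primary or backup for every key.
            ccfg.setBackups(1);

            // Enable the near cache whose reader tracking the tests above inspect.
            ccfg.setNearConfiguration(new NearCacheConfiguration<>());

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            cache.put(1, "v1");

            System.out.println(cache.get(1)); // Prints "v1".
        }
    }
}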
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
Class GridCacheNearReadersSelfTest, method testTwoNodesTwoKeysOneBackup.
/** @throws Exception If failed. */
public void testTwoNodesTwoKeysOneBackup() throws Exception {
aff.backups(1);
grids = 2;
aff.partitions(grids);
startGrids();
ClusterNode n1 = F.first(aff.nodes(aff.partition(1), grid(0).cluster().nodes()));
ClusterNode n2 = F.first(aff.nodes(aff.partition(2), grid(0).cluster().nodes()));
assertNotNull(n1);
assertNotNull(n2);
assertNotSame(n1, n2);
assertFalse("Nodes cannot be equal: " + n1, n1.equals(n2));
Ignite g1 = grid(n1.id());
Ignite g2 = grid(n2.id());
awaitPartitionMapExchange();
GridCacheContext ctx = ((IgniteKernal) g1).internalCache(DEFAULT_CACHE_NAME).context();
List<KeyCacheObject> cacheKeys = F.asList(ctx.toCacheKeyObject(1), ctx.toCacheKeyObject(2));
IgniteInternalFuture<Object> f1 = ((IgniteKernal) g1).internalCache(DEFAULT_CACHE_NAME).preloader().request(cacheKeys, new AffinityTopologyVersion(2));
if (f1 != null)
f1.get();
IgniteInternalFuture<Object> f2 = ((IgniteKernal) g2).internalCache(DEFAULT_CACHE_NAME).preloader().request(cacheKeys, new AffinityTopologyVersion(2));
if (f2 != null)
f2.get();
IgniteCache<Integer, String> cache1 = g1.cache(DEFAULT_CACHE_NAME);
IgniteCache<Integer, String> cache2 = g2.cache(DEFAULT_CACHE_NAME);
assertEquals(g1.affinity(DEFAULT_CACHE_NAME).mapKeyToNode(1), g1.cluster().localNode());
assertFalse(g1.affinity(DEFAULT_CACHE_NAME).mapKeyToNode(2).equals(g1.cluster().localNode()));
assertEquals(g1.affinity(DEFAULT_CACHE_NAME).mapKeyToNode(2), g2.cluster().localNode());
assertFalse(g2.affinity(DEFAULT_CACHE_NAME).mapKeyToNode(1).equals(g2.cluster().localNode()));
// Store first value in cache.
assertNull(cache1.getAndPut(1, "v1"));
assertTrue(cache1.containsKey(1));
assertTrue(cache2.containsKey(1));
assertEquals("v1", nearPeek(cache1, 1));
assertEquals("v1", nearPeek(cache2, 1));
assertEquals("v1", dhtPeek(cache1, 1));
assertEquals("v1", dhtPeek(cache2, 1));
assertNull(near(cache1).peekEx(1));
assertNull(near(cache2).peekEx(1));
GridDhtCacheEntry e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);
// Store second value in cache.
assertNull(cache1.getAndPut(2, "v2"));
assertTrue(cache1.containsKey(2));
assertTrue(cache2.containsKey(2));
assertEquals("v2", nearPeek(cache1, 2));
assertEquals("v2", nearPeek(cache2, 2));
assertEquals("v2", dhtPeek(cache1, 2));
assertEquals("v2", dhtPeek(cache2, 2));
assertNull(near(cache1).peekEx(2));
assertNull(near(cache2).peekEx(2));
GridDhtCacheEntry c2e2 = (GridDhtCacheEntry) dht(cache2).entryEx(2);
// Nodes are backups of each other, so no readers should be added.
assertFalse(c2e2.readers().contains(n1.id()));
assertFalse(e1.readers().contains(n2.id()));
// Get key1 on node2 (value should come from local DHT cache, as it has a backup).
assertEquals("v1", cache2.get(1));
// Since DHT cache2 has the value, Near cache2 should not have it.
assertNull(near(cache2).peekEx(1));
e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);
// Since v1 was retrieved locally from cache2, cache1 should not know about it.
assertFalse(e1.readers().contains(n2.id()));
// Evict locally from cache2.
// It should not be successful since it's not allowed to evict entry on backup node.
cache2.localEvict(Collections.singleton(1));
assertNull(near(cache2).peekEx(1));
assertEquals("v1", dhtPeek(cache2, 1));
assertEquals("v1", cache1.getAndPut(1, "z1"));
e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);
// Node 1 should not have node2 in readers map.
assertFalse(e1.readers().contains(n2.id()));
assertNull(near(cache2).peekEx(1));
assertEquals("z1", dhtPeek(cache2, 1));
}
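The reason no readers appear in this test is that, with one backup on two nodes, every node is primary or backup for every key, so gets are served from the local DHT store and never install near-cache entries. A small hedged sketch, using the public Affinity API and an assumed cache name "default", of how that full local ownership can be checked:

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class BackupCoverageCheck {
    public static void verifyFullLocalOwnership(Ignite ignite, int... keys) {
        Affinity<Integer> aff = ignite.affinity("default");
        ClusterNode locNode = ignite.cluster().localNode();

        for (int key : keys) {
            // With one backup on a two-node cluster this must hold for every key on every node.
            assert aff.isPrimaryOrBackup(locNode, key) : "Key not owned locally: " + key;
        }
    }
}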
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
Class GridCacheNearReadersSelfTest, method testTwoNodesTwoKeysNoBackups.
/** @throws Exception If failed. */
public void testTwoNodesTwoKeysNoBackups() throws Exception {
aff.backups(0);
grids = 2;
aff.partitions(grids);
startGrids();
ClusterNode n1 = F.first(aff.nodes(aff.partition(1), grid(0).cluster().nodes()));
final ClusterNode n2 = F.first(aff.nodes(aff.partition(2), grid(0).cluster().nodes()));
assertNotNull(n1);
assertNotNull(n2);
assertNotSame(n1, n2);
assertFalse("Nodes cannot be equal: " + n1, n1.equals(n2));
Ignite g1 = grid(n1.id());
Ignite g2 = grid(n2.id());
IgniteCache<Integer, String> cache1 = g1.cache(DEFAULT_CACHE_NAME);
IgniteCache<Integer, String> cache2 = g2.cache(DEFAULT_CACHE_NAME);
// Store some values in cache.
assertNull(cache1.getAndPut(1, "v1"));
assertNull(cache1.getAndPut(2, "v2"));
GridDhtCacheEntry e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);
GridDhtCacheEntry e2 = (GridDhtCacheEntry) dht(cache2).entryEx(2);
assertNotNull(e1.readers());
assertTrue(cache1.containsKey(1));
assertTrue(cache1.containsKey(2));
assertNotNull(nearPeek(cache1, 1));
assertNotNull(nearPeek(cache1, 2));
assertNotNull(dhtPeek(cache1, 1));
assertNull(dhtPeek(cache1, 2));
assertNull(nearPeek(cache2, 1));
assertNotNull(dhtPeek(cache2, 2));
// Node2 should have node1 in its readers map, since the request to
// put key 2 came from node1.
assertTrue(e2.readers().contains(n1.id()));
e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);
// Node1 should not have node2 in readers map yet.
assertFalse(e1.readers().contains(n2.id()));
// Get key1 on node2.
assertEquals("v1", cache2.get(1));
// Check that key1 is in near cache of cache2.
assertNotNull(nearPeek(cache2, 1));
e1 = (GridDhtCacheEntry) dht(cache1).entryEx(1);
// Now node1 should have node2 in readers map.
assertTrue(e1.readers().contains(n2.id()));
// Evict locally from cache2.
cache2.localEvict(Collections.singleton(1));
assertNull(nearPeek(cache2, 1));
assertNull(dhtPeek(cache2, 1));
// Node 1 still has node2 in readers map.
assertTrue(e1.readers().contains(n2.id()));
assertNotNull(cache1.getAndPut(1, "z1"));
final GridDhtCacheEntry e1f = e1;
GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override
public boolean apply() {
try {
return !e1f.readers().contains(n2.id());
} catch (GridCacheEntryRemovedException ignored) {
return true;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}, 5000);
// After the update, node 1 should no longer have node2 in its readers map.
assertFalse(((GridDhtCacheEntry) dht(cache1).entryEx(1)).readers().contains(n2.id()));
}
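Here, with no backups, the get of key 1 on node 2 goes through the near cache and registers node 2 as a reader on the primary. The nearPeek/dhtPeek helpers are test-only, but the same near-versus-primary distinction can be observed with the public localPeek API. A minimal sketch, assuming a cache named "default" with a near cache configured and a node that is not an affinity node for the key:

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.CachePeekMode;

public class NearPeekSketch {
    public static void peekAfterRemoteGet(Ignite nonAffinityNode, int key) {
        IgniteCache<Integer, String> cache = nonAffinityNode.cache("default");

        // The get is served by the remote primary and populates the local near cache.
        String val = cache.get(key);

        String nearVal = cache.localPeek(key, CachePeekMode.NEAR);
        String ownedVal = cache.localPeek(key, CachePeekMode.PRIMARY, CachePeekMode.BACKUP);

        System.out.println("get=" + val + ", near=" + nearVal + ", primary/backup=" + ownedVal);
        // Expected on a non-affinity node with no backups: near holds the value, primary/backup is null.
    }
}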
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
Class GridNearLockFuture, method map.
/**
* Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request per
* node, as such an approach does not preserve the order of lock acquisition. Instead, keys are split
* into contiguous groups belonging to one primary node, and locks for these groups are acquired sequentially.
*
* @param keys Keys.
* @param remap Remap flag.
* @param topLocked {@code True} if thread already acquired lock preventing topology change.
*/
private void map(Iterable<KeyCacheObject> keys, boolean remap, boolean topLocked) {
try {
AffinityTopologyVersion topVer = this.topVer;
assert topVer != null;
assert topVer.topologyVersion() > 0 : topVer;
if (CU.affinityNodes(cctx, topVer).isEmpty()) {
onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache (all " + "partition nodes left the grid)."));
return;
}
boolean clientNode = cctx.kernalContext().clientNode();
assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
synchronized (this) {
mappings = new ArrayDeque<>();
// Assign keys to primary nodes.
GridNearLockMapping map = null;
for (KeyCacheObject key : keys) {
GridNearLockMapping updated = map(key, map, topVer);
// If new mapping was created, add to collection.
if (updated != map) {
mappings.add(updated);
if (tx != null && updated.node().isLocal())
tx.nearLocallyMapped(true);
}
map = updated;
}
if (isDone()) {
if (log.isDebugEnabled())
log.debug("Abandoning (re)map because future is done: " + this);
return;
}
if (log.isDebugEnabled())
log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
boolean first = true;
// Create mini futures.
for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
GridNearLockMapping mapping = iter.next();
ClusterNode node = mapping.node();
Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
assert !mappedKeys.isEmpty();
GridNearLockRequest req = null;
Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
boolean explicit = false;
for (KeyCacheObject key : mappedKeys) {
IgniteTxKey txKey = cctx.txKey(key);
while (true) {
GridNearCacheEntry entry = null;
try {
entry = cctx.near().entryExx(key, topVer);
if (!cctx.isAll(entry, filter)) {
if (log.isDebugEnabled())
log.debug("Entry being locked did not pass filter (will not lock): " + entry);
onComplete(false, false);
return;
}
// Removed exception may be thrown here.
GridCacheMvccCandidate cand = addEntry(topVer, entry, node.id());
if (isDone()) {
if (log.isDebugEnabled())
log.debug("Abandoning (re)map because future is done after addEntry attempt " + "[fut=" + this + ", entry=" + entry + ']');
return;
}
if (cand != null) {
if (tx == null && !cand.reentry())
cctx.mvcc().addExplicitLock(threadId, cand, topVer);
IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.versionedValue();
if (val == null) {
GridDhtCacheEntry dhtEntry = dht().peekExx(key);
try {
if (dhtEntry != null)
val = dhtEntry.versionedValue(topVer);
} catch (GridCacheEntryRemovedException ignored) {
assert dhtEntry.obsolete() : dhtEntry;
if (log.isDebugEnabled())
log.debug("Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
}
}
GridCacheVersion dhtVer = null;
if (val != null) {
dhtVer = val.get1();
valMap.put(key, val);
}
if (!cand.reentry()) {
if (req == null) {
boolean clientFirst = false;
if (first) {
clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());
first = false;
}
assert !implicitTx() && !implicitSingleTx() : tx;
req = new GridNearLockRequest(cctx.cacheId(), topVer, cctx.nodeId(), threadId, futId, lockVer, inTx(), read, retval, isolation(), isInvalidate(), timeout, mappedKeys.size(), inTx() ? tx.size() : mappedKeys.size(), inTx() && tx.syncMode() == FULL_SYNC, inTx() ? tx.subjectId() : null, inTx() ? tx.taskNameHash() : 0, read ? createTtl : -1L, read ? accessTtl : -1L, skipStore, keepBinary, clientFirst, cctx.deploymentEnabled());
mapping.request(req);
}
distributedKeys.add(key);
if (tx != null)
tx.addKeyMapping(txKey, mapping.node());
req.addKeyBytes(key, retval && dhtVer == null, dhtVer, // Include DHT version to match remote DHT entry.
cctx);
}
if (cand.reentry())
explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
} else
// Ignore reentries within transactions.
explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
if (explicit)
tx.addKeyMapping(txKey, mapping.node());
break;
} catch (GridCacheEntryRemovedException ignored) {
assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;
if (log.isDebugEnabled())
log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
}
}
// Mark mapping explicit lock flag.
if (explicit) {
boolean marked = tx != null && tx.markExplicit(node.id());
assert tx == null || marked;
}
}
if (!distributedKeys.isEmpty())
mapping.distributedKeys(distributedKeys);
else {
assert mapping.request() == null;
iter.remove();
}
}
}
cctx.mvcc().recheckPendingLocks();
proceedMapping();
} catch (IgniteCheckedException ex) {
onError(ex);
}
}
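The Javadoc above stresses that keys cannot simply be bucketed per node, because that would reorder lock acquisition; instead, consecutive keys that map to the same primary form one group and the groups are processed in order. The following self-contained sketch (hypothetical helper names, not Ignite code) illustrates that contiguous grouping on plain Java collections:

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class ContiguousGroupingSketch {
    /** Splits keys into runs of consecutive keys that share the same primary node. */
    static <K, N> List<Map.Entry<N, List<K>>> group(Iterable<K> keys, Function<K, N> primaryOf) {
        List<Map.Entry<N, List<K>>> groups = new ArrayList<>();

        N curNode = null;
        List<K> curKeys = null;

        for (K key : keys) {
            N node = primaryOf.apply(key);

            // Start a new group whenever the primary changes; never merge with an earlier group.
            if (curKeys == null || !node.equals(curNode)) {
                curNode = node;
                curKeys = new ArrayList<>();

                groups.add(new AbstractMap.SimpleEntry<>(node, curKeys));
            }

            curKeys.add(key);
        }

        return groups;
    }

    public static void main(String[] args) {
        // Keys in lock-acquisition order, "primary node" simulated as key % 2.
        List<Map.Entry<Integer, List<Integer>>> g =
            group(Arrays.asList(1, 3, 2, 4, 5, 6), k -> k % 2);

        // Prints [1=[1, 3], 0=[2, 4], 1=[5], 0=[6]]: per-node runs, original order preserved.
        System.out.println(g);
    }
}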
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
Class GridNearCacheEntry, method initializeFromDht.
/**
* @param topVer Topology version.
* @throws GridCacheEntryRemovedException If this entry is obsolete.
*/
public void initializeFromDht(AffinityTopologyVersion topVer) throws GridCacheEntryRemovedException {
GridDhtCacheEntry entry = cctx.near().dht().peekExx(key);
if (entry != null) {
GridCacheEntryInfo e = entry.info();
if (e != null) {
GridCacheVersion enqueueVer = null;
try {
ClusterNode primaryNode = cctx.affinity().primaryByKey(key, topVer);
synchronized (this) {
checkObsolete();
if (isNew() || !valid(topVer)) {
// Version does not change for load ops.
update(e.value(), e.expireTime(), e.ttl(), e.isNew() ? ver : e.version(), true);
if (cctx.deferredDelete() && !isNew() && !isInternal()) {
boolean deleted = val == null;
if (deleted != deletedUnlocked()) {
deletedUnlocked(deleted);
if (deleted)
enqueueVer = e.version();
}
}
if (primaryNode == null)
this.topVer = AffinityTopologyVersion.NONE;
else
recordNodeId(primaryNode.id(), topVer);
dhtVer = e.isNew() || e.isDeleted() ? null : e.version();
}
}
} finally {
if (enqueueVer != null)
cctx.onDeferredDelete(this, enqueueVer);
}
}
}
}