Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping in project ignite by apache.
The class GridDhtTxFinishFuture, method finish.
/**
 * @param commit Commit flag.
 * @param dhtMap DHT map.
 * @param nearMap Near map.
 * @return {@code True} in case there is at least one synchronous {@code MiniFuture} to wait for.
 */
private boolean finish(boolean commit,
    Map<UUID, GridDistributedTxMapping> dhtMap,
    Map<UUID, GridDistributedTxMapping> nearMap) {
    if (tx.onePhaseCommit())
        return false;

    boolean sync = tx.syncMode() == FULL_SYNC;

    if (tx.explicitLock())
        sync = true;

    boolean res = false;

    int miniId = 0;

    // Create mini futures.
    for (GridDistributedTxMapping dhtMapping : dhtMap.values()) {
        ClusterNode n = dhtMapping.primary();

        assert !n.isLocal();

        GridDistributedTxMapping nearMapping = nearMap.get(n.id());

        if (dhtMapping.empty() && nearMapping != null && nearMapping.empty())
            // Nothing to send.
            continue;

        MiniFuture fut = new MiniFuture(++miniId, dhtMapping, nearMapping);

        // Append new future.
        add(fut);

        Collection<Long> updCntrs = new ArrayList<>(dhtMapping.entries().size());

        for (IgniteTxEntry e : dhtMapping.entries())
            updCntrs.add(e.updateCounter());

        GridDhtTxFinishRequest req = new GridDhtTxFinishRequest(
            tx.nearNodeId(),
            futId,
            fut.futureId(),
            tx.topologyVersion(),
            tx.xidVersion(),
            tx.commitVersion(),
            tx.threadId(),
            tx.isolation(),
            commit,
            tx.isInvalidate(),
            tx.system(),
            tx.ioPolicy(),
            tx.isSystemInvalidate(),
            sync ? FULL_SYNC : tx.syncMode(),
            tx.completedBase(),
            tx.committedVersions(),
            tx.rolledbackVersions(),
            tx.pendingVersions(),
            tx.size(),
            tx.subjectId(),
            tx.taskNameHash(),
            tx.activeCachesDeploymentEnabled(),
            updCntrs,
            false,
            false);

        req.writeVersion(tx.writeVersion() != null ? tx.writeVersion() : tx.xidVersion());

        try {
            cctx.io().send(n, req, tx.ioPolicy());

            if (msgLog.isDebugEnabled()) {
                msgLog.debug("DHT finish fut, sent request dht [txId=" + tx.nearXidVersion() +
                    ", dhtTxId=" + tx.xidVersion() +
                    ", node=" + n.id() + ']');
            }

            if (sync)
                res = true;
            else
                fut.onDone();
        }
        catch (IgniteCheckedException e) {
            // Fail the whole thing.
            if (e instanceof ClusterTopologyCheckedException)
                fut.onNodeLeft((ClusterTopologyCheckedException)e);
            else {
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT finish fut, failed to send request dht [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() +
                        ", node=" + n.id() +
                        ", err=" + e + ']');
                }

                fut.onResult(e);
            }
        }
    }

    for (GridDistributedTxMapping nearMapping : nearMap.values()) {
        if (!dhtMap.containsKey(nearMapping.primary().id())) {
            if (nearMapping.empty())
                // Nothing to send.
                continue;

            MiniFuture fut = new MiniFuture(++miniId, null, nearMapping);

            // Append new future.
            add(fut);

            GridDhtTxFinishRequest req = new GridDhtTxFinishRequest(
                tx.nearNodeId(),
                futId,
                fut.futureId(),
                tx.topologyVersion(),
                tx.xidVersion(),
                tx.commitVersion(),
                tx.threadId(),
                tx.isolation(),
                commit,
                tx.isInvalidate(),
                tx.system(),
                tx.ioPolicy(),
                tx.isSystemInvalidate(),
                sync ? FULL_SYNC : tx.syncMode(),
                tx.completedBase(),
                tx.committedVersions(),
                tx.rolledbackVersions(),
                tx.pendingVersions(),
                tx.size(),
                tx.subjectId(),
                tx.taskNameHash(),
                tx.activeCachesDeploymentEnabled(),
                false,
                false);

            req.writeVersion(tx.writeVersion());

            try {
                cctx.io().send(nearMapping.primary(), req, tx.ioPolicy());

                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("DHT finish fut, sent request near [txId=" + tx.nearXidVersion() +
                        ", dhtTxId=" + tx.xidVersion() +
                        ", node=" + nearMapping.primary().id() + ']');
                }

                if (sync)
                    res = true;
                else
                    fut.onDone();
            }
            catch (IgniteCheckedException e) {
                // Fail the whole thing.
                if (e instanceof ClusterTopologyCheckedException)
                    fut.onNodeLeft((ClusterTopologyCheckedException)e);
                else {
                    if (msgLog.isDebugEnabled()) {
                        msgLog.debug("DHT finish fut, failed to send request near [txId=" + tx.nearXidVersion() +
                            ", dhtTxId=" + tx.xidVersion() +
                            ", node=" + nearMapping.primary().id() +
                            ", err=" + e + ']');
                    }

                    fut.onResult(e);
                }
            }
        }
    }

    return res;
}
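The method fans the finish request out to one node per mapping, creating a MiniFuture per target and completing it immediately unless the transaction is FULL_SYNC (or holds an explicit lock). A minimal, self-contained sketch of that fan-out/completion pattern follows; Mapping, send and the CompletableFuture stand-in for MiniFuture are illustrative assumptions, not Ignite API:

import java.util.List;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;

// Sketch: one mini-future per target node, completed eagerly unless sync.
final class FinishFanOut {
    record Mapping(UUID nodeId, boolean empty) { }

    /** @return {@code true} if at least one synchronous mini-future is left pending. */
    static boolean finish(List<Mapping> mappings, boolean sync, List<CompletableFuture<Void>> minis) {
        boolean res = false;

        for (Mapping m : mappings) {
            if (m.empty())
                continue; // Nothing to send.

            CompletableFuture<Void> fut = new CompletableFuture<>();

            minis.add(fut);

            send(m.nodeId()); // Fire the per-node finish request (network send elided).

            if (sync)
                res = true; // Caller must wait for the remote ack.
            else
                fut.complete(null); // Fire-and-forget: complete immediately, as fut.onDone() above.
        }

        return res;
    }

    static void send(UUID nodeId) { /* hypothetical network send */ }
}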
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping in project ignite by apache.
The class GridDhtTxLocalAdapter, method removeMapping.
/**
 * @param nodeId Node ID.
 * @param entry Entry to remove.
 * @param map Map to remove from.
 * @return {@code True} if the entry was removed.
 */
private boolean removeMapping(UUID nodeId, @Nullable GridCacheEntryEx entry, Map<UUID, GridDistributedTxMapping> map) {
    if (entry != null) {
        if (log.isDebugEnabled())
            log.debug("Removing mapping for entry [nodeId=" + nodeId + ", entry=" + entry + ']');

        IgniteTxEntry txEntry = entry(entry.txKey());

        if (txEntry == null)
            return false;

        GridDistributedTxMapping m = map.get(nodeId);

        boolean ret = m != null && m.removeEntry(txEntry);

        if (m != null && m.empty())
            map.remove(nodeId);

        return ret;
    }
    else
        return map.remove(nodeId) != null;
}
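The idiom here is remove-and-prune: drop the entry from the per-node mapping, then drop the whole mapping once it becomes empty. A hedged sketch of the same idiom with plain collections (the Set-of-keys stand-in for GridDistributedTxMapping is an assumption for illustration):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

// Sketch: remove an entry, prune the node mapping when it becomes empty.
final class MappingPrune {
    static boolean removeMapping(UUID nodeId, String txKey, Map<UUID, Set<String>> map) {
        Set<String> entries = map.get(nodeId);

        boolean ret = entries != null && entries.remove(txKey);

        // Mirrors "if (m != null && m.empty()) map.remove(nodeId)" above.
        if (entries != null && entries.isEmpty())
            map.remove(nodeId);

        return ret;
    }

    public static void main(String[] args) {
        Map<UUID, Set<String>> map = new HashMap<>();

        UUID node = UUID.randomUUID();

        map.put(node, new HashSet<>(Set.of("k1")));

        System.out.println(removeMapping(node, "k1", map)); // true
        System.out.println(map.containsKey(node));          // false: mapping was pruned
    }
}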
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping in project ignite by apache.
The class GridDhtTxLocalAdapter, method addMapping.
/**
 * @param mappings Entry mappings.
 * @param dst Transaction mappings.
 */
private void addMapping(Map<ClusterNode, List<GridDhtCacheEntry>> mappings, Map<UUID, GridDistributedTxMapping> dst) {
    for (Map.Entry<ClusterNode, List<GridDhtCacheEntry>> mapping : mappings.entrySet()) {
        ClusterNode n = mapping.getKey();

        GridDistributedTxMapping m = dst.get(n.id());

        List<GridDhtCacheEntry> entries = mapping.getValue();

        for (GridDhtCacheEntry entry : entries) {
            IgniteTxEntry txEntry = entry(entry.txKey());

            if (txEntry != null) {
                if (m == null)
                    dst.put(n.id(), m = new GridDistributedTxMapping(n));

                m.add(txEntry);
            }
        }
    }
}
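The inner loop lazily creates the per-node GridDistributedTxMapping on first use. With plain collections the same get-or-create grouping reads naturally as computeIfAbsent; a sketch with illustrative Node and String-entry stand-ins (assumptions, not Ignite types):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

// Sketch: lazily create the per-node bucket on first entry, then accumulate.
final class MappingGrouper {
    record Node(UUID id) { }

    static Map<UUID, List<String>> group(Map<Node, List<String>> byNode) {
        Map<UUID, List<String>> dst = new HashMap<>();

        for (Map.Entry<Node, List<String>> e : byNode.entrySet()) {
            for (String entry : e.getValue())
                dst.computeIfAbsent(e.getKey().id(), id -> new ArrayList<>()).add(entry);
        }

        return dst;
    }
}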
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping in project ignite by apache.
The class GridNearOptimisticTxPrepareFuture, method prepareSingle.
/**
 * @param write Write entry.
 * @param topLocked {@code True} if the thread already acquired a lock preventing topology change.
 * @param remap Remap flag.
 */
private void prepareSingle(IgniteTxEntry write, boolean topLocked, boolean remap) {
    write.clearEntryReadVersion();

    AffinityTopologyVersion topVer = tx.topologyVersion();

    assert topVer.topologyVersion() > 0;

    txMapping = new GridDhtTxMapping();

    GridDistributedTxMapping mapping = map(write, topVer, null, topLocked, remap);

    if (mapping.primary().isLocal()) {
        if (write.context().isNear())
            tx.nearLocallyMapped(true);
        else if (write.context().isColocated())
            tx.colocatedLocallyMapped(true);
    }

    if (isDone()) {
        if (log.isDebugEnabled())
            log.debug("Abandoning (re)map because future is done: " + this);

        return;
    }

    if (keyLockFut != null)
        keyLockFut.onAllKeysAdded();

    tx.addSingleEntryMapping(mapping, write);

    cctx.mvcc().recheckPendingLocks();

    mapping.last(true);

    tx.transactionNodes(txMapping.transactionNodes());

    if (!write.context().isNear())
        checkOnePhase(txMapping);

    assert !(mapping.hasColocatedCacheEntries() && mapping.hasNearCacheEntries()) : mapping;

    proceedPrepare(mapping, null);
}
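Note the ordering: the key is mapped while the topology version is fixed, and the method bails out if the future completed concurrently (for example, after a timeout or rollback) before the mapping is registered. A minimal sketch of that guard; mapKey and proceed are hypothetical names, not Ignite API:

import java.util.concurrent.atomic.AtomicBoolean;

// Sketch: map under a fixed topology version, abandon if the future finished concurrently.
final class SingleKeyPrepare {
    private final AtomicBoolean done = new AtomicBoolean();

    void prepareSingle(String key) {
        String mapping = mapKey(key); // Resolve the primary under the pinned topology version.

        if (done.get())
            return; // Future finished concurrently: abandon the (re)map, as isDone() above.

        proceed(mapping);
    }

    String mapKey(String key) { return "primary-for-" + key; }

    void proceed(String mapping) { System.out.println("prepare -> " + mapping); }
}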
Use of org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping in project ignite by apache.
The class GridNearOptimisticTxPrepareFuture, method map.
/**
 * @param entry Transaction entry.
 * @param topVer Topology version.
 * @param cur Current mapping.
 * @param topLocked {@code True} if the thread already acquired a lock preventing topology change.
 * @param remap Remap flag.
 * @return Mapping.
 */
private GridDistributedTxMapping map(IgniteTxEntry entry,
    AffinityTopologyVersion topVer,
    @Nullable GridDistributedTxMapping cur,
    boolean topLocked,
    boolean remap) {
    GridCacheContext cacheCtx = entry.context();

    List<ClusterNode> nodes;

    GridCacheEntryEx cached0 = entry.cached();

    if (cached0.isDht())
        nodes = cacheCtx.topology().nodes(cached0.partition(), topVer);
    else
        nodes = cacheCtx.isLocal() ?
            cacheCtx.affinity().nodesByKey(entry.key(), topVer) :
            cacheCtx.topology().nodes(cacheCtx.affinity().partition(entry.key()), topVer);

    txMapping.addMapping(nodes);

    ClusterNode primary = F.first(nodes);

    assert primary != null;

    if (log.isDebugEnabled()) {
        log.debug("Mapped key to primary node [key=" + entry.key() +
            ", part=" + cacheCtx.affinity().partition(entry.key()) +
            ", primary=" + U.toShortString(primary) +
            ", topVer=" + topVer + ']');
    }

    // Must re-initialize cached entry while holding topology lock.
    if (cacheCtx.isNear())
        entry.cached(cacheCtx.nearTx().entryExx(entry.key(), topVer));
    else if (!cacheCtx.isLocal())
        entry.cached(cacheCtx.colocated().entryExx(entry.key(), topVer, true));
    else
        entry.cached(cacheCtx.local().entryEx(entry.key(), topVer));

    if (cacheCtx.isNear() || cacheCtx.isLocal()) {
        if (entry.explicitVersion() == null && !remap) {
            if (keyLockFut == null) {
                keyLockFut = new KeyLockFuture();

                add(keyLockFut);
            }

            keyLockFut.addLockKey(entry.txKey());
        }
    }

    if (cur == null || !cur.primary().id().equals(primary.id()) ||
        (primary.isLocal() && cur.hasNearCacheEntries() != cacheCtx.isNear())) {
        boolean clientFirst = cur == null && !topLocked && cctx.kernalContext().clientNode();

        cur = new GridDistributedTxMapping(primary);

        cur.clientFirst(clientFirst);
    }

    cur.add(entry);

    if (entry.explicitVersion() != null) {
        tx.markExplicit(primary.id());

        cur.markExplicitLock();
    }

    entry.nodeId(primary.id());

    if (cacheCtx.isNear()) {
        while (true) {
            try {
                GridNearCacheEntry cached = (GridNearCacheEntry)entry.cached();

                cached.dhtNodeId(tx.xidVersion(), primary.id());

                break;
            }
            catch (GridCacheEntryRemovedException ignore) {
                entry.cached(cacheCtx.near().entryEx(entry.key(), topVer));
            }
        }
    }

    return cur;
}
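The mapping-reuse rule near the end is the heart of the batching: keep extending the current mapping while the primary node stays the same, otherwise start a fresh one. A sketch of just that decision, with illustrative names (an assumption, not the Ignite types):

import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

// Sketch: reuse the current per-node batch while the primary matches, else start anew.
final class MappingBatcher {
    record Mapping(UUID primaryId, List<String> entries) {
        Mapping(UUID primaryId) {
            this(primaryId, new ArrayList<>());
        }
    }

    static Mapping map(String entry, UUID primaryId, Mapping cur) {
        // Same shape as "cur == null || !cur.primary().id().equals(primary.id())" above.
        if (cur == null || !cur.primaryId().equals(primaryId))
            cur = new Mapping(primaryId);

        cur.entries().add(entry);

        return cur;
    }
}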