Use of org.apache.ignite.internal.processors.cache.GridCacheMessage in project ignite by apache.
The class IgniteTxHandler, method finishDhtLocal.
/**
 * @param nodeId ID of the node that initiated the commit.
 * @param locTx Optional local transaction.
 * @param req Finish request.
 * @return Finish future.
 */
private IgniteInternalFuture<IgniteInternalTx> finishDhtLocal(UUID nodeId, @Nullable GridNearTxLocal locTx, GridNearTxFinishRequest req) {
    GridCacheVersion dhtVer = ctx.tm().mappedVersion(req.version());
    GridDhtTxLocal tx = null;
    if (dhtVer == null) {
        if (log.isDebugEnabled())
            log.debug("Received transaction finish request for unknown near version (was lock explicit?): " + req);
    } else
        tx = ctx.tm().tx(dhtVer);
    if (tx != null)
        req.txState(tx.txState());
    if (tx == null && locTx != null && !req.commit()) {
        U.warn(log, "DHT local tx not found for near local tx rollback " + "[req=" + req + ", dhtVer=" + dhtVer + ", tx=" + locTx + ']');
        return null;
    }
    if (tx == null && !req.explicitLock()) {
        assert locTx == null : "DHT local tx should never be lost for near local tx: " + locTx;
        U.warn(txFinishMsgLog, "Received finish request for completed transaction (the message may be too late) [" + "txId=" + req.version() + ", dhtTxId=" + dhtVer + ", node=" + nodeId + ", commit=" + req.commit() + ']');
        // Always send finish response.
        GridCacheMessage res = new GridNearTxFinishResponse(req.partition(), req.version(), req.threadId(), req.futureId(), req.miniId(), new IgniteCheckedException("Transaction has been already completed."));
        try {
            ctx.io().send(nodeId, res, req.policy());
            if (txFinishMsgLog.isDebugEnabled()) {
                txFinishMsgLog.debug("Sent near finish response for completed tx [txId=" + req.version() + ", dhtTxId=" + dhtVer + ", node=" + nodeId + ']');
            }
        } catch (Throwable e) {
            // Double-check.
            if (ctx.discovery().node(nodeId) == null) {
                if (txFinishMsgLog.isDebugEnabled()) {
                    txFinishMsgLog.debug("Failed to send near finish response for completed tx, node failed [" + "txId=" + req.version() + ", dhtTxId=" + dhtVer + ", node=" + nodeId + ']');
                }
            } else {
                U.error(txFinishMsgLog, "Failed to send near finish response for completed tx, node failed [" + "txId=" + req.version() + ", dhtTxId=" + dhtVer + ", node=" + nodeId + ", req=" + req + ", res=" + res + ']', e);
            }
            if (e instanceof Error)
                throw (Error) e;
        }
        return null;
    }
    try {
        assert tx != null : "Transaction is null for near finish request [nodeId=" + nodeId + ", req=" + req + "]";
        assert req.syncMode() != null : req;
        tx.syncMode(req.syncMode());
        tx.nearFinishFutureId(req.futureId());
        tx.nearFinishMiniId(req.miniId());
        tx.storeEnabled(req.storeEnabled());
        if (req.commit()) {
            if (!tx.markFinalizing(USER_FINISH)) {
                if (log.isDebugEnabled())
                    log.debug("Will not finish transaction (it is handled by another thread): " + tx);
                return null;
            }
            IgniteInternalFuture<IgniteInternalTx> commitFut = tx.commitDhtLocalAsync();
            // Only for error logging.
            commitFut.listen(CU.errorLogger(log));
            return commitFut;
        } else {
            IgniteInternalFuture<IgniteInternalTx> rollbackFut = tx.rollbackDhtLocalAsync();
            // Only for error logging.
            rollbackFut.listen(CU.errorLogger(log));
            return rollbackFut;
        }
    } catch (Throwable e) {
        tx.commitError(e);
        tx.systemInvalidate(true);
        U.error(log, "Failed completing transaction [commit=" + req.commit() + ", tx=" + tx + ']', e);
        IgniteInternalFuture<IgniteInternalTx> res;
        IgniteInternalFuture<IgniteInternalTx> rollbackFut = tx.rollbackDhtLocalAsync();
        // Only for error logging.
        rollbackFut.listen(CU.errorLogger(log));
        res = rollbackFut;
        if (e instanceof Error)
            throw (Error) e;
        return res;
    }
}
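The part of this snippet that exercises GridCacheMessage is the already-completed branch: a GridNearTxFinishResponse is built from the request identifiers, sent on a best-effort basis, a send failure is only logged as an error when discovery still sees the destination node, and java.lang.Error is always rethrown. A minimal sketch of that reply pattern, assuming the same ctx, txFinishMsgLog and message types as above (the helper name replyAlreadyCompleted is made up for illustration):

/** Hypothetical helper: tell the near node its transaction is already completed (sketch only). */
private void replyAlreadyCompleted(UUID nodeId, GridNearTxFinishRequest req) {
    // Build the response as a GridCacheMessage, echoing the identifiers from the request.
    GridCacheMessage res = new GridNearTxFinishResponse(req.partition(), req.version(), req.threadId(),
        req.futureId(), req.miniId(), new IgniteCheckedException("Transaction has been already completed."));

    try {
        ctx.io().send(nodeId, res, req.policy());
    } catch (Throwable e) {
        // If discovery no longer sees the node, the send failure is expected and only worth a warning.
        if (ctx.discovery().node(nodeId) == null)
            U.warn(txFinishMsgLog, "Near node left before the finish response was sent: " + nodeId);
        else
            U.error(txFinishMsgLog, "Failed to send near finish response [node=" + nodeId + ']', e);

        // Never swallow JVM errors.
        if (e instanceof Error)
            throw (Error) e;
    }
}

The response is informational only, which is why the surrounding method returns null whether or not the send succeeds.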
Use of org.apache.ignite.internal.processors.cache.GridCacheMessage in project ignite by apache.
The class GridNearGetFuture, method map.
/**
 * @param keys Keys.
 * @param mapped Mappings to check for duplicates.
 * @param topVer Topology version to map on.
 */
private void map(Collection<KeyCacheObject> keys, Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped, final AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx, topVer);
    if (affNodes.isEmpty()) {
        assert !cctx.affinityNode();
        onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache (all partition " + "nodes left the grid)."));
        return;
    }
    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings = U.newHashMap(affNodes.size());
    Map<KeyCacheObject, GridNearCacheEntry> savedEntries = null;
    {
        boolean success = false;
        try {
            // Assign keys to primary nodes.
            for (KeyCacheObject key : keys)
                savedEntries = map(key, mappings, topVer, mapped, savedEntries);
            success = true;
        } finally {
            // Exception has been thrown, must release reserved near entries.
            if (!success) {
                GridCacheVersion obsolete = cctx.versions().next(topVer);
                if (savedEntries != null) {
                    for (GridNearCacheEntry reserved : savedEntries.values()) {
                        reserved.releaseEviction();
                        if (reserved.markObsolete(obsolete))
                            reserved.context().cache().removeEntry(reserved);
                    }
                }
            }
        }
    }
    if (isDone())
        return;
    final Map<KeyCacheObject, GridNearCacheEntry> saved = savedEntries != null ? savedEntries : Collections.<KeyCacheObject, GridNearCacheEntry>emptyMap();
    final int keysSize = keys.size();
    // Create mini futures.
    for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry : mappings.entrySet()) {
        final ClusterNode n = entry.getKey();
        final LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();
        assert !mappedKeys.isEmpty();
        // If this is the primary or backup node for the keys.
        if (n.isLocal()) {
            final GridDhtFuture<Collection<GridCacheEntryInfo>> fut = dht().getDhtAsync(n.id(), -1, mappedKeys, readThrough, topVer, subjId, taskName == null ? 0 : taskName.hashCode(), expiryPlc, skipVals, recovery);
            final Collection<Integer> invalidParts = fut.invalidPartitions();
            if (!F.isEmpty(invalidParts)) {
                Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);
                for (KeyCacheObject key : keys) {
                    if (key != null && invalidParts.contains(cctx.affinity().partition(key)))
                        remapKeys.add(key);
                }
                AffinityTopologyVersion updTopVer = cctx.discovery().topologyVersionEx();
                assert updTopVer.compareTo(topVer) > 0 : "Got invalid partitions for local node but topology version did " + "not change [topVer=" + topVer + ", updTopVer=" + updTopVer + ", invalidParts=" + invalidParts + ']';
                // Remap recursively.
                map(remapKeys, mappings, updTopVer);
            }
            // Add new future.
            add(fut.chain(new C1<IgniteInternalFuture<Collection<GridCacheEntryInfo>>, Map<K, V>>() {
                @Override
                public Map<K, V> apply(IgniteInternalFuture<Collection<GridCacheEntryInfo>> fut) {
                    try {
                        return loadEntries(n.id(), mappedKeys.keySet(), fut.get(), saved, topVer);
                    } catch (Exception e) {
                        U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);
                        onDone(e);
                        return Collections.emptyMap();
                    }
                }
            }));
        } else {
            if (!trackable) {
                trackable = true;
                cctx.mvcc().addFuture(this, futId);
            }
            MiniFuture fut = new MiniFuture(n, mappedKeys, saved, topVer);
            GridCacheMessage req = new GridNearGetRequest(cctx.cacheId(), futId, fut.futureId(), ver, mappedKeys, readThrough, topVer, subjId, taskName == null ? 0 : taskName.hashCode(), expiryPlc != null ? expiryPlc.forCreate() : -1L, expiryPlc != null ? expiryPlc.forAccess() : -1L, skipVals, cctx.deploymentEnabled(), recovery);
            // Append new future.
            add(fut);
            try {
                cctx.io().send(n, req, cctx.ioPolicy());
            } catch (IgniteCheckedException e) {
                // Fail the whole thing.
                if (e instanceof ClusterTopologyCheckedException)
                    fut.onNodeLeft();
                else
                    fut.onResult(e);
            }
        }
    }
}
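A detail worth noting before the GridNearGetRequest is built: the mapping loop reserves near-cache entries, and the finally block above rolls those reservations back if mapping fails. A minimal sketch of that cleanup, assuming the same cctx and GridNearCacheEntry API as in the snippet (the helper name releaseReserved is made up for illustration):

/** Hypothetical helper: release near entries reserved by a failed mapping attempt (sketch only). */
private void releaseReserved(Map<KeyCacheObject, GridNearCacheEntry> savedEntries, AffinityTopologyVersion topVer) {
    if (savedEntries == null)
        return;

    // A single obsolete version is enough for every entry reserved by this future.
    GridCacheVersion obsolete = cctx.versions().next(topVer);

    for (GridNearCacheEntry reserved : savedEntries.values()) {
        reserved.releaseEviction();

        // Entries successfully marked obsolete are removed from the near cache right away.
        if (reserved.markObsolete(obsolete))
            reserved.context().cache().removeEntry(reserved);
    }
}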
Use of org.apache.ignite.internal.processors.cache.GridCacheMessage in project ignite by apache.
The class GridPartitionedGetFuture, method map.
/**
 * @param keys Keys.
 * @param mapped Mappings to check for duplicates.
 * @param topVer Topology version on which keys should be mapped.
 */
private void map(Collection<KeyCacheObject> keys, Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> cacheNodes = CU.affinityNodes(cctx, topVer);
    if (cacheNodes.isEmpty()) {
        onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " + "(all partition nodes left the grid) [topVer=" + topVer + ", cache=" + cctx.name() + ']'));
        return;
    }
    GridDhtTopologyFuture topFut = cctx.shared().exchange().lastFinishedFuture();
    Throwable err = topFut != null ? topFut.validateCache(cctx, recovery, true, null, keys) : null;
    if (err != null) {
        onDone(err);
        return;
    }
    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings = U.newHashMap(cacheNodes.size());
    final int keysSize = keys.size();
    Map<K, V> locVals = U.newHashMap(keysSize);
    boolean hasRmtNodes = false;
    // Assign keys to primary nodes.
    for (KeyCacheObject key : keys)
        hasRmtNodes |= map(key, mappings, locVals, topVer, mapped);
    if (isDone())
        return;
    if (!locVals.isEmpty())
        add(new GridFinishedFuture<>(locVals));
    if (hasRmtNodes) {
        if (!trackable) {
            trackable = true;
            cctx.mvcc().addFuture(this, futId);
        }
    }
    // Create mini futures.
    for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry : mappings.entrySet()) {
        final ClusterNode n = entry.getKey();
        final LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();
        assert !mappedKeys.isEmpty();
        // If this is the primary or backup node for the keys.
        if (n.isLocal()) {
            final GridDhtFuture<Collection<GridCacheEntryInfo>> fut = cache().getDhtAsync(n.id(), -1, mappedKeys, readThrough, topVer, subjId, taskName == null ? 0 : taskName.hashCode(), expiryPlc, skipVals, recovery);
            final Collection<Integer> invalidParts = fut.invalidPartitions();
            if (!F.isEmpty(invalidParts)) {
                Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);
                for (KeyCacheObject key : keys) {
                    if (key != null && invalidParts.contains(cctx.affinity().partition(key)))
                        remapKeys.add(key);
                }
                AffinityTopologyVersion updTopVer = cctx.discovery().topologyVersionEx();
                assert updTopVer.compareTo(topVer) > 0 : "Got invalid partitions for local node but topology version did " + "not change [topVer=" + topVer + ", updTopVer=" + updTopVer + ", invalidParts=" + invalidParts + ']';
                // Remap recursively.
                map(remapKeys, mappings, updTopVer);
            }
            // Add new future.
            add(fut.chain(new C1<IgniteInternalFuture<Collection<GridCacheEntryInfo>>, Map<K, V>>() {
                @Override
                public Map<K, V> apply(IgniteInternalFuture<Collection<GridCacheEntryInfo>> fut) {
                    try {
                        return createResultMap(fut.get());
                    } catch (Exception e) {
                        U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);
                        onDone(e);
                        return Collections.emptyMap();
                    }
                }
            }));
        } else {
            MiniFuture fut = new MiniFuture(n, mappedKeys, topVer);
            GridCacheMessage req = new GridNearGetRequest(cctx.cacheId(), futId, fut.futureId(), null, mappedKeys, readThrough, topVer, subjId, taskName == null ? 0 : taskName.hashCode(), expiryPlc != null ? expiryPlc.forCreate() : -1L, expiryPlc != null ? expiryPlc.forAccess() : -1L, skipVals, cctx.deploymentEnabled(), recovery);
            // Append new future.
            add(fut);
            try {
                cctx.io().send(n, req, cctx.ioPolicy());
            } catch (IgniteCheckedException e) {
                // Fail the whole thing.
                if (e instanceof ClusterTopologyCheckedException)
                    fut.onNodeLeft((ClusterTopologyCheckedException) e);
                else
                    fut.onResult(e);
            }
        }
    }
}
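For keys owned by a remote node, this future and GridNearGetFuture follow the same send protocol around GridCacheMessage: register a per-node MiniFuture first, then send the GridNearGetRequest, and convert a failed send into either a node-left event or a plain error on that mini future. A minimal sketch of that step, assuming the same cctx and MiniFuture type as in the snippet above (the helper name sendMiniRequest is made up for illustration):

/** Hypothetical helper: send a per-node get request and route send failures to its mini future (sketch only). */
private void sendMiniRequest(ClusterNode n, MiniFuture fut, GridCacheMessage req) {
    // Register the mini future before sending so a fast response never arrives for an unknown future.
    add(fut);

    try {
        cctx.io().send(n, req, cctx.ioPolicy());
    } catch (IgniteCheckedException e) {
        // A topology exception means the target node left; anything else fails the mini future directly.
        if (e instanceof ClusterTopologyCheckedException)
            fut.onNodeLeft((ClusterTopologyCheckedException) e);
        else
            fut.onResult(e);
    }
}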
Use of org.apache.ignite.internal.processors.cache.GridCacheMessage in project ignite by apache.
The class GridPartitionedSingleGetFuture, method map.
/**
 * @param topVer Topology version.
 */
@SuppressWarnings("unchecked")
private void map(AffinityTopologyVersion topVer) {
    ClusterNode node = mapKeyToNode(topVer);
    if (node == null) {
        assert isDone() : this;
        return;
    }
    if (isDone())
        return;
    if (node.isLocal()) {
        Map<KeyCacheObject, Boolean> map = Collections.singletonMap(key, false);
        final GridDhtFuture<Collection<GridCacheEntryInfo>> fut = cctx.dht().getDhtAsync(node.id(), -1, map, readThrough, topVer, subjId, taskName == null ? 0 : taskName.hashCode(), expiryPlc, skipVals, recovery);
        final Collection<Integer> invalidParts = fut.invalidPartitions();
        if (!F.isEmpty(invalidParts)) {
            AffinityTopologyVersion updTopVer = cctx.discovery().topologyVersionEx();
            assert updTopVer.compareTo(topVer) > 0 : "Got invalid partitions for local node but topology " + "version did not change [topVer=" + topVer + ", updTopVer=" + updTopVer + ", invalidParts=" + invalidParts + ']';
            // Remap recursively.
            map(updTopVer);
        } else {
            fut.listen(new CI1<IgniteInternalFuture<Collection<GridCacheEntryInfo>>>() {
                @Override
                public void apply(IgniteInternalFuture<Collection<GridCacheEntryInfo>> fut) {
                    try {
                        Collection<GridCacheEntryInfo> infos = fut.get();
                        assert F.isEmpty(infos) || infos.size() == 1 : infos;
                        setResult(F.first(infos));
                    } catch (Exception e) {
                        U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);
                        onDone(e);
                    }
                }
            });
        }
    } else {
        synchronized (this) {
            assert this.node == null;
            this.topVer = topVer;
            this.node = node;
        }
        if (!trackable) {
            trackable = true;
            cctx.mvcc().addFuture(this, futId);
        }
        GridCacheMessage req = new GridNearSingleGetRequest(cctx.cacheId(), futId.localId(), key, readThrough, topVer, subjId, taskName == null ? 0 : taskName.hashCode(), expiryPlc != null ? expiryPlc.forCreate() : -1L, expiryPlc != null ? expiryPlc.forAccess() : -1L, skipVals, /*add reader*/
            false, needVer, cctx.deploymentEnabled(), recovery);
        try {
            cctx.io().send(node, req, cctx.ioPolicy());
        } catch (IgniteCheckedException e) {
            if (e instanceof ClusterTopologyCheckedException)
                onNodeLeft(node.id());
            else
                onDone(e);
        }
    }
}
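All three get paths compute the optional request arguments the same way before the GridCacheMessage is constructed: the task name is reduced to a hash (0 when absent) and the per-operation expiry policy is encoded as a create/access TTL pair, with -1L standing in when no policy is set. A small sketch of that convention, assuming the same fields as in the snippet above (the local variable names are made up):

// Hypothetical locals spelling out the argument conventions used in the request constructors above.
int taskNameHash = taskName == null ? 0 : taskName.hashCode();      // 0 when no task name is set.
long createTtl = expiryPlc != null ? expiryPlc.forCreate() : -1L;   // -1L means "no expiry policy".
long accessTtl = expiryPlc != null ? expiryPlc.forAccess() : -1L;

GridCacheMessage req = new GridNearSingleGetRequest(cctx.cacheId(), futId.localId(), key, readThrough, topVer,
    subjId, taskNameHash, createTtl, accessTtl, skipVals, /*add reader*/ false, needVer,
    cctx.deploymentEnabled(), recovery);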