Use of org.apache.ignite.internal.processors.tracing.SpanType.CACHE_API_GET_MAP in project gridgain by gridgain.
Class GridPartitionedGetFuture, method map.
/**
* @param keys Keys.
* @param mapped Mappings to check for duplicates.
* @param topVer Topology version on which keys should be mapped.
*/
@Override
protected void map(Collection<KeyCacheObject> keys, Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped, AffinityTopologyVersion topVer) {
    try (TraceSurroundings ignored = MTC.support(cctx.kernalContext().tracing().create(CACHE_API_GET_MAP, span))) {
        MTC.span().addTag("topology.version", () -> Objects.toString(topVer));

        GridDhtPartitionsExchangeFuture fut = cctx.shared().exchange().lastTopologyFuture();

        // Finished DHT future is required for topology validation.
        if (!fut.isDone()) {
            if ((topVer.topologyVersion() > 0 && fut.initialVersion().after(topVer))
                || (fut.exchangeActions() != null && fut.exchangeActions().hasStop()))
                fut = cctx.shared().exchange().lastFinishedFuture();
            else {
                fut.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
                    @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
                        try {
                            AffinityTopologyVersion topVer0 = fut.get();

                            cctx.closures().runLocalSafe(new GridPlainRunnable() {
                                @Override public void run() {
                                    map(keys, mapped, topVer.topologyVersion() > 0 ? topVer : topVer0);
                                }
                            }, true);
                        }
                        catch (IgniteCheckedException e) {
                            onDone(e);
                        }
                    }
                });

                return;
            }
        }
        Collection<ClusterNode> cacheNodes = CU.affinityNodes(cctx, topVer);

        validate(cacheNodes, fut);

        // Future can already be done with some exception.
        if (isDone())
            return;

        Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings = U.newHashMap(cacheNodes.size());

        int keysSize = keys.size();

        // Map for local (key, value) pairs.
        Map<K, V> locVals = U.newHashMap(keysSize);

        // True if we have remote nodes after key mapping is complete.
        boolean hasRmtNodes = false;

        // Assign keys to nodes.
        for (KeyCacheObject key : keys)
            hasRmtNodes |= map(key, topVer, mappings, mapped, locVals);

        // Future can already be done with some exception.
        if (isDone())
            return;

        // Add locally read (key, value) pairs to the result.
        if (!locVals.isEmpty())
            add(new GridFinishedFuture<>(locVals));

        // If we have remote nodes in the mapping, register the future in the MVCC manager.
        if (hasRmtNodes)
            registrateFutureInMvccManager(this);
        // Create mini futures after mapping to remote nodes.
        for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry : mappings.entrySet()) {
            // Node for request.
            ClusterNode n = entry.getKey();

            // Keys for request.
            LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();

            assert !mappedKeys.isEmpty();

            // If this is the primary or backup node for the keys.
            if (n.isLocal()) {
                GridDhtFuture<Collection<GridCacheEntryInfo>> fut0 = cache().getDhtAsync(
                    n.id(),
                    -1,
                    mappedKeys,
                    false,
                    readThrough,
                    topVer,
                    subjId,
                    taskName == null ? 0 : taskName.hashCode(),
                    expiryPlc,
                    skipVals,
                    recovery,
                    txLbl,
                    mvccSnapshot());

                Collection<Integer> invalidParts = fut0.invalidPartitions();

                if (!F.isEmpty(invalidParts)) {
                    Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);

                    for (KeyCacheObject key : keys) {
                        int part = cctx.affinity().partition(key);

                        if (key != null && invalidParts.contains(part)) {
                            addNodeAsInvalid(n, part, topVer);

                            remapKeys.add(key);
                        }
                    }

                    AffinityTopologyVersion updTopVer = cctx.shared().exchange().readyAffinityVersion();

                    // Remap recursively.
                    map(remapKeys, mappings, updTopVer);
                }

                // Add new future.
                add(fut0.chain(f -> {
                    try {
                        return createResultMap(f.get());
                    }
                    catch (Exception e) {
                        U.error(log, "Failed to get values from dht cache [fut=" + fut0 + "]", e);

                        onDone(e);

                        return Collections.emptyMap();
                    }
                }));
            }
            else {
                MiniFuture miniFut = new MiniFuture(n, mappedKeys, topVer);

                GridCacheMessage req = miniFut.createGetRequest(futId);

                // Append new future.
                add(miniFut);

                try {
                    cctx.io().send(n, req, cctx.ioPolicy());
                }
                catch (IgniteCheckedException e) {
                    // Fail the whole thing.
                    if (e instanceof ClusterTopologyCheckedException)
                        miniFut.onNodeLeft((ClusterTopologyCheckedException)e);
                    else
                        miniFut.onResult(e);
                }
            }
        }

        markInitialized();
    }
}
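
Both snippets in this listing wrap the mapping step in the same tracing pattern: a CACHE_API_GET_MAP span is created as a child of the future's span, installed as the current span with MTC.support(...) in a try-with-resources block, and tagged with the topology version. The sketch below isolates just that pattern. It is a minimal sketch, not GridGain API: it assumes it sits inside a class with the same cctx and span fields as the futures above, and traceMapStep and doMapping are hypothetical names used only for illustration.

// Minimal sketch of the span-per-operation tracing pattern used by both map() overrides.
// Assumes the enclosing future's cctx (cache context) and span (parent span) fields;
// traceMapStep and doMapping are hypothetical placeholders, not GridGain API.
private void traceMapStep(AffinityTopologyVersion topVer) {
    // Create a CACHE_API_GET_MAP span as a child of the future's span; TraceSurroundings
    // closes it automatically when the try-with-resources block exits.
    try (TraceSurroundings ignored = MTC.support(cctx.kernalContext().tracing().create(CACHE_API_GET_MAP, span))) {
        // The tag value is supplied lazily via Objects.toString(topVer), exactly as in the snippets above.
        MTC.span().addTag("topology.version", () -> Objects.toString(topVer));

        // Hypothetical: the actual key-to-node mapping work would go here.
        doMapping(topVer);
    }
}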
Use of org.apache.ignite.internal.processors.tracing.SpanType.CACHE_API_GET_MAP in project gridgain by gridgain.
Class GridNearGetFuture, method map.
/**
* @param keys Keys.
* @param mapped Mappings to check for duplicates.
* @param topVer Topology version to map on.
*/
@Override
protected void map(Collection<KeyCacheObject> keys, Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped, AffinityTopologyVersion topVer) {
    try (TraceSurroundings ignored = MTC.support(cctx.kernalContext().tracing().create(CACHE_API_GET_MAP, span))) {
        MTC.span().addTag("topology.version", () -> Objects.toString(topVer));

        Collection<ClusterNode> affNodes = CU.affinityNodes(cctx, topVer);

        if (affNodes.isEmpty()) {
            assert !cctx.affinityNode();

            onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache (all partition nodes left the grid)."));

            return;
        }

        Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings = U.newHashMap(affNodes.size());

        Map<KeyCacheObject, GridNearCacheEntry> savedEntries = null;

        {
            boolean success = false;

            try {
                // Assign keys to primary nodes.
                for (KeyCacheObject key : keys)
                    savedEntries = map(key, topVer, mappings, mapped, savedEntries);

                success = true;
            }
            finally {
                // An exception has been thrown: release the reserved near entries.
                if (!success) {
                    GridCacheVersion obsolete = cctx.versions().next(topVer.topologyVersion());

                    if (savedEntries != null) {
                        for (GridNearCacheEntry reserved : savedEntries.values()) {
                            reserved.releaseEviction();

                            if (reserved.markObsolete(obsolete))
                                reserved.context().cache().removeEntry(reserved);
                        }
                    }
                }
            }
        }

        if (isDone())
            return;

        Map<KeyCacheObject, GridNearCacheEntry> saved = savedEntries != null ? savedEntries : Collections.emptyMap();

        int keysSize = keys.size();
        // Create mini futures.
        for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry : mappings.entrySet()) {
            ClusterNode n = entry.getKey();

            LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();

            assert !mappedKeys.isEmpty();

            // If this is the primary or backup node for the keys.
            if (n.isLocal()) {
                GridDhtFuture<Collection<GridCacheEntryInfo>> fut = dht().getDhtAsync(
                    n.id(),
                    -1,
                    mappedKeys,
                    false,
                    readThrough,
                    topVer,
                    subjId,
                    taskName == null ? 0 : taskName.hashCode(),
                    expiryPlc,
                    skipVals,
                    recovery,
                    null,
                    null);

                // TODO IGNITE-7371
                Collection<Integer> invalidParts = fut.invalidPartitions();

                if (!F.isEmpty(invalidParts)) {
                    Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);

                    for (KeyCacheObject key : keys) {
                        int part = cctx.affinity().partition(key);

                        if (key != null && invalidParts.contains(part)) {
                            addNodeAsInvalid(n, part, topVer);

                            remapKeys.add(key);
                        }
                    }

                    AffinityTopologyVersion updTopVer = cctx.shared().exchange().readyAffinityVersion();

                    // Remap recursively.
                    map(remapKeys, mappings, updTopVer);
                }

                // Add new future.
                add(fut.chain(f -> {
                    try {
                        return loadEntries(n.id(), mappedKeys.keySet(), f.get(), saved, topVer);
                    }
                    catch (Exception e) {
                        U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);

                        onDone(e);

                        return Collections.emptyMap();
                    }
                }));
            }
            else {
                registrateFutureInMvccManager(this);

                MiniFuture miniFuture = new MiniFuture(n, mappedKeys, saved, topVer);

                GridNearGetRequest req = miniFuture.createGetRequest(futId);

                // Append new future.
                add(miniFuture);

                try {
                    cctx.io().send(n, req, cctx.ioPolicy());
                }
                catch (IgniteCheckedException e) {
                    // Fail the whole thing.
                    if (e instanceof ClusterTopologyCheckedException)
                        miniFuture.onNodeLeft((ClusterTopologyCheckedException)e);
                    else
                        miniFuture.onResult(e);
                }
            }
        }
    }
}
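
One detail specific to GridNearGetFuture.map is the block that reserves near-cache entries while keys are being assigned: a success flag is checked in a finally clause so that, if mapping throws, every entry reserved so far is released and marked obsolete. The following self-contained sketch shows the same reserve-or-roll-back idiom in plain Java; the Reservable interface and all names are hypothetical stand-ins used for illustration, not GridGain API.

import java.util.ArrayList;
import java.util.List;

// Stand-alone sketch of the "success flag + finally" cleanup idiom used above when
// reserving near-cache entries. Reservable, reserve() and release() are hypothetical.
public class ReserveAllOrRollback {
    /** A stand-in for a resource that must be released if the whole operation fails. */
    interface Reservable {
        void reserve();

        void release();
    }

    /** Reserves every item, or releases the ones already reserved if any step throws. */
    static List<Reservable> reserveAll(List<Reservable> items) {
        List<Reservable> reserved = new ArrayList<>();

        boolean success = false;

        try {
            for (Reservable item : items) {
                item.reserve(); // May throw, just like key mapping in GridNearGetFuture.map.

                reserved.add(item);
            }

            success = true;

            return reserved;
        }
        finally {
            // Mirrors the finally block above: roll back partial reservations on failure.
            if (!success) {
                for (Reservable r : reserved)
                    r.release();
            }
        }
    }
}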