Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridPartitionedGetFuture, method map().
/**
* @param keys Keys.
* @param mapped Mappings to check for duplicates.
* @param topVer Topology version on which keys should be mapped.
*/
private void map(Collection<KeyCacheObject> keys, Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped, final AffinityTopologyVersion topVer) {
Collection<ClusterNode> cacheNodes = CU.affinityNodes(cctx, topVer);
if (cacheNodes.isEmpty()) {
onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " + "(all partition nodes left the grid) [topVer=" + topVer + ", cache=" + cctx.name() + ']'));
return;
}
GridDhtTopologyFuture topFut = cctx.shared().exchange().lastFinishedFuture();
Throwable err = topFut != null ? topFut.validateCache(cctx, recovery, true, null, keys) : null;
if (err != null) {
onDone(err);
return;
}
Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings = U.newHashMap(cacheNodes.size());
final int keysSize = keys.size();
Map<K, V> locVals = U.newHashMap(keysSize);
boolean hasRmtNodes = false;
// Assign keys to primary nodes.
for (KeyCacheObject key : keys) hasRmtNodes |= map(key, mappings, locVals, topVer, mapped);
if (isDone())
return;
if (!locVals.isEmpty())
add(new GridFinishedFuture<>(locVals));
if (hasRmtNodes) {
if (!trackable) {
trackable = true;
cctx.mvcc().addFuture(this, futId);
}
}
// Create mini futures.
for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry : mappings.entrySet()) {
final ClusterNode n = entry.getKey();
final LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();
assert !mappedKeys.isEmpty();
// If this is the primary or backup node for the keys.
if (n.isLocal()) {
final GridDhtFuture<Collection<GridCacheEntryInfo>> fut = cache().getDhtAsync(n.id(), -1, mappedKeys, false, readThrough, topVer, subjId, taskName == null ? 0 : taskName.hashCode(), expiryPlc, skipVals, recovery);
final Collection<Integer> invalidParts = fut.invalidPartitions();
if (!F.isEmpty(invalidParts)) {
Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);
for (KeyCacheObject key : keys) {
if (key != null && invalidParts.contains(cctx.affinity().partition(key)))
remapKeys.add(key);
}
AffinityTopologyVersion updTopVer = cctx.shared().exchange().readyAffinityVersion();
assert updTopVer.compareTo(topVer) > 0 : "Got invalid partitions for local node but topology version did " + "not change [topVer=" + topVer + ", updTopVer=" + updTopVer + ", invalidParts=" + invalidParts + ']';
// Remap recursively.
map(remapKeys, mappings, updTopVer);
}
// Add new future.
add(fut.chain(new C1<IgniteInternalFuture<Collection<GridCacheEntryInfo>>, Map<K, V>>() {
@Override
public Map<K, V> apply(IgniteInternalFuture<Collection<GridCacheEntryInfo>> fut) {
try {
return createResultMap(fut.get());
} catch (Exception e) {
U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);
onDone(e);
return Collections.emptyMap();
}
}
}));
} else {
MiniFuture fut = new MiniFuture(n, mappedKeys, topVer, CU.createBackupPostProcessingClosure(topVer, log, cctx, null, expiryPlc, readThrough, skipVals));
GridCacheMessage req = new GridNearGetRequest(cctx.cacheId(), futId, fut.futureId(), null, mappedKeys, readThrough, topVer, subjId, taskName == null ? 0 : taskName.hashCode(), expiryPlc != null ? expiryPlc.forCreate() : -1L, expiryPlc != null ? expiryPlc.forAccess() : -1L, false, skipVals, cctx.deploymentEnabled(), recovery);
// Append new future.
add(fut);
try {
cctx.io().send(n, req, cctx.ioPolicy());
} catch (IgniteCheckedException e) {
// Fail the whole thing.
if (e instanceof ClusterTopologyCheckedException)
fut.onNodeLeft((ClusterTopologyCheckedException) e);
else
fut.onResult(e);
}
}
}
}
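As a point of comparison, the key-to-node assignment that map() performs internally can be approximated from user code with the public Affinity API. The sketch below is illustrative only: the cache name "myCache" is an assumption, and the treatment of unmapped keys (keys simply absent from the returned map) is my reading of the API rather than a documented guarantee. Internally, the same "no owning server node" situation is what gets reported as ClusterTopologyServerNotFoundException.

import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class AffinityMappingSketch {
    public static void main(String[] args) {
        // Assumes a running cluster with a cache named "myCache" (hypothetical name).
        try (Ignite ignite = Ignition.start()) {
            Affinity<Integer> aff = ignite.affinity("myCache");

            Collection<Integer> keys = Arrays.asList(1, 2, 3);

            // Group keys by the node that currently owns their partition --
            // the public-API counterpart of assigning keys to primary nodes.
            Map<ClusterNode, Collection<Integer>> byNode = aff.mapKeysToNodes(keys);

            // Collect all keys that received a mapping.
            Set<Integer> mapped = new HashSet<>();

            for (Collection<Integer> nodeKeys : byNode.values())
                mapped.addAll(nodeKeys);

            // Keys left unmapped have no owning server node, which is roughly the
            // condition the internal future reports as ClusterTopologyServerNotFoundException.
            for (Integer key : keys) {
                if (!mapped.contains(key))
                    System.out.println("No server node currently owns key " + key);
            }
        }
    }
}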
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class CacheAffinitySharedManager, method processClientCacheStartRequests().
/**
* @param msg Change request.
* @param crd Coordinator flag.
* @param topVer Current topology version.
* @param discoCache Discovery data cache.
* @return Map of started caches (cache ID to near enabled flag).
*/
@Nullable
private Map<Integer, Boolean> processClientCacheStartRequests(ClientCacheChangeDummyDiscoveryMessage msg, boolean crd, AffinityTopologyVersion topVer, DiscoCache discoCache) {
Map<String, DynamicCacheChangeRequest> startReqs = msg.startRequests();
if (startReqs == null)
return null;
List<DynamicCacheDescriptor> startDescs = clientCachesToStart(msg.requestId(), msg.startRequests());
if (startDescs == null || startDescs.isEmpty()) {
cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null);
return null;
}
Map<Integer, GridDhtAssignmentFetchFuture> fetchFuts = U.newHashMap(startDescs.size());
Set<String> startedCaches = U.newHashSet(startDescs.size());
Map<Integer, Boolean> startedInfos = U.newHashMap(startDescs.size());
for (DynamicCacheDescriptor desc : startDescs) {
try {
startedCaches.add(desc.cacheName());
DynamicCacheChangeRequest startReq = startReqs.get(desc.cacheName());
cctx.cache().prepareCacheStart(desc.cacheConfiguration(), desc, startReq.nearCacheConfiguration(), topVer, startReq.disabledAfterStart());
startedInfos.put(desc.cacheId(), startReq.nearCacheConfiguration() != null);
CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());
assert grp != null : desc.groupId();
assert !grp.affinityNode() || grp.isLocal() : grp.cacheOrGroupName();
if (!grp.isLocal() && grp.affinity().lastVersion().equals(AffinityTopologyVersion.NONE)) {
assert grp.localStartVersion().equals(topVer) : grp.localStartVersion();
if (crd) {
CacheGroupHolder grpHolder = grpHolders.get(grp.groupId());
assert grpHolder != null && grpHolder.affinity().idealAssignment() != null;
if (grpHolder.client()) {
ClientCacheDhtTopologyFuture topFut = new ClientCacheDhtTopologyFuture(topVer);
grp.topology().updateTopologyVersion(topFut, discoCache, -1, false);
grpHolder = new CacheGroupHolder1(grp, grpHolder.affinity());
grpHolders.put(grp.groupId(), grpHolder);
GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());
if (clientTop != null) {
grp.topology().update(grpHolder.affinity().lastVersion(), clientTop.partitionMap(true), clientTop.fullUpdateCounters(), Collections.<Integer>emptySet(), null);
}
assert grpHolder.affinity().lastVersion().equals(grp.affinity().lastVersion());
}
} else if (!fetchFuts.containsKey(grp.groupId())) {
GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cctx, grp.groupId(), topVer, discoCache);
fetchFut.init(true);
fetchFuts.put(grp.groupId(), fetchFut);
}
}
} catch (IgniteCheckedException e) {
cctx.cache().closeCaches(startedCaches, false);
cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);
return null;
}
}
for (GridDhtAssignmentFetchFuture fetchFut : fetchFuts.values()) {
try {
CacheGroupContext grp = cctx.cache().cacheGroup(fetchFut.groupId());
assert grp != null;
GridDhtAffinityAssignmentResponse res = fetchAffinity(topVer, null, discoCache, grp.affinity(), fetchFut);
GridDhtPartitionFullMap partMap;
ClientCacheDhtTopologyFuture topFut;
if (res != null) {
partMap = res.partitionMap();
assert partMap != null : res;
topFut = new ClientCacheDhtTopologyFuture(topVer);
} else {
partMap = new GridDhtPartitionFullMap(cctx.localNodeId(), cctx.localNode().order(), 1);
topFut = new ClientCacheDhtTopologyFuture(topVer, new ClusterTopologyServerNotFoundException("All server nodes left grid."));
}
grp.topology().updateTopologyVersion(topFut, discoCache, -1, false);
grp.topology().update(topVer, partMap, null, Collections.<Integer>emptySet(), null);
topFut.validate(grp, discoCache.allNodes());
} catch (IgniteCheckedException e) {
cctx.cache().closeCaches(startedCaches, false);
cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);
return null;
}
}
for (DynamicCacheDescriptor desc : startDescs) {
if (desc.cacheConfiguration().getCacheMode() != LOCAL) {
CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());
assert grp != null;
grp.topology().onExchangeDone(null, grp.affinity().cachedAffinity(topVer), true);
}
}
cctx.cache().initCacheProxies(topVer, null);
cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null);
return startedInfos;
}
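The fallback branch above creates the ClientCacheDhtTopologyFuture with a ClusterTopologyServerNotFoundException("All server nodes left grid.") when affinity could not be fetched. From application code on a client node, the closest observable signal is the disappearance of the last server node. Below is a minimal sketch, assuming discovery events are enabled in the node configuration via IgniteConfiguration.setIncludeEventTypes(...); the reaction itself is a placeholder.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.events.DiscoveryEvent;
import org.apache.ignite.events.EventType;

public class ServerLossWatcher {
    public static void main(String[] args) {
        // Assumes a node started in client mode whose configuration enables
        // EVT_NODE_LEFT and EVT_NODE_FAILED via setIncludeEventTypes(...).
        Ignite ignite = Ignition.start();

        ignite.events().localListen(evt -> {
            DiscoveryEvent discoEvt = (DiscoveryEvent)evt;

            // Once the last server is gone, client caches can no longer map keys;
            // internally that surfaces as ClusterTopologyServerNotFoundException.
            if (ignite.cluster().forServers().nodes().isEmpty()) {
                System.err.println("All server nodes left the grid (last event: "
                    + discoEvt.eventNode() + ')');
            }

            return true; // Keep the listener registered.
        }, EventType.EVT_NODE_LEFT, EventType.EVT_NODE_FAILED);
    }
}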
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridNearAtomicUpdateFuture, method map().
/**
* @param topVer Topology version.
* @param remapKeys Keys to remap.
*/
private void map(AffinityTopologyVersion topVer, @Nullable Collection<KeyCacheObject> remapKeys) {
Collection<ClusterNode> topNodes = CU.affinityNodes(cctx, topVer);
if (F.isEmpty(topNodes)) {
completeFuture(null, new ClusterTopologyServerNotFoundException("Failed to map keys for cache (all partition nodes left the grid)."), null);
return;
}
long futId = cctx.mvcc().nextAtomicId();
Exception err = null;
PrimaryRequestState singleReq0 = null;
Map<UUID, PrimaryRequestState> mappings0 = null;
int size = keys.size();
boolean mappingKnown = cctx.topology().rebalanceFinished(topVer);
try {
if (size == 1) {
assert remapKeys == null || remapKeys.size() == 1;
singleReq0 = mapSingleUpdate(topVer, futId, mappingKnown);
} else {
Map<UUID, PrimaryRequestState> pendingMappings = mapUpdate(topNodes, topVer, futId, remapKeys, mappingKnown);
if (pendingMappings.size() == 1)
singleReq0 = F.firstValue(pendingMappings);
else {
mappings0 = pendingMappings;
assert !mappings0.isEmpty() || size == 0 : this;
}
}
synchronized (this) {
assert topVer.topologyVersion() > 0 : topVer;
assert this.topVer == AffinityTopologyVersion.ZERO : this;
this.topVer = topVer;
this.futId = futId;
resCnt = 0;
singleReq = singleReq0;
mappings = mappings0;
this.remapKeys = null;
}
if (storeFuture() && !cctx.mvcc().addAtomicFuture(futId, this)) {
assert isDone();
return;
}
} catch (Exception e) {
err = e;
}
if (err != null) {
completeFuture(null, err, futId);
return;
}
// Optimize mapping for single key.
if (singleReq0 != null)
sendSingleRequest(singleReq0.req.nodeId(), singleReq0.req);
else {
assert mappings0 != null;
if (size == 0) {
completeFuture(new GridCacheReturn(cctx, true, true, null, true), null, futId);
return;
} else
sendUpdateRequests(mappings0);
}
if (syncMode == FULL_ASYNC) {
completeFuture(new GridCacheReturn(cctx, true, true, null, true), null, futId);
return;
}
if (mappingKnown && syncMode == FULL_SYNC && cctx.discovery().topologyVersion() != topVer.topologyVersion())
checkDhtNodes(futId);
}
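At the public API level, the completeFuture(null, new ClusterTopologyServerNotFoundException(...), null) path above reaches the caller as a failed cache operation. The sketch below mirrors the fail-fast CU.affinityNodes() emptiness check using the public cluster API; the assumption that the internal exception reaches the caller wrapped in a CacheException reflects my understanding of the proxy layer, not something this snippet proves.

import javax.cache.CacheException;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;

public class FailFastPut {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("myCache");

            // Mirror of the internal check: no server (affinity) nodes means
            // the update cannot be mapped at all.
            if (ignite.cluster().forServers().nodes().isEmpty()) {
                System.err.println("No server nodes available, skipping update.");

                return;
            }

            try {
                cache.put(1, "value");
            }
            catch (CacheException e) {
                // If servers left between the check and the put, the internal
                // ClusterTopologyServerNotFoundException is reported to the
                // caller wrapped in a CacheException (exact type may vary).
                System.err.println("Update failed due to topology change: " + e.getMessage());
            }
        }
    }
}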
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridNearAtomicUpdateFuture, method onAllReceived().
/**
* @return Non-null topology version if update should be remapped.
*/
@Nullable
private AffinityTopologyVersion onAllReceived() {
assert Thread.holdsLock(this);
assert futureMapped() : this;
AffinityTopologyVersion remapTopVer0 = null;
if (remapKeys != null) {
assert remapTopVer != null;
remapTopVer0 = remapTopVer;
} else {
if (err != null && X.hasCause(err, CachePartialUpdateCheckedException.class) && X.hasCause(err, ClusterTopologyCheckedException.class) && storeFuture() && --remapCnt > 0) {
ClusterTopologyCheckedException topErr = X.cause(err, ClusterTopologyCheckedException.class);
if (!(topErr instanceof ClusterTopologyServerNotFoundException)) {
CachePartialUpdateCheckedException cause = X.cause(err, CachePartialUpdateCheckedException.class);
assert cause != null && cause.topologyVersion() != null : err;
assert remapKeys == null;
assert remapTopVer == null;
remapTopVer = remapTopVer0 = new AffinityTopologyVersion(cause.topologyVersion().topologyVersion() + 1);
err = null;
Collection<Object> failedKeys = cause.failedKeys();
remapKeys = new ArrayList<>(failedKeys.size());
for (Object key : failedKeys) remapKeys.add(cctx.toCacheKeyObject(key));
}
}
}
if (remapTopVer0 != null) {
cctx.mvcc().removeAtomicFuture(futId);
topVer = AffinityTopologyVersion.ZERO;
futId = 0;
remapTopVer = null;
}
return remapTopVer0;
}
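onAllReceived() remaps at most remapCnt times, and only when the topology error is not a ClusterTopologyServerNotFoundException, since a missing-server condition cannot be fixed by retrying on a newer topology version. Below is a hedged user-level analogue of that policy; the retry count, the backoff, and the choice of CacheServerNotFoundException as the public counterpart of the internal exception are assumptions.

import javax.cache.CacheException;

import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.CacheServerNotFoundException;

public class BoundedRetry {
    /** Retries a put a limited number of times on topology-related failures. */
    static void putWithRetry(IgniteCache<Integer, String> cache, int key, String val)
        throws InterruptedException {
        int remapCnt = 3; // Analogue of the internal remap counter.

        for (int attempt = 0; ; attempt++) {
            try {
                cache.put(key, val);

                return;
            }
            catch (CacheServerNotFoundException e) {
                // No server nodes at all: retrying cannot help, matching the
                // internal short-circuit for ClusterTopologyServerNotFoundException.
                throw e;
            }
            catch (CacheException e) {
                if (attempt + 1 >= remapCnt)
                    throw e;

                // Assume a transient topology change; back off and retry.
                Thread.sleep(200);
            }
        }
    }
}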
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridNearAtomicUpdateFuture, method mapUpdate().
/**
* @param topNodes Cache nodes.
* @param topVer Topology version.
* @param futId Future ID.
* @param remapKeys Keys to remap.
* @return Mapping.
* @throws Exception If failed.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
private Map<UUID, PrimaryRequestState> mapUpdate(Collection<ClusterNode> topNodes, AffinityTopologyVersion topVer, Long futId, @Nullable Collection<KeyCacheObject> remapKeys, boolean mappingKnown) throws Exception {
Iterator<?> it = null;
if (vals != null)
it = vals.iterator();
Iterator<GridCacheDrInfo> conflictPutValsIt = null;
if (conflictPutVals != null)
conflictPutValsIt = conflictPutVals.iterator();
Iterator<GridCacheVersion> conflictRmvValsIt = null;
if (conflictRmvVals != null)
conflictRmvValsIt = conflictRmvVals.iterator();
Map<UUID, PrimaryRequestState> pendingMappings = U.newHashMap(topNodes.size());
// Create mappings first, then send messages.
for (Object key : keys) {
if (key == null)
throw new NullPointerException("Null key.");
Object val;
GridCacheVersion conflictVer;
long conflictTtl;
long conflictExpireTime;
if (vals != null) {
val = it.next();
conflictVer = null;
conflictTtl = CU.TTL_NOT_CHANGED;
conflictExpireTime = CU.EXPIRE_TIME_CALCULATE;
if (val == null)
throw new NullPointerException("Null value.");
} else if (conflictPutVals != null) {
GridCacheDrInfo conflictPutVal = conflictPutValsIt.next();
val = conflictPutVal.valueEx();
conflictVer = conflictPutVal.version();
conflictTtl = conflictPutVal.ttl();
conflictExpireTime = conflictPutVal.expireTime();
} else if (conflictRmvVals != null) {
val = null;
conflictVer = conflictRmvValsIt.next();
conflictTtl = CU.TTL_NOT_CHANGED;
conflictExpireTime = CU.EXPIRE_TIME_CALCULATE;
} else {
val = null;
conflictVer = null;
conflictTtl = CU.TTL_NOT_CHANGED;
conflictExpireTime = CU.EXPIRE_TIME_CALCULATE;
}
if (val == null && op != GridCacheOperation.DELETE)
continue;
KeyCacheObject cacheKey = cctx.toCacheKeyObject(key);
if (remapKeys != null && !remapKeys.contains(cacheKey))
continue;
if (op != TRANSFORM) {
val = cctx.toCacheObject(val);
if (op == CREATE || op == UPDATE)
cctx.validateKeyAndValue(cacheKey, (CacheObject) val);
} else
val = EntryProcessorResourceInjectorProxy.wrap(cctx.kernalContext(), (EntryProcessor) val);
List<ClusterNode> nodes = cctx.affinity().nodesByKey(cacheKey, topVer);
if (F.isEmpty(nodes))
throw new ClusterTopologyServerNotFoundException("Failed to map keys for cache " + "(all partition nodes left the grid).");
ClusterNode primary = nodes.get(0);
boolean needPrimaryRes = !mappingKnown || primary.isLocal() || nearEnabled;
UUID nodeId = primary.id();
PrimaryRequestState mapped = pendingMappings.get(nodeId);
if (mapped == null) {
byte flags = GridNearAtomicAbstractUpdateRequest.flags(nearEnabled, topLocked, retval, mappingKnown, needPrimaryRes, skipStore, keepBinary, recovery);
GridNearAtomicFullUpdateRequest req = new GridNearAtomicFullUpdateRequest(cctx.cacheId(), nodeId, futId, topVer, syncMode, op, expiryPlc, invokeArgs, filter, subjId, taskNameHash, flags, cctx.deploymentEnabled(), keys.size());
mapped = new PrimaryRequestState(req, nodes, false);
pendingMappings.put(nodeId, mapped);
}
if (mapped.req.initMappingLocally())
mapped.addMapping(nodes);
mapped.req.addUpdateEntry(cacheKey, val, conflictTtl, conflictExpireTime, conflictVer);
}
return pendingMappings;
}
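mapUpdate() walks the key set, rejects null keys and values, resolves the primary node for each key through the affinity function, and groups updates into one request per primary node. The sketch below is a rough public-API analogue of that grouping step using Affinity.mapKeyToNode(); the cache name, key/value types, and the IllegalStateException used where the internal code throws ClusterTopologyServerNotFoundException are all illustrative choices.

import java.util.HashMap;
import java.util.Map;

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class PerPrimaryBatching {
    /** Splits a batch into per-primary-node sub-batches, mirroring mapUpdate(). */
    static Map<ClusterNode, Map<Integer, String>> splitByPrimary(Ignite ignite, String cacheName,
        Map<Integer, String> batch) {
        Affinity<Integer> aff = ignite.affinity(cacheName);

        Map<ClusterNode, Map<Integer, String>> byPrimary = new HashMap<>();

        for (Map.Entry<Integer, String> e : batch.entrySet()) {
            // Same null checks as mapUpdate(): neither key nor value may be null.
            if (e.getKey() == null)
                throw new NullPointerException("Null key.");

            if (e.getValue() == null)
                throw new NullPointerException("Null value.");

            ClusterNode primary = aff.mapKeyToNode(e.getKey());

            // A null primary means no affinity nodes own the key's partition -- the case
            // the internal code reports as ClusterTopologyServerNotFoundException.
            if (primary == null)
                throw new IllegalStateException("Failed to map key (no server nodes): " + e.getKey());

            byPrimary.computeIfAbsent(primary, n -> new HashMap<>()).put(e.getKey(), e.getValue());
        }

        return byPrimary;
    }
}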