Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class IgfsUtils, method toIgfsException.
/**
 * Converts any passed exception to IGFS exception.
 *
 * @param err Initial exception.
 * @return Converted IGFS exception.
 */
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
public static IgfsException toIgfsException(Throwable err) {
    IgfsException err0 = err instanceof IgfsException ? (IgfsException) err : null;

    IgfsException igfsErr = X.cause(err, IgfsException.class);

    // Walk down the cause chain to the deepest IGFS exception.
    while (igfsErr != null && igfsErr != err0) {
        err0 = igfsErr;

        igfsErr = X.cause(err0.getCause(), IgfsException.class);
    }

    // If initial exception is already IGFS exception and no inner stuff exists, just return it unchanged.
    if (err0 != err) {
        if (err0 != null)
            // Dealing with a kind of IGFS error, wrap it once again, preserving message and root cause.
            err0 = newIgfsException(err0.getClass(), err0.getMessage(), err0);
        else {
            if (err instanceof ClusterTopologyServerNotFoundException)
                err0 = new IgfsException("Cache server nodes not found.", err);
            else
                // Unknown error nature.
                err0 = new IgfsException("Generic IGFS error occurred.", err);
        }
    }

    return err0;
}
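The method above normalizes arbitrary failures into IgfsException: it walks the cause chain for the deepest IGFS-specific cause and re-wraps it, falling back to a dedicated message when the root problem is a ClusterTopologyServerNotFoundException (no cache server nodes). Below is a rough, self-contained illustration of the same unwrap-or-wrap pattern using only JDK types; ServiceException and toServiceException are hypothetical names, not Ignite API.

import java.io.IOException;

public class ExceptionConversionSketch {
    /** Hypothetical domain exception standing in for IgfsException. */
    static class ServiceException extends RuntimeException {
        ServiceException(String msg, Throwable cause) { super(msg, cause); }
    }

    /** Finds the deepest ServiceException cause, or wraps the error if none exists. */
    static ServiceException toServiceException(Throwable err) {
        ServiceException deepest = null;

        // Walk the cause chain and remember the last (deepest) ServiceException seen.
        for (Throwable t = err; t != null; t = t.getCause()) {
            if (t instanceof ServiceException)
                deepest = (ServiceException) t;
        }

        if (deepest == err)
            // Already a ServiceException with no deeper domain cause: return unchanged.
            return deepest;

        if (deepest != null)
            // Re-wrap, preserving the original message and cause, as toIgfsException does.
            return new ServiceException(deepest.getMessage(), deepest);

        // No domain-specific cause found: wrap with a generic message.
        return new ServiceException("Generic service error occurred.", err);
    }

    public static void main(String[] args) {
        Throwable nested = new IOException(new ServiceException("disk full", null));

        System.out.println(toServiceException(nested).getMessage()); // Prints "disk full".
    }
}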
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridNearLockFuture, method map.
/**
 * Maps keys to nodes. Note that we cannot simply group keys by nodes and send one lock request per
 * node, as such an approach does not preserve the order of lock acquisition. Instead, keys are split
 * into contiguous groups belonging to one primary node, and locks for these groups are acquired
 * sequentially.
 *
 * @param keys Keys.
 * @param remap Remap flag.
 * @param topLocked {@code True} if thread already acquired lock preventing topology change.
 */
private void map(Iterable<KeyCacheObject> keys, boolean remap, boolean topLocked) {
    try {
        AffinityTopologyVersion topVer = this.topVer;

        assert topVer != null;
        assert topVer.topologyVersion() > 0 : topVer;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache (all " +
                "partition nodes left the grid)."));

            return;
        }

        boolean clientNode = cctx.kernalContext().clientNode();

        assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));

        synchronized (this) {
            mappings = new ArrayDeque<>();

            // Assign keys to primary nodes.
            GridNearLockMapping map = null;

            for (KeyCacheObject key : keys) {
                GridNearLockMapping updated = map(key, map, topVer);

                // If new mapping was created, add to collection.
                if (updated != map) {
                    mappings.add(updated);

                    if (tx != null && updated.node().isLocal())
                        tx.nearLocallyMapped(true);
                }

                map = updated;
            }

            if (isDone()) {
                if (log.isDebugEnabled())
                    log.debug("Abandoning (re)map because future is done: " + this);

                return;
            }

            if (log.isDebugEnabled())
                log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

            boolean first = true;

            // Create mini futures.
            for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
                GridNearLockMapping mapping = iter.next();

                ClusterNode node = mapping.node();

                Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();

                assert !mappedKeys.isEmpty();

                GridNearLockRequest req = null;

                Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());

                boolean explicit = false;

                for (KeyCacheObject key : mappedKeys) {
                    IgniteTxKey txKey = cctx.txKey(key);

                    while (true) {
                        GridNearCacheEntry entry = null;

                        try {
                            entry = cctx.near().entryExx(key, topVer);

                            if (!cctx.isAll(entry, filter)) {
                                if (log.isDebugEnabled())
                                    log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                                onComplete(false, false);

                                return;
                            }

                            // Removed exception may be thrown here.
                            GridCacheMvccCandidate cand = addEntry(topVer, entry, node.id());

                            if (isDone()) {
                                if (log.isDebugEnabled())
                                    log.debug("Abandoning (re)map because future is done after addEntry attempt " +
                                        "[fut=" + this + ", entry=" + entry + ']');

                                return;
                            }

                            if (cand != null) {
                                if (tx == null && !cand.reentry())
                                    cctx.mvcc().addExplicitLock(threadId, cand, topVer);

                                IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.versionedValue();

                                if (val == null) {
                                    GridDhtCacheEntry dhtEntry = dht().peekExx(key);

                                    try {
                                        if (dhtEntry != null)
                                            val = dhtEntry.versionedValue(topVer);
                                    } catch (GridCacheEntryRemovedException ignored) {
                                        assert dhtEntry.obsolete() : dhtEntry;

                                        if (log.isDebugEnabled())
                                            log.debug("Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                                    }
                                }

                                GridCacheVersion dhtVer = null;

                                if (val != null) {
                                    dhtVer = val.get1();

                                    valMap.put(key, val);
                                }

                                if (!cand.reentry()) {
                                    if (req == null) {
                                        boolean clientFirst = false;

                                        if (first) {
                                            clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());

                                            first = false;
                                        }

                                        assert !implicitTx() && !implicitSingleTx() : tx;

                                        req = new GridNearLockRequest(
                                            cctx.cacheId(),
                                            topVer,
                                            cctx.nodeId(),
                                            threadId,
                                            futId,
                                            lockVer,
                                            inTx(),
                                            read,
                                            retval,
                                            isolation(),
                                            isInvalidate(),
                                            timeout,
                                            mappedKeys.size(),
                                            inTx() ? tx.size() : mappedKeys.size(),
                                            inTx() && tx.syncMode() == FULL_SYNC,
                                            inTx() ? tx.subjectId() : null,
                                            inTx() ? tx.taskNameHash() : 0,
                                            read ? createTtl : -1L,
                                            read ? accessTtl : -1L,
                                            skipStore,
                                            keepBinary,
                                            clientFirst,
                                            cctx.deploymentEnabled());

                                        mapping.request(req);
                                    }

                                    distributedKeys.add(key);

                                    if (tx != null)
                                        tx.addKeyMapping(txKey, mapping.node());

                                    req.addKeyBytes(
                                        key,
                                        retval && dhtVer == null,
                                        dhtVer, // Include DHT version to match remote DHT entry.
                                        cctx);
                                }

                                if (cand.reentry())
                                    explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                            } else
                                // Ignore reentries within transactions.
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

                            if (explicit)
                                tx.addKeyMapping(txKey, mapping.node());

                            break;
                        } catch (GridCacheEntryRemovedException ignored) {
                            assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

                            if (log.isDebugEnabled())
                                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                        }
                    }

                    // Mark mapping explicit lock flag.
                    if (explicit) {
                        boolean marked = tx != null && tx.markExplicit(node.id());

                        assert tx == null || marked;
                    }
                }

                if (!distributedKeys.isEmpty())
                    mapping.distributedKeys(distributedKeys);
                else {
                    assert mapping.request() == null;

                    iter.remove();
                }
            }
        }

        cctx.mvcc().recheckPendingLocks();

        proceedMapping();
    } catch (IgniteCheckedException ex) {
        onError(ex);
    }
}
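The javadoc above is the interesting part: keys are walked in order and a new mapping is started whenever the primary node changes, so lock requests can be sent group by group without reordering lock acquisition. Here is a minimal, self-contained sketch of that grouping step, assuming a NodeMapping placeholder type and a primaryFor function (neither is Ignite API).

import java.util.*;
import java.util.function.Function;

public class ContiguousGroupingSketch {
    /** Illustrative stand-in for GridNearLockMapping: one primary node plus its contiguous keys. */
    static class NodeMapping {
        final String nodeId;
        final List<String> keys = new ArrayList<>();

        NodeMapping(String nodeId) { this.nodeId = nodeId; }

        @Override public String toString() { return nodeId + keys; }
    }

    /** Splits ordered keys into contiguous groups, starting a new group whenever the primary changes. */
    static Deque<NodeMapping> map(List<String> orderedKeys, Function<String, String> primaryFor) {
        Deque<NodeMapping> mappings = new ArrayDeque<>();

        NodeMapping cur = null;

        for (String key : orderedKeys) {
            String primary = primaryFor.apply(key);

            // New group only when the primary node differs from the previous key's primary.
            if (cur == null || !cur.nodeId.equals(primary)) {
                cur = new NodeMapping(primary);

                mappings.add(cur);
            }

            cur.keys.add(key);
        }

        return mappings;
    }

    public static void main(String[] args) {
        // Node A appears twice because its keys are not contiguous: order of acquisition is preserved.
        Function<String, String> primaryFor = k -> k.equals("k2") ? "B" : "A";

        System.out.println(map(Arrays.asList("k1", "k2", "k3", "k4"), primaryFor));
        // -> [A[k1], B[k2], A[k3, k4]]
    }
}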
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridNearGetFuture, method map.
/**
* @param keys Keys.
* @param mapped Mappings to check for duplicates.
* @param topVer Topology version to map on.
*/
private void map(Collection<KeyCacheObject> keys, Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped, final AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx, topVer);

    if (affNodes.isEmpty()) {
        assert !cctx.affinityNode();

        onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache (all partition " +
            "nodes left the grid)."));

        return;
    }

    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings = U.newHashMap(affNodes.size());

    Map<KeyCacheObject, GridNearCacheEntry> savedEntries = null;

    {
        boolean success = false;

        try {
            // Assign keys to primary nodes.
            for (KeyCacheObject key : keys)
                savedEntries = map(key, mappings, topVer, mapped, savedEntries);

            success = true;
        } finally {
            // Exception has been thrown, must release reserved near entries.
            if (!success) {
                GridCacheVersion obsolete = cctx.versions().next(topVer);

                if (savedEntries != null) {
                    for (GridNearCacheEntry reserved : savedEntries.values()) {
                        reserved.releaseEviction();

                        if (reserved.markObsolete(obsolete))
                            reserved.context().cache().removeEntry(reserved);
                    }
                }
            }
        }
    }

    if (isDone())
        return;

    final Map<KeyCacheObject, GridNearCacheEntry> saved = savedEntries != null ? savedEntries :
        Collections.<KeyCacheObject, GridNearCacheEntry>emptyMap();

    final int keysSize = keys.size();

    // Create mini futures.
    for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry : mappings.entrySet()) {
        final ClusterNode n = entry.getKey();

        final LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();

        assert !mappedKeys.isEmpty();

        // If this is the primary or backup node for the keys.
        if (n.isLocal()) {
            final GridDhtFuture<Collection<GridCacheEntryInfo>> fut = dht().getDhtAsync(n.id(),
                -1,
                mappedKeys,
                readThrough,
                topVer,
                subjId,
                taskName == null ? 0 : taskName.hashCode(),
                expiryPlc,
                skipVals,
                recovery);

            final Collection<Integer> invalidParts = fut.invalidPartitions();

            if (!F.isEmpty(invalidParts)) {
                Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);

                for (KeyCacheObject key : keys) {
                    if (key != null && invalidParts.contains(cctx.affinity().partition(key)))
                        remapKeys.add(key);
                }

                AffinityTopologyVersion updTopVer = cctx.discovery().topologyVersionEx();

                assert updTopVer.compareTo(topVer) > 0 : "Got invalid partitions for local node but topology version did " +
                    "not change [topVer=" + topVer + ", updTopVer=" + updTopVer +
                    ", invalidParts=" + invalidParts + ']';

                // Remap recursively.
                map(remapKeys, mappings, updTopVer);
            }

            // Add new future.
            add(fut.chain(new C1<IgniteInternalFuture<Collection<GridCacheEntryInfo>>, Map<K, V>>() {
                @Override public Map<K, V> apply(IgniteInternalFuture<Collection<GridCacheEntryInfo>> fut) {
                    try {
                        return loadEntries(n.id(), mappedKeys.keySet(), fut.get(), saved, topVer);
                    } catch (Exception e) {
                        U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);

                        onDone(e);

                        return Collections.emptyMap();
                    }
                }
            }));
        } else {
            if (!trackable) {
                trackable = true;

                cctx.mvcc().addFuture(this, futId);
            }

            MiniFuture fut = new MiniFuture(n, mappedKeys, saved, topVer);

            GridCacheMessage req = new GridNearGetRequest(
                cctx.cacheId(),
                futId,
                fut.futureId(),
                ver,
                mappedKeys,
                readThrough,
                topVer,
                subjId,
                taskName == null ? 0 : taskName.hashCode(),
                expiryPlc != null ? expiryPlc.forCreate() : -1L,
                expiryPlc != null ? expiryPlc.forAccess() : -1L,
                skipVals,
                cctx.deploymentEnabled(),
                recovery);

            // Append new future.
            add(fut);

            try {
                cctx.io().send(n, req, cctx.ioPolicy());
            } catch (IgniteCheckedException e) {
                // Fail the whole thing.
                if (e instanceof ClusterTopologyCheckedException)
                    fut.onNodeLeft();
                else
                    fut.onResult(e);
            }
        }
    }
}
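One detail worth calling out in the local branch above is the handling of invalid partitions: the affected keys are collected and the method recurses with a newer topology version. Below is a self-contained sketch of that filter-and-remap step, with the Remapper callback and partitionOf function as illustrative placeholders for the recursive map(...) call and the affinity lookup.

import java.util.*;
import java.util.function.ToIntFunction;

public class InvalidPartitionRemapSketch {
    /** Illustrative callback standing in for the recursive map(...) call on a newer topology version. */
    interface Remapper {
        void remap(Collection<String> keys, long newTopVer);
    }

    /**
     * Collects the keys whose partitions were reported invalid and hands them to the remapper,
     * mirroring the "remap recursively" branch in GridNearGetFuture.map.
     */
    static void remapInvalid(Collection<String> keys,
        Collection<Integer> invalidParts,
        ToIntFunction<String> partitionOf,
        long curTopVer,
        long newTopVer,
        Remapper remapper) {
        if (invalidParts.isEmpty())
            return;

        // The topology must have advanced, otherwise the same mapping would fail again.
        assert newTopVer > curTopVer : "Invalid partitions but topology version did not change";

        List<String> remapKeys = new ArrayList<>(keys.size());

        for (String key : keys) {
            if (key != null && invalidParts.contains(partitionOf.applyAsInt(key)))
                remapKeys.add(key);
        }

        remapper.remap(remapKeys, newTopVer);
    }

    public static void main(String[] args) {
        remapInvalid(Arrays.asList("a", "b", "c"), Collections.singleton(1), k -> k.equals("b") ? 1 : 0,
            5L, 6L, (ks, ver) -> System.out.println("Remapping " + ks + " on topology " + ver));
    }
}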
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridNearAtomicUpdateFuture, method onAllReceived.
/**
 * @return Non-null topology version if the update should be remapped.
 */
@Nullable private AffinityTopologyVersion onAllReceived() {
    assert Thread.holdsLock(this);
    assert futureMapped() : this;

    AffinityTopologyVersion remapTopVer0 = null;

    if (remapKeys != null) {
        assert remapTopVer != null;

        remapTopVer0 = remapTopVer;
    } else {
        if (err != null &&
            X.hasCause(err, CachePartialUpdateCheckedException.class) &&
            X.hasCause(err, ClusterTopologyCheckedException.class) &&
            storeFuture() &&
            --remapCnt > 0) {
            ClusterTopologyCheckedException topErr = X.cause(err, ClusterTopologyCheckedException.class);

            if (!(topErr instanceof ClusterTopologyServerNotFoundException)) {
                CachePartialUpdateCheckedException cause = X.cause(err, CachePartialUpdateCheckedException.class);

                assert cause != null && cause.topologyVersion() != null : err;
                assert remapKeys == null;
                assert remapTopVer == null;

                remapTopVer = remapTopVer0 =
                    new AffinityTopologyVersion(cause.topologyVersion().topologyVersion() + 1);

                err = null;

                Collection<Object> failedKeys = cause.failedKeys();

                remapKeys = new ArrayList<>(failedKeys.size());

                for (Object key : failedKeys)
                    remapKeys.add(cctx.toCacheKeyObject(key));
            }
        }
    }

    if (remapTopVer0 != null) {
        cctx.mvcc().removeAtomicFuture(futId);

        topVer = AffinityTopologyVersion.ZERO;
        futId = 0;

        remapTopVer = null;
    }

    return remapTopVer0;
}
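The method above decides whether a failed atomic update should be retried: only for partial update failures caused by a topology change, never when server nodes are missing entirely (the ClusterTopologyServerNotFoundException case), and at most a bounded number of times. Below is a self-contained sketch of that decision logic; PartialUpdateInfo and RemapDecision are hypothetical stand-ins for the Ignite internals.

import java.util.*;

public class RemapDecisionSketch {
    /** Illustrative view of a partial-update failure: which keys failed and on which topology version. */
    static class PartialUpdateInfo {
        final Collection<String> failedKeys;
        final long topVer;
        final boolean serverNodesMissing;

        PartialUpdateInfo(Collection<String> failedKeys, long topVer, boolean serverNodesMissing) {
            this.failedKeys = failedKeys;
            this.topVer = topVer;
            this.serverNodesMissing = serverNodesMissing;
        }
    }

    /** Result: the topology version to remap on and the keys to retry. */
    static class RemapDecision {
        final long remapTopVer;
        final Collection<String> remapKeys;

        RemapDecision(long remapTopVer, Collection<String> remapKeys) {
            this.remapTopVer = remapTopVer;
            this.remapKeys = remapKeys;
        }
    }

    /**
     * Mirrors the shape of onAllReceived: retry only topology-related partial failures, never when
     * server nodes are gone entirely, and only while the bounded remap counter allows it.
     */
    static RemapDecision decide(PartialUpdateInfo failure, int remainingRemaps) {
        if (failure == null || remainingRemaps <= 0)
            return null;

        // No server nodes at all: remapping cannot help, propagate the error instead.
        if (failure.serverNodesMissing)
            return null;

        // Retry the failed keys on the next topology version.
        return new RemapDecision(failure.topVer + 1, new ArrayList<>(failure.failedKeys));
    }

    public static void main(String[] args) {
        PartialUpdateInfo f = new PartialUpdateInfo(Arrays.asList("k1", "k2"), 10L, false);

        RemapDecision d = decide(f, 3);

        System.out.println("Remap on topology " + d.remapTopVer + " for keys " + d.remapKeys);
    }
}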
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridNearAtomicUpdateFuture, method mapUpdate.
/**
* @param topNodes Cache nodes.
* @param topVer Topology version.
* @param futId Future ID.
* @param remapKeys Keys to remap.
* @return Mapping.
* @throws Exception If failed.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
private Map<UUID, PrimaryRequestState> mapUpdate(Collection<ClusterNode> topNodes, AffinityTopologyVersion topVer, Long futId, @Nullable Collection<KeyCacheObject> remapKeys, boolean mappingKnown) throws Exception {
    Iterator<?> it = null;

    if (vals != null)
        it = vals.iterator();

    Iterator<GridCacheDrInfo> conflictPutValsIt = null;

    if (conflictPutVals != null)
        conflictPutValsIt = conflictPutVals.iterator();

    Iterator<GridCacheVersion> conflictRmvValsIt = null;

    if (conflictRmvVals != null)
        conflictRmvValsIt = conflictRmvVals.iterator();

    Map<UUID, PrimaryRequestState> pendingMappings = U.newHashMap(topNodes.size());

    // Create mappings first, then send messages.
    for (Object key : keys) {
        if (key == null)
            throw new NullPointerException("Null key.");

        Object val;
        GridCacheVersion conflictVer;
        long conflictTtl;
        long conflictExpireTime;

        if (vals != null) {
            val = it.next();

            conflictVer = null;
            conflictTtl = CU.TTL_NOT_CHANGED;
            conflictExpireTime = CU.EXPIRE_TIME_CALCULATE;

            if (val == null)
                throw new NullPointerException("Null value.");
        } else if (conflictPutVals != null) {
            GridCacheDrInfo conflictPutVal = conflictPutValsIt.next();

            val = conflictPutVal.valueEx();
            conflictVer = conflictPutVal.version();
            conflictTtl = conflictPutVal.ttl();
            conflictExpireTime = conflictPutVal.expireTime();
        } else if (conflictRmvVals != null) {
            val = null;
            conflictVer = conflictRmvValsIt.next();
            conflictTtl = CU.TTL_NOT_CHANGED;
            conflictExpireTime = CU.EXPIRE_TIME_CALCULATE;
        } else {
            val = null;
            conflictVer = null;
            conflictTtl = CU.TTL_NOT_CHANGED;
            conflictExpireTime = CU.EXPIRE_TIME_CALCULATE;
        }

        if (val == null && op != GridCacheOperation.DELETE)
            continue;

        KeyCacheObject cacheKey = cctx.toCacheKeyObject(key);

        if (remapKeys != null && !remapKeys.contains(cacheKey))
            continue;

        if (op != TRANSFORM)
            val = cctx.toCacheObject(val);
        else
            val = EntryProcessorResourceInjectorProxy.wrap(cctx.kernalContext(), (EntryProcessor) val);

        List<ClusterNode> nodes = cctx.affinity().nodesByKey(cacheKey, topVer);

        if (F.isEmpty(nodes))
            throw new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
                "(all partition nodes left the grid).");

        ClusterNode primary = nodes.get(0);

        boolean needPrimaryRes = !mappingKnown || primary.isLocal();

        UUID nodeId = primary.id();

        PrimaryRequestState mapped = pendingMappings.get(nodeId);

        if (mapped == null) {
            GridNearAtomicFullUpdateRequest req = new GridNearAtomicFullUpdateRequest(
                cctx.cacheId(),
                nodeId,
                futId,
                topVer,
                topLocked,
                syncMode,
                op,
                retval,
                expiryPlc,
                invokeArgs,
                filter,
                subjId,
                taskNameHash,
                needPrimaryRes,
                skipStore,
                keepBinary,
                recovery,
                cctx.deploymentEnabled(),
                keys.size());

            mapped = new PrimaryRequestState(req, nodes, false);

            pendingMappings.put(nodeId, mapped);
        }

        if (mapped.req.initMappingLocally())
            mapped.addMapping(nodes);

        mapped.req.addUpdateEntry(cacheKey, val, conflictTtl, conflictExpireTime, conflictVer);
    }

    return pendingMappings;
}
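In contrast to the near lock mapping earlier, atomic updates do not need contiguous groups: each key is simply appended to a single batch request for its primary node, and a key with no affinity nodes is surfaced as ClusterTopologyServerNotFoundException. Here is a self-contained sketch of that batching step, assuming a Batch placeholder type and a primaryNodesFor function, with a plain IllegalStateException standing in for the Ignite exception.

import java.util.*;
import java.util.function.Function;

public class PrimaryBatchingSketch {
    /** Illustrative stand-in for PrimaryRequestState: one batch of updates per primary node. */
    static class Batch {
        final String primaryNodeId;
        final Map<String, String> entries = new LinkedHashMap<>();

        Batch(String primaryNodeId) { this.primaryNodeId = primaryNodeId; }
    }

    /**
     * Batches updates by primary node, one request per node, failing when a key has no affinity
     * nodes (the ClusterTopologyServerNotFoundException case in mapUpdate).
     */
    static Map<String, Batch> mapUpdate(Map<String, String> updates,
        Function<String, List<String>> primaryNodesFor) {
        Map<String, Batch> pending = new HashMap<>();

        for (Map.Entry<String, String> e : updates.entrySet()) {
            List<String> nodes = primaryNodesFor.apply(e.getKey());

            if (nodes == null || nodes.isEmpty())
                throw new IllegalStateException(
                    "Failed to map keys for cache (all partition nodes left the grid).");

            String primary = nodes.get(0);

            // Reuse the batch for this primary node, creating it on first use.
            pending.computeIfAbsent(primary, Batch::new).entries.put(e.getKey(), e.getValue());
        }

        return pending;
    }

    public static void main(String[] args) {
        Map<String, String> updates = new LinkedHashMap<>();
        updates.put("k1", "v1");
        updates.put("k2", "v2");

        Map<String, Batch> batches = mapUpdate(updates, k -> Arrays.asList(k.equals("k2") ? "B" : "A"));

        batches.forEach((node, b) -> System.out.println(node + " -> " + b.entries));
    }
}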