Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class IgfsUtils, method toIgfsException.
/**
* Converts any passed exception to IGFS exception.
*
* @param err Initial exception.
* @return Converted IGFS exception.
*/
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
public static IgfsException toIgfsException(Throwable err) {
    IgfsException err0 = err instanceof IgfsException ? (IgfsException)err : null;

    IgfsException igfsErr = X.cause(err, IgfsException.class);

    // Walk the cause chain to find the innermost IGFS exception.
    while (igfsErr != null && igfsErr != err0) {
        err0 = igfsErr;

        igfsErr = X.cause(err0.getCause(), IgfsException.class);
    }

    // If initial exception is already IGFS exception and no inner stuff exists, just return it unchanged.
    if (err0 != err) {
        if (err0 != null)
            // Dealing with a kind of IGFS error, wrap it once again, preserving message and root cause.
            err0 = newIgfsException(err0.getClass(), err0.getMessage(), err0);
        else {
            if (err instanceof ClusterTopologyServerNotFoundException)
                err0 = new IgfsException("Cache server nodes not found.", err);
            else
                // Unknown error nature.
                err0 = new IgfsException("Generic IGFS error occurred.", err);
        }
    }

    return err0;
}
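A typical call site wraps the body of a public IGFS operation and converts whatever escapes into an IgfsException before it reaches the user. The sketch below is illustrative only: the surrounding method and the meta.info(path) call are hypothetical, not taken from the snippet above.

public IgfsFile info(IgfsPath path) {
    try {
        // May fail with ClusterTopologyServerNotFoundException if no server nodes host the data cache.
        return meta.info(path);
    }
    catch (Exception e) {
        // toIgfsException() maps the missing-server case to "Cache server nodes not found." and
        // everything else to a generic IGFS error, preserving the original exception as the cause.
        throw IgfsUtils.toIgfsException(e);
    }
}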
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridCacheAdapter, method syncOp.
/**
* @param op Cache operation.
* @param <T> Return type.
* @return Operation result.
* @throws IgniteCheckedException If operation failed.
*/
@SuppressWarnings({ "ErrorNotRethrown", "AssignmentToCatchBlockParameter" })
@Nullable
private <T> T syncOp(SyncOp<T> op) throws IgniteCheckedException {
    checkJta();

    awaitLastFut();

    GridNearTxLocal tx = checkCurrentTx();

    if (tx == null || tx.implicit()) {
        TransactionConfiguration tCfg = CU.transactionConfiguration(ctx, ctx.kernalContext().config());

        CacheOperationContext opCtx = ctx.operationContextPerCall();

        int retries = opCtx != null && opCtx.noRetries() ? 1 : MAX_RETRIES;

        for (int i = 0; i < retries; i++) {
            tx = ctx.tm().newTx(
                true,
                op.single(),
                ctx.systemTx() ? ctx : null,
                ctx.mvccEnabled() ? PESSIMISTIC : OPTIMISTIC,
                ctx.mvccEnabled() ? REPEATABLE_READ : READ_COMMITTED,
                tCfg.getDefaultTxTimeout(),
                !ctx.skipStore(),
                ctx.mvccEnabled(),
                0,
                null,
                false);

            assert tx != null;

            try {
                T t = op.op(tx);

                assert tx.done() : "Transaction is not done: " + tx;

                return t;
            }
            catch (IgniteInterruptedCheckedException | IgniteTxHeuristicCheckedException |
                NodeStoppingException | IgniteConsistencyViolationException e) {
                throw e;
            }
            catch (IgniteCheckedException e) {
                if (!(e instanceof IgniteTxRollbackCheckedException)) {
                    try {
                        tx.rollback();

                        if (!(e instanceof TransactionCheckedException))
                            e = new IgniteTxRollbackCheckedException("Transaction has been rolled back: " + tx.xid(), e);
                    }
                    catch (IgniteCheckedException | AssertionError | RuntimeException e1) {
                        U.error(log, "Failed to rollback transaction (cache may contain stale locks): " + CU.txString(tx), e1);

                        if (e != e1)
                            e.addSuppressed(e1);
                    }
                }

                if (X.hasCause(e, ClusterTopologyCheckedException.class) && i != retries - 1) {
                    ClusterTopologyCheckedException topErr = e.getCause(ClusterTopologyCheckedException.class);

                    if (!(topErr instanceof ClusterTopologyServerNotFoundException)) {
                        AffinityTopologyVersion topVer = tx.topologyVersion();

                        assert topVer != null && topVer.topologyVersion() > 0 : tx;

                        AffinityTopologyVersion awaitVer = new AffinityTopologyVersion(topVer.topologyVersion() + 1, 0);

                        ctx.shared().exchange().affinityReadyFuture(awaitVer).get();

                        continue;
                    }
                }

                throw e;
            }
            catch (RuntimeException e) {
                try {
                    tx.rollback();
                }
                catch (IgniteCheckedException | AssertionError | RuntimeException e1) {
                    U.error(log, "Failed to rollback transaction " + CU.txString(tx), e1);
                }

                throw e;
            }
            finally {
                ctx.tm().resetContext();

                if (ctx.isNear())
                    ctx.near().dht().context().tm().resetContext();
            }
        }

        // Should not happen.
        throw new IgniteCheckedException("Failed to perform cache operation (maximum number of retries exceeded).");
    }
    else
        return op.op(tx);
}
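The part of syncOp that involves ClusterTopologyServerNotFoundException is the retry branch: a plain ClusterTopologyCheckedException means the topology changed, so the method waits for the next affinity topology version and retries, while ClusterTopologyServerNotFoundException means no server nodes are left, so retrying is pointless and the error is rethrown. Below is a minimal standalone sketch of that decision with hypothetical names (retryOnTopologyChange, op, maxRetries); java.util.concurrent.Callable stands in for the cache operation, and only X.hasCause() from org.apache.ignite.internal.util.typedef.X is taken from the snippet above.

<T> T retryOnTopologyChange(Callable<T> op, int maxRetries) throws Exception {
    for (int i = 0; i < maxRetries; i++) {
        try {
            return op.call();
        }
        catch (Exception e) {
            boolean topChanged = X.hasCause(e, ClusterTopologyCheckedException.class);
            boolean noServers = X.hasCause(e, ClusterTopologyServerNotFoundException.class);

            // Retry only when the topology merely changed; with no server nodes left, fail fast.
            if (!topChanged || noServers || i == maxRetries - 1)
                throw e;
        }
    }

    throw new IllegalStateException("unreachable");
}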
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class IgniteTxImplicitSingleStateImpl, method validateTopology.
/** {@inheritDoc} */
@Override public IgniteCheckedException validateTopology(
    GridCacheSharedContext cctx,
    boolean read,
    GridDhtTopologyFuture topFut
) {
    if (cacheCtx == null)
        return null;

    Throwable err = null;

    if (entry != null) {
        // The entry is a singleton list here, so the key is taken from the first element.
        KeyCacheObject key = entry.get(0).key();

        err = topFut.validateCache(cacheCtx, recovery, read, key, null);
    }

    if (err != null) {
        return new IgniteCheckedException("Failed to perform cache operation " +
            "(cache topology is not valid): " + U.maskName(cacheCtx.name()), err);
    }

    if (CU.affinityNodes(cacheCtx, topFut.topologyVersion()).isEmpty()) {
        return new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
            "(all partition nodes left the grid): " + cacheCtx.name());
    }

    return null;
}
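For an implicit single-key transaction there is at most one entry, so only that key is validated; if validation passes but the cache has no affinity (server) nodes at the future's topology version, the method returns a ClusterTopologyServerNotFoundException rather than throwing it. A caller is expected to check the result, roughly as in the hypothetical sketch below (the tx.txState() accessor, onDone() and the surrounding prepare-future context are assumptions, not shown in this snippet).

IgniteCheckedException err = tx.txState().validateTopology(cctx, /*read*/ false, topFut);

if (err != null) {
    // Fail the prepare future with the topology error instead of mapping keys.
    onDone(err);

    return;
}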
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class IgniteTxStateImpl, method validateTopology.
/** {@inheritDoc} */
@Override public IgniteCheckedException validateTopology(
    GridCacheSharedContext cctx,
    boolean read,
    GridDhtTopologyFuture topFut
) {
    Map<Integer, Set<KeyCacheObject>> keysByCacheId = new HashMap<>();

    for (IgniteTxKey key : txMap.keySet()) {
        Set<KeyCacheObject> set = keysByCacheId.get(key.cacheId());

        if (set == null)
            keysByCacheId.put(key.cacheId(), set = new HashSet<>());

        set.add(key.key());
    }

    for (Map.Entry<Integer, Set<KeyCacheObject>> e : keysByCacheId.entrySet()) {
        int cacheId = e.getKey();

        GridCacheContext ctx = cctx.cacheContext(cacheId);

        assert ctx != null : cacheId;

        CacheInvalidStateException err = topFut.validateCache(ctx, recovery(), read, null, e.getValue());

        if (err != null)
            return err;
    }

    for (int i = 0; i < activeCacheIds.size(); i++) {
        int cacheId = activeCacheIds.get(i);

        GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId);

        if (CU.affinityNodes(cacheCtx, topFut.topologyVersion()).isEmpty()) {
            return new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
                "(all partition nodes left the grid): " + cacheCtx.name());
        }
    }

    return null;
}
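The first loop only groups the transaction's keys by cache ID before validating each cache once. On Java 8+ the same grouping can be written more compactly with Map.computeIfAbsent, for example:

for (IgniteTxKey key : txMap.keySet())
    keysByCacheId.computeIfAbsent(key.cacheId(), id -> new HashSet<>()).add(key.key());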
Use of org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException in project ignite by apache.
The class GridDhtColocatedLockFuture, method map0.
/**
* @param keys Keys to map.
* @param remap Remap flag.
* @param topLocked Topology locked flag.
* @throws IgniteCheckedException If mapping failed.
*/
private synchronized void map0(
    Collection<KeyCacheObject> keys,
    boolean remap,
    boolean topLocked
) throws IgniteCheckedException {
    try {
        AffinityTopologyVersion topVer = this.topVer;

        assert topVer != null;
        assert topVer.topologyVersion() > 0;

        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
                "(all partition nodes left the grid): " + cctx.name()));

            return;
        }

        boolean clientNode = cctx.kernalContext().clientNode();

        assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));

        // First assume this node is primary for all keys passed in.
        if (!clientNode && mapAsPrimary(keys, topVer))
            return;

        mappings = new ArrayDeque<>();

        // Assign keys to primary nodes.
        GridNearLockMapping map = null;

        for (KeyCacheObject key : keys) {
            GridNearLockMapping updated = map(key, map, topVer);

            // If new mapping was created, add to collection.
            if (updated != map) {
                mappings.add(updated);

                if (tx != null && updated.node().isLocal())
                    tx.colocatedLocallyMapped(true);
            }

            map = updated;
        }

        if (isDone()) {
            if (log.isDebugEnabled())
                log.debug("Abandoning (re)map because future is done: " + this);

            return;
        }

        if (log.isDebugEnabled())
            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

        boolean hasRmtNodes = false;

        boolean first = true;

        // Create mini futures.
        for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
            GridNearLockMapping mapping = iter.next();

            ClusterNode node = mapping.node();

            Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();

            boolean loc = node.equals(cctx.localNode());

            assert !mappedKeys.isEmpty();

            GridNearLockRequest req = null;

            Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());

            for (KeyCacheObject key : mappedKeys) {
                IgniteTxKey txKey = cctx.txKey(key);

                GridDistributedCacheEntry entry = null;

                if (tx != null) {
                    IgniteTxEntry txEntry = tx.entry(txKey);

                    if (txEntry != null) {
                        entry = (GridDistributedCacheEntry)txEntry.cached();

                        if (entry != null && loc == entry.detached()) {
                            entry = cctx.colocated().entryExx(key, topVer, true);

                            txEntry.cached(entry);
                        }
                    }
                }

                boolean explicit;

                while (true) {
                    try {
                        if (entry == null)
                            entry = cctx.colocated().entryExx(key, topVer, true);

                        if (!cctx.isAll(entry, filter)) {
                            if (log.isDebugEnabled())
                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                            onComplete(false, false);

                            return;
                        }

                        assert loc ^ entry.detached() : "Invalid entry [loc=" + loc + ", entry=" + entry + ']';

                        GridCacheMvccCandidate cand = addEntry(entry);

                        // Will either return value from dht cache or null if this is a miss.
                        IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.detached() ? null :
                            ((GridDhtCacheEntry)entry).versionedValue(topVer);

                        GridCacheVersion dhtVer = null;

                        if (val != null) {
                            dhtVer = val.get1();

                            valMap.put(key, val);
                        }

                        if (cand != null && !cand.reentry()) {
                            if (req == null) {
                                boolean clientFirst = false;

                                if (first) {
                                    clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());

                                    first = false;
                                }

                                assert !implicitTx() && !implicitSingleTx() : tx;

                                req = new GridNearLockRequest(
                                    cctx.cacheId(),
                                    topVer,
                                    cctx.nodeId(),
                                    threadId,
                                    futId,
                                    lockVer,
                                    inTx(),
                                    read,
                                    retval,
                                    isolation(),
                                    isInvalidate(),
                                    timeout,
                                    mappedKeys.size(),
                                    inTx() ? tx.size() : mappedKeys.size(),
                                    inTx() && tx.syncMode() == FULL_SYNC,
                                    inTx() ? tx.taskNameHash() : 0,
                                    read ? createTtl : -1L,
                                    read ? accessTtl : -1L,
                                    skipStore,
                                    keepBinary,
                                    clientFirst,
                                    false,
                                    cctx.deploymentEnabled(),
                                    inTx() ? tx.label() : null);

                                mapping.request(req);
                            }

                            distributedKeys.add(key);

                            if (tx != null)
                                tx.addKeyMapping(txKey, mapping.node());

                            req.addKeyBytes(
                                key,
                                retval,
                                dhtVer, // Include DHT version to match remote DHT entry.
                                cctx);
                        }

                        explicit = inTx() && cand == null;

                        if (explicit)
                            tx.addKeyMapping(txKey, mapping.node());

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);

                        entry = null;
                    }
                }

                // Mark mapping explicit lock flag.
                if (explicit) {
                    boolean marked = tx != null && tx.markExplicit(node.id());

                    assert tx == null || marked;
                }
            }

            if (!distributedKeys.isEmpty()) {
                mapping.distributedKeys(distributedKeys);

                hasRmtNodes |= !mapping.node().isLocal();
            }
            else {
                assert mapping.request() == null;

                iter.remove();
            }
        }
    }
    finally {
        /**
         * Notify ready {@link mappings} waiters. See {@link #cancel()}
         */
        if (tx != null) {
            mappingsReady = true;

            notifyAll();
        }
    }

    proceedMapping();
}
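The topology check at the top of map0() is the same guard seen in the validateTopology() implementations above: if the cache has no affinity nodes at the locked topology version, the lock future completes itself with a ClusterTopologyServerNotFoundException instead of trying to map keys. Extracted into a standalone helper it might look like the sketch below; the ensureServersPresent name and the GridFutureAdapter parameter are hypothetical, only CU.affinityNodes() and the exception come from the snippet.

private boolean ensureServersPresent(GridCacheContext<?, ?> cctx, AffinityTopologyVersion topVer,
    GridFutureAdapter<Boolean> fut) {
    if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        // Complete the future exceptionally; the caller should not attempt key mapping.
        fut.onDone(new ClusterTopologyServerNotFoundException(
            "Failed to map keys for cache (all partition nodes left the grid): " + cctx.name()));

        return false;
    }

    return true;
}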