use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
the class GridCacheTxRecoveryFuture method prepare.
/**
 * Initializes future.
 */
@SuppressWarnings("ConstantConditions")
public void prepare() {
    if (nearTxCheck) {
        UUID nearNodeId = tx.eventNodeId();
        if (cctx.localNodeId().equals(nearNodeId)) {
            IgniteInternalFuture<Boolean> fut = cctx.tm().txCommitted(tx.nearXidVersion());
            fut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
                @Override
                public void apply(IgniteInternalFuture<Boolean> fut) {
                    try {
                        onDone(fut.get());
                    } catch (IgniteCheckedException e) {
                        onDone(e);
                    }
                }
            });
        } else {
            MiniFuture fut = new MiniFuture(tx.eventNodeId());
            add(fut);
            GridCacheTxRecoveryRequest req = new GridCacheTxRecoveryRequest(tx, 0, true, futureId(), fut.futureId(), tx.activeCachesDeploymentEnabled());
            try {
                cctx.io().send(nearNodeId, req, tx.ioPolicy());
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("Recovery fut, sent request near tx [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + nearNodeId + ']');
                }
            } catch (ClusterTopologyCheckedException ignore) {
                fut.onNodeLeft(nearNodeId);
            } catch (IgniteCheckedException e) {
                if (msgLog.isDebugEnabled()) {
                    msgLog.debug("Recovery fut, failed to send request near tx [txId=" + tx.nearXidVersion() + ", dhtTxId=" + tx.xidVersion() + ", node=" + nearNodeId + ", err=" + e + ']');
                }
                fut.onError(e);
            }
            markInitialized();
        }
        return;
    }
    // First check transactions on local node.
    int locTxNum = nodeTransactions(cctx.localNodeId());
    if (locTxNum > 1) {
        IgniteInternalFuture<Boolean> fut = cctx.tm().txsPreparedOrCommitted(tx.nearXidVersion(), locTxNum);
        if (fut == null || fut.isDone()) {
            boolean prepared;
            try {
                prepared = fut == null ? true : fut.get();
            } catch (IgniteCheckedException e) {
                U.error(log, "Check prepared transaction future failed: " + e, e);
                prepared = false;
            }
            if (!prepared) {
                onDone(false);
                markInitialized();
                return;
            }
        } else {
            fut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
                @Override
                public void apply(IgniteInternalFuture<Boolean> fut) {
                    boolean prepared;
                    try {
                        prepared = fut.get();
                    } catch (IgniteCheckedException e) {
                        U.error(log, "Check prepared transaction future failed: " + e, e);
                        prepared = false;
                    }
                    if (!prepared) {
                        onDone(false);
                        markInitialized();
                    } else
                        proceedPrepare();
                }
            });
            return;
        }
    }
    proceedPrepare();
}
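The local-transaction branch above follows a done-or-listen idiom that appears throughout Ignite internals: if the check future is already complete, its result is consumed synchronously; otherwise a listener finishes the recovery future later. Below is a minimal, library-agnostic sketch of that idiom using java.util.concurrent.CompletableFuture; checkPrepared and finish are hypothetical stand-ins for cctx.tm().txsPreparedOrCommitted(...) and onDone()/proceedPrepare().

import java.util.concurrent.CompletableFuture;

public class DoneOrListenSketch {
    // Hypothetical stand-in for cctx.tm().txsPreparedOrCommitted(...).
    static CompletableFuture<Boolean> checkPrepared() {
        return CompletableFuture.completedFuture(true);
    }

    // Hypothetical stand-in for onDone(false) / proceedPrepare().
    static void finish(boolean prepared) {
        System.out.println(prepared ? "proceed with prepare" : "finish recovery with 'false'");
    }

    public static void main(String[] args) {
        CompletableFuture<Boolean> fut = checkPrepared();

        if (fut.isDone()) {
            // Already completed: read the result on the calling thread.
            finish(fut.join());
        }
        else {
            // Not completed yet: register a callback and return immediately.
            // A failed check is mapped to "not prepared", mirroring the catch blocks above.
            fut.handle((prepared, err) -> err == null && prepared)
                .thenAccept(DoneOrListenSketch::finish);
        }
    }
}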
use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
the class DataStreamProcessor method localUpdate.
/**
 * @param nodeId Node id.
 * @param req Request.
 * @param updater Updater.
 * @param topic Topic.
 */
private void localUpdate(final UUID nodeId, final DataStreamerRequest req, final StreamReceiver<K, V> updater, final Object topic) {
    final boolean allowOverwrite = !(updater instanceof DataStreamerImpl.IsolatedUpdater);
    try {
        GridCacheAdapter cache = ctx.cache().internalCache(req.cacheName());
        if (cache == null) {
            throw new IgniteCheckedException("Cache not created or already destroyed: " + req.cacheName());
        }
        GridCacheContext cctx = cache.context();
        DataStreamerUpdateJob job = null;
        GridFutureAdapter waitFut = null;
        if (!allowOverwrite)
            cctx.topology().readLock();
        GridDhtTopologyFuture topWaitFut = null;
        try {
            Exception remapErr = null;
            AffinityTopologyVersion streamerFutTopVer = null;
            if (!allowOverwrite) {
                GridDhtTopologyFuture topFut = cctx.topologyVersionFuture();
                AffinityTopologyVersion topVer = topFut.isDone() ? topFut.topologyVersion() : topFut.initialVersion();
                if (topVer.compareTo(req.topologyVersion()) > 0) {
                    remapErr = new ClusterTopologyCheckedException("DataStreamer will retry data transfer at stable topology [reqTop=" + req.topologyVersion() + ", topVer=" + topFut.initialVersion() + ", node=remote]");
                } else if (!topFut.isDone())
                    topWaitFut = topFut;
                else
                    streamerFutTopVer = topFut.topologyVersion();
            }
            if (remapErr != null) {
                sendResponse(nodeId, topic, req.requestId(), remapErr, req.forceLocalDeployment());
                return;
            } else if (topWaitFut == null) {
                job = new DataStreamerUpdateJob(ctx, log, req.cacheName(), req.entries(), req.ignoreDeploymentOwnership(), req.skipStore(), req.keepBinary(), updater);
                waitFut = allowOverwrite ? null : cctx.mvcc().addDataStreamerFuture(streamerFutTopVer);
            }
        } finally {
            if (!allowOverwrite)
                cctx.topology().readUnlock();
        }
        if (topWaitFut != null) {
            // Need to call 'listen' after the topology read lock is released.
            topWaitFut.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
                @Override
                public void apply(IgniteInternalFuture<AffinityTopologyVersion> e) {
                    localUpdate(nodeId, req, updater, topic);
                }
            });
            return;
        }
        try {
            job.call();
            sendResponse(nodeId, topic, req.requestId(), null, req.forceLocalDeployment());
        } finally {
            if (waitFut != null)
                waitFut.onDone();
        }
    } catch (Throwable e) {
        sendResponse(nodeId, topic, req.requestId(), e, req.forceLocalDeployment());
        if (e instanceof Error)
            throw (Error) e;
    }
}
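On the sending side this path is driven by the public IgniteDataStreamer API; the allowOverwrite flag decides whether batches go through the IsolatedUpdater branch that takes the topology read lock above. A rough usage sketch follows; the cache and value names are illustrative, and a cache named "myCache" is assumed to exist already.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class StreamerUsageSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Assumes a cache named "myCache" has already been created.
            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("myCache")) {
                // The default (allowOverwrite = false) uses the isolated updater,
                // i.e. the branch in localUpdate() that may answer with a
                // ClusterTopologyCheckedException and let the streamer remap.
                streamer.allowOverwrite(false);

                for (int i = 0; i < 10_000; i++)
                    streamer.addData(i, "value-" + i);
            }
        }
    }
}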
use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
the class GridJobSiblingImpl method cancel.
/**
 * {@inheritDoc}
 */
@Override
public void cancel() {
    GridTaskSessionImpl ses = ctx.session().getSession(sesId);
    Collection<ClusterNode> nodes = ses == null ? ctx.discovery().remoteNodes() : ctx.discovery().nodes(ses.getTopology());
    for (ClusterNode node : nodes) {
        if (!ctx.localNodeId().equals(node.id())) {
            try {
                ctx.io().sendToGridTopic(node, TOPIC_JOB_CANCEL, new GridJobCancelRequest(sesId, jobId), SYSTEM_POOL);
            } catch (ClusterTopologyCheckedException e) {
                IgniteLogger log = ctx.log(GridJobSiblingImpl.class);
                if (log.isDebugEnabled())
                    log.debug("Failed to send cancel request, node left [nodeId=" + node.id() + ", ses=" + ses + ']');
            } catch (IgniteCheckedException e) {
                // Avoid stack trace for left nodes.
                if (ctx.discovery().node(node.id()) != null && ctx.discovery().pingNodeNoError(node.id()))
                    U.error(ctx.log(GridJobSiblingImpl.class), "Failed to send cancel request to node [nodeId=" + node.id() + ", ses=" + ses + ']', e);
            }
        }
    }
    // Cancel local jobs directly.
    ctx.job().cancelJob(sesId, jobId, false);
}
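From user code this cancel() is typically reached through the public compute API, for example by cancelling the job siblings obtained from a ComputeTaskSession (normally injected into a task with @TaskSessionResource). A minimal sketch; the cancelAll helper is hypothetical.

import java.util.Collection;

import org.apache.ignite.compute.ComputeJobSibling;
import org.apache.ignite.compute.ComputeTaskSession;

public class CancelSiblingsSketch {
    /**
     * Cancels every sibling of the current task. Each sibling.cancel() call ends up in
     * GridJobSiblingImpl.cancel() above, which broadcasts a GridJobCancelRequest and
     * quietly skips nodes that have already left the topology.
     */
    static void cancelAll(ComputeTaskSession ses) {
        Collection<ComputeJobSibling> siblings = ses.getJobSiblings();

        for (ComputeJobSibling sibling : siblings)
            sibling.cancel();
    }
}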
use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
the class GridServiceProcessor method reassign.
/**
 * Reassigns service to nodes.
 *
 * @param dep Service deployment.
 * @param topVer Topology version.
 * @throws IgniteCheckedException If failed.
 */
private void reassign(GridServiceDeployment dep, AffinityTopologyVersion topVer) throws IgniteCheckedException {
    IgniteInternalCache<Object, Object> cache = serviceCache();
    ServiceConfiguration cfg = dep.configuration();
    Object nodeFilter = cfg.getNodeFilter();
    if (nodeFilter != null)
        ctx.resource().injectGeneric(nodeFilter);
    int totalCnt = cfg.getTotalCount();
    int maxPerNodeCnt = cfg.getMaxPerNodeCount();
    String cacheName = cfg.getCacheName();
    Object affKey = cfg.getAffinityKey();
    while (true) {
        GridServiceAssignments assigns = new GridServiceAssignments(cfg, dep.nodeId(), topVer.topologyVersion());
        Collection<ClusterNode> nodes;
        // Call node filter outside of transaction.
        if (affKey == null) {
            nodes = ctx.discovery().nodes(topVer);
            if (assigns.nodeFilter() != null) {
                Collection<ClusterNode> nodes0 = new ArrayList<>();
                for (ClusterNode node : nodes) {
                    if (assigns.nodeFilter().apply(node))
                        nodes0.add(node);
                }
                nodes = nodes0;
            }
        } else
            nodes = null;
        try (GridNearTxLocal tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) {
            GridServiceAssignmentsKey key = new GridServiceAssignmentsKey(cfg.getName());
            GridServiceAssignments oldAssigns = (GridServiceAssignments) cache.get(key);
            Map<UUID, Integer> cnts = new HashMap<>();
            if (affKey != null) {
                ClusterNode n = ctx.affinity().mapKeyToNode(cacheName, affKey, topVer);
                if (n != null) {
                    int cnt = maxPerNodeCnt == 0 ? (totalCnt == 0 ? 1 : totalCnt) : maxPerNodeCnt;
                    cnts.put(n.id(), cnt);
                }
            } else {
                if (!nodes.isEmpty()) {
                    int size = nodes.size();
                    int perNodeCnt = totalCnt != 0 ? totalCnt / size : maxPerNodeCnt;
                    int remainder = totalCnt != 0 ? totalCnt % size : 0;
                    if (perNodeCnt >= maxPerNodeCnt && maxPerNodeCnt != 0) {
                        perNodeCnt = maxPerNodeCnt;
                        remainder = 0;
                    }
                    for (ClusterNode n : nodes)
                        cnts.put(n.id(), perNodeCnt);
                    assert perNodeCnt >= 0;
                    assert remainder >= 0;
                    if (remainder > 0) {
                        int cnt = perNodeCnt + 1;
                        if (oldAssigns != null) {
                            Collection<UUID> used = new HashSet<>();
                            // Avoid redundant moving of services.
                            for (Map.Entry<UUID, Integer> e : oldAssigns.assigns().entrySet()) {
                                // Do not assign services to left nodes.
                                if (ctx.discovery().node(e.getKey()) == null)
                                    continue;
                                // If old count and new count match, then reuse the assignment.
                                if (e.getValue() == cnt) {
                                    cnts.put(e.getKey(), cnt);
                                    used.add(e.getKey());
                                    if (--remainder == 0)
                                        break;
                                }
                            }
                            if (remainder > 0) {
                                List<Map.Entry<UUID, Integer>> entries = new ArrayList<>(cnts.entrySet());
                                // Randomize.
                                Collections.shuffle(entries);
                                for (Map.Entry<UUID, Integer> e : entries) {
                                    // Assign only the ones that have not been reused from previous assignments.
                                    if (!used.contains(e.getKey())) {
                                        if (e.getValue() < maxPerNodeCnt || maxPerNodeCnt == 0) {
                                            e.setValue(e.getValue() + 1);
                                            if (--remainder == 0)
                                                break;
                                        }
                                    }
                                }
                            }
                        } else {
                            List<Map.Entry<UUID, Integer>> entries = new ArrayList<>(cnts.entrySet());
                            // Randomize.
                            Collections.shuffle(entries);
                            for (Map.Entry<UUID, Integer> e : entries) {
                                e.setValue(e.getValue() + 1);
                                if (--remainder == 0)
                                    break;
                            }
                        }
                    }
                }
            }
            assigns.assigns(cnts);
            cache.put(key, assigns);
            tx.commit();
            break;
        } catch (ClusterTopologyCheckedException e) {
            if (log.isDebugEnabled())
                log.debug("Topology changed while reassigning (will retry): " + e.getMessage());
            U.sleep(10);
        }
    }
}
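The perNodeCnt/remainder math above is fed by two knobs on ServiceConfiguration: the total instance count and the per-node cap. A hedged deployment sketch using the public API follows; the service name and the no-op implementation are illustrative.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.services.Service;
import org.apache.ignite.services.ServiceConfiguration;
import org.apache.ignite.services.ServiceContext;

public class ServiceDeploySketch {
    // Illustrative no-op service implementation.
    static class MyServiceImpl implements Service {
        @Override public void cancel(ServiceContext ctx) { /* no-op */ }
        @Override public void init(ServiceContext ctx) { /* no-op */ }
        @Override public void execute(ServiceContext ctx) { /* no-op */ }
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ServiceConfiguration cfg = new ServiceConfiguration();

            cfg.setName("myService");
            cfg.setService(new MyServiceImpl());

            // These two values drive the per-node count calculation in reassign():
            // at most 5 instances cluster-wide, capped at 2 on any single node.
            cfg.setTotalCount(5);
            cfg.setMaxPerNodeCount(2);

            ignite.services().deploy(cfg);
        }
    }
}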
use of org.apache.ignite.internal.cluster.ClusterTopologyCheckedException in project ignite by apache.
the class GridServiceProxy method invokeMethod.
/**
 * Invokes the method.
 *
 * @param mtd Method.
 * @param args Arguments.
 * @return Result.
 */
@SuppressWarnings("BusyWait")
public Object invokeMethod(final Method mtd, final Object[] args) {
    if (U.isHashCodeMethod(mtd))
        return System.identityHashCode(proxy);
    else if (U.isEqualsMethod(mtd))
        return proxy == args[0];
    else if (U.isToStringMethod(mtd))
        return GridServiceProxy.class.getSimpleName() + " [name=" + name + ", sticky=" + sticky + ']';
    ctx.gateway().readLock();
    try {
        final long startTime = U.currentTimeMillis();
        while (true) {
            ClusterNode node = null;
            try {
                node = nodeForService(name, sticky);
                if (node == null)
                    throw new IgniteException("Failed to find deployed service: " + name);
                // If service is deployed locally, then execute locally.
                if (node.isLocal()) {
                    ServiceContextImpl svcCtx = ctx.service().serviceContext(name);
                    if (svcCtx != null) {
                        Service svc = svcCtx.service();
                        if (svc != null)
                            return mtd.invoke(svc, args);
                    }
                } else {
                    if (node.version().compareTo(SVC_POOL_SINCE_VER) >= 0)
                        ctx.task().setThreadContext(TC_IO_POLICY, GridIoPolicy.SERVICE_POOL);
                    // Execute service remotely.
                    return ctx.closure().callAsyncNoFailover(GridClosureCallMode.BROADCAST, new ServiceProxyCallable(mtd.getName(), name, mtd.getParameterTypes(), args), Collections.singleton(node), false, waitTimeout, true).get();
                }
            } catch (GridServiceNotFoundException | ClusterTopologyCheckedException e) {
                if (log.isDebugEnabled())
                    log.debug("Service was not found or topology changed (will retry): " + e.getMessage());
            } catch (RuntimeException | Error e) {
                throw e;
            } catch (IgniteCheckedException e) {
                throw U.convertException(e);
            } catch (Exception e) {
                throw new IgniteException(e);
            }
            // If we are here, the service was not found or the topology changed.
            // In this case, we erase the previous sticky node and try again.
            rmtNode.compareAndSet(node, null);
            // Add sleep between retries to avoid busy-wait loops.
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IgniteException(e);
            }
            if (waitTimeout > 0 && U.currentTimeMillis() - startTime >= waitTimeout)
                throw new IgniteException("Service acquire timeout was reached, stopping. [timeout=" + waitTimeout + "]");
        }
    } finally {
        ctx.gateway().readUnlock();
    }
}
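The retry loop above is what makes service proxies obtained through the public API resilient to topology changes: a GridServiceNotFoundException or ClusterTopologyCheckedException simply clears the sticky node and triggers another attempt. A rough client-side sketch follows; the service name "myCounter" and the MyCounter interface are illustrative, and the service is assumed to be deployed already.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;

public class ServiceProxySketch {
    // Illustrative service interface; it must match the deployed service's interface.
    public interface MyCounter {
        int increment();
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // sticky = false: every call may pick a different node; invokeMethod() above
            // retries and re-picks a node whenever the topology changes mid-call.
            MyCounter counter = ignite.services().serviceProxy("myCounter", MyCounter.class, false);

            System.out.println("counter = " + counter.increment());
        }
    }
}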