Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class CacheAffinitySharedManager, method initCoordinatorCaches.
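GridFutureAdapter is Ignite's internal settable future: a producer thread completes it once with onDone(...), and consumers either block on get() or attach listeners with listen(...). Before the real usages below, here is a minimal sketch of that contract, assuming only the ignite-core artifact on the classpath (the lambda works because IgniteInClosure is a single-method interface):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class FutureAdapterSketch {
    public static void main(String[] args) throws IgniteCheckedException {
        GridFutureAdapter<String> fut = new GridFutureAdapter<>();

        // Listeners may be attached before completion; they fire on onDone().
        fut.listen(f -> {
            try {
                System.out.println("completed: " + f.get());
            }
            catch (IgniteCheckedException e) {
                System.err.println("failed: " + e.getMessage());
            }
        });

        // Producer side: complete with a result (or with a Throwable to fail the future).
        fut.onDone("hello");

        // get() no longer blocks once the future is done.
        System.out.println(fut.get());
    }
}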
/**
 * @param fut Exchange future.
 * @param newAff {@code True} if there are no older nodes with affinity info available.
 * @return Future completed when caches initialization is done.
 * @throws IgniteCheckedException If failed.
 */
public IgniteInternalFuture<?> initCoordinatorCaches(final GridDhtPartitionsExchangeFuture fut, final boolean newAff) throws IgniteCheckedException {
    boolean locJoin = fut.firstEvent().eventNode().isLocal();

    if (!locJoin)
        return null;

    final List<IgniteInternalFuture<AffinityTopologyVersion>> futs = Collections.synchronizedList(new ArrayList<>());

    final AffinityTopologyVersion topVer = fut.initialVersion();

    forAllRegisteredCacheGroups(new IgniteInClosureX<CacheGroupDescriptor>() {
        @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException {
            CacheGroupHolder grpHolder = getOrCreateGroupHolder(topVer, desc);

            if (grpHolder.affinity().idealAssignmentRaw() != null)
                return;

            // Need to initialize holders and affinity if this node became coordinator during this exchange.
            int grpId = desc.groupId();

            CacheGroupContext grp = cctx.cache().cacheGroup(grpId);

            if (grp == null) {
                grpHolder = CacheGroupNoAffOrFilteredHolder.create(cctx, desc, topVer, null);

                final GridAffinityAssignmentCache aff = grpHolder.affinity();

                if (newAff) {
                    if (!aff.lastVersion().equals(topVer))
                        calculateAndInit(fut.events(), aff, topVer);

                    grpHolder.topology(fut.context().events().discoveryCache()).beforeExchange(fut, true, false);
                }
                else {
                    List<GridDhtPartitionsExchangeFuture> exchFuts = cctx.exchange().exchangeFutures();

                    int idx = exchFuts.indexOf(fut);

                    assert idx >= 0 && idx < exchFuts.size() - 1 : "Invalid exchange futures state [cur=" + idx + ", total=" + exchFuts.size() + ']';

                    GridDhtPartitionsExchangeFuture futureToFetchAffinity = null;

                    for (int i = idx + 1; i < exchFuts.size(); i++) {
                        GridDhtPartitionsExchangeFuture prev = exchFuts.get(i);

                        assert prev.isDone() && prev.topologyVersion().compareTo(topVer) < 0;

                        if (prev.isMerged())
                            continue;

                        futureToFetchAffinity = prev;

                        break;
                    }

                    if (futureToFetchAffinity == null)
                        throw new IgniteCheckedException("Failed to find completed exchange future to fetch affinity.");

                    if (log.isDebugEnabled())
                        log.debug("Need to initialize affinity on coordinator [cacheGrp=" + desc.cacheOrGroupName() + ", prevAff=" + futureToFetchAffinity.topologyVersion() + ']');

                    GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cctx, desc.groupId(), futureToFetchAffinity.topologyVersion(), futureToFetchAffinity.events().discoveryCache());

                    fetchFut.init(false);

                    final GridFutureAdapter<AffinityTopologyVersion> affFut = new GridFutureAdapter<>();

                    final GridDhtPartitionsExchangeFuture futureToFetchAffinity0 = futureToFetchAffinity;

                    fetchFut.listen(new IgniteInClosureX<IgniteInternalFuture<GridDhtAffinityAssignmentResponse>>() {
                        @Override public void applyx(IgniteInternalFuture<GridDhtAffinityAssignmentResponse> fetchFut) throws IgniteCheckedException {
                            fetchAffinity(futureToFetchAffinity0.topologyVersion(), futureToFetchAffinity0.events(), futureToFetchAffinity0.events().discoveryCache(), aff, (GridDhtAssignmentFetchFuture)fetchFut);

                            aff.calculate(topVer, fut.events(), fut.events().discoveryCache());

                            affFut.onDone(topVer);

                            cctx.exchange().exchangerUpdateHeartbeat();
                        }
                    });

                    futs.add(affFut);
                }
            }
            else {
                grpHolder = new CacheGroupAffNodeHolder(grp);

                if (newAff) {
                    GridAffinityAssignmentCache aff = grpHolder.affinity();

                    if (!aff.lastVersion().equals(topVer))
                        calculateAndInit(fut.events(), aff, topVer);

                    grpHolder.topology(fut.context().events().discoveryCache()).beforeExchange(fut, true, false);
                }
            }

            grpHolders.put(grpHolder.groupId(), grpHolder);

            cctx.exchange().exchangerUpdateHeartbeat();

            fut.timeBag().finishLocalStage("Coordinator affinity cache init [grp=" + desc.cacheOrGroupName() + "]");
        }
    });

    if (!futs.isEmpty()) {
        GridCompoundFuture<AffinityTopologyVersion, ?> affFut = new GridCompoundFuture<>();

        for (IgniteInternalFuture<AffinityTopologyVersion> f : futs)
            affFut.add(f);

        affFut.markInitialized();

        return affFut;
    }

    return null;
}
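The pattern to note above: each asynchronous affinity fetch is bridged into its own GridFutureAdapter (completed from the fetch listener), and the adapters are folded into a GridCompoundFuture so the caller waits on all groups at once. A stripped-down sketch of that fan-in follows; the post-processing comment stands in (hypothetically) for the fetchAffinity-and-calculate step of the original:

import java.util.List;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridCompoundFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

class AffinityFanInSketch {
    /** Bridges each fetch future into an adapter and joins them, as initCoordinatorCaches does. */
    static <T> IgniteInternalFuture<?> joinAll(List<IgniteInternalFuture<T>> fetchFuts) {
        GridCompoundFuture<T, ?> compound = new GridCompoundFuture<>();

        for (IgniteInternalFuture<T> fetchFut : fetchFuts) {
            GridFutureAdapter<T> bridge = new GridFutureAdapter<>();

            fetchFut.listen(f -> {
                try {
                    // Post-processing would go here (fetchAffinity + calculate in the original).
                    bridge.onDone(f.get());
                }
                catch (Exception e) {
                    bridge.onDone(e);
                }
            });

            compound.add(bridge);
        }

        // No more parts will be added; the compound completes when every bridge does.
        compound.markInitialized();

        return compound;
    }
}

One difference worth noting: the sketch fails the bridge explicitly on error, whereas the original completes affFut only on success and relies on IgniteInClosureX to surface checked exceptions from the listener body.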
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class GridDhtPartitionDemander, method forceRebalance.
/**
 * @return Rebalance future.
 */
IgniteInternalFuture<Boolean> forceRebalance() {
    GridTimeoutObject obj = lastTimeoutObj.getAndSet(null);

    if (obj != null)
        ctx.time().removeTimeoutObject(obj);

    final GridDhtPartitionsExchangeFuture exchFut = lastExchangeFut;

    if (exchFut != null) {
        if (log.isDebugEnabled())
            log.debug("Forcing rebalance event for future: " + exchFut);

        final GridFutureAdapter<Boolean> fut = new GridFutureAdapter<>();

        exchFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
            @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> t) {
                if (t.error() == null) {
                    IgniteInternalFuture<Boolean> fut0 = ctx.exchange().forceRebalance(exchFut.exchangeId());

                    fut0.listen(new IgniteInClosure<IgniteInternalFuture<Boolean>>() {
                        @Override public void apply(IgniteInternalFuture<Boolean> fut1) {
                            try {
                                fut.onDone(fut1.get());
                            }
                            catch (Exception e) {
                                fut.onDone(e);
                            }
                        }
                    });
                }
                else
                    fut.onDone(t.error());
            }
        });

        return fut;
    }
    else if (log.isDebugEnabled())
        log.debug("Ignoring force rebalance request (no topology event happened yet).");

    return new GridFinishedFuture<>(true);
}
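The shape here recurs across Ignite: a caller-facing GridFutureAdapter is completed from inside a chain of listeners, collapsing two dependent asynchronous stages into one future while propagating errors from either stage. A generic sketch of that idiom, with the two stages abstracted out (in the original they are the exchange future and the forced-rebalance future):

import java.util.function.Function;
import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

class ChainSketch {
    /** Runs stageTwo after stageOne succeeds; a failure of either stage completes the result. */
    static <A, B> IgniteInternalFuture<B> chain(IgniteInternalFuture<A> stageOne, Function<A, IgniteInternalFuture<B>> stageTwo) {
        GridFutureAdapter<B> res = new GridFutureAdapter<>();

        stageOne.listen(f1 -> {
            if (f1.error() != null) {
                res.onDone(f1.error()); // First stage failed: short-circuit.

                return;
            }

            try {
                stageTwo.apply(f1.get()).listen(f2 -> {
                    try {
                        res.onDone(f2.get()); // Second-stage result, or its error via the catch.
                    }
                    catch (Exception e) {
                        res.onDone(e);
                    }
                });
            }
            catch (Exception e) {
                res.onDone(e);
            }
        });

        return res;
    }
}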
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class DataStreamProcessor, method localUpdate.
/**
 * @param nodeId Node id.
 * @param req Request.
 * @param updater Updater.
 * @param topic Topic.
 */
private void localUpdate(final UUID nodeId, final DataStreamerRequest req, final StreamReceiver<K, V> updater, final Object topic) {
    final boolean allowOverwrite = !(updater instanceof DataStreamerImpl.IsolatedUpdater);

    try {
        GridCacheAdapter cache = ctx.cache().internalCache(req.cacheName());

        if (cache == null)
            throw new IgniteCheckedException("Cache not created or already destroyed: " + req.cacheName());

        GridCacheContext cctx = cache.context();

        DataStreamerUpdateJob job = null;

        GridFutureAdapter waitFut = null;

        if (!allowOverwrite)
            cctx.topology().readLock();

        GridDhtTopologyFuture topWaitFut = null;

        try {
            Exception remapErr = null;

            AffinityTopologyVersion streamerFutTopVer = null;

            if (!allowOverwrite) {
                GridDhtTopologyFuture topFut = cctx.topologyVersionFuture();

                AffinityTopologyVersion topVer = topFut.isDone() ? topFut.topologyVersion() : topFut.initialVersion();

                if (topVer.compareTo(req.topologyVersion()) > 0) {
                    remapErr = new ClusterTopologyCheckedException("DataStreamer will retry data transfer at stable topology [reqTop=" + req.topologyVersion() + ", topVer=" + topFut.initialVersion() + ", node=remote]");
                }
                else if (!topFut.isDone())
                    topWaitFut = topFut;
                else
                    streamerFutTopVer = topFut.topologyVersion();
            }

            if (remapErr != null) {
                sendResponse(nodeId, topic, req.requestId(), remapErr, req.forceLocalDeployment());

                return;
            }
            else if (topWaitFut == null) {
                job = new DataStreamerUpdateJob(ctx, log, req.cacheName(), req.entries(), req.ignoreDeploymentOwnership(), req.skipStore(), req.keepBinary(), updater);

                waitFut = allowOverwrite ? null : cctx.mvcc().addDataStreamerFuture(streamerFutTopVer);
            }
        }
        finally {
            if (!allowOverwrite)
                cctx.topology().readUnlock();
        }

        if (topWaitFut != null) {
            // Need to call 'listen' after the topology read lock is released.
            topWaitFut.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
                @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> e) {
                    localUpdate(nodeId, req, updater, topic);
                }
            });

            return;
        }

        try {
            job.call();

            sendResponse(nodeId, topic, req.requestId(), null, req.forceLocalDeployment());
        }
        finally {
            if (waitFut != null)
                waitFut.onDone();
        }
    }
    catch (Throwable e) {
        sendResponse(nodeId, topic, req.requestId(), e, req.forceLocalDeployment());

        if (e instanceof Error)
            throw (Error)e;
    }
}
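Worth isolating from the noise: the method decides what to do under the topology read lock, but deliberately attaches the retry listener only after the lock is released, so the listener (which re-enters localUpdate) can never run while the lock is held. A self-contained sketch of that shape, using CompletableFuture and the hypothetical pendingExchange/process helpers as stand-ins for Ignite's topology future and update job:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Sketch of the "check under lock, listen after unlock, retry" shape of localUpdate. */
class RetryAfterTopologySketch {
    private final ReentrantReadWriteLock topLock = new ReentrantReadWriteLock();

    void update(String req) {
        CompletableFuture<Void> topWaitFut;

        topLock.readLock().lock();

        try {
            topWaitFut = pendingExchange(); // Non-null while a topology change is in progress.
        }
        finally {
            topLock.readLock().unlock();
        }

        if (topWaitFut != null) {
            // Attach the listener only after the read lock is released, as the original
            // comment insists; the retry re-runs the whole method from scratch.
            topWaitFut.thenRun(() -> update(req));

            return;
        }

        process(req);
    }

    private CompletableFuture<Void> pendingExchange() {
        return null; // Stub: no exchange pending in this sketch.
    }

    private void process(String req) {
        System.out.println("processed " + req);
    }
}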
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class DataStreamerImpl, method load0.
/**
 * @param entries Entries.
 * @param resFut Result future.
 * @param activeKeys Active keys.
 * @param remaps Remaps count.
 * @param remapNode Node for remap. In case an update with {@code allowOverwrite() == false} fails on one node,
 *      we don't need to send the update request to all affinity nodes again if the topology version has not changed.
 * @param remapTopVer Topology version.
 */
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut, @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps, ClusterNode remapNode, AffinityTopologyVersion remapTopVer) {
    try {
        assert entries != null;

        final boolean remap = remaps > 0;

        if (!remap) {
            // Failed data should be processed prior to new data.
            acquireRemapSemaphore();
        }

        if (!isWarningPrinted) {
            synchronized (this) {
                if (!allowOverwrite() && !isWarningPrinted) {
                    U.warn(log, "Data streamer will not overwrite existing cache entries for better performance (to change, set allowOverwrite to true)");
                }

                isWarningPrinted = true;
            }
        }

        Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();

        boolean initPda = ctx.deploy().enabled() && jobPda == null;

        GridCacheAdapter cache = ctx.cache().internalCache(cacheName);

        if (cache == null)
            throw new IgniteCheckedException("Cache not created or already destroyed.");

        GridCacheContext cctx = cache.context();

        GridCacheGateway gate = null;

        AffinityTopologyVersion topVer;

        if (!cctx.isLocal()) {
            GridDhtPartitionsExchangeFuture exchFut = ctx.cache().context().exchange().lastTopologyFuture();

            if (!exchFut.isDone()) {
                ExchangeActions acts = exchFut.exchangeActions();

                if (acts != null && acts.cacheStopped(CU.cacheId(cacheName)))
                    throw new CacheStoppedException(cacheName);
            }

            // It is safe to block here even if the cache gate is acquired.
            topVer = exchFut.get();
        }
        else
            topVer = ctx.cache().context().exchange().readyAffinityVersion();

        List<List<ClusterNode>> assignments = cctx.affinity().assignments(topVer);

        if (!allowOverwrite() && !cctx.isLocal()) {
            // Cases where cctx is required.
            gate = cctx.gate();

            gate.enter();
        }

        try {
            for (DataStreamerEntry entry : entries) {
                List<ClusterNode> nodes;

                try {
                    KeyCacheObject key = entry.getKey();

                    assert key != null;

                    if (initPda) {
                        if (cacheObjCtx.addDeploymentInfo())
                            jobPda = new DataStreamerPda(key.value(cacheObjCtx, false), entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null, rcvr);
                        else if (rcvr != null)
                            jobPda = new DataStreamerPda(rcvr);

                        initPda = false;
                    }

                    if (key.partition() == -1)
                        key.partition(cctx.affinity().partition(key, false));

                    if (!allowOverwrite() && remapNode != null && F.eq(topVer, remapTopVer))
                        nodes = Collections.singletonList(remapNode);
                    else
                        nodes = nodes(key, topVer, cctx);
                }
                catch (IgniteCheckedException e) {
                    resFut.onDone(e);

                    return;
                }

                if (F.isEmpty(nodes)) {
                    resFut.onDone(new ClusterTopologyException("Failed to map key to node (no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));

                    return;
                }

                for (ClusterNode node : nodes) {
                    Collection<DataStreamerEntry> col = mappings.get(node);

                    if (col == null)
                        mappings.put(node, col = new ArrayList<>());

                    col.add(entry);
                }
            }

            for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
                final ClusterNode node = e.getKey();

                final UUID nodeId = e.getKey().id();

                Buffer buf = bufMappings.get(nodeId);

                if (buf == null) {
                    Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));

                    if (old != null)
                        buf = old;
                }

                final Collection<DataStreamerEntry> entriesForNode = e.getValue();

                IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {
                    @Override public void apply(IgniteInternalFuture<?> t) {
                        try {
                            t.get();

                            if (activeKeys != null) {
                                for (DataStreamerEntry e0 : entriesForNode)
                                    activeKeys.remove(new KeyCacheObjectWrapper(e0.getKey()));

                                if (activeKeys.isEmpty())
                                    resFut.onDone();
                            }
                            else {
                                assert entriesForNode.size() == 1;

                                // That has been a single key, so complete the result future right away.
                                resFut.onDone();
                            }
                        }
                        catch (IgniteClientDisconnectedCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');

                            resFut.onDone(e1);
                        }
                        catch (IgniteCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');

                            if (cancelled)
                                resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " + DataStreamerImpl.this, e1));
                            else if (remaps + 1 > maxRemapCnt)
                                resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " + remaps, e1));
                            else if (X.hasCause(e1, IgniteClusterReadOnlyException.class))
                                resFut.onDone(new IgniteClusterReadOnlyException("Failed to finish operation. Cluster in read-only mode!", e1));
                            else {
                                try {
                                    remapSem.acquire();

                                    final Runnable r = new Runnable() {
                                        @Override public void run() {
                                            try {
                                                if (cancelled)
                                                    closedException();

                                                load0(entriesForNode, resFut, activeKeys, remaps + 1, node, topVer);
                                            }
                                            catch (Throwable ex) {
                                                resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed.", ex));
                                            }
                                            finally {
                                                remapSem.release();
                                            }
                                        }
                                    };

                                    dataToRemap.add(r);

                                    if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
                                        ctx.closure().callLocalSafe(new GPC<Boolean>() {
                                            @Override public Boolean call() {
                                                boolean locked = true;

                                                while (locked || !dataToRemap.isEmpty()) {
                                                    if (!locked && !remapOwning.compareAndSet(false, true))
                                                        return false;

                                                    try {
                                                        Runnable r = dataToRemap.poll();

                                                        if (r != null)
                                                            r.run();
                                                    }
                                                    finally {
                                                        if (!dataToRemap.isEmpty())
                                                            locked = true;
                                                        else {
                                                            remapOwning.set(false);

                                                            locked = false;
                                                        }
                                                    }
                                                }

                                                return true;
                                            }
                                        }, true);
                                    }
                                }
                                catch (InterruptedException e2) {
                                    resFut.onDone(e2);
                                }
                            }
                        }
                    }
                };

                GridCompoundFuture opFut = new SilentCompoundFuture();

                opFut.listen(lsnr);

                final List<GridFutureAdapter<?>> futs;

                try {
                    futs = buf.update(entriesForNode, topVer, assignments, opFut, remap);

                    opFut.markInitialized();
                }
                catch (IgniteInterruptedCheckedException e1) {
                    resFut.onDone(e1);

                    return;
                }

                if (ctx.discovery().node(nodeId) == null) {
                    if (bufMappings.remove(nodeId, buf)) {
                        final Buffer buf0 = buf;

                        waitAffinityAndRun(new GridPlainRunnable() {
                            @Override public void run() {
                                buf0.onNodeLeft();

                                if (futs != null) {
                                    Throwable ex = new ClusterTopologyCheckedException("Failed to wait for request completion (node has left): " + nodeId);

                                    for (int i = 0; i < futs.size(); i++)
                                        futs.get(i).onDone(ex);
                                }
                            }
                        }, ctx.discovery().topologyVersion(), false);
                    }
                }
            }
        }
        finally {
            if (gate != null)
                gate.leave();
        }
    }
    catch (Exception ex) {
        resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
    }
}
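Buried in the remap branch is a lock-free worker hand-off: retries are queued, and at most one thread at a time owns the drain loop, claiming ownership with compareAndSet and re-checking the queue after releasing it so no task is stranded. A self-contained sketch of just that loop, mirroring the dataToRemap/remapOwning logic above:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;

/** Sketch of the single-owner drain loop guarding DataStreamer remaps. */
class RemapDrainSketch {
    private final Queue<Runnable> dataToRemap = new ConcurrentLinkedQueue<>();

    private final AtomicBoolean remapOwning = new AtomicBoolean();

    void submit(Runnable task) {
        dataToRemap.add(task);

        // Only the thread that wins the CAS starts draining; the rest just enqueue.
        if (!remapOwning.get() && remapOwning.compareAndSet(false, true))
            drain();
    }

    private void drain() {
        boolean owning = true;

        while (owning || !dataToRemap.isEmpty()) {
            // The queue may refill after ownership is released: reclaim it or leave.
            if (!owning && !remapOwning.compareAndSet(false, true))
                return;

            try {
                Runnable r = dataToRemap.poll();

                if (r != null)
                    r.run();
            }
            finally {
                if (!dataToRemap.isEmpty())
                    owning = true;
                else {
                    remapOwning.set(false);

                    owning = false;
                }
            }
        }
    }
}

The post-release re-check is the point of the design: if another thread enqueues a task just after the owner clears the flag, either the owner reclaims the flag on its next iteration or the enqueuing thread's own CAS starts a fresh drain.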
Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.
The class ClientImpl, method pingNode.
/** {@inheritDoc} */
@Override public boolean pingNode(@NotNull final UUID nodeId) {
    if (nodeId.equals(getLocalNodeId()))
        return true;

    TcpDiscoveryNode node = rmtNodes.get(nodeId);

    if (node == null || !node.visible())
        return false;

    GridFutureAdapter<Boolean> fut = pingFuts.get(nodeId);

    if (fut == null) {
        fut = new GridFutureAdapter<>();

        GridFutureAdapter<Boolean> oldFut = pingFuts.putIfAbsent(nodeId, fut);

        if (oldFut != null)
            fut = oldFut;
        else {
            State state = this.state;

            if (spi.getSpiContext().isStopping() || state == STOPPED || state == SEGMENTED) {
                if (pingFuts.remove(nodeId, fut))
                    fut.onDone(false);

                return false;
            }
            else if (state == DISCONNECTED) {
                if (pingFuts.remove(nodeId, fut))
                    fut.onDone(new IgniteClientDisconnectedCheckedException(null, "Failed to ping node, client node disconnected."));
            }
            else {
                final GridFutureAdapter<Boolean> finalFut = fut;

                executorService.schedule(() -> {
                    if (pingFuts.remove(nodeId, finalFut)) {
                        if (ClientImpl.this.state == DISCONNECTED)
                            finalFut.onDone(new IgniteClientDisconnectedCheckedException(null, "Failed to ping node, client node disconnected."));
                        else
                            finalFut.onDone(false);
                    }
                }, spi.netTimeout, MILLISECONDS);

                sockWriter.sendMessage(new TcpDiscoveryClientPingRequest(getLocalNodeId(), nodeId));
            }
        }
    }

    try {
        return fut.get();
    }
    catch (IgniteInterruptedCheckedException ignored) {
        return false;
    }
    catch (IgniteCheckedException e) {
        throw new IgniteSpiException(e);
    }
}
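The pingFuts map is the classic request-coalescing idiom: concurrent pings to one node share a single GridFutureAdapter installed with putIfAbsent, and only the installing thread sends the request and arms the timeout. A generic sketch of the idiom, with the hypothetical sendPing standing in for the socket write (sockWriter.sendMessage in the original):

import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

/** Sketch of coalescing concurrent ping requests onto one shared future. */
class PingCoalesceSketch {
    private final ConcurrentMap<UUID, GridFutureAdapter<Boolean>> pingFuts = new ConcurrentHashMap<>();

    boolean ping(UUID nodeId) throws IgniteCheckedException {
        GridFutureAdapter<Boolean> fut = pingFuts.get(nodeId);

        if (fut == null) {
            fut = new GridFutureAdapter<>();

            GridFutureAdapter<Boolean> old = pingFuts.putIfAbsent(nodeId, fut);

            if (old != null)
                fut = old; // Another thread is already pinging this node: share its future.
            else
                sendPing(nodeId); // Only the installing thread performs the I/O.
        }

        return fut.get(); // Every caller blocks on the same shared result.
    }

    /** Reply (or timeout) handler: removes the future first, then completes it. */
    void onPingResult(UUID nodeId, boolean alive) {
        GridFutureAdapter<Boolean> fut = pingFuts.remove(nodeId);

        if (fut != null)
            fut.onDone(alive);
    }

    /** Hypothetical transport call; a real implementation writes a ping request here. */
    private void sendPing(UUID nodeId) {
        // Sketch only.
    }
}

Removing the future from the map before completing it, as both the sketch and pingNode do, guarantees each installed future is completed at most once even when the reply and the timeout race.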