Use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
The class GridDhtPartitionTopologyImpl, method beforeExchange.
/** {@inheritDoc} */
@Override
public void beforeExchange(GridDhtPartitionsExchangeFuture exchFut, boolean affReady) throws IgniteCheckedException {
    DiscoveryEvent discoEvt = exchFut.discoveryEvent();

    ClusterState newState = exchFut.newClusterState();

    treatAllPartAsLoc = (newState != null && newState == ClusterState.ACTIVE)
        || (cctx.kernalContext().state().active()
            && discoEvt.type() == EventType.EVT_NODE_JOINED
            && discoEvt.eventNode().isLocal()
            && !cctx.kernalContext().clientNode());

    // Wait for rent outside of checkpoint lock.
    waitForRent();

    ClusterNode loc = cctx.localNode();

    cctx.shared().database().checkpointReadLock();

    synchronized (cctx.shared().exchange().interruptLock()) {
        if (Thread.currentThread().isInterrupted())
            throw new IgniteInterruptedCheckedException("Thread is interrupted: " + Thread.currentThread());

        try {
            U.writeLock(lock);
        } catch (IgniteInterruptedCheckedException e) {
            cctx.shared().database().checkpointReadUnlock();

            throw e;
        }

        try {
            GridDhtPartitionExchangeId exchId = exchFut.exchangeId();

            if (stopping)
                return;

            assert topVer.equals(exchId.topologyVersion()) : "Invalid topology version [topVer=" + topVer +
                ", exchId=" + exchId + ']';

            if (exchId.isLeft())
                removeNode(exchId.nodeId());

            ClusterNode oldest = discoCache.oldestAliveServerNodeWithCache();

            if (log.isDebugEnabled())
                log.debug("Partition map beforeExchange [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');

            long updateSeq = this.updateSeq.incrementAndGet();

            cntrMap.clear();

            // If this is the oldest node.
            if (oldest != null && (loc.equals(oldest) || exchFut.cacheAddedOnExchange(cctx.cacheId(), cctx.receivedFrom()))) {
                if (node2part == null) {
                    node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq);

                    if (log.isDebugEnabled())
                        log.debug("Created brand new full topology map on oldest node [exchId=" + exchId +
                            ", fullMap=" + fullMapString() + ']');
                } else if (!node2part.valid()) {
                    node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq, node2part, false);

                    if (log.isDebugEnabled())
                        log.debug("Created new full topology map on oldest node [exchId=" + exchId +
                            ", fullMap=" + node2part + ']');
                } else if (!node2part.nodeId().equals(loc.id())) {
                    node2part = new GridDhtPartitionFullMap(oldest.id(), oldest.order(), updateSeq, node2part, false);

                    if (log.isDebugEnabled())
                        log.debug("Copied old map into new map on oldest node (previous oldest node left) [exchId=" +
                            exchId + ", fullMap=" + fullMapString() + ']');
                }
            }

            if (affReady)
                initPartitions0(exchFut, updateSeq);
            else {
                List<List<ClusterNode>> aff = cctx.affinity().idealAssignment();

                createPartitions(aff, updateSeq);
            }

            consistencyCheck();

            if (log.isDebugEnabled())
                log.debug("Partition map after beforeExchange [exchId=" + exchId + ", fullMap=" + fullMapString() + ']');
        } finally {
            lock.writeLock().unlock();

            cctx.shared().database().checkpointReadUnlock();
        }
    }

    // Wait for evictions.
    waitForRent();
}
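The interrupt handling above hinges on U.writeLock(lock), which acquires the write lock interruptibly and rethrows interruption as the checked IgniteInterruptedCheckedException, giving the caller a chance to release the checkpoint read lock before propagating. Below is a minimal, self-contained sketch of that pattern; the names InterruptedCheckedException and Locks are illustrative stand-ins, not Ignite's own types.

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative stand-in for IgniteInterruptedCheckedException.
class InterruptedCheckedException extends Exception {
    InterruptedCheckedException(Throwable cause) {
        super(cause);
    }
}

final class Locks {
    /** Acquires the write lock, converting thread interruption into a checked exception. */
    static void writeLock(ReadWriteLock lock) throws InterruptedCheckedException {
        try {
            lock.writeLock().lockInterruptibly();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // Preserve the interrupt flag for callers.

            throw new InterruptedCheckedException(e);
        }
    }

    public static void main(String[] args) throws Exception {
        ReadWriteLock lock = new ReentrantReadWriteLock();

        writeLock(lock);

        try {
            // Critical section: mutate partition state under the write lock.
        } finally {
            lock.writeLock().unlock();
        }
    }
}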
Use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method init.
/**
 * Starts activity.
 *
 * @throws IgniteInterruptedCheckedException If interrupted.
 */
public void init() throws IgniteInterruptedCheckedException {
    if (isDone())
        return;

    initTs = U.currentTimeMillis();

    U.await(evtLatch);

    assert discoEvt != null : this;
    assert exchId.nodeId().equals(discoEvt.eventNode().id()) : this;
    assert !dummy && !forcePreload : this;

    try {
        discoCache.updateAlives(cctx.discovery());

        AffinityTopologyVersion topVer = topologyVersion();

        srvNodes = new ArrayList<>(discoCache.serverNodes());

        remaining.addAll(F.nodeIds(F.view(srvNodes, F.remoteNodes(cctx.localNodeId()))));

        crd = srvNodes.isEmpty() ? null : srvNodes.get(0);

        boolean crdNode = crd != null && crd.isLocal();

        skipPreload = cctx.kernalContext().clientNode();

        ExchangeType exchange;

        if (discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
            DiscoveryCustomMessage msg = ((DiscoveryCustomEvent) discoEvt).customMessage();

            if (msg instanceof DynamicCacheChangeBatch) {
                assert exchActions != null && !exchActions.empty();

                exchange = onCacheChangeRequest(crdNode);
            } else if (msg instanceof StartFullSnapshotAckDiscoveryMessage)
                exchange = CU.clientNode(discoEvt.eventNode()) ? onClientNodeEvent(crdNode) : onServerNodeEvent(crdNode);
            else {
                assert affChangeMsg != null : this;

                exchange = onAffinityChangeRequest(crdNode);
            }
        } else {
            if (discoEvt.type() == EVT_NODE_JOINED) {
                if (!discoEvt.eventNode().isLocal()) {
                    Collection<DynamicCacheDescriptor> receivedCaches =
                        cctx.cache().startReceivedCaches(discoEvt.eventNode().id(), topVer);

                    cctx.affinity().initStartedCaches(crdNode, this, receivedCaches);
                } else
                    cctx.cache().startCachesOnLocalJoin(topVer);
            }

            exchange = CU.clientNode(discoEvt.eventNode()) ? onClientNodeEvent(crdNode) : onServerNodeEvent(crdNode);
        }

        updateTopologies(crdNode);

        if (exchActions != null && exchActions.hasStop())
            cctx.cache().context().database().beforeCachesStop();

        switch (exchange) {
            case ALL: {
                distributedExchange();

                break;
            }

            case CLIENT: {
                initTopologies();

                clientOnlyExchange();

                break;
            }

            case NONE: {
                initTopologies();

                onDone(topVer);

                break;
            }

            default:
                assert false;
        }
    } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
    } catch (IgniteNeedReconnectException e) {
        onDone(e);
    } catch (Throwable e) {
        if (reconnectOnError(e))
            onDone(new IgniteNeedReconnectException(cctx.localNode(), e));
        else {
            U.error(log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e);

            onDone(e);
        }

        if (e instanceof Error)
            throw (Error) e;
    }
}
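init() blocks on U.await(evtLatch) until the discovery event has been delivered, and that helper converts InterruptedException into the checked IgniteInterruptedCheckedException declared in the method signature. Below is a minimal sketch of that await-with-checked-interruption pattern; AwaitInterruptedException is an illustrative stand-in, not the Ignite class.

import java.util.concurrent.CountDownLatch;

final class LatchAwaitDemo {
    // Illustrative stand-in for IgniteInterruptedCheckedException.
    static class AwaitInterruptedException extends Exception {
        AwaitInterruptedException(Throwable cause) {
            super(cause);
        }
    }

    /** Blocks on the latch, surfacing interruption as a checked exception. */
    static void await(CountDownLatch latch) throws AwaitInterruptedException {
        try {
            latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();

            throw new AwaitInterruptedException(e);
        }
    }

    public static void main(String[] args) throws Exception {
        CountDownLatch evtLatch = new CountDownLatch(1);

        // In init() above, another thread counts the latch down once the
        // discovery event has been recorded on the future.
        evtLatch.countDown();

        await(evtLatch);

        System.out.println("Event delivered, exchange can start.");
    }
}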
Use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method onDone.
/** {@inheritDoc} */
@Override
public boolean onDone(@Nullable AffinityTopologyVersion res, @Nullable Throwable err) {
    boolean realExchange = !dummy && !forcePreload;

    if (err == null && realExchange && !cctx.kernalContext().clientNode()
        && (serverNodeDiscoveryEvent() || affChangeMsg != null)) {
        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (!cacheCtx.affinityNode() || cacheCtx.isLocal())
                continue;

            cacheCtx.continuousQueries().flushBackupQueue(exchId.topologyVersion());
        }
    }

    if (err == null && realExchange) {
        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal())
                continue;

            try {
                if (centralizedAff)
                    cacheCtx.topology().initPartitions(this);
            } catch (IgniteInterruptedCheckedException e) {
                U.error(log, "Failed to initialize partitions.", e);
            }

            GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

            if (drCacheCtx.isDrEnabled()) {
                try {
                    drCacheCtx.dr().onExchange(topologyVersion(), exchId.isLeft());
                } catch (IgniteCheckedException e) {
                    U.error(log, "Failed to notify DR: " + e, e);
                }
            }
        }

        if (discoEvt.type() == EVT_NODE_LEFT || discoEvt.type() == EVT_NODE_FAILED || discoEvt.type() == EVT_NODE_JOINED)
            detectLostPartitions();

        Map<Integer, CacheValidation> m = new HashMap<>(cctx.cacheContexts().size());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            Collection<Integer> lostParts = cacheCtx.isLocal()
                ? Collections.<Integer>emptyList()
                : cacheCtx.topology().lostPartitions();

            boolean valid = true;

            if (cacheCtx.config().getTopologyValidator() != null && !CU.isSystemCache(cacheCtx.name()))
                valid = cacheCtx.config().getTopologyValidator().validate(discoEvt.topologyNodes());

            m.put(cacheCtx.cacheId(), new CacheValidation(valid, lostParts));
        }

        cacheValidRes = m;
    }

    cctx.cache().onExchangeDone(exchId.topologyVersion(), exchActions, err, false);

    cctx.exchange().onExchangeDone(this, err);

    if (exchActions != null && err == null)
        exchActions.completeRequestFutures(cctx);

    if (exchangeOnChangeGlobalState && err == null)
        cctx.kernalContext().state().onExchangeDone();

    if (super.onDone(res, err) && realExchange) {
        if (log.isDebugEnabled())
            log.debug("Completed partition exchange [localNode=" + cctx.localNodeId() + ", exchange=" + this +
                ", durationFromInit=" + (U.currentTimeMillis() - initTs) + ']');

        initFut.onDone(err == null);

        if (exchId.isLeft()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts())
                cacheCtx.config().getAffinity().removeNode(exchId.nodeId());
        }

        exchActions = null;

        if (discoEvt instanceof DiscoveryCustomEvent)
            ((DiscoveryCustomEvent) discoEvt).customMessage(null);

        cctx.exchange().lastFinishedFuture(this);

        return true;
    }

    return dummy;
}
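Note that the one-time cleanup at the end (clearing exchActions, nulling the custom message, registering the last finished future) runs only when super.onDone(res, err) returns true, that is, for the single caller that actually completes the future. Below is a minimal sketch of that complete-once contract; OnceFuture mirrors GridFutureAdapter's behavior in spirit and is not the actual Ignite implementation.

import java.util.concurrent.atomic.AtomicReference;

final class OnceFuture<T> {
    // Sentinel so a null result can still mark the future as completed.
    private static final Object NIL = new Object();

    private final AtomicReference<Object> result = new AtomicReference<>();

    /** @return {@code true} only for the call that actually completed the future. */
    boolean onDone(T res, Throwable err) {
        Object val = err != null ? err : (res != null ? res : NIL);

        // Only the first completion wins; later calls are no-ops.
        return result.compareAndSet(null, val);
    }

    public static void main(String[] args) {
        OnceFuture<String> fut = new OnceFuture<>();

        if (fut.onDone("topVer=5", null))
            System.out.println("First completion: run one-time cleanup here.");

        if (!fut.onDone("topVer=6", null))
            System.out.println("Second completion is a no-op.");
    }
}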
Use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
The class GridCacheDistributedQueryManager, method sendQueryResponse.
/**
 * Sends cache query response.
 *
 * @param nodeId Node to send the response to.
 * @param res Cache query response.
 * @param timeout Message timeout.
 * @return {@code true} if the response was sent, {@code false} otherwise.
 */
private boolean sendQueryResponse(UUID nodeId, GridCacheQueryResponse res, long timeout) {
    ClusterNode node = cctx.node(nodeId);

    if (node == null)
        return false;

    int attempt = 1;

    IgniteCheckedException err = null;

    while (!Thread.currentThread().isInterrupted()) {
        try {
            if (log.isDebugEnabled())
                log.debug("Send query response: " + res);

            Object topic = topic(nodeId, res.requestId());

            cctx.io().sendOrderedMessage(node, topic, res, GridIoPolicy.QUERY_POOL, timeout > 0 ? timeout : Long.MAX_VALUE);

            return true;
        } catch (ClusterTopologyCheckedException ignored) {
            if (log.isDebugEnabled())
                log.debug("Failed to send query response since node left grid [nodeId=" + nodeId + ", res=" + res + "]");

            return false;
        } catch (IgniteCheckedException e) {
            if (err == null)
                err = e;

            if (Thread.currentThread().isInterrupted())
                break;

            if (attempt < RESEND_ATTEMPTS) {
                if (log.isDebugEnabled())
                    log.debug("Failed to send query response (will try again) [nodeId=" + nodeId + ", res=" + res +
                        ", attempt=" + attempt + ", err=" + e + "]");

                if (!Thread.currentThread().isInterrupted())
                    try {
                        U.sleep(RESEND_FREQ);
                    } catch (IgniteInterruptedCheckedException e1) {
                        U.error(log, "Waiting to resend query response was interrupted (response will not be sent) " +
                            "[nodeId=" + nodeId + ", response=" + res + "]", e1);

                        return false;
                    }
            } else {
                U.error(log, "Failed to send cache response [nodeId=" + nodeId + ", response=" + res + "]", err);

                return false;
            }
        }

        attempt++;
    }

    return false;
}
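The retry loop above makes a bounded number of attempts and backs off between them with U.sleep(RESEND_FREQ), which throws IgniteInterruptedCheckedException on interrupt so the loop gives up instead of retrying. Below is a reduced, self-contained sketch of the same bounded retry-with-backoff shape; the Sender interface and plain Thread.sleep are illustrative substitutes for cctx.io() and U.sleep.

final class RetrySendDemo {
    static final int RESEND_ATTEMPTS = 5;
    static final long RESEND_FREQ = 500L; // Milliseconds between attempts.

    // Illustrative stand-in for the messaging facility.
    interface Sender {
        void send() throws Exception;
    }

    /** @return {@code true} if the send eventually succeeded. */
    static boolean sendWithRetry(Sender sender) {
        for (int attempt = 1; attempt <= RESEND_ATTEMPTS && !Thread.currentThread().isInterrupted(); attempt++) {
            try {
                sender.send();

                return true;
            } catch (Exception e) {
                if (attempt == RESEND_ATTEMPTS)
                    return false; // Out of attempts.

                try {
                    Thread.sleep(RESEND_FREQ); // U.sleep() in Ignite throws a checked exception here instead.
                } catch (InterruptedException ignored) {
                    Thread.currentThread().interrupt();

                    return false; // Interrupted: give up instead of retrying.
                }
            }
        }

        return false;
    }

    public static void main(String[] args) {
        boolean sent = sendWithRetry(() -> System.out.println("response sent"));

        System.out.println("sent=" + sent);
    }
}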
Use of org.apache.ignite.internal.IgniteInterruptedCheckedException in project ignite by apache.
The class DataStreamerImpl, method load0.
/**
 * @param entries Entries.
 * @param resFut Result future.
 * @param activeKeys Active keys.
 * @param remaps Remaps count.
 */
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut,
    @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps) {
    try {
        assert entries != null;

        final boolean remap = remaps > 0;

        if (!remap) {
            // Failed data should be processed prior to new data.
            acquireRemapSemaphore();
        }

        if (!isWarningPrinted) {
            synchronized (this) {
                if (!allowOverwrite() && !isWarningPrinted) {
                    U.warn(log, "Data streamer will not overwrite existing cache entries for better performance " +
                        "(to change, set allowOverwrite to true)");
                }

                isWarningPrinted = true;
            }
        }

        Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();

        boolean initPda = ctx.deploy().enabled() && jobPda == null;

        GridCacheAdapter cache = ctx.cache().internalCache(cacheName);

        if (cache == null)
            throw new IgniteCheckedException("Cache not created or already destroyed.");

        GridCacheContext cctx = cache.context();

        GridCacheGateway gate = null;

        if (!allowOverwrite() && !cctx.isLocal()) {
            // Cases where cctx is required.
            gate = cctx.gate();

            gate.enter();
        }

        try {
            AffinityTopologyVersion topVer = allowOverwrite() || cctx.isLocal()
                ? ctx.cache().context().exchange().readyAffinityVersion()
                : cctx.topology().topologyVersion();

            for (DataStreamerEntry entry : entries) {
                List<ClusterNode> nodes;

                try {
                    KeyCacheObject key = entry.getKey();

                    assert key != null;

                    if (initPda) {
                        if (cacheObjCtx.addDeploymentInfo())
                            jobPda = new DataStreamerPda(key.value(cacheObjCtx, false),
                                entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null,
                                rcvr);
                        else if (rcvr != null)
                            jobPda = new DataStreamerPda(rcvr);

                        initPda = false;
                    }

                    nodes = nodes(key, topVer, cctx);
                } catch (IgniteCheckedException e) {
                    resFut.onDone(e);

                    return;
                }

                if (F.isEmpty(nodes)) {
                    resFut.onDone(new ClusterTopologyException("Failed to map key to node " +
                        "(no nodes with cache found in topology) [infos=" + entries.size() +
                        ", cacheName=" + cacheName + ']'));

                    return;
                }

                for (ClusterNode node : nodes) {
                    Collection<DataStreamerEntry> col = mappings.get(node);

                    if (col == null)
                        mappings.put(node, col = new ArrayList<>());

                    col.add(entry);
                }
            }

            for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
                final UUID nodeId = e.getKey().id();

                Buffer buf = bufMappings.get(nodeId);

                if (buf == null) {
                    Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));

                    if (old != null)
                        buf = old;
                }

                final Collection<DataStreamerEntry> entriesForNode = e.getValue();

                IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {
                    @Override public void apply(IgniteInternalFuture<?> t) {
                        try {
                            t.get();

                            if (activeKeys != null) {
                                for (DataStreamerEntry e : entriesForNode)
                                    activeKeys.remove(new KeyCacheObjectWrapper(e.getKey()));

                                if (activeKeys.isEmpty())
                                    resFut.onDone();
                            } else {
                                assert entriesForNode.size() == 1;

                                // That has been a single key,
                                // so complete result future right away.
                                resFut.onDone();
                            }
                        } catch (IgniteClientDisconnectedCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');

                            resFut.onDone(e1);
                        } catch (IgniteCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');

                            if (cancelled) {
                                resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " +
                                    DataStreamerImpl.this, e1));
                            } else if (remaps + 1 > maxRemapCnt) {
                                resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " +
                                    remaps, e1));
                            } else {
                                try {
                                    remapSem.acquire();

                                    final Runnable r = new Runnable() {
                                        @Override public void run() {
                                            try {
                                                if (cancelled)
                                                    throw new IllegalStateException("DataStreamer closed.");

                                                load0(entriesForNode, resFut, activeKeys, remaps + 1);
                                            } catch (Throwable ex) {
                                                resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed.", ex));
                                            } finally {
                                                remapSem.release();
                                            }
                                        }
                                    };

                                    dataToRemap.add(r);

                                    if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
                                        ctx.closure().callLocalSafe(new GPC<Boolean>() {
                                            @Override public Boolean call() {
                                                boolean locked = true;

                                                while (locked || !dataToRemap.isEmpty()) {
                                                    if (!locked && !remapOwning.compareAndSet(false, true))
                                                        return false;

                                                    try {
                                                        Runnable r = dataToRemap.poll();

                                                        if (r != null)
                                                            r.run();
                                                    } finally {
                                                        if (!dataToRemap.isEmpty())
                                                            locked = true;
                                                        else {
                                                            remapOwning.set(false);

                                                            locked = false;
                                                        }
                                                    }
                                                }

                                                return true;
                                            }
                                        }, true);
                                    }
                                } catch (InterruptedException e2) {
                                    resFut.onDone(e2);
                                }
                            }
                        }
                    }
                };

                final GridFutureAdapter<?> f;

                try {
                    f = buf.update(entriesForNode, topVer, lsnr, remap);
                } catch (IgniteInterruptedCheckedException e1) {
                    resFut.onDone(e1);

                    return;
                }

                if (ctx.discovery().node(nodeId) == null) {
                    if (bufMappings.remove(nodeId, buf)) {
                        final Buffer buf0 = buf;

                        waitAffinityAndRun(new Runnable() {
                            @Override public void run() {
                                buf0.onNodeLeft();

                                if (f != null)
                                    f.onDone(new ClusterTopologyCheckedException("Failed to wait for request completion " +
                                        "(node has left): " + nodeId));
                            }
                        }, ctx.discovery().topologyVersion(), false);
                    }
                }
            }
        } finally {
            if (gate != null)
                gate.leave();
        }
    } catch (Exception ex) {
        resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
    }
}
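The remap path is worth isolating: failed batches are queued in dataToRemap, and remapOwning.compareAndSet(false, true) guarantees that at most one thread runs the drain loop, with a re-check after release so late arrivals are not stranded. Below is a minimal sketch of that CAS-guarded single-drainer pattern; the field names mirror the snippet, but this is an illustrative reduction, not the Ignite code.

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;

final class SingleDrainerDemo {
    private final ConcurrentLinkedQueue<Runnable> dataToRemap = new ConcurrentLinkedQueue<>();
    private final AtomicBoolean remapOwning = new AtomicBoolean();

    void submit(Runnable task) {
        dataToRemap.add(task);

        // Only the thread that wins the CAS becomes the drainer.
        if (!remapOwning.get() && remapOwning.compareAndSet(false, true))
            drain();
    }

    private void drain() {
        boolean owning = true;

        while (owning || !dataToRemap.isEmpty()) {
            // A task may have arrived after we released ownership; retake it,
            // or bail out if another thread already became the drainer.
            if (!owning && !remapOwning.compareAndSet(false, true))
                return;

            owning = true;

            Runnable r = dataToRemap.poll();

            if (r != null)
                r.run();

            if (dataToRemap.isEmpty()) {
                remapOwning.set(false);

                owning = false;
            }
        }
    }

    public static void main(String[] args) {
        SingleDrainerDemo demo = new SingleDrainerDemo();

        for (int i = 0; i < 3; i++) {
            int n = i;

            demo.submit(() -> System.out.println("remapped batch " + n));
        }
    }
}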