Use of org.apache.ignite.internal.IgniteInternalFuture in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method lockAllAsync.
/**
* @param cacheCtx Cache context.
* @param nearNode Near node.
* @param req Request.
* @param filter0 Filter.
* @return Future.
*/
public IgniteInternalFuture<GridNearLockResponse> lockAllAsync(final GridCacheContext<?, ?> cacheCtx, final ClusterNode nearNode, final GridNearLockRequest req, @Nullable final CacheEntryPredicate[] filter0) {
    final List<KeyCacheObject> keys = req.keys();
    CacheEntryPredicate[] filter = filter0;
    // Set message into thread context.
    GridDhtTxLocal tx = null;
    try {
        int cnt = keys.size();
        if (req.inTx()) {
            GridCacheVersion dhtVer = ctx.tm().mappedVersion(req.version());
            if (dhtVer != null)
                tx = ctx.tm().tx(dhtVer);
        }
        final List<GridCacheEntryEx> entries = new ArrayList<>(cnt);
        // Unmarshal filter first.
        if (filter == null)
            filter = req.filter();
        GridDhtLockFuture fut = null;
        if (!req.inTx()) {
            GridDhtPartitionTopology top = null;
            if (req.firstClientRequest()) {
                assert CU.clientNode(nearNode);
                top = topology();
                top.readLock();
                if (!top.topologyVersionFuture().isDone()) {
                    top.readUnlock();
                    return null;
                }
            }
            try {
                if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion())) {
                    if (log.isDebugEnabled()) {
                        log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
                    }
                    GridNearLockResponse res = sendClientLockRemapResponse(nearNode, req, top.lastTopologyChangeVersion());
                    return new GridFinishedFuture<>(res);
                }
                fut = new GridDhtLockFuture(ctx, nearNode.id(), req.version(), req.topologyVersion(), cnt, req.txRead(), req.needReturnValue(), req.timeout(), tx, req.threadId(), req.createTtl(), req.accessTtl(), filter, req.skipStore(), req.keepBinary());
                // Add before mapping.
                if (!ctx.mvcc().addFuture(fut))
                    throw new IllegalStateException("Duplicate future ID: " + fut);
            } finally {
                if (top != null)
                    top.readUnlock();
            }
        }
        boolean timedout = false;
        for (KeyCacheObject key : keys) {
            if (timedout)
                break;
            while (true) {
                // Specify topology version to make sure containment is checked
                // based on the requested version, not the latest.
                GridDhtCacheEntry entry = entryExx(key, req.topologyVersion());
                try {
                    if (fut != null) {
                        // This method will add local candidate.
                        // Entry cannot become obsolete after this method succeeded.
                        fut.addEntry(key == null ? null : entry);
                        if (fut.isDone()) {
                            timedout = true;
                            break;
                        }
                    }
                    entries.add(entry);
                    break;
                } catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry when adding lock (will retry): " + entry);
                } catch (GridDistributedLockCancelledException e) {
                    if (log.isDebugEnabled())
                        log.debug("Got lock request for cancelled lock (will ignore): " + entry);
                    fut.onError(e);
                    return new GridDhtFinishedFuture<>(e);
                }
            }
        }
        // Handle implicit locks for pessimistic transactions.
        if (req.inTx()) {
            if (tx == null) {
                GridDhtPartitionTopology top = null;
                if (req.firstClientRequest()) {
                    assert CU.clientNode(nearNode);
                    top = topology();
                    top.readLock();
                    if (!top.topologyVersionFuture().isDone()) {
                        top.readUnlock();
                        return null;
                    }
                }
                try {
                    if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion())) {
                        if (log.isDebugEnabled()) {
                            log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
                        }
                        GridNearLockResponse res = sendClientLockRemapResponse(nearNode, req, top.lastTopologyChangeVersion());
                        return new GridFinishedFuture<>(res);
                    }
                    tx = new GridDhtTxLocal(ctx.shared(), req.topologyVersion(), nearNode.id(), req.version(), req.futureId(), req.miniId(), req.threadId(), /*implicitTx*/false, /*implicitSingleTx*/false, ctx.systemTx(), false, ctx.ioPolicy(), PESSIMISTIC, req.isolation(), req.timeout(), req.isInvalidate(), !req.skipStore(), false, req.txSize(), null, req.subjectId(), req.taskNameHash());
                    if (req.syncCommit())
                        tx.syncMode(FULL_SYNC);
                    tx = ctx.tm().onCreated(null, tx);
                    if (tx == null || !tx.init()) {
                        String msg = "Failed to acquire lock (transaction has been completed): " + req.version();
                        U.warn(log, msg);
                        if (tx != null)
                            tx.rollbackDhtLocal();
                        return new GridDhtFinishedFuture<>(new IgniteCheckedException(msg));
                    }
                    tx.topologyVersion(req.topologyVersion());
                } finally {
                    if (top != null)
                        top.readUnlock();
                }
            }
            ctx.tm().txContext(tx);
            if (log.isDebugEnabled())
                log.debug("Performing DHT lock [tx=" + tx + ", entries=" + entries + ']');
            IgniteInternalFuture<GridCacheReturn> txFut = tx.lockAllAsync(cacheCtx, entries, req.messageId(), req.txRead(), req.needReturnValue(), req.createTtl(), req.accessTtl(), req.skipStore(), req.keepBinary(), req.nearCache());
            final GridDhtTxLocal t = tx;
            return new GridDhtEmbeddedFuture(txFut, new C2<GridCacheReturn, Exception, IgniteInternalFuture<GridNearLockResponse>>() {
                @Override
                public IgniteInternalFuture<GridNearLockResponse> apply(GridCacheReturn o, Exception e) {
                    if (e != null)
                        e = U.unwrap(e);
                    assert !t.empty();
                    // Create response while holding locks.
                    final GridNearLockResponse resp = createLockReply(nearNode, entries, req, t, t.xidVersion(), e);
                    assert !t.implicit() : t;
                    assert !t.onePhaseCommit() : t;
                    sendLockReply(nearNode, t, req, resp);
                    return new GridFinishedFuture<>(resp);
                }
            });
        } else {
            assert fut != null;
            // This will send remote messages.
            fut.map();
            final GridCacheVersion mappedVer = fut.version();
            return new GridDhtEmbeddedFuture<>(new C2<Boolean, Exception, GridNearLockResponse>() {
                @Override
                public GridNearLockResponse apply(Boolean b, Exception e) {
                    if (e != null)
                        e = U.unwrap(e);
                    else if (!b)
                        e = new GridCacheLockTimeoutException(req.version());
                    GridNearLockResponse res = createLockReply(nearNode, entries, req, null, mappedVer, e);
                    sendLockReply(nearNode, null, req, res);
                    return res;
                }
            }, fut);
        }
    } catch (IgniteCheckedException | RuntimeException e) {
        String err = "Failed to unmarshal at least one of the keys for lock request message: " + req;
        U.error(log, err, e);
        if (tx != null) {
            try {
                tx.rollbackDhtLocal();
            } catch (IgniteCheckedException ex) {
                U.error(log, "Failed to rollback the transaction: " + tx, ex);
            }
        }
        return new GridDhtFinishedFuture<>(new IgniteCheckedException(err, e));
    }
}
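In the non-error paths above, the near-lock response is not built eagerly: lockAllAsync wraps the transaction's lock future in a GridDhtEmbeddedFuture whose C2 closure creates and sends the reply only once locking finishes. Below is a minimal, standalone sketch of that shape using java.util.concurrent.CompletableFuture in place of Ignite's internal futures; LockOutcome, LockReply, and the method bodies are hypothetical stand-ins, not Ignite classes.

    import java.util.concurrent.CompletableFuture;

    public class EmbeddedFutureSketch {
        record LockOutcome(boolean acquired) {}
        record LockReply(boolean ok, String error) {}

        static CompletableFuture<LockReply> lockAllAsync(CompletableFuture<LockOutcome> lockFut) {
            // Build the reply only after the inner lock future completes, normally or
            // exceptionally, mirroring the GridDhtEmbeddedFuture + C2 closure wrapping of txFut.
            return lockFut.handle((outcome, err) -> {
                if (err != null)
                    return new LockReply(false, err.getMessage());
                if (!outcome.acquired())
                    return new LockReply(false, "lock timeout");
                return new LockReply(true, null);
            });
        }

        public static void main(String[] args) {
            CompletableFuture<LockOutcome> lockFut = CompletableFuture.supplyAsync(() -> new LockOutcome(true));
            LockReply reply = lockAllAsync(lockFut).join();
            System.out.println("reply ok=" + reply.ok());
        }
    }

The point of the shape is that the reply is constructed inside the completion stage, so it observes the final lock state, which is what the "Create response while holding locks" comment above is getting at.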
Use of org.apache.ignite.internal.IgniteInternalFuture in project ignite by apache.
The class GridDhtTxFinishFuture, method addDiagnosticRequest.
/**
* {@inheritDoc}
*/
@SuppressWarnings("unchecked")
@Override
public void addDiagnosticRequest(IgniteDiagnosticPrepareContext ctx) {
    if (!isDone()) {
        for (IgniteInternalFuture fut : futures()) {
            if (!fut.isDone()) {
                MiniFuture f = (MiniFuture) fut;
                if (!f.node().isLocal()) {
                    GridCacheVersion dhtVer = tx.xidVersion();
                    GridCacheVersion nearVer = tx.nearXidVersion();
                    ctx.remoteTxInfo(f.node().id(), dhtVer, nearVer, "GridDhtTxFinishFuture " + "waiting for response [node=" + f.node().id() + ", topVer=" + tx.topologyVersion() + ", dhtVer=" + dhtVer + ", nearVer=" + nearVer + ", futId=" + futId + ", miniId=" + f.futId + ", tx=" + tx + ']');
                    return;
                }
            }
        }
    }
}
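addDiagnosticRequest simply walks the pending child futures of the finish future and records the first one that is still waiting on a remote node. Below is a small sketch of that scan, assuming a simplified model in which each pending operation carries a node id, a locality flag, and a CompletableFuture; PendingOp is a hypothetical stand-in for Ignite's MiniFuture and nothing here is Ignite API.

    import java.util.List;
    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;

    public class DiagnosticSketch {
        record PendingOp(String nodeId, boolean local, CompletableFuture<?> fut) {}

        // Return diagnostic text for the first child operation still pending on a remote node.
        static Optional<String> firstPendingRemote(List<PendingOp> ops) {
            for (PendingOp op : ops) {
                if (!op.fut().isDone() && !op.local())
                    return Optional.of("waiting for response [node=" + op.nodeId() + ']');
            }
            return Optional.empty();
        }

        public static void main(String[] args) {
            List<PendingOp> ops = List.of(
                new PendingOp("A", true, CompletableFuture.completedFuture(null)),
                new PendingOp("B", false, new CompletableFuture<>()));
            firstPendingRemote(ops).ifPresent(System.out::println);
        }
    }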
Use of org.apache.ignite.internal.IgniteInternalFuture in project ignite by apache.
The class GridDhtGetSingleFuture, method getAsync.
/**
*/
@SuppressWarnings({ "unchecked", "IfMayBeConditional" })
private void getAsync() {
    assert part != -1;
    String taskName0 = cctx.kernalContext().job().currentTaskName();
    if (taskName0 == null)
        taskName0 = cctx.kernalContext().task().resolveTaskName(taskNameHash);
    final String taskName = taskName0;
    IgniteInternalFuture<Boolean> rdrFut = null;
    ReaderArguments readerArgs = null;
    if (addRdr && !skipVals && !cctx.localNodeId().equals(reader)) {
        while (true) {
            GridDhtCacheEntry e = cache().entryExx(key, topVer);
            try {
                if (e.obsolete())
                    continue;
                boolean addReader = !e.deleted();
                if (addReader) {
                    e.unswap(false);
                    // we have to add reader again later.
                    if (readerArgs == null)
                        readerArgs = new ReaderArguments(reader, msgId, topVer);
                }
                // Register reader. If there are active transactions for this entry,
                // then will wait for their completion before proceeding.
                // TODO: IGNITE-3498:
                // TODO: What if any transaction we wait for actually removes this entry?
                // TODO: In this case seems like we will be stuck with untracked near entry.
                // TODO: To fix, check that reader is contained in the list of readers once
                // TODO: again after the returned future completes - if not, try again.
                rdrFut = addReader ? e.addReader(reader, msgId, topVer) : null;
                break;
            } catch (IgniteCheckedException err) {
                onDone(err);
                return;
            } catch (GridCacheEntryRemovedException ignore) {
                if (log.isDebugEnabled())
                    log.debug("Got removed entry when getting a DHT value: " + e);
            } finally {
                cctx.evicts().touch(e, topVer);
            }
        }
    }
    IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>> fut;
    if (rdrFut == null || rdrFut.isDone()) {
        fut = cache().getDhtAllAsync(Collections.singleton(key), readerArgs, readThrough, subjId, taskName, expiryPlc, skipVals, recovery);
    } else {
        final ReaderArguments args = readerArgs;
        rdrFut.listen(new IgniteInClosure<IgniteInternalFuture<Boolean>>() {
            @Override
            public void apply(IgniteInternalFuture<Boolean> fut) {
                Throwable e = fut.error();
                if (e != null) {
                    onDone(e);
                    return;
                }
                IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>> fut0 = cache().getDhtAllAsync(Collections.singleton(key), args, readThrough, subjId, taskName, expiryPlc, skipVals, recovery);
                fut0.listen(createGetFutureListener());
            }
        });
        return;
    }
    if (fut.isDone())
        onResult(fut);
    else
        fut.listen(createGetFutureListener());
}
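The branch at the end of getAsync is a recurring shape in this codebase: if the reader-registration future is already complete, issue the DHT get immediately; otherwise register a listener and issue it once the future fires. A minimal sketch of that branch with CompletableFuture follows; fetchValue() is a hypothetical placeholder for getDhtAllAsync(), not an Ignite method.

    import java.util.concurrent.CompletableFuture;

    public class GetAfterReaderSketch {
        // Hypothetical stand-in for the asynchronous value lookup (getDhtAllAsync in the source).
        static CompletableFuture<String> fetchValue(String key) {
            return CompletableFuture.supplyAsync(() -> "value-of-" + key);
        }

        static CompletableFuture<String> getAsync(String key, CompletableFuture<Void> readerFut) {
            if (readerFut == null || readerFut.isDone()) {
                // Fast path: reader already registered (or not needed), start the get right away.
                return fetchValue(key);
            }
            // Slow path: defer the get until the reader registration completes,
            // propagating its failure if it finishes exceptionally.
            return readerFut.thenCompose(ignored -> fetchValue(key));
        }

        public static void main(String[] args) {
            CompletableFuture<Void> readerFut = new CompletableFuture<>();
            CompletableFuture<String> res = getAsync("k1", readerFut);
            readerFut.complete(null); // reader registration finishes, get proceeds
            System.out.println(res.join());
        }
    }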
Use of org.apache.ignite.internal.IgniteInternalFuture in project ignite by apache.
The class DataStreamProcessor, method processRequest.
/**
* @param nodeId Sender ID.
* @param req Request.
*/
private void processRequest(final UUID nodeId, final DataStreamerRequest req) {
    if (!busyLock.enterBusy()) {
        if (log.isDebugEnabled())
            log.debug("Ignoring data load request (node is stopping): " + req);
        return;
    }
    try {
        if (log.isDebugEnabled())
            log.debug("Processing data load request: " + req);
        AffinityTopologyVersion locAffVer = ctx.cache().context().exchange().readyAffinityVersion();
        AffinityTopologyVersion rmtAffVer = req.topologyVersion();
        if (locAffVer.compareTo(rmtAffVer) < 0) {
            if (log.isDebugEnabled())
                log.debug("Received request has higher affinity topology version [request=" + req + ", locTopVer=" + locAffVer + ", rmtTopVer=" + rmtAffVer + ']');
            IgniteInternalFuture<?> fut = ctx.cache().context().exchange().affinityReadyFuture(rmtAffVer);
            if (fut != null && !fut.isDone()) {
                final byte plc = threadIoPolicy();
                fut.listen(new CI1<IgniteInternalFuture<?>>() {
                    @Override
                    public void apply(IgniteInternalFuture<?> t) {
                        ctx.closure().runLocalSafe(new Runnable() {
                            @Override
                            public void run() {
                                processRequest(nodeId, req);
                            }
                        }, plc);
                    }
                });
                return;
            }
        }
        Object topic;
        try {
            topic = U.unmarshal(marsh, req.responseTopicBytes(), U.resolveClassLoader(null, ctx.config()));
        } catch (IgniteCheckedException e) {
            U.error(log, "Failed to unmarshal topic from request: " + req, e);
            return;
        }
        ClassLoader clsLdr;
        if (req.forceLocalDeployment())
            clsLdr = U.gridClassLoader();
        else {
            GridDeployment dep = ctx.deploy().getGlobalDeployment(req.deploymentMode(), req.sampleClassName(), req.sampleClassName(), req.userVersion(), nodeId, req.classLoaderId(), req.participants(), null);
            if (dep == null) {
                sendResponse(nodeId, topic, req.requestId(), new IgniteCheckedException("Failed to get deployment for request [sndId=" + nodeId + ", req=" + req + ']'), false);
                return;
            }
            clsLdr = dep.classLoader();
        }
        StreamReceiver<K, V> updater;
        try {
            updater = U.unmarshal(marsh, req.updaterBytes(), U.resolveClassLoader(clsLdr, ctx.config()));
            if (updater != null)
                ctx.resource().injectGeneric(updater);
        } catch (IgniteCheckedException e) {
            U.error(log, "Failed to unmarshal message [nodeId=" + nodeId + ", req=" + req + ']', e);
            sendResponse(nodeId, topic, req.requestId(), e, false);
            return;
        }
        localUpdate(nodeId, req, updater, topic);
    } finally {
        busyLock.leaveBusy();
    }
}
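When the request carries a newer affinity topology version than the local node has seen, processRequest does not fail: it listens on the affinity-ready future and replays itself once the exchange completes. The sketch below reproduces that defer-and-replay control flow under the assumption that topology readiness can be modeled as one CompletableFuture per version; TopologyTracker and its methods are hypothetical and deliberately ignore some races the real exchange manager handles.

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class DeferredRequestSketch {
        static class TopologyTracker {
            volatile long readyVer = 0;
            final ConcurrentMap<Long, CompletableFuture<Void>> readyFuts = new ConcurrentHashMap<>();

            CompletableFuture<Void> readyFuture(long ver) {
                if (ver <= readyVer)
                    return CompletableFuture.completedFuture(null);
                return readyFuts.computeIfAbsent(ver, v -> new CompletableFuture<>());
            }

            void advanceTo(long ver) {
                readyVer = ver;
                readyFuts.forEach((v, f) -> { if (v <= ver) f.complete(null); });
            }
        }

        static void processRequest(TopologyTracker top, long reqVer, String payload) {
            if (top.readyVer < reqVer) {
                // Local topology lags the sender: park the request and retry when ready.
                top.readyFuture(reqVer).thenRun(() -> processRequest(top, reqVer, payload));
                return;
            }
            System.out.println("processed '" + payload + "' at topology " + top.readyVer);
        }

        public static void main(String[] args) {
            TopologyTracker top = new TopologyTracker();
            processRequest(top, 2, "batch-1"); // deferred: local version is still 0
            top.advanceTo(2);                  // the parked request is replayed here
        }
    }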
Use of org.apache.ignite.internal.IgniteInternalFuture in project ignite by apache.
The class DataStreamerImpl, method load0.
/**
* @param entries Entries.
* @param resFut Result future.
* @param activeKeys Active keys.
* @param remaps Remaps count.
*/
private void load0(Collection<? extends DataStreamerEntry> entries, final GridFutureAdapter<Object> resFut, @Nullable final Collection<KeyCacheObjectWrapper> activeKeys, final int remaps) {
    try {
        assert entries != null;
        final boolean remap = remaps > 0;
        if (!remap) {
            // Failed data should be processed prior to new data.
            acquireRemapSemaphore();
        }
        if (!isWarningPrinted) {
            synchronized (this) {
                if (!allowOverwrite() && !isWarningPrinted) {
                    U.warn(log, "Data streamer will not overwrite existing cache entries for better performance " + "(to change, set allowOverwrite to true)");
                }
                isWarningPrinted = true;
            }
        }
        Map<ClusterNode, Collection<DataStreamerEntry>> mappings = new HashMap<>();
        boolean initPda = ctx.deploy().enabled() && jobPda == null;
        GridCacheAdapter cache = ctx.cache().internalCache(cacheName);
        if (cache == null)
            throw new IgniteCheckedException("Cache not created or already destroyed.");
        GridCacheContext cctx = cache.context();
        GridCacheGateway gate = null;
        AffinityTopologyVersion topVer;
        if (!cctx.isLocal())
            topVer = ctx.cache().context().exchange().lastTopologyFuture().get();
        else
            topVer = ctx.cache().context().exchange().readyAffinityVersion();
        List<List<ClusterNode>> assignments = cctx.affinity().assignments(topVer);
        if (!allowOverwrite() && !cctx.isLocal()) {
            // Cases where cctx required.
            gate = cctx.gate();
            gate.enter();
        }
        try {
            for (DataStreamerEntry entry : entries) {
                List<ClusterNode> nodes;
                try {
                    KeyCacheObject key = entry.getKey();
                    assert key != null;
                    if (initPda) {
                        if (cacheObjCtx.addDeploymentInfo())
                            jobPda = new DataStreamerPda(key.value(cacheObjCtx, false), entry.getValue() != null ? entry.getValue().value(cacheObjCtx, false) : null, rcvr);
                        else if (rcvr != null)
                            jobPda = new DataStreamerPda(rcvr);
                        initPda = false;
                    }
                    if (key.partition() == -1)
                        key.partition(cctx.affinity().partition(key, false));
                    nodes = nodes(key, topVer, cctx);
                } catch (IgniteCheckedException e) {
                    resFut.onDone(e);
                    return;
                }
                if (F.isEmpty(nodes)) {
                    resFut.onDone(new ClusterTopologyException("Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']'));
                    return;
                }
                for (ClusterNode node : nodes) {
                    Collection<DataStreamerEntry> col = mappings.get(node);
                    if (col == null)
                        mappings.put(node, col = new ArrayList<>());
                    col.add(entry);
                }
            }
            for (final Map.Entry<ClusterNode, Collection<DataStreamerEntry>> e : mappings.entrySet()) {
                final UUID nodeId = e.getKey().id();
                Buffer buf = bufMappings.get(nodeId);
                if (buf == null) {
                    Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));
                    if (old != null)
                        buf = old;
                }
                final Collection<DataStreamerEntry> entriesForNode = e.getValue();
                IgniteInClosure<IgniteInternalFuture<?>> lsnr = new IgniteInClosure<IgniteInternalFuture<?>>() {
                    @Override
                    public void apply(IgniteInternalFuture<?> t) {
                        try {
                            t.get();
                            if (activeKeys != null) {
                                for (DataStreamerEntry e : entriesForNode)
                                    activeKeys.remove(new KeyCacheObjectWrapper(e.getKey()));
                                if (activeKeys.isEmpty())
                                    resFut.onDone();
                            } else {
                                assert entriesForNode.size() == 1;
                                // That has been a single key,
                                // so complete result future right away.
                                resFut.onDone();
                            }
                        } catch (IgniteClientDisconnectedCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with disconnect error [nodeId=" + nodeId + ", err=" + e1 + ']');
                            resFut.onDone(e1);
                        } catch (IgniteCheckedException e1) {
                            if (log.isDebugEnabled())
                                log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');
                            if (cancelled) {
                                resFut.onDone(new IgniteCheckedException("Data streamer has been cancelled: " + DataStreamerImpl.this, e1));
                            } else if (remaps + 1 > maxRemapCnt) {
                                resFut.onDone(new IgniteCheckedException("Failed to finish operation (too many remaps): " + remaps, e1));
                            } else {
                                try {
                                    remapSem.acquire();
                                    final Runnable r = new Runnable() {
                                        @Override
                                        public void run() {
                                            try {
                                                if (cancelled)
                                                    closedException();
                                                load0(entriesForNode, resFut, activeKeys, remaps + 1);
                                            } catch (Throwable ex) {
                                                resFut.onDone(new IgniteCheckedException("DataStreamer remapping failed. ", ex));
                                            } finally {
                                                remapSem.release();
                                            }
                                        }
                                    };
                                    dataToRemap.add(r);
                                    if (!remapOwning.get() && remapOwning.compareAndSet(false, true)) {
                                        ctx.closure().callLocalSafe(new GPC<Boolean>() {
                                            @Override
                                            public Boolean call() {
                                                boolean locked = true;
                                                while (locked || !dataToRemap.isEmpty()) {
                                                    if (!locked && !remapOwning.compareAndSet(false, true))
                                                        return false;
                                                    try {
                                                        Runnable r = dataToRemap.poll();
                                                        if (r != null)
                                                            r.run();
                                                    } finally {
                                                        if (!dataToRemap.isEmpty())
                                                            locked = true;
                                                        else {
                                                            remapOwning.set(false);
                                                            locked = false;
                                                        }
                                                    }
                                                }
                                                return true;
                                            }
                                        }, true);
                                    }
                                } catch (InterruptedException e2) {
                                    resFut.onDone(e2);
                                }
                            }
                        }
                    }
                };
                GridCompoundFuture opFut = new SilentCompoundFuture();
                opFut.listen(lsnr);
                final List<GridFutureAdapter<?>> futs;
                try {
                    futs = buf.update(entriesForNode, topVer, assignments, opFut, remap);
                    opFut.markInitialized();
                } catch (IgniteInterruptedCheckedException e1) {
                    resFut.onDone(e1);
                    return;
                }
                if (ctx.discovery().node(nodeId) == null) {
                    if (bufMappings.remove(nodeId, buf)) {
                        final Buffer buf0 = buf;
                        waitAffinityAndRun(new Runnable() {
                            @Override
                            public void run() {
                                buf0.onNodeLeft();
                                if (futs != null) {
                                    Throwable ex = new ClusterTopologyCheckedException("Failed to wait for request completion (node has left): " + nodeId);
                                    for (int i = 0; i < futs.size(); i++)
                                        futs.get(i).onDone(ex);
                                }
                            }
                        }, ctx.discovery().topologyVersion(), false);
                    }
                }
            }
        } finally {
            if (gate != null)
                gate.leave();
        }
    } catch (Exception ex) {
        resFut.onDone(new IgniteCheckedException("DataStreamer data loading failed.", ex));
    }
}
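Stripped of deployment, per-node batching, and gateway details, the failure handling in load0 reduces to: complete the result future on success, give up after maxRemapCnt retries, or re-submit the same entries with remaps + 1. A condensed sketch of that loop follows; sendBatch() and MAX_REMAPS are hypothetical, and the real code additionally serializes remaps behind remapSem and a dedicated remap worker.

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ThreadLocalRandom;

    public class RemapSketch {
        static final int MAX_REMAPS = 3;

        // Hypothetical stand-in for flushing a per-node batch; fails randomly to exercise remapping.
        static CompletableFuture<Void> sendBatch(List<String> batch) {
            return CompletableFuture.runAsync(() -> {
                if (ThreadLocalRandom.current().nextBoolean())
                    throw new IllegalStateException("simulated node failure");
            });
        }

        static void load0(List<String> batch, CompletableFuture<Void> resFut, int remaps) {
            sendBatch(batch).whenComplete((ignored, err) -> {
                if (err == null) {
                    resFut.complete(null); // all keys for this batch acknowledged
                } else if (remaps + 1 > MAX_REMAPS) {
                    resFut.completeExceptionally(
                        new IllegalStateException("Failed to finish operation (too many remaps): " + remaps, err));
                } else {
                    // Re-submit the same batch with an incremented remap counter.
                    load0(batch, resFut, remaps + 1);
                }
            });
        }

        public static void main(String[] args) {
            CompletableFuture<Void> resFut = new CompletableFuture<>();
            load0(List.of("k1", "k2"), resFut, 0);
            resFut.whenComplete((v, e) -> System.out.println(e == null ? "loaded" : "failed: " + e.getMessage()));
            resFut.handle((v, e) -> null).join(); // wait for completion either way
        }
    }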