Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class GridDhtPartitionDemander, method handleSupplyMessage.
/**
* Handles a supply message from {@code nodeId}.
*
* The supply message contains entries to populate the rebalancing partitions.
*
* There is a cyclic process:
* Populate rebalancing partitions with entries from the Supply message.
* If not all partitions specified in {@link #rebalanceFut} were rebalanced or marked as missed,
* send a new Demand message to request the next batch of entries.
*
* @param nodeId Node id.
* @param supplyMsg Supply message.
*/
public void handleSupplyMessage(final UUID nodeId, final GridDhtPartitionSupplyMessage supplyMsg) {
AffinityTopologyVersion topVer = supplyMsg.topologyVersion();
RebalanceFuture fut = rebalanceFut;
ClusterNode node = ctx.node(nodeId);
fut.cancelLock.readLock().lock();
try {
String errMsg = null;
if (fut.isDone())
errMsg = "rebalance completed";
else if (node == null)
errMsg = "supplier has left cluster";
else if (!rebalanceFut.isActual(supplyMsg.rebalanceId()))
errMsg = "topology changed";
if (errMsg != null) {
if (log.isDebugEnabled()) {
log.debug("Supply message has been ignored (" + errMsg + ") [" + demandRoutineInfo(nodeId, supplyMsg) + ']');
}
return;
}
if (log.isDebugEnabled())
log.debug("Received supply message [" + demandRoutineInfo(nodeId, supplyMsg) + ']');
// Check whether there was an error during the supplying process.
Throwable msgExc = null;
final GridDhtPartitionTopology top = grp.topology();
if (supplyMsg.classError() != null)
msgExc = supplyMsg.classError();
else if (supplyMsg.error() != null)
msgExc = supplyMsg.error();
if (msgExc != null) {
GridDhtPartitionMap partMap = top.localPartitionMap();
Set<Integer> unstableParts = supplyMsg.infos().keySet().stream().filter(p -> partMap.get(p) == MOVING).collect(Collectors.toSet());
U.error(log, "Rebalancing routine has failed, some partitions could be unavailable for reading" + " [" + demandRoutineInfo(nodeId, supplyMsg) + ", unavailablePartitions=" + S.compact(unstableParts) + ']', msgExc);
fut.error(nodeId);
return;
}
fut.receivedBytes.addAndGet(supplyMsg.messageSize());
if (grp.sharedGroup()) {
for (GridCacheContext cctx : grp.caches()) {
if (cctx.statisticsEnabled()) {
long keysCnt = supplyMsg.keysForCache(cctx.cacheId());
if (keysCnt != -1)
cctx.cache().metrics0().onRebalancingKeysCountEstimateReceived(keysCnt);
// Cannot be calculated per cache.
cctx.cache().metrics0().onRebalanceBatchReceived(supplyMsg.messageSize());
}
}
} else {
GridCacheContext cctx = grp.singleCacheContext();
if (cctx.statisticsEnabled()) {
if (supplyMsg.estimatedKeysCount() != -1)
cctx.cache().metrics0().onRebalancingKeysCountEstimateReceived(supplyMsg.estimatedKeysCount());
cctx.cache().metrics0().onRebalanceBatchReceived(supplyMsg.messageSize());
}
}
try {
AffinityAssignment aff = grp.affinity().cachedAffinity(topVer);
// Preload.
for (Map.Entry<Integer, CacheEntryInfoCollection> e : supplyMsg.infos().entrySet()) {
int p = e.getKey();
if (aff.get(p).contains(ctx.localNode())) {
GridDhtLocalPartition part;
try {
part = top.localPartition(p, topVer, true);
} catch (GridDhtInvalidPartitionException err) {
assert !topVer.equals(top.lastTopologyChangeVersion());
if (log.isDebugEnabled()) {
log.debug("Failed to get partition for rebalancing [" + "grp=" + grp.cacheOrGroupName() + ", err=" + err + ", p=" + p + ", topVer=" + topVer + ", lastTopVer=" + top.lastTopologyChangeVersion() + ']');
}
continue;
}
assert part != null;
boolean last = supplyMsg.last().containsKey(p);
if (part.state() == MOVING) {
boolean reserved = part.reserve();
assert reserved : "Failed to reserve partition [igniteInstanceName=" + ctx.igniteInstanceName() + ", grp=" + grp.cacheOrGroupName() + ", part=" + part + ']';
part.beforeApplyBatch(last);
try {
long[] byteRcv = { 0 };
GridIterableAdapter<GridCacheEntryInfo> infosWrap = new GridIterableAdapter<>(new IteratorWrapper<GridCacheEntryInfo>(e.getValue().infos().iterator()) {
/** {@inheritDoc} */
@Override
public GridCacheEntryInfo nextX() throws IgniteCheckedException {
GridCacheEntryInfo i = super.nextX();
byteRcv[0] += i.marshalledSize(ctx.cacheObjectContext(i.cacheId()));
return i;
}
});
try {
if (grp.mvccEnabled())
mvccPreloadEntries(topVer, node, p, infosWrap);
else {
preloadEntries(topVer, part, infosWrap);
rebalanceFut.onReceivedKeys(p, e.getValue().infos().size(), node);
}
} catch (GridDhtInvalidPartitionException ignored) {
if (log.isDebugEnabled())
log.debug("Partition became invalid during rebalancing (will ignore): " + p);
}
fut.processed.get(p).increment();
fut.onReceivedBytes(p, byteRcv[0], node);
// If message was last for this partition, then we take ownership.
if (last)
ownPartition(fut, p, nodeId, supplyMsg);
} finally {
part.release();
}
} else {
if (last)
fut.partitionDone(nodeId, p, false);
if (log.isDebugEnabled())
log.debug("Skipping rebalancing partition (state is not MOVING): " + '[' + demandRoutineInfo(nodeId, supplyMsg) + ", p=" + p + ']');
}
} else {
fut.partitionDone(nodeId, p, false);
if (log.isDebugEnabled())
log.debug("Skipping rebalancing partition (affinity changed): " + '[' + demandRoutineInfo(nodeId, supplyMsg) + ", p=" + p + ']');
}
}
// Only request partitions based on latest topology version.
for (Integer miss : supplyMsg.missed()) {
if (aff.get(miss).contains(ctx.localNode()))
fut.partitionMissed(nodeId, miss);
}
for (Integer miss : supplyMsg.missed()) fut.partitionDone(nodeId, miss, false);
GridDhtPartitionDemandMessage d = new GridDhtPartitionDemandMessage(supplyMsg.rebalanceId(), supplyMsg.topologyVersion(), grp.groupId());
d.timeout(grp.preloader().timeout());
if (!fut.isDone()) {
// Send demand message.
try {
ctx.io().sendOrderedMessage(node, d.topic(), d.convertIfNeeded(node.version()), grp.ioPolicy(), grp.preloader().timeout());
if (log.isDebugEnabled())
log.debug("Send next demand message [" + demandRoutineInfo(nodeId, supplyMsg) + "]");
} catch (ClusterTopologyCheckedException e) {
if (log.isDebugEnabled())
log.debug("Supplier has left [" + demandRoutineInfo(nodeId, supplyMsg) + ", errMsg=" + e.getMessage() + ']');
}
} else {
if (log.isDebugEnabled())
log.debug("Will not request next demand message [" + demandRoutineInfo(nodeId, supplyMsg) + ", rebalanceFuture=" + fut + ']');
}
} catch (IgniteSpiException | IgniteCheckedException e) {
fut.error(nodeId);
LT.error(log, e, "Error during rebalancing [" + demandRoutineInfo(nodeId, supplyMsg) + ", err=" + e + ']');
}
} finally {
fut.cancelLock.readLock().unlock();
}
}
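The affinity-related decision above boils down to a single check: an entry batch for partition p is applied only when the cached AffinityAssignment for the supply message's topology version still lists the local node as an owner of p; otherwise the partition is reported as done without preloading. Below is a minimal, self-contained sketch of that gate using plain Java collections as stand-ins for AffinityAssignment and ClusterNode (class and variable names here are illustrative, not Ignite API).

import java.util.List;
import java.util.Map;
import java.util.UUID;

/** Sketch of the ownership check in handleSupplyMessage: plain UUIDs stand in for ClusterNode
 *  and a Map stands in for AffinityAssignment.get(p); names are illustrative only. */
public class SupplyOwnershipSketch {
    public static void main(String[] args) {
        UUID localNode = UUID.randomUUID();
        UUID remoteNode = UUID.randomUUID();

        // Partition -> nodes assigned by affinity on the supply message's topology version.
        Map<Integer, List<UUID>> affinity = Map.of(
            0, List.of(localNode, remoteNode),
            1, List.of(remoteNode));

        for (Map.Entry<Integer, List<UUID>> e : affinity.entrySet()) {
            if (e.getValue().contains(localNode))
                System.out.println("p=" + e.getKey() + ": preload entries, own partition when the batch is last");
            else
                System.out.println("p=" + e.getKey() + ": affinity changed, mark partition done without preloading");
        }
    }
}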
Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class GridDhtAtomicCache, method updatePartialBatch.
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param dhtUpdRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
*/
@Nullable
private void updatePartialBatch(final boolean hasNear, final int firstEntryIdx, final List<GridDhtCacheEntry> entries, final GridCacheVersion ver, final ClusterNode nearNode, @Nullable final List<CacheObject> writeVals, @Nullable final Map<KeyCacheObject, CacheObject> putMap, @Nullable final Collection<KeyCacheObject> rmvKeys, @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap, final GridNearAtomicAbstractUpdateRequest req, final GridNearAtomicUpdateResponse res, final boolean replicate, final DhtAtomicUpdateResult dhtUpdRes, final String taskName, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean sndPrevVal) {
assert putMap == null ^ rmvKeys == null;
assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";
AffinityTopologyVersion topVer = req.topologyVersion();
CacheStorePartialUpdateException storeErr = null;
try {
GridCacheOperation op;
if (putMap != null) {
try {
Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap, new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
@Override
public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
return F.t(val, ver);
}
});
ctx.store().putAll(null, view);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = UPDATE;
} else {
try {
ctx.store().removeAll(null, rmvKeys);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = DELETE;
}
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
final GridDhtAtomicAbstractUpdateFuture dhtFut = dhtUpdRes.dhtFuture();
Collection<Object> failedToUnwrapKeys = null;
// Avoid iterator creation.
for (int i = 0; i < entries.size(); i++) {
GridDhtCacheEntry entry = entries.get(i);
assert entry.lockedByCurrentThread();
if (entry.obsolete()) {
assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;
continue;
}
if (storeErr != null) {
Object key = entry.key();
try {
key = entry.key().value(ctx.cacheObjectContext(), false);
} catch (BinaryInvalidTypeException e) {
if (log.isDebugEnabled()) {
if (failedToUnwrapKeys == null)
failedToUnwrapKeys = new ArrayList<>();
// Limit the number of keys included in the log message.
if (failedToUnwrapKeys.size() < 5)
failedToUnwrapKeys.add(key);
}
}
if (storeErr.failedKeys().contains(key))
continue;
}
try {
// We are holding java-level locks on entries at this point.
CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
assert writeVal != null || op == DELETE : "null write value found.";
// Get readers before innerUpdate (reader cleared after remove).
GridDhtCacheEntry.ReaderId[] readers = entry.readersLocked();
EntryProcessor<Object, Object, Object> entryProcessor = entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal, null,
    /*write-through*/false, /*read-through*/false, /*retval*/sndPrevVal, req.keepBinary(), expiry,
    /*event*/true, /*metrics*/true, /*primary*/true, /*verCheck*/false,
    topVer, null, replicate ? DR_PRIMARY : DR_NONE, CU.TTL_NOT_CHANGED, CU.EXPIRE_TIME_CALCULATE, null,
    /*conflict resolve*/false, /*intercept*/false, taskName, null, null, dhtFut, entryProcessor != null);
assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null : "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;
if (intercept) {
if (op == UPDATE) {
ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
} else {
assert op == DELETE : op;
// Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
}
}
dhtUpdRes.addDeleted(entry, updRes, entries);
if (dhtFut != null) {
dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter(), op);
if (readers != null)
dhtFut.addNearWriteEntries(nearNode, readers, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
}
if (hasNear) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
int idx = firstEntryIdx + i;
if (req.operation() == TRANSFORM) {
res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
} else
res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
if (writeVal != null || entry.hasValue()) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (GridDhtCacheEntry.ReaderId.contains(readers, nearNode.id())) {
// Reader became primary or backup.
entry.removeReader(nearNode.id(), req.messageId());
} else
res.addSkippedIndex(firstEntryIdx + i);
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry cannot become obsolete while holding lock.";
e.printStackTrace();
}
dhtUpdRes.processedEntriesCount(firstEntryIdx + i + 1);
}
if (failedToUnwrapKeys != null) {
log.warning("Failed to get values of keys: " + failedToUnwrapKeys + " (the binary objects will be used instead).");
}
} catch (IgniteCheckedException e) {
res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
}
if (storeErr != null) {
ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());
for (Object failedKey : storeErr.failedKeys()) failed.add(ctx.toCacheKeyObject(failedKey));
res.addFailedKeys(failed, storeErr.getCause());
}
}
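Before the locked entries are touched, the whole batch is written through to the cache store: every value in putMap is paired with the single batch version ver via a read-only view (F.viewReadOnly above) and passed to ctx.store().putAll. The following is a hedged, plain-Java approximation of that pairing step; Ignite's F.viewReadOnly produces a read-only view rather than an eager copy, and the record and names below are illustrative only.

import java.util.Map;
import java.util.stream.Collectors;

/** Plain-Java approximation of pairing each put value with the batch version before store.putAll.
 *  This eager copy is for illustration; the names are not Ignite API. */
public class StoreBatchViewSketch {
    record Versioned(String val, long ver) { }

    public static void main(String[] args) {
        long ver = 1L; // stand-in for the GridCacheVersion assigned to the whole batch
        Map<String, String> putMap = Map.of("k1", "v1", "k2", "v2");

        Map<String, Versioned> view = putMap.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> new Versioned(e.getValue(), ver)));

        view.forEach((k, v) -> System.out.println(k + " -> " + v));
    }
}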
Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class GridDhtAtomicCache, method updateSingle.
/**
* Updates locked entries one-by-one.
*
* @param nearNode Originating node.
* @param hasNear {@code True} if originating node has near cache.
* @param req Update request.
* @param res Update response.
* @param locked Locked entries.
* @param ver Assigned update version.
* @param replicate Whether DR is enabled for that cache.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @param dhtUpdRes Dht update result.
* @throws GridCacheEntryRemovedException Should never be thrown.
*/
private void updateSingle(ClusterNode nearNode, boolean hasNear, GridNearAtomicAbstractUpdateRequest req, GridNearAtomicUpdateResponse res, List<GridDhtCacheEntry> locked, GridCacheVersion ver, boolean replicate, String taskName, @Nullable IgniteCacheExpiryPolicy expiry, boolean sndPrevVal, DhtAtomicUpdateResult dhtUpdRes) throws GridCacheEntryRemovedException {
GridCacheReturn retVal = dhtUpdRes.returnValue();
GridDhtAtomicAbstractUpdateFuture dhtFut = dhtUpdRes.dhtFuture();
Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = dhtUpdRes.deleted();
AffinityTopologyVersion topVer = req.topologyVersion();
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
// Avoid iterator creation.
for (int i = dhtUpdRes.processedEntriesCount(); i < req.size(); i++) {
KeyCacheObject k = req.key(i);
GridCacheOperation op = req.operation();
// No GridCacheEntryRemovedException can be thrown.
try {
GridDhtCacheEntry entry = locked.get(i);
GridCacheVersion newConflictVer = req.conflictVersion(i);
long newConflictTtl = req.conflictTtl(i);
long newConflictExpireTime = req.conflictExpireTime(i);
assert !(newConflictVer instanceof GridCacheVersionEx) : newConflictVer;
Object writeVal = op == TRANSFORM ? req.entryProcessor(i) : req.writeValue(i);
// Get readers before innerUpdate (reader cleared after remove).
GridDhtCacheEntry.ReaderId[] readers = entry.readersLocked();
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal,
    req.invokeArguments(), writeThrough() && !req.skipStore(), !req.skipStore(),
    sndPrevVal || req.returnValue(), req.keepBinary(), expiry,
    /*event*/true, /*metrics*/true, /*primary*/true, /*verCheck*/false,
    topVer, req.filter(), replicate ? DR_PRIMARY : DR_NONE, newConflictTtl, newConflictExpireTime, newConflictVer,
    /*conflictResolve*/true, intercept, taskName, /*prevVal*/null, /*updateCntr*/null, dhtFut, false);
if (dhtFut != null) {
if (updRes.sendToDht()) {
// Send to backups even in case of remove-remove scenarios.
GridCacheVersionConflictContext<?, ?> conflictCtx = updRes.conflictResolveResult();
if (conflictCtx == null)
newConflictVer = null;
else if (conflictCtx.isMerge())
// Conflict version is discarded in case of merge.
newConflictVer = null;
EntryProcessor<Object, Object, Object> entryProcessor = null;
dhtFut.addWriteEntry(affAssignment, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime(), newConflictVer, sndPrevVal, updRes.oldValue(), updRes.updateCounter(), op);
if (readers != null)
dhtFut.addNearWriteEntries(nearNode, readers, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime());
} else {
if (log.isDebugEnabled())
log.debug("Entry did not pass the filter or conflict resolution (will skip write) " + "[entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ']');
}
}
if (hasNear) {
if (updRes.sendToDht()) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
// If the same value as in the request was put, there is no need to send it back.
if (op == TRANSFORM || writeVal != updRes.newValue()) {
res.addNearValue(i, updRes.newValue(), updRes.newTtl(), updRes.conflictExpireTime());
} else
res.addNearTtl(i, updRes.newTtl(), updRes.conflictExpireTime());
if (updRes.newValue() != null) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (GridDhtCacheEntry.ReaderId.contains(readers, nearNode.id())) {
// Reader became primary or backup.
entry.removeReader(nearNode.id(), req.messageId());
} else
res.addSkippedIndex(i);
} else
res.addSkippedIndex(i);
}
if (updRes.removeVersion() != null) {
if (deleted == null)
deleted = new ArrayList<>(req.size());
deleted.add(F.t(entry, updRes.removeVersion()));
}
if (op == TRANSFORM) {
assert !req.returnValue();
IgniteBiTuple<Object, Exception> compRes = updRes.computedResult();
if (compRes != null && (compRes.get1() != null || compRes.get2() != null)) {
if (retVal == null)
retVal = new GridCacheReturn(nearNode.isLocal());
retVal.addEntryProcessResult(ctx, k, null, compRes.get1(), compRes.get2(), req.keepBinary());
}
} else {
// Create only once.
if (retVal == null) {
CacheObject ret = updRes.oldValue();
retVal = new GridCacheReturn(ctx, nearNode.isLocal(), req.keepBinary(), U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext())), req.returnValue() ? ret : null, updRes.success());
}
}
} catch (IgniteCheckedException e) {
res.addFailedKey(k, e);
}
dhtUpdRes.processedEntriesCount(i + 1);
}
dhtUpdRes.returnValue(retVal);
dhtUpdRes.deleted(deleted);
dhtUpdRes.dhtFuture(dhtFut);
}
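For near-cache originators, the hasNear branch above distinguishes three cases per entry: the partition does not belong to the near node (send the value or TTL back and, when there is a value, register the node as a reader), the node used to be a reader but the partition now belongs to it (drop the reader), or nothing needs to be sent (skip the index). A compact sketch of that decision follows; the two booleans stand in for ctx.affinity().partitionBelongs(...) and GridDhtCacheEntry.ReaderId.contains(...), and all names are illustrative rather than Ignite API.

/** Sketch of the per-entry near-cache decision made in updateSingle/updatePartialBatch. */
public class NearDecisionSketch {
    enum Action { SEND_VALUE_AND_ADD_READER, REMOVE_READER, SKIP }

    static Action decide(boolean partitionBelongsToNearNode, boolean nearNodeWasReader) {
        if (!partitionBelongsToNearNode)
            return Action.SEND_VALUE_AND_ADD_READER; // near node keeps a copy, so track it as a reader
        if (nearNodeWasReader)
            return Action.REMOVE_READER;             // reader became primary or backup
        return Action.SKIP;                          // primary/backup already owns the value
    }

    public static void main(String[] args) {
        System.out.println(decide(false, false)); // SEND_VALUE_AND_ADD_READER
        System.out.println(decide(true, true));   // REMOVE_READER
        System.out.println(decide(true, false));  // SKIP
    }
}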
Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class JdbcRequestHandler, method getPartitionsMap.
/**
* Gets the partition map for a cache.
*
* @param cacheDesc Cache descriptor.
* @param affVer Affinity topology version.
* @return Partitions mapping for the cache.
*/
private Map<UUID, Set<Integer>> getPartitionsMap(DynamicCacheDescriptor cacheDesc, AffinityTopologyVersion affVer) {
GridCacheContext cacheCtx = connCtx.kernalContext().cache().context().cacheContext(cacheDesc.cacheId());
AffinityAssignment assignment = cacheCtx.affinity().assignment(affVer);
Set<ClusterNode> nodes = assignment.primaryPartitionNodes();
HashMap<UUID, Set<Integer>> res = new HashMap<>(nodes.size());
for (ClusterNode node : nodes) {
UUID nodeId = node.id();
Set<Integer> parts = assignment.primaryPartitions(nodeId);
res.put(nodeId, parts);
}
return res;
}
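The returned Map<UUID, Set<Integer>> groups primary partitions by node id, taken straight from the AffinityAssignment for the requested topology version. As a small illustration, a hypothetical consumer could invert that map to find which node is primary for a single partition; the helper and class names below are made up for the example and are not part of the JDBC handler.

import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;

/** Hypothetical consumer of getPartitionsMap's result: look up the primary node of one partition. */
public class PartitionLookupSketch {
    static Optional<UUID> primaryFor(Map<UUID, Set<Integer>> partsByNode, int part) {
        return partsByNode.entrySet().stream()
            .filter(e -> e.getValue().contains(part))
            .map(Map.Entry::getKey)
            .findFirst();
    }

    public static void main(String[] args) {
        UUID n1 = UUID.randomUUID(), n2 = UUID.randomUUID();
        Map<UUID, Set<Integer>> partsByNode = Map.of(n1, Set.of(0, 2), n2, Set.of(1, 3));

        System.out.println(primaryFor(partsByNode, 2).orElse(null)); // n1
        System.out.println(primaryFor(partsByNode, 5).orElse(null)); // null: no primary known
    }
}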
Use of org.apache.ignite.internal.processors.affinity.AffinityAssignment in project ignite by apache.
The class GridNearTxQueryEnlistFuture, method map.
/**
* @param topLocked Topology locked flag.
*/
@Override
protected void map(final boolean topLocked) {
try {
Map<ClusterNode, IntArrayHolder> map;
boolean locallyMapped = false;
AffinityAssignment assignment = cctx.affinity().assignment(topVer);
if (parts != null) {
map = U.newHashMap(parts.length);
for (int i = 0; i < parts.length; i++) {
ClusterNode pNode = assignment.get(parts[i]).get(0);
map.computeIfAbsent(pNode, n -> new IntArrayHolder()).add(parts[i]);
updateMappings(pNode);
if (!locallyMapped && pNode.isLocal())
locallyMapped = true;
}
} else {
Set<ClusterNode> nodes = assignment.primaryPartitionNodes();
map = U.newHashMap(nodes.size());
for (ClusterNode pNode : nodes) {
map.put(pNode, null);
updateMappings(pNode);
if (!locallyMapped && pNode.isLocal())
locallyMapped = true;
}
}
if (map.isEmpty())
throw new ClusterTopologyServerNotFoundException("Failed to find data nodes for cache (all partition " + "nodes left the grid). [fut=" + toString() + ']');
int idx = 0;
boolean first = true, clientFirst = false;
GridDhtTxQueryEnlistFuture localFut = null;
for (Map.Entry<ClusterNode, IntArrayHolder> entry : map.entrySet()) {
MiniFuture mini;
ClusterNode node = entry.getKey();
IntArrayHolder parts = entry.getValue();
add(mini = new MiniFuture(node));
if (node.isLocal()) {
// The common tx logic expects non-zero mini-future ids (negative local and positive non-local).
localFut = new GridDhtTxQueryEnlistFuture(cctx.localNode().id(), lockVer, mvccSnapshot, threadId, futId, -(++idx), tx, cacheIds, parts == null ? null : parts.array(), schema, qry, params, flags, pageSize, remainingTime(), cctx);
updateLocalFuture(localFut);
localFut.listen(new CI1<IgniteInternalFuture<Long>>() {
@Override
public void apply(IgniteInternalFuture<Long> fut) {
assert fut.error() != null || fut.result() != null : fut;
try {
clearLocalFuture((GridDhtTxQueryEnlistFuture) fut);
GridNearTxQueryEnlistResponse res = fut.error() == null ? createResponse(fut) : null;
mini.onResult(res, fut.error());
} catch (IgniteCheckedException e) {
mini.onResult(null, e);
} finally {
CU.unwindEvicts(cctx);
}
}
});
} else {
if (first) {
clientFirst = cctx.localNode().isClient() && !topLocked && !tx.hasRemoteLocks();
first = false;
}
// The common tx logic expects non-zero mini-future ids (negative local and positive non-local).
GridNearTxQueryEnlistRequest req = new GridNearTxQueryEnlistRequest(cctx.cacheId(), threadId, futId, ++idx, topVer, lockVer, mvccSnapshot, cacheIds, parts == null ? null : parts.array(), schema, qry, params, flags, pageSize, remainingTime(), tx.remainingTime(), tx.taskNameHash(), clientFirst);
sendRequest(req, node.id());
}
}
markInitialized();
if (localFut != null)
localFut.init();
} catch (Throwable e) {
onDone(e);
if (e instanceof Error)
throw (Error) e;
}
}
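When explicit partitions are requested, the future maps each one to its primary node via assignment.get(parts[i]).get(0) (the first node in a partition's affinity list is its primary) and batches partitions per node, sending one enlist request, or creating one local future, per node. Here is a standalone sketch of that grouping; plain Strings and lists stand in for ClusterNode, AffinityAssignment and IntArrayHolder, and the names are illustrative only.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Sketch of the partition-to-primary grouping performed in GridNearTxQueryEnlistFuture.map(). */
public class EnlistMappingSketch {
    public static void main(String[] args) {
        // Partition index -> affinity list (primary first, then backups).
        List<List<String>> assignment = List.of(
            List.of("nodeA", "nodeB"),
            List.of("nodeB", "nodeA"),
            List.of("nodeA", "nodeC"));

        int[] requestedParts = {0, 1, 2};

        Map<String, List<Integer>> partsByPrimary = new HashMap<>();
        for (int p : requestedParts)
            partsByPrimary.computeIfAbsent(assignment.get(p).get(0), n -> new ArrayList<>()).add(p);

        // One request (or a local future) would be created per entry of this map.
        partsByPrimary.forEach((node, parts) -> System.out.println(node + " -> " + parts));
    }
}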