Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtTransactionalCacheAdapter, method startRemoteTx.
/**
* @param nodeId Primary node ID.
* @param req Request.
* @param res Response.
* @return Remote transaction.
* @throws IgniteCheckedException If failed.
* @throws GridDistributedLockCancelledException If lock has been cancelled.
*/
@Nullable
private GridDhtTxRemote startRemoteTx(UUID nodeId, GridDhtLockRequest req, GridDhtLockResponse res) throws IgniteCheckedException, GridDistributedLockCancelledException {
List<KeyCacheObject> keys = req.keys();
GridDhtTxRemote tx = null;
int size = F.size(keys);
for (int i = 0; i < size; i++) {
KeyCacheObject key = keys.get(i);
if (key == null)
continue;
IgniteTxKey txKey = ctx.txKey(key);
if (log.isDebugEnabled())
log.debug("Unmarshalled key: " + key);
GridDistributedCacheEntry entry = null;
while (true) {
try {
int part = ctx.affinity().partition(key);
GridDhtLocalPartition locPart = ctx.topology().localPartition(part, req.topologyVersion(), false);
if (locPart == null || !locPart.reserve()) {
if (log.isDebugEnabled())
log.debug("Local partition for given key is already evicted (will add to invalid " + "partition list) [key=" + key + ", part=" + part + ", locPart=" + locPart + ']');
res.addInvalidPartition(part);
// Invalidate key in near cache, if any.
if (isNearEnabled(cacheCfg))
obsoleteNearEntry(key);
break;
}
try {
// Handle implicit locks for pessimistic transactions.
if (req.inTx()) {
if (tx == null)
tx = ctx.tm().tx(req.version());
if (tx == null) {
tx = new GridDhtTxRemote(ctx.shared(), req.nodeId(), req.futureId(), nodeId, req.nearXidVersion(), req.topologyVersion(), req.version(), /*commitVer*/ null, ctx.systemTx(), ctx.ioPolicy(), PESSIMISTIC, req.isolation(), req.isInvalidate(), req.timeout(), req.txSize(), securitySubjectId(ctx), req.taskNameHash(), !req.skipStore() && req.storeUsed(), req.txLabel());
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !ctx.tm().onStarted(tx))
throw new IgniteTxRollbackCheckedException("Failed to acquire lock (transaction " + "has been completed) [ver=" + req.version() + ", tx=" + tx + ']');
}
tx.addWrite(ctx, NOOP, txKey, null, null, req.accessTtl(), req.skipStore(), req.keepBinary());
}
entry = entryExx(key, req.topologyVersion());
// Add remote candidate before reordering.
entry.addRemote(req.nodeId(), nodeId, req.threadId(), req.version(), tx != null, tx != null && tx.implicitSingle(), null);
// Invalidate key in near cache, if any.
if (isNearEnabled(cacheCfg) && req.invalidateNearEntry(i))
invalidateNearEntry(key, req.version());
// Get entry info after candidate is added.
if (req.needPreloadKey(i)) {
entry.unswap();
GridCacheEntryInfo info = entry.info();
if (info != null && !info.isNew() && !info.isDeleted())
res.addPreloadEntry(info);
}
// Double-check in case the sender node left the grid.
if (ctx.discovery().node(req.nodeId()) == null) {
if (log.isDebugEnabled())
log.debug("Node requesting lock left grid (lock request will be ignored): " + req);
entry.removeLock(req.version());
if (tx != null) {
tx.clearEntry(txKey);
// The transaction may already be in COMMITTING state, but this lock is never acquired.
if (tx.state() == COMMITTING)
tx.forceCommit();
else
tx.rollbackRemoteTx();
}
return null;
}
// Entry is legit.
break;
} finally {
locPart.release();
}
} catch (GridDhtInvalidPartitionException e) {
if (log.isDebugEnabled())
log.debug("Received invalid partition exception [e=" + e + ", req=" + req + ']');
res.addInvalidPartition(e.partition());
// Invalidate key in near cache, if any.
if (isNearEnabled(cacheCfg))
obsoleteNearEntry(key);
if (tx != null) {
tx.clearEntry(txKey);
if (log.isDebugEnabled())
log.debug("Cleared invalid entry from remote transaction (will skip) [entry=" + entry + ", tx=" + tx + ']');
}
break;
} catch (GridCacheEntryRemovedException ignored) {
assert entry.obsoleteVersion() != null : "Obsolete flag not set on removed entry: " + entry;
if (log.isDebugEnabled())
log.debug("Received entry removed exception (will retry on renewed entry): " + entry);
if (tx != null) {
tx.clearEntry(txKey);
if (log.isDebugEnabled())
log.debug("Cleared removed entry from remote transaction (will retry) [entry=" + entry + ", tx=" + tx + ']');
}
}
}
}
if (tx != null && tx.empty()) {
if (log.isDebugEnabled())
log.debug("Rolling back remote DHT transaction because it is empty [req=" + req + ", res=" + res + ']');
tx.rollbackRemoteTx();
tx = null;
}
return tx;
}
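Taken together, the loop above resolves the partition for each key, reserves the local partition, and reports any partition it cannot serve back to the requester via addInvalidPartition so the key can be remapped. The following is a minimal sketch of that handling shape only; the helper method, its name, and its boolean return convention are hypothetical, and it assumes the same fields and internal APIs (ctx, req, res) used in the snippet.
// Minimal sketch (hypothetical helper) of the invalid-partition handling shape used above.
// Assumes the same internal fields and APIs as the snippet; the caller must release the
// reserved partition after the entry is processed, as the finally block above does.
private boolean reserveOrMarkInvalid(KeyCacheObject key, GridDhtLockRequest req, GridDhtLockResponse res) {
    try {
        int part = ctx.affinity().partition(key);
        GridDhtLocalPartition locPart = ctx.topology().localPartition(part, req.topologyVersion(), false);

        if (locPart == null || !locPart.reserve()) {
            // Partition is missing or being evicted: report it so the requester can remap the key.
            res.addInvalidPartition(part);

            return false;
        }

        return true;
    }
    catch (GridDhtInvalidPartitionException e) {
        // Same outcome when the topology lookup itself rejects the partition.
        res.addInvalidPartition(e.partition());

        return false;
    }
}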
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridPartitionedGetFuture, method localGet.
/**
* @param topVer Topology version.
* @param key Key.
* @param part Partition.
* @param locVals Local values.
* @return {@code True} if there is no need to search for the value further.
*/
private boolean localGet(AffinityTopologyVersion topVer, KeyCacheObject key, int part, Map<K, V> locVals) {
assert cctx.affinityNode() : this;
GridDhtCacheAdapter<K, V> cache = cache();
boolean readNoEntry = cctx.readNoEntry(expiryPlc, false);
boolean evt = !skipVals;
while (true) {
cctx.shared().database().checkpointReadLock();
try {
boolean skipEntry = readNoEntry;
EntryGetResult getRes = null;
CacheObject v = null;
GridCacheVersion ver = null;
if (readNoEntry) {
KeyCacheObject key0 = (KeyCacheObject) cctx.cacheObjects().prepareForCache(key, cctx);
CacheDataRow row = cctx.mvccEnabled() ? cctx.offheap().mvccRead(cctx, key0, mvccSnapshot()) : cctx.offheap().read(cctx, key0);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
v = row.value();
if (needVer)
ver = row.version();
if (evt) {
cctx.events().readEvent(key, null, txLbl, row.value(), taskName, !deserializeBinary);
}
} else
skipEntry = false;
}
}
if (!skipEntry) {
GridCacheEntryEx entry = cache.entryEx(key);
// If our DHT cache has the value, then we peek it.
if (entry != null) {
boolean isNew = entry.isNewLocked();
if (needVer) {
getRes = entry.innerGetVersioned(null, null, /*update-metrics*/ false, /*event*/ evt, null, taskName, expiryPlc, !deserializeBinary, null);
if (getRes != null) {
v = getRes.value();
ver = getRes.version();
}
} else {
v = entry.innerGet(null, null, /*read-through*/ false, /*update-metrics*/ false, /*event*/ evt, null, taskName, expiryPlc, !deserializeBinary);
}
entry.touch();
// Entry was not in memory or in swap, so we remove it from cache.
if (v == null) {
if (isNew && entry.markObsoleteIfEmpty(ver))
cache.removeEntry(entry);
}
}
}
if (v != null) {
cctx.addResult(locVals, key, v, skipVals, keepCacheObjects, deserializeBinary, true, getRes, ver, 0, 0, needVer, U.deploymentClassLoader(cctx.kernalContext(), deploymentLdrId));
return true;
}
boolean topStable = cctx.isReplicated() || topVer.equals(cctx.topology().lastTopologyChangeVersion());
// Entry not found, do not continue search if topology did not change and there is no store.
if (!cctx.readThroughConfigured() && (topStable || partitionOwned(part))) {
if (!skipVals && cctx.statisticsEnabled())
cache.metrics0().onRead(false);
return true;
}
return false;
} catch (GridCacheEntryRemovedException ignored) {
// No-op, will retry.
} catch (GridDhtInvalidPartitionException ignored) {
return false;
} catch (IgniteCheckedException e) {
onDone(e);
return true;
} finally {
cctx.shared().database().checkpointReadUnlock();
}
}
}
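As a usage note, the boolean contract of localGet above is what lets the future short-circuit network round trips: true means the key was resolved locally (or the future already completed with an error), false means the caller should map the key to a remote node. A hedged sketch of such a call site follows; 'keys', 'remoteKeys', 'locVals' and 'topVer' are illustrative locals, not part of the Ignite API.
// Hypothetical call site: consume the boolean result of localGet and collect the keys
// that must be fetched remotely.
for (KeyCacheObject key : keys) {
    int part = cctx.affinity().partition(key);

    if (!localGet(topVer, key, part, locVals))
        remoteKeys.add(key); // Not resolvable locally: schedule a remote GET for this key.
}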
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtTxLocalAdapter, method addEntry.
/**
* @param msgId Message ID.
* @param e Entry to add.
* @return Future for active transactions for the time when reader was added.
* @throws IgniteCheckedException If failed.
*/
@Nullable
public IgniteInternalFuture<Boolean> addEntry(long msgId, IgniteTxEntry e) throws IgniteCheckedException {
init();
TransactionState state = state();
assert state == PREPARING : "Invalid tx state for " + "adding entry [msgId=" + msgId + ", e=" + e + ", tx=" + this + ']';
e.unmarshal(cctx, false, cctx.deploy().globalLoader());
checkInternal(e.txKey());
GridCacheContext cacheCtx = e.context();
GridDhtCacheAdapter dhtCache = cacheCtx.isNear() ? cacheCtx.near().dht() : cacheCtx.dht();
try {
IgniteTxEntry existing = entry(e.txKey());
if (existing != null) {
// Absolutely must set operation, as default is DELETE.
existing.op(e.op());
existing.value(e.value(), e.hasWriteValue(), e.hasReadValue());
existing.entryProcessors(e.entryProcessors());
existing.ttl(e.ttl());
existing.filters(e.filters());
existing.expiry(e.expiry());
existing.conflictExpireTime(e.conflictExpireTime());
existing.conflictVersion(e.conflictVersion());
} else {
existing = e;
addActiveCache(dhtCache.context(), false);
GridDhtCacheEntry cached = dhtCache.entryExx(existing.key(), topologyVersion());
existing.cached(cached);
GridCacheVersion explicit = existing.explicitVersion();
if (explicit != null) {
GridCacheVersion dhtVer = cctx.mvcc().mappedVersion(explicit);
if (dhtVer == null)
throw new IgniteCheckedException("Failed to find dht mapping for explicit entry version: " + existing);
existing.explicitVersion(dhtVer);
}
txState.addEntry(existing);
if (log.isDebugEnabled())
log.debug("Added entry to transaction: " + existing);
}
return addReader(msgId, dhtCache.entryExx(existing.key()), existing, topologyVersion());
} catch (GridDhtInvalidPartitionException ex) {
throw new IgniteCheckedException(ex);
}
}
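Unlike the read paths above, addEntry does not swallow GridDhtInvalidPartitionException: it wraps it into IgniteCheckedException, since at PREPARING time a missing partition should fail the whole prepare and force a retry on a newer topology. A minimal sketch of a caller reacting to that, with a hypothetical onError handler, is shown below.
// Hypothetical caller of addEntry: a wrapped GridDhtInvalidPartitionException surfaces as a
// prepare failure, so the near node can remap the key on the new topology.
try {
    tx.addEntry(msgId, txEntry);
}
catch (IgniteCheckedException e) {
    onError(e); // hypothetical error handler that fails the prepare future
}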
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridPartitionedSingleGetFuture, method localGet.
/**
* @param topVer Topology version.
* @param key Key.
* @param part Partition.
* @return {@code True} if future completed.
*/
private boolean localGet(AffinityTopologyVersion topVer, KeyCacheObject key, int part) {
assert cctx.affinityNode() : this;
GridDhtCacheAdapter colocated = cctx.dht();
boolean readNoEntry = cctx.readNoEntry(expiryPlc, false);
boolean evt = !skipVals;
while (true) {
cctx.shared().database().checkpointReadLock();
try {
CacheObject v = null;
GridCacheVersion ver = null;
boolean skipEntry = readNoEntry;
if (readNoEntry) {
KeyCacheObject key0 = (KeyCacheObject) cctx.cacheObjects().prepareForCache(key, cctx);
CacheDataRow row = mvccSnapshot != null ? cctx.offheap().mvccRead(cctx, key0, mvccSnapshot) : cctx.offheap().read(cctx, key0);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
v = row.value();
if (needVer)
ver = row.version();
if (evt) {
cctx.events().readEvent(key, null, txLbl, row.value(), taskName, !deserializeBinary);
}
} else
skipEntry = false;
}
}
if (!skipEntry) {
GridCacheEntryEx entry = colocated.entryEx(key);
// If our DHT cache has the value, then we peek it.
if (entry != null) {
boolean isNew = entry.isNewLocked();
if (needVer) {
EntryGetResult res = entry.innerGetVersioned(null, null, /*update-metrics*/ false, /*event*/ evt, null, taskName, expiryPlc, true, null);
if (res != null) {
v = res.value();
ver = res.version();
}
} else {
v = entry.innerGet(null, null, /*read-through*/ false, /*update-metrics*/ false, /*event*/ evt, null, taskName, expiryPlc, true);
}
entry.touch();
// Entry was not in memory or in swap, so we remove it from cache.
if (v == null) {
if (isNew && entry.markObsoleteIfEmpty(ver))
colocated.removeEntry(entry);
}
}
}
if (v != null) {
if (!skipVals && cctx.statisticsEnabled())
cctx.cache().metrics0().onRead(true);
if (!skipVals)
setResult(v, ver);
else
setSkipValueResult(true, ver);
return true;
}
boolean topStable = cctx.isReplicated() || topVer.equals(cctx.topology().lastTopologyChangeVersion());
// Entry not found, complete future with null result if topology did not change and there is no store.
if (!cctx.readThroughConfigured() && (topStable || partitionOwned(part))) {
if (!skipVals && cctx.statisticsEnabled())
colocated.metrics0().onRead(false);
if (skipVals)
setSkipValueResult(false, null);
else
setResult(null, null);
return true;
}
return false;
} catch (GridCacheEntryRemovedException ignored) {
// No-op, will retry.
} catch (GridDhtInvalidPartitionException ignored) {
return false;
} catch (IgniteCheckedException e) {
onDone(e);
return true;
} finally {
cctx.shared().database().checkpointReadUnlock();
}
}
}
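Both localGet implementations share the same retry skeleton: take the checkpoint read lock for each attempt, retry on GridCacheEntryRemovedException, and bail out with false on GridDhtInvalidPartitionException so the get falls back to a remote node. A condensed, illustrative skeleton (the body of a hypothetical boolean-returning method) follows; only the error-handling shape is shown.
// Illustrative skeleton shared by both localGet methods above.
while (true) {
    cctx.shared().database().checkpointReadLock();

    try {
        // ... read the row or entry; return true when resolved locally,
        // or false when the key is absent and the caller should go remote ...
        return true;
    }
    catch (GridCacheEntryRemovedException ignored) {
        // Entry was concurrently removed: loop and retry with a fresh entry.
    }
    catch (GridDhtInvalidPartitionException ignored) {
        return false; // Partition left this node: let the caller fall back to a remote get.
    }
    finally {
        cctx.shared().database().checkpointReadUnlock();
    }
}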
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDistributedTxRemoteAdapter, method commitIfLocked.
/**
* @throws IgniteCheckedException If commit failed.
*/
private void commitIfLocked() throws IgniteCheckedException {
if (state() == COMMITTING) {
for (IgniteTxEntry txEntry : writeEntries()) {
assert txEntry != null : "Missing transaction entry for tx: " + this;
while (true) {
GridCacheEntryEx entry = txEntry.cached();
assert entry != null : "Missing cached entry for transaction entry: " + txEntry;
try {
GridCacheVersion ver = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer;
// If locks haven't been acquired yet, keep waiting.
if (!entry.lockedBy(ver)) {
if (log.isDebugEnabled())
log.debug("Transaction does not own lock for entry (will wait) [entry=" + entry + ", tx=" + this + ']');
return;
}
// While.
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry while committing (will retry): " + txEntry);
try {
txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion()));
} catch (GridDhtInvalidPartitionException e) {
break;
}
}
}
}
// Only one thread gets to commit.
if (COMMIT_ALLOWED_UPD.compareAndSet(this, 0, 1)) {
IgniteCheckedException err = null;
Map<IgniteTxKey, IgniteTxEntry> writeMap = txState.writeMap();
GridCacheReturnCompletableWrapper wrapper = null;
if (!F.isEmpty(writeMap) || mvccSnapshot != null) {
GridCacheReturn ret = null;
if (!near() && !local() && onePhaseCommit()) {
if (needReturnValue()) {
ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, null, true);
// Originating node.
UUID origNodeId = otherNodeId();
cctx.tm().addCommittedTxReturn(this, wrapper = new GridCacheReturnCompletableWrapper(!cctx.localNodeId().equals(origNodeId) ? origNodeId : null));
} else
cctx.tm().addCommittedTx(this, this.nearXidVersion(), null);
}
// Register this transaction as completed prior to write-phase to
// ensure proper lock ordering for removed entries.
cctx.tm().addCommittedTx(this);
AffinityTopologyVersion topVer = topologyVersion();
WALPointer ptr = null;
cctx.database().checkpointReadLock();
// Reserved partitions (necessary to prevent race due to updates in RENTING state).
Set<GridDhtLocalPartition> reservedParts = new HashSet<>();
try {
assert !txState.mvccEnabled() || mvccSnapshot != null : "Mvcc is not initialized: " + this;
Collection<IgniteTxEntry> entries = near() || cctx.snapshot().needTxReadLogging() ? allEntries() : writeEntries();
// Data entry to write to WAL and associated with it TxEntry.
List<T2<DataEntry, IgniteTxEntry>> dataEntries = null;
batchStoreCommit(writeMap().values());
// Note that for near transactions we grab all entries.
for (IgniteTxEntry txEntry : entries) {
GridCacheContext cacheCtx = txEntry.context();
// Prevent stale updates.
GridDhtLocalPartition locPart = cacheCtx.group().topology().localPartition(txEntry.cached().partition());
if (!near()) {
if (locPart == null)
continue;
if (!reservedParts.contains(locPart) && locPart.reserve()) {
assert locPart.state() != EVICTED && locPart.reservations() > 0 : locPart;
reservedParts.add(locPart);
}
if (locPart.state() == RENTING || locPart.state() == EVICTED) {
LT.warn(log(), "Skipping update to partition that is concurrently evicting " + "[grp=" + cacheCtx.group().cacheOrGroupName() + ", part=" + locPart + "]");
continue;
}
}
boolean replicate = cacheCtx.isDrEnabled();
while (true) {
try {
GridCacheEntryEx cached = txEntry.cached();
if (cached == null)
txEntry.cached(cached = cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
if (near() && cacheCtx.dr().receiveEnabled()) {
cached.markObsolete(xidVer);
break;
}
GridNearCacheEntry nearCached = null;
if (updateNearCache(cacheCtx, txEntry.key(), topVer))
nearCached = cacheCtx.dht().near().peekExx(txEntry.key());
if (!F.isEmpty(txEntry.entryProcessors()))
txEntry.cached().unswap(false);
IgniteBiTuple<GridCacheOperation, CacheObject> res = applyTransformClosures(txEntry, false, ret);
GridCacheOperation op = res.get1();
CacheObject val = res.get2();
GridCacheVersion explicitVer = txEntry.conflictVersion();
if (explicitVer == null)
explicitVer = writeVersion();
if (txEntry.ttl() == CU.TTL_ZERO)
op = DELETE;
boolean conflictNeedResolve = cacheCtx.conflictNeedResolve();
GridCacheVersionConflictContext conflictCtx = null;
if (conflictNeedResolve) {
IgniteBiTuple<GridCacheOperation, GridCacheVersionConflictContext> drRes = conflictResolve(op, txEntry, val, explicitVer, cached);
assert drRes != null;
conflictCtx = drRes.get2();
if (conflictCtx.isUseOld())
op = NOOP;
else if (conflictCtx.isUseNew()) {
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
} else if (conflictCtx.isMerge()) {
op = drRes.get1();
val = txEntry.context().toCacheObject(conflictCtx.mergeValue());
explicitVer = writeVersion();
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
}
} else
// Nullify explicit version so that innerSet/innerRemove will work as usual.
explicitVer = null;
GridCacheVersion dhtVer = cached.isNear() ? writeVersion() : null;
if (!near() && cacheCtx.group().persistenceEnabled() && cacheCtx.group().walEnabled() && op != NOOP && op != RELOAD && (op != READ || cctx.snapshot().needTxReadLogging())) {
if (dataEntries == null)
dataEntries = new ArrayList<>(entries.size());
dataEntries.add(new T2<>(new DataEntry(cacheCtx.cacheId(), txEntry.key(), val, op, nearXidVersion(), addConflictVersion(writeVersion(), txEntry.conflictVersion()), 0, txEntry.key().partition(), txEntry.updateCounter(), DataEntry.flags(CU.txOnPrimary(this))), txEntry));
}
if (op == CREATE || op == UPDATE) {
// Invalidate only for near nodes (backups cannot be invalidated).
if (isSystemInvalidate() || (isInvalidate() && cacheCtx.isNear()))
cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, resolveTaskName(), dhtVer, txEntry.updateCounter());
else {
assert val != null : txEntry;
GridCacheUpdateTxResult updRes = cached.innerSet(this, eventNodeId(), nodeId, val, false, false, txEntry.ttl(), true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, txEntry.conflictExpireTime(), near() ? null : explicitVer, resolveTaskName(), dhtVer, txEntry.updateCounter());
txEntry.updateCounter(updRes.updateCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
// Keep near entry up to date.
if (nearCached != null) {
CacheObject val0 = cached.valueBytes();
nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer);
}
}
} else if (op == DELETE) {
GridCacheUpdateTxResult updRes = cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, resolveTaskName(), dhtVer, txEntry.updateCounter());
txEntry.updateCounter(updRes.updateCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
// Keep near entry up to date.
if (nearCached != null)
nearCached.updateOrEvict(xidVer, null, 0, 0, nodeId, topVer);
} else if (op == RELOAD) {
CacheObject reloaded = cached.innerReload();
if (nearCached != null) {
nearCached.innerReload();
nearCached.updateOrEvict(cached.version(), reloaded, cached.expireTime(), cached.ttl(), nodeId, topVer);
}
} else if (op == READ) {
assert near();
if (log.isDebugEnabled())
log.debug("Ignoring READ entry when committing: " + txEntry);
} else // No-op.
{
if (conflictCtx == null || !conflictCtx.isUseOld()) {
if (txEntry.ttl() != CU.TTL_NOT_CHANGED)
cached.updateTtl(null, txEntry.ttl());
if (nearCached != null) {
CacheObject val0 = cached.valueBytes();
nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer);
}
}
}
// Sanity check after values are set, to make sure removed entries were correctly replaced.
assert txEntry.op() == READ || onePhaseCommit() || // If the lock candidate is absent, the lock was explicit and we simply allow the commit to proceed.
!cached.hasLockCandidateUnsafe(xidVer) || cached.lockedByUnsafe(xidVer) : "Transaction does not own lock for commit [entry=" + cached + ", tx=" + this + ']';
// Break out of while loop.
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Attempting to commit a removed entry (will retry): " + txEntry);
// Renew cached entry.
txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
}
}
}
// Apply cache size deltas.
applyTxSizes();
TxCounters txCntrs = txCounters(false);
// Apply update counters.
if (txCntrs != null)
cctx.tm().txHandler().applyPartitionsUpdatesCounters(txCntrs.updateCounters());
cctx.mvccCaching().onTxFinished(this, true);
if (!near() && !F.isEmpty(dataEntries) && cctx.wal() != null) {
// Set new update counters for data entries received from persisted tx entries.
List<DataEntry> entriesWithCounters = dataEntries.stream().map(tuple -> tuple.get1().partitionCounter(tuple.get2().updateCounter())).collect(Collectors.toList());
ptr = cctx.wal().log(new DataRecord(entriesWithCounters));
}
if (ptr != null)
cctx.wal().flush(ptr, false);
} catch (Throwable ex) {
state(UNKNOWN);
if (X.hasCause(ex, NodeStoppingException.class)) {
U.warn(log, "Failed to commit transaction, node is stopping [tx=" + CU.txString(this) + ", err=" + ex + ']');
return;
}
err = heuristicException(ex);
try {
uncommit();
} catch (Throwable e) {
err.addSuppressed(e);
}
throw err;
} finally {
for (GridDhtLocalPartition locPart : reservedParts) locPart.release();
cctx.database().checkpointReadUnlock();
if (wrapper != null)
wrapper.initialize(ret);
}
}
cctx.tm().commitTx(this);
state(COMMITTED);
}
}
}
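A key detail of commitIfLocked is the partition reservation discipline: each touched local partition is reserved before updates are applied, updates to RENTING or EVICTED partitions are skipped, and every reservation is released in the finally block. The sketch below condenses that discipline; it reuses only calls visible in the snippet above and is illustrative, not drop-in code.
// Condensed sketch of the reservation discipline used in commitIfLocked.
Set<GridDhtLocalPartition> reservedParts = new HashSet<>();

try {
    for (IgniteTxEntry txEntry : writeEntries()) {
        GridDhtLocalPartition locPart = txEntry.context().group().topology().localPartition(txEntry.cached().partition());

        if (locPart == null)
            continue; // Partition is no longer hosted on this node: skip the update.

        if (!reservedParts.contains(locPart) && locPart.reserve())
            reservedParts.add(locPart);

        if (locPart.state() == RENTING || locPart.state() == EVICTED)
            continue; // Concurrently evicting: skip instead of applying a stale update.

        // ... apply the update for txEntry ...
    }
}
finally {
    // Release every reservation exactly once, whether or not the commit succeeded.
    for (GridDhtLocalPartition p : reservedParts)
        p.release();
}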