Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtGetFuture, method map.
/**
 * @param key Key.
 * @param forceKeys Force keys flag.
 * @return {@code True} if mapped.
 */
private boolean map(KeyCacheObject key, boolean forceKeys) {
    try {
        int keyPart = cctx.affinity().partition(key);

        if (cctx.mvccEnabled()) {
            boolean noOwners = cctx.topology().owners(keyPart, topVer).isEmpty();

            // A partition with no owners would produce a request with no results,
            // so the forceKeys flag is set to true here.
            if (noOwners)
                forceKeys = true;
        }

        GridDhtLocalPartition part = topVer.topologyVersion() > 0 ?
            cache().topology().localPartition(keyPart, topVer, true) :
            cache().topology().localPartition(keyPart);

        if (part == null)
            return false;

        if (parts == null || !F.contains(parts, part.id())) {
            // By reserving, we make sure that partition won't be unloaded while processed.
            if (part.reserve()) {
                if (forceKeys || (part.state() == OWNING || part.state() == LOST)) {
                    parts = parts == null ? new int[1] : Arrays.copyOf(parts, parts.length + 1);

                    parts[parts.length - 1] = part.id();

                    return true;
                }
                else {
                    part.release();

                    return false;
                }
            }
            else
                return false;
        }
        else
            return true;
    }
    catch (GridDhtInvalidPartitionException e) {
        if (log.isDebugEnabled())
            log.debug("Attempted to create a partition which does not belong to local node, will remap " +
                "[key=" + key + ", part=" + e.partition() + ']');

        return false;
    }
}
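The reserve-then-check idiom above can be distilled into a minimal sketch. The Partition and PartState names below are hypothetical stand-ins for Ignite's internal GridDhtLocalPartition API, not its real signatures: reserving pins the partition so rebalancing cannot unload it mid-operation, and the state check after a successful reserve() guards against an eviction that was already underway.

// Minimal sketch of the reserve-then-check idiom (hypothetical types, not Ignite API).
enum PartState { OWNING, LOST, MOVING, RENTING, EVICTED }

interface Partition {
    boolean reserve();  // pin the partition; false if it is already being unloaded
    void release();     // undo a successful reserve()
    PartState state();
}

final class ReserveIdiom {
    /** @return {@code true} if the partition is reserved in a usable state; caller must release(). */
    static boolean tryMap(Partition part, boolean forceKeys) {
        if (!part.reserve())
            return false; // being unloaded: remap to another node

        if (forceKeys || part.state() == PartState.OWNING || part.state() == PartState.LOST)
            return true;

        part.release(); // reserved, but not in a readable state

        return false;
    }
}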
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class GridDhtLockFuture, method map.
/**
 * @param entries Entries.
 */
private void map(Iterable<GridDhtCacheEntry> entries) {
    synchronized (this) {
        if (mapped)
            return;

        mapped = true;
    }

    try {
        if (log.isDebugEnabled())
            log.debug("Mapping entry for DHT lock future: " + this);

        // Assign keys to primary nodes.
        for (GridDhtCacheEntry entry : entries) {
            try {
                while (true) {
                    try {
                        cctx.dhtMap(nearNodeId, topVer, entry, tx == null ? lockVer : null, log, dhtMap, null);

                        GridCacheMvccCandidate cand = entry.candidate(lockVer);

                        // Possible in case of lock cancellation.
                        if (cand == null) {
                            onFailed(false);

                            // Will mark initialized in finally block.
                            return;
                        }

                        break;
                    }
                    catch (GridCacheEntryRemovedException ignore) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry when mapping DHT lock future (will retry): " + entry);

                        entry = cctx.dht().entryExx(entry.key(), topVer);
                    }
                }
            }
            catch (GridDhtInvalidPartitionException e) {
                assert false : "DHT lock should never get invalid partition [err=" + e + ", fut=" + this + ']';
            }
        }

        if (checkDone())
            return;

        if (log.isDebugEnabled())
            log.debug("Mapped DHT lock future [dhtMap=" + F.nodeIds(dhtMap.keySet()) + ", dhtLockFut=" + this + ']');

        long timeout = inTx() ? tx.remainingTime() : this.timeout;

        synchronized (this) {
            // Prevents entry removal on concurrent rollback.
            if (checkDone())
                return;

            // Create mini futures.
            for (Map.Entry<ClusterNode, List<GridDhtCacheEntry>> mapped : dhtMap.entrySet()) {
                ClusterNode n = mapped.getKey();

                List<GridDhtCacheEntry> dhtMapping = mapped.getValue();

                int cnt = F.size(dhtMapping);

                if (cnt > 0) {
                    assert !n.id().equals(cctx.localNodeId());

                    if (inTx() && tx.remainingTime() == -1)
                        return;

                    MiniFuture fut = new MiniFuture(n, dhtMapping);

                    GridDhtLockRequest req = new GridDhtLockRequest(
                        cctx.cacheId(),
                        nearNodeId,
                        inTx() ? tx.nearXidVersion() : null,
                        threadId,
                        futId,
                        fut.futureId(),
                        lockVer,
                        topVer,
                        inTx(),
                        read,
                        isolation(),
                        isInvalidate(),
                        timeout,
                        cnt,
                        0,
                        inTx() ? tx.size() : cnt,
                        inTx() ? tx.taskNameHash() : 0,
                        read ? accessTtl : -1L,
                        skipStore,
                        cctx.store().configured(),
                        keepBinary,
                        cctx.deploymentEnabled(),
                        inTx() ? tx.label() : null);

                    try {
                        for (ListIterator<GridDhtCacheEntry> it = dhtMapping.listIterator(); it.hasNext(); ) {
                            GridDhtCacheEntry e = it.next();

                            boolean needVal = false;

                            try {
                                // Must unswap entry so that isNewLocked returns correct value.
                                e.unswap(false);

                                needVal = e.isNewLocked();

                                if (needVal) {
                                    List<ClusterNode> owners = cctx.topology().owners(e.partition(),
                                        tx != null ? tx.topologyVersion() : cctx.affinity().affinityTopologyVersion());

                                    // Do not preload if local node is partition owner.
                                    if (owners.contains(cctx.localNode()))
                                        needVal = false;
                                }

                                // Skip entry if it is not new and is not present in updated mapping.
                                if (tx != null && !needVal)
                                    continue;

                                boolean invalidateRdr = e.readerId(n.id()) != null;

                                req.addDhtKey(e.key(), invalidateRdr, cctx);

                                if (needVal) {
                                    // Mark last added key as needed to be preloaded.
                                    req.markLastKeyForPreload();

                                    if (tx != null) {
                                        IgniteTxEntry txEntry = tx.entry(e.txKey());

                                        // NOOP entries will be sent to backups on prepare step.
                                        if (txEntry.op() == GridCacheOperation.READ)
                                            txEntry.op(GridCacheOperation.NOOP);
                                    }
                                }

                                GridCacheMvccCandidate added = e.candidate(lockVer);

                                assert added != null;
                                assert added.dhtLocal();

                                if (added.ownerVersion() != null)
                                    req.owned(e.key(), added.ownerVersion());
                            }
                            catch (GridCacheEntryRemovedException ex) {
                                assert false : "Entry cannot become obsolete when DHT local candidate is added " +
                                    "[e=" + e + ", ex=" + ex + ']';
                            }
                        }

                        if (!F.isEmpty(req.keys())) {
                            if (tx != null)
                                tx.addLockTransactionNode(n);

                            // Append new future.
                            add(fut);

                            cctx.io().send(n, req, cctx.ioPolicy());

                            if (msgLog.isDebugEnabled()) {
                                msgLog.debug("DHT lock fut, sent request [txId=" + nearLockVer +
                                    ", dhtTxId=" + lockVer + ", inTx=" + inTx() + ", nodeId=" + n.id() + ']');
                            }
                        }
                    }
                    catch (IgniteCheckedException e) {
                        // Fail the whole thing.
                        if (e instanceof ClusterTopologyCheckedException)
                            fut.onResult();
                        else {
                            if (msgLog.isDebugEnabled()) {
                                msgLog.debug("DHT lock fut, failed to send request [txId=" + nearLockVer +
                                    ", dhtTxId=" + lockVer + ", inTx=" + inTx() + ", node=" + n.id() +
                                    ", err=" + e + ']');
                            }

                            fut.onResult(e);
                        }
                    }
                }
            }
        }
    }
    finally {
        markInitialized();
    }
}
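The inner while (true) loop above is Ignite's standard answer to entries turning obsolete under concurrent removal: catch the "removed" exception, re-resolve a fresh handle for the same key, and retry. A minimal sketch of that idiom follows; all names (Entry, EntryLookup, EntryRemovedException) are hypothetical stand-ins, not the real internals.

// Minimal sketch of the retry-on-removed-entry idiom (hypothetical names throughout).
final class RetryOnRemoved {
    static class EntryRemovedException extends Exception { }

    interface Entry {
        void addLockCandidate() throws EntryRemovedException;
    }

    interface EntryLookup {
        Entry entryFor(String key); // always returns a fresh, non-obsolete handle
    }

    static void mapWithRetry(EntryLookup cache, String key) {
        Entry entry = cache.entryFor(key);

        while (true) {
            try {
                entry.addLockCandidate();

                break; // success
            }
            catch (EntryRemovedException ignore) {
                entry = cache.entryFor(key); // stale handle: fetch a new one and retry
            }
        }
    }
}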
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class IgniteTxHandler, method mvccEnlistBatch.
/**
 * Writes updated values on the backup node.
 *
 * @param tx Transaction.
 * @param ctx Cache context.
 * @param op Operation.
 * @param keys Keys.
 * @param vals Values sent from the primary node.
 * @param snapshot Mvcc snapshot.
 * @param futId Future id.
 * @param batchNum Batch number.
 * @throws IgniteCheckedException If failed.
 */
public void mvccEnlistBatch(
    GridDhtTxRemote tx,
    GridCacheContext ctx,
    EnlistOperation op,
    List<KeyCacheObject> keys,
    List<Message> vals,
    MvccSnapshot snapshot,
    IgniteUuid futId,
    int batchNum
) throws IgniteCheckedException {
    assert keys != null && (vals == null || vals.size() == keys.size());
    assert tx != null;

    GridDhtCacheAdapter dht = ctx.dht();

    tx.addActiveCache(ctx, false);

    for (int i = 0; i < keys.size(); i++) {
        KeyCacheObject key = keys.get(i);

        assert key != null;

        int part = ctx.affinity().partition(key);

        try {
            GridDhtLocalPartition locPart = ctx.topology().localPartition(part, tx.topologyVersion(), false);

            if (locPart != null && locPart.reserve()) {
                try {
                    // Skip renting partitions.
                    if (locPart.state() == RENTING) {
                        tx.addInvalidPartition(ctx.cacheId(), part);

                        continue;
                    }

                    CacheObject val = null;
                    EntryProcessor entryProc = null;
                    Object[] invokeArgs = null;

                    boolean needOldVal = tx.txState().useMvccCaching(ctx.cacheId());

                    Message val0 = vals != null ? vals.get(i) : null;

                    CacheEntryInfoCollection entries =
                        val0 instanceof CacheEntryInfoCollection ? (CacheEntryInfoCollection)val0 : null;

                    if (entries == null && !op.isDeleteOrLock() && !op.isInvoke())
                        val = (val0 instanceof CacheObject) ? (CacheObject)val0 : null;

                    if (entries == null && op.isInvoke()) {
                        assert val0 instanceof GridInvokeValue;

                        GridInvokeValue invokeVal = (GridInvokeValue)val0;

                        entryProc = invokeVal.entryProcessor();
                        invokeArgs = invokeVal.invokeArgs();
                    }

                    assert entries != null || entryProc != null || !op.isInvoke() : "entryProc=" + entryProc + ", op=" + op;

                    GridDhtCacheEntry entry = dht.entryExx(key, tx.topologyVersion());

                    GridCacheUpdateTxResult updRes;

                    while (true) {
                        ctx.shared().database().checkpointReadLock();

                        try {
                            if (entries == null) {
                                switch (op) {
                                    case DELETE:
                                        updRes = entry.mvccRemove(tx, ctx.localNodeId(), tx.topologyVersion(),
                                            snapshot, false, needOldVal, null, false);

                                        break;

                                    case INSERT:
                                    case TRANSFORM:
                                    case UPSERT:
                                    case UPDATE:
                                        updRes = entry.mvccSet(tx, ctx.localNodeId(), val, entryProc, invokeArgs,
                                            0, tx.topologyVersion(), snapshot, op.cacheOperation(), false, false,
                                            needOldVal, null, false, false);

                                        break;

                                    default:
                                        throw new IgniteSQLException("Cannot acquire lock for operation [op=" + op +
                                            "]. Operation is unsupported at the moment.",
                                            IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
                                }
                            }
                            else {
                                updRes = entry.mvccUpdateRowsWithPreloadInfo(tx, ctx.localNodeId(),
                                    tx.topologyVersion(), entries.infos(), op.cacheOperation(), snapshot, futId,
                                    batchNum);
                            }

                            break;
                        }
                        catch (GridCacheEntryRemovedException ignore) {
                            entry = dht.entryExx(key);
                        }
                        finally {
                            ctx.shared().database().checkpointReadUnlock();
                        }
                    }

                    if (!updRes.filtered()) {
                        ctx.shared().mvccCaching().addEnlisted(key, updRes.newValue(), 0, 0, tx.xidVersion(),
                            updRes.oldValue(), tx.local(), tx.topologyVersion(), snapshot, ctx.cacheId(), tx,
                            futId, batchNum);
                    }

                    assert updRes.updateFuture() == null : "Entry should not be locked on the backup";
                }
                finally {
                    locPart.release();
                }
            }
            else
                tx.addInvalidPartition(ctx.cacheId(), part);
        }
        catch (GridDhtInvalidPartitionException e) {
            tx.addInvalidPartition(ctx.cacheId(), e.partition());
        }
    }
}
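Note the checkpointReadLock()/checkpointReadUnlock() bracket around every entry update in the loop above. A sketch of that discipline, using a plain ReentrantReadWriteLock as a stand-in for Ignite's internal checkpoint lock: updaters share the read side so many updates proceed in parallel, while the checkpointer takes the write side and therefore never observes a half-applied update.

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of the checkpoint-lock discipline (ReentrantReadWriteLock is a stand-in
// for Ignite's internal checkpoint lock, not the real implementation).
final class CheckpointLockIdiom {
    private final ReadWriteLock checkpointLock = new ReentrantReadWriteLock();

    void applyUpdate(Runnable update) {
        checkpointLock.readLock().lock(); // many updaters may hold the read side at once

        try {
            update.run();
        }
        finally {
            checkpointLock.readLock().unlock();
        }
    }

    void checkpoint(Runnable writePages) {
        checkpointLock.writeLock().lock(); // excludes all updaters

        try {
            writePages.run(); // no in-flight updates can be captured half-applied here
        }
        finally {
            checkpointLock.writeLock().unlock();
        }
    }
}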
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class IgniteTxHandler, method startRemoteTx.
/**
 * @param nodeId Node ID.
 * @param req Request.
 * @param res Response.
 * @return Remote transaction.
 * @throws IgniteCheckedException If failed.
 */
@Nullable
GridDhtTxRemote startRemoteTx(
    UUID nodeId,
    GridDhtTxPrepareRequest req,
    GridDhtTxPrepareResponse res
) throws IgniteCheckedException {
    if (req.queryUpdate() || !F.isEmpty(req.writes())) {
        GridDhtTxRemote tx = ctx.tm().tx(req.version());

        if (tx == null) {
            boolean single = req.last() && req.writes().size() == 1;

            tx = new GridDhtTxRemote(
                ctx,
                req.nearNodeId(),
                req.futureId(),
                nodeId,
                req.topologyVersion(),
                req.version(),
                null,
                req.system(),
                req.policy(),
                req.concurrency(),
                req.isolation(),
                req.isInvalidate(),
                req.timeout(),
                req.writes() != null ? Math.max(req.writes().size(), req.txSize()) : req.txSize(),
                req.nearXidVersion(),
                req.transactionNodes(),
                securitySubjectId(ctx),
                req.taskNameHash(),
                single,
                req.storeWriteThrough(),
                req.txLabel());

            tx.onePhaseCommit(req.onePhaseCommit());
            tx.writeVersion(req.writeVersion());

            tx = ctx.tm().onCreated(null, tx);

            if (tx == null || !ctx.tm().onStarted(tx)) {
                if (log.isDebugEnabled())
                    log.debug("Attempt to start a completed transaction (will ignore): " + tx);

                applyPartitionsUpdatesCounters(req.updateCounters(), true, false);

                return null;
            }

            if (ctx.discovery().node(nodeId) == null) {
                tx.state(ROLLING_BACK);
                tx.state(ROLLED_BACK);

                ctx.tm().uncommitTx(tx);

                applyPartitionsUpdatesCounters(req.updateCounters(), true, false);

                return null;
            }

            ctx.versions().onReceived(nodeId, req.writeVersion());
        }
        else {
            tx.writeVersion(req.writeVersion());
            tx.transactionNodes(req.transactionNodes());
        }

        TxCounters txCounters = null;

        if (req.updateCounters() != null) {
            txCounters = tx.txCounters(true);

            txCounters.updateCounters(req.updateCounters());
        }

        Set<GridDhtLocalPartition> reservedParts = new HashSet<>();

        try {
            if (!tx.isSystemInvalidate()) {
                int idx = 0;

                for (IgniteTxEntry entry : req.writes()) {
                    GridCacheContext cacheCtx = entry.context();

                    int part = cacheCtx.affinity().partition(entry.key());

                    try {
                        GridDhtLocalPartition locPart =
                            cacheCtx.topology().localPartition(part, req.topologyVersion(), false);

                        // Avoid enlisting to invalid partition.
                        boolean reserved = locPart != null && reservedParts.contains(locPart);

                        if (!reserved) {
                            if ((reserved = locPart != null && locPart.reserve()))
                                reservedParts.add(locPart);
                        }

                        if (reserved) {
                            tx.addWrite(entry, ctx.deploy().globalLoader());

                            if (txCounters != null) {
                                Long cntr = txCounters.generateNextCounter(entry.cacheId(), part);

                                // Counter is null if entry is no-op.
                                if (cntr != null)
                                    entry.updateCounter(cntr);
                            }

                            if (isNearEnabled(cacheCtx) && req.invalidateNearEntry(idx))
                                invalidateNearEntry(cacheCtx, entry.key(), req.version());

                            if (req.needPreloadKey(idx)) {
                                GridCacheEntryEx cached = entry.cached();

                                if (cached == null)
                                    cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion());

                                GridCacheEntryInfo info = cached.info();

                                if (info != null && !info.isNew() && !info.isDeleted())
                                    res.addPreloadEntry(info);
                            }

                            if (cacheCtx.readThroughConfigured() &&
                                !entry.skipStore() &&
                                entry.op() == TRANSFORM &&
                                entry.oldValueOnPrimary() &&
                                !entry.hasValue()) {
                                while (true) {
                                    try {
                                        GridCacheEntryEx cached = entry.cached();

                                        if (cached == null) {
                                            cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion());

                                            entry.cached(cached);
                                        }

                                        CacheObject val = cached.innerGet(
                                            /*ver*/null,
                                            tx,
                                            /*readThrough*/false,
                                            /*updateMetrics*/false,
                                            /*evt*/false,
                                            /*transformClo*/null,
                                            tx.resolveTaskName(),
                                            /*expiryPlc*/null,
                                            /*keepBinary*/true);

                                        if (val == null)
                                            val = cacheCtx.toCacheObject(cacheCtx.store().load(null, entry.key()));

                                        if (val != null)
                                            entry.readValue(val);

                                        break;
                                    }
                                    catch (GridCacheEntryRemovedException ignored) {
                                        if (log.isDebugEnabled())
                                            log.debug("Got entry removed exception, will retry: " + entry.txKey());

                                        entry.cached(cacheCtx.cache().entryEx(entry.key(), req.topologyVersion()));
                                    }
                                }
                            }
                        }
                        else
                            tx.addInvalidPartition(cacheCtx.cacheId(), part);
                    }
                    catch (GridDhtInvalidPartitionException e) {
                        tx.addInvalidPartition(cacheCtx.cacheId(), part);
                    }

                    idx++;
                }
            }

            // Prepare prior to reordering, so the pending locks added
            // in prepare phase will get properly ordered as well.
            tx.prepareRemoteTx();
        }
        finally {
            reservedParts.forEach(GridDhtLocalPartition::release);
        }

        if (req.last()) {
            assert !F.isEmpty(req.transactionNodes()) :
                "Received last prepare request with empty transaction nodes: " + req;

            tx.state(PREPARED);
        }

        res.invalidPartitionsByCacheId(tx.invalidPartitions());

        if (!req.queryUpdate() && tx.empty() && req.last()) {
            tx.skipCompletedVersions(req.skipCompletedVersion());

            tx.rollbackRemoteTx();

            return null;
        }

        return tx;
    }

    return null;
}
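The reservedParts set above implements a bulk-reservation pattern: each distinct partition is reserved at most once across all writes, and every successful reservation is released in a finally block even if enlisting fails midway. A minimal sketch under the same hypothetical Partition interface as the earlier sketch (Writer is likewise an illustrative stand-in):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Sketch of the bulk-reservation pattern (hypothetical Partition/Writer types).
final class BulkReserveIdiom {
    interface Partition {
        boolean reserve();
        void release();
    }

    interface Writer {
        void enlist(Partition part);       // add the write to the transaction
        void markInvalid(Partition part);  // record the partition as invalid
    }

    static void enlistAll(List<Partition> parts, Writer writer) {
        Set<Partition> reserved = new HashSet<>();

        try {
            for (Partition p : parts) {
                // The contains() check makes the reservation idempotent per partition.
                if (reserved.contains(p) || (p.reserve() && reserved.add(p)))
                    writer.enlist(p);
                else
                    writer.markInvalid(p); // could not reserve: skip, as the real code does
            }
        }
        finally {
            reserved.forEach(Partition::release); // always undo every reservation
        }
    }
}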
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException in project ignite by apache.
The class IgniteTxManager, method txUnlock.
/**
 * @param tx Transaction.
 * @param txEntry Entry to unlock.
 */
private void txUnlock(IgniteInternalTx tx, IgniteTxEntry txEntry) {
    while (true) {
        try {
            GridCacheEntryEx entry = txEntry.cached();

            assert entry != null;

            if (entry.detached())
                break;

            entry.txUnlock(tx);

            break;
        }
        catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
                log.debug("Got removed entry in TM txUnlock(..) method (will retry): " + txEntry);

            try {
                txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), tx.topologyVersion()));
            }
            catch (GridDhtInvalidPartitionException e) {
                // Partition no longer belongs to the local node, so there is nothing
                // to unlock here: ignore and proceed to the next lock.
                return;
            }
        }
    }
}
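This last snippet combines the retry idiom with a third response to GridDhtInvalidPartitionException: when re-resolving the entry reports that the partition has left this node, the lock state left with it, so the unlock simply returns. A sketch of that control flow, with all names as hypothetical stand-ins:

// Sketch of txUnlock's control flow (hypothetical names throughout).
final class TxUnlockIdiom {
    static class EntryRemovedException extends Exception { }
    static class InvalidPartitionException extends RuntimeException { }

    interface Entry {
        void txUnlock() throws EntryRemovedException;
    }

    interface Cache {
        Entry entryFor(String key) throws InvalidPartitionException;
    }

    static void txUnlock(Cache cache, String key) {
        Entry entry;

        try {
            entry = cache.entryFor(key);
        }
        catch (InvalidPartitionException e) {
            return; // partition moved off this node: nothing to unlock
        }

        while (true) {
            try {
                entry.txUnlock();

                return;
            }
            catch (EntryRemovedException ignore) {
                try {
                    entry = cache.entryFor(key); // stale handle: fetch a fresh one
                }
                catch (InvalidPartitionException e) {
                    return; // partition moved during the retry: done
                }
            }
        }
    }
}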