Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache.
Class GridPartitionedSingleGetFuture, method localGet.
/**
* @param topVer Topology version.
* @param part Partition.
* @return {@code True} if future completed.
*/
private boolean localGet(AffinityTopologyVersion topVer, int part) {
assert cctx.affinityNode() : this;
GridDhtCacheAdapter colocated = cctx.dht();
boolean readNoEntry = cctx.readNoEntry(expiryPlc, false);
boolean evt = !skipVals;
// Retry loop: restarted if the entry is concurrently removed.
while (true) {
try {
CacheObject v = null;
GridCacheVersion ver = null;
boolean skipEntry = readNoEntry;
// Fast path: read the data row directly from the off-heap store without creating a cache entry.
if (readNoEntry) {
CacheDataRow row = cctx.offheap().read(cctx, key);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
v = row.value();
if (needVer)
ver = row.version();
if (evt) {
cctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
}
} else
skipEntry = false;
}
}
if (!skipEntry) {
GridCacheEntryEx entry = colocated.entryEx(key);
// If our DHT cache does have the value, then we peek it.
if (entry != null) {
boolean isNew = entry.isNewLocked();
if (needVer) {
EntryGetResult res = entry.innerGetVersioned(null, null,
    /*update-metrics*/ false,
    /*event*/ evt,
    subjId, null, taskName, expiryPlc, true, null);
if (res != null) {
v = res.value();
ver = res.version();
}
} else {
v = entry.innerGet(null, null,
    /*read-through*/ false,
    /*update-metrics*/ false,
    /*event*/ evt,
    subjId, null, taskName, expiryPlc, true);
}
colocated.context().evicts().touch(entry, topVer);
// Entry was not in memory or in swap, so we remove it from cache.
if (v == null) {
if (isNew && entry.markObsoleteIfEmpty(ver))
colocated.removeEntry(entry);
}
}
}
if (v != null) {
if (!skipVals && cctx.statisticsEnabled())
cctx.cache().metrics0().onRead(true);
if (!skipVals)
setResult(v, ver);
else
setSkipValueResult(true, ver);
return true;
}
boolean topStable = cctx.isReplicated() || topVer.equals(cctx.topology().lastTopologyChangeVersion());
// Entry not found, complete future with null result if topology did not change and there is no store.
if (!cctx.readThroughConfigured() && (topStable || partitionOwned(part))) {
if (!skipVals && cctx.statisticsEnabled())
colocated.metrics0().onRead(false);
if (skipVals)
setSkipValueResult(false, null);
else
setResult(null, null);
return true;
}
return false;
} catch (GridCacheEntryRemovedException ignored) {
// No-op, will retry.
} catch (GridDhtInvalidPartitionException ignored) {
return false;
} catch (IgniteCheckedException e) {
onDone(e);
return true;
}
}
}
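
For context, this local fast path is what can serve an ordinary single-key read when the local node is an affinity node for the key. Below is a minimal sketch using only the public Ignite API (the cache name "myPartitionedCache" and the key are illustrative, not taken from the source above); on a node that owns the key's partition, the get() call can be answered through GridPartitionedSingleGetFuture.localGet without a network hop.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class SingleGetExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> cfg = new CacheConfiguration<>("myPartitionedCache");
            cfg.setCacheMode(CacheMode.PARTITIONED);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(cfg);

            cache.put(1, "value-1");

            // On an affinity node this read can be served by the local fast path
            // shown above, without contacting a remote node.
            String val = cache.get(1);

            System.out.println("Read: " + val);
        }
    }
}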
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache.
Class GridDhtColocatedCache, method removeLocks.
/**
* Removes locks regardless of whether they are owned or not for given
* version and keys.
*
* @param threadId Thread ID.
* @param ver Lock version.
* @param keys Keys.
*/
public void removeLocks(long threadId, GridCacheVersion ver, Collection<KeyCacheObject> keys) {
if (keys.isEmpty())
return;
try {
int keyCnt = -1;
Map<ClusterNode, GridNearUnlockRequest> map = null;
Collection<KeyCacheObject> locKeys = new LinkedList<>();
for (KeyCacheObject key : keys) {
IgniteTxKey txKey = ctx.txKey(key);
GridCacheMvccCandidate lock = ctx.mvcc().removeExplicitLock(threadId, txKey, ver);
if (lock != null) {
AffinityTopologyVersion topVer = lock.topologyVersion();
if (map == null) {
Collection<ClusterNode> affNodes = CU.affinityNodes(ctx, topVer);
keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());
map = U.newHashMap(affNodes.size());
}
ClusterNode primary = ctx.affinity().primaryByKey(key, topVer);
if (primary == null) {
if (log.isDebugEnabled())
log.debug("Failed to remove locks (all partition nodes left the grid).");
continue;
}
if (!primary.isLocal()) {
// Send request to remove from remote nodes.
GridNearUnlockRequest req = map.get(primary);
if (req == null) {
map.put(primary, req = new GridNearUnlockRequest(ctx.cacheId(), keyCnt, ctx.deploymentEnabled()));
req.version(ver);
}
GridCacheEntryEx entry = peekEx(key);
KeyCacheObject key0 = entry != null ? entry.key() : key;
req.addKey(key0, ctx);
} else
locKeys.add(key);
}
}
if (!locKeys.isEmpty())
removeLocks(ctx.localNodeId(), ver, locKeys, true);
if (map == null || map.isEmpty())
return;
IgnitePair<Collection<GridCacheVersion>> versPair = ctx.tm().versions(ver);
Collection<GridCacheVersion> committed = versPair.get1();
Collection<GridCacheVersion> rolledback = versPair.get2();
for (Map.Entry<ClusterNode, GridNearUnlockRequest> mapping : map.entrySet()) {
ClusterNode n = mapping.getKey();
GridDistributedUnlockRequest req = mapping.getValue();
if (!F.isEmpty(req.keys())) {
req.completedVersions(committed, rolledback);
try {
// We don't wait for reply to this message.
ctx.io().send(n, req, ctx.ioPolicy());
} catch (ClusterTopologyCheckedException e) {
if (log.isDebugEnabled())
log.debug("Failed to send unlock request (node has left the grid) [keys=" + req.keys() + ", n=" + n + ", e=" + e + ']');
} catch (IgniteCheckedException e) {
U.error(log, "Failed to send unlock request [keys=" + req.keys() + ", n=" + n + ']', e);
}
}
}
} catch (IgniteCheckedException ex) {
U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
}
}
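
A path like this is exercised when explicit locks taken through the public API are released or rolled back. The sketch below shows that public side only, assuming a TRANSACTIONAL cache; the cache name "txCache" and the key are illustrative, and it is not the internal call chain itself.

import java.util.concurrent.locks.Lock;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class ExplicitLockExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> cfg = new CacheConfiguration<>("txCache");
            // Explicit locks require a transactional cache.
            cfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(cfg);

            Lock lock = cache.lock(42);

            lock.lock();
            try {
                cache.put(42, "guarded update");
            } finally {
                // Releasing the lock hands the key back to the colocated cache's
                // lock-removal machinery, which batches unlock requests per primary node.
                lock.unlock();
            }
        }
    }
}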
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache.
Class GridDhtForceKeysFuture, method map.
/**
* @param key Key.
* @param exc Exclude nodes.
* @param mappings Mappings.
* @return Mappings.
*/
private Map<ClusterNode, Set<KeyCacheObject>> map(KeyCacheObject key, @Nullable Map<ClusterNode, Set<KeyCacheObject>> mappings, Collection<ClusterNode> exc) {
ClusterNode loc = cctx.localNode();
GridCacheEntryEx e = cctx.dht().peekEx(key);
try {
if (e != null && !e.isNewLocked()) {
if (log.isDebugEnabled()) {
int part = cctx.affinity().partition(key);
log.debug("Will not rebalance key (entry is not new) [cacheName=" + cctx.name() + ", key=" + key + ", part=" + part + ", locId=" + cctx.nodeId() + ']');
}
// Key has been rebalanced or retrieved already.
return mappings;
}
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Received removed DHT entry for force keys request [entry=" + e + ", locId=" + cctx.nodeId() + ']');
}
int part = cctx.affinity().partition(key);
List<ClusterNode> owners = F.isEmpty(exc) ? top.owners(part, topVer) : new ArrayList<>(F.view(top.owners(part, topVer), F.notIn(exc)));
if (owners.isEmpty() || (owners.contains(loc) && cctx.rebalanceEnabled())) {
if (log.isDebugEnabled())
log.debug("Will not rebalance key (local node is owner) [key=" + key + ", part=" + part + "topVer=" + topVer + ", locId=" + cctx.nodeId() + ']');
// Key is already rebalanced.
return mappings;
}
// Create partition.
GridDhtLocalPartition locPart = top.localPartition(part, topVer, false);
if (log.isDebugEnabled())
log.debug("Mapping local partition [loc=" + cctx.localNodeId() + ", topVer" + topVer + ", part=" + locPart + ", owners=" + owners + ", allOwners=" + U.toShortString(top.owners(part)) + ']');
if (locPart == null)
invalidParts.add(part);
else if (!cctx.rebalanceEnabled() || locPart.state() == MOVING) {
Collections.sort(owners, CU.nodeComparator(false));
// Load from youngest owner.
ClusterNode pick = F.first(owners);
assert pick != null;
if (!cctx.rebalanceEnabled() && loc.id().equals(pick.id()))
pick = F.first(F.view(owners, F.remoteNodes(loc.id())));
if (pick == null) {
if (log.isDebugEnabled())
log.debug("Will not rebalance key (no nodes to request from with rebalancing disabled) [key=" + key + ", part=" + part + ", locId=" + cctx.nodeId() + ']');
return mappings;
}
if (mappings == null)
mappings = U.newHashMap(keys.size());
Collection<KeyCacheObject> mappedKeys = F.addIfAbsent(mappings, pick, F.<KeyCacheObject>newSet());
assert mappedKeys != null;
mappedKeys.add(key);
if (log.isDebugEnabled())
log.debug("Will rebalance key from node [cacheName=" + cctx.name() + ", key=" + key + ", part=" + part + ", node=" + pick.id() + ", locId=" + cctx.nodeId() + ']');
} else if (locPart.state() != OWNING)
invalidParts.add(part);
else {
if (log.isDebugEnabled())
log.debug("Will not rebalance key (local partition is not MOVING) [cacheName=" + cctx.name() + ", key=" + key + ", part=" + locPart + ", locId=" + cctx.nodeId() + ']');
}
return mappings;
}
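
The method above groups keys by the node they will be requested from, based on affinity. An analogous node-to-keys grouping is exposed through the public Affinity API; the minimal sketch below (cache name and keys are illustrative) shows grouping by primary owner, not the internal owner-selection logic of the force-keys future.

import java.util.Arrays;
import java.util.Collection;
import java.util.Map;

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class AffinityMappingExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("myCache");

            Affinity<Integer> aff = ignite.affinity("myCache");

            Collection<Integer> keys = Arrays.asList(1, 2, 3, 4, 5);

            // Group keys by the primary node that currently owns their partition,
            // similar in spirit to the mappings built in GridDhtForceKeysFuture.map().
            Map<ClusterNode, Collection<Integer>> byNode = aff.mapKeysToNodes(keys);

            byNode.forEach((node, nodeKeys) ->
                System.out.println(node.id() + " -> " + nodeKeys
                    + " (partition of first key: " + aff.partition(nodeKeys.iterator().next()) + ")"));
        }
    }
}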
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache.
Class GridDhtPartitionDemander, method preloadEntry.
/**
* Adds {@code entry} to partition {@code p}.
*
* @param from Node which sent entry.
* @param p Partition id.
* @param entry Preloaded entry.
* @param topVer Topology version.
* @return {@code False} if partition has become invalid during preloading.
* @throws IgniteInterruptedCheckedException If interrupted.
*/
private boolean preloadEntry(ClusterNode from, int p, GridCacheEntryInfo entry, AffinityTopologyVersion topVer) throws IgniteCheckedException {
ctx.database().checkpointReadLock();
try {
GridCacheEntryEx cached = null;
try {
GridCacheContext cctx = grp.sharedGroup() ? ctx.cacheContext(entry.cacheId()) : grp.singleCacheContext();
cached = cctx.dhtCache().entryEx(entry.key());
if (log.isDebugEnabled())
log.debug("Rebalancing key [key=" + entry.key() + ", part=" + p + ", node=" + from.id() + ']');
cctx.shared().database().checkpointReadLock();
try {
if (preloadPred == null || preloadPred.apply(entry)) {
if (cached.initialValue(entry.value(), entry.version(), entry.ttl(), entry.expireTime(), true, topVer, cctx.isDrEnabled() ? DR_PRELOAD : DR_NONE, false)) {
// Start tracking.
cctx.evicts().touch(cached, topVer);
if (cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_LOADED) && !cached.isInternal())
cctx.events().addEvent(cached.partition(), cached.key(), cctx.localNodeId(), (IgniteUuid) null, null, EVT_CACHE_REBALANCE_OBJECT_LOADED, entry.value(), true, null, false, null, null, null, true);
} else {
// Start tracking.
cctx.evicts().touch(cached, topVer);
if (log.isDebugEnabled())
log.debug("Rebalancing entry is already in cache (will ignore) [key=" + cached.key() + ", part=" + p + ']');
}
} else if (log.isDebugEnabled())
log.debug("Rebalance predicate evaluated to false for entry (will ignore): " + entry);
} finally {
cctx.shared().database().checkpointReadUnlock();
}
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Entry has been concurrently removed while rebalancing (will ignore) [key=" + cached.key() + ", part=" + p + ']');
} catch (GridDhtInvalidPartitionException ignored) {
if (log.isDebugEnabled())
log.debug("Partition became invalid during rebalancing (will ignore): " + p);
return false;
}
} catch (IgniteInterruptedCheckedException e) {
throw e;
} catch (IgniteCheckedException e) {
throw new IgniteCheckedException("Failed to cache rebalanced entry (will stop rebalancing) [local=" + ctx.localNode() + ", node=" + from.id() + ", key=" + entry.key() + ", part=" + p + ']', e);
} finally {
ctx.database().checkpointReadUnlock();
}
return true;
}
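
For each entry it installs, preloadEntry fires EVT_CACHE_REBALANCE_OBJECT_LOADED. The sketch below shows one way to observe that event through the public API; it assumes the event type has been enabled in the node configuration, that the event is delivered as a CacheEvent, and the cache name is illustrative.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.CacheEvent;
import org.apache.ignite.events.EventType;

public class RebalanceEventExample {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // Events are disabled by default; the rebalance event must be enabled explicitly.
        cfg.setIncludeEventTypes(EventType.EVT_CACHE_REBALANCE_OBJECT_LOADED);

        try (Ignite ignite = Ignition.start(cfg)) {
            ignite.events().localListen(evt -> {
                CacheEvent e = (CacheEvent) evt;
                System.out.println("Rebalanced key " + e.key() + " in partition " + e.partition());
                // Return true to keep the listener registered.
                return true;
            }, EventType.EVT_CACHE_REBALANCE_OBJECT_LOADED);

            ignite.getOrCreateCache("myCache");
        }
    }
}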
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryEx in project ignite by apache.
Class IgniteTxHandler, method startRemoteTx.
/**
* @param nodeId Node ID.
* @param req Request.
* @param res Response.
* @return Remote transaction.
* @throws IgniteCheckedException If failed.
*/
@Nullable
GridDhtTxRemote startRemoteTx(UUID nodeId, GridDhtTxPrepareRequest req, GridDhtTxPrepareResponse res) throws IgniteCheckedException {
if (!F.isEmpty(req.writes())) {
GridDhtTxRemote tx = ctx.tm().tx(req.version());
if (tx == null) {
boolean single = req.last() && req.writes().size() == 1;
tx = new GridDhtTxRemote(ctx, req.nearNodeId(), req.futureId(), nodeId, req.topologyVersion(),
    req.version(), null, req.system(), req.policy(), req.concurrency(), req.isolation(),
    req.isInvalidate(), req.timeout(),
    req.writes() != null ? Math.max(req.writes().size(), req.txSize()) : req.txSize(),
    req.nearXidVersion(), req.transactionNodes(), req.subjectId(), req.taskNameHash(),
    single, req.storeWriteThrough());
tx.writeVersion(req.writeVersion());
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !ctx.tm().onStarted(tx)) {
if (log.isDebugEnabled())
log.debug("Attempt to start a completed transaction (will ignore): " + tx);
return null;
}
if (ctx.discovery().node(nodeId) == null) {
tx.state(ROLLING_BACK);
tx.state(ROLLED_BACK);
ctx.tm().uncommitTx(tx);
return null;
}
} else {
tx.writeVersion(req.writeVersion());
tx.transactionNodes(req.transactionNodes());
}
if (!tx.isSystemInvalidate()) {
int idx = 0;
for (IgniteTxEntry entry : req.writes()) {
GridCacheContext cacheCtx = entry.context();
int part = cacheCtx.affinity().partition(entry.key());
GridDhtLocalPartition locPart = cacheCtx.topology().localPartition(part, req.topologyVersion(), false);
if (locPart != null && locPart.reserve()) {
try {
tx.addWrite(entry, ctx.deploy().globalLoader());
if (isNearEnabled(cacheCtx) && req.invalidateNearEntry(idx))
invalidateNearEntry(cacheCtx, entry.key(), req.version());
if (req.needPreloadKey(idx)) {
GridCacheEntryEx cached = entry.cached();
if (cached == null)
cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion());
GridCacheEntryInfo info = cached.info();
if (info != null && !info.isNew() && !info.isDeleted())
res.addPreloadEntry(info);
}
if (cacheCtx.readThroughConfigured() && !entry.skipStore() && entry.op() == TRANSFORM && entry.oldValueOnPrimary() && !entry.hasValue()) {
// Retry loop: refresh the cached entry and try again if it was concurrently removed.
while (true) {
try {
GridCacheEntryEx cached = entry.cached();
if (cached == null) {
cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion());
entry.cached(cached);
}
CacheObject val = cached.innerGet(
    /*ver*/ null,
    tx,
    /*readThrough*/ false,
    /*updateMetrics*/ false,
    /*evt*/ false,
    tx.subjectId(),
    /*transformClo*/ null,
    tx.resolveTaskName(),
    /*expiryPlc*/ null,
    /*keepBinary*/ true);
if (val == null)
val = cacheCtx.toCacheObject(cacheCtx.store().load(null, entry.key()));
if (val != null)
entry.readValue(val);
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got entry removed exception, will retry: " + entry.txKey());
entry.cached(cacheCtx.cache().entryEx(entry.key(), req.topologyVersion()));
}
}
}
} catch (GridDhtInvalidPartitionException e) {
tx.addInvalidPartition(cacheCtx, e.partition());
tx.clearEntry(entry.txKey());
} finally {
locPart.release();
}
} else
tx.addInvalidPartition(cacheCtx, part);
idx++;
}
}
// Prepare prior to reordering, so the pending locks added
// in prepare phase will get properly ordered as well.
tx.prepareRemoteTx();
if (req.last()) {
assert !F.isEmpty(req.transactionNodes()) : "Received last prepare request with empty transaction nodes: " + req;
tx.state(PREPARED);
}
res.invalidPartitionsByCacheId(tx.invalidPartitions());
if (tx.empty() && req.last()) {
tx.rollbackRemoteTx();
return null;
}
return tx;
}
return null;
}
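
startRemoteTx is the receiving side of the distributed prepare phase. The sketch below shows the public API usage that drives such a prepare (the cache name, keys, and the chosen concurrency/isolation are illustrative, and this is only one combination that reaches this code path); committing the transaction triggers the prepare requests that handlers like this one process on remote participating nodes.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class RemoteTxExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> cfg = new CacheConfiguration<>("txCache");
            cfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(cfg);

            // Commit drives the two-phase prepare; on remote participating nodes the
            // DHT prepare request is handled by IgniteTxHandler.startRemoteTx.
            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {

                cache.put(1, "first");
                cache.put(2, "second");

                tx.commit();
            }
        }
    }
}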