Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class GridDistributedCacheEntry, method removeLock.
/**
 * Unlocks local lock.
 *
 * @return Removed candidate, or {@code null} if thread still holds the lock.
 */
@Nullable
public GridCacheMvccCandidate removeLock() {
    GridCacheMvccCandidate rmvd = null;
    CacheLockCandidates prev = null;
    CacheLockCandidates owner = null;
    CacheObject val;
    lockEntry();
    try {
        GridCacheMvcc mvcc = mvccExtras();
        if (mvcc != null) {
            prev = mvcc.allOwners();
            boolean emptyBefore = mvcc.isEmpty();
            rmvd = mvcc.releaseLocal();
            boolean emptyAfter = mvcc.isEmpty();
            checkCallbacks(emptyBefore, emptyAfter);
            if (emptyAfter)
                mvccExtras(null);
            else
                owner = mvcc.allOwners();
        }
        val = this.val;
    }
    finally {
        unlockEntry();
    }
    if (log.isDebugEnabled()) {
        log.debug("Released local candidate from entry [owner=" + owner + ", prev=" + prev +
            ", rmvd=" + rmvd + ", entry=" + this + ']');
    }
    if (prev != null) {
        for (int i = 0; i < prev.size(); i++) {
            GridCacheMvccCandidate cand = prev.candidate(i);
            checkThreadChain(cand);
        }
    }
    // This call must be outside of synchronization.
    checkOwnerChanged(prev, owner, val);
    return rmvd;
}
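The method mutates the entry's MVCC state strictly between lockEntry() and unlockEntry(), but deliberately fires checkOwnerChanged() only after the lock is released. Here is a minimal sketch of that "mutate under lock, notify outside the lock" shape; OwnerTracker and its callback are illustrative names, not Ignite API:

import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;

/** Minimal sketch of the "mutate under lock, notify outside the lock" pattern used above. */
class OwnerTracker {
    private final ReentrantLock lock = new ReentrantLock();
    private final BiConsumer<String, String> ownerChangedCb; // (prevOwner, newOwner).
    private String owner;

    OwnerTracker(BiConsumer<String, String> cb) {
        this.ownerChangedCb = cb;
    }

    /** Releases the current owner and returns it, or {@code null} if there was none. */
    String release() {
        String prev;
        lock.lock();
        try {
            prev = owner;
            owner = null; // State changes happen strictly under the lock.
        }
        finally {
            lock.unlock();
        }
        // Notify outside the lock, like checkOwnerChanged() above, so listener
        // code can never deadlock against this entry's lock.
        if (prev != null)
            ownerChangedCb.accept(prev, null);
        return prev;
    }
}

Running the callback outside the critical section is exactly why the original comment insists the call "must be outside of synchronization": listeners may take other locks of their own.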
Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class GridDistributedTxRemoteAdapter, method commitIfLocked.
/**
 * @throws IgniteCheckedException If commit failed.
 */
@SuppressWarnings({"CatchGenericClass"})
private void commitIfLocked() throws IgniteCheckedException {
    if (state() == COMMITTING) {
        for (IgniteTxEntry txEntry : writeEntries()) {
            assert txEntry != null : "Missing transaction entry for tx: " + this;
            while (true) {
                GridCacheEntryEx entry = txEntry.cached();
                assert entry != null : "Missing cached entry for transaction entry: " + txEntry;
                try {
                    GridCacheVersion ver = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer;
                    // If locks haven't been acquired yet, keep waiting.
                    if (!entry.lockedBy(ver)) {
                        if (log.isDebugEnabled())
                            log.debug("Transaction does not own lock for entry (will wait) [entry=" + entry +
                                ", tx=" + this + ']');
                        return;
                    }
                    break; // While.
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry while committing (will retry): " + txEntry);
                    txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion()));
                }
            }
        }
        // Only one thread gets to commit.
        if (COMMIT_ALLOWED_UPD.compareAndSet(this, 0, 1)) {
            IgniteCheckedException err = null;
            Map<IgniteTxKey, IgniteTxEntry> writeMap = txState.writeMap();
            GridCacheReturnCompletableWrapper wrapper = null;
            if (!F.isEmpty(writeMap)) {
                GridCacheReturn ret = null;
                if (!near() && !local() && onePhaseCommit()) {
                    if (needReturnValue()) {
                        ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, true);
                        // Originating node.
                        UUID origNodeId = otherNodeId();
                        cctx.tm().addCommittedTxReturn(this,
                            wrapper = new GridCacheReturnCompletableWrapper(
                                !cctx.localNodeId().equals(origNodeId) ? origNodeId : null));
                    }
                    else
                        cctx.tm().addCommittedTx(this, this.nearXidVersion(), null);
                }
                // Register this transaction as completed prior to write-phase to
                // ensure proper lock ordering for removed entries.
                cctx.tm().addCommittedTx(this);
                AffinityTopologyVersion topVer = topologyVersion();
                WALPointer ptr = null;
                cctx.database().checkpointReadLock();
                try {
                    Collection<IgniteTxEntry> entries =
                        near() || cctx.snapshot().needTxReadLogging() ? allEntries() : writeEntries();
                    List<DataEntry> dataEntries = null;
                    batchStoreCommit(writeMap().values());
                    try {
                        // Note that for near transactions we grab all entries.
                        for (IgniteTxEntry txEntry : entries) {
                            GridCacheContext cacheCtx = txEntry.context();
                            boolean replicate = cacheCtx.isDrEnabled();
                            try {
                                while (true) {
                                    try {
                                        GridCacheEntryEx cached = txEntry.cached();
                                        if (cached == null)
                                            txEntry.cached(cached = cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
                                        if (near() && cacheCtx.dr().receiveEnabled()) {
                                            cached.markObsolete(xidVer);
                                            break;
                                        }
                                        GridNearCacheEntry nearCached = null;
                                        if (updateNearCache(cacheCtx, txEntry.key(), topVer))
                                            nearCached = cacheCtx.dht().near().peekExx(txEntry.key());
                                        if (!F.isEmpty(txEntry.entryProcessors()))
                                            txEntry.cached().unswap(false);
                                        IgniteBiTuple<GridCacheOperation, CacheObject> res =
                                            applyTransformClosures(txEntry, false, ret);
                                        GridCacheOperation op = res.get1();
                                        CacheObject val = res.get2();
                                        GridCacheVersion explicitVer = txEntry.conflictVersion();
                                        if (explicitVer == null)
                                            explicitVer = writeVersion();
                                        if (txEntry.ttl() == CU.TTL_ZERO)
                                            op = DELETE;
                                        boolean conflictNeedResolve = cacheCtx.conflictNeedResolve();
                                        GridCacheVersionConflictContext conflictCtx = null;
                                        if (conflictNeedResolve) {
                                            IgniteBiTuple<GridCacheOperation, GridCacheVersionConflictContext> drRes =
                                                conflictResolve(op, txEntry, val, explicitVer, cached);
                                            assert drRes != null;
                                            conflictCtx = drRes.get2();
                                            if (conflictCtx.isUseOld())
                                                op = NOOP;
                                            else if (conflictCtx.isUseNew()) {
                                                txEntry.ttl(conflictCtx.ttl());
                                                txEntry.conflictExpireTime(conflictCtx.expireTime());
                                            }
                                            else if (conflictCtx.isMerge()) {
                                                op = drRes.get1();
                                                val = txEntry.context().toCacheObject(conflictCtx.mergeValue());
                                                explicitVer = writeVersion();
                                                txEntry.ttl(conflictCtx.ttl());
                                                txEntry.conflictExpireTime(conflictCtx.expireTime());
                                            }
                                        }
                                        else
                                            // Nullify explicit version so that innerSet/innerRemove will work as usual.
                                            explicitVer = null;
                                        GridCacheVersion dhtVer = cached.isNear() ? writeVersion() : null;
                                        if (!near() && cacheCtx.group().persistenceEnabled() && cacheCtx.group().walEnabled() &&
                                            op != NOOP && op != RELOAD && (op != READ || cctx.snapshot().needTxReadLogging())) {
                                            if (dataEntries == null)
                                                dataEntries = new ArrayList<>(entries.size());
                                            dataEntries.add(new DataEntry(cacheCtx.cacheId(), txEntry.key(), val, op,
                                                nearXidVersion(), writeVersion(), 0, txEntry.key().partition(),
                                                txEntry.updateCounter()));
                                        }
                                        if (op == CREATE || op == UPDATE) {
                                            // Invalidate only for near nodes (backups cannot be invalidated).
                                            if (isSystemInvalidate() || (isInvalidate() && cacheCtx.isNear()))
                                                cached.innerRemove(this, eventNodeId(), nodeId, false, true, true,
                                                    txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(),
                                                    topVer, null, replicate ? DR_BACKUP : DR_NONE,
                                                    near() ? null : explicitVer, CU.subjectId(this, cctx),
                                                    resolveTaskName(), dhtVer, txEntry.updateCounter());
                                            else {
                                                assert val != null : txEntry;
                                                GridCacheUpdateTxResult updRes = cached.innerSet(this, eventNodeId(),
                                                    nodeId, val, false, false, txEntry.ttl(), true, true,
                                                    txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(),
                                                    topVer, null, replicate ? DR_BACKUP : DR_NONE,
                                                    txEntry.conflictExpireTime(), near() ? null : explicitVer,
                                                    CU.subjectId(this, cctx), resolveTaskName(), dhtVer,
                                                    txEntry.updateCounter());
                                                if (updRes.loggedPointer() != null)
                                                    ptr = updRes.loggedPointer();
                                                // Keep near entry up to date.
                                                if (nearCached != null) {
                                                    CacheObject val0 = cached.valueBytes();
                                                    nearCached.updateOrEvict(xidVer, val0, cached.expireTime(),
                                                        cached.ttl(), nodeId, topVer);
                                                }
                                            }
                                        }
                                        else if (op == DELETE) {
                                            GridCacheUpdateTxResult updRes = cached.innerRemove(this, eventNodeId(),
                                                nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(),
                                                txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE,
                                                near() ? null : explicitVer, CU.subjectId(this, cctx),
                                                resolveTaskName(), dhtVer, txEntry.updateCounter());
                                            if (updRes.loggedPointer() != null)
                                                ptr = updRes.loggedPointer();
                                            // Keep near entry up to date.
                                            if (nearCached != null)
                                                nearCached.updateOrEvict(xidVer, null, 0, 0, nodeId, topVer);
                                        }
                                        else if (op == RELOAD) {
                                            CacheObject reloaded = cached.innerReload();
                                            if (nearCached != null) {
                                                nearCached.innerReload();
                                                nearCached.updateOrEvict(cached.version(), reloaded, cached.expireTime(),
                                                    cached.ttl(), nodeId, topVer);
                                            }
                                        }
                                        else if (op == READ) {
                                            assert near();
                                            if (log.isDebugEnabled())
                                                log.debug("Ignoring READ entry when committing: " + txEntry);
                                        }
                                        else { // No-op.
                                            if (conflictCtx == null || !conflictCtx.isUseOld()) {
                                                if (txEntry.ttl() != CU.TTL_NOT_CHANGED)
                                                    cached.updateTtl(null, txEntry.ttl());
                                                if (nearCached != null) {
                                                    CacheObject val0 = cached.valueBytes();
                                                    nearCached.updateOrEvict(xidVer, val0, cached.expireTime(),
                                                        cached.ttl(), nodeId, topVer);
                                                }
                                            }
                                        }
                                        // Assert after setting values as we want to make sure
                                        // that if we replaced removed entries.
                                        assert txEntry.op() == READ || onePhaseCommit() ||
                                            // If candidate is not there, then lock was explicit
                                            // and we simply allow the commit to proceed.
                                            !cached.hasLockCandidateUnsafe(xidVer) || cached.lockedByUnsafe(xidVer) :
                                            "Transaction does not own lock for commit [entry=" + cached + ", tx=" + this + ']';
                                        // Break out of while loop.
                                        break;
                                    }
                                    catch (GridCacheEntryRemovedException ignored) {
                                        if (log.isDebugEnabled())
                                            log.debug("Attempting to commit a removed entry (will retry): " + txEntry);
                                        // Renew cached entry.
                                        txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
                                    }
                                }
                            }
                            catch (Throwable ex) {
                                boolean nodeStopping = X.hasCause(ex, NodeStoppingException.class);
                                // In case of error, we still make the best effort to commit,
                                // as there is no way to rollback at this point.
                                err = new IgniteTxHeuristicCheckedException("Commit produced a runtime exception " +
                                    "(all transaction entries will be invalidated): " + CU.txString(this), ex);
                                if (nodeStopping) {
                                    U.warn(log, "Failed to commit transaction, node is stopping [tx=" + this +
                                        ", err=" + ex + ']');
                                }
                                else
                                    U.error(log, "Commit failed.", err);
                                uncommit(nodeStopping);
                                state(UNKNOWN);
                                if (ex instanceof Error)
                                    throw (Error)ex;
                            }
                        }
                        if (!near() && !F.isEmpty(dataEntries) && cctx.wal() != null)
                            cctx.wal().log(new DataRecord(dataEntries));
                        if (ptr != null && !cctx.tm().logTxRecords())
                            cctx.wal().fsync(ptr);
                    }
                    catch (StorageException e) {
                        throw new IgniteCheckedException("Failed to log transaction record " +
                            "(transaction will be rolled back): " + this, e);
                    }
                }
                finally {
                    cctx.database().checkpointReadUnlock();
                    if (wrapper != null)
                        wrapper.initialize(ret);
                }
            }
            if (err != null) {
                state(UNKNOWN);
                throw err;
            }
            cctx.tm().commitTx(this);
            state(COMMITTED);
        }
    }
}
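The COMMIT_ALLOWED_UPD.compareAndSet(this, 0, 1) call is what makes "Only one thread gets to commit" true: a field-updater CAS succeeds for exactly one caller no matter how many race. A minimal sketch of that idiom follows; OneShotCommitter and commitBody are illustrative names, not Ignite API:

import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

/** Minimal sketch of the "only one thread gets to commit" CAS guard used above. */
class OneShotCommitter {
    private static final AtomicIntegerFieldUpdater<OneShotCommitter> COMMIT_ALLOWED_UPD =
        AtomicIntegerFieldUpdater.newUpdater(OneShotCommitter.class, "commitAllowed");

    /** 0 = not committed yet, 1 = some thread has claimed the commit. Must be volatile for the updater. */
    private volatile int commitAllowed;

    /** @return {@code true} if this call won the race and ran the commit body. */
    boolean commit(Runnable commitBody) {
        // compareAndSet(0, 1) flips the flag for exactly one caller; everyone else sees 1 and backs off.
        if (COMMIT_ALLOWED_UPD.compareAndSet(this, 0, 1)) {
            commitBody.run();
            return true;
        }
        return false;
    }
}

A field updater is commonly chosen over an AtomicInteger field to avoid allocating one extra object per instance, which matters when many short-lived transactions are created.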
Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class GridDhtTxPrepareFuture, method onEntriesLocked.
/**
 * Invoked once entries are locked: applies entry processors, evaluates filters and
 * prepares return and old values for the transaction entries.
 */
private void onEntriesLocked() {
    ret = new GridCacheReturn(null, tx.localResult(), true, null, true);
    for (IgniteTxEntry writeEntry : req.writes()) {
        IgniteTxEntry txEntry = tx.entry(writeEntry.txKey());
        assert txEntry != null : writeEntry;
        GridCacheContext cacheCtx = txEntry.context();
        GridCacheEntryEx cached = txEntry.cached();
        ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);
        cctx.database().checkpointReadLock();
        try {
            if ((txEntry.op() == CREATE || txEntry.op() == UPDATE) &&
                txEntry.conflictExpireTime() == CU.EXPIRE_TIME_CALCULATE) {
                if (expiry != null) {
                    cached.unswap(true);
                    Duration duration = cached.hasValue() ? expiry.getExpiryForUpdate() : expiry.getExpiryForCreation();
                    txEntry.ttl(CU.toTtl(duration));
                }
            }
            boolean hasFilters = !F.isEmptyOrNulls(txEntry.filters()) && !F.isAlwaysTrue(txEntry.filters());
            CacheObject val;
            CacheObject oldVal = null;
            boolean readOld = hasFilters || retVal || txEntry.op() == DELETE || txEntry.op() == TRANSFORM ||
                tx.nearOnOriginatingNode() || tx.hasInterceptor();
            if (readOld) {
                boolean readThrough = !txEntry.skipStore() &&
                    (txEntry.op() == TRANSFORM || ((retVal || hasFilters) && cacheCtx.config().isLoadPreviousValue()));
                boolean evt = retVal || txEntry.op() == TRANSFORM;
                EntryProcessor entryProc = null;
                if (evt && txEntry.op() == TRANSFORM)
                    entryProc = F.first(txEntry.entryProcessors()).get1();
                final boolean keepBinary = txEntry.keepBinary();
                val = oldVal = cached.innerGet(null, tx, readThrough, /*metrics*/retVal, /*event*/evt,
                    tx.subjectId(), entryProc, tx.resolveTaskName(), null, keepBinary);
                if (retVal || txEntry.op() == TRANSFORM) {
                    if (!F.isEmpty(txEntry.entryProcessors())) {
                        invoke = true;
                        if (txEntry.hasValue())
                            val = txEntry.value();
                        KeyCacheObject key = txEntry.key();
                        Object procRes = null;
                        Exception err = null;
                        boolean modified = false;
                        txEntry.oldValueOnPrimary(val != null);
                        for (T2<EntryProcessor<Object, Object, Object>, Object[]> t : txEntry.entryProcessors()) {
                            CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry<>(key, val,
                                txEntry.cached().version(), keepBinary, txEntry.cached());
                            try {
                                EntryProcessor<Object, Object, Object> processor = t.get1();
                                procRes = processor.process(invokeEntry, t.get2());
                                val = cacheCtx.toCacheObject(invokeEntry.getValue(true));
                                if (val != null) // No validation for the remove case.
                                    cacheCtx.validateKeyAndValue(key, val);
                            }
                            catch (Exception e) {
                                err = e;
                                break;
                            }
                            modified |= invokeEntry.modified();
                        }
                        if (modified)
                            val = cacheCtx.toCacheObject(cacheCtx.unwrapTemporary(val));
                        GridCacheOperation op = modified ? (val == null ? DELETE : UPDATE) : NOOP;
                        if (op == NOOP) {
                            if (expiry != null) {
                                long ttl = CU.toTtl(expiry.getExpiryForAccess());
                                txEntry.ttl(ttl);
                                if (ttl == CU.TTL_ZERO)
                                    op = DELETE;
                            }
                        }
                        txEntry.entryProcessorCalculatedValue(new T2<>(op, op == NOOP ? null : val));
                        if (retVal) {
                            if (err != null || procRes != null)
                                ret.addEntryProcessResult(txEntry.context(), key, null, procRes, err, keepBinary);
                            else
                                ret.invokeResult(true);
                        }
                    }
                    else if (retVal)
                        ret.value(cacheCtx, val, keepBinary);
                }
                if (hasFilters && !cacheCtx.isAll(cached, txEntry.filters())) {
                    if (expiry != null)
                        txEntry.ttl(CU.toTtl(expiry.getExpiryForAccess()));
                    txEntry.op(GridCacheOperation.NOOP);
                    if (filterFailedKeys == null)
                        filterFailedKeys = new ArrayList<>();
                    filterFailedKeys.add(cached.txKey());
                    ret.success(false);
                }
                else
                    ret.success(txEntry.op() != DELETE || cached.hasValue());
            }
            // Send old value in case rebalancing is not finished.
            final boolean sndOldVal = !cacheCtx.isLocal() && !cacheCtx.topology().rebalanceFinished(tx.topologyVersion());
            if (sndOldVal) {
                if (oldVal == null && !readOld) {
                    oldVal = cached.innerGet(null, tx, /*readThrough*/false, /*metrics*/false, /*event*/false,
                        /*subjectId*/tx.subjectId(), /*transformClo*/null, /*taskName*/null, /*expiryPlc*/null,
                        /*keepBinary*/true);
                }
                if (oldVal != null)
                    oldVal.prepareMarshal(cacheCtx.cacheObjectContext());
                txEntry.oldValue(oldVal);
            }
        }
        catch (IgniteCheckedException e) {
            U.error(log, "Failed to get result value for cache entry: " + cached, e);
        }
        catch (GridCacheEntryRemovedException e) {
            assert false : "Got entry removed exception while holding transactional lock on entry [e=" + e +
                ", cached=" + cached + ']';
        }
        finally {
            cctx.database().checkpointReadUnlock();
        }
    }
}
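The entry-processor loop feeds each processor the value left behind by the previous one and OR-accumulates the modified flag across the chain. A runnable sketch of that chaining against the plain JCache API; SimpleEntry and ChainedInvoke are illustrative stand-ins for Ignite's CacheInvokeEntry, not Ignite classes:

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;
import java.util.List;

/** Minimal sketch: chain EntryProcessors over a single in-memory entry, as the loop above does. */
final class ChainedInvoke {
    /** Trivial MutableEntry over a local value. */
    static final class SimpleEntry<K, V> implements MutableEntry<K, V> {
        private final K key;
        private V val;
        private boolean modified;

        SimpleEntry(K key, V val) { this.key = key; this.val = val; }

        @Override public boolean exists() { return val != null; }
        @Override public void remove() { val = null; modified = true; }
        @Override public void setValue(V v) { val = v; modified = true; }
        @Override public K getKey() { return key; }
        @Override public V getValue() { return val; }
        @Override public <T> T unwrap(Class<T> cls) { throw new IllegalArgumentException(); }

        boolean modified() { return modified; }
    }

    /** Applies processors in order; each one sees whatever value the previous one left behind. */
    static <K, V> V applyAll(K key, V initial, List<EntryProcessor<K, V, Object>> processors, Object... args) {
        SimpleEntry<K, V> entry = new SimpleEntry<>(key, initial);
        for (EntryProcessor<K, V, Object> proc : processors)
            proc.process(entry, args); // An exception here would abort the chain, as in the loop above.
        return entry.getValue();
    }
}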
Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class GridPartitionedGetFuture, method localGet.
/**
 * @param topVer Topology version.
 * @param key Key.
 * @param part Partition.
 * @param locVals Local values.
 * @return {@code True} if there is no need to further search value.
 */
private boolean localGet(AffinityTopologyVersion topVer, KeyCacheObject key, int part, Map<K, V> locVals) {
    assert cctx.affinityNode() : this;
    GridDhtCacheAdapter<K, V> cache = cache();
    boolean readNoEntry = cctx.readNoEntry(expiryPlc, false);
    boolean evt = !skipVals;
    while (true) {
        try {
            boolean skipEntry = readNoEntry;
            EntryGetResult getRes = null;
            CacheObject v = null;
            GridCacheVersion ver = null;
            if (readNoEntry) {
                CacheDataRow row = cctx.offheap().read(cctx, key);
                if (row != null) {
                    long expireTime = row.expireTime();
                    if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                        v = row.value();
                        if (needVer)
                            ver = row.version();
                        if (evt) {
                            cctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
                        }
                    }
                    else
                        skipEntry = false;
                }
            }
            if (!skipEntry) {
                GridCacheEntryEx entry = cache.entryEx(key);
                // If our DHT cache does have the value, then we peek it.
                if (entry != null) {
                    boolean isNew = entry.isNewLocked();
                    if (needVer) {
                        getRes = entry.innerGetVersioned(null, null, /*update-metrics*/false, /*event*/evt,
                            subjId, null, taskName, expiryPlc, !deserializeBinary, null);
                        if (getRes != null) {
                            v = getRes.value();
                            ver = getRes.version();
                        }
                    }
                    else {
                        v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/false,
                            /*event*/evt, subjId, null, taskName, expiryPlc, !deserializeBinary);
                    }
                    cache.context().evicts().touch(entry, topVer);
                    // Entry was not in memory or in swap, so we remove it from cache.
                    if (v == null) {
                        if (isNew && entry.markObsoleteIfEmpty(ver))
                            cache.removeEntry(entry);
                    }
                }
            }
            if (v != null) {
                cctx.addResult(locVals, key, v, skipVals, keepCacheObjects, deserializeBinary, true, getRes, ver, 0, 0, needVer);
                return true;
            }
            boolean topStable = cctx.isReplicated() || topVer.equals(cctx.topology().lastTopologyChangeVersion());
            // Entry not found, do not continue search if topology did not change and there is no store.
            if (!cctx.readThroughConfigured() && (topStable || partitionOwned(part))) {
                if (!skipVals && cctx.statisticsEnabled())
                    cache.metrics0().onRead(false);
                return true;
            }
            return false;
        }
        catch (GridCacheEntryRemovedException ignored) {
            // No-op, will retry.
        }
        catch (GridDhtInvalidPartitionException ignored) {
            return false;
        }
        catch (IgniteCheckedException e) {
            onDone(e);
            return true;
        }
    }
}
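Like the other snippets on this page, localGet() wraps the whole read in a while (true) loop that retries on GridCacheEntryRemovedException: between looking an entry up and reading it, another thread may remove it, and the only safe response is to start over. A minimal sketch of that retry shape; StaleEntryException is a hypothetical stand-in for Ignite's exception:

import java.util.Optional;

/** Illustrative stand-in for GridCacheEntryRemovedException: the handle we read through went stale. */
class StaleEntryException extends Exception {
}

final class RetryingReader {
    /**
     * Minimal sketch of the localGet() retry shape: re-read while the entry is
     * concurrently removed; terminal outcomes (hit or definite miss) exit the loop.
     */
    static <T> Optional<T> readWithRetry(ThrowingSupplier<T> read) {
        while (true) {
            try {
                return Optional.ofNullable(read.get()); // Hit or definite miss both terminate.
            }
            catch (StaleEntryException ignored) {
                // No-op, will retry: the entry was removed between lookup and read.
            }
        }
    }

    @FunctionalInterface
    interface ThrowingSupplier<T> {
        T get() throws StaleEntryException;
    }
}

Only the stale-handle case loops; a hit, a definite miss, or a real failure all leave the loop, exactly as the three catch blocks above do.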
Use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
The class GridLocalAtomicCache, method updateAllInternal.
/**
 * Entry point for all public update methods (put, remove, invoke).
 *
 * @param op Operation.
 * @param keys Keys.
 * @param vals Values.
 * @param invokeArgs Optional arguments for EntryProcessor.
 * @param expiryPlc Expiry policy.
 * @param retval Return value required flag.
 * @param rawRetval Return {@code GridCacheReturn} instance.
 * @param filter Cache entry filter.
 * @param writeThrough Write through.
 * @param readThrough Read through.
 * @return Update result.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
private Object updateAllInternal(GridCacheOperation op, Collection<? extends K> keys, @Nullable Iterable<?> vals,
    @Nullable Object[] invokeArgs, @Nullable ExpiryPolicy expiryPlc, boolean retval, boolean rawRetval,
    CacheEntryPredicate filter, boolean writeThrough, boolean readThrough, boolean keepBinary)
    throws IgniteCheckedException {
    if (keyCheck)
        validateCacheKeys(keys);
    if (op == DELETE)
        ctx.checkSecurity(SecurityPermission.CACHE_REMOVE);
    else
        ctx.checkSecurity(SecurityPermission.CACHE_PUT);
    String taskName = ctx.kernalContext().job().currentTaskName();
    GridCacheVersion ver = ctx.versions().next();
    UUID subjId = ctx.subjectIdPerCall(null);
    CacheEntryPredicate[] filters = CU.filterArray(filter);
    IgniteBiTuple<Boolean, ?> res = null;
    CachePartialUpdateCheckedException err = null;
    ctx.shared().database().checkpointReadLock();
    try {
        ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
        if (writeThrough && keys.size() > 1) {
            return updateWithBatch(op, keys, vals, invokeArgs, expiryPlc, ver, filters, keepBinary, subjId, taskName);
        }
        Iterator<?> valsIter = vals != null ? vals.iterator() : null;
        boolean intercept = ctx.config().getInterceptor() != null;
        for (K key : keys) {
            if (key == null)
                throw new NullPointerException("Null key.");
            Object val = valsIter != null ? valsIter.next() : null;
            if (val == null && op != DELETE)
                throw new NullPointerException("Null value.");
            KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);
            if (op == UPDATE) {
                val = ctx.toCacheObject(val);
                ctx.validateKeyAndValue(cacheKey, (CacheObject)val);
            }
            else if (op == TRANSFORM)
                ctx.kernalContext().resource().inject(val, GridResourceIoc.AnnotationSet.ENTRY_PROCESSOR, ctx.name());
            while (true) {
                GridCacheEntryEx entry = null;
                try {
                    entry = entryEx(cacheKey);
                    GridTuple3<Boolean, Object, EntryProcessorResult<Object>> t = entry.innerUpdateLocal(ver,
                        val == null ? DELETE : op, val, invokeArgs, writeThrough, readThrough, retval, keepBinary,
                        expiryPlc, true, true, filters, intercept, subjId, taskName);
                    if (op == TRANSFORM) {
                        if (t.get3() != null) {
                            Map<K, EntryProcessorResult> computedMap;
                            if (res == null) {
                                computedMap = U.newHashMap(keys.size());
                                res = new IgniteBiTuple<>(true, computedMap);
                            }
                            else
                                computedMap = (Map<K, EntryProcessorResult>)res.get2();
                            computedMap.put(key, t.get3());
                        }
                    }
                    else if (res == null)
                        res = new T2(t.get1(), t.get2());
                    break; // While.
                }
                catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry while updating (will retry): " + key);
                    entry = null;
                }
                catch (IgniteCheckedException e) {
                    if (err == null)
                        err = partialUpdateException();
                    err.add(F.asList(key), e);
                    U.error(log, "Failed to update key: " + key, e);
                    break;
                }
                finally {
                    if (entry != null)
                        ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion());
                }
            }
        }
    }
    finally {
        ctx.shared().database().checkpointReadUnlock();
    }
    if (err != null)
        throw err;
    Object ret = res == null ? null :
        rawRetval ? new GridCacheReturn(ctx, true, keepBinary, res.get2(), res.get1()) :
        (retval || op == TRANSFORM) ? res.get2() : res.get1();
    if (op == TRANSFORM && ret == null)
        ret = Collections.emptyMap();
    return ret;
}
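Note how the per-key loop does not abort on an IgniteCheckedException: it accumulates the failure into a CachePartialUpdateCheckedException and moves on, so one bad key does not block the rest of the batch. A sketch of that accumulate-then-throw pattern; PartialUpdateException and BatchUpdater are illustrative names, not Ignite classes:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;

/** Illustrative analogue of CachePartialUpdateCheckedException: carries the keys that failed. */
class PartialUpdateException extends Exception {
    final List<Object> failedKeys = new ArrayList<>();

    void add(Object key, Exception cause) {
        failedKeys.add(key);
        addSuppressed(cause); // Keep the per-key cause attached to the aggregate failure.
    }
}

final class BatchUpdater {
    /**
     * Minimal sketch of the per-key loop above: apply the update to every key,
     * accumulate failures instead of aborting, and throw once at the end.
     */
    static <K, V> void updateAll(Map<K, V> batch, BiConsumer<K, V> update) throws PartialUpdateException {
        PartialUpdateException err = null;
        for (Map.Entry<K, V> e : batch.entrySet()) {
            try {
                update.accept(e.getKey(), e.getValue());
            }
            catch (Exception ex) {
                if (err == null)
                    err = new PartialUpdateException();
                err.add(e.getKey(), ex);
            }
        }
        if (err != null)
            throw err;
    }
}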