use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
the class GridDhtAtomicCache method updatePartialBatch.
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param dhtFut DHT update future if has backups.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param batchRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return Deleted entries.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
@Nullable
private GridDhtAtomicAbstractUpdateFuture updatePartialBatch(
    final boolean hasNear,
    final int firstEntryIdx,
    final List<GridDhtCacheEntry> entries,
    final GridCacheVersion ver,
    final ClusterNode nearNode,
    @Nullable final List<CacheObject> writeVals,
    @Nullable final Map<KeyCacheObject, CacheObject> putMap,
    @Nullable final Collection<KeyCacheObject> rmvKeys,
    @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
    @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final boolean replicate,
    final UpdateBatchResult batchRes,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal
) {
    assert putMap == null ^ rmvKeys == null;
    assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";

    AffinityTopologyVersion topVer = req.topologyVersion();

    boolean checkReaders = hasNear || ctx.discovery().hasNearCache(ctx.cacheId(), topVer);

    CacheStorePartialUpdateException storeErr = null;

    try {
        GridCacheOperation op;

        if (putMap != null) {
            try {
                Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view =
                    F.viewReadOnly(putMap, new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
                        @Override public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
                            return F.t(val, ver);
                        }
                    });

                ctx.store().putAll(null, view);
            }
            catch (CacheStorePartialUpdateException e) {
                storeErr = e;
            }

            op = UPDATE;
        }
        else {
            try {
                ctx.store().removeAll(null, rmvKeys);
            }
            catch (CacheStorePartialUpdateException e) {
                storeErr = e;
            }

            op = DELETE;
        }

        boolean intercept = ctx.config().getInterceptor() != null;

        AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);

        // Avoid iterator creation.
        for (int i = 0; i < entries.size(); i++) {
            GridDhtCacheEntry entry = entries.get(i);

            assert Thread.holdsLock(entry);

            if (entry.obsolete()) {
                assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;

                continue;
            }

            if (storeErr != null &&
                storeErr.failedKeys().contains(entry.key().value(ctx.cacheObjectContext(), false)))
                continue;

            try {
                // We are holding java-level locks on entries at this point.
                CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;

                assert writeVal != null || op == DELETE : "null write value found.";

                Collection<UUID> readers = null;
                Collection<UUID> filteredReaders = null;

                if (checkReaders) {
                    readers = entry.readers();
                    filteredReaders = F.view(entry.readers(), F.notEqualTo(nearNode.id()));
                }

                GridCacheUpdateAtomicResult updRes = entry.innerUpdate(
                    ver,
                    nearNode.id(),
                    locNodeId,
                    op,
                    writeVal,
                    null,
                    /*write-through*/false,
                    /*read-through*/false,
                    /*retval*/sndPrevVal,
                    req.keepBinary(),
                    expiry,
                    /*event*/true,
                    /*metrics*/true,
                    /*primary*/true,
                    /*verCheck*/false,
                    topVer,
                    null,
                    replicate ? DR_PRIMARY : DR_NONE,
                    CU.TTL_NOT_CHANGED,
                    CU.EXPIRE_TIME_CALCULATE,
                    null,
                    /*conflict resolve*/false,
                    /*intercept*/false,
                    req.subjectId(),
                    taskName,
                    null,
                    null,
                    dhtFut);

                assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null :
                    "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;

                if (intercept) {
                    if (op == UPDATE) {
                        ctx.config().getInterceptor().onAfterPut(
                            new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
                    }
                    else {
                        assert op == DELETE : op;

                        // Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
                        ctx.config().getInterceptor().onAfterRemove(
                            new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
                    }
                }

                batchRes.addDeleted(entry, updRes, entries);

                if (dhtFut != null) {
                    EntryProcessor<Object, Object, Object> entryProcessor =
                        entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());

                    dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(),
                        CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter());

                    if (!F.isEmpty(filteredReaders))
                        dhtFut.addNearWriteEntries(filteredReaders, entry, writeVal, entryProcessor,
                            updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
                }

                if (hasNear) {
                    if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
                        int idx = firstEntryIdx + i;

                        if (req.operation() == TRANSFORM)
                            res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
                        else
                            res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);

                        if (writeVal != null || entry.hasValue()) {
                            IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);

                            assert f == null : f;
                        }
                    }
                    // Reader became primary or backup.
                    else if (readers.contains(nearNode.id()))
                        entry.removeReader(nearNode.id(), req.messageId());
                    else
                        res.addSkippedIndex(firstEntryIdx + i);
                }
            }
            catch (GridCacheEntryRemovedException e) {
                assert false : "Entry cannot become obsolete while holding lock.";

                e.printStackTrace();
            }
        }
    }
    catch (IgniteCheckedException e) {
        res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
    }

    if (storeErr != null) {
        ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());

        for (Object failedKey : storeErr.failedKeys())
            failed.add(ctx.toCacheKeyObject(failedKey));

        res.addFailedKeys(failed, storeErr.getCause());
    }

    return dhtFut;
}
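The control flow above is easier to see with the Ignite internals stripped away: write the whole batch through to the store first, then apply the in-memory updates, skipping and reporting any keys the store rejected. Below is a minimal, self-contained sketch of that partial-batch pattern; SketchStore and PartialUpdateException are hypothetical stand-ins for the cache store and CacheStorePartialUpdateException, not Ignite API.

import java.util.*;

public class PartialBatchSketch {
    /** Hypothetical store that may reject a subset of keys. */
    interface SketchStore {
        void putAll(Map<String, String> batch) throws PartialUpdateException;
    }

    /** Hypothetical analogue of CacheStorePartialUpdateException. */
    static class PartialUpdateException extends Exception {
        final Set<String> failedKeys;

        PartialUpdateException(Set<String> failedKeys) { this.failedKeys = failedKeys; }
    }

    static List<String> updateBatch(SketchStore store, Map<String, String> putMap, Map<String, String> cache) {
        Set<String> failed = Collections.emptySet();

        try {
            // Write-through for the whole batch first.
            store.putAll(putMap);
        }
        catch (PartialUpdateException e) {
            // Remember rejected keys; do not abort the rest of the batch.
            failed = e.failedKeys;
        }

        List<String> applied = new ArrayList<>();

        for (Map.Entry<String, String> e : putMap.entrySet()) {
            // Skip keys the store refused; they are reported back to the caller.
            if (failed.contains(e.getKey()))
                continue;

            cache.put(e.getKey(), e.getValue());
            applied.add(e.getKey());
        }

        return applied;
    }
}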
use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
the class IgniteTxAdapter method conflictResolve.
/**
* Resolve DR conflict.
*
* @param op Initially proposed operation.
* @param txEntry TX entry being updated.
* @param newVal New value.
* @param newVer New version.
* @param old Old entry.
* @return Tuple with adjusted operation type and conflict context.
* @throws IgniteCheckedException In case of any exception.
* @throws GridCacheEntryRemovedException If entry got removed.
*/
@SuppressWarnings({ "unchecked", "ConstantConditions" })
protected IgniteBiTuple<GridCacheOperation, GridCacheVersionConflictContext> conflictResolve(
    GridCacheOperation op,
    IgniteTxEntry txEntry,
    CacheObject newVal,
    GridCacheVersion newVer,
    GridCacheEntryEx old
) throws IgniteCheckedException, GridCacheEntryRemovedException {
    assert newVer != null;

    // 1. Calculate TTL and expire time.
    long newTtl = txEntry.ttl();
    long newExpireTime = txEntry.conflictExpireTime();

    // 1.1. If TTL is not changed, then calculate it based on expiry.
    if (newTtl == CU.TTL_NOT_CHANGED) {
        ExpiryPolicy expiry = txEntry.context().expiryForTxEntry(txEntry);

        if (expiry != null) {
            if (op == CREATE)
                newTtl = CU.toTtl(expiry.getExpiryForCreation());
            else if (op == UPDATE)
                newTtl = CU.toTtl(expiry.getExpiryForUpdate());
        }
    }

    // 1.2. If TTL is set to zero, then mark operation as "DELETE".
    if (newTtl == CU.TTL_ZERO) {
        op = DELETE;

        newTtl = CU.TTL_ETERNAL;
    }

    // 1.3. If TTL is still not changed, then either use old entry TTL or set it to "ETERNAL".
    if (newTtl == CU.TTL_NOT_CHANGED) {
        if (old.isNewLocked())
            newTtl = CU.TTL_ETERNAL;
        else {
            newTtl = old.rawTtl();
            newExpireTime = old.rawExpireTime();
        }
    }

    // TTL must be resolved at this point.
    assert newTtl != CU.TTL_ZERO && newTtl != CU.TTL_NOT_CHANGED;

    // 1.4. If expire time was not set explicitly, then calculate it.
    if (newExpireTime == CU.EXPIRE_TIME_CALCULATE)
        newExpireTime = CU.toExpireTime(newTtl);

    // Expire time must be resolved at this point.
    assert newExpireTime != CU.EXPIRE_TIME_CALCULATE;

    // Construct old entry info.
    GridCacheVersionedEntryEx oldEntry = old.versionedEntry(txEntry.keepBinary());

    // Construct new entry info.
    GridCacheContext entryCtx = txEntry.context();

    GridCacheVersionedEntryEx newEntry = new GridCacheLazyPlainVersionedEntry(
        entryCtx, txEntry.key(), newVal, newTtl, newExpireTime, newVer, false, txEntry.keepBinary());

    GridCacheVersionConflictContext ctx = old.context().conflictResolve(oldEntry, newEntry, false);

    if (ctx.isMerge()) {
        Object resVal = ctx.mergeValue();

        if ((op == CREATE || op == UPDATE) && resVal == null)
            op = DELETE;
        else if (op == DELETE && resVal != null)
            op = old.isNewLocked() ? CREATE : UPDATE;
    }

    return F.t(op, ctx);
}
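The numbered steps 1.1 through 1.4 form a small, self-contained resolution algorithm. The sketch below restates it in isolation; the sentinel constants mirror the roles of CU.TTL_NOT_CHANGED, CU.TTL_ZERO, CU.TTL_ETERNAL and CU.EXPIRE_TIME_CALCULATE, but their values here are local assumptions for illustration, not the actual Ignite definitions.

public class TtlResolutionSketch {
    // Assumed sentinel values, standing in for the CU constants.
    static final long TTL_NOT_CHANGED = -1L;
    static final long TTL_ZERO = -2L;
    static final long TTL_ETERNAL = 0L;
    static final long EXPIRE_TIME_CALCULATE = -1L;

    record Resolved(long ttl, long expireTime, boolean delete) {}

    static Resolved resolve(long newTtl, long newExpireTime, Long policyTtl,
        long oldTtl, long oldExpireTime, boolean oldIsNew) {
        boolean delete = false;

        // 1.1. TTL not changed: take it from the expiry policy (may still be "not changed").
        if (newTtl == TTL_NOT_CHANGED && policyTtl != null)
            newTtl = policyTtl;

        // 1.2. TTL of zero means the entry must be removed.
        if (newTtl == TTL_ZERO) {
            delete = true;
            newTtl = TTL_ETERNAL;
        }

        // 1.3. Still unchanged: inherit from the old entry, or make eternal if the entry is new.
        if (newTtl == TTL_NOT_CHANGED) {
            if (oldIsNew)
                newTtl = TTL_ETERNAL;
            else {
                newTtl = oldTtl;
                newExpireTime = oldExpireTime;
            }
        }

        // 1.4. Derive expire time from TTL when it was not set explicitly.
        if (newExpireTime == EXPIRE_TIME_CALCULATE)
            newExpireTime = newTtl == TTL_ETERNAL ? 0L : System.currentTimeMillis() + newTtl;

        return new Resolved(newTtl, newExpireTime, delete);
    }
}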
use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
the class IgniteTxAdapter method applyTransformClosures.
/**
* @param txEntry Entry to process.
* @param metrics {@code True} if metrics should be updated.
* @param ret Optional return value to initialize.
* @return Tuple containing transformation results.
* @throws IgniteCheckedException If failed to get previous value for transform.
* @throws GridCacheEntryRemovedException If entry was concurrently deleted.
*/
protected IgniteBiTuple<GridCacheOperation, CacheObject> applyTransformClosures(
    IgniteTxEntry txEntry,
    boolean metrics,
    @Nullable GridCacheReturn ret
) throws GridCacheEntryRemovedException, IgniteCheckedException {
    assert txEntry.op() != TRANSFORM || !F.isEmpty(txEntry.entryProcessors()) : txEntry;

    GridCacheContext cacheCtx = txEntry.context();

    assert cacheCtx != null;

    if (isSystemInvalidate())
        return F.t(cacheCtx.writeThrough() ? RELOAD : DELETE, null);

    if (F.isEmpty(txEntry.entryProcessors())) {
        if (ret != null)
            ret.value(cacheCtx, txEntry.value(), txEntry.keepBinary());

        return F.t(txEntry.op(), txEntry.value());
    }
    else {
        T2<GridCacheOperation, CacheObject> calcVal = txEntry.entryProcessorCalculatedValue();

        if (calcVal != null)
            return calcVal;

        boolean recordEvt = cctx.gridEvents().isRecordable(EVT_CACHE_OBJECT_READ);

        final boolean keepBinary = txEntry.keepBinary();

        CacheObject cacheVal;

        if (txEntry.hasValue())
            cacheVal = txEntry.value();
        else if (txEntry.hasOldValue())
            cacheVal = txEntry.oldValue();
        else {
            cacheVal = txEntry.cached().innerGet(
                null,
                this,
                /*read through*/false,
                /*metrics*/metrics,
                /*event*/recordEvt,
                /*subjId*/subjId,
                /*closure name*/recordEvt ? F.first(txEntry.entryProcessors()).get1() : null,
                resolveTaskName(),
                null,
                keepBinary);
        }

        boolean modified = false;

        Object val = null;
        Object key = null;

        GridCacheVersion ver;

        try {
            ver = txEntry.cached().version();
        }
        catch (GridCacheEntryRemovedException e) {
            assert optimistic() : txEntry;

            if (log.isDebugEnabled())
                log.debug("Failed to get entry version: [msg=" + e.getMessage() + ']');

            ver = null;
        }

        for (T2<EntryProcessor<Object, Object, Object>, Object[]> t : txEntry.entryProcessors()) {
            CacheInvokeEntry<Object, Object> invokeEntry =
                new CacheInvokeEntry<>(txEntry.key(), key, cacheVal, val, ver, keepBinary, txEntry.cached());

            Object procRes = null;
            Exception err = null;

            try {
                EntryProcessor<Object, Object, Object> processor = t.get1();

                procRes = processor.process(invokeEntry, t.get2());

                val = invokeEntry.getValue();
                key = invokeEntry.key();
            }
            catch (Exception e) {
                err = e;
            }

            if (ret != null) {
                if (err != null || procRes != null)
                    ret.addEntryProcessResult(txEntry.context(), txEntry.key(), null, procRes, err, keepBinary);
                else
                    ret.invokeResult(true);
            }

            modified |= invokeEntry.modified();
        }

        if (modified)
            cacheVal = cacheCtx.toCacheObject(cacheCtx.unwrapTemporary(val));

        GridCacheOperation op = modified ? (cacheVal == null ? DELETE : UPDATE) : NOOP;

        if (op == NOOP) {
            ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);

            if (expiry != null) {
                long ttl = CU.toTtl(expiry.getExpiryForAccess());

                txEntry.ttl(ttl);

                if (ttl == CU.TTL_ZERO)
                    op = DELETE;
            }
        }

        return F.t(op, cacheVal);
    }
}
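The loop over txEntry.entryProcessors() is the standard JCache invoke-chaining pattern: each EntryProcessor observes the value left behind by the previous one, and only the final modified flag and value decide between DELETE, UPDATE and NOOP. A minimal sketch using only the public javax.cache.processor API follows; MapBackedEntry is a hypothetical helper written for illustration, not Ignite code.

import java.util.List;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;

public class TransformChainSketch {
    /** Minimal MutableEntry over a plain in-memory value. */
    static class MapBackedEntry implements MutableEntry<String, String> {
        private final String key;
        private String val;
        private boolean modified;

        MapBackedEntry(String key, String val) { this.key = key; this.val = val; }

        @Override public boolean exists() { return val != null; }
        @Override public void remove() { val = null; modified = true; }
        @Override public void setValue(String v) { val = v; modified = true; }
        @Override public String getKey() { return key; }
        @Override public String getValue() { return val; }
        @Override public <T> T unwrap(Class<T> clazz) { throw new IllegalArgumentException(); }

        boolean modified() { return modified; }
    }

    static String applyChain(String key, String initial, List<EntryProcessor<String, String, Object>> chain) {
        MapBackedEntry entry = new MapBackedEntry(key, initial);

        for (EntryProcessor<String, String, Object> proc : chain) {
            try {
                // Each processor sees the value produced by the previous one.
                proc.process(entry);
            }
            catch (Exception e) {
                // Swallowed here; the tx code records it in GridCacheReturn instead.
            }
        }

        // modified + null value => DELETE, modified + value => UPDATE, otherwise NOOP.
        return entry.modified() ? entry.getValue() : initial;
    }
}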
use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
the class GridBinaryAffinityKeySelfTest method checkAffinity.
/**
* @param ignite Ignite.
* @throws Exception If failed.
*/
private void checkAffinity(Ignite ignite) throws Exception {
    Affinity<Object> aff = ignite.affinity(DEFAULT_CACHE_NAME);

    GridAffinityProcessor affProc = ((IgniteKernal)ignite).context().affinity();

    IgniteCacheObjectProcessor cacheObjProc = ((IgniteKernal)ignite).context().cacheObjects();

    CacheObjectContext cacheObjCtx = cacheObjProc.contextForCache(
        ignite.cache(DEFAULT_CACHE_NAME).getConfiguration(CacheConfiguration.class));

    for (int i = 0; i < 1000; i++) {
        assertEquals(i, aff.affinityKey(i));
        assertEquals(i, aff.affinityKey(new TestObject(i)));
        assertEquals(i, aff.affinityKey(ignite.binary().toBinary(new TestObject(i))));
        assertEquals(i, aff.affinityKey(new AffinityKey(0, i)));

        BinaryObjectBuilder bldr = ignite.binary().builder("TestObject2");

        bldr.setField("affKey", i);

        assertEquals(i, aff.affinityKey(bldr.build()));

        CacheObject cacheObj = cacheObjProc.toCacheObject(cacheObjCtx, new TestObject(i), true);

        assertEquals(i, aff.affinityKey(cacheObj));

        assertEquals(aff.mapKeyToNode(i), aff.mapKeyToNode(new TestObject(i)));
        assertEquals(aff.mapKeyToNode(i), aff.mapKeyToNode(cacheObj));

        assertEquals(i, affProc.affinityKey(DEFAULT_CACHE_NAME, i));
        assertEquals(i, affProc.affinityKey(DEFAULT_CACHE_NAME, new TestObject(i)));
        assertEquals(i, affProc.affinityKey(DEFAULT_CACHE_NAME, cacheObj));

        assertEquals(affProc.mapKeyToNode(DEFAULT_CACHE_NAME, i),
            affProc.mapKeyToNode(DEFAULT_CACHE_NAME, new TestObject(i)));
        assertEquals(affProc.mapKeyToNode(DEFAULT_CACHE_NAME, i),
            affProc.mapKeyToNode(DEFAULT_CACHE_NAME, cacheObj));
        assertEquals(affProc.mapKeyToNode(DEFAULT_CACHE_NAME, new AffinityKey(0, i)),
            affProc.mapKeyToNode(DEFAULT_CACHE_NAME, i));
    }
}
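The assertions boil down to one public-API guarantee: keys that declare the same affinity key map to the same partition and node, whatever their concrete type. A hedged usage sketch of that guarantee, assuming a locally started node with default configuration and a cache named "default":

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cache.affinity.AffinityKey;

public class AffinityKeySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("default");

            Affinity<Object> aff = ignite.affinity("default");

            // Different primary keys, same affinity component (42).
            AffinityKey<Integer> k1 = new AffinityKey<>(1, 42);
            AffinityKey<Integer> k2 = new AffinityKey<>(2, 42);

            // Same affinity key, hence the same partition and node.
            assert aff.affinityKey(k1).equals(aff.affinityKey(k2));
            assert aff.partition(k1) == aff.partition(k2);
            assert aff.mapKeyToNode(k1).equals(aff.mapKeyToNode(k2));
        }
    }
}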
use of org.apache.ignite.internal.processors.cache.CacheObject in project ignite by apache.
the class GridDistributedCacheEntry method removeLock.
/**
* {@inheritDoc}
*/
@Override
public boolean removeLock(GridCacheVersion ver) throws GridCacheEntryRemovedException {
    CacheLockCandidates prev = null;
    CacheLockCandidates owner = null;

    GridCacheMvccCandidate doomed;

    CacheObject val;

    lockEntry();

    try {
        GridCacheMvcc mvcc = mvccExtras();

        doomed = mvcc == null ? null : mvcc.candidate(ver);

        if (doomed == null)
            addRemoved(ver);

        GridCacheVersion obsoleteVer = obsoleteVersionExtras();

        if (obsoleteVer != null && !obsoleteVer.equals(ver))
            checkObsolete();

        if (doomed != null) {
            prev = mvcc.allOwners();

            boolean emptyBefore = mvcc.isEmpty();

            mvcc.remove(doomed.version());

            boolean emptyAfter = mvcc.isEmpty();

            if (!doomed.local())
                refreshRemotes();

            checkCallbacks(emptyBefore, emptyAfter);

            if (emptyAfter)
                mvccExtras(null);
            else
                owner = mvcc.allOwners();
        }

        val = this.val;
    }
    finally {
        unlockEntry();
    }

    if (log.isDebugEnabled())
        log.debug("Removed lock candidate from entry [doomed=" + doomed +
            ", owner=" + owner + ", prev=" + prev + ", entry=" + this + ']');

    if (doomed != null && doomed.nearLocal())
        cctx.mvcc().removeExplicitLock(doomed);

    if (doomed != null)
        checkThreadChain(doomed);

    // This call must be outside of synchronization.
    checkOwnerChanged(prev, owner, val);

    return doomed != null;
}
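Note the comment before checkOwnerChanged(prev, owner, val): state is mutated and snapshotted between lockEntry() and unlockEntry(), but listener callbacks fire only after the lock is released, so a listener that re-enters the entry cannot deadlock. A plain-Java sketch of that discipline; the names are illustrative, not Ignite API.

import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;

public class CallbackOutsideLockSketch {
    private final ReentrantLock lock = new ReentrantLock();

    private String owner;

    void removeOwner(String doomed, Consumer<String> ownerChangedCallback) {
        String newOwner;

        lock.lock();
        try {
            // Mutate shared state and capture the snapshot the callback needs.
            if (doomed.equals(owner))
                owner = null;

            newOwner = owner;
        }
        finally {
            lock.unlock();
        }

        // Invoked outside the lock, mirroring checkOwnerChanged() above: the
        // callback may call back into this object and take the lock again.
        ownerChangedCallback.accept(newOwner);
    }
}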