Use of org.apache.ignite.internal.processors.cache.CacheLazyEntry in project ignite by apache.
In class GridDhtAtomicCache, method updatePartialBatch:
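CacheLazyEntry is the internal Cache.Entry implementation that Ignite passes to CacheInterceptor callbacks; it defers deserializing the binary key and value until first access. As a minimal sketch (the cache name and key/value types here are made up for illustration), an interceptor that receives such entries can be registered like this:
import javax.cache.Cache;
import org.apache.ignite.cache.CacheInterceptorAdapter;
import org.apache.ignite.configuration.CacheConfiguration;

static CacheConfiguration<Integer, String> interceptedCacheConfig() {
    CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("myCache");

    ccfg.setInterceptor(new CacheInterceptorAdapter<Integer, String>() {
        @Override public void onAfterPut(Cache.Entry<Integer, String> entry) {
            // The entry handed in here is a CacheLazyEntry: getKey()/getValue()
            // unwrap the underlying binary objects on first access.
            System.out.println("Put: " + entry.getKey() + " -> " + entry.getValue());
        }

        @Override public void onAfterRemove(Cache.Entry<Integer, String> entry) {
            System.out.println("Removed: " + entry.getKey());
        }
    });

    return ccfg;
}
The onAfterPut and onAfterRemove calls in the method below construct exactly these entries.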
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param dhtFut DHT update future if there are backups.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param batchRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return DHT update future.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
@Nullable
private GridDhtAtomicAbstractUpdateFuture updatePartialBatch(
    final boolean hasNear,
    final int firstEntryIdx,
    final List<GridDhtCacheEntry> entries,
    final GridCacheVersion ver,
    final ClusterNode nearNode,
    @Nullable final List<CacheObject> writeVals,
    @Nullable final Map<KeyCacheObject, CacheObject> putMap,
    @Nullable final Collection<KeyCacheObject> rmvKeys,
    @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
    @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final boolean replicate,
    final DhtAtomicUpdateResult batchRes,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal) {
assert putMap == null ^ rmvKeys == null;
assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";
AffinityTopologyVersion topVer = req.topologyVersion();
CacheStorePartialUpdateException storeErr = null;
try {
GridCacheOperation op;
if (putMap != null) {
try {
Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap,
    new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
        @Override public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
            return F.t(val, ver);
        }
    });
ctx.store().putAll(null, view);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = UPDATE;
} else {
try {
ctx.store().removeAll(null, rmvKeys);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = DELETE;
}
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
// Avoid iterator creation.
for (int i = 0; i < entries.size(); i++) {
GridDhtCacheEntry entry = entries.get(i);
assert entry.lockedByCurrentThread();
if (entry.obsolete()) {
assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;
continue;
}
if (storeErr != null && storeErr.failedKeys().contains(entry.key().value(ctx.cacheObjectContext(), false)))
continue;
try {
// We are holding java-level locks on entries at this point.
CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
assert writeVal != null || op == DELETE : "null write value found.";
// Get readers before innerUpdate (reader cleared after remove).
GridDhtCacheEntry.ReaderId[] readers = entry.readersLocked();
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal, null,
    /*write-through*/false,
    /*read-through*/false,
    /*retval*/sndPrevVal,
    req.keepBinary(), expiry,
    /*event*/true,
    /*metrics*/true,
    /*primary*/true,
    /*verCheck*/false,
    topVer, null, replicate ? DR_PRIMARY : DR_NONE, CU.TTL_NOT_CHANGED, CU.EXPIRE_TIME_CALCULATE, null,
    /*conflict resolve*/false,
    /*intercept*/false,
    req.subjectId(), taskName, null, null, dhtFut);
assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null : "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;
if (intercept) {
if (op == UPDATE) {
ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
} else {
assert op == DELETE : op;
// Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
}
}
batchRes.addDeleted(entry, updRes, entries);
if (dhtFut != null) {
EntryProcessor<Object, Object, Object> entryProcessor = entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());
dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
if (readers != null)
dhtFut.addNearWriteEntries(nearNode, readers, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
}
if (hasNear) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
int idx = firstEntryIdx + i;
if (req.operation() == TRANSFORM) {
res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
} else
res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
if (writeVal != null || entry.hasValue()) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (GridDhtCacheEntry.ReaderId.contains(readers, nearNode.id())) {
// Reader became primary or backup.
entry.removeReader(nearNode.id(), req.messageId());
} else
res.addSkippedIndex(firstEntryIdx + i);
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry cannot become obsolete while holding lock.";
e.printStackTrace();
}
}
} catch (IgniteCheckedException e) {
res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
}
if (storeErr != null) {
ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());
for (Object failedKey : storeErr.failedKeys()) failed.add(ctx.toCacheKeyObject(failedKey));
res.addFailedKeys(failed, storeErr.getCause());
}
return dhtFut;
}
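In the method above, F.viewReadOnly wraps putMap so that each value is paired with the update version ver on the fly, without copying the map before it is handed to ctx.store().putAll(...). F.viewReadOnly is an Ignite-internal helper; a plain-Java sketch of the same zero-copy transformed view, with hypothetical names, could look like this:
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

/** Read-only live view of 'src' whose values are transformed on access (no copying). */
static <K, V, W> Map<K, W> viewReadOnly(Map<K, V> src, Function<? super V, ? extends W> f) {
    return new AbstractMap<K, W>() {
        @Override public Set<Map.Entry<K, W>> entrySet() {
            return new AbstractSet<Map.Entry<K, W>>() {
                @Override public Iterator<Map.Entry<K, W>> iterator() {
                    Iterator<Map.Entry<K, V>> it = src.entrySet().iterator();

                    return new Iterator<Map.Entry<K, W>>() {
                        @Override public boolean hasNext() {
                            return it.hasNext();
                        }

                        @Override public Map.Entry<K, W> next() {
                            Map.Entry<K, V> e = it.next();

                            // Transform lazily, one entry at a time.
                            return new AbstractMap.SimpleImmutableEntry<>(e.getKey(), f.apply(e.getValue()));
                        }
                    };
                }

                @Override public int size() {
                    return src.size();
                }
            };
        }
    };
}
With such a helper, the call above would be equivalent to ctx.store().putAll(null, viewReadOnly(putMap, val -> F.t(val, ver))).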
Use of org.apache.ignite.internal.processors.cache.CacheLazyEntry in project ignite by apache.
In class IgniteTxAdapter, method batchStoreCommit:
/**
* Performs batch database operations. This method must be called
* before the cache update takes place; that way, if the database
* operation fails, the cache transaction can still be rolled back.
*
* @param writeEntries Transaction write set.
* @throws IgniteCheckedException If batch update failed.
*/
protected final void batchStoreCommit(Iterable<IgniteTxEntry> writeEntries) throws IgniteCheckedException {
// No need to work with the local store at GridNearTxRemote.
if (!storeEnabled() || internal() || (!local() && near()))
    return;
Collection<CacheStoreManager> stores = txState().stores(cctx);
if (stores == null || stores.isEmpty())
return;
assert isWriteToStoreFromDhtValid(stores) : "isWriteToStoreFromDht can't be different within one transaction";
CacheStoreManager first = F.first(stores);
boolean isWriteToStoreFromDht = first.isWriteToStoreFromDht();
if ((local() || first.isLocal()) && (near() || isWriteToStoreFromDht)) {
try {
if (writeEntries != null) {
Map<KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> putMap = null;
List<KeyCacheObject> rmvCol = null;
CacheStoreManager writeStore = null;
boolean skipNonPrimary = near() && isWriteToStoreFromDht;
for (IgniteTxEntry e : writeEntries) {
boolean skip = e.skipStore();
if (!skip && skipNonPrimary) {
skip = e.cached().isNear() || e.cached().detached() || !e.context().affinity().primaryByPartition(e.cached().partition(), topologyVersion()).isLocal();
}
// Update local store at backups only if needed.
if (!skip && !local() && cctx.localStorePrimaryOnly())
    skip = true;
if (skip)
continue;
boolean intercept = e.context().config().getInterceptor() != null;
if (intercept || !F.isEmpty(e.entryProcessors()))
e.cached().unswap(false);
IgniteBiTuple<GridCacheOperation, CacheObject> res = applyTransformClosures(e, false, null);
GridCacheContext cacheCtx = e.context();
GridCacheOperation op = res.get1();
KeyCacheObject key = e.key();
CacheObject val = res.get2();
GridCacheVersion ver = writeVersion();
if (op == CREATE || op == UPDATE) {
// Batch-process all removes if needed.
if (rmvCol != null && !rmvCol.isEmpty()) {
assert writeStore != null;
writeStore.removeAll(this, rmvCol);
// Reset.
rmvCol.clear();
writeStore = null;
}
// Batch-process puts if cache ID has changed.
if (writeStore != null && writeStore != cacheCtx.store()) {
if (putMap != null && !putMap.isEmpty()) {
writeStore.putAll(this, putMap);
// Reset.
putMap.clear();
}
writeStore = null;
}
if (intercept) {
Object interceptorVal = cacheCtx.config().getInterceptor().onBeforePut(new CacheLazyEntry(cacheCtx, key, e.cached().rawGet(), e.keepBinary()), cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(val, e.keepBinary(), false, null));
if (interceptorVal == null)
continue;
val = cacheCtx.toCacheObject(cacheCtx.unwrapTemporary(interceptorVal));
}
if (writeStore == null)
writeStore = cacheCtx.store();
if (writeStore.isWriteThrough()) {
if (putMap == null)
putMap = new LinkedHashMap<>(writeMap().size(), 1.0f);
putMap.put(key, F.t(val, ver));
}
} else if (op == DELETE) {
// Batch-process all puts if needed.
if (putMap != null && !putMap.isEmpty()) {
assert writeStore != null;
writeStore.putAll(this, putMap);
// Reset.
putMap.clear();
writeStore = null;
}
if (writeStore != null && writeStore != cacheCtx.store()) {
if (rmvCol != null && !rmvCol.isEmpty()) {
writeStore.removeAll(this, rmvCol);
// Reset.
rmvCol.clear();
}
writeStore = null;
}
if (intercept) {
IgniteBiTuple<Boolean, Object> t = cacheCtx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(cacheCtx, key, e.cached().rawGet(), e.keepBinary()));
if (cacheCtx.cancelRemove(t))
continue;
}
if (writeStore == null)
writeStore = cacheCtx.store();
if (writeStore.isWriteThrough()) {
if (rmvCol == null)
rmvCol = new ArrayList<>();
rmvCol.add(key);
}
} else if (log.isDebugEnabled())
log.debug("Ignoring NOOP entry for batch store commit: " + e);
}
if (putMap != null && !putMap.isEmpty()) {
assert rmvCol == null || rmvCol.isEmpty();
assert writeStore != null;
// Batch put at the end of transaction.
writeStore.putAll(this, putMap);
}
if (rmvCol != null && !rmvCol.isEmpty()) {
assert putMap == null || putMap.isEmpty();
assert writeStore != null;
// Batch remove at the end of transaction.
writeStore.removeAll(this, rmvCol);
}
}
// Commit while locks are held.
sessionEnd(stores, true);
} catch (IgniteCheckedException ex) {
commitError(ex);
errorWhenCommitting();
// Safe to remove transaction from committed tx list because nothing was committed yet.
cctx.tm().removeCommittedTx(this);
throw ex;
} catch (Throwable ex) {
commitError(ex);
errorWhenCommitting();
// Safe to remove transaction from committed tx list because nothing was committed yet.
cctx.tm().removeCommittedTx(this);
if (ex instanceof Error)
throw (Error) ex;
throw new IgniteCheckedException("Failed to commit transaction to database: " + this, ex);
} finally {
if (isRollbackOnly())
sessionEnd(stores, false);
}
} else
sessionEnd(stores, true);
}
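The core of the loop above is a flush discipline: puts and removes are buffered separately, and whenever the operation type flips (or the target store changes), the opposite buffer is drained first, so the underlying store observes operations on any given key in their original order. A simplified, hedged sketch of that discipline, using a hypothetical BatchStore type (an entry with a null value stands for a remove):
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/** Hypothetical batch-capable store, standing in for CacheStoreManager. */
interface BatchStore<K, V> {
    void putAll(Map<K, V> batch);

    void removeAll(List<K> batch);
}

/**
 * Flushes a mixed sequence of puts and removes in batches while preserving
 * order: pending removes are drained before a put is buffered and vice versa,
 * so the store never sees operations on the same key reordered.
 */
static <K, V> void batchCommit(Iterable<Map.Entry<K, V>> ops, BatchStore<K, V> store) {
    Map<K, V> putBatch = new LinkedHashMap<>();
    List<K> rmvBatch = new ArrayList<>();

    for (Map.Entry<K, V> op : ops) {
        if (op.getValue() != null) { // Put.
            if (!rmvBatch.isEmpty()) { // Drain pending removes first.
                store.removeAll(rmvBatch);
                rmvBatch.clear();
            }

            putBatch.put(op.getKey(), op.getValue());
        }
        else { // Remove.
            if (!putBatch.isEmpty()) { // Drain pending puts first.
                store.putAll(putBatch);
                putBatch.clear();
            }

            rmvBatch.add(op.getKey());
        }
    }

    // Batch-flush whatever remains at the end of the transaction.
    if (!putBatch.isEmpty())
        store.putAll(putBatch);

    if (!rmvBatch.isEmpty())
        store.removeAll(rmvBatch);
}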
Use of org.apache.ignite.internal.processors.cache.CacheLazyEntry in project ignite by apache.
In class GridDhtAtomicCache, method updatePartialBatch (a variant that returns void and carries the DHT future inside DhtAtomicUpdateResult):
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param dhtUpdRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
*/
private void updatePartialBatch(
    final boolean hasNear,
    final int firstEntryIdx,
    final List<GridDhtCacheEntry> entries,
    final GridCacheVersion ver,
    final ClusterNode nearNode,
    @Nullable final List<CacheObject> writeVals,
    @Nullable final Map<KeyCacheObject, CacheObject> putMap,
    @Nullable final Collection<KeyCacheObject> rmvKeys,
    @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final boolean replicate,
    final DhtAtomicUpdateResult dhtUpdRes,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal) {
assert putMap == null ^ rmvKeys == null;
assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";
AffinityTopologyVersion topVer = req.topologyVersion();
CacheStorePartialUpdateException storeErr = null;
try {
GridCacheOperation op;
if (putMap != null) {
try {
Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap,
    new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
        @Override public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
            return F.t(val, ver);
        }
    });
ctx.store().putAll(null, view);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = UPDATE;
} else {
try {
ctx.store().removeAll(null, rmvKeys);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = DELETE;
}
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
final GridDhtAtomicAbstractUpdateFuture dhtFut = dhtUpdRes.dhtFuture();
Collection<Object> failedToUnwrapKeys = null;
// Avoid iterator creation.
for (int i = 0; i < entries.size(); i++) {
GridDhtCacheEntry entry = entries.get(i);
assert entry.lockedByCurrentThread();
if (entry.obsolete()) {
assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;
continue;
}
if (storeErr != null) {
Object key = entry.key();
try {
key = entry.key().value(ctx.cacheObjectContext(), false);
} catch (BinaryInvalidTypeException e) {
if (log.isDebugEnabled()) {
if (failedToUnwrapKeys == null)
failedToUnwrapKeys = new ArrayList<>();
// To limit keys count in log message.
if (failedToUnwrapKeys.size() < 5)
failedToUnwrapKeys.add(key);
}
}
if (storeErr.failedKeys().contains(key))
continue;
}
try {
// We are holding java-level locks on entries at this point.
CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
assert writeVal != null || op == DELETE : "null write value found.";
// Get readers before innerUpdate (reader cleared after remove).
GridDhtCacheEntry.ReaderId[] readers = entry.readersLocked();
EntryProcessor<Object, Object, Object> entryProcessor = entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal, null,
    /*write-through*/false,
    /*read-through*/false,
    /*retval*/sndPrevVal,
    req.keepBinary(), expiry,
    /*event*/true,
    /*metrics*/true,
    /*primary*/true,
    /*verCheck*/false,
    topVer, null, replicate ? DR_PRIMARY : DR_NONE, CU.TTL_NOT_CHANGED, CU.EXPIRE_TIME_CALCULATE, null,
    /*conflict resolve*/false,
    /*intercept*/false,
    taskName, null, null, dhtFut, entryProcessor != null);
assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null : "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;
if (intercept) {
if (op == UPDATE) {
ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
} else {
assert op == DELETE : op;
// Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
}
}
dhtUpdRes.addDeleted(entry, updRes, entries);
if (dhtFut != null) {
dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter(), op);
if (readers != null)
dhtFut.addNearWriteEntries(nearNode, readers, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
}
if (hasNear) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
int idx = firstEntryIdx + i;
if (req.operation() == TRANSFORM) {
res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
} else
res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
if (writeVal != null || entry.hasValue()) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (GridDhtCacheEntry.ReaderId.contains(readers, nearNode.id())) {
// Reader became primary or backup.
entry.removeReader(nearNode.id(), req.messageId());
} else
res.addSkippedIndex(firstEntryIdx + i);
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry cannot become obsolete while holding lock.";
e.printStackTrace();
}
dhtUpdRes.processedEntriesCount(firstEntryIdx + i + 1);
}
if (failedToUnwrapKeys != null) {
log.warning("Failed to get values of keys: " + failedToUnwrapKeys + " (the binary objects will be used instead).");
}
} catch (IgniteCheckedException e) {
res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
}
if (storeErr != null) {
ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());
for (Object failedKey : storeErr.failedKeys()) failed.add(ctx.toCacheKeyObject(failedKey));
res.addFailedKeys(failed, storeErr.getCause());
}
}
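On the public API side, the failed keys collected above surface to the caller as org.apache.ignite.cache.CachePartialUpdateException (for example from IgniteCache.putAll), whose failedKeys() method reports the subset of keys that were not written. A brief usage sketch, assuming a cache and a batch already exist:
import java.util.Collection;
import java.util.Map;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.CachePartialUpdateException;

static void putBatch(IgniteCache<Integer, String> cache, Map<Integer, String> batch) {
    try {
        cache.putAll(batch);
    }
    catch (CachePartialUpdateException e) {
        // Only these keys were rejected by the underlying store;
        // the remaining entries were written successfully.
        Collection<?> failed = e.failedKeys();

        System.err.println("Failed keys: " + failed);
    }
}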
Use of org.apache.ignite.internal.processors.cache.CacheLazyEntry in project ignite by apache.
In class GridDhtAtomicCache, method updatePartialBatch (an older variant using UpdateBatchResult):
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param dhtFut DHT update future if there are backups.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param batchRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return DHT update future.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
@Nullable
private GridDhtAtomicAbstractUpdateFuture updatePartialBatch(
    final boolean hasNear,
    final int firstEntryIdx,
    final List<GridDhtCacheEntry> entries,
    final GridCacheVersion ver,
    final ClusterNode nearNode,
    @Nullable final List<CacheObject> writeVals,
    @Nullable final Map<KeyCacheObject, CacheObject> putMap,
    @Nullable final Collection<KeyCacheObject> rmvKeys,
    @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
    @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final boolean replicate,
    final UpdateBatchResult batchRes,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal) {
assert putMap == null ^ rmvKeys == null;
assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";
AffinityTopologyVersion topVer = req.topologyVersion();
boolean checkReaders = hasNear || ctx.discovery().hasNearCache(ctx.cacheId(), topVer);
CacheStorePartialUpdateException storeErr = null;
try {
GridCacheOperation op;
if (putMap != null) {
try {
Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap,
    new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
        @Override public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
            return F.t(val, ver);
        }
    });
ctx.store().putAll(null, view);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = UPDATE;
} else {
try {
ctx.store().removeAll(null, rmvKeys);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = DELETE;
}
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
// Avoid iterator creation.
for (int i = 0; i < entries.size(); i++) {
GridDhtCacheEntry entry = entries.get(i);
assert Thread.holdsLock(entry);
if (entry.obsolete()) {
assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;
continue;
}
if (storeErr != null && storeErr.failedKeys().contains(entry.key().value(ctx.cacheObjectContext(), false)))
continue;
try {
// We are holding java-level locks on entries at this point.
CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
assert writeVal != null || op == DELETE : "null write value found.";
Collection<UUID> readers = null;
Collection<UUID> filteredReaders = null;
if (checkReaders) {
readers = entry.readers();
filteredReaders = F.view(entry.readers(), F.notEqualTo(nearNode.id()));
}
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal, null,
    /*write-through*/false,
    /*read-through*/false,
    /*retval*/sndPrevVal,
    req.keepBinary(), expiry,
    /*event*/true,
    /*metrics*/true,
    /*primary*/true,
    /*verCheck*/false,
    topVer, null, replicate ? DR_PRIMARY : DR_NONE, CU.TTL_NOT_CHANGED, CU.EXPIRE_TIME_CALCULATE, null,
    /*conflict resolve*/false,
    /*intercept*/false,
    req.subjectId(), taskName, null, null, dhtFut);
assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null : "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;
if (intercept) {
if (op == UPDATE) {
ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
} else {
assert op == DELETE : op;
// Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
}
}
batchRes.addDeleted(entry, updRes, entries);
if (dhtFut != null) {
EntryProcessor<Object, Object, Object> entryProcessor = entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());
dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
if (!F.isEmpty(filteredReaders))
dhtFut.addNearWriteEntries(filteredReaders, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
}
if (hasNear) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
int idx = firstEntryIdx + i;
if (req.operation() == TRANSFORM) {
res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
} else
res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
if (writeVal != null || entry.hasValue()) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (readers.contains(nearNode.id())) {
    // Reader became primary or backup.
    entry.removeReader(nearNode.id(), req.messageId());
}
else
res.addSkippedIndex(firstEntryIdx + i);
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry cannot become obsolete while holding lock.";
e.printStackTrace();
}
}
} catch (IgniteCheckedException e) {
res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
}
if (storeErr != null) {
ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());
for (Object failedKey : storeErr.failedKeys()) failed.add(ctx.toCacheKeyObject(failedKey));
res.addFailedKeys(failed, storeErr.getCause());
}
return dhtFut;
}
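This older variant tracks near readers as a plain Collection<UUID> and uses F.view(entry.readers(), F.notEqualTo(nearNode.id())) to exclude the originating node before scheduling near-cache updates. F.view produces a lazy, read-only filtered view; a plain-Java approximation (which, unlike F.view, copies the collection; readers and nearNodeId are placeholders) might be:
import java.util.Collection;
import java.util.UUID;
import java.util.stream.Collectors;

/** All near-cache readers except the node that originated the update. */
static Collection<UUID> filteredReaders(Collection<UUID> readers, UUID nearNodeId) {
    return readers.stream()
        .filter(id -> !id.equals(nearNodeId))
        .collect(Collectors.toList());
}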
Use of org.apache.ignite.internal.processors.cache.CacheLazyEntry in project ignite by apache.
In class GridLocalAtomicCache, method updatePartialBatch:
/**
* @param entries Entries to update.
* @param ver Cache version.
* @param writeVals Cache values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param expiryPlc Expiry policy.
* @param keepBinary Keep binary flag.
* @param err Optional partial update exception.
* @param subjId Subject ID.
* @param taskName Task name.
* @return Partial update exception.
*/
@SuppressWarnings({ "unchecked", "ConstantConditions", "ForLoopReplaceableByForEach" })
@Nullable
private CachePartialUpdateCheckedException updatePartialBatch(
    List<GridCacheEntryEx> entries,
    final GridCacheVersion ver,
    @Nullable List<CacheObject> writeVals,
    @Nullable Map<KeyCacheObject, CacheObject> putMap,
    @Nullable Collection<KeyCacheObject> rmvKeys,
    @Nullable ExpiryPolicy expiryPlc,
    boolean keepBinary,
    @Nullable CachePartialUpdateCheckedException err,
    UUID subjId,
    String taskName) {
assert putMap == null ^ rmvKeys == null;
GridCacheOperation op;
CacheStorePartialUpdateException storeErr = null;
try {
if (putMap != null) {
try {
Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap,
    new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
        @Override public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
            return F.t(val, ver);
        }
    });
ctx.store().putAll(null, view);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = UPDATE;
} else {
try {
ctx.store().removeAll(null, rmvKeys);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = DELETE;
}
} catch (IgniteCheckedException e) {
if (err == null)
err = partialUpdateException();
err.add(putMap != null ? putMap.keySet() : rmvKeys, e);
return err;
}
boolean intercept = ctx.config().getInterceptor() != null;
for (int i = 0; i < entries.size(); i++) {
GridCacheEntryEx entry = entries.get(i);
assert entry.lockedByCurrentThread();
if (entry.obsolete() || (storeErr != null && storeErr.failedKeys().contains(entry.key().value(ctx.cacheObjectContext(), false))))
continue;
try {
// We are holding java-level locks on entries at this point.
CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
assert writeVal != null || op == DELETE : "null write value found.";
GridTuple3<Boolean, Object, EntryProcessorResult<Object>> t = entry.innerUpdateLocal(ver, op, writeVal, null, false, false, false, keepBinary, expiryPlc, true, true, null, false, subjId, taskName);
if (intercept) {
if (op == UPDATE)
ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), writeVal, keepBinary));
else
ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), t.get2(), keepBinary));
}
} catch (GridCacheEntryRemovedException ignore) {
assert false : "Entry cannot become obsolete while holding lock.";
} catch (IgniteCheckedException e) {
if (err == null)
err = partialUpdateException();
err.add(Collections.singleton(entry.key()), e);
}
}
return err;
}
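The expiryPlc parameter threaded into innerUpdateLocal typically originates from a per-operation expiry policy supplied by the caller. On the public API this is done with IgniteCache.withExpiryPolicy; a short example, assuming an existing cache instance:
import java.util.concurrent.TimeUnit;
import javax.cache.expiry.Duration;
import javax.cache.expiry.ModifiedExpiryPolicy;
import org.apache.ignite.IgniteCache;

static void putWithTtl(IgniteCache<Integer, String> cache) {
    // Entries written through the decorated instance expire 30 seconds
    // after their last modification; the original cache is unaffected.
    IgniteCache<Integer, String> withTtl =
        cache.withExpiryPolicy(new ModifiedExpiryPolicy(new Duration(TimeUnit.SECONDS, 30)));

    withTtl.put(1, "value");
}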