Use of org.apache.ignite.internal.processors.cache.CacheInvokeEntry in project ignite by apache.
The class IgniteTxEntry, method applyEntryProcessors.
/**
* @param cacheVal Current cache value to which the entry processors are applied.
* @return New value produced after all entry processors have run.
*/
@SuppressWarnings("unchecked")
public CacheObject applyEntryProcessors(CacheObject cacheVal) {
GridCacheVersion ver;
try {
ver = entry.version();
} catch (GridCacheEntryRemovedException ignore) {
assert tx == null || tx.optimistic() : tx;
ver = null;
}
Object val = null;
Object keyVal = null;
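// Processors are chained: each CacheInvokeEntry below is seeded with the value produced by the
// previous processor, and the unwrapped key is cached across iterations.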
for (T2<EntryProcessor<Object, Object, Object>, Object[]> t : entryProcessors()) {
IgniteThread.onEntryProcessorEntered(true);
try {
CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry<>(key, keyVal, cacheVal, val, ver, keepBinary(), cached());
EntryProcessor processor = t.get1();
processor.process(invokeEntry, t.get2());
val = invokeEntry.getValue();
keyVal = invokeEntry.key();
} catch (Exception ignore) {
// No-op.
} finally {
IgniteThread.onEntryProcessorLeft();
}
}
return ctx.toCacheObject(val);
}
Use of org.apache.ignite.internal.processors.cache.CacheInvokeEntry in project ignite by apache.
The class GridCacheReplicatedQueueRemoveSelfTest, method testQueueRemovalProcessor.
/**
* This is a unit test of the Ignite queue's RemoveProcessor.process() method.
*
* @throws Exception If failed.
*/
@Test
@SuppressWarnings("unchecked")
public void testQueueRemovalProcessor() throws Exception {
GridCacheContext cctx = grid(0).context().cache().cache("ignite-sys-cache").context();
IgniteUuid id = IgniteUuid.randomUuid();
CacheInvokeEntry entry = new CacheInvokeEntry<>(null, null, null, false, new GridDhtCacheEntry(cctx, null, new KeyCacheObjectImpl(1, BigInteger.valueOf(1).toByteArray(), 1)));
entry.setValue(new GridCacheQueueHeader(id, Integer.MAX_VALUE, false, 0L, 10000L, Collections.singleton(1L)));
GridCacheQueueAdapter.RemoveProcessor rp = new GridCacheQueueAdapter.RemoveProcessor(id, 1L);
rp.process(entry);
GridCacheQueueAdapter.RemoveProcessor externalRP = new GridCacheQueueAdapter.RemoveProcessor();
GridByteArrayOutputStream output = new GridByteArrayOutputStream();
ObjectOutputStream objOut = new ObjectOutputStream(output);
rp.writeExternal(objOut);
objOut.flush(); // Flush the ObjectOutputStream's block-data buffer so the serialized bytes actually reach the byte array.
externalRP.readExternal(new ObjectInputStream(new GridByteArrayInputStream(output.toByteArray())));
assertEquals(id, GridTestUtils.getFieldValue(externalRP, "id"));
// idx should be null because the entry was already removed; see the GridCacheQueueAdapter.RemoveProcessor
// code for more details.
assertNull(GridTestUtils.getFieldValue(externalRP, "idx"));
}
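The RemoveProcessor exercised by this test is the same processor that backs element removal from a distributed IgniteQueue. A minimal usage sketch follows; the queue name and configuration are assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteQueue;
import org.apache.ignite.configuration.CollectionConfiguration;

// Hypothetical usage: removing an element routes through GridCacheQueueAdapter.RemoveProcessor,
// which is applied to the queue header entry via a CacheInvokeEntry.
IgniteQueue<Long> queue = ignite.queue("testQueue", 0, new CollectionConfiguration());
queue.add(1L);
boolean removed = queue.remove(1L);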
Use of org.apache.ignite.internal.processors.cache.CacheInvokeEntry in project ignite by apache.
The class GridLocalAtomicCache, method updateWithBatch.
/**
* Updates entries using batched write-through.
*
* @param op Operation.
* @param keys Keys.
* @param vals Values.
* @param invokeArgs Optional arguments for EntryProcessor.
* @param expiryPlc Expiry policy.
* @param ver Cache version.
* @param filter Optional filter.
* @param keepBinary Keep binary flag.
* @param subjId Subject ID.
* @param taskName Task name.
* @return Results map for invoke operation.
* @throws CachePartialUpdateCheckedException If update failed.
*/
@SuppressWarnings({ "ForLoopReplaceableByForEach", "unchecked" })
private Map<K, EntryProcessorResult> updateWithBatch(GridCacheOperation op, Collection<? extends K> keys, @Nullable Iterable<?> vals, @Nullable Object[] invokeArgs, @Nullable ExpiryPolicy expiryPlc, GridCacheVersion ver, @Nullable CacheEntryPredicate[] filter, boolean keepBinary, UUID subjId, String taskName) throws IgniteCheckedException {
List<GridCacheEntryEx> locked = lockEntries(keys);
try {
int size = locked.size();
Map<KeyCacheObject, CacheObject> putMap = null;
Collection<KeyCacheObject> rmvKeys = null;
List<CacheObject> writeVals = null;
Map<K, EntryProcessorResult> invokeResMap = op == TRANSFORM ? U.<K, EntryProcessorResult>newHashMap(size) : null;
List<GridCacheEntryEx> filtered = new ArrayList<>(size);
CachePartialUpdateCheckedException err = null;
Iterator<?> valsIter = vals != null ? vals.iterator() : null;
boolean intercept = ctx.config().getInterceptor() != null;
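// Single pass over the locked entries: contiguous puts and removes are collected into batches,
// and switching between the two operation kinds flushes the previous batch via updatePartialBatch().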
for (int i = 0; i < size; i++) {
GridCacheEntryEx entry = locked.get(i);
Object val = valsIter != null ? valsIter.next() : null;
if (val == null && op != DELETE)
throw new NullPointerException("Null value.");
try {
try {
if (!ctx.isAllLocked(entry, filter)) {
if (log.isDebugEnabled())
log.debug("Entry did not pass the filter (will skip write) [entry=" + entry + ", filter=" + Arrays.toString(filter) + ']');
continue;
}
} catch (IgniteCheckedException e) {
if (err == null)
err = partialUpdateException();
err.add(F.asList(entry.key()), e);
continue;
}
if (op == TRANSFORM) {
ctx.kernalContext().resource().inject(val, GridResourceIoc.AnnotationSet.ENTRY_PROCESSOR, ctx.name());
EntryProcessor<Object, Object, Object> entryProcessor = (EntryProcessor<Object, Object, Object>) val;
CacheObject old = entry.innerGet(null, null,
/*read-through*/ true,
/*update-metrics*/ true,
/*event*/ true,
subjId, entryProcessor, taskName, null, keepBinary);
Object oldVal = null;
CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry<>(entry.key(), old, entry.version(), keepBinary, entry);
CacheObject updated;
Object updatedVal = null;
CacheInvokeResult invokeRes = null;
boolean validation = false;
try {
Object computed = entryProcessor.process(invokeEntry, invokeArgs);
updatedVal = ctx.unwrapTemporary(invokeEntry.getValue());
updated = ctx.toCacheObject(updatedVal);
if (computed != null)
invokeRes = CacheInvokeResult.fromResult(ctx.unwrapTemporary(computed));
if (invokeEntry.modified()) {
validation = true;
ctx.validateKeyAndValue(entry.key(), updated);
}
} catch (Exception e) {
invokeRes = CacheInvokeResult.fromError(e);
updated = old;
if (validation) {
invokeResMap.put((K) entry.key().value(ctx.cacheObjectContext(), false), invokeRes);
continue;
}
}
if (invokeRes != null)
invokeResMap.put((K) entry.key().value(ctx.cacheObjectContext(), false), invokeRes);
if (updated == null) {
if (intercept) {
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, keepBinary));
if (ctx.cancelRemove(interceptorRes))
continue;
}
// Update previous batch.
if (putMap != null) {
err = updatePartialBatch(filtered, ver, writeVals, putMap, null, expiryPlc, keepBinary, err, subjId, taskName);
putMap = null;
writeVals = null;
filtered = new ArrayList<>();
}
// Start collecting new batch.
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
} else {
if (intercept) {
Object interceptorVal = ctx.config().getInterceptor().onBeforePut(new CacheLazyEntry(ctx, entry.key(), invokeEntry.getKey(), old, oldVal, keepBinary), updatedVal);
if (interceptorVal == null)
continue;
updated = ctx.toCacheObject(ctx.unwrapTemporary(interceptorVal));
}
// Update previous batch.
if (rmvKeys != null) {
err = updatePartialBatch(filtered, ver, null, null, rmvKeys, expiryPlc, keepBinary, err, subjId, taskName);
rmvKeys = null;
filtered = new ArrayList<>();
}
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), updated);
writeVals.add(updated);
}
} else if (op == UPDATE) {
CacheObject cacheVal = ctx.toCacheObject(val);
if (intercept) {
CacheObject old = entry.innerGet(null, null,
/*read-through*/ ctx.loadPreviousValue(),
/*update-metrics*/ true,
/*event*/ true,
subjId, null, taskName, null, keepBinary);
Object interceptorVal = ctx.config().getInterceptor().onBeforePut(new CacheLazyEntry(ctx, entry.key(), old, keepBinary), val);
if (interceptorVal == null)
continue;
cacheVal = ctx.toCacheObject(ctx.unwrapTemporary(interceptorVal));
}
ctx.validateKeyAndValue(entry.key(), cacheVal);
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), cacheVal);
writeVals.add(cacheVal);
} else {
assert op == DELETE;
if (intercept) {
CacheObject old = entry.innerGet(null, null,
/*read-through*/ ctx.loadPreviousValue(),
/*update-metrics*/ true,
/*event*/ true,
subjId, null, taskName, null, keepBinary);
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), old, keepBinary));
if (ctx.cancelRemove(interceptorRes))
continue;
}
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
}
filtered.add(entry);
} catch (IgniteCheckedException e) {
if (err == null)
err = partialUpdateException();
err.add(F.asList(entry.key()), e);
} catch (GridCacheEntryRemovedException ignore) {
assert false : "Entry cannot become obsolete while holding lock.";
}
}
// Store final batch.
if (putMap != null || rmvKeys != null) {
err = updatePartialBatch(filtered, ver, writeVals, putMap, rmvKeys, expiryPlc, keepBinary, err, subjId, taskName);
} else
assert filtered.isEmpty();
if (err != null)
throw err;
return invokeResMap;
} finally {
unlockEntries(locked);
}
}
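A batched TRANSFORM like the one handled above is typically initiated through invokeAll(). A minimal sketch, reusing the hypothetical IncrementProcessor from the first example and assuming cache is an IgniteCache<String, Integer>:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.cache.processor.EntryProcessorResult;

// Each per-key processor run lands in the TRANSFORM branch of updateWithBatch.
Set<String> keys = new HashSet<>(Arrays.asList("a", "b", "c"));
Map<String, EntryProcessorResult<Integer>> results = cache.invokeAll(keys, new IncrementProcessor());
for (Map.Entry<String, EntryProcessorResult<Integer>> e : results.entrySet())
    System.out.println(e.getKey() + " -> " + e.getValue().get());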
Use of org.apache.ignite.internal.processors.cache.CacheInvokeEntry in project ignite by apache.
The class GridDhtAtomicCache, method updateWithBatch.
/**
* Updates locked entries using batched write-through.
*
* @param node Sender node.
* @param hasNear {@code True} if originating node has near cache.
* @param req Update request.
* @param res Update response.
* @param locked Locked entries.
* @param ver Assigned version.
* @param dhtFut Optional DHT future.
* @param replicate Whether replication is enabled.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return Deleted entries.
* @throws GridCacheEntryRemovedException Should not be thrown.
*/
@SuppressWarnings("unchecked")
private DhtAtomicUpdateResult updateWithBatch(final ClusterNode node, final boolean hasNear, final GridNearAtomicAbstractUpdateRequest req, final GridNearAtomicUpdateResponse res, final List<GridDhtCacheEntry> locked, final GridCacheVersion ver, @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut, final boolean replicate, final String taskName, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean sndPrevVal) throws GridCacheEntryRemovedException {
// Cannot update in batches during DR due to possible conflicts.
assert !ctx.dr().receiveEnabled();
// Should not request return values for putAll.
assert !req.returnValue() || req.operation() == TRANSFORM;
if (!F.isEmpty(req.filter()) && ctx.loadPreviousValue()) {
try {
reloadIfNeeded(locked);
} catch (IgniteCheckedException e) {
res.addFailedKeys(req.keys(), e);
return new DhtAtomicUpdateResult();
}
}
int size = req.size();
Map<KeyCacheObject, CacheObject> putMap = null;
Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap = null;
Collection<KeyCacheObject> rmvKeys = null;
List<CacheObject> writeVals = null;
DhtAtomicUpdateResult updRes = new DhtAtomicUpdateResult();
List<GridDhtCacheEntry> filtered = new ArrayList<>(size);
GridCacheOperation op = req.operation();
GridCacheReturn invokeRes = null;
int firstEntryIdx = 0;
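// Index within the request of the first entry in the current partial batch; it advances past
// filtered-out entries and is reset whenever a batch is flushed.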
boolean intercept = ctx.config().getInterceptor() != null;
for (int i = 0; i < locked.size(); i++) {
GridDhtCacheEntry entry = locked.get(i);
try {
if (!checkFilter(entry, req, res)) {
if (expiry != null && entry.hasValue()) {
long ttl = expiry.forAccess();
if (ttl != CU.TTL_NOT_CHANGED) {
entry.updateTtl(null, ttl);
expiry.ttlUpdated(entry.key(), entry.version(), entry.readers());
}
}
if (log.isDebugEnabled())
log.debug("Entry did not pass the filter (will skip write) [entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ", res=" + res + ']');
if (hasNear)
res.addSkippedIndex(i);
firstEntryIdx++;
continue;
}
if (op == TRANSFORM) {
EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
CacheObject old = entry.innerGet(ver, null,
/*read through*/ true,
/*metrics*/ true,
/*event*/ true,
req.subjectId(), entryProcessor, taskName, null, req.keepBinary());
Object oldVal = null;
Object updatedVal = null;
CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry<>(entry.key(), old, entry.version(), req.keepBinary(), entry);
CacheObject updated = null;
if (invokeRes == null)
invokeRes = new GridCacheReturn(node.isLocal());
CacheInvokeResult curInvokeRes = null;
boolean validation = false;
try {
Object computed = entryProcessor.process(invokeEntry, req.invokeArguments());
if (computed != null) {
computed = ctx.unwrapTemporary(computed);
curInvokeRes = CacheInvokeResult.fromResult(computed);
}
if (!invokeEntry.modified())
continue;
else {
updatedVal = ctx.unwrapTemporary(invokeEntry.getValue());
updated = ctx.toCacheObject(updatedVal);
validation = true;
ctx.validateKeyAndValue(entry.key(), updated);
}
} catch (Exception e) {
curInvokeRes = CacheInvokeResult.fromError(e);
updated = old;
if (validation) {
res.addSkippedIndex(i);
continue;
}
} finally {
if (curInvokeRes != null) {
invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), curInvokeRes.result(), curInvokeRes.error(), req.keepBinary());
}
}
if (updated == null) {
if (intercept) {
CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(e);
if (ctx.cancelRemove(interceptorRes))
continue;
}
// Update previous batch.
if (putMap != null) {
dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, null, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
firstEntryIdx = i;
putMap = null;
writeVals = null;
entryProcessorMap = null;
filtered = new ArrayList<>();
}
// Start collecting new batch.
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
} else {
if (intercept) {
CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
Object val = ctx.config().getInterceptor().onBeforePut(e, updatedVal);
if (val == null)
continue;
updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
}
// Update previous batch.
if (rmvKeys != null) {
dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, null, null, rmvKeys, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
firstEntryIdx = i;
rmvKeys = null;
entryProcessorMap = null;
filtered = new ArrayList<>();
}
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), updated);
writeVals.add(updated);
}
if (entryProcessorMap == null)
entryProcessorMap = new HashMap<>();
entryProcessorMap.put(entry.key(), entryProcessor);
} else if (op == UPDATE) {
CacheObject updated = req.value(i);
if (intercept) {
CacheObject old = entry.innerGet(null, null,
/*read through*/ ctx.loadPreviousValue(),
/*metrics*/ true,
/*event*/ true,
req.subjectId(), null, taskName, null, req.keepBinary());
Object val = ctx.config().getInterceptor().onBeforePut(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()), ctx.unwrapBinaryIfNeeded(updated, req.keepBinary(), false));
if (val == null)
continue;
updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
}
assert updated != null;
ctx.validateKeyAndValue(entry.key(), updated);
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), updated);
writeVals.add(updated);
} else {
assert op == DELETE;
if (intercept) {
CacheObject old = entry.innerGet(null, null,
/*read through*/ ctx.loadPreviousValue(),
/*metrics*/ true,
/*event*/ true,
req.subjectId(), null, taskName, null, req.keepBinary());
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()));
if (ctx.cancelRemove(interceptorRes))
continue;
}
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
}
filtered.add(entry);
} catch (IgniteCheckedException e) {
res.addFailedKey(entry.key(), e);
}
}
// Store final batch.
if (putMap != null || rmvKeys != null) {
dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, rmvKeys, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
} else
assert filtered.isEmpty();
updRes.dhtFuture(dhtFut);
updRes.returnValue(invokeRes);
return updRes;
}
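The intercept branches above delegate to the configured CacheInterceptor. A minimal sketch of an interceptor follows; the class name and pass-through policy are illustrative assumptions.

import javax.cache.Cache;
import org.apache.ignite.cache.CacheInterceptorAdapter;
import org.apache.ignite.lang.IgniteBiTuple;

// Hypothetical interceptor. As the updateWithBatch code above shows, returning null from
// onBeforePut cancels the write, and the tuple returned by onBeforeRemove is fed to
// ctx.cancelRemove(...) to decide whether the removal proceeds.
public class AuditInterceptor extends CacheInterceptorAdapter<String, Integer> {
    @Override public Integer onBeforePut(Cache.Entry<String, Integer> entry, Integer newVal) {
        return newVal; // Pass the new value through unchanged; null would cancel the put.
    }

    @Override public IgniteBiTuple<Boolean, Integer> onBeforeRemove(Cache.Entry<String, Integer> entry) {
        return super.onBeforeRemove(entry); // Keep the adapter's default (removal proceeds).
    }
}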
Use of org.apache.ignite.internal.processors.cache.CacheInvokeEntry in project ignite by apache.
The class GridDhtAtomicCache, method updateWithBatch (a second variant that records progress in a DhtAtomicUpdateResult).
/**
* Updates locked entries using batched write-through.
*
* @param node Sender node.
* @param hasNear {@code True} if originating node has near cache.
* @param req Update request.
* @param res Update response.
* @param locked Locked entries.
* @param ver Assigned version.
* @param replicate Whether replication is enabled.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @param dhtUpdRes DHT update result.
* @throws GridCacheEntryRemovedException Should not be thrown.
*/
@SuppressWarnings("unchecked")
private void updateWithBatch(final ClusterNode node, final boolean hasNear, final GridNearAtomicAbstractUpdateRequest req, final GridNearAtomicUpdateResponse res, final List<GridDhtCacheEntry> locked, final GridCacheVersion ver, final boolean replicate, final String taskName, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean sndPrevVal, final DhtAtomicUpdateResult dhtUpdRes) throws GridCacheEntryRemovedException {
// Cannot update in batches during DR due to possible conflicts.
assert !ctx.dr().receiveEnabled();
// Should not request return values for putAll.
assert !req.returnValue() || req.operation() == TRANSFORM;
if (!F.isEmpty(req.filter()) && ctx.loadPreviousValue()) {
try {
reloadIfNeeded(locked);
} catch (IgniteCheckedException e) {
res.addFailedKeys(req.keys(), e);
return;
}
}
int size = req.size();
Map<KeyCacheObject, CacheObject> putMap = null;
Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap = null;
Collection<KeyCacheObject> rmvKeys = null;
List<CacheObject> writeVals = null;
List<GridDhtCacheEntry> filtered = new ArrayList<>(size);
GridCacheOperation op = req.operation();
GridCacheReturn invokeRes = null;
int firstEntryIdx = 0;
boolean intercept = ctx.config().getInterceptor() != null;
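// Resume from the first unprocessed entry: dhtUpdRes tracks progress so the batch can be
// re-applied after a retriable failure such as the UnregisteredClassException rethrown below.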
for (int i = dhtUpdRes.processedEntriesCount(); i < locked.size(); i++) {
GridDhtCacheEntry entry = locked.get(i);
try {
if (!checkFilter(entry, req, res)) {
if (expiry != null && entry.hasValue()) {
long ttl = expiry.forAccess();
if (ttl != CU.TTL_NOT_CHANGED) {
entry.updateTtl(null, ttl);
expiry.ttlUpdated(entry.key(), entry.version(), entry.readers());
}
}
if (log.isDebugEnabled())
log.debug("Entry did not pass the filter (will skip write) [entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ", res=" + res + ']');
if (hasNear)
res.addSkippedIndex(i);
firstEntryIdx++;
continue;
}
if (op == TRANSFORM) {
EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
CacheObject old = entry.innerGet(ver, null,
/*read through*/ true,
/*metrics*/ true,
/*event*/ true,
entryProcessor, taskName, null, req.keepBinary());
Object oldVal = null;
Object updatedVal = null;
CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry<>(entry.key(), old, entry.version(), req.keepBinary(), entry);
CacheObject updated = null;
if (invokeRes == null)
invokeRes = new GridCacheReturn(node.isLocal());
CacheInvokeResult curInvokeRes = null;
boolean validation = false;
IgniteThread.onEntryProcessorEntered(true);
try {
Object computed = entryProcessor.process(invokeEntry, req.invokeArguments());
if (computed != null) {
computed = ctx.unwrapTemporary(computed);
curInvokeRes = CacheInvokeResult.fromResult(computed);
}
if (!invokeEntry.modified()) {
if (ctx.statisticsEnabled())
ctx.cache().metrics0().onReadOnlyInvoke(old != null);
continue;
} else {
updatedVal = ctx.unwrapTemporary(invokeEntry.getValue());
updated = ctx.toCacheObject(updatedVal);
validation = true;
if (updated != null)
ctx.validateKeyAndValue(entry.key(), updated);
}
} catch (UnregisteredClassException | UnregisteredBinaryTypeException e) {
throw e;
} catch (Exception e) {
curInvokeRes = CacheInvokeResult.fromError(e);
updated = old;
if (validation) {
res.addSkippedIndex(i);
continue;
}
} finally {
IgniteThread.onEntryProcessorLeft();
if (curInvokeRes != null) {
invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), curInvokeRes.result(), curInvokeRes.error(), req.keepBinary());
}
}
if (updated == null) {
if (intercept) {
CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(e);
if (ctx.cancelRemove(interceptorRes))
continue;
}
// Update previous batch.
if (putMap != null) {
updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, null, entryProcessorMap, req, res, replicate, dhtUpdRes, taskName, expiry, sndPrevVal);
firstEntryIdx = i;
putMap = null;
writeVals = null;
entryProcessorMap = null;
filtered = new ArrayList<>();
}
// Start collecting new batch.
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
} else {
if (intercept) {
CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
Object val = ctx.config().getInterceptor().onBeforePut(e, updatedVal);
if (val == null)
continue;
updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
}
// Update previous batch.
if (rmvKeys != null) {
updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, null, null, rmvKeys, entryProcessorMap, req, res, replicate, dhtUpdRes, taskName, expiry, sndPrevVal);
firstEntryIdx = i;
rmvKeys = null;
entryProcessorMap = null;
filtered = new ArrayList<>();
}
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), updated);
writeVals.add(updated);
}
if (entryProcessorMap == null)
entryProcessorMap = new HashMap<>();
entryProcessorMap.put(entry.key(), entryProcessor);
} else if (op == UPDATE) {
CacheObject updated = req.value(i);
if (intercept) {
CacheObject old = entry.innerGet(null, null,
/*read through*/ ctx.loadPreviousValue(),
/*metrics*/ true,
/*event*/ true,
null, taskName, null, req.keepBinary());
Object val = ctx.config().getInterceptor().onBeforePut(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()), ctx.unwrapBinaryIfNeeded(updated, req.keepBinary(), false, null));
if (val == null)
continue;
updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
}
assert updated != null;
ctx.validateKeyAndValue(entry.key(), updated);
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), updated);
writeVals.add(updated);
} else {
assert op == DELETE;
if (intercept) {
CacheObject old = entry.innerGet(null, null,
/*read through*/ ctx.loadPreviousValue(),
/*metrics*/ true,
/*event*/ true,
null, taskName, null, req.keepBinary());
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()));
if (ctx.cancelRemove(interceptorRes))
continue;
}
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
}
filtered.add(entry);
} catch (IgniteCheckedException e) {
res.addFailedKey(entry.key(), e);
}
}
// Store final batch.
if (putMap != null || rmvKeys != null) {
updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, rmvKeys, entryProcessorMap, req, res, replicate, dhtUpdRes, taskName, expiry, sndPrevVal);
} else
assert filtered.isEmpty();
dhtUpdRes.returnValue(invokeRes);
}
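Finally, the onReadOnlyInvoke metrics branch above covers processors that never modify the entry. A read-only processor sketch (class name is an assumption):

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;

// Hypothetical read-only processor: setValue()/remove() are never called, so
// invokeEntry.modified() stays false and the invoke is counted via onReadOnlyInvoke().
public class PeekProcessor implements EntryProcessor<String, Integer, Integer> {
    @Override public Integer process(MutableEntry<String, Integer> e, Object... args) {
        return e.exists() ? e.getValue() : null; // Observe without modifying.
    }
}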