Use of javax.cache.processor.EntryProcessor in project ignite by apache.
Class DmlStatementsProcessor, method doDelete.
/**
 * Perform DELETE operation on top of results of SELECT.
 *
 * @param cctx Cache context.
 * @param cursor SELECT results.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Results of DELETE (number of items affected AND keys that failed to be updated).
 */
@SuppressWarnings({"unchecked", "ConstantConditions", "ThrowableResultOfMethodCallIgnored"})
private UpdateResult doDelete(GridCacheContext cctx, Iterable<List<?>> cursor, int pageSize)
    throws IgniteCheckedException {
    // With DELETE, we have only two columns - key and value.
    long res = 0;

    // Keys that failed to DELETE due to concurrent updates.
    List<Object> failedKeys = new ArrayList<>();

    SQLException resEx = null;

    Iterator<List<?>> it = cursor.iterator();
    Map<Object, EntryProcessor<Object, Object, Boolean>> rows = new LinkedHashMap<>();

    while (it.hasNext()) {
        List<?> e = it.next();

        if (e.size() != 2) {
            U.warn(log, "Invalid row size on DELETE - expected 2, got " + e.size());

            continue;
        }

        rows.put(e.get(0), new ModifyingEntryProcessor(e.get(1), RMV));

        if ((pageSize > 0 && rows.size() == pageSize) || !it.hasNext()) {
            PageProcessingResult pageRes = processPage(cctx, rows);

            res += pageRes.cnt;

            failedKeys.addAll(F.asList(pageRes.errKeys));

            if (pageRes.ex != null) {
                if (resEx == null)
                    resEx = pageRes.ex;
                else
                    resEx.setNextException(pageRes.ex);
            }

            if (it.hasNext())
                // No need to clear after the last batch.
                rows.clear();
        }
    }

    if (resEx != null) {
        if (!F.isEmpty(failedKeys)) {
            // Don't go for a re-run if processing of some keys yielded exceptions and report keys that
            // had been modified concurrently right away.
            String msg = "Failed to DELETE some keys because they had been modified concurrently " +
                "[keys=" + failedKeys + ']';

            SQLException conEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE);

            conEx.setNextException(resEx);

            resEx = conEx;
        }

        throw new IgniteSQLException(resEx);
    }

    return new UpdateResult(res, failedKeys.toArray());
}
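Each SELECT row is turned into a ModifyingEntryProcessor keyed by the row's key, so the delete is applied only if the value has not changed since it was read. As a rough, hypothetical sketch of that pattern using only the standard javax.cache API (this is not Ignite's ModifyingEntryProcessor, and the class name is made up), such a processor could look like this:

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;

/**
 * Minimal sketch: removes the entry only if its current value still equals the
 * value read by the SELECT, mirroring the "delete unless concurrently modified"
 * behavior described above.
 */
public class DeleteIfMatchesProcessor implements EntryProcessor<Object, Object, Boolean> {
    /** Value observed by the SELECT; the delete applies only if it is still current. */
    private final Object expectedVal;

    public DeleteIfMatchesProcessor(Object expectedVal) {
        this.expectedVal = expectedVal;
    }

    /** Returns {@code true} if the entry was removed, {@code false} if it changed concurrently. */
    @Override public Boolean process(MutableEntry<Object, Object> entry, Object... args)
        throws EntryProcessorException {
        if (!entry.exists() || !expectedVal.equals(entry.getValue()))
            return false; // Key vanished or was modified concurrently - report as failed.

        entry.remove();

        return true;
    }
}

Applied per key with cache.invoke(key, new DeleteIfMatchesProcessor(expectedVal)), a false result would correspond to the failed keys (pageRes.errKeys) that the method above collects and reports.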
Use of javax.cache.processor.EntryProcessor in project ignite by apache.
Class GridCacheGetAndTransformStoreAbstractTest, method testGetAndTransform.
/**
 * @throws Exception If failed.
 */
public void testGetAndTransform() throws Exception {
    final AtomicBoolean finish = new AtomicBoolean();

    try {
        startGrid(0);
        startGrid(1);
        startGrid(2);

        final Processor entryProcessor = new Processor();

        IgniteInternalFuture<?> fut = multithreadedAsync(new Callable<Object>() {
            @Override public Object call() throws Exception {
                IgniteCache<Integer, String> c = jcache(ThreadLocalRandom.current().nextInt(3));

                while (!finish.get() && !Thread.currentThread().isInterrupted()) {
                    c.get(ThreadLocalRandom.current().nextInt(100));
                    c.put(ThreadLocalRandom.current().nextInt(100), "s");
                    c.invoke(ThreadLocalRandom.current().nextInt(100), entryProcessor);
                }

                return null;
            }
        }, 20);

        Thread.sleep(15_000);

        finish.set(true);

        fut.get();
    }
    finally {
        stopGrid(0);
        stopGrid(1);
        stopGrid(2);

        while (jcache().localSize() != 0)
            jcache().clear();
    }
}
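The Processor class used above is defined elsewhere in the test source. As a hypothetical stand-in (the transformation shown here is made up), a minimal javax.cache EntryProcessor compatible with the c.invoke(...) call could be:

import java.io.Serializable;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;

/**
 * Hypothetical stand-in for the test's Processor class: rewrites whatever value
 * is present at invoke time, creating the entry if it is absent.
 */
public class Processor implements EntryProcessor<Integer, String, Void>, Serializable {
    @Override public Void process(MutableEntry<Integer, String> e, Object... args)
        throws EntryProcessorException {
        // Transform the current value in place; absent entries get a default.
        e.setValue(e.exists() ? e.getValue() + "-transformed" : "created");

        return null;
    }
}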
Use of javax.cache.processor.EntryProcessor in project hazelcast by hazelcast.
Class CacheEntryProcessorMessageTask, method prepareOperation.
@Override
protected Operation prepareOperation() {
    CacheService service = getService(getServiceName());
    CacheOperationProvider operationProvider = getOperationProvider(parameters.name);

    EntryProcessor entryProcessor = (EntryProcessor) service.toObject(parameters.entryProcessor);

    ArrayList argumentsList = new ArrayList(parameters.arguments.size());
    for (Data data : parameters.arguments) {
        argumentsList.add(service.toObject(data));
    }

    return operationProvider.createEntryProcessorOperation(parameters.key, parameters.completionId,
            entryProcessor, argumentsList.toArray());
}
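From the client's perspective, the EntryProcessor and arguments deserialized above originate from an ordinary JCache invoke call. A minimal sketch using only the standard javax.cache API (cache name, key, and increment are assumptions; whichever JCache provider is on the classpath, e.g. Hazelcast's, is responsible for shipping the processor to the partition owner):

import java.io.Serializable;
import javax.cache.Cache;
import javax.cache.CacheManager;
import javax.cache.Caching;
import javax.cache.configuration.MutableConfiguration;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;

public class InvokeExample {
    /** Must be serializable so the provider can ship it to the entry's owner. */
    static class IncrementProcessor
        implements EntryProcessor<String, Integer, Integer>, Serializable {
        @Override public Integer process(MutableEntry<String, Integer> e, Object... args)
            throws EntryProcessorException {
            int delta = (Integer)args[0];
            e.setValue((e.exists() ? e.getValue() : 0) + delta);
            return e.getValue();
        }
    }

    public static void main(String[] args) {
        CacheManager mgr = Caching.getCachingProvider().getCacheManager();
        Cache<String, Integer> counters =
            mgr.createCache("counters", new MutableConfiguration<String, Integer>());

        // The processor instance and the argument 5 are what the message task above
        // receives as serialized Data and turns back into objects before execution.
        Integer updated = counters.invoke("hits", new IncrementProcessor(), 5);

        System.out.println("hits = " + updated);
    }
}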
Use of javax.cache.processor.EntryProcessor in project ignite by apache.
Class GridDhtAtomicCache, method invokeAll0.
/**
 * @param async Async operation flag.
 * @param keys Keys.
 * @param entryProcessor Entry processor.
 * @param args Entry processor arguments.
 * @return Future.
 */
private <T> IgniteInternalFuture<Map<K, EntryProcessorResult<T>>> invokeAll0(
    boolean async,
    Set<? extends K> keys,
    final EntryProcessor<K, V, T> entryProcessor,
    Object... args
) {
    A.notNull(keys, "keys", entryProcessor, "entryProcessor");

    if (keyCheck)
        validateCacheKeys(keys);

    Map<? extends K, EntryProcessor> invokeMap = F.viewAsMap(keys, new C1<K, EntryProcessor>() {
        @Override public EntryProcessor apply(K k) {
            return entryProcessor;
        }
    });

    CacheOperationContext opCtx = ctx.operationContextPerCall();

    final boolean keepBinary = opCtx != null && opCtx.isKeepBinary();

    IgniteInternalFuture<Map<K, EntryProcessorResult<T>>> resFut =
        updateAll0(null, invokeMap, args, null, null, false, false, TRANSFORM, async);

    return resFut.chain(
        new CX1<IgniteInternalFuture<Map<K, EntryProcessorResult<T>>>, Map<K, EntryProcessorResult<T>>>() {
            @Override public Map<K, EntryProcessorResult<T>> applyx(
                IgniteInternalFuture<Map<K, EntryProcessorResult<T>>> fut) throws IgniteCheckedException {
                Map<Object, EntryProcessorResult> resMap = (Map)fut.get();

                return ctx.unwrapInvokeResult(resMap, keepBinary);
            }
        });
}
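invokeAll0 fans a single EntryProcessor out over the whole key set and completes with a Map of per-key EntryProcessorResult values. The public counterpart of this flow is Cache.invokeAll from the standard javax.cache API; a sketch of calling it and unwrapping the results (the cache instance, processor name, and types are assumptions, not part of the Ignite code above):

import java.io.Serializable;
import java.util.Map;
import java.util.Set;
import javax.cache.Cache;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.EntryProcessorResult;
import javax.cache.processor.MutableEntry;

public final class InvokeAllExample {
    /** Upper-cases each value it finds; returns the old value, or null if the key was absent. */
    static class UpperCaseProcessor
        implements EntryProcessor<String, String, String>, Serializable {
        @Override public String process(MutableEntry<String, String> e, Object... args)
            throws EntryProcessorException {
            if (!e.exists())
                return null;

            String old = e.getValue();
            e.setValue(old.toUpperCase());
            return old;
        }
    }

    static void invokeAll(Cache<String, String> cache, Set<String> keys) {
        Map<String, EntryProcessorResult<String>> results =
            cache.invokeAll(keys, new UpperCaseProcessor());

        for (Map.Entry<String, EntryProcessorResult<String>> r : results.entrySet()) {
            try {
                // get() returns the processor's result or rethrows the per-key failure.
                System.out.println(r.getKey() + " was " + r.getValue().get());
            }
            catch (EntryProcessorException ex) {
                System.err.println("Failed for key " + r.getKey() + ": " + ex.getMessage());
            }
        }
    }
}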
Use of javax.cache.processor.EntryProcessor in project ignite by apache.
Class GridDhtAtomicCache, method updateWithBatch.
/**
 * Updates locked entries using batched write-through.
 *
 * @param node Sender node.
 * @param hasNear {@code True} if originating node has near cache.
 * @param req Update request.
 * @param res Update response.
 * @param locked Locked entries.
 * @param ver Assigned version.
 * @param dhtFut Optional DHT future.
 * @param replicate Whether replication is enabled.
 * @param taskName Task name.
 * @param expiry Expiry policy.
 * @param sndPrevVal If {@code true} sends previous value to backups.
 * @return Deleted entries.
 * @throws GridCacheEntryRemovedException Should not be thrown.
 */
@SuppressWarnings("unchecked")
private UpdateBatchResult updateWithBatch(
    final ClusterNode node,
    final boolean hasNear,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final List<GridDhtCacheEntry> locked,
    final GridCacheVersion ver,
    @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
    final boolean replicate,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal
) throws GridCacheEntryRemovedException {
    // Cannot update in batches during DR due to possible conflicts.
    assert !ctx.dr().receiveEnabled();

    // Should not request return values for putAll.
    assert !req.returnValue() || req.operation() == TRANSFORM;

    if (!F.isEmpty(req.filter()) && ctx.loadPreviousValue()) {
        try {
            reloadIfNeeded(locked);
        }
        catch (IgniteCheckedException e) {
            res.addFailedKeys(req.keys(), e);

            return new UpdateBatchResult();
        }
    }

    int size = req.size();

    Map<KeyCacheObject, CacheObject> putMap = null;
    Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap = null;
    Collection<KeyCacheObject> rmvKeys = null;
    List<CacheObject> writeVals = null;

    UpdateBatchResult updRes = new UpdateBatchResult();

    List<GridDhtCacheEntry> filtered = new ArrayList<>(size);

    GridCacheOperation op = req.operation();

    GridCacheReturn invokeRes = null;

    int firstEntryIdx = 0;

    boolean intercept = ctx.config().getInterceptor() != null;

    for (int i = 0; i < locked.size(); i++) {
        GridDhtCacheEntry entry = locked.get(i);

        try {
            if (!checkFilter(entry, req, res)) {
                if (expiry != null && entry.hasValue()) {
                    long ttl = expiry.forAccess();

                    if (ttl != CU.TTL_NOT_CHANGED) {
                        entry.updateTtl(null, ttl);

                        expiry.ttlUpdated(entry.key(), entry.version(), entry.readers());
                    }
                }

                if (log.isDebugEnabled())
                    log.debug("Entry did not pass the filter (will skip write) [entry=" + entry +
                        ", filter=" + Arrays.toString(req.filter()) + ", res=" + res + ']');

                if (hasNear)
                    res.addSkippedIndex(i);

                firstEntryIdx++;

                continue;
            }

            if (op == TRANSFORM) {
                EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);

                CacheObject old = entry.innerGet(ver, null,
                    /*read through*/true,
                    /*metrics*/true,
                    /*event*/true,
                    req.subjectId(), entryProcessor, taskName, null, req.keepBinary());

                Object oldVal = null;
                Object updatedVal = null;

                CacheInvokeEntry<Object, Object> invokeEntry =
                    new CacheInvokeEntry(entry.key(), old, entry.version(), req.keepBinary(), entry);

                CacheObject updated;

                try {
                    Object computed = entryProcessor.process(invokeEntry, req.invokeArguments());

                    if (computed != null) {
                        if (invokeRes == null)
                            invokeRes = new GridCacheReturn(node.isLocal());

                        computed = ctx.unwrapTemporary(computed);

                        invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), computed, null,
                            req.keepBinary());
                    }

                    if (!invokeEntry.modified())
                        continue;

                    updatedVal = ctx.unwrapTemporary(invokeEntry.getValue());

                    updated = ctx.toCacheObject(updatedVal);
                }
                catch (Exception e) {
                    if (invokeRes == null)
                        invokeRes = new GridCacheReturn(node.isLocal());

                    invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), null, e, req.keepBinary());

                    updated = old;
                }

                if (updated == null) {
                    if (intercept) {
                        CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal,
                            req.keepBinary());

                        IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(e);

                        if (ctx.cancelRemove(interceptorRes))
                            continue;
                    }

                    // Update previous batch.
                    if (putMap != null) {
                        dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap,
                            null, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry,
                            sndPrevVal);

                        firstEntryIdx = i;

                        putMap = null;
                        writeVals = null;
                        entryProcessorMap = null;

                        filtered = new ArrayList<>();
                    }

                    // Start collecting new batch.
                    if (rmvKeys == null)
                        rmvKeys = new ArrayList<>(size);

                    rmvKeys.add(entry.key());
                }
                else {
                    if (intercept) {
                        CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal,
                            req.keepBinary());

                        Object val = ctx.config().getInterceptor().onBeforePut(e, updatedVal);

                        if (val == null)
                            continue;

                        updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
                    }

                    // Update previous batch.
                    if (rmvKeys != null) {
                        dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, null, null,
                            rmvKeys, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry,
                            sndPrevVal);

                        firstEntryIdx = i;

                        rmvKeys = null;
                        entryProcessorMap = null;

                        filtered = new ArrayList<>();
                    }

                    if (putMap == null) {
                        putMap = new LinkedHashMap<>(size, 1.0f);
                        writeVals = new ArrayList<>(size);
                    }

                    putMap.put(entry.key(), updated);
                    writeVals.add(updated);
                }

                if (entryProcessorMap == null)
                    entryProcessorMap = new HashMap<>();

                entryProcessorMap.put(entry.key(), entryProcessor);
            }
            else if (op == UPDATE) {
                CacheObject updated = req.value(i);

                if (intercept) {
                    CacheObject old = entry.innerGet(null, null,
                        /*read through*/ctx.loadPreviousValue(),
                        /*metrics*/true,
                        /*event*/true,
                        req.subjectId(), null, taskName, null, req.keepBinary());

                    Object val = ctx.config().getInterceptor().onBeforePut(
                        new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()),
                        ctx.unwrapBinaryIfNeeded(updated, req.keepBinary(), false));

                    if (val == null)
                        continue;

                    updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
                }

                assert updated != null;

                if (putMap == null) {
                    putMap = new LinkedHashMap<>(size, 1.0f);
                    writeVals = new ArrayList<>(size);
                }

                putMap.put(entry.key(), updated);
                writeVals.add(updated);
            }
            else {
                assert op == DELETE;

                if (intercept) {
                    CacheObject old = entry.innerGet(null, null,
                        /*read through*/ctx.loadPreviousValue(),
                        /*metrics*/true,
                        /*event*/true,
                        req.subjectId(), null, taskName, null, req.keepBinary());

                    IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(
                        new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()));

                    if (ctx.cancelRemove(interceptorRes))
                        continue;
                }

                if (rmvKeys == null)
                    rmvKeys = new ArrayList<>(size);

                rmvKeys.add(entry.key());
            }

            filtered.add(entry);
        }
        catch (IgniteCheckedException e) {
            res.addFailedKey(entry.key(), e);
        }
    }

    // Store final batch.
    if (putMap != null || rmvKeys != null) {
        dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, rmvKeys,
            entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
    }
    else
        assert filtered.isEmpty();

    updRes.dhtFuture(dhtFut);
    updRes.invokeResult(invokeRes);

    return updRes;
}
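In the TRANSFORM branch above, the outcome of entryProcessor.process(...) decides which batch an entry joins: an untouched invokeEntry is skipped, a new value goes into putMap/writeVals, and a removal goes into rmvKeys. A hypothetical processor exercising all three branches (the class name and threshold are made up, not taken from the Ignite code) might look like:

import java.io.Serializable;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;

/**
 * Sketch of a processor whose outcomes map onto the batching branches above:
 *  - no setValue()/remove() call -> invokeEntry.modified() is false, entry skipped;
 *  - setValue()                  -> entry lands in putMap/writeVals;
 *  - remove()                    -> entry lands in rmvKeys.
 */
public class BoundedCounterProcessor implements EntryProcessor<String, Integer, Integer>, Serializable {
    private static final int LIMIT = 10;

    @Override public Integer process(MutableEntry<String, Integer> e, Object... args)
        throws EntryProcessorException {
        if (!e.exists())
            return null;            // Untouched: modified() stays false.

        int next = e.getValue() + 1;

        if (next >= LIMIT) {
            e.remove();             // Removal: batched with the other deletes.
            return next;
        }

        e.setValue(next);           // Update: batched with the other writes.
        return next;
    }
}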