
Example 31 with IgniteBiTuple

Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.

The class GridDhtAtomicCache, method updateSingle.
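Before the method body below, a quick reminder of the IgniteBiTuple API that all of these examples lean on: a typed pair with get1()/get2() accessors, usually produced through the F.t(...) factory shorthand from Ignite's internal typedef utilities. A minimal, self-contained sketch (the class name BiTupleBasics is illustrative only):

import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.lang.IgniteBiTuple;

public class BiTupleBasics {
    public static void main(String[] args) {
        // F.t(..) is the factory shorthand used throughout the examples,
        // e.g. deleted.add(F.t(entry, updRes.removeVersion())) below.
        IgniteBiTuple<String, Integer> pair = F.t("key", 42);

        assert "key".equals(pair.get1());
        assert Integer.valueOf(42).equals(pair.get2());

        // Equivalent direct construction.
        IgniteBiTuple<String, Integer> same = new IgniteBiTuple<>("key", 42);

        assert pair.equals(same);
    }
}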

/**
     * Updates locked entries one-by-one.
     *
     * @param nearNode Originating node.
     * @param hasNear {@code True} if originating node has near cache.
     * @param req Update request.
     * @param res Update response.
     * @param locked Locked entries.
     * @param ver Assigned update version.
     * @param dhtFut Optional DHT future.
     * @param replicate Whether DR is enabled for that cache.
     * @param taskName Task name.
     * @param expiry Expiry policy.
     * @param sndPrevVal If {@code true} sends previous value to backups.
     * @return Return value.
     * @throws GridCacheEntryRemovedException Should never be thrown.
     */
private UpdateSingleResult updateSingle(ClusterNode nearNode, boolean hasNear, GridNearAtomicAbstractUpdateRequest req, GridNearAtomicUpdateResponse res, List<GridDhtCacheEntry> locked, GridCacheVersion ver, @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut, boolean replicate, String taskName, @Nullable IgniteCacheExpiryPolicy expiry, boolean sndPrevVal) throws GridCacheEntryRemovedException {
    GridCacheReturn retVal = null;
    Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
    AffinityTopologyVersion topVer = req.topologyVersion();
    boolean checkReaders = hasNear || ctx.discovery().hasNearCache(ctx.cacheId(), topVer);
    boolean intercept = ctx.config().getInterceptor() != null;
    AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
    // Avoid iterator creation.
    for (int i = 0; i < req.size(); i++) {
        KeyCacheObject k = req.key(i);
        GridCacheOperation op = req.operation();
        // No GridCacheEntryRemovedException can be thrown.
        try {
            GridDhtCacheEntry entry = locked.get(i);
            GridCacheVersion newConflictVer = req.conflictVersion(i);
            long newConflictTtl = req.conflictTtl(i);
            long newConflictExpireTime = req.conflictExpireTime(i);
            assert !(newConflictVer instanceof GridCacheVersionEx) : newConflictVer;
            Object writeVal = op == TRANSFORM ? req.entryProcessor(i) : req.writeValue(i);
            Collection<UUID> readers = null;
            Collection<UUID> filteredReaders = null;
            if (checkReaders) {
                readers = entry.readers();
                filteredReaders = F.view(entry.readers(), F.notEqualTo(nearNode.id()));
            }
            GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal,
                req.invokeArguments(), writeThrough() && !req.skipStore(), !req.skipStore(),
                sndPrevVal || req.returnValue(), req.keepBinary(), expiry,
                /*event*/true, /*metrics*/true, /*primary*/true, /*verCheck*/false,
                topVer, req.filter(), replicate ? DR_PRIMARY : DR_NONE,
                newConflictTtl, newConflictExpireTime, newConflictVer,
                /*conflictResolve*/true, intercept, req.subjectId(), taskName,
                /*prevVal*/null, /*updateCntr*/null, dhtFut);
            if (dhtFut != null) {
                if (updRes.sendToDht()) {
                    // Send to backups even in case of remove-remove scenarios.
                    GridCacheVersionConflictContext<?, ?> conflictCtx = updRes.conflictResolveResult();
                    if (conflictCtx == null)
                        newConflictVer = null;
                    else if (conflictCtx.isMerge())
                        // Conflict version is discarded in case of merge.
                        newConflictVer = null;
                    EntryProcessor<Object, Object, Object> entryProcessor = null;
                    dhtFut.addWriteEntry(affAssignment, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime(), newConflictVer, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
                    if (!F.isEmpty(filteredReaders))
                        dhtFut.addNearWriteEntries(filteredReaders, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime());
                } else {
                    if (log.isDebugEnabled())
                        log.debug("Entry did not pass the filter or conflict resolution (will skip write) " + "[entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ']');
                }
            }
            if (hasNear) {
                if (updRes.sendToDht()) {
                    if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
                        // If the value put is the same as in the request, there is no need to send it back.
                        if (op == TRANSFORM || writeVal != updRes.newValue()) {
                            res.addNearValue(i, updRes.newValue(), updRes.newTtl(), updRes.conflictExpireTime());
                        } else
                            res.addNearTtl(i, updRes.newTtl(), updRes.conflictExpireTime());
                        if (updRes.newValue() != null) {
                            IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
                            assert f == null : f;
                        }
                    } else if (F.contains(readers, nearNode.id()))
                        // Reader became primary or backup.
                        entry.removeReader(nearNode.id(), req.messageId());
                    else
                        res.addSkippedIndex(i);
                } else
                    res.addSkippedIndex(i);
            }
            if (updRes.removeVersion() != null) {
                if (deleted == null)
                    deleted = new ArrayList<>(req.size());
                deleted.add(F.t(entry, updRes.removeVersion()));
            }
            if (op == TRANSFORM) {
                assert !req.returnValue();
                IgniteBiTuple<Object, Exception> compRes = updRes.computedResult();
                if (compRes != null && (compRes.get1() != null || compRes.get2() != null)) {
                    if (retVal == null)
                        retVal = new GridCacheReturn(nearNode.isLocal());
                    retVal.addEntryProcessResult(ctx, k, null, compRes.get1(), compRes.get2(), req.keepBinary());
                }
            } else {
                // Create only once.
                if (retVal == null) {
                    CacheObject ret = updRes.oldValue();
                    retVal = new GridCacheReturn(ctx, nearNode.isLocal(), req.keepBinary(), req.returnValue() ? ret : null, updRes.success());
                }
            }
        } catch (IgniteCheckedException e) {
            res.addFailedKey(k, e);
        }
    }
    return new UpdateSingleResult(retVal, deleted, dhtFut);
}
Also used : AffinityAssignment(org.apache.ignite.internal.processors.affinity.AffinityAssignment) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) ArrayList(java.util.ArrayList) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) GridCacheVersionEx(org.apache.ignite.internal.processors.cache.version.GridCacheVersionEx) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) StorageException(org.apache.ignite.internal.pagemem.wal.StorageException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) NodeStoppingException(org.apache.ignite.internal.NodeStoppingException) ClusterTopologyCheckedException(org.apache.ignite.internal.cluster.ClusterTopologyCheckedException) CacheStorePartialUpdateException(org.apache.ignite.internal.processors.cache.CacheStorePartialUpdateException) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) UUID(java.util.UUID) GridCacheReturn(org.apache.ignite.internal.processors.cache.GridCacheReturn) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) GridTimeoutObject(org.apache.ignite.internal.processors.timeout.GridTimeoutObject) GridCacheOperation(org.apache.ignite.internal.processors.cache.GridCacheOperation) GridCacheUpdateAtomicResult(org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult)
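The deleted collection in updateSingle() is the recurring IgniteBiTuple idiom in this cache code: lazily allocate a list of (entry, remove version) pairs and fill it with F.t(...). A minimal sketch of the same pattern under hypothetical Entry and Version stand-ins (these are not Ignite types):

import java.util.ArrayList;
import java.util.Collection;

import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.lang.IgniteBiTuple;

// Hypothetical stand-ins for GridDhtCacheEntry and GridCacheVersion.
class Entry { final String key; Entry(String key) { this.key = key; } }
class Version { final long order; Version(long order) { this.order = order; } }

class DeletedTracker {
    // Allocated lazily, as with 'deleted' in updateSingle(): most updates
    // are not removes, so the list is often never created at all.
    private Collection<IgniteBiTuple<Entry, Version>> deleted;

    void onRemove(Entry entry, Version rmvVer, int expectedSize) {
        if (deleted == null)
            deleted = new ArrayList<>(expectedSize);

        deleted.add(F.t(entry, rmvVer));
    }

    Collection<IgniteBiTuple<Entry, Version>> deleted() {
        return deleted; // May be null if nothing was removed.
    }
}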

Example 32 with IgniteBiTuple

Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.

The class GridDhtAtomicCache, method updateWithBatch.

/**
     * Updates locked entries using batched write-through.
     *
     * @param node Sender node.
     * @param hasNear {@code True} if originating node has near cache.
     * @param req Update request.
     * @param res Update response.
     * @param locked Locked entries.
     * @param ver Assigned version.
     * @param dhtFut Optional DHT future.
     * @param replicate Whether replication is enabled.
     * @param taskName Task name.
     * @param expiry Expiry policy.
     * @param sndPrevVal If {@code true} sends previous value to backups.
     * @return Deleted entries.
     * @throws GridCacheEntryRemovedException Should not be thrown.
     */
@SuppressWarnings("unchecked")
private UpdateBatchResult updateWithBatch(final ClusterNode node, final boolean hasNear, final GridNearAtomicAbstractUpdateRequest req, final GridNearAtomicUpdateResponse res, final List<GridDhtCacheEntry> locked, final GridCacheVersion ver, @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut, final boolean replicate, final String taskName, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean sndPrevVal) throws GridCacheEntryRemovedException {
    // Cannot update in batches during DR due to possible conflicts.
    assert !ctx.dr().receiveEnabled();
    // Should not request return values for putAll.
    assert !req.returnValue() || req.operation() == TRANSFORM;
    if (!F.isEmpty(req.filter()) && ctx.loadPreviousValue()) {
        try {
            reloadIfNeeded(locked);
        } catch (IgniteCheckedException e) {
            res.addFailedKeys(req.keys(), e);
            return new UpdateBatchResult();
        }
    }
    int size = req.size();
    Map<KeyCacheObject, CacheObject> putMap = null;
    Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap = null;
    Collection<KeyCacheObject> rmvKeys = null;
    List<CacheObject> writeVals = null;
    UpdateBatchResult updRes = new UpdateBatchResult();
    List<GridDhtCacheEntry> filtered = new ArrayList<>(size);
    GridCacheOperation op = req.operation();
    GridCacheReturn invokeRes = null;
    int firstEntryIdx = 0;
    boolean intercept = ctx.config().getInterceptor() != null;
    for (int i = 0; i < locked.size(); i++) {
        GridDhtCacheEntry entry = locked.get(i);
        try {
            if (!checkFilter(entry, req, res)) {
                if (expiry != null && entry.hasValue()) {
                    long ttl = expiry.forAccess();
                    if (ttl != CU.TTL_NOT_CHANGED) {
                        entry.updateTtl(null, ttl);
                        expiry.ttlUpdated(entry.key(), entry.version(), entry.readers());
                    }
                }
                if (log.isDebugEnabled())
                    log.debug("Entry did not pass the filter (will skip write) [entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ", res=" + res + ']');
                if (hasNear)
                    res.addSkippedIndex(i);
                firstEntryIdx++;
                continue;
            }
            if (op == TRANSFORM) {
                EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
                CacheObject old = entry.innerGet(ver, null, /*read through*/true, /*metrics*/true,
                    /*event*/true, req.subjectId(), entryProcessor, taskName, null, req.keepBinary());
                Object oldVal = null;
                Object updatedVal = null;
                CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry(entry.key(), old, entry.version(), req.keepBinary(), entry);
                CacheObject updated;
                try {
                    Object computed = entryProcessor.process(invokeEntry, req.invokeArguments());
                    if (computed != null) {
                        if (invokeRes == null)
                            invokeRes = new GridCacheReturn(node.isLocal());
                        computed = ctx.unwrapTemporary(computed);
                        invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), computed, null, req.keepBinary());
                    }
                    if (!invokeEntry.modified())
                        continue;
                    updatedVal = ctx.unwrapTemporary(invokeEntry.getValue());
                    updated = ctx.toCacheObject(updatedVal);
                } catch (Exception e) {
                    if (invokeRes == null)
                        invokeRes = new GridCacheReturn(node.isLocal());
                    invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), null, e, req.keepBinary());
                    updated = old;
                }
                if (updated == null) {
                    if (intercept) {
                        CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
                        IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(e);
                        if (ctx.cancelRemove(interceptorRes))
                            continue;
                    }
                    // Update previous batch.
                    if (putMap != null) {
                        dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, null, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
                        firstEntryIdx = i;
                        putMap = null;
                        writeVals = null;
                        entryProcessorMap = null;
                        filtered = new ArrayList<>();
                    }
                    // Start collecting new batch.
                    if (rmvKeys == null)
                        rmvKeys = new ArrayList<>(size);
                    rmvKeys.add(entry.key());
                } else {
                    if (intercept) {
                        CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
                        Object val = ctx.config().getInterceptor().onBeforePut(e, updatedVal);
                        if (val == null)
                            continue;
                        updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
                    }
                    // Update previous batch.
                    if (rmvKeys != null) {
                        dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, null, null, rmvKeys, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
                        firstEntryIdx = i;
                        rmvKeys = null;
                        entryProcessorMap = null;
                        filtered = new ArrayList<>();
                    }
                    if (putMap == null) {
                        putMap = new LinkedHashMap<>(size, 1.0f);
                        writeVals = new ArrayList<>(size);
                    }
                    putMap.put(entry.key(), updated);
                    writeVals.add(updated);
                }
                if (entryProcessorMap == null)
                    entryProcessorMap = new HashMap<>();
                entryProcessorMap.put(entry.key(), entryProcessor);
            } else if (op == UPDATE) {
                CacheObject updated = req.value(i);
                if (intercept) {
                    CacheObject old = entry.innerGet(null, null, /*read through*/ctx.loadPreviousValue(),
                        /*metrics*/true, /*event*/true, req.subjectId(), null, taskName, null, req.keepBinary());
                    Object val = ctx.config().getInterceptor().onBeforePut(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()), ctx.unwrapBinaryIfNeeded(updated, req.keepBinary(), false));
                    if (val == null)
                        continue;
                    updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
                }
                assert updated != null;
                if (putMap == null) {
                    putMap = new LinkedHashMap<>(size, 1.0f);
                    writeVals = new ArrayList<>(size);
                }
                putMap.put(entry.key(), updated);
                writeVals.add(updated);
            } else {
                assert op == DELETE;
                if (intercept) {
                    CacheObject old = entry.innerGet(null, null, /*read through*/ctx.loadPreviousValue(),
                        /*metrics*/true, /*event*/true, req.subjectId(), null, taskName, null, req.keepBinary());
                    IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()));
                    if (ctx.cancelRemove(interceptorRes))
                        continue;
                }
                if (rmvKeys == null)
                    rmvKeys = new ArrayList<>(size);
                rmvKeys.add(entry.key());
            }
            filtered.add(entry);
        } catch (IgniteCheckedException e) {
            res.addFailedKey(entry.key(), e);
        }
    }
    // Store final batch.
    if (putMap != null || rmvKeys != null) {
        dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, rmvKeys, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
    } else
        assert filtered.isEmpty();
    updRes.dhtFuture(dhtFut);
    updRes.invokeResult(invokeRes);
    return updRes;
}
Also used : CacheLazyEntry(org.apache.ignite.internal.processors.cache.CacheLazyEntry) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) ArrayList(java.util.ArrayList) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) StorageException(org.apache.ignite.internal.pagemem.wal.StorageException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) NodeStoppingException(org.apache.ignite.internal.NodeStoppingException) ClusterTopologyCheckedException(org.apache.ignite.internal.cluster.ClusterTopologyCheckedException) CacheStorePartialUpdateException(org.apache.ignite.internal.processors.cache.CacheStorePartialUpdateException) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) GridCacheReturn(org.apache.ignite.internal.processors.cache.GridCacheReturn) EntryProcessor(javax.cache.processor.EntryProcessor) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) CacheInvokeEntry(org.apache.ignite.internal.processors.cache.CacheInvokeEntry) GridTimeoutObject(org.apache.ignite.internal.processors.timeout.GridTimeoutObject) GridCacheOperation(org.apache.ignite.internal.processors.cache.GridCacheOperation)
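updateWithBatch() cannot send puts and removes to the store in a single call, so whenever the operation kind flips it first flushes the batch collected so far (the two "Update previous batch" blocks above). A minimal sketch of that flush-on-switch pattern; Store, BatchedWriter and the method names here are illustrative, not Ignite API:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class BatchedWriter {
    /** Hypothetical store that accepts homogeneous batches only. */
    interface Store {
        void putAll(Map<String, Object> entries);
        void removeAll(List<String> keys);
    }

    private final Store store;

    private Map<String, Object> putMap;
    private List<String> rmvKeys;

    BatchedWriter(Store store) { this.store = store; }

    void put(String key, Object val) {
        flushRemoves(); // Operation kind switched: push pending removes first.

        if (putMap == null)
            putMap = new LinkedHashMap<>();

        putMap.put(key, val);
    }

    void remove(String key) {
        flushPuts(); // Operation kind switched: push pending puts first.

        if (rmvKeys == null)
            rmvKeys = new ArrayList<>();

        rmvKeys.add(key);
    }

    /** Stores whatever remains, mirroring the "Store final batch" step above. */
    void flush() {
        flushPuts();
        flushRemoves();
    }

    private void flushPuts() {
        if (putMap != null) {
            store.putAll(putMap);
            putMap = null;
        }
    }

    private void flushRemoves() {
        if (rmvKeys != null) {
            store.removeAll(rmvKeys);
            rmvKeys = null;
        }
    }
}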

Example 33 with IgniteBiTuple

Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.

The class GridDhtAtomicCache, method updatePartialBatch.

/**
     * @param hasNear {@code True} if originating node has near cache.
     * @param firstEntryIdx Index of the first entry in the request keys collection.
     * @param entries Entries to update.
     * @param ver Version to set.
     * @param nearNode Originating node.
     * @param writeVals Write values.
     * @param putMap Values to put.
     * @param rmvKeys Keys to remove.
     * @param entryProcessorMap Entry processors.
     * @param dhtFut DHT update future if has backups.
     * @param req Request.
     * @param res Response.
     * @param replicate Whether replication is enabled.
     * @param batchRes Batch update result.
     * @param taskName Task name.
     * @param expiry Expiry policy.
     * @param sndPrevVal If {@code true} sends previous value to backups.
     * @return Deleted entries.
     */
@SuppressWarnings("ForLoopReplaceableByForEach")
@Nullable
private GridDhtAtomicAbstractUpdateFuture updatePartialBatch(final boolean hasNear, final int firstEntryIdx, final List<GridDhtCacheEntry> entries, final GridCacheVersion ver, final ClusterNode nearNode, @Nullable final List<CacheObject> writeVals, @Nullable final Map<KeyCacheObject, CacheObject> putMap, @Nullable final Collection<KeyCacheObject> rmvKeys, @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap, @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut, final GridNearAtomicAbstractUpdateRequest req, final GridNearAtomicUpdateResponse res, final boolean replicate, final UpdateBatchResult batchRes, final String taskName, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean sndPrevVal) {
    assert putMap == null ^ rmvKeys == null;
    assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";
    AffinityTopologyVersion topVer = req.topologyVersion();
    boolean checkReaders = hasNear || ctx.discovery().hasNearCache(ctx.cacheId(), topVer);
    CacheStorePartialUpdateException storeErr = null;
    try {
        GridCacheOperation op;
        if (putMap != null) {
            try {
                Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap, new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {

                    @Override
                    public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
                        return F.t(val, ver);
                    }
                });
                ctx.store().putAll(null, view);
            } catch (CacheStorePartialUpdateException e) {
                storeErr = e;
            }
            op = UPDATE;
        } else {
            try {
                ctx.store().removeAll(null, rmvKeys);
            } catch (CacheStorePartialUpdateException e) {
                storeErr = e;
            }
            op = DELETE;
        }
        boolean intercept = ctx.config().getInterceptor() != null;
        AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
        // Avoid iterator creation.
        for (int i = 0; i < entries.size(); i++) {
            GridDhtCacheEntry entry = entries.get(i);
            assert Thread.holdsLock(entry);
            if (entry.obsolete()) {
                assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;
                continue;
            }
            if (storeErr != null && storeErr.failedKeys().contains(entry.key().value(ctx.cacheObjectContext(), false)))
                continue;
            try {
                // We are holding java-level locks on entries at this point.
                CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
                assert writeVal != null || op == DELETE : "null write value found.";
                Collection<UUID> readers = null;
                Collection<UUID> filteredReaders = null;
                if (checkReaders) {
                    readers = entry.readers();
                    filteredReaders = F.view(entry.readers(), F.notEqualTo(nearNode.id()));
                }
                GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal,
                    null, /*write-through*/false, /*read-through*/false, /*retval*/sndPrevVal,
                    req.keepBinary(), expiry, /*event*/true, /*metrics*/true, /*primary*/true, /*verCheck*/false,
                    topVer, null, replicate ? DR_PRIMARY : DR_NONE,
                    CU.TTL_NOT_CHANGED, CU.EXPIRE_TIME_CALCULATE, null,
                    /*conflict resolve*/false, /*intercept*/false, req.subjectId(), taskName, null, null, dhtFut);
                assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null : "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;
                if (intercept) {
                    if (op == UPDATE) {
                        ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
                    } else {
                        assert op == DELETE : op;
                        // Old value should already be loaded for 'CacheInterceptor.onBeforeRemove'.
                        ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
                    }
                }
                batchRes.addDeleted(entry, updRes, entries);
                if (dhtFut != null) {
                    EntryProcessor<Object, Object, Object> entryProcessor = entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());
                    dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
                    if (!F.isEmpty(filteredReaders))
                        dhtFut.addNearWriteEntries(filteredReaders, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
                }
                if (hasNear) {
                    if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
                        int idx = firstEntryIdx + i;
                        if (req.operation() == TRANSFORM) {
                            res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
                        } else
                            res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
                        if (writeVal != null || entry.hasValue()) {
                            IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
                            assert f == null : f;
                        }
                    } else if (readers.contains(nearNode.id()))
                        // Reader became primary or backup.
                        entry.removeReader(nearNode.id(), req.messageId());
                    else
                        res.addSkippedIndex(firstEntryIdx + i);
                }
            } catch (GridCacheEntryRemovedException e) {
                assert false : "Entry cannot become obsolete while holding lock.";
                e.printStackTrace();
            }
        }
    } catch (IgniteCheckedException e) {
        res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
    }
    if (storeErr != null) {
        ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());
        for (Object failedKey : storeErr.failedKeys())
            failed.add(ctx.toCacheKeyObject(failedKey));
        res.addFailedKeys(failed, storeErr.getCause());
    }
    return dhtFut;
}
Also used : CacheLazyEntry(org.apache.ignite.internal.processors.cache.CacheLazyEntry) AffinityAssignment(org.apache.ignite.internal.processors.affinity.AffinityAssignment) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) ArrayList(java.util.ArrayList) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) CacheStorePartialUpdateException(org.apache.ignite.internal.processors.cache.CacheStorePartialUpdateException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) UUID(java.util.UUID) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) GridTimeoutObject(org.apache.ignite.internal.processors.timeout.GridTimeoutObject) GridCacheOperation(org.apache.ignite.internal.processors.cache.GridCacheOperation) GridCacheUpdateAtomicResult(org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult) Nullable(org.jetbrains.annotations.Nullable)
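Note that the putAll branch above never copies putMap: F.viewReadOnly(...) wraps it in a lazy read-only view that pairs each value with the update version via F.t on access. A small self-contained sketch of the same idiom, with a plain long standing in for GridCacheVersion (F and C1 are Ignite's internal functional typedefs, used as in the source):

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.ignite.internal.util.typedef.C1;
import org.apache.ignite.internal.util.typedef.F;
import org.apache.ignite.lang.IgniteBiTuple;

public class VersionedView {
    public static void main(String[] args) {
        final long ver = 1L; // Stand-in for GridCacheVersion.

        Map<String, String> putMap = new LinkedHashMap<>();
        putMap.put("k1", "v1");
        putMap.put("k2", "v2");

        // Lazy read-only view: each value is paired with the version
        // only when the entry is actually read, without copying the map.
        Map<String, IgniteBiTuple<String, Long>> view = F.viewReadOnly(putMap,
            new C1<String, IgniteBiTuple<String, Long>>() {
                @Override public IgniteBiTuple<String, Long> apply(String val) {
                    return F.t(val, ver);
                }
            });

        for (Map.Entry<String, IgniteBiTuple<String, Long>> e : view.entrySet())
            System.out.println(e.getKey() + " -> " + e.getValue().get1() + " @ v" + e.getValue().get2());
    }
}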

Example 34 with IgniteBiTuple

Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.

The class CacheObjectBinaryProcessorImpl, method marshalToBinary.

/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public Object marshalToBinary(@Nullable Object obj) throws BinaryObjectException {
    if (obj == null)
        return null;
    if (BinaryUtils.isBinaryType(obj.getClass()))
        return obj;
    if (obj instanceof Object[]) {
        Object[] arr = (Object[]) obj;
        Object[] pArr = new Object[arr.length];
        for (int i = 0; i < arr.length; i++)
            pArr[i] = marshalToBinary(arr[i]);
        return pArr;
    }
    if (obj instanceof IgniteBiTuple) {
        IgniteBiTuple tup = (IgniteBiTuple) obj;
        if (obj instanceof T2)
            return new T2<>(marshalToBinary(tup.get1()), marshalToBinary(tup.get2()));
        return new IgniteBiTuple<>(marshalToBinary(tup.get1()), marshalToBinary(tup.get2()));
    }
    {
        Collection<Object> pCol = BinaryUtils.newKnownCollection(obj);
        if (pCol != null) {
            Collection<?> col = (Collection<?>) obj;
            for (Object item : col)
                pCol.add(marshalToBinary(item));
            return pCol;
        }
    }
    {
        Map<Object, Object> pMap = BinaryUtils.newKnownMap(obj);
        if (pMap != null) {
            Map<?, ?> map = (Map<?, ?>) obj;
            for (Map.Entry<?, ?> e : map.entrySet())
                pMap.put(marshalToBinary(e.getKey()), marshalToBinary(e.getValue()));
            return pMap;
        }
    }
    if (obj instanceof Map.Entry) {
        Map.Entry<?, ?> e = (Map.Entry<?, ?>) obj;
        return new GridMapEntry<>(marshalToBinary(e.getKey()), marshalToBinary(e.getValue()));
    }
    if (binaryMarsh.mustDeserialize(obj))
        // No need to go through marshal-unmarshal because the result will be the same as the initial object.
        return obj;
    byte[] arr = binaryMarsh.marshal(obj);
    assert arr.length > 0;
    Object obj0 = binaryMarsh.unmarshal(arr, null);
    // Possible if a class has a writeObject method.
    if (obj0 instanceof BinaryObjectImpl)
        ((BinaryObjectImpl) obj0).detachAllowed(true);
    return obj0;
}
Also used : BinaryObjectImpl(org.apache.ignite.internal.binary.BinaryObjectImpl) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) GridMapEntry(org.apache.ignite.internal.util.lang.GridMapEntry) Collection(java.util.Collection) BinaryObject(org.apache.ignite.binary.BinaryObject) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) T2(org.apache.ignite.internal.util.typedef.T2)
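The tuple branch in marshalToBinary() is worth a second look: T2 extends IgniteBiTuple, so it is tested first and rebuilt as a T2 to preserve the concrete type, while any other IgniteBiTuple is rebuilt as a plain IgniteBiTuple. A minimal sketch of that dispatch, with a hypothetical recursive convert() standing in for marshalToBinary():

import org.apache.ignite.internal.util.typedef.T2;
import org.apache.ignite.lang.IgniteBiTuple;

public class TupleDispatch {
    /** Hypothetical stand-in for marshalToBinary(); identity for non-tuples here. */
    static Object convert(Object obj) {
        if (obj instanceof IgniteBiTuple) {
            IgniteBiTuple<?, ?> tup = (IgniteBiTuple<?, ?>) obj;

            // T2 is a subclass of IgniteBiTuple, so it must be checked first,
            // otherwise the rebuild would silently widen it to IgniteBiTuple.
            if (obj instanceof T2)
                return new T2<>(convert(tup.get1()), convert(tup.get2()));

            return new IgniteBiTuple<>(convert(tup.get1()), convert(tup.get2()));
        }

        return obj;
    }

    public static void main(String[] args) {
        assert convert(new T2<>("a", 1)) instanceof T2; // Concrete type preserved.
        assert convert(new IgniteBiTuple<>("a", 1)) instanceof IgniteBiTuple;
    }
}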

Example 35 with IgniteBiTuple

Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.

The class GridCacheQueryManager, method scanQueryLocal.

/**
     * Process local scan query.
     *
     * @param qry Query.
     * @param updStatisticsIfNeeded Update statistics flag.
     */
@SuppressWarnings({ "unchecked", "serial" })
protected GridCloseableIterator scanQueryLocal(final GridCacheQueryAdapter qry, final boolean updStatisticsIfNeeded) throws IgniteCheckedException {
    if (!enterBusy())
        throw new IllegalStateException("Failed to process query request (grid is stopping).");
    final boolean statsEnabled = cctx.config().isStatisticsEnabled();
    boolean needUpdStatistics = updStatisticsIfNeeded && statsEnabled;
    long startTime = U.currentTimeMillis();
    final String namex = cctx.name();
    try {
        assert qry.type() == SCAN;
        if (log.isDebugEnabled())
            log.debug("Running local SCAN query: " + qry);
        final String taskName = cctx.kernalContext().task().resolveTaskName(qry.taskHash());
        final IgniteBiPredicate filter = qry.scanFilter();
        final ClusterNode locNode = cctx.localNode();
        final UUID subjId = qry.subjectId();
        if (cctx.gridEvents().isRecordable(EVT_CACHE_QUERY_EXECUTED)) {
            cctx.gridEvents().record(new CacheQueryExecutedEvent<>(locNode, "Scan query executed.", EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SCAN.name(), namex, null, null, filter, null, null, subjId, taskName));
        }
        final GridCloseableIterator<IgniteBiTuple<K, V>> iter = scanIterator(qry, true);
        if (updStatisticsIfNeeded)
            needUpdStatistics = false;
        final boolean readEvt = cctx.gridEvents().isRecordable(EVT_CACHE_QUERY_OBJECT_READ);
        return new GridCloseableIteratorAdapter<Object>() {

            @Override
            protected Object onNext() throws IgniteCheckedException {
                long start = statsEnabled ? System.nanoTime() : 0L;
                IgniteBiTuple<K, V> next = iter.nextX();
                if (statsEnabled) {
                    CacheMetricsImpl metrics = cctx.cache().metrics0();
                    metrics.onRead(true);
                    metrics.addGetTimeNanos(System.nanoTime() - start);
                }
                if (readEvt) {
                    cctx.gridEvents().record(new CacheQueryReadEvent<>(cctx.localNode(), "Scan query entry read.", EVT_CACHE_QUERY_OBJECT_READ, CacheQueryType.SCAN.name(), namex, null, null, filter, null, null, subjId, taskName, next.getKey(), next.getValue(), null, null));
                }
                IgniteClosure transform = qry.transform();
                if (transform == null)
                    return next;
                Cache.Entry<K, V> entry;
                if (qry.keepBinary())
                    entry = cctx.cache().keepBinary().getEntry(next.getKey());
                else
                    entry = cctx.cache().getEntry(next.getKey());
                return transform.apply(entry);
            }

            @Override
            protected boolean onHasNext() throws IgniteCheckedException {
                return iter.hasNextX();
            }

            @Override
            protected void onClose() throws IgniteCheckedException {
                iter.close();
            }
        };
    } catch (Exception e) {
        if (needUpdStatistics)
            cctx.queries().collectMetrics(GridCacheQueryType.SCAN, namex, startTime, U.currentTimeMillis() - startTime, true);
        throw e;
    } finally {
        leaveBusy();
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridCloseableIteratorAdapter(org.apache.ignite.internal.util.GridCloseableIteratorAdapter) IgniteClosure(org.apache.ignite.lang.IgniteClosure) IgniteBiPredicate(org.apache.ignite.lang.IgniteBiPredicate) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) CacheMetricsImpl(org.apache.ignite.internal.processors.cache.CacheMetricsImpl) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) SQLException(java.sql.SQLException) IOException(java.io.IOException) IgniteException(org.apache.ignite.IgniteException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) NodeStoppingException(org.apache.ignite.internal.NodeStoppingException) NoSuchElementException(java.util.NoSuchElementException) GridDhtUnreservedPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtUnreservedPartitionException) UUID(java.util.UUID) Cache(javax.cache.Cache) IgniteInternalCache(org.apache.ignite.internal.processors.cache.IgniteInternalCache)
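scanQueryLocal() is internal plumbing; from the public API the equivalent scan is issued through ScanQuery, whose IgniteBiPredicate filter corresponds to qry.scanFilter() above. A minimal usage sketch (the cache name, keys and filter are illustrative):

import javax.cache.Cache;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.lang.IgniteBiPredicate;

public class ScanQueryExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("myCache");

            cache.put(1, "one");
            cache.put(2, "two");

            // Corresponds to qry.scanFilter() in scanQueryLocal() above.
            IgniteBiPredicate<Integer, String> filter = (k, v) -> k > 1;

            try (QueryCursor<Cache.Entry<Integer, String>> cur = cache.query(new ScanQuery<>(filter))) {
                for (Cache.Entry<Integer, String> e : cur)
                    System.out.println(e.getKey() + " -> " + e.getValue());
            }
        }
    }
}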

Aggregations

IgniteBiTuple (org.apache.ignite.lang.IgniteBiTuple): 64 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 35 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 18 usages
IgniteException (org.apache.ignite.IgniteException): 17 usages
ArrayList (java.util.ArrayList): 14 usages
CacheObject (org.apache.ignite.internal.processors.cache.CacheObject): 13 usages
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 13 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode): 11 usages
HashMap (java.util.HashMap): 10 usages
UUID (java.util.UUID): 10 usages
IOException (java.io.IOException): 9 usages
List (java.util.List): 9 usages
Map (java.util.Map): 8 usages
CacheStorePartialUpdateException (org.apache.ignite.internal.processors.cache.CacheStorePartialUpdateException): 8 usages
GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException): 8 usages
LinkedHashMap (java.util.LinkedHashMap): 7 usages
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 7 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 6 usages
Event (org.apache.ignite.events.Event): 6 usages
IgfsPath (org.apache.ignite.igfs.IgfsPath): 6 usages