
Example 21 with GridDhtCacheEntry

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.

From class GridDhtAtomicCache, method processDhtAtomicUpdateRequest.

/**
 * @param nodeId Sender node ID.
 * @param req Dht atomic update request.
 */
private void processDhtAtomicUpdateRequest(UUID nodeId, GridDhtAtomicAbstractUpdateRequest req) {
    assert Thread.currentThread().getName().startsWith("sys-stripe-") : Thread.currentThread().getName();
    if (msgLog.isDebugEnabled()) {
        msgLog.debug("Received DHT atomic update request [futId=" + req.futureId() + ", writeVer=" + req.writeVersion() + ", node=" + nodeId + ']');
    }
    assert req.partition() >= 0 : req;
    GridCacheVersion ver = req.writeVersion();
    ctx.versions().onReceived(nodeId, ver);
    GridDhtAtomicNearResponse nearRes = null;
    if (req.nearNodeId() != null) {
        nearRes = new GridDhtAtomicNearResponse(ctx.cacheId(), req.partition(), req.nearFutureId(), nodeId, req.flags());
    }
    boolean replicate = ctx.isDrEnabled();
    boolean intercept = req.forceTransformBackups() && ctx.config().getInterceptor() != null;
    boolean needTaskName = ctx.events().isRecordable(EVT_CACHE_OBJECT_READ) || ctx.events().isRecordable(EVT_CACHE_OBJECT_PUT) || ctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED);
    String taskName = needTaskName ? ctx.kernalContext().task().resolveTaskName(req.taskNameHash()) : null;
    ctx.shared().database().checkpointReadLock();
    try {
        for (int i = 0; i < req.size(); i++) {
            KeyCacheObject key = req.key(i);
            try {
                while (true) {
                    GridDhtCacheEntry entry = null;
                    try {
                        entry = entryExx(key);
                        CacheObject val = req.value(i);
                        CacheObject prevVal = req.previousValue(i);
                        EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
                        Long updateIdx = req.updateCounter(i);
                        GridCacheOperation op = entryProcessor != null ? TRANSFORM : (val != null) ? UPDATE : DELETE;
                        long ttl = req.ttl(i);
                        long expireTime = req.conflictExpireTime(i);
                        GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nodeId, nodeId, op,
                            op == TRANSFORM ? entryProcessor : val, op == TRANSFORM ? req.invokeArguments() : null,
                            /*write-through*/(ctx.store().isLocal() && !ctx.shared().localStorePrimaryOnly()) && writeThrough() && !req.skipStore(),
                            /*read-through*/false, /*retval*/false, req.keepBinary(), /*expiry policy*/null,
                            /*event*/true, /*metrics*/true, /*primary*/false, /*check version*/!req.forceTransformBackups(),
                            req.topologyVersion(), CU.empty0(), replicate ? DR_BACKUP : DR_NONE, ttl, expireTime,
                            req.conflictVersion(i), false, intercept, taskName, prevVal, updateIdx, null, req.transformOperation());
                        if (updRes.removeVersion() != null)
                            ctx.onDeferredDelete(entry, updRes.removeVersion());
                        entry.onUnlock();
                        // While.
                        break;
                    } catch (GridCacheEntryRemovedException ignored) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry while updating backup value (will retry): " + key);
                        entry = null;
                    } finally {
                        if (entry != null)
                            entry.touch();
                    }
                }
            } catch (NodeStoppingException e) {
                U.warn(log, "Failed to update key on backup (local node is stopping): " + key);
                return;
            } catch (GridDhtInvalidPartitionException ignored) {
                // Ignore.
            } catch (IgniteCheckedException | RuntimeException e) {
                if (e instanceof RuntimeException && !X.hasCause(e, IgniteOutOfMemoryException.class))
                    throw (RuntimeException) e;
                IgniteCheckedException err = new IgniteCheckedException("Failed to update key on backup node: " + key, e);
                if (nearRes != null)
                    nearRes.addFailedKey(key, err);
                U.error(log, "Failed to update key on backup node: " + key, e);
            }
        }
    } finally {
        ctx.shared().database().checkpointReadUnlock();
    }
    GridDhtAtomicUpdateResponse dhtRes = null;
    if (req.nearSize() > 0 || req.obsoleteNearKeysSize() > 0) {
        List<KeyCacheObject> nearEvicted = null;
        if (isNearEnabled(ctx))
            nearEvicted = ((GridNearAtomicCache<K, V>) near()).processDhtAtomicUpdateRequest(nodeId, req, nearRes);
        else if (req.nearSize() > 0) {
            nearEvicted = new ArrayList<>(req.nearSize());
            for (int i = 0; i < req.nearSize(); i++) nearEvicted.add(req.nearKey(i));
        }
        if (nearEvicted != null) {
            dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(), ctx.deploymentEnabled());
            dhtRes.nearEvicted(nearEvicted);
        }
    }
    try {
        // TODO fire events only after successful fsync
        if (ctx.shared().wal() != null)
            ctx.shared().wal().flush(null, false);
    } catch (StorageException e) {
        if (dhtRes != null)
            dhtRes.onError(new IgniteCheckedException(e));
        if (nearRes != null)
            nearRes.onClassError(e);
    } catch (IgniteCheckedException e) {
        if (dhtRes != null)
            dhtRes.onError(e);
        if (nearRes != null)
            nearRes.onClassError(e);
    }
    if (nearRes != null)
        sendDhtNearResponse(req, nearRes);
    if (dhtRes == null && req.replyWithoutDelay()) {
        dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(), ctx.deploymentEnabled());
    }
    if (dhtRes != null)
        sendDhtPrimaryResponse(nodeId, req, dhtRes);
    else
        sendDeferredUpdateResponse(req.partition(), nodeId, req.futureId());
}
Also used : NodeStoppingException(org.apache.ignite.internal.NodeStoppingException) ArrayList(java.util.ArrayList) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) GridNearAtomicCache(org.apache.ignite.internal.processors.cache.distributed.near.GridNearAtomicCache) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException) IgniteOutOfMemoryException(org.apache.ignite.internal.mem.IgniteOutOfMemoryException) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) GridTimeoutObject(org.apache.ignite.internal.processors.timeout.GridTimeoutObject) GridCacheOperation(org.apache.ignite.internal.processors.cache.GridCacheOperation) GridCacheUpdateAtomicResult(org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult) StorageException(org.apache.ignite.internal.processors.cache.persistence.StorageException)
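
The core of this method is the while (true) retry loop: the entry is looked up, updated, and if a concurrent removal raced with the update (GridCacheEntryRemovedException), the loop retries against a fresh entry, while touch() runs only for entries that were actually obtained. The standalone sketch below mimics that control flow with simplified stand-in types; Entry, EntryRemovedException, updateWithRetry and the backing map are hypothetical illustrations, not Ignite API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RetryOnRemovedSketch {
    /** Thrown when a concurrently removed entry is touched (stand-in for GridCacheEntryRemovedException). */
    static class EntryRemovedException extends Exception { }

    /** Minimal cache entry stand-in. */
    static class Entry {
        final String key;
        volatile boolean removed;
        volatile String val;

        Entry(String key) { this.key = key; }

        void update(String newVal) throws EntryRemovedException {
            if (removed)
                throw new EntryRemovedException();
            val = newVal;
        }

        void touch() { /* e.g. notify eviction policy; no-op in this sketch */ }
    }

    static final Map<String, Entry> cache = new ConcurrentHashMap<>();

    /** Applies an update, retrying with a fresh entry if the current one was removed concurrently. */
    static void updateWithRetry(String key, String newVal) {
        while (true) {
            Entry entry = null;
            try {
                entry = cache.computeIfAbsent(key, Entry::new);
                entry.update(newVal);
                break; // Success: leave the retry loop.
            }
            catch (EntryRemovedException ignored) {
                // Entry vanished between lookup and update: drop it and retry with a fresh one.
                cache.remove(key, entry);
                entry = null;
            }
            finally {
                if (entry != null)
                    entry.touch(); // Only touch entries we actually held on to.
            }
        }
    }

    public static void main(String[] args) {
        updateWithRetry("k1", "v1");
        System.out.println(cache.get("k1").val); // v1
    }
}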

Example 22 with GridDhtCacheEntry

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.

From class GridDhtAtomicCache, method updateWithBatch.

/**
 * Updates locked entries using batched write-through.
 *
 * @param node Sender node.
 * @param hasNear {@code True} if originating node has near cache.
 * @param req Update request.
 * @param res Update response.
 * @param locked Locked entries.
 * @param ver Assigned version.
 * @param replicate Whether replication is enabled.
 * @param taskName Task name.
 * @param expiry Expiry policy.
 * @param sndPrevVal If {@code true} sends previous value to backups.
 * @param dhtUpdRes DHT update result.
 * @throws GridCacheEntryRemovedException Should not be thrown.
 */
@SuppressWarnings("unchecked")
private void updateWithBatch(final ClusterNode node, final boolean hasNear, final GridNearAtomicAbstractUpdateRequest req, final GridNearAtomicUpdateResponse res, final List<GridDhtCacheEntry> locked, final GridCacheVersion ver, final boolean replicate, final String taskName, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean sndPrevVal, final DhtAtomicUpdateResult dhtUpdRes) throws GridCacheEntryRemovedException {
    // Cannot update in batches during DR due to possible conflicts.
    assert !ctx.dr().receiveEnabled();
    // Should not request return values for putAll.
    assert !req.returnValue() || req.operation() == TRANSFORM;
    if (!F.isEmpty(req.filter()) && ctx.loadPreviousValue()) {
        try {
            reloadIfNeeded(locked);
        } catch (IgniteCheckedException e) {
            res.addFailedKeys(req.keys(), e);
            return;
        }
    }
    int size = req.size();
    Map<KeyCacheObject, CacheObject> putMap = null;
    Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap = null;
    Collection<KeyCacheObject> rmvKeys = null;
    List<CacheObject> writeVals = null;
    List<GridDhtCacheEntry> filtered = new ArrayList<>(size);
    GridCacheOperation op = req.operation();
    GridCacheReturn invokeRes = null;
    int firstEntryIdx = 0;
    boolean intercept = ctx.config().getInterceptor() != null;
    for (int i = dhtUpdRes.processedEntriesCount(); i < locked.size(); i++) {
        GridDhtCacheEntry entry = locked.get(i);
        try {
            if (!checkFilter(entry, req, res)) {
                if (expiry != null && entry.hasValue()) {
                    long ttl = expiry.forAccess();
                    if (ttl != CU.TTL_NOT_CHANGED) {
                        entry.updateTtl(null, ttl);
                        expiry.ttlUpdated(entry.key(), entry.version(), entry.readers());
                    }
                }
                if (log.isDebugEnabled())
                    log.debug("Entry did not pass the filter (will skip write) [entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ", res=" + res + ']');
                if (hasNear)
                    res.addSkippedIndex(i);
                firstEntryIdx++;
                continue;
            }
            if (op == TRANSFORM) {
                EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
                CacheObject old = entry.innerGet(ver, null, /*read through*/true, /*metrics*/true,
                    /*event*/true, entryProcessor, taskName, null, req.keepBinary());
                Object oldVal = null;
                Object updatedVal = null;
                CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry(entry.key(), old, entry.version(), req.keepBinary(), entry);
                CacheObject updated = null;
                if (invokeRes == null)
                    invokeRes = new GridCacheReturn(node.isLocal());
                CacheInvokeResult curInvokeRes = null;
                boolean validation = false;
                IgniteThread.onEntryProcessorEntered(true);
                try {
                    Object computed = entryProcessor.process(invokeEntry, req.invokeArguments());
                    if (computed != null) {
                        computed = ctx.unwrapTemporary(computed);
                        curInvokeRes = CacheInvokeResult.fromResult(computed);
                    }
                    if (!invokeEntry.modified()) {
                        if (ctx.statisticsEnabled())
                            ctx.cache().metrics0().onReadOnlyInvoke(old != null);
                        continue;
                    } else {
                        updatedVal = ctx.unwrapTemporary(invokeEntry.getValue());
                        updated = ctx.toCacheObject(updatedVal);
                        validation = true;
                        if (updated != null)
                            ctx.validateKeyAndValue(entry.key(), updated);
                    }
                } catch (UnregisteredClassException | UnregisteredBinaryTypeException e) {
                    throw e;
                } catch (Exception e) {
                    curInvokeRes = CacheInvokeResult.fromError(e);
                    updated = old;
                    if (validation) {
                        res.addSkippedIndex(i);
                        continue;
                    }
                } finally {
                    IgniteThread.onEntryProcessorLeft();
                    if (curInvokeRes != null) {
                        invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), curInvokeRes.result(), curInvokeRes.error(), req.keepBinary());
                    }
                }
                if (updated == null) {
                    if (intercept) {
                        CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
                        IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(e);
                        if (ctx.cancelRemove(interceptorRes))
                            continue;
                    }
                    // Update previous batch.
                    if (putMap != null) {
                        updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, null, entryProcessorMap, req, res, replicate, dhtUpdRes, taskName, expiry, sndPrevVal);
                        firstEntryIdx = i;
                        putMap = null;
                        writeVals = null;
                        entryProcessorMap = null;
                        filtered = new ArrayList<>();
                    }
                    // Start collecting new batch.
                    if (rmvKeys == null)
                        rmvKeys = new ArrayList<>(size);
                    rmvKeys.add(entry.key());
                } else {
                    if (intercept) {
                        CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
                        Object val = ctx.config().getInterceptor().onBeforePut(e, updatedVal);
                        if (val == null)
                            continue;
                        updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
                    }
                    // Update previous batch.
                    if (rmvKeys != null) {
                        updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, null, null, rmvKeys, entryProcessorMap, req, res, replicate, dhtUpdRes, taskName, expiry, sndPrevVal);
                        firstEntryIdx = i;
                        rmvKeys = null;
                        entryProcessorMap = null;
                        filtered = new ArrayList<>();
                    }
                    if (putMap == null) {
                        putMap = new LinkedHashMap<>(size, 1.0f);
                        writeVals = new ArrayList<>(size);
                    }
                    putMap.put(entry.key(), updated);
                    writeVals.add(updated);
                }
                if (entryProcessorMap == null)
                    entryProcessorMap = new HashMap<>();
                entryProcessorMap.put(entry.key(), entryProcessor);
            } else if (op == UPDATE) {
                CacheObject updated = req.value(i);
                if (intercept) {
                CacheObject old = entry.innerGet(null, null, /*read through*/ctx.loadPreviousValue(),
                    /*metrics*/true, /*event*/true, null, taskName, null, req.keepBinary());
                    Object val = ctx.config().getInterceptor().onBeforePut(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()), ctx.unwrapBinaryIfNeeded(updated, req.keepBinary(), false, null));
                    if (val == null)
                        continue;
                    updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
                }
                assert updated != null;
                ctx.validateKeyAndValue(entry.key(), updated);
                if (putMap == null) {
                    putMap = new LinkedHashMap<>(size, 1.0f);
                    writeVals = new ArrayList<>(size);
                }
                putMap.put(entry.key(), updated);
                writeVals.add(updated);
            } else {
                assert op == DELETE;
                if (intercept) {
                CacheObject old = entry.innerGet(null, null, /*read through*/ctx.loadPreviousValue(),
                    /*metrics*/true, /*event*/true, null, taskName, null, req.keepBinary());
                    IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()));
                    if (ctx.cancelRemove(interceptorRes))
                        continue;
                }
                if (rmvKeys == null)
                    rmvKeys = new ArrayList<>(size);
                rmvKeys.add(entry.key());
            }
            filtered.add(entry);
        } catch (IgniteCheckedException e) {
            res.addFailedKey(entry.key(), e);
        }
    }
    // Store final batch.
    if (putMap != null || rmvKeys != null) {
        updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, rmvKeys, entryProcessorMap, req, res, replicate, dhtUpdRes, taskName, expiry, sndPrevVal);
    } else
        assert filtered.isEmpty();
    dhtUpdRes.returnValue(invokeRes);
}
Also used : CacheLazyEntry(org.apache.ignite.internal.processors.cache.CacheLazyEntry) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) ArrayList(java.util.ArrayList) CacheInvokeResult(org.apache.ignite.internal.processors.cache.CacheInvokeResult) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) UnregisteredBinaryTypeException(org.apache.ignite.internal.UnregisteredBinaryTypeException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) UnregisteredClassException(org.apache.ignite.internal.UnregisteredClassException) GridCacheReturn(org.apache.ignite.internal.processors.cache.GridCacheReturn) IgniteCacheRestartingException(org.apache.ignite.IgniteCacheRestartingException) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException) StorageException(org.apache.ignite.internal.processors.cache.persistence.StorageException) IgniteException(org.apache.ignite.IgniteException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) IgniteOutOfMemoryException(org.apache.ignite.internal.mem.IgniteOutOfMemoryException) NodeStoppingException(org.apache.ignite.internal.NodeStoppingException) ClusterTopologyCheckedException(org.apache.ignite.internal.cluster.ClusterTopologyCheckedException) CacheStoppedException(org.apache.ignite.internal.processors.cache.CacheStoppedException) CacheStorePartialUpdateException(org.apache.ignite.internal.processors.cache.CacheStorePartialUpdateException) BinaryInvalidTypeException(org.apache.ignite.binary.BinaryInvalidTypeException) EntryProcessor(javax.cache.processor.EntryProcessor) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) CacheInvokeEntry(org.apache.ignite.internal.processors.cache.CacheInvokeEntry) GridTimeoutObject(org.apache.ignite.internal.processors.timeout.GridTimeoutObject) GridCacheOperation(org.apache.ignite.internal.processors.cache.GridCacheOperation)
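
updateWithBatch accumulates puts and removes into separate collections (putMap/writeVals vs. rmvKeys) and flushes the pending batch via updatePartialBatch whenever the operation kind switches, so the write-through order of the original request is preserved. The sketch below isolates that flush-on-switch bookkeeping; Op, flushPuts and flushRemoves are hypothetical stand-ins for the request entries and the store calls, not Ignite API.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class BatchedWriteThroughSketch {
    /** A single requested operation: value == null means remove (stand-in for the request entries). */
    record Op(String key, String value) { }

    static void flushPuts(Map<String, String> putMap) {
        System.out.println("putAll " + putMap); // Stand-in for the store putAll call.
    }

    static void flushRemoves(List<String> rmvKeys) {
        System.out.println("removeAll " + rmvKeys); // Stand-in for the store removeAll call.
    }

    /** Groups consecutive operations of the same kind and flushes a batch whenever the kind switches. */
    static void updateWithBatch(List<Op> ops) {
        Map<String, String> putMap = null;
        List<String> rmvKeys = null;

        for (Op op : ops) {
            if (op.value() == null) {
                // Switching to removes: flush the pending put batch first to keep ordering.
                if (putMap != null) {
                    flushPuts(putMap);
                    putMap = null;
                }
                if (rmvKeys == null)
                    rmvKeys = new ArrayList<>();
                rmvKeys.add(op.key());
            }
            else {
                // Switching to puts: flush the pending remove batch first.
                if (rmvKeys != null) {
                    flushRemoves(rmvKeys);
                    rmvKeys = null;
                }
                if (putMap == null)
                    putMap = new LinkedHashMap<>();
                putMap.put(op.key(), op.value());
            }
        }

        // Store the final batch.
        if (putMap != null)
            flushPuts(putMap);
        if (rmvKeys != null)
            flushRemoves(rmvKeys);
    }

    public static void main(String[] args) {
        updateWithBatch(List.of(new Op("a", "1"), new Op("b", "2"), new Op("a", null), new Op("c", "3")));
    }
}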

Example 23 with GridDhtCacheEntry

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.

From class GridNearLockFuture, method map.

/**
 * Maps keys to nodes. Note that we can not simply group keys by nodes and send lock request as
 * such approach does not preserve order of lock acquisition. Instead, keys are split in continuous
 * groups belonging to one primary node and locks for these groups are acquired sequentially.
 *
 * @param keys Keys.
 * @param remap Remap flag.
 * @param topLocked {@code True} if thread already acquired lock preventing topology change.
 */
private void map(Iterable<KeyCacheObject> keys, boolean remap, boolean topLocked) {
    try {
        AffinityTopologyVersion topVer = this.topVer;
        assert topVer != null;
        assert topVer.topologyVersion() > 0 : topVer;
        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
            onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for near-only cache (all " + "partition nodes left the grid)."));
            return;
        }
        boolean clientNode = cctx.kernalContext().clientNode();
        assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
        synchronized (this) {
            mappings = new ArrayDeque<>();
            // Assign keys to primary nodes.
            GridNearLockMapping map = null;
            for (KeyCacheObject key : keys) {
                GridNearLockMapping updated = map(key, map, topVer);
                // If new mapping was created, add to collection.
                if (updated != map) {
                    mappings.add(updated);
                    if (tx != null && updated.node().isLocal())
                        tx.nearLocallyMapped(true);
                }
                map = updated;
            }
            if (isDone()) {
                if (log.isDebugEnabled())
                    log.debug("Abandoning (re)map because future is done: " + this);
                return;
            }
            if (log.isDebugEnabled())
                log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
            boolean first = true;
            // Create mini futures.
            for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
                GridNearLockMapping mapping = iter.next();
                ClusterNode node = mapping.node();
                Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
                assert !mappedKeys.isEmpty();
                GridNearLockRequest req = null;
                Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
                boolean explicit = false;
                for (KeyCacheObject key : mappedKeys) {
                    IgniteTxKey txKey = cctx.txKey(key);
                    while (true) {
                        GridNearCacheEntry entry = null;
                        try {
                            entry = cctx.near().entryExx(key, topVer);
                            if (!cctx.isAll(entry, filter)) {
                                if (log.isDebugEnabled())
                                    log.debug("Entry being locked did not pass filter (will not lock): " + entry);
                                onComplete(false, false);
                                return;
                            }
                            // Removed exception may be thrown here.
                            GridCacheMvccCandidate cand = addEntry(topVer, entry, node.id());
                            if (isDone()) {
                                if (log.isDebugEnabled())
                                    log.debug("Abandoning (re)map because future is done after addEntry attempt " + "[fut=" + this + ", entry=" + entry + ']');
                                return;
                            }
                            if (cand != null) {
                                if (tx == null && !cand.reentry())
                                    cctx.mvcc().addExplicitLock(threadId, cand, topVer);
                                IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.versionedValue();
                                if (val == null) {
                                    GridDhtCacheEntry dhtEntry = dht().peekExx(key);
                                    try {
                                        if (dhtEntry != null)
                                            val = dhtEntry.versionedValue(topVer);
                                    } catch (GridCacheEntryRemovedException ignored) {
                                        assert dhtEntry.obsolete() : dhtEntry;
                                        if (log.isDebugEnabled())
                                            log.debug("Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                                    }
                                }
                                GridCacheVersion dhtVer = null;
                                if (val != null) {
                                    dhtVer = val.get1();
                                    valMap.put(key, val);
                                }
                                if (!cand.reentry()) {
                                    if (req == null) {
                                        boolean clientFirst = false;
                                        if (first) {
                                            clientFirst = clientNode && !topLocked && (tx == null || !tx.hasRemoteLocks());
                                            first = false;
                                        }
                                        assert !implicitTx() && !implicitSingleTx() : tx;
                                        req = new GridNearLockRequest(cctx.cacheId(), topVer, cctx.nodeId(), threadId, futId, lockVer, inTx(), read, retval, isolation(), isInvalidate(), timeout, mappedKeys.size(), inTx() ? tx.size() : mappedKeys.size(), inTx() && tx.syncMode() == FULL_SYNC, inTx() ? tx.taskNameHash() : 0, read ? createTtl : -1L, read ? accessTtl : -1L, skipStore, keepBinary, clientFirst, true, cctx.deploymentEnabled(), inTx() ? tx.label() : null);
                                        mapping.request(req);
                                    }
                                    distributedKeys.add(key);
                                    if (tx != null)
                                        tx.addKeyMapping(txKey, mapping.node());
                                    // Include DHT version to match remote DHT entry.
                                    req.addKeyBytes(key, retval && dhtVer == null, dhtVer, cctx);
                                }
                                if (cand.reentry())
                                    explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                            } else {
                                if (timedOut)
                                    return;
                                // Ignore reentries within transactions.
                                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
                            }
                            if (explicit)
                                tx.addKeyMapping(txKey, mapping.node());
                            break;
                        } catch (GridCacheEntryRemovedException ignored) {
                            assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;
                            if (log.isDebugEnabled())
                                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
                        }
                    }
                    // Mark mapping explicit lock flag.
                    if (explicit) {
                        boolean marked = tx != null && tx.markExplicit(node.id());
                        assert tx == null || marked;
                    }
                }
                if (!distributedKeys.isEmpty())
                    mapping.distributedKeys(distributedKeys);
                else {
                    assert mapping.request() == null;
                    iter.remove();
                }
            }
        }
        cctx.mvcc().recheckPendingLocks();
        proceedMapping();
    } catch (IgniteCheckedException ex) {
        onError(ex);
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) ArrayList(java.util.ArrayList) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) ClusterTopologyServerNotFoundException(org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) IgniteTxKey(org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) GridCacheMvccCandidate(org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate)
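
The javadoc of map explains why keys are not simply grouped per node: doing so would reorder lock acquisition, so consecutive keys that share a primary node form one mapping and a new mapping starts as soon as the primary changes. The sketch below isolates that grouping rule; Mapping, primaryOf and mapKeys are hypothetical stand-ins rather than the real GridNearLockMapping/affinity API.

import java.util.ArrayList;
import java.util.List;

public class ContiguousKeyGroupingSketch {
    /** One lock request to a single primary node, covering a run of consecutive keys (stand-in type). */
    record Mapping(String node, List<String> keys) { }

    /** Toy affinity function: picks a "primary node" per key (stand-in for real affinity). */
    static String primaryOf(String key) {
        return "node-" + (Math.abs(key.hashCode()) % 2);
    }

    /**
     * Splits keys into contiguous runs that share a primary node, preserving the original key order.
     * Grouping all keys per node up front would reorder lock acquisition across nodes.
     */
    static List<Mapping> mapKeys(List<String> keys) {
        List<Mapping> mappings = new ArrayList<>();
        Mapping cur = null;

        for (String key : keys) {
            String primary = primaryOf(key);

            // Start a new mapping whenever the primary node changes.
            if (cur == null || !cur.node().equals(primary)) {
                cur = new Mapping(primary, new ArrayList<>());
                mappings.add(cur);
            }

            cur.keys().add(key);
        }

        return mappings;
    }

    public static void main(String[] args) {
        for (Mapping m : mapKeys(List.of("a", "b", "c", "d", "e")))
            System.out.println(m.node() + " -> " + m.keys());
    }
}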

Example 24 with GridDhtCacheEntry

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.

From class GridNearCacheEntry, method initializeFromDht.

/**
 * @param topVer Topology version.
 * @throws GridCacheEntryRemovedException If this entry is obsolete.
 */
public void initializeFromDht(AffinityTopologyVersion topVer) throws GridCacheEntryRemovedException {
    GridDhtCacheEntry entry = cctx.near().dht().peekExx(key);
    if (entry != null) {
        GridCacheEntryInfo e = entry.info();
        if (e != null) {
            GridCacheVersion enqueueVer = null;
            try {
                ClusterNode primaryNode = cctx.affinity().primaryByKey(key, topVer);
                lockEntry();
                try {
                    checkObsolete();
                    if (isNew() || !valid(topVer)) {
                        // Version does not change for load ops.
                        update(e.value(), e.expireTime(), e.ttl(), e.isNew() ? ver : e.version(), true);
                        if (cctx.deferredDelete() && !isNew() && !isInternal()) {
                            boolean deleted = val == null;
                            if (deleted != deletedUnlocked()) {
                                deletedUnlocked(deleted);
                                if (deleted)
                                    enqueueVer = e.version();
                            }
                        }
                        if (primaryNode == null)
                            this.topVer = AffinityTopologyVersion.NONE;
                        else
                            recordNodeId(primaryNode.id(), topVer);
                        dhtVer = e.isNew() || e.isDeleted() ? null : e.version();
                    }
                } finally {
                    unlockEntry();
                }
            } finally {
                if (enqueueVer != null)
                    cctx.onDeferredDelete(this, enqueueVer);
            }
        }
    }
}
Also used : GridCacheEntryInfo(org.apache.ignite.internal.processors.cache.GridCacheEntryInfo) ClusterNode(org.apache.ignite.cluster.ClusterNode) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry)
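
initializeFromDht follows a guard-then-copy pattern: peek at the colocated DHT entry, and only under the entry lock (after an obsolescence check) copy its value and version into the near copy when the local state is new or no longer valid. Below is a minimal standalone sketch of that pattern, assuming simplified NearEntry/DhtEntry stand-ins rather than the real GridNearCacheEntry/GridDhtCacheEntry classes.

import java.util.concurrent.locks.ReentrantLock;

public class InitializeFromDhtSketch {
    /** Minimal stand-in for the authoritative (DHT) copy of an entry. */
    static class DhtEntry {
        final String value;
        final long version;

        DhtEntry(String value, long version) { this.value = value; this.version = version; }
    }

    /** Minimal stand-in for a near-cache entry that mirrors a DHT entry. */
    static class NearEntry {
        private final ReentrantLock lock = new ReentrantLock();
        private String value;
        private long version = -1; // -1 means "new": no value seen yet.
        private boolean obsolete;

        /** Copies value and version from the DHT entry if this near copy is new or stale. */
        void initializeFrom(DhtEntry dht) {
            lock.lock();
            try {
                if (obsolete)
                    throw new IllegalStateException("Entry is obsolete"); // Analogue of checkObsolete().

                boolean isNew = version < 0;
                boolean stale = version < dht.version;

                if (isNew || stale) {
                    value = dht.value;
                    version = dht.version;
                }
            }
            finally {
                lock.unlock();
            }
        }

        @Override public String toString() {
            return value + "@" + version;
        }
    }

    public static void main(String[] args) {
        NearEntry near = new NearEntry();
        near.initializeFrom(new DhtEntry("v1", 5));
        near.initializeFrom(new DhtEntry("v0", 3)); // Older version: not new, not stale, so no downgrade.
        System.out.println(near); // v1@5
    }
}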

Example 25 with GridDhtCacheEntry

Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.

From class IgniteTxHandler, method mvccEnlistBatch.

/**
 * Writes updated values on the backup node.
 *
 * @param tx Transaction.
 * @param ctx Cache context.
 * @param op Operation.
 * @param keys Keys.
 * @param vals Values sent from the primary node.
 * @param snapshot Mvcc snapshot.
 * @param batchNum Batch number.
 * @param futId Future id.
 * @throws IgniteCheckedException If failed.
 */
public void mvccEnlistBatch(GridDhtTxRemote tx, GridCacheContext ctx, EnlistOperation op, List<KeyCacheObject> keys, List<Message> vals, MvccSnapshot snapshot, IgniteUuid futId, int batchNum) throws IgniteCheckedException {
    assert keys != null && (vals == null || vals.size() == keys.size());
    assert tx != null;
    GridDhtCacheAdapter dht = ctx.dht();
    tx.addActiveCache(ctx, false);
    for (int i = 0; i < keys.size(); i++) {
        KeyCacheObject key = keys.get(i);
        assert key != null;
        int part = ctx.affinity().partition(key);
        try {
            GridDhtLocalPartition locPart = ctx.topology().localPartition(part, tx.topologyVersion(), false);
            if (locPart != null && locPart.reserve()) {
                try {
                    // Skip renting partitions.
                    if (locPart.state() == RENTING) {
                        tx.addInvalidPartition(ctx.cacheId(), part);
                        continue;
                    }
                    CacheObject val = null;
                    EntryProcessor entryProc = null;
                    Object[] invokeArgs = null;
                    boolean needOldVal = tx.txState().useMvccCaching(ctx.cacheId());
                    Message val0 = vals != null ? vals.get(i) : null;
                    CacheEntryInfoCollection entries = val0 instanceof CacheEntryInfoCollection ? (CacheEntryInfoCollection) val0 : null;
                    if (entries == null && !op.isDeleteOrLock() && !op.isInvoke())
                        val = (val0 instanceof CacheObject) ? (CacheObject) val0 : null;
                    if (entries == null && op.isInvoke()) {
                        assert val0 instanceof GridInvokeValue;
                        GridInvokeValue invokeVal = (GridInvokeValue) val0;
                        entryProc = invokeVal.entryProcessor();
                        invokeArgs = invokeVal.invokeArgs();
                    }
                    assert entries != null || entryProc != null || !op.isInvoke() : "entryProc=" + entryProc + ", op=" + op;
                    GridDhtCacheEntry entry = dht.entryExx(key, tx.topologyVersion());
                    GridCacheUpdateTxResult updRes;
                    while (true) {
                        ctx.shared().database().checkpointReadLock();
                        try {
                            if (entries == null) {
                                switch(op) {
                                    case DELETE:
                                        updRes = entry.mvccRemove(tx, ctx.localNodeId(), tx.topologyVersion(), snapshot, false, needOldVal, null, false);
                                        break;
                                    case INSERT:
                                    case TRANSFORM:
                                    case UPSERT:
                                    case UPDATE:
                                        updRes = entry.mvccSet(tx, ctx.localNodeId(), val, entryProc, invokeArgs, 0, tx.topologyVersion(), snapshot, op.cacheOperation(), false, false, needOldVal, null, false, false);
                                        break;
                                    default:
                                        throw new IgniteSQLException("Cannot acquire lock for operation [op=" + op + "]. Operation is unsupported at the moment.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
                                }
                            } else {
                                updRes = entry.mvccUpdateRowsWithPreloadInfo(tx, ctx.localNodeId(), tx.topologyVersion(), entries.infos(), op.cacheOperation(), snapshot, futId, batchNum);
                            }
                            break;
                        } catch (GridCacheEntryRemovedException ignore) {
                            entry = dht.entryExx(key);
                        } finally {
                            ctx.shared().database().checkpointReadUnlock();
                        }
                    }
                    if (!updRes.filtered())
                        ctx.shared().mvccCaching().addEnlisted(key, updRes.newValue(), 0, 0, tx.xidVersion(), updRes.oldValue(), tx.local(), tx.topologyVersion(), snapshot, ctx.cacheId(), tx, futId, batchNum);
                    assert updRes.updateFuture() == null : "Entry should not be locked on the backup";
                } finally {
                    locPart.release();
                }
            } else
                tx.addInvalidPartition(ctx.cacheId(), part);
        } catch (GridDhtInvalidPartitionException e) {
            tx.addInvalidPartition(ctx.cacheId(), e.partition());
        }
    }
}
Also used : GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException) GridDhtCacheAdapter(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter) GridInvokeValue(org.apache.ignite.internal.processors.cache.distributed.dht.GridInvokeValue) GridCacheMessage(org.apache.ignite.internal.processors.cache.GridCacheMessage) Message(org.apache.ignite.plugin.extensions.communication.Message) PartitionUpdateCountersMessage(org.apache.ignite.internal.processors.cache.distributed.dht.PartitionUpdateCountersMessage) EntryProcessor(javax.cache.processor.EntryProcessor) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) CacheEntryInfoCollection(org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection) GridCacheUpdateTxResult(org.apache.ignite.internal.processors.cache.GridCacheUpdateTxResult) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) GridDhtLocalPartition(org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition)
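
mvccEnlistBatch wraps every per-key update in a partition reservation: the local partition is reserved for the duration of the update, RENTING (evicting) partitions are skipped and recorded as invalid, and the reservation is always released in a finally block. The sketch below captures that reserve/skip/release discipline; Partition, State and updateKey are simplified stand-ins, not Ignite's GridDhtLocalPartition API.

import java.util.concurrent.atomic.AtomicInteger;

public class PartitionReservationSketch {
    enum State { OWNING, RENTING }

    /** Simplified partition with a reservation counter (stand-in for a real local partition). */
    static class Partition {
        final int id;
        volatile State state = State.OWNING;
        private final AtomicInteger reservations = new AtomicInteger();

        Partition(int id) { this.id = id; }

        /** Reserves the partition so it cannot be evicted while the update runs. */
        boolean reserve() {
            reservations.incrementAndGet();
            return true; // Real code can refuse, e.g. if the partition is already gone.
        }

        void release() {
            reservations.decrementAndGet();
        }
    }

    /** Applies an update to a key only while its partition is safely reserved. */
    static void updateKey(Partition part, String key, String val) {
        if (part == null || !part.reserve()) {
            System.out.println("invalid partition for " + key);
            return;
        }
        try {
            // Skip partitions that are being evicted to another node.
            if (part.state == State.RENTING) {
                System.out.println("skip renting partition " + part.id + " for " + key);
                return;
            }
            System.out.println("update " + key + "=" + val + " in partition " + part.id);
        }
        finally {
            part.release(); // Always undo the reservation, even on skip or failure.
        }
    }

    public static void main(String[] args) {
        Partition p = new Partition(0);
        updateKey(p, "k1", "v1");
        p.state = State.RENTING;
        updateKey(p, "k2", "v2");
    }
}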

Aggregations

GridDhtCacheEntry (org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry): 27
GridCacheEntryRemovedException (org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException): 16
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 15
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 13
CacheObject (org.apache.ignite.internal.processors.cache.CacheObject): 11
ArrayList (java.util.ArrayList): 10
ClusterNode (org.apache.ignite.cluster.ClusterNode): 9
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 9
GridCacheVersion (org.apache.ignite.internal.processors.cache.version.GridCacheVersion): 9
GridTimeoutObject (org.apache.ignite.internal.processors.timeout.GridTimeoutObject): 9
IgniteBiTuple (org.apache.ignite.lang.IgniteBiTuple): 9
GridCacheOperation (org.apache.ignite.internal.processors.cache.GridCacheOperation): 8
CacheStorePartialUpdateException (org.apache.ignite.internal.processors.cache.CacheStorePartialUpdateException): 7
NodeStoppingException (org.apache.ignite.internal.NodeStoppingException): 6
GridCacheReturn (org.apache.ignite.internal.processors.cache.GridCacheReturn): 6
GridCacheUpdateAtomicResult (org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult): 6
GridDhtInvalidPartitionException (org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException): 6
IgniteException (org.apache.ignite.IgniteException): 5
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 5
IgniteOutOfMemoryException (org.apache.ignite.internal.mem.IgniteOutOfMemoryException): 5