
Example 26 with IgniteRunnable

Use of org.apache.ignite.lang.IgniteRunnable in project ignite by apache.

Class GridDhtAtomicCache, method updateAllAsyncInternal0.

/**
     * Executes local update after preloader fetched values.
     *
     * @param nodeId Node ID.
     * @param req Update request.
     * @param completionCb Completion callback.
     */
private void updateAllAsyncInternal0(UUID nodeId, GridNearAtomicAbstractUpdateRequest req, UpdateReplyClosure completionCb) {
    ClusterNode node = ctx.discovery().node(nodeId);
    if (node == null) {
        U.warn(msgLog, "Skip near update request, node originated update request left [" + "futId=" + req.futureId() + ", node=" + nodeId + ']');
        return;
    }
    GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), nodeId, req.futureId(), req.partition(), false, ctx.deploymentEnabled());
    assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);
    GridDhtAtomicAbstractUpdateFuture dhtFut = null;
    boolean remap = false;
    String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());
    IgniteCacheExpiryPolicy expiry = null;
    try {
        // If batch store update is enabled, we need to lock all entries.
        // First, need to acquire locks on cache entries, then check filter.
        List<GridDhtCacheEntry> locked = null;
        Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
        try {
            GridDhtPartitionTopology top = topology();
            top.readLock();
            try {
                if (top.stopping()) {
                    res.addFailedKeys(req.keys(), new IgniteCheckedException("Failed to perform cache operation " + "(cache is stopped): " + name()));
                    completionCb.apply(req, res);
                    return;
                }
                // Do not check topology version if topology was locked on near node by
                // external transaction or explicit lock.
                if (req.topologyLocked() || !needRemap(req.topologyVersion(), top.topologyVersion())) {
                    ctx.shared().database().ensureFreeSpace(ctx.memoryPolicy());
                    locked = lockEntries(req, req.topologyVersion());
                    boolean hasNear = ctx.discovery().cacheNearNode(node, name());
                    // Assign next version for update inside entries lock.
                    GridCacheVersion ver = ctx.versions().next(top.topologyVersion());
                    if (hasNear)
                        res.nearVersion(ver);
                    if (msgLog.isDebugEnabled()) {
                        msgLog.debug("Assigned update version [futId=" + req.futureId() + ", writeVer=" + ver + ']');
                    }
                    assert ver != null : "Got null version for update request: " + req;
                    boolean sndPrevVal = !top.rebalanceFinished(req.topologyVersion());
                    dhtFut = createDhtFuture(ver, req);
                    expiry = expiryPolicy(req.expiry());
                    GridCacheReturn retVal = null;
                    if (req.size() > 1 &&                       // Several keys ...
                        writeThrough() && !req.skipStore() &&   // and store is enabled ...
                        !ctx.store().isLocal() &&               // and this is not local store ...
                                                                // (conflict resolver should be used for local store)
                        !ctx.dr().receiveEnabled()) {           // and no DR.
                        // This method can only be used when there are no replicated entries in the batch.
                        UpdateBatchResult updRes = updateWithBatch(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
                        deleted = updRes.deleted();
                        dhtFut = updRes.dhtFuture();
                        if (req.operation() == TRANSFORM)
                            retVal = updRes.invokeResults();
                    } else {
                        UpdateSingleResult updRes = updateSingle(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
                        retVal = updRes.returnValue();
                        deleted = updRes.deleted();
                        dhtFut = updRes.dhtFuture();
                    }
                    if (retVal == null)
                        retVal = new GridCacheReturn(ctx, node.isLocal(), true, null, true);
                    res.returnValue(retVal);
                    if (dhtFut != null) {
                        // To avoid deadlock disable back-pressure for sender data node.
                        if (req.writeSynchronizationMode() == PRIMARY_SYNC &&
                            !ctx.discovery().cacheAffinityNode(ctx.discovery().node(nodeId), ctx.name()) &&
                            !dhtFut.isDone()) {
                            final IgniteRunnable tracker = GridNioBackPressureControl.threadTracker();
                            if (tracker != null && tracker instanceof GridNioMessageTracker) {
                                ((GridNioMessageTracker) tracker).onMessageReceived();
                                dhtFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {

                                    @Override
                                    public void apply(IgniteInternalFuture<Void> fut) {
                                        ((GridNioMessageTracker) tracker).onMessageProcessed();
                                    }
                                });
                            }
                        }
                        ctx.mvcc().addAtomicFuture(dhtFut.id(), dhtFut);
                    }
                } else {
                    // Should remap all keys.
                    remap = true;
                    res.remapTopologyVersion(top.topologyVersion());
                }
            } finally {
                top.readUnlock();
            }
        } catch (GridCacheEntryRemovedException e) {
            assert false : "Entry should not become obsolete while holding lock.";
            e.printStackTrace();
        } finally {
            if (locked != null)
                unlockEntries(locked, req.topologyVersion());
            // Enqueue if necessary after locks release.
            if (deleted != null) {
                assert !deleted.isEmpty();
                assert ctx.deferredDelete() : this;
                for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted)
                    ctx.onDeferredDelete(e.get1(), e.get2());
            }
            // TODO fire events only after successful fsync
            if (ctx.shared().wal() != null)
                ctx.shared().wal().fsync(null);
        }
    } catch (GridDhtInvalidPartitionException ignore) {
        if (log.isDebugEnabled())
            log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);
        remap = true;
        res.remapTopologyVersion(ctx.topology().topologyVersion());
    } catch (Throwable e) {
        // At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
        // an attempt to use cleaned resources.
        U.error(log, "Unexpected exception during cache update", e);
        res.addFailedKeys(req.keys(), e);
        completionCb.apply(req, res);
        if (e instanceof Error)
            throw (Error) e;
        return;
    }
    if (remap) {
        assert dhtFut == null;
        completionCb.apply(req, res);
    } else if (dhtFut != null)
        dhtFut.map(node, res.returnValue(), res, completionCb);
    if (req.writeSynchronizationMode() != FULL_ASYNC)
        req.cleanup(!node.isLocal());
    sendTtlUpdateRequest(expiry);
}
Also used : IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) GridDhtPartitionTopology(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology) IgniteInternalFuture(org.apache.ignite.internal.IgniteInternalFuture) GridNioMessageTracker(org.apache.ignite.internal.util.nio.GridNioMessageTracker) IgniteRunnable(org.apache.ignite.lang.IgniteRunnable) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) ClusterNode(org.apache.ignite.cluster.ClusterNode) GridDhtInvalidPartitionException(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException) GridCacheReturn(org.apache.ignite.internal.processors.cache.GridCacheReturn) GridDhtCacheEntry(org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry) IgniteCacheExpiryPolicy(org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy)
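
The key IgniteRunnable usage above is the back-pressure tracker: the excerpt listens on the DHT future and releases the tracker only once the update has been processed. A rough public-API analogue of that listen-then-release pattern is sketched below; it assumes Ignite 2.x, where IgniteCompute#runAsync and IgniteFuture#listen are available, and the class name and messages are illustrative only.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCompute;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.lang.IgniteRunnable;

public class ListenAfterRunSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
            IgniteCompute compute = ignite.compute();

            // Submit the job without blocking the caller.
            IgniteFuture<Void> fut = compute.runAsync(new IgniteRunnable() {
                @Override public void run() {
                    System.out.println(">>> Job body executed on some cluster node.");
                }
            });

            // React only after the job completes, analogous to calling
            // onMessageProcessed() from the future listener in the excerpt.
            fut.listen(f -> System.out.println(">>> Job finished, tracked resources can be released."));

            // Block so the node is not stopped while the job is still running.
            fut.get();
        }
    }
}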

Example 27 with IgniteRunnable

Use of org.apache.ignite.lang.IgniteRunnable in project ignite by apache.

Class CacheAffinityExample, method visitUsingAffinityRun.

/**
     * Collocates jobs with keys they need to work on using
     * {@link IgniteCompute#affinityRun(String, Object, IgniteRunnable)} method.
     */
private static void visitUsingAffinityRun() {
    Ignite ignite = Ignition.ignite();
    final IgniteCache<Integer, String> cache = ignite.cache(CACHE_NAME);
    for (int i = 0; i < KEY_CNT; i++) {
        final int key = i;
        // This runnable will execute on the remote node where
        // data with the given key is located. Since it will be co-located
        // we can use local 'peek' operation safely.
        ignite.compute().affinityRun(CACHE_NAME, key, new IgniteRunnable() {

            @Override
            public void run() {
                // Peek is a local memory lookup, however, value should never be 'null'
                // as we are co-located with node that has a given key.
                System.out.println("Co-located using affinityRun [key= " + key + ", value=" + cache.localPeek(key) + ']');
            }
        });
    }
}
Also used : Ignite(org.apache.ignite.Ignite) IgniteRunnable(org.apache.ignite.lang.IgniteRunnable)
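
Because IgniteRunnable declares a single abstract method, the same collocated job can be written as a lambda on Java 8+. This is a minimal sketch reusing CACHE_NAME, KEY_CNT, ignite and cache from the example above; behavior is intended to be identical to the anonymous class version.

    for (int i = 0; i < KEY_CNT; i++) {
        final int key = i;

        // The closure is serialized and executed on the node that owns 'key',
        // so the local peek is expected to find the value.
        ignite.compute().affinityRun(CACHE_NAME, key,
            () -> System.out.println("Co-located using affinityRun [key=" + key + ", value=" + cache.localPeek(key) + ']'));
    }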

Example 28 with IgniteRunnable

Use of org.apache.ignite.lang.IgniteRunnable in project ignite by apache.

Class CacheAffinityExample, method visitUsingMapKeysToNodes.

/**
     * Collocates jobs with keys they need to work on using {@link Affinity#mapKeysToNodes(Collection)}
     * method. The difference from {@code affinityRun(...)} method is that here we process multiple keys
     * in a single job.
     */
private static void visitUsingMapKeysToNodes() {
    final Ignite ignite = Ignition.ignite();
    Collection<Integer> keys = new ArrayList<>(KEY_CNT);
    for (int i = 0; i < KEY_CNT; i++) keys.add(i);
    // Map all keys to nodes.
    Map<ClusterNode, Collection<Integer>> mappings = ignite.<Integer>affinity(CACHE_NAME).mapKeysToNodes(keys);
    for (Map.Entry<ClusterNode, Collection<Integer>> mapping : mappings.entrySet()) {
        ClusterNode node = mapping.getKey();
        final Collection<Integer> mappedKeys = mapping.getValue();
        if (node != null) {
            // Bring computations to the nodes where the data resides (i.e. collocation).
            ignite.compute(ignite.cluster().forNode(node)).run(new IgniteRunnable() {

                @Override
                public void run() {
                    IgniteCache<Integer, String> cache = ignite.cache(CACHE_NAME);
                    // Peek is a local memory lookup, however, value should never be 'null'
                    // as we are co-located with node that has a given key.
                    for (Integer key : mappedKeys)
                        System.out.println("Co-located using mapKeysToNodes [key= " + key + ", value=" + cache.localPeek(key) + ']');
                }
            });
        }
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) ArrayList(java.util.ArrayList) IgniteCache(org.apache.ignite.IgniteCache) IgniteRunnable(org.apache.ignite.lang.IgniteRunnable) Collection(java.util.Collection) Ignite(org.apache.ignite.Ignite) Map(java.util.Map)
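
To bring values back to the caller instead of printing them on the remote node, the callable counterpart of IgniteRunnable can be used with IgniteCompute#affinityCall (import org.apache.ignite.lang.IgniteCallable). The sketch below reuses ignite, CACHE_NAME and KEY_CNT from the example above and deliberately goes back to one key per job, which is exactly the trade-off the javadoc describes.

    for (int i = 0; i < KEY_CNT; i++) {
        final int key = i;

        // Executed on the node owning 'key'; the return value travels back to the caller.
        String val = ignite.compute().affinityCall(CACHE_NAME, key, new IgniteCallable<String>() {

            @Override
            public String call() {
                return ignite.<Integer, String>cache(CACHE_NAME).localPeek(key);
            }
        });

        System.out.println("Fetched via affinityCall [key=" + key + ", value=" + val + ']');
    }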

Example 29 with IgniteRunnable

Use of org.apache.ignite.lang.IgniteRunnable in project ignite by apache.

Class ComputeRunnableExample, method main.

/**
     * Executes example.
     *
     * @param args Command line arguments, none required.
     * @throws IgniteException If example execution failed.
     */
public static void main(String[] args) throws IgniteException {
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        System.out.println();
        System.out.println("Compute runnable example started.");
        IgniteCompute compute = ignite.compute();
        // Iterate through all words in the sentence and create runnable jobs.
        for (final String word : "Print words using runnable".split(" ")) {
            // Execute runnable on some node.
            compute.run(new IgniteRunnable() {

                @Override
                public void run() {
                    System.out.println();
                    System.out.println(">>> Printing '" + word + "' on this node from ignite job.");
                }
            });
        }
        System.out.println();
        System.out.println(">>> Finished printing words using runnable execution.");
        System.out.println(">>> Check all nodes for output (this node is also part of the cluster).");
    }
}
Also used : Ignite(org.apache.ignite.Ignite) IgniteCompute(org.apache.ignite.IgniteCompute) IgniteRunnable(org.apache.ignite.lang.IgniteRunnable)
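
A closely related pattern is broadcasting one IgniteRunnable to every node instead of letting the load balancer pick a single node per job. This is a minimal sketch using IgniteCompute#broadcast, assuming it runs inside the same try-with-resources block as the example above.

        // Runs the job on every node of the cluster group backing 'compute'
        // (here the whole cluster), not just on one balanced node.
        compute.broadcast(new IgniteRunnable() {

            @Override
            public void run() {
                System.out.println(">>> Broadcast job executed on this node.");
            }
        });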

Example 30 with IgniteRunnable

Use of org.apache.ignite.lang.IgniteRunnable in project ignite by apache.

Class IgniteExecutorServiceExample, method main.

/**
     * Executes example.
     *
     * @param args Command line arguments, none required.
     * @throws Exception If example execution failed.
     */
@SuppressWarnings({ "TooBroadScope" })
public static void main(String[] args) throws Exception {
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        System.out.println();
        System.out.println(">>> Compute executor service example started.");
        // Get ignite-enabled executor service.
        ExecutorService exec = ignite.executorService();
        // Iterate through all words in the sentence and create runnable jobs.
        for (final String word : "Print words using runnable".split(" ")) {
            // Execute runnable on some node.
            exec.submit(new IgniteRunnable() {

                @Override
                public void run() {
                    System.out.println();
                    System.out.println(">>> Printing '" + word + "' on this node from ignite job.");
                }
            });
        }
        exec.shutdown();
        // Wait for all jobs to complete (0 means no limit).
        exec.awaitTermination(0, TimeUnit.MILLISECONDS);
        System.out.println();
        System.out.println(">>> Check all nodes for output (this node is also part of the cluster).");
    }
}
Also used : ExecutorService(java.util.concurrent.ExecutorService) Ignite(org.apache.ignite.Ignite) IgniteRunnable(org.apache.ignite.lang.IgniteRunnable)
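
The distributed executor service can also be restricted to a subset of the cluster, and submit(...) returns a regular java.util.concurrent.Future. A minimal sketch, assuming it runs inside the same main method as above (which declares throws Exception), that at least one remote node is up, and that java.util.concurrent.Future is imported:

        // Ignite-enabled executor service that routes jobs to remote nodes only.
        ExecutorService remoteExec = ignite.executorService(ignite.cluster().forRemotes());

        Future<?> fut = remoteExec.submit(new IgniteRunnable() {

            @Override
            public void run() {
                System.out.println(">>> Executed on a remote node.");
            }
        });

        // Wait for this particular job instead of shutting the whole service down.
        fut.get();

        remoteExec.shutdown();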

Aggregations

IgniteRunnable (org.apache.ignite.lang.IgniteRunnable) - 45 usages
Ignite (org.apache.ignite.Ignite) - 21 usages
IgniteException (org.apache.ignite.IgniteException) - 11 usages
IgniteEx (org.apache.ignite.internal.IgniteEx) - 8 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) - 7 usages
IgniteInstanceResource (org.apache.ignite.resources.IgniteInstanceResource) - 7 usages
ArrayList (java.util.ArrayList) - 6 usages
IOException (java.io.IOException) - 5 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException) - 5 usages
ClusterNode (org.apache.ignite.cluster.ClusterNode) - 5 usages
UUID (java.util.UUID) - 4 usages
CountDownLatch (java.util.concurrent.CountDownLatch) - 4 usages
IgniteCompute (org.apache.ignite.IgniteCompute) - 4 usages
TaskEvent (org.apache.ignite.events.TaskEvent) - 4 usages
IgniteFuture (org.apache.ignite.lang.IgniteFuture) - 4 usages
RejectedExecutionException (java.util.concurrent.RejectedExecutionException) - 3 usages
Event (org.apache.ignite.events.Event) - 3 usages
GridAbsPredicate (org.apache.ignite.internal.util.lang.GridAbsPredicate) - 3 usages
Collection (java.util.Collection) - 2 usages
RuntimeCamelException (org.apache.camel.RuntimeCamelException) - 2 usages