
Example 11 with GridFutureAdapter

Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

The class GridServiceProcessor, method cancelFutures.

/**
     * @param futs Map of futures to complete.
     * @param err Exception to complete the futures with.
     */
private void cancelFutures(ConcurrentMap<String, ? extends GridFutureAdapter<?>> futs, Exception err) {
    for (Map.Entry<String, ? extends GridFutureAdapter<?>> entry : futs.entrySet()) {
        GridFutureAdapter<?> fut = entry.getValue();

        // Complete the future exceptionally, releasing any threads blocked on it.
        fut.onDone(err);

        // Remove the entry only if it still maps to this exact future.
        futs.remove(entry.getKey(), fut);
    }
}
Also used: GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) IgniteSystemProperties.getString(org.apache.ignite.IgniteSystemProperties.getString) Map(java.util.Map) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap)
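
For context, here is a minimal caller-side sketch (hypothetical, not taken from the Ignite sources; it assumes only ignite-core on the classpath) of what a thread waiting on one of these futures observes once cancelFutures completes it with an error:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class CancelFuturesDemo {
    public static void main(String[] args) {
        // Hypothetical map of per-service futures, mirroring the futs argument above.
        ConcurrentMap<String, GridFutureAdapter<Object>> futs = new ConcurrentHashMap<>();

        GridFutureAdapter<Object> fut = new GridFutureAdapter<>();
        futs.put("svc-1", fut);

        // Complete the future exceptionally, as cancelFutures(...) does.
        fut.onDone(new IgniteCheckedException("Node is stopping."));
        futs.remove("svc-1", fut);

        try {
            fut.get(); // A waiter blocked here is released with the error.
        } catch (IgniteCheckedException e) {
            System.out.println("Future failed: " + e.getMessage());
        }
    }
}

The two-argument ConcurrentMap.remove(key, value) in the loop above keeps the cleanup safe against a concurrent writer that has already replaced the entry with a fresh future.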

Example 12 with GridFutureAdapter

Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

The class HadoopShuffleJob, method flush.

/**
     * @return Future that completes when all pending shuffle messages have been flushed.
     * @throws IgniteCheckedException If failed.
     */
@SuppressWarnings("unchecked")
public IgniteInternalFuture<?> flush() throws IgniteCheckedException {
    if (log.isDebugEnabled())
        log.debug("Flushing job " + job.id() + " on address " + locReduceAddr);
    flushed = true;
    if (totalReducerCnt == 0)
        return new GridFinishedFuture<>();
    if (!stripeMappers) {
        U.await(ioInitLatch);
        GridWorker snd0 = snd;
        if (snd0 != null) {
            if (log.isDebugEnabled())
                log.debug("Cancelling sender thread.");
            snd0.cancel();
            try {
                snd0.join();
                if (log.isDebugEnabled())
                    log.debug("Finished waiting for sending thread to complete on shuffle job flush: " + job.id());
            } catch (InterruptedException e) {
                throw new IgniteInterruptedCheckedException(e);
            }
        }
        // With flush.
        collectUpdatesAndSend(true);
        if (log.isDebugEnabled())
            log.debug("Finished sending collected updates to remote reducers: " + job.id());
    }
    GridCompoundFuture fut = new GridCompoundFuture<>();
    if (embedded) {
        boolean sent = false;
        for (Map.Entry<T, HadoopShuffleRemoteState> rmtStateEntry : remoteShuffleStates().entrySet()) {
            T dest = rmtStateEntry.getKey();
            HadoopShuffleRemoteState rmtState = rmtStateEntry.getValue();
            HadoopShuffleFinishRequest req = new HadoopShuffleFinishRequest(job.id(), rmtState.messageCount());
            io.apply(dest, req);
            if (log.isDebugEnabled())
                log.debug("Sent shuffle finish request [jobId=" + job.id() + ", dest=" + dest + ", req=" + req + ']');
            fut.add(rmtState.future());
            sent = true;
        }
        if (sent)
            fut.markInitialized();
        else
            return new GridFinishedFuture<>();
    } else {
        for (IgniteBiTuple<HadoopShuffleMessage, GridFutureAdapter<?>> tup : sentMsgs.values())
            fut.add(tup.get2());

        fut.markInitialized();

        if (log.isDebugEnabled())
            log.debug("Collected futures into compound future for flush: " + sentMsgs.size());
    }
    return fut;
}
Also used: GridWorker(org.apache.ignite.internal.util.worker.GridWorker) GridCompoundFuture(org.apache.ignite.internal.util.future.GridCompoundFuture) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) SHUFFLE_MAPPER_STRIPED_OUTPUT(org.apache.ignite.internal.processors.hadoop.HadoopJobProperty.SHUFFLE_MAPPER_STRIPED_OUTPUT) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap)
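
The compound-future pattern used here is generic: child futures are added with add(), markInitialized() signals that no more children will arrive, and the compound future completes only once every registered child has completed. A standalone sketch under the same ignite-core-only assumption:

import org.apache.ignite.internal.util.future.GridCompoundFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class CompoundFlushDemo {
    public static void main(String[] args) {
        GridCompoundFuture<Object, Object> fut = new GridCompoundFuture<>();

        // Stand-ins for the per-message acknowledgement futures collected by flush().
        GridFutureAdapter<Object> msg1 = new GridFutureAdapter<>();
        GridFutureAdapter<Object> msg2 = new GridFutureAdapter<>();

        fut.add(msg1);
        fut.add(msg2);

        // Without this call the compound future never completes,
        // even after all of its children are done.
        fut.markInitialized();

        msg1.onDone();
        System.out.println(fut.isDone()); // false: msg2 is still pending.

        msg2.onDone();
        System.out.println(fut.isDone()); // true
    }
}

Forgetting markInitialized() is the classic pitfall with this class, which is why flush() invokes it on every path that returns the compound future.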

Example 13 with GridFutureAdapter

Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

The class HadoopJobTracker, method processJobMetaUpdate.

/**
     * @param jobId Job ID.
     * @param meta Job metadata.
     * @param locNodeId Local node ID.
     * @throws IgniteCheckedException If failed.
     */
private void processJobMetaUpdate(HadoopJobId jobId, HadoopJobMetadata meta, UUID locNodeId) throws IgniteCheckedException {
    JobLocalState state = activeJobs.get(jobId);
    HadoopJobEx job = job(jobId, meta.jobInfo());
    HadoopMapReducePlan plan = meta.mapReducePlan();
    switch(meta.phase()) {
        case PHASE_SETUP:
            {
                if (ctx.jobUpdateLeader()) {
                    Collection<HadoopTaskInfo> setupTask = setupTask(jobId);
                    if (setupTask != null)
                        ctx.taskExecutor().run(job, setupTask);
                }
                break;
            }
        case PHASE_MAP:
            {
                // Check if we should initiate new task on local node.
                Collection<HadoopTaskInfo> tasks = mapperTasks(plan.mappers(locNodeId), meta);
                if (tasks != null)
                    ctx.taskExecutor().run(job, tasks);
                break;
            }
        case PHASE_REDUCE:
            {
                if (meta.pendingReducers().isEmpty() && ctx.jobUpdateLeader()) {
                    HadoopTaskInfo info = new HadoopTaskInfo(COMMIT, jobId, 0, 0, null);
                    if (log.isDebugEnabled())
                        log.debug("Submitting COMMIT task for execution [locNodeId=" + locNodeId + ", jobId=" + jobId + ']');
                    ctx.taskExecutor().run(job, Collections.singletonList(info));
                    break;
                }
                Collection<HadoopTaskInfo> tasks = reducerTasks(plan.reducers(locNodeId), job);
                if (tasks != null)
                    ctx.taskExecutor().run(job, tasks);
                break;
            }
        case PHASE_CANCELLING:
            {
                // Prevent multiple task executor notifications.
                if (state != null && state.onCancel()) {
                    if (log.isDebugEnabled())
                        log.debug("Cancelling local task execution for job: " + meta);
                    ctx.taskExecutor().cancelTasks(jobId);
                }
                if (meta.pendingSplits().isEmpty() && meta.pendingReducers().isEmpty()) {
                    if (ctx.jobUpdateLeader()) {
                        if (state == null)
                            state = initState(jobId);
                        // Prevent running multiple abort tasks.
                        if (state.onAborted()) {
                            HadoopTaskInfo info = new HadoopTaskInfo(ABORT, jobId, 0, 0, null);
                            if (log.isDebugEnabled())
                                log.debug("Submitting ABORT task for execution [locNodeId=" + locNodeId + ", jobId=" + jobId + ']');
                            ctx.taskExecutor().run(job, Collections.singletonList(info));
                        }
                    }
                    break;
                } else {
                    // Check if there are unscheduled mappers or reducers.
                    Collection<HadoopInputSplit> cancelMappers = new ArrayList<>();
                    Collection<Integer> cancelReducers = new ArrayList<>();
                    Collection<HadoopInputSplit> mappers = plan.mappers(ctx.localNodeId());
                    if (mappers != null) {
                        for (HadoopInputSplit b : mappers) {
                            if (state == null || !state.mapperScheduled(b))
                                cancelMappers.add(b);
                        }
                    }
                    int[] rdc = plan.reducers(ctx.localNodeId());
                    if (rdc != null) {
                        for (int r : rdc) {
                            if (state == null || !state.reducerScheduled(r))
                                cancelReducers.add(r);
                        }
                    }
                    if (!cancelMappers.isEmpty() || !cancelReducers.isEmpty())
                        transform(jobId, new CancelJobProcessor(null, cancelMappers, cancelReducers));
                }
                break;
            }
        case PHASE_COMPLETE:
            {
                if (log.isDebugEnabled())
                    log.debug("Job execution is complete, will remove local state from active jobs " + "[jobId=" + jobId + ", meta=" + meta + ']');
                if (state != null) {
                    state = activeJobs.remove(jobId);
                    assert state != null;
                    ctx.shuffle().jobFinished(jobId);
                }
                GridFutureAdapter<HadoopJobId> finishFut = activeFinishFuts.remove(jobId);
                if (finishFut != null) {
                    if (log.isDebugEnabled())
                        log.debug("Completing job future [locNodeId=" + locNodeId + ", meta=" + meta + ']');
                    finishFut.onDone(jobId, meta.failCause());
                }
                assert job != null;
                if (ctx.jobUpdateLeader())
                    job.cleanupStagingDirectory();
                jobs.remove(jobId);
                if (ctx.jobUpdateLeader()) {
                    ClassLoader ldr = job.getClass().getClassLoader();
                    try {
                        String statWriterClsName = job.info().property(HadoopCommonUtils.JOB_COUNTER_WRITER_PROPERTY);
                        if (statWriterClsName != null) {
                            Class<?> cls = ldr.loadClass(statWriterClsName);
                            HadoopCounterWriter writer = (HadoopCounterWriter) cls.newInstance();
                            HadoopCounters cntrs = meta.counters();
                            writer.write(job, cntrs);
                        }
                    } catch (Exception e) {
                        log.error("Can't write statistic due to: ", e);
                    }
                }
                job.dispose(false);
                break;
            }
        default:
            throw new IllegalStateException("Unknown phase: " + meta.phase());
    }
}
Also used: HadoopCounterWriter(org.apache.ignite.internal.processors.hadoop.counter.HadoopCounterWriter) HadoopInputSplit(org.apache.ignite.hadoop.HadoopInputSplit) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) HadoopTaskCancelledException(org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException) HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) HadoopJobEx(org.apache.ignite.internal.processors.hadoop.HadoopJobEx) HadoopCounters(org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters) HadoopTaskInfo(org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) Collection(java.util.Collection) HadoopClassLoader(org.apache.ignite.internal.processors.hadoop.HadoopClassLoader)
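
In the PHASE_COMPLETE branch the finish future is resolved with finishFut.onDone(jobId, meta.failCause()): the two-argument onDone completes the future exceptionally when the error is non-null, and normally with the result otherwise. A small sketch (the job ID value is made up):

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class FinishFutureDemo {
    public static void main(String[] args) {
        GridFutureAdapter<String> finishFut = new GridFutureAdapter<>();

        String jobId = "job-1";     // Hypothetical job ID.
        Throwable failCause = null; // Non-null for a failed job.

        // Mirrors finishFut.onDone(jobId, meta.failCause()) above.
        finishFut.onDone(jobId, failCause);

        try {
            System.out.println("Job finished: " + finishFut.get());
        } catch (IgniteCheckedException e) {
            System.out.println("Job failed: " + e.getMessage());
        }
    }
}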

Example 14 with GridFutureAdapter

Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

The class HadoopJobTracker, method submit.

/**
     * Submits execution of a Hadoop job to the grid.
     *
     * @param jobId Job ID.
     * @param info Job info.
     * @return Job completion future.
     */
@SuppressWarnings("unchecked")
public IgniteInternalFuture<HadoopJobId> submit(HadoopJobId jobId, HadoopJobInfo info) {
    if (!busyLock.tryReadLock()) {
        return new GridFinishedFuture<>(new IgniteCheckedException("Failed to execute map-reduce job " + "(grid is stopping): " + info));
    }
    try {
        long jobPrepare = U.currentTimeMillis();
        if (jobs.containsKey(jobId) || jobMetaCache().containsKey(jobId))
            throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);
        HadoopJobEx job = job(jobId, info);
        HadoopMapReducePlan mrPlan = mrPlanner.preparePlan(job, ctx.nodes(), null);
        logPlan(info, mrPlan);
        HadoopJobMetadata meta = new HadoopJobMetadata(ctx.localNodeId(), jobId, info);
        meta.mapReducePlan(mrPlan);
        meta.pendingSplits(allSplits(mrPlan));
        meta.pendingReducers(allReducers(mrPlan));
        GridFutureAdapter<HadoopJobId> completeFut = new GridFutureAdapter<>();
        GridFutureAdapter<HadoopJobId> old = activeFinishFuts.put(jobId, completeFut);
        assert old == null : "Duplicate completion future [jobId=" + jobId + ", old=" + old + ']';
        if (log.isDebugEnabled())
            log.debug("Submitting job metadata [jobId=" + jobId + ", meta=" + meta + ']');
        long jobStart = U.currentTimeMillis();
        HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(meta.counters(), ctx.localNodeId());
        perfCntr.clientSubmissionEvents(info);
        perfCntr.onJobPrepare(jobPrepare);
        perfCntr.onJobStart(jobStart);
        if (jobMetaCache().getAndPutIfAbsent(jobId, meta) != null)
            throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);
        return completeFut;
    } catch (IgniteCheckedException e) {
        U.error(log, "Failed to submit job: " + jobId, e);
        return new GridFinishedFuture<>(e);
    } finally {
        busyLock.readUnlock();
    }
}
Also used: HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) HadoopJobEx(org.apache.ignite.internal.processors.hadoop.HadoopJobEx) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) HadoopPerformanceCounter(org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter) HadoopJobId(org.apache.ignite.internal.processors.hadoop.HadoopJobId) GridFinishedFuture(org.apache.ignite.internal.util.future.GridFinishedFuture)
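
On the caller side, the returned completion future is either awaited with get() or given a non-blocking callback via listen(). A hypothetical sketch with a stand-in future (in the real flow the future is created inside submit and completed by processJobMetaUpdate):

import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class SubmitFutureDemo {
    public static void main(String[] args) {
        // Stand-in for the future returned by submit(jobId, info).
        GridFutureAdapter<String> completeFut = new GridFutureAdapter<>();

        // Register a completion callback before the job finishes.
        completeFut.listen(f -> {
            try {
                System.out.println("Job completed: " + f.get());
            } catch (Exception e) {
                System.out.println("Job failed: " + e.getMessage());
            }
        });

        // In HadoopJobTracker this is fired from processJobMetaUpdate(...).
        completeFut.onDone("job-1");
    }
}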

Example 15 with GridFutureAdapter

Use of org.apache.ignite.internal.util.future.GridFutureAdapter in project ignite by apache.

The class GridTestUtils, method runAsync.

/**
     * Runs a runnable task asynchronously.
     *
     * @param task Runnable.
     * @param threadName Name for the thread executing the task.
     * @return Future with task result.
     */
@SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor")
public static IgniteInternalFuture runAsync(final Runnable task, String threadName) {
    if (!busyLock.enterBusy())
        throw new IllegalStateException("Failed to start new threads (test is being stopped).");
    try {
        final GridTestSafeThreadFactory thrFactory = new GridTestSafeThreadFactory(threadName);
        final GridFutureAdapter fut = createFutureAdapter(thrFactory);
        thrFactory.newThread(new Runnable() {

            @Override
            public void run() {
                try {
                    task.run();
                    fut.onDone();
                } catch (Throwable e) {
                    fut.onDone(e);
                }
            }
        }).start();
        return fut;
    } finally {
        busyLock.leaveBusy();
    }
}
Also used: GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter)
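
The same wrap-a-thread idiom works without the test-specific factory and busy lock. A simplified stand-in (the runAsync below is a stripped-down sketch, not the GridTestUtils method):

import org.apache.ignite.internal.IgniteInternalFuture;
import org.apache.ignite.internal.util.future.GridFutureAdapter;

public class RunAsyncDemo {
    static IgniteInternalFuture<?> runAsync(Runnable task, String threadName) {
        GridFutureAdapter<Object> fut = new GridFutureAdapter<>();

        new Thread(() -> {
            try {
                task.run();
                fut.onDone();  // Normal completion.
            } catch (Throwable e) {
                fut.onDone(e); // Propagate the failure to waiters.
            }
        }, threadName).start();

        return fut;
    }

    public static void main(String[] args) throws Exception {
        IgniteInternalFuture<?> fut = runAsync(() -> System.out.println("task ran"), "worker-1");
        fut.get(); // Blocks until the task completes; rethrows its failure, if any.
    }
}

Catching Throwable rather than Exception matters here: an assertion error thrown inside the task would otherwise kill the thread without ever completing the future.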

Aggregations

GridFutureAdapter (org.apache.ignite.internal.util.future.GridFutureAdapter): 30
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 19
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture): 8
Map (java.util.Map): 6
ConcurrentMap (java.util.concurrent.ConcurrentMap): 6
HashMap (java.util.HashMap): 5
UUID (java.util.UUID): 5
IgniteInterruptedCheckedException (org.apache.ignite.internal.IgniteInterruptedCheckedException): 5
AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion): 5
IgniteException (org.apache.ignite.IgniteException): 4
ArrayList (java.util.ArrayList): 3
List (java.util.List): 3
ClusterTopologyCheckedException (org.apache.ignite.internal.cluster.ClusterTopologyCheckedException): 3
CacheObject (org.apache.ignite.internal.processors.cache.CacheObject): 3
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 3
GridCompoundFuture (org.apache.ignite.internal.util.future.GridCompoundFuture): 3
Nullable (org.jetbrains.annotations.Nullable): 3
LinkedHashMap (java.util.LinkedHashMap): 2
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2
Ignite (org.apache.ignite.Ignite): 2