Example 1 with HadoopMapReducePlan

Use of org.apache.ignite.hadoop.HadoopMapReducePlan in project ignite by apache.

The class HadoopJobTracker, method submit.

/**
 * Submits execution of a Hadoop job to the grid.
 *
 * @param jobId Job ID.
 * @param info Job info.
 * @return Job completion future.
 */
@SuppressWarnings("unchecked")
public IgniteInternalFuture<HadoopJobId> submit(HadoopJobId jobId, HadoopJobInfo info) {
    if (!busyLock.tryReadLock()) {
        return new GridFinishedFuture<>(new IgniteCheckedException("Failed to execute map-reduce job (grid is stopping): " + info));
    }
    try {
        long jobPrepare = U.currentTimeMillis();
        if (jobs.containsKey(jobId) || jobMetaCache().containsKey(jobId))
            throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);
        HadoopJobEx job = job(jobId, info);
        HadoopMapReducePlan mrPlan = mrPlanner.preparePlan(job, ctx.nodes(), null);
        logPlan(info, mrPlan);
        HadoopJobMetadata meta = new HadoopJobMetadata(ctx.localNodeId(), jobId, info);
        meta.mapReducePlan(mrPlan);
        meta.pendingSplits(allSplits(mrPlan));
        meta.pendingReducers(allReducers(mrPlan));
        GridFutureAdapter<HadoopJobId> completeFut = new GridFutureAdapter<>();
        GridFutureAdapter<HadoopJobId> old = activeFinishFuts.put(jobId, completeFut);
        assert old == null : "Duplicate completion future [jobId=" + jobId + ", old=" + old + ']';
        if (log.isDebugEnabled())
            log.debug("Submitting job metadata [jobId=" + jobId + ", meta=" + meta + ']');
        long jobStart = U.currentTimeMillis();
        HadoopPerformanceCounter perfCntr = HadoopPerformanceCounter.getCounter(meta.counters(), ctx.localNodeId());
        perfCntr.clientSubmissionEvents(info);
        perfCntr.onJobPrepare(jobPrepare);
        perfCntr.onJobStart(jobStart);
        if (jobMetaCache().getAndPutIfAbsent(jobId, meta) != null)
            throw new IgniteCheckedException("Failed to submit job. Job with the same ID already exists: " + jobId);
        return completeFut;
    } catch (IgniteCheckedException e) {
        U.error(log, "Failed to submit job: " + jobId, e);
        return new GridFinishedFuture<>(e);
    } finally {
        busyLock.readUnlock();
    }
}
Also used : HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) HadoopJobEx(org.apache.ignite.internal.processors.hadoop.HadoopJobEx) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) HadoopPerformanceCounter(org.apache.ignite.internal.processors.hadoop.counter.HadoopPerformanceCounter) HadoopJobId(org.apache.ignite.internal.processors.hadoop.HadoopJobId) GridFinishedFuture(org.apache.ignite.internal.util.future.GridFinishedFuture)
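
For context, a caller blocks on the returned future to learn the job's outcome. A minimal usage sketch, assuming tracker is a reference to the running HadoopJobTracker and info is a prepared HadoopJobInfo (both are wiring assumptions for illustration):

// Sketch only: `tracker` (HadoopJobTracker) and `info` (HadoopJobInfo)
// are assumed to be obtained elsewhere.
HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);

IgniteInternalFuture<HadoopJobId> fut = tracker.submit(jobId, info);

try {
    // get() rethrows the submission error wrapped in GridFinishedFuture above,
    // or the job's failCause() once the job reaches its COMPLETE phase.
    fut.get();
}
catch (IgniteCheckedException e) {
    // Duplicate job ID, stopping grid, or a failed job.
}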

Example 2 with HadoopMapReducePlan

Use of org.apache.ignite.hadoop.HadoopMapReducePlan in project ignite by apache.

The class HadoopJobTracker, method processJobMetaUpdate.

/**
 * @param jobId Job ID.
 * @param meta Job metadata.
 * @param locNodeId Local node ID.
 * @throws IgniteCheckedException If failed.
 */
private void processJobMetaUpdate(HadoopJobId jobId, HadoopJobMetadata meta, UUID locNodeId) throws IgniteCheckedException {
    JobLocalState state = activeJobs.get(jobId);
    HadoopJobEx job = job(jobId, meta.jobInfo());
    HadoopMapReducePlan plan = meta.mapReducePlan();
    switch(meta.phase()) {
        case PHASE_SETUP:
            {
                if (ctx.jobUpdateLeader()) {
                    Collection<HadoopTaskInfo> setupTask = setupTask(jobId);
                    if (setupTask != null)
                        ctx.taskExecutor().run(job, setupTask);
                }
                break;
            }
        case PHASE_MAP:
            {
                // Check whether we should initiate a new task on the local node.
                Collection<HadoopTaskInfo> tasks = mapperTasks(plan.mappers(locNodeId), meta);
                if (tasks != null)
                    ctx.taskExecutor().run(job, tasks);
                break;
            }
        case PHASE_REDUCE:
            {
                if (meta.pendingReducers().isEmpty() && ctx.jobUpdateLeader()) {
                    HadoopTaskInfo info = new HadoopTaskInfo(COMMIT, jobId, 0, 0, null);
                    if (log.isDebugEnabled())
                        log.debug("Submitting COMMIT task for execution [locNodeId=" + locNodeId + ", jobId=" + jobId + ']');
                    ctx.taskExecutor().run(job, Collections.singletonList(info));
                    break;
                }
                Collection<HadoopTaskInfo> tasks = reducerTasks(plan.reducers(locNodeId), job);
                if (tasks != null)
                    ctx.taskExecutor().run(job, tasks);
                break;
            }
        case PHASE_CANCELLING:
            {
                // Prevent duplicate task executor notifications.
                if (state != null && state.onCancel()) {
                    if (log.isDebugEnabled())
                        log.debug("Cancelling local task execution for job: " + meta);
                    ctx.taskExecutor().cancelTasks(jobId);
                }
                if (meta.pendingSplits().isEmpty() && meta.pendingReducers().isEmpty()) {
                    if (ctx.jobUpdateLeader()) {
                        if (state == null)
                            state = initState(jobId);
                        // Prevent running multiple abort tasks.
                        if (state.onAborted()) {
                            HadoopTaskInfo info = new HadoopTaskInfo(ABORT, jobId, 0, 0, null);
                            if (log.isDebugEnabled())
                                log.debug("Submitting ABORT task for execution [locNodeId=" + locNodeId + ", jobId=" + jobId + ']');
                            ctx.taskExecutor().run(job, Collections.singletonList(info));
                        }
                    }
                    break;
                } else {
                    // Check if there are unscheduled mappers or reducers.
                    Collection<HadoopInputSplit> cancelMappers = new ArrayList<>();
                    Collection<Integer> cancelReducers = new ArrayList<>();
                    Collection<HadoopInputSplit> mappers = plan.mappers(ctx.localNodeId());
                    if (mappers != null) {
                        for (HadoopInputSplit b : mappers) {
                            if (state == null || !state.mapperScheduled(b))
                                cancelMappers.add(b);
                        }
                    }
                    int[] rdc = plan.reducers(ctx.localNodeId());
                    if (rdc != null) {
                        for (int r : rdc) {
                            if (state == null || !state.reducerScheduled(r))
                                cancelReducers.add(r);
                        }
                    }
                    if (!cancelMappers.isEmpty() || !cancelReducers.isEmpty())
                        transform(jobId, new CancelJobProcessor(null, cancelMappers, cancelReducers));
                }
                break;
            }
        case PHASE_COMPLETE:
            {
                if (log.isDebugEnabled())
                    log.debug("Job execution is complete, will remove local state from active jobs " + "[jobId=" + jobId + ", meta=" + meta + ']');
                if (state != null) {
                    state = activeJobs.remove(jobId);
                    assert state != null;
                    ctx.shuffle().jobFinished(jobId);
                }
                GridFutureAdapter<HadoopJobId> finishFut = activeFinishFuts.remove(jobId);
                if (finishFut != null) {
                    if (log.isDebugEnabled())
                        log.debug("Completing job future [locNodeId=" + locNodeId + ", meta=" + meta + ']');
                    finishFut.onDone(jobId, meta.failCause());
                }
                assert job != null;
                if (ctx.jobUpdateLeader())
                    job.cleanupStagingDirectory();
                jobs.remove(jobId);
                if (ctx.jobUpdateLeader()) {
                    ClassLoader ldr = job.getClass().getClassLoader();
                    try {
                        String statWriterClsName = job.info().property(HadoopCommonUtils.JOB_COUNTER_WRITER_PROPERTY);
                        if (statWriterClsName != null) {
                            Class<?> cls = ldr.loadClass(statWriterClsName);
                            HadoopCounterWriter writer = (HadoopCounterWriter) cls.newInstance();
                            HadoopCounters cntrs = meta.counters();
                            writer.write(job, cntrs);
                        }
                    } catch (Exception e) {
                        log.error("Can't write statistic due to: ", e);
                    }
                }
                job.dispose(false);
                break;
            }
        default:
            throw new IllegalStateException("Unknown phase: " + meta.phase());
    }
}
Also used : HadoopCounterWriter(org.apache.ignite.internal.processors.hadoop.counter.HadoopCounterWriter) HadoopInputSplit(org.apache.ignite.hadoop.HadoopInputSplit) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) HadoopTaskCancelledException(org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException) HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) HadoopJobEx(org.apache.ignite.internal.processors.hadoop.HadoopJobEx) HadoopCounters(org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters) HadoopTaskInfo(org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) Collection(java.util.Collection) HadoopClassLoader(org.apache.ignite.internal.processors.hadoop.HadoopClassLoader)
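
The PHASE_COMPLETE branch above is the extension point for job statistics: if the job property HadoopCommonUtils.JOB_COUNTER_WRITER_PROPERTY names a class, the tracker instantiates it reflectively and hands it the final counters. A minimal sketch of such a writer, assuming the two-argument write signature implied by the writer.write(job, cntrs) call above and the all()/group()/name() counter accessors (the console destination is purely illustrative):

// Hypothetical writer; it needs a public no-arg constructor because the
// tracker creates it via cls.newInstance().
public class LoggingCounterWriter implements HadoopCounterWriter {
    /** {@inheritDoc} */
    @Override public void write(HadoopJobEx job, HadoopCounters cntrs) throws IgniteCheckedException {
        // Print every counter of the finished job; a production writer would
        // persist them to a file system or an external store instead.
        for (HadoopCounter cntr : cntrs.all())
            System.out.println(job.id() + ": " + cntr.group() + '.' + cntr.name());
    }
}

The writer is enabled per job by setting the JOB_COUNTER_WRITER_PROPERTY job property to its fully qualified class name.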

Example 3 with HadoopMapReducePlan

Use of org.apache.ignite.hadoop.HadoopMapReducePlan in project ignite by apache.

The class HadoopWeightedMapReducePlannerTest, method testOneIgfsSplitAffinity.

/**
 * Tests that a single IGFS split is assigned to its affinity node.
 *
 * @throws Exception If failed.
 */
public void testOneIgfsSplitAffinity() throws Exception {
    IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();
    List<HadoopInputSplit> splits = new ArrayList<>();
    splits.add(new HadoopFileBlock(new String[] { HOST_1 }, URI.create("igfs://igfs@/file"), 0, 50));
    final int expReducers = 4;
    HadoopPlannerMockJob job = new HadoopPlannerMockJob(splits, expReducers);
    IgniteHadoopWeightedMapReducePlanner planner = createPlanner(igfs);
    HadoopMapReducePlan plan = planner.preparePlan(job, NODES, null);
    assert plan.mappers() == 1;
    assert plan.mapperNodeIds().size() == 1;
    assert plan.mapperNodeIds().contains(ID_1);
    checkPlanMappers(plan, splits, NODES, false);
    checkPlanReducers(plan, NODES, expReducers, false);
}
Also used : HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) IgniteHadoopWeightedMapReducePlanner(org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner) ArrayList(java.util.ArrayList) IgfsMock(org.apache.ignite.internal.processors.igfs.IgfsMock) HadoopInputSplit(org.apache.ignite.hadoop.HadoopInputSplit) HadoopFileBlock(org.apache.ignite.internal.processors.hadoop.HadoopFileBlock)
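
The assertions above only count mappers; a prepared plan can also be walked node by node. A short sketch using only the accessors that already appear in the tracker code above (Arrays is java.util.Arrays):

// Inspect which splits and reducers the planner assigned to each node.
for (UUID nodeId : plan.mapperNodeIds()) {
    Collection<HadoopInputSplit> nodeSplits = plan.mappers(nodeId);

    System.out.println("Node " + nodeId + " maps " + nodeSplits.size() + " split(s).");
}

for (UUID nodeId : plan.reducerNodeIds()) {
    int[] rdc = plan.reducers(nodeId);

    System.out.println("Node " + nodeId + " runs reducers " + Arrays.toString(rdc) + '.');
}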

Example 4 with HadoopMapReducePlan

Use of org.apache.ignite.hadoop.HadoopMapReducePlan in project ignite by apache.

The class HadoopWeightedMapReducePlannerTest, method testHdfsSplitsReplication.

/**
 * Tests HDFS splits with a replication factor of 3.
 *
 * @throws Exception If failed.
 */
public void testHdfsSplitsReplication() throws Exception {
    IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();
    final List<HadoopInputSplit> splits = new ArrayList<>();
    splits.add(new HadoopFileBlock(new String[] { HOST_1, HOST_2, HOST_3 }, URI.create("hdfs://" + HOST_1 + "/x"), 0, 50));
    splits.add(new HadoopFileBlock(new String[] { HOST_2, HOST_3, HOST_4 }, URI.create("hdfs://" + HOST_2 + "/x"), 50, 100));
    splits.add(new HadoopFileBlock(new String[] { HOST_3, HOST_4, HOST_5 }, URI.create("hdfs://" + HOST_3 + "/x"), 100, 37));
    // The following splits belong to hosts that are entirely outside the Ignite topology,
    // so they should be assigned to the least loaded nodes:
    splits.add(new HadoopFileBlock(new String[] { HOST_4, HOST_5, HOST_1 }, URI.create("hdfs://" + HOST_4 + "/x"), 138, 2));
    splits.add(new HadoopFileBlock(new String[] { HOST_5, HOST_1, HOST_2 }, URI.create("hdfs://" + HOST_5 + "/x"), 140, 3));
    final int expReducers = 8;
    HadoopPlannerMockJob job = new HadoopPlannerMockJob(splits, expReducers);
    IgniteHadoopWeightedMapReducePlanner planner = createPlanner(igfs);
    final HadoopMapReducePlan plan = planner.preparePlan(job, NODES, null);
    checkPlanMappers(plan, splits, NODES, true);
    checkPlanReducers(plan, NODES, expReducers, true);
}
Also used : HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) IgniteHadoopWeightedMapReducePlanner(org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner) ArrayList(java.util.ArrayList) IgfsMock(org.apache.ignite.internal.processors.igfs.IgfsMock) HadoopInputSplit(org.apache.ignite.hadoop.HadoopInputSplit) HadoopFileBlock(org.apache.ignite.internal.processors.hadoop.HadoopFileBlock)
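
Outside of tests, the planner is not called directly; it is plugged into the node configuration so that submitted jobs are planned with it. A minimal sketch, assuming the setMapReducePlanner and setHadoopConfiguration setters of the public configuration API (verify against the HadoopConfiguration javadoc of your Ignite version):

// Sketch: make the weighted planner the node-wide map-reduce planner.
HadoopConfiguration hadoopCfg = new HadoopConfiguration();

hadoopCfg.setMapReducePlanner(new IgniteHadoopWeightedMapReducePlanner());

IgniteConfiguration cfg = new IgniteConfiguration();

cfg.setHadoopConfiguration(hadoopCfg);

try (Ignite ignite = Ignition.start(cfg)) {
    // Map-reduce jobs submitted through this node now use the weighted planner.
}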

Example 5 with HadoopMapReducePlan

Use of org.apache.ignite.hadoop.HadoopMapReducePlan in project ignite by apache.

The class HadoopContext, method isParticipating.

/**
 * @param meta Job metadata.
 * @return {@code true} if the local node is participating in job execution.
 */
public boolean isParticipating(HadoopJobMetadata meta) {
    UUID locNodeId = localNodeId();
    if (locNodeId.equals(meta.submitNodeId()))
        return true;
    HadoopMapReducePlan plan = meta.mapReducePlan();
    return plan.mapperNodeIds().contains(locNodeId) || plan.reducerNodeIds().contains(locNodeId) || jobUpdateLeader();
}
Also used : HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) UUID(java.util.UUID)
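
This check lets every node observe the replicated job metadata while acting only on relevant updates. A sketch of the guard pattern, with hypothetical listener wiring around the processJobMetaUpdate method from Example 2:

// Hypothetical guard inside a job-metadata update listener: skip any job
// this node neither submitted, maps, reduces, nor coordinates.
if (!ctx.isParticipating(meta))
    return;

processJobMetaUpdate(jobId, meta, ctx.localNodeId());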

Aggregations

HadoopMapReducePlan (org.apache.ignite.hadoop.HadoopMapReducePlan)8 HadoopInputSplit (org.apache.ignite.hadoop.HadoopInputSplit)4 ArrayList (java.util.ArrayList)3 UUID (java.util.UUID)3 IgniteCheckedException (org.apache.ignite.IgniteCheckedException)3 IgniteHadoopWeightedMapReducePlanner (org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner)3 HadoopFileBlock (org.apache.ignite.internal.processors.hadoop.HadoopFileBlock)3 HadoopJobEx (org.apache.ignite.internal.processors.hadoop.HadoopJobEx)3 IgfsMock (org.apache.ignite.internal.processors.igfs.IgfsMock)3 Collection (java.util.Collection)2 HadoopJobId (org.apache.ignite.internal.processors.hadoop.HadoopJobId)2 HadoopTaskInfo (org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo)2 GridFutureAdapter (org.apache.ignite.internal.util.future.GridFutureAdapter)2 MutableEntry (javax.cache.processor.MutableEntry)1 IgniteCache (org.apache.ignite.IgniteCache)1 HadoopClassLoader (org.apache.ignite.internal.processors.hadoop.HadoopClassLoader)1 HadoopJobPhase (org.apache.ignite.internal.processors.hadoop.HadoopJobPhase)1 HadoopTaskCancelledException (org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException)1 HadoopCounterWriter (org.apache.ignite.internal.processors.hadoop.counter.HadoopCounterWriter)1 HadoopCounters (org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters)1