Example 6 with HadoopMapReducePlan

use of org.apache.ignite.hadoop.HadoopMapReducePlan in project ignite by apache.

the class HadoopWeightedMapReducePlannerTest method testHdfsSplitsAffinity.

/**
 * Tests HDFS splits affinity.
 *
 * @throws Exception If failed.
 */
public void testHdfsSplitsAffinity() throws Exception {
    IgfsMock igfs = LocationsBuilder.create().add(0, NODE_1).add(50, NODE_2).add(100, NODE_3).buildIgfs();
    final List<HadoopInputSplit> splits = new ArrayList<>();
    splits.add(new HadoopFileBlock(new String[] { HOST_1 }, URI.create("hdfs://" + HOST_1 + "/x"), 0, 50));
    splits.add(new HadoopFileBlock(new String[] { HOST_2 }, URI.create("hdfs://" + HOST_2 + "/x"), 50, 100));
    splits.add(new HadoopFileBlock(new String[] { HOST_3 }, URI.create("hdfs://" + HOST_3 + "/x"), 100, 37));
    // The following splits belong to hosts that are not part of the Ignite topology at all.
    // This means these splits should be assigned to the least loaded nodes:
    splits.add(new HadoopFileBlock(new String[] { HOST_4 }, URI.create("hdfs://" + HOST_4 + "/x"), 138, 2));
    splits.add(new HadoopFileBlock(new String[] { HOST_5 }, URI.create("hdfs://" + HOST_5 + "/x"), 140, 3));
    final int expReducers = 7;
    HadoopPlannerMockJob job = new HadoopPlannerMockJob(splits, expReducers);
    IgniteHadoopWeightedMapReducePlanner planner = createPlanner(igfs);
    final HadoopMapReducePlan plan = planner.preparePlan(job, NODES, null);
    checkPlanMappers(plan, splits, NODES, true);
    checkPlanReducers(plan, NODES, expReducers, true);
}
Also used : HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) IgniteHadoopWeightedMapReducePlanner(org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner) ArrayList(java.util.ArrayList) IgfsMock(org.apache.ignite.internal.processors.igfs.IgfsMock) HadoopInputSplit(org.apache.ignite.hadoop.HadoopInputSplit) HadoopFileBlock(org.apache.ignite.internal.processors.hadoop.HadoopFileBlock)
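
The helpers createPlanner, checkPlanMappers and checkPlanReducers are defined elsewhere in the test class and are not shown here. For orientation, the resulting plan can be inspected directly through the HadoopMapReducePlan accessors that the other examples on this page rely on. A minimal sketch (the dumpPlan helper itself is illustrative, not part of the test):

import java.util.UUID;

import org.apache.ignite.hadoop.HadoopInputSplit;
import org.apache.ignite.hadoop.HadoopMapReducePlan;

/** Illustrative helper (not from the test): prints a plan's task assignments. */
static void dumpPlan(HadoopMapReducePlan plan) {
    for (UUID nodeId : plan.mapperNodeIds()) {
        // mappers(nodeId) yields the input splits assigned to that node.
        for (HadoopInputSplit split : plan.mappers(nodeId))
            System.out.println("Mapper on " + nodeId + ": " + split);
    }

    for (UUID nodeId : plan.reducerNodeIds()) {
        // reducers(nodeId) yields the reducer indices assigned to that node.
        for (int rdc : plan.reducers(nodeId))
            System.out.println("Reducer " + rdc + " on " + nodeId);
    }
}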

Example 7 with HadoopMapReducePlan

use of org.apache.ignite.hadoop.HadoopMapReducePlan in project ignite by apache.

the class HadoopJobTracker method processNodeLeft.

/**
 * Processes node leave (or fail) event.
 *
 * @param evt Discovery event.
 */
@SuppressWarnings("ConstantConditions")
private void processNodeLeft(DiscoveryEvent evt) {
    if (log.isDebugEnabled())
        log.debug("Processing discovery event [locNodeId=" + ctx.localNodeId() + ", evt=" + evt + ']');
    // Check only if this node is responsible for job status updates.
    if (ctx.jobUpdateLeader()) {
        boolean checkSetup = evt.eventNode().order() < ctx.localNodeOrder();
        Iterable<IgniteCache.Entry<HadoopJobId, HadoopJobMetadata>> entries;
        try {
            entries = jobMetaCache().localEntries(OFFHEAP_PEEK_MODE);
        } catch (IgniteCheckedException e) {
            U.error(log, "Failed to get local entries", e);
            return;
        }
        // Iteration over all local entries is correct since system cache is REPLICATED.
        for (IgniteCache.Entry<HadoopJobId, HadoopJobMetadata> entry : entries) {
            HadoopJobMetadata meta = entry.getValue();
            HadoopJobId jobId = meta.jobId();
            HadoopMapReducePlan plan = meta.mapReducePlan();
            HadoopJobPhase phase = meta.phase();
            try {
                if (checkSetup && phase == PHASE_SETUP && !activeJobs.containsKey(jobId)) {
                    // Failover setup task.
                    HadoopJobEx job = job(jobId, meta.jobInfo());
                    Collection<HadoopTaskInfo> setupTask = setupTask(jobId);
                    assert setupTask != null;
                    ctx.taskExecutor().run(job, setupTask);
                } else if (phase == PHASE_MAP || phase == PHASE_REDUCE) {
                    // Must check all nodes, not just the event node, since
                    // multiple nodes may have failed at once.
                    Collection<HadoopInputSplit> cancelSplits = null;
                    for (UUID nodeId : plan.mapperNodeIds()) {
                        if (ctx.kernalContext().discovery().node(nodeId) == null) {
                            // Node has left the grid.
                            Collection<HadoopInputSplit> mappers = plan.mappers(nodeId);
                            if (cancelSplits == null)
                                cancelSplits = new HashSet<>();
                            cancelSplits.addAll(mappers);
                        }
                    }
                    Collection<Integer> cancelReducers = null;
                    for (UUID nodeId : plan.reducerNodeIds()) {
                        if (ctx.kernalContext().discovery().node(nodeId) == null) {
                            // Node has left the grid.
                            int[] reducers = plan.reducers(nodeId);
                            if (cancelReducers == null)
                                cancelReducers = new HashSet<>();
                            for (int rdc : reducers) cancelReducers.add(rdc);
                        }
                    }
                    if (cancelSplits != null || cancelReducers != null)
                        jobMetaCache().invoke(meta.jobId(), new CancelJobProcessor(null,
                            new IgniteCheckedException("One or more nodes participating in map-reduce job execution failed."),
                            cancelSplits, cancelReducers));
                }
            } catch (IgniteCheckedException e) {
                U.error(log, "Failed to cancel job: " + meta, e);
            }
        }
    }
}
Also used : IgniteCache(org.apache.ignite.IgniteCache) HadoopJobId(org.apache.ignite.internal.processors.hadoop.HadoopJobId) HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) MutableEntry(javax.cache.processor.MutableEntry) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) HadoopJobEx(org.apache.ignite.internal.processors.hadoop.HadoopJobEx) HadoopTaskInfo(org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo) HadoopJobPhase(org.apache.ignite.internal.processors.hadoop.HadoopJobPhase) Collection(java.util.Collection) UUID(java.util.UUID)
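
The failover scan above makes two passes over the plan: it collects the splits whose mapper node has left, then the reducer indices whose reducer node has left. A condensed sketch of the reducer pass, with a hypothetical isAlive predicate standing in for the ctx.kernalContext().discovery().node(nodeId) == null check:

import java.util.Collection;
import java.util.HashSet;
import java.util.UUID;
import java.util.function.Predicate;

import org.apache.ignite.hadoop.HadoopMapReducePlan;

/** Hypothetical extraction of the reducer pass: indices whose node has left. */
static Collection<Integer> lostReducers(HadoopMapReducePlan plan, Predicate<UUID> isAlive) {
    Collection<Integer> lost = new HashSet<>();

    for (UUID nodeId : plan.reducerNodeIds()) {
        if (!isAlive.test(nodeId)) {
            // Node has left the grid: every reducer it owned must be cancelled.
            for (int rdc : plan.reducers(nodeId))
                lost.add(rdc);
        }
    }

    return lost;
}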

Example 8 with HadoopMapReducePlan

use of org.apache.ignite.hadoop.HadoopMapReducePlan in project ignite by apache.

the class HadoopShuffle method newJob.

/**
 * Creates a new shuffle job.
 *
 * @param jobId Job ID.
 * @return Created shuffle job.
 * @throws IgniteCheckedException If job creation failed.
 */
private HadoopShuffleJob<UUID> newJob(HadoopJobId jobId) throws IgniteCheckedException {
    HadoopMapReducePlan plan = ctx.jobTracker().plan(jobId);
    HadoopShuffleJob<UUID> job = new HadoopShuffleJob<>(ctx.localNodeId(), log,
        ctx.jobTracker().job(jobId, null), mem, plan.reducers(),
        plan.reducers(ctx.localNodeId()), localMappersCount(plan), true);
    UUID[] rdcAddrs = new UUID[plan.reducers()];
    for (int i = 0; i < rdcAddrs.length; i++) {
        UUID nodeId = plan.nodeForReducer(i);
        assert nodeId != null : "Plan is missing node for reducer [plan=" + plan + ", rdc=" + i + ']';
        rdcAddrs[i] = nodeId;
    }
    boolean init = job.initializeReduceAddresses(rdcAddrs);
    assert init;
    return job;
}
Also used : HadoopMapReducePlan(org.apache.ignite.hadoop.HadoopMapReducePlan) UUID(java.util.UUID)
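
The localMappersCount(plan) helper is referenced above but not shown. A plausible sketch, assuming it simply counts the splits the plan assigns to the local node (the body below is an assumption, not the project's source; ctx is the same processor context field used by newJob):

import java.util.Collection;

import org.apache.ignite.hadoop.HadoopInputSplit;
import org.apache.ignite.hadoop.HadoopMapReducePlan;

/** Assumed shape of the helper: number of input splits mapped to the local node. */
private int localMappersCount(HadoopMapReducePlan plan) {
    // ctx is the shuffle's processor context, as in newJob() above (assumption).
    Collection<HadoopInputSplit> locMappers = plan.mappers(ctx.localNodeId());

    return locMappers == null ? 0 : locMappers.size();
}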

Aggregations

HadoopMapReducePlan (org.apache.ignite.hadoop.HadoopMapReducePlan)8 HadoopInputSplit (org.apache.ignite.hadoop.HadoopInputSplit)4 ArrayList (java.util.ArrayList)3 UUID (java.util.UUID)3 IgniteCheckedException (org.apache.ignite.IgniteCheckedException)3 IgniteHadoopWeightedMapReducePlanner (org.apache.ignite.hadoop.mapreduce.IgniteHadoopWeightedMapReducePlanner)3 HadoopFileBlock (org.apache.ignite.internal.processors.hadoop.HadoopFileBlock)3 HadoopJobEx (org.apache.ignite.internal.processors.hadoop.HadoopJobEx)3 IgfsMock (org.apache.ignite.internal.processors.igfs.IgfsMock)3 Collection (java.util.Collection)2 HadoopJobId (org.apache.ignite.internal.processors.hadoop.HadoopJobId)2 HadoopTaskInfo (org.apache.ignite.internal.processors.hadoop.HadoopTaskInfo)2 GridFutureAdapter (org.apache.ignite.internal.util.future.GridFutureAdapter)2 MutableEntry (javax.cache.processor.MutableEntry)1 IgniteCache (org.apache.ignite.IgniteCache)1 HadoopClassLoader (org.apache.ignite.internal.processors.hadoop.HadoopClassLoader)1 HadoopJobPhase (org.apache.ignite.internal.processors.hadoop.HadoopJobPhase)1 HadoopTaskCancelledException (org.apache.ignite.internal.processors.hadoop.HadoopTaskCancelledException)1 HadoopCounterWriter (org.apache.ignite.internal.processors.hadoop.counter.HadoopCounterWriter)1 HadoopCounters (org.apache.ignite.internal.processors.hadoop.counter.HadoopCounters)1