Example 1 with MapRedStats

Use of org.apache.hadoop.hive.ql.MapRedStats in project ambrose by twitter.

From class AmbroseHiveFinishHook, method displayStatistics:

private void displayStatistics() {
    EmbeddedAmbroseHiveProgressReporter reporter = getEmbeddedProgressReporter();
    Map<String, String> jobIdToNodeId = reporter.getJobIdToNodeId();
    LOG.info("MapReduce Jobs Launched: ");
    List<MapRedStats> lastMapRedStats = SessionState.get().getLastMapRedStatsList();
    for (int i = 0; i < lastMapRedStats.size(); i++) {
        MapRedStats mrStats = lastMapRedStats.get(i);
        String jobId = mrStats.getJobId();
        String nodeId = jobIdToNodeId.get(jobId);
        StringBuilder sb = new StringBuilder();
        sb.append("Job ").append(i).append(" (").append(jobId).append(", ").append(nodeId).append("): ").append(mrStats);
        LOG.info(sb.toString());
    }
}
Also used: MapRedStats (org.apache.hadoop.hive.ql.MapRedStats), EmbeddedAmbroseHiveProgressReporter (com.twitter.ambrose.hive.reporter.EmbeddedAmbroseHiveProgressReporter)
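
A hook like the one above only needs a few lines to reproduce this output. Below is a minimal sketch, not Ambrose code: the class name and logger are illustrative, ExecuteWithHookContext is Hive's execution hook interface, and the SessionState and MapRedStats calls mirror the example.

import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.MapRedStats;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.session.SessionState;

// illustrative hook class; not part of Ambrose or Hive
public class LogMapRedStatsHook implements ExecuteWithHookContext {

    private static final Log LOG = LogFactory.getLog(LogMapRedStatsHook.class);

    @Override
    public void run(HookContext hookContext) throws Exception {
        SessionState ss = SessionState.get();
        if (ss == null) {
            // no session in special execution modes (see Example 4)
            return;
        }
        List<MapRedStats> statsList = ss.getLastMapRedStatsList();
        if (statsList == null) {
            return;
        }
        for (int i = 0; i < statsList.size(); i++) {
            MapRedStats stats = statsList.get(i);
            // MapRedStats.toString() renders the per-job counter summary
            LOG.info("Job " + i + " (" + stats.getJobId() + "): " + stats);
        }
    }
}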

Example 2 with MapRedStats

Use of org.apache.hadoop.hive.ql.MapRedStats in project hive by apache.

From class ShowMapredStatsHook, method run:

public void run(HookContext hookContext) {
    SessionState ss = SessionState.get();
    Assert.assertNotNull("SessionState returned null", ss);
    Map<String, MapRedStats> stats = ss.getMapRedStats();
    if (stats != null && !stats.isEmpty()) {
        for (Map.Entry<String, MapRedStats> stat : stats.entrySet()) {
            SessionState.getConsole().printError(stat.getKey() + "=" + stat.getValue().getTaskNumbers());
        }
    }
}
Also used: SessionState (org.apache.hadoop.hive.ql.session.SessionState), MapRedStats (org.apache.hadoop.hive.ql.MapRedStats), Map (java.util.Map)
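
A hook like this only runs if it is registered with the session. Below is a minimal wiring sketch; HiveConf.ConfVars.POSTEXECHOOKS is Hive's hive.exec.post.hooks property, while the hook's package is an assumption (it is not shown above).

import org.apache.hadoop.hive.conf.HiveConf;

public class HookWiringSketch {

    public static HiveConf confWithStatsHook() {
        HiveConf conf = new HiveConf();
        // comma-separated list of classes to run after each query; package assumed
        conf.setVar(HiveConf.ConfVars.POSTEXECHOOKS,
                "org.apache.hadoop.hive.ql.hooks.ShowMapredStatsHook");
        return conf;
    }
}

The same property can also be set per session, e.g. SET hive.exec.post.hooks=... in the CLI.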

Example 3 with MapRedStats

Use of org.apache.hadoop.hive.ql.MapRedStats in project hive by apache.

From class VerifyNumReducersHook, method run:

public void run(HookContext hookContext) {
    SessionState ss = SessionState.get();
    Assert.assertNotNull("SessionState returned null", ss);
    int expectedReducers = hookContext.getConf().getInt(BUCKET_CONFIG, 0);
    Map<String, MapRedStats> stats = ss.getMapRedStats();
    Assert.assertEquals("Number of MapReduce jobs is incorrect", 1, stats.size());
    MapRedStats stat = stats.values().iterator().next();
    Assert.assertEquals("NumReducers is incorrect", expectedReducers, stat.getNumReduce());
}
Also used: SessionState (org.apache.hadoop.hive.ql.session.SessionState), MapRedStats (org.apache.hadoop.hive.ql.MapRedStats)
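
The hook reads its expected reducer count back out of the job configuration, so a test has to seed that value first. Below is a minimal sketch; it assumes BUCKET_CONFIG is a publicly accessible constant on VerifyNumReducersHook and that the class lives in org.apache.hadoop.hive.ql.hooks, neither of which is shown above.

import org.apache.hadoop.hive.conf.HiveConf;
// package assumed
import org.apache.hadoop.hive.ql.hooks.VerifyNumReducersHook;

public class ReducerCheckSketch {

    public static HiveConf confExpectingReducers(int expectedReducers) {
        HiveConf conf = new HiveConf();
        // read back by the hook via hookContext.getConf().getInt(BUCKET_CONFIG, 0)
        conf.setInt(VerifyNumReducersHook.BUCKET_CONFIG, expectedReducers);
        return conf;
    }
}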

Example 4 with MapRedStats

Use of org.apache.hadoop.hive.ql.MapRedStats in project hive by apache.

From class HadoopJobExecHelper, method progress (the public overload):

public int progress(RunningJob rj, JobClient jc, Context ctx) throws IOException, LockException {
    jobId = rj.getID();
    int returnVal = 0;
    // mask the metastore password in the conf so that the job tracker does
    // not show it in its logs
    String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
    if (pwd != null) {
        HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
    }
    // restore the original password
    if (pwd != null) {
        HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, pwd);
    }
    // add to list of running jobs to kill in case of abnormal shutdown
    runningJobs.add(rj);
    ExecDriverTaskHandle th = new ExecDriverTaskHandle(jc, rj, ctx);
    jobInfo(rj);
    MapRedStats mapRedStats = progress(th);
    this.task.taskHandle = th;
    // a SessionState is not always present: ExecDriver can be invoked directly
    // for special modes. In that case, SessionState.get() is empty.
    if (SessionState.get() != null) {
        SessionState.get().getMapRedStats().put(getId(), mapRedStats);
        // compute reducer time statistics for the job, irrespective
        // of success or failure
        if (this.task.getQueryPlan() != null) {
            computeReducerTimeStatsPerJob(rj);
        }
    }
    boolean success = mapRedStats.isSuccess();
    String statusMesg = getJobEndMsg(rj.getID());
    if (!success) {
        statusMesg += " with errors";
        returnVal = 2;
        console.printError(statusMesg);
        if (HiveConf.getBoolVar(job, HiveConf.ConfVars.SHOW_JOB_FAIL_DEBUG_INFO) || HiveConf.getBoolVar(job, HiveConf.ConfVars.JOB_DEBUG_CAPTURE_STACKTRACES)) {
            try {
                JobDebugger jd;
                if (SessionState.get() != null) {
                    jd = new JobDebugger(job, rj, console, SessionState.get().getStackTraces());
                } else {
                    jd = new JobDebugger(job, rj, console);
                }
                Thread t = new Thread(jd);
                t.start();
                t.join(HiveConf.getIntVar(job, HiveConf.ConfVars.JOB_DEBUG_TIMEOUT));
                task.setDiagnosticMessage(jd.getDiagnosticMesg());
                int ec = jd.getErrorCode();
                if (ec > 0) {
                    returnVal = ec;
                }
            } catch (InterruptedException e) {
                console.printError("Timed out trying to grab more detailed job failure" + " information, please check jobtracker for more info");
            }
        }
    } else {
        console.printInfo(statusMesg);
    }
    return returnVal;
}
Also used: MapRedStats (org.apache.hadoop.hive.ql.MapRedStats)
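
The password handling at the top of the method is a mask-then-restore pattern that is worth isolating. Below is a minimal sketch of that pattern as a standalone helper; the helper name is hypothetical, and only the HiveConf.getVar/setVar calls and ConfVars.METASTOREPWD come from the example above.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.mapred.JobConf;

public class PwdMaskSketch {

    // hypothetical helper: runs an action while the metastore password is masked
    public static void withMaskedPwd(JobConf job, Runnable action) {
        String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
        if (pwd != null) {
            // placeholder value, as in the example above
            HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
        }
        try {
            action.run();
        } finally {
            if (pwd != null) {
                // put the real password back
                HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, pwd);
            }
        }
    }
}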

Example 5 with MapRedStats

Use of org.apache.hadoop.hive.ql.MapRedStats in project hive by apache.

From class HadoopJobExecHelper, method progress (the private overload):

private MapRedStats progress(ExecDriverTaskHandle th) throws IOException, LockException {
    JobClient jc = th.getJobClient();
    RunningJob rj = th.getRunningJob();
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
    // DecimalFormat longFormatter = new DecimalFormat("###,###");
    long reportTime = System.currentTimeMillis();
    long maxReportInterval = HiveConf.getTimeVar(job, HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL, TimeUnit.MILLISECONDS);
    boolean fatal = false;
    StringBuilder errMsg = new StringBuilder();
    long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL);
    boolean initializing = true;
    boolean initOutputPrinted = false;
    long cpuMsec = -1;
    int numMap = -1;
    int numReduce = -1;
    List<ClientStatsPublisher> clientStatPublishers = getClientStatPublishers();
    final boolean localMode = ShimLoader.getHadoopShims().isLocalMode(job);
    while (!rj.isComplete()) {
        if (th.getContext() != null) {
            th.getContext().checkHeartbeaterLockException();
        }
        try {
            Thread.sleep(pullInterval);
        } catch (InterruptedException e) {
            // interrupted while waiting between polls; loop and poll again
        }
        if (initializing && rj.getJobState() == JobStatus.PREP) {
            // No reason to poll until the job is initialized
            continue;
        } else {
            // By now the job is initialized so no reason to do
            // rj.getJobState() again and we do not want to do an extra RPC call
            initializing = false;
        }
        if (!localMode) {
            if (!initOutputPrinted) {
                SessionState ss = SessionState.get();
                String logMapper;
                String logReducer;
                TaskReport[] mappers = jc.getMapTaskReports(rj.getID());
                if (mappers == null) {
                    logMapper = "no information for number of mappers; ";
                } else {
                    numMap = mappers.length;
                    if (ss != null) {
                        ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
                    }
                    logMapper = "number of mappers: " + numMap + "; ";
                }
                TaskReport[] reducers = jc.getReduceTaskReports(rj.getID());
                if (reducers == null) {
                    logReducer = "no information for number of reducers. ";
                } else {
                    numReduce = reducers.length;
                    if (ss != null) {
                        ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
                    }
                    logReducer = "number of reducers: " + numReduce;
                }
                console.printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
                initOutputPrinted = true;
            }
            RunningJob newRj = jc.getJob(rj.getID());
            if (newRj == null) {
                // the job tracker may have purged the status of this finished job,
                // so raise a meaningful exception
                throw new IOException("Could not find status of job:" + rj.getID());
            } else {
                th.setRunningJob(newRj);
                rj = newRj;
            }
        }
        // if a fatal error occurred, kill the job immediately rather than
        // let the job retry several times, which eventually leads to failure.
        if (fatal) {
            // wait until rj.isComplete
            continue;
        }
        Counters ctrs = th.getCounters();
        if (fatal = checkFatalErrors(ctrs, errMsg)) {
            console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
            rj.killJob();
            continue;
        }
        errMsg.setLength(0);
        updateCounters(ctrs, rj);
        // Prepare data for Client Stat Publishers (if any present) and execute them
        if (clientStatPublishers.size() > 0 && ctrs != null) {
            Map<String, Double> extractedCounters = extractAllCounterValues(ctrs);
            for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
                try {
                    clientStatPublisher.run(extractedCounters, rj.getID().toString());
                } catch (RuntimeException runtimeException) {
                    LOG.error("Exception " + runtimeException.getClass().getCanonicalName() + " thrown when running clientStatsPublishers. The stack trace is: ", runtimeException);
                }
            }
        }
        if (mapProgress == lastMapProgress && reduceProgress == lastReduceProgress && System.currentTimeMillis() < reportTime + maxReportInterval) {
            continue;
        }
        StringBuilder report = new StringBuilder();
        report.append(dateFormat.format(Calendar.getInstance().getTime()));
        report.append(' ').append(getId());
        report.append(" map = ").append(mapProgress).append("%, ");
        report.append(" reduce = ").append(reduceProgress).append('%');
        // find out the cumulative CPU time so far; if the counter is missing, skip printing
        // it out.
        if (ctrs != null) {
            Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
            if (counterCpuMsec != null) {
                long newCpuMSec = counterCpuMsec.getValue();
                if (newCpuMSec > 0) {
                    cpuMsec = newCpuMSec;
                    report.append(", Cumulative CPU ").append((cpuMsec / 1000D)).append(" sec");
                }
            }
        }
        // write out serialized plan with counters to log file
        // LOG.info(queryPlan);
        String output = report.toString();
        SessionState ss = SessionState.get();
        if (ss != null) {
            ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
            ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_HADOOP_PROGRESS, output);
            if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
                ss.getHiveHistory().progressTask(queryId, this.task);
                this.callBackObj.logPlanProgress(ss);
            }
        }
        console.printInfo(output);
        task.setStatusMessage(output);
        reportTime = System.currentTimeMillis();
    }
    Counters ctrs = th.getCounters();
    if (ctrs != null) {
        Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
        if (counterCpuMsec != null) {
            long newCpuMSec = counterCpuMsec.getValue();
            if (newCpuMSec > cpuMsec) {
                cpuMsec = newCpuMSec;
            }
        }
    }
    if (cpuMsec > 0) {
        String status = "MapReduce Total cumulative CPU time: " + Utilities.formatMsecToStr(cpuMsec);
        console.printInfo(status);
        task.setStatusMessage(status);
    }
    boolean success;
    if (fatal) {
        success = false;
    } else {
        // the last check before the job is completed
        if (checkFatalErrors(ctrs, errMsg)) {
            console.printError("[Fatal Error] " + errMsg.toString());
            success = false;
        } else {
            SessionState ss = SessionState.get();
            if (ss != null) {
                ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
            }
            success = rj.isSuccessful();
        }
    }
    MapRedStats mapRedStats = new MapRedStats(numMap, numReduce, cpuMsec, success, rj.getID().toString());
    mapRedStats.setCounters(ctrs);
    // update based on the final value of the counters
    updateCounters(ctrs, rj);
    SessionState ss = SessionState.get();
    if (ss != null) {
        this.callBackObj.logPlanProgress(ss);
    }
    // LOG.info(queryPlan);
    return mapRedStats;
}
Also used: SessionState (org.apache.hadoop.hive.ql.session.SessionState), TaskReport (org.apache.hadoop.mapred.TaskReport), IOException (java.io.IOException), JobClient (org.apache.hadoop.mapred.JobClient), Counter (org.apache.hadoop.mapred.Counters.Counter), ClientStatsPublisher (org.apache.hadoop.hive.ql.stats.ClientStatsPublisher), RunningJob (org.apache.hadoop.mapred.RunningJob), Counters (org.apache.hadoop.mapred.Counters), MapRedStats (org.apache.hadoop.hive.ql.MapRedStats), SimpleDateFormat (java.text.SimpleDateFormat)
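
Once progress() has produced a MapRedStats per job, a caller can roll the per-job CPU times up into a session total, in the style of the CLI's end-of-query summary. Below is a minimal sketch; it assumes getCpuMSec() is the getter for the cpuMsec value passed to the MapRedStats constructor above, while getLastMapRedStatsList() and Utilities.formatMsecToStr() appear in Examples 1 and 5.

import java.util.List;

import org.apache.hadoop.hive.ql.MapRedStats;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.session.SessionState;

public class CpuSummarySketch {

    public static String totalCpuTime() {
        long totalCpuMsec = 0;
        List<MapRedStats> statsList = SessionState.get().getLastMapRedStatsList();
        if (statsList != null) {
            for (MapRedStats stats : statsList) {
                totalCpuMsec += stats.getCpuMSec(); // assumed getter, see above
            }
        }
        return "Total MapReduce CPU Time Spent: "
                + Utilities.formatMsecToStr(totalCpuMsec);
    }
}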

Aggregations

MapRedStats (org.apache.hadoop.hive.ql.MapRedStats): 5
SessionState (org.apache.hadoop.hive.ql.session.SessionState): 3
EmbeddedAmbroseHiveProgressReporter (com.twitter.ambrose.hive.reporter.EmbeddedAmbroseHiveProgressReporter): 1
IOException (java.io.IOException): 1
SimpleDateFormat (java.text.SimpleDateFormat): 1
Map (java.util.Map): 1
ClientStatsPublisher (org.apache.hadoop.hive.ql.stats.ClientStatsPublisher): 1
Counters (org.apache.hadoop.mapred.Counters): 1
Counter (org.apache.hadoop.mapred.Counters.Counter): 1
JobClient (org.apache.hadoop.mapred.JobClient): 1
RunningJob (org.apache.hadoop.mapred.RunningJob): 1
TaskReport (org.apache.hadoop.mapred.TaskReport): 1