Use of org.apache.hadoop.mapred.RunningJob in project hive by apache.
Class LogRetriever, method logJob.
private void logJob(String logDir, String jobID, PrintWriter listWriter) throws IOException {
  RunningJob rj = jobClient.getJob(JobID.forName(jobID));
  String jobURLString = rj.getTrackingURL();
  Path jobDir = new Path(logDir, jobID);
  fs.mkdirs(jobDir);
  // Log the job configuration (job.xml.html)
  try {
    logJobConf(jobID, jobURLString, jobDir.toString());
  } catch (IOException e) {
    System.err.println("Cannot retrieve job.xml.html for " + jobID);
    e.printStackTrace();
  }
  listWriter.println("job: " + jobID + "(" + "name=" + rj.getJobName() + "," + "status=" + JobStatus.getJobRunState(rj.getJobState()) + ")");
  // Get completed attempts
  List<AttemptInfo> attempts = new ArrayList<AttemptInfo>();
  for (String type : new String[] { "map", "reduce", "setup", "cleanup" }) {
    try {
      List<AttemptInfo> successAttempts = getCompletedAttempts(jobID, jobURLString, type);
      attempts.addAll(successAttempts);
    } catch (IOException e) {
      System.err.println("Cannot retrieve " + type + " tasks for " + jobID);
      e.printStackTrace();
    }
  }
  // Get failed attempts
  try {
    List<AttemptInfo> failedAttempts = getFailedAttempts(jobID, jobURLString);
    attempts.addAll(failedAttempts);
  } catch (IOException e) {
    System.err.println("Cannot retrieve failed attempts for " + jobID);
    e.printStackTrace();
  }
  // Log each attempt
  for (AttemptInfo attempt : attempts) {
    try {
      logAttempt(jobID, attempt, jobDir.toString());
      listWriter.println(" attempt:" + attempt.id + "(" + "type=" + attempt.type + "," + "status=" + attempt.status + "," + "starttime=" + attempt.startTime + "," + "endtime=" + attempt.endTime + ")");
    } catch (IOException e) {
      System.err.println("Cannot log attempt " + attempt.id);
      e.printStackTrace();
    }
  }
  listWriter.println();
}
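As a point of reference, here is a minimal, self-contained sketch of how a RunningJob handle is obtained from a JobClient and inspected, which is the same pattern the snippet above relies on through its jobClient field. The class and method names below (RunningJobLookup, describe) are illustrative and not part of Hive; the snippet assumes a usable mapred configuration on the classpath.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.RunningJob;

public class RunningJobLookup {
  public static void describe(String jobIdString) throws Exception {
    JobConf conf = new JobConf();                       // picks up mapred-site.xml from the classpath
    JobClient client = new JobClient(conf);
    RunningJob job = client.getJob(JobID.forName(jobIdString));
    if (job == null) {
      System.err.println("No such job: " + jobIdString);
      return;
    }
    // Basic status fields, mirroring what logJob writes to its list file
    System.out.println("name=" + job.getJobName()
        + ", state=" + JobStatus.getJobRunState(job.getJobState())
        + ", trackingURL=" + job.getTrackingURL());
    client.close();
  }
}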
Use of org.apache.hadoop.mapred.RunningJob in project hive by apache.
Class CompactorMR, method launchCompactionJob.
/**
 * @param baseDir if not null, it's either the table/partition root folder or base_xxxx.
 *     If it's base_xxxx, it's in dirsToSearch; otherwise the actual original files
 *     (all leaves, recursively) are in the dirsToSearch list.
 */
private void launchCompactionJob(JobConf job, Path baseDir, CompactionType compactionType, StringableList dirsToSearch, List<AcidUtils.ParsedDelta> parsedDeltas, int curDirNumber, int obsoleteDirNumber, HiveConf hiveConf, IMetaStoreClient msc, long id, String jobName) throws IOException {
  job.setBoolean(IS_MAJOR, compactionType == CompactionType.MAJOR);
  if (dirsToSearch == null) {
    dirsToSearch = new StringableList();
  }
  StringableList deltaDirs = new StringableList();
  // Note: if compaction creates a delta, it won't replace an existing base dir, so the txn ID
  // of the base dir won't be a part of the delta's range. If, on the other hand, compaction
  // creates a base, we don't care about this value because bases don't have a min txn ID in
  // the name. However, logically this should also take the base into account if it's included.
  long minTxn = Long.MAX_VALUE;
  long maxTxn = Long.MIN_VALUE;
  for (AcidUtils.ParsedDelta delta : parsedDeltas) {
    LOG.debug("Adding delta " + delta.getPath() + " to directories to search");
    dirsToSearch.add(delta.getPath());
    deltaDirs.add(delta.getPath());
    minTxn = Math.min(minTxn, delta.getMinWriteId());
    maxTxn = Math.max(maxTxn, delta.getMaxWriteId());
  }
  if (baseDir != null) {
    job.set(BASE_DIR, baseDir.toString());
  }
  job.set(DELTA_DIRS, deltaDirs.toString());
  job.set(DIRS_TO_SEARCH, dirsToSearch.toString());
  job.setLong(MIN_TXN, minTxn);
  job.setLong(MAX_TXN, maxTxn);
  // HIVE-23354 enforces that MR speculative execution is disabled
  job.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
  job.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
  // Add tokens for all the file systems in the input paths.
  ArrayList<Path> dirs = new ArrayList<>();
  if (baseDir != null) {
    dirs.add(baseDir);
  }
  dirs.addAll(deltaDirs);
  dirs.addAll(dirsToSearch);
  TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs.toArray(new Path[] {}), job);
  if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)) {
    mrJob = job;
  }
  LOG.info("Submitting " + compactionType + " compaction job '" + job.getJobName() + "' to " + job.getQueueName() + " queue. " + "(current delta dirs count=" + curDirNumber + ", obsolete delta dirs count=" + obsoleteDirNumber + ". TxnIdRange[" + minTxn + "," + maxTxn + "]");
  JobClient jc = null;
  try {
    jc = new JobClient(job);
    RunningJob rj = jc.submitJob(job);
    LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID() + " compaction ID=" + id);
    try {
      msc.setHadoopJobid(rj.getID().toString(), id);
    } catch (TException e) {
      LOG.warn("Error setting hadoop job, jobId=" + rj.getID().toString() + " compactionId=" + id, e);
    }
    rj.waitForCompletion();
    if (!rj.isSuccessful()) {
      throw new IOException((compactionType == CompactionType.MAJOR ? "Major" : "Minor") + " compactor job failed for " + jobName + "! Hadoop JobId: " + rj.getID());
    }
  } finally {
    if (jc != null) {
      jc.close();
    }
  }
}
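At its core, the method above is the classic submit-and-wait pattern around JobClient and RunningJob. A stripped-down sketch of just that pattern, with the compactor-specific configuration omitted (the class name SubmitAndWait is illustrative):

import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class SubmitAndWait {
  public static void run(JobConf job) throws IOException {
    JobClient jc = new JobClient(job);
    try {
      RunningJob rj = jc.submitJob(job);   // returns immediately with a handle to the submitted job
      rj.waitForCompletion();              // blocks until the job finishes
      if (!rj.isSuccessful()) {
        throw new IOException("Job failed: " + rj.getID());
      }
    } finally {
      jc.close();
    }
  }
}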
Use of org.apache.hadoop.mapred.RunningJob in project hive by apache.
Class HadoopJobExecHelper, method progress.
private MapRedStats progress(ExecDriverTaskHandle th) throws IOException, LockException {
  JobClient jc = th.getJobClient();
  RunningJob rj = th.getRunningJob();
  SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
  // DecimalFormat longFormatter = new DecimalFormat("###,###");
  long reportTime = System.currentTimeMillis();
  long maxReportInterval = HiveConf.getTimeVar(job, HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL, TimeUnit.MILLISECONDS);
  boolean fatal = false;
  StringBuilder errMsg = new StringBuilder();
  long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL);
  boolean initializing = true;
  boolean initOutputPrinted = false;
  long cpuMsec = -1;
  int numMap = -1;
  int numReduce = -1;
  List<ClientStatsPublisher> clientStatPublishers = getClientStatPublishers();
  final boolean localMode = ShimLoader.getHadoopShims().isLocalMode(job);
  MapRedStats mapRedStats = new MapRedStats(job, numMap, numReduce, cpuMsec, false, rj.getID().toString());
  updateMapRedTaskWebUIStatistics(mapRedStats, rj);
  while (!rj.isComplete()) {
    if (th.getContext() != null) {
      th.getContext().checkHeartbeaterLockException();
    }
    try {
      Thread.sleep(pullInterval);
    } catch (InterruptedException e) {
    }
    if (initializing && rj.getJobState() == JobStatus.PREP) {
      // No reason to poll until the job is initialized
      continue;
    } else {
      // By now the job is initialized, so there is no reason to call
      // rj.getJobState() again and we do not want to do an extra RPC call
      initializing = false;
    }
    if (!localMode) {
      if (!initOutputPrinted) {
        SessionState ss = SessionState.get();
        String logMapper;
        String logReducer;
        TaskReport[] mappers = jc.getMapTaskReports(rj.getID());
        if (mappers == null) {
          logMapper = "no information for number of mappers; ";
        } else {
          numMap = mappers.length;
          if (ss != null) {
            ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
          }
          logMapper = "number of mappers: " + numMap + "; ";
        }
        TaskReport[] reducers = jc.getReduceTaskReports(rj.getID());
        if (reducers == null) {
          logReducer = "no information for number of reducers. ";
        } else {
          numReduce = reducers.length;
          if (ss != null) {
            ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
          }
          logReducer = "number of reducers: " + numReduce;
        }
        console.printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
        initOutputPrinted = true;
      }
      RunningJob newRj = jc.getJob(rj.getID());
      if (newRj == null) {
        // The job's status could not be looked up, so raise a meaningful exception
        throw new IOException("Could not find status of job:" + rj.getID());
      } else {
        th.setRunningJob(newRj);
        rj = newRj;
      }
    }
    // If a fatal error has already been detected, the job was killed below; just keep
    // waiting for completion rather than let the job retry several times, which
    // eventually leads to failure.
    if (fatal) {
      // wait until rj.isComplete
      continue;
    }
    Counters ctrs = th.getCounters();
    mapRedStats.setCounters(ctrs);
    mapRedStats.setNumMap(numMap);
    mapRedStats.setNumReduce(numReduce);
    updateMapRedTaskWebUIStatistics(mapRedStats, rj);
    if (fatal = checkFatalErrors(ctrs, errMsg)) {
      console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
      rj.killJob();
      continue;
    }
    errMsg.setLength(0);
    updateCounters(ctrs, rj);
    // Prepare data for Client Stat Publishers (if any present) and execute them
    if (clientStatPublishers.size() > 0 && ctrs != null) {
      Map<String, Double> extractedCounters = extractAllCounterValues(ctrs);
      for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
        try {
          clientStatPublisher.run(extractedCounters, rj.getID().toString());
        } catch (RuntimeException runtimeException) {
          LOG.error("Exception " + runtimeException.getClass().getCanonicalName() + " thrown when running clientStatsPublishers. The stack trace is: ", runtimeException);
        }
      }
    }
    if (mapProgress == lastMapProgress && reduceProgress == lastReduceProgress && System.currentTimeMillis() < reportTime + maxReportInterval) {
      continue;
    }
    StringBuilder report = new StringBuilder();
    report.append(dateFormat.format(Calendar.getInstance().getTime()));
    report.append(' ').append(getId());
    report.append(" map = ").append(mapProgress).append("%, ");
    report.append(" reduce = ").append(reduceProgress).append('%');
    // Find the cumulative CPU time; if the counter is unavailable, skip printing it out.
    if (ctrs != null) {
      Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
      if (counterCpuMsec != null) {
        long newCpuMSec = counterCpuMsec.getValue();
        if (newCpuMSec > 0) {
          cpuMsec = newCpuMSec;
          report.append(", Cumulative CPU ").append((cpuMsec / 1000D)).append(" sec");
        }
      }
    }
    // write out serialized plan with counters to log file
    // LOG.info(queryPlan);
    String output = report.toString();
    SessionState ss = SessionState.get();
    if (ss != null) {
      ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
      ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_HADOOP_PROGRESS, output);
      if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
        ss.getHiveHistory().progressTask(queryId, this.task);
        this.callBackObj.logPlanProgress(ss);
      }
    }
    console.printInfo(output);
    task.setStatusMessage(output);
    reportTime = System.currentTimeMillis();
  }
  Counters ctrs = th.getCounters();
  if (ctrs != null) {
    Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
    if (counterCpuMsec != null) {
      long newCpuMSec = counterCpuMsec.getValue();
      if (newCpuMSec > cpuMsec) {
        cpuMsec = newCpuMSec;
      }
    }
  }
  if (cpuMsec > 0) {
    String status = "MapReduce Total cumulative CPU time: " + Utilities.formatMsecToStr(cpuMsec);
    console.printInfo(status);
    task.setStatusMessage(status);
  }
  boolean success;
  if (fatal) {
    success = false;
  } else {
    // the last check before the job is completed
    if (checkFatalErrors(ctrs, errMsg)) {
      console.printError("[Fatal Error] " + errMsg.toString());
      success = false;
    } else {
      SessionState ss = SessionState.get();
      if (ss != null) {
        ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
      }
      success = rj.isSuccessful();
    }
  }
  mapRedStats.setSuccess(success);
  mapRedStats.setCounters(ctrs);
  mapRedStats.setCpuMSec(cpuMsec);
  updateMapRedTaskWebUIStatistics(mapRedStats, rj);
  // update based on the final value of the counters
  updateCounters(ctrs, rj);
  SessionState ss = SessionState.get();
  if (ss != null) {
    // Set the number of table rows affected in mapRedStats to display the number of rows inserted.
    if (ctrs != null) {
      Counter counter = ctrs.findCounter(ss.getConf().getVar(HiveConf.ConfVars.HIVECOUNTERGROUP), FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN);
      if (counter != null) {
        mapRedStats.setNumModifiedRows(counter.getValue());
      }
    }
    this.callBackObj.logPlanProgress(ss);
  }
  // LOG.info(queryPlan);
  return mapRedStats;
}
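The polling loop above reads the CPU_MILLISECONDS counter through Counters.findCounter. A minimal sketch of that lookup in isolation, using the same old-style counter group name that appears in the code (the class name CounterProbe is illustrative):

import java.io.IOException;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.Counters.Counter;
import org.apache.hadoop.mapred.RunningJob;

public class CounterProbe {
  public static long cpuMillis(RunningJob rj) throws IOException {
    Counters ctrs = rj.getCounters();   // may be null if counters are unavailable
    if (ctrs == null) {
      return -1L;
    }
    Counter c = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
        "CPU_MILLISECONDS");
    return c == null ? -1L : c.getValue();
  }
}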
Use of org.apache.hadoop.mapred.RunningJob in project ambrose by twitter.
Class MapReduceHelper, method getMapReduceJobState.
private MapReduceJobState getMapReduceJobState(MapReduceJob job, JobClient jobClient) throws Exception {
  RunningJob runningJob = getRunningJob(job, jobClient);
  JobID jobID = runningJob.getID();
  TaskReport[] mapTaskReport = jobClient.getMapTaskReports(jobID);
  TaskReport[] reduceTaskReport = jobClient.getReduceTaskReports(jobID);
  return new MapReduceJobState(runningJob, mapTaskReport, reduceTaskReport);
}
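MapReduceJobState is built from the map and reduce TaskReport arrays. For illustration, here is a small sketch of deriving an aggregate map-phase progress figure from those reports; the class name ProgressProbe is hypothetical and not part of Ambrose:

import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskReport;

public class ProgressProbe {
  public static float averageMapProgress(JobClient jobClient, RunningJob job) throws IOException {
    TaskReport[] maps = jobClient.getMapTaskReports(job.getID());
    if (maps == null || maps.length == 0) {
      return 0f;
    }
    float total = 0f;
    for (TaskReport report : maps) {
      total += report.getProgress();   // per-task progress in [0, 1]
    }
    return total / maps.length;
  }
}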
Use of org.apache.hadoop.mapred.RunningJob in project ambrose by twitter.
Class MapReduceHelper, method setJobConfFromFile.
/**
 * Get the configuration at the beginning of the job flow; it will contain information about the
 * map/reduce plan and the decoded Pig script.
 *
 * @param job job whose configuration should be fetched.
 * @param jobClient client with which to retrieve the job configuration.
 */
public void setJobConfFromFile(MapReduceJob job, JobClient jobClient) {
  try {
    RunningJob runningJob = getRunningJob(job, jobClient);
    String jobFile = runningJob.getJobFile();
    LOG.info(String.format("Loading RunningJob configuration file '%s'", jobFile));
    Path path = new Path(jobFile);
    FileSystem fileSystem = FileSystem.get(new Configuration());
    InputStream inputStream = fileSystem.open(path);
    Configuration conf = new Configuration(false);
    conf.addResource(inputStream);
    job.setConfiguration(toProperties(conf));
  } catch (Exception e) {
    LOG.warn("Error occurred when retrieving configuration info", e);
  }
}
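The toProperties helper used above is not shown in this snippet. One plausible shape for it, given that Hadoop's Configuration is iterable over its key/value entries, is sketched below; the actual implementation in Ambrose's MapReduceHelper may differ:

import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.conf.Configuration;

public class ConfUtil {
  // Copy every key/value pair from a Configuration into a Properties object.
  public static Properties toProperties(Configuration conf) {
    Properties properties = new Properties();
    for (Map.Entry<String, String> entry : conf) {   // Configuration is Iterable<Map.Entry<String, String>>
      properties.setProperty(entry.getKey(), entry.getValue());
    }
    return properties;
  }
}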