Use of org.apache.hadoop.mapred.RunningJob in project Cloud9 by lintool.
The class FileMerger, method mergeFilesDistribute.
private static Path mergeFilesDistribute(Configuration configuration, String inputFiles, String outputFile, int numberOfMappers, Class<? extends Writable> keyClass, Class<? extends Writable> valueClass, Class<? extends FileInputFormat> fileInputClass, Class<? extends FileOutputFormat> fileOutputClass, boolean deleteSource, boolean deleteDestinationFileIfExist) throws IOException {
JobConf conf = new JobConf(configuration, FileMerger.class);
conf.setJobName(FileMerger.class.getSimpleName());
FileSystem fs = FileSystem.get(conf);
sLogger.info("Tool: " + FileMerger.class.getSimpleName());
sLogger.info(" - merge files from: " + inputFiles);
sLogger.info(" - merge files to: " + outputFile);
conf.setNumMapTasks(numberOfMappers);
conf.setNumReduceTasks(1);
conf.setMapperClass(IdentityMapper.class);
conf.setReducerClass(IdentityReducer.class);
conf.setMapOutputKeyClass(keyClass);
conf.setMapOutputValueClass(valueClass);
conf.setOutputKeyClass(keyClass);
conf.setOutputValueClass(valueClass);
conf.setInputFormat(fileInputClass);
conf.setOutputFormat(fileOutputClass);
Path inputPath = new Path(inputFiles);
Path mergePath = new Path(inputPath.getParent().toString() + Path.SEPARATOR + MERGE + generateRandomString());
Preconditions.checkArgument(!fs.exists(mergePath), new IOException("Intermediate merge directory already exists..."));
Path outputPath = new Path(outputFile);
if (deleteDestinationFileIfExist) {
if (fs.exists(outputPath)) {
// carefully remove the destination file, not recursive
fs.delete(outputPath, false);
sLogger.info("Warning: remove destination file since it already exists...");
}
} else {
Preconditions.checkArgument(!fs.exists(outputPath), new IOException("Destination file already exists..."));
}
FileInputFormat.setInputPaths(conf, inputPath);
FileOutputFormat.setOutputPath(conf, mergePath);
FileOutputFormat.setCompressOutput(conf, true);
try {
long startTime = System.currentTimeMillis();
RunningJob job = JobClient.runJob(conf);
sLogger.info("Merge Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");
fs.rename(new Path(mergePath.toString() + Path.SEPARATOR + "part-00000"), outputPath);
if (deleteSource) {
for (FileStatus fileStatus : fs.globStatus(inputPath)) {
fs.deleteOnExit(fileStatus.getPath());
}
}
} finally {
fs.delete(mergePath, true);
}
sLogger.info("Successfully merge " + inputFiles.toString() + " to " + outputFile);
return outputPath;
}
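The merge above relies on the blocking entry point: JobClient.runJob(conf) submits the job, monitors it, and only returns the RunningJob handle once the job has finished (throwing IOException on failure), which is why the handle is used here purely for post-run bookkeeping such as timing. A minimal sketch of that pattern follows; runAndTime is a hypothetical helper name, and the caller is assumed to pass a JobConf already configured as in mergeFilesDistribute.
import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class BlockingRunSketch {
  // Runs an already configured JobConf (input/output paths, formats, and
  // mapper/reducer classes set up as in mergeFilesDistribute above) and
  // reports the elapsed time.
  static RunningJob runAndTime(JobConf conf) throws IOException {
    long start = System.currentTimeMillis();
    // runJob() blocks until the job finishes and throws IOException if it
    // fails, so the returned RunningJob always refers to a completed job.
    RunningJob job = JobClient.runJob(conf);
    System.out.println("Job " + job.getID() + " finished in "
        + (System.currentTimeMillis() - start) / 1000.0 + " seconds");
    return job;
  }
}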
Use of org.apache.hadoop.mapred.RunningJob in project hive by apache.
The class CompactorMR, method launchCompactionJob.
private void launchCompactionJob(JobConf job, Path baseDir, CompactionType compactionType, StringableList dirsToSearch, List<AcidUtils.ParsedDelta> parsedDeltas, int curDirNumber, int obsoleteDirNumber, HiveConf hiveConf, TxnStore txnHandler, long id) throws IOException {
job.setBoolean(IS_MAJOR, compactionType == CompactionType.MAJOR);
if (dirsToSearch == null) {
dirsToSearch = new StringableList();
}
StringableList deltaDirs = new StringableList();
long minTxn = Long.MAX_VALUE;
long maxTxn = Long.MIN_VALUE;
for (AcidUtils.ParsedDelta delta : parsedDeltas) {
LOG.debug("Adding delta " + delta.getPath() + " to directories to search");
dirsToSearch.add(delta.getPath());
deltaDirs.add(delta.getPath());
minTxn = Math.min(minTxn, delta.getMinTransaction());
maxTxn = Math.max(maxTxn, delta.getMaxTransaction());
}
if (baseDir != null)
job.set(BASE_DIR, baseDir.toString());
job.set(DELTA_DIRS, deltaDirs.toString());
job.set(DIRS_TO_SEARCH, dirsToSearch.toString());
job.setLong(MIN_TXN, minTxn);
job.setLong(MAX_TXN, maxTxn);
if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)) {
mrJob = job;
}
LOG.info("Submitting " + compactionType + " compaction job '" + job.getJobName() + "' to " + job.getQueueName() + " queue. " + "(current delta dirs count=" + curDirNumber + ", obsolete delta dirs count=" + obsoleteDirNumber + ". TxnIdRange[" + minTxn + "," + maxTxn + "]");
RunningJob rj = new JobClient(job).submitJob(job);
LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID() + " compaction ID=" + id);
txnHandler.setHadoopJobId(rj.getID().toString(), id);
rj.waitForCompletion();
}
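Unlike the Cloud9 example, the compactor takes the non-blocking path: submitJob() returns a RunningJob handle immediately, its ID is persisted through the TxnStore before the caller blocks on waitForCompletion(). A minimal sketch of that submit/record/wait pattern, with a hypothetical recordJobId() callback standing in for txnHandler.setHadoopJobId():
import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class SubmitAndTrackSketch {
  // Hypothetical stand-in for txnHandler.setHadoopJobId(...) above.
  static void recordJobId(String hadoopJobId, long compactionId) {
    System.out.println("compaction " + compactionId + " -> " + hadoopJobId);
  }

  static void runCompaction(JobConf job, long compactionId) throws IOException {
    // submitJob() returns as soon as the job is handed to the cluster.
    RunningJob rj = new JobClient(job).submitJob(job);
    // Persist the Hadoop job id first, so an external observer can still
    // find the job even if this process dies while waiting.
    recordJobId(rj.getID().toString(), compactionId);
    // Then block until the job finishes.
    rj.waitForCompletion();
    if (!rj.isSuccessful()) {
      throw new IOException("Compaction job " + rj.getID() + " failed");
    }
  }
}
Recording the job ID before waiting is the essential ordering here; it is what lets an operator correlate a stuck compaction with a Hadoop job after the fact.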
Use of org.apache.hadoop.mapred.RunningJob in project hive by apache.
The class PartialScanTask, method execute.
/**
 * Start a new map-reduce job to do a partial scan to calculate stats,
 * almost the same as BlockMergeTask or ExecDriver.
 */
@Override
public int execute(DriverContext driverContext) {
HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, CombineHiveInputFormat.class.getName());
success = true;
HiveFileFormatUtils.prepareJobOutput(job);
job.setOutputFormat(HiveOutputFormatImpl.class);
job.setMapperClass(work.getMapperClass());
Context ctx = driverContext.getCtx();
boolean ctxCreated = false;
try {
if (ctx == null) {
ctx = new Context(job);
ctxCreated = true;
}
} catch (IOException e) {
e.printStackTrace();
console.printError("Error launching map-reduce job", "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
return 5;
}
job.setMapOutputKeyClass(NullWritable.class);
job.setMapOutputValueClass(NullWritable.class);
if (work.getNumMapTasks() != null) {
job.setNumMapTasks(work.getNumMapTasks());
}
// zero reducers
job.setNumReduceTasks(0);
if (work.getMinSplitSize() != null) {
HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize().longValue());
}
if (work.getInputformat() != null) {
HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work.getInputformat());
}
String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
LOG.info("Using " + inpFormat);
try {
job.setInputFormat(JavaUtils.loadClass(inpFormat));
} catch (ClassNotFoundException e) {
throw new RuntimeException(e.getMessage(), e);
}
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(NullWritable.class);
int returnVal = 0;
RunningJob rj = null;
boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
String jobName = null;
if (noName && this.getQueryPlan() != null) {
int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
}
if (noName) {
// This is for a special case to ensure unit tests pass
job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
}
// pass aggregation key to mapper
HiveConf.setVar(job, HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX, work.getAggKey());
job.set(StatsSetupConst.STATS_TMP_LOC, work.getStatsTmpDir());
try {
addInputPaths(job, work);
MapredWork mrWork = new MapredWork();
mrWork.setMapWork(work);
Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpPath());
// remove the pwd from the conf file so that the job tracker doesn't show it in the logs
String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
if (pwd != null) {
HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
}
JobClient jc = new JobClient(job);
String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
if (!addedJars.isEmpty()) {
job.set("tmpjars", addedJars);
}
// make this client wait if the job tracker is not behaving well.
Throttle.checkJobTracker(job, LOG);
if (work.isGatheringStats()) {
// initialize stats publishing table
StatsPublisher statsPublisher;
StatsFactory factory = StatsFactory.newFactory(job);
if (factory != null) {
statsPublisher = factory.getStatsPublisher();
StatsCollectionContext sc = new StatsCollectionContext(job);
sc.setStatsTmpDir(work.getStatsTmpDir());
if (!statsPublisher.init(sc)) {
// creating stats table if not exists
if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
}
}
}
}
// Finally SUBMIT the JOB!
rj = jc.submitJob(job);
this.jobID = rj.getJobID();
returnVal = jobExecHelper.progress(rj, jc, ctx);
success = (returnVal == 0);
} catch (Exception e) {
e.printStackTrace();
setException(e);
String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
if (rj != null) {
mesg = "Ended Job = " + rj.getJobID() + mesg;
} else {
mesg = "Job Submission failed" + mesg;
}
// Has to use full name to make sure it does not conflict with
// org.apache.commons.lang.StringUtils
console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
success = false;
returnVal = 1;
} finally {
try {
if (ctxCreated) {
ctx.clear();
}
if (rj != null) {
if (returnVal != 0) {
rj.killJob();
}
}
} catch (Exception e) {
LOG.warn("Failed in cleaning up ", e);
} finally {
HadoopJobExecHelper.runningJobs.remove(rj);
}
}
return (returnVal);
}
Use of org.apache.hadoop.mapred.RunningJob in project hadoop by apache.
The class TestMultithreadedMapRunner, method run.
private void run(boolean ioEx, boolean rtEx) throws Exception {
Path inDir = new Path("testing/mt/input");
Path outDir = new Path("testing/mt/output");
// Hack for local FS that does not have the concept of a 'mounting point'
if (isLocalFS()) {
String localPathRoot = System.getProperty("test.build.data", "/tmp").replace(' ', '+');
inDir = new Path(localPathRoot, inDir);
outDir = new Path(localPathRoot, outDir);
}
JobConf conf = createJobConf();
FileSystem fs = FileSystem.get(conf);
fs.delete(outDir, true);
if (!fs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
DataOutputStream file = fs.create(new Path(inDir, "part-0"));
file.writeBytes("a\nb\n\nc\nd\ne");
file.close();
}
conf.setJobName("mt");
conf.setInputFormat(TextInputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapOutputKeyClass(LongWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(IDMap.class);
conf.setReducerClass(IDReduce.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
conf.setMapRunnerClass(MultithreadedMapRunner.class);
conf.setInt(MultithreadedMapper.NUM_THREADS, 2);
if (ioEx) {
conf.setBoolean("multithreaded.ioException", true);
}
if (rtEx) {
conf.setBoolean("multithreaded.runtimeException", true);
}
JobClient jc = new JobClient(conf);
RunningJob job = jc.submitJob(conf);
while (!job.isComplete()) {
Thread.sleep(100);
}
if (job.isSuccessful()) {
assertFalse(ioEx || rtEx);
} else {
assertTrue(ioEx || rtEx);
}
}
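The test submits through JobClient.submitJob() rather than the blocking runJob() because it needs to observe both outcomes: a clean run must report isSuccessful(), while an injected exception must surface as a complete but unsuccessful job. A compact sketch of that assertion helper, assuming JUnit 4 on the classpath as in the original test; assertJobOutcome is a hypothetical name:
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import static org.junit.Assert.assertEquals;

public class JobOutcomeAssertSketch {
  // Submit the job, poll isComplete() as the test above does, then assert
  // on the expected success/failure outcome.
  static void assertJobOutcome(JobConf conf, boolean expectSuccess) throws Exception {
    RunningJob job = new JobClient(conf).submitJob(conf);
    while (!job.isComplete()) {
      Thread.sleep(100);
    }
    assertEquals(expectSuccess, job.isSuccessful());
  }
}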
Use of org.apache.hadoop.mapred.RunningJob in project hadoop by apache.
The class TestEncryptedShuffle, method encryptedShuffleWithCerts.
private void encryptedShuffleWithCerts(boolean useClientCerts) throws Exception {
try {
Configuration conf = new Configuration();
String keystoresDir = new File(BASEDIR).getAbsolutePath();
String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, useClientCerts);
conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
startCluster(conf);
FileSystem fs = FileSystem.get(getJobConf());
Path inputDir = new Path("input");
fs.mkdirs(inputDir);
Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
writer.write("hello");
writer.close();
Path outputDir = new Path("output", "output");
JobConf jobConf = new JobConf(getJobConf());
jobConf.setInt("mapred.map.tasks", 1);
jobConf.setInt("mapred.map.max.attempts", 1);
jobConf.setInt("mapred.reduce.max.attempts", 1);
jobConf.set("mapred.input.dir", inputDir.toString());
jobConf.set("mapred.output.dir", outputDir.toString());
JobClient jobClient = new JobClient(jobConf);
RunningJob runJob = jobClient.submitJob(jobConf);
runJob.waitForCompletion();
Assert.assertTrue(runJob.isComplete());
Assert.assertTrue(runJob.isSuccessful());
} finally {
stopCluster();
}
}
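Once waitForCompletion() returns, the same RunningJob handle can be queried for more than the boolean outcome, for example its counters. A small sketch of inspecting built-in counters after completion; the original test does not do this, and the use of TaskCounter assumes a Hadoop 2.x counter layout:
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CounterInspectionSketch {
  static void runAndReport(JobConf jobConf) throws Exception {
    RunningJob runJob = new JobClient(jobConf).submitJob(jobConf);
    runJob.waitForCompletion();
    if (runJob.isSuccessful()) {
      // getCounters() can return null if the job has already been retired.
      Counters counters = runJob.getCounters();
      if (counters != null) {
        // TaskCounter is the Hadoop 2.x built-in counter enum (an assumption here).
        long mapInputRecords = counters.getCounter(TaskCounter.MAP_INPUT_RECORDS);
        System.out.println("map input records: " + mapInputRecords);
      }
    }
  }
}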