
Example 46 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project Cloud9 by lintool.

the class FileMerger method mergeFilesDistribute.

private static Path mergeFilesDistribute(Configuration configuration, String inputFiles, String outputFile, int numberOfMappers, Class<? extends Writable> keyClass, Class<? extends Writable> valueClass, Class<? extends FileInputFormat> fileInputClass, Class<? extends FileOutputFormat> fileOutputClass, boolean deleteSource, boolean deleteDestinationFileIfExist) throws IOException {
    JobConf conf = new JobConf(configuration, FileMerger.class);
    conf.setJobName(FileMerger.class.getSimpleName());
    FileSystem fs = FileSystem.get(conf);
    sLogger.info("Tool: " + FileMerger.class.getSimpleName());
    sLogger.info(" - merge files from: " + inputFiles);
    sLogger.info(" - merge files to: " + outputFile);
    conf.setNumMapTasks(numberOfMappers);
    conf.setNumReduceTasks(1);
    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(IdentityReducer.class);
    conf.setMapOutputKeyClass(keyClass);
    conf.setMapOutputValueClass(valueClass);
    conf.setOutputKeyClass(keyClass);
    conf.setOutputValueClass(valueClass);
    conf.setInputFormat(fileInputClass);
    conf.setOutputFormat(fileOutputClass);
    Path inputPath = new Path(inputFiles);
    Path mergePath = new Path(inputPath.getParent().toString() + Path.SEPARATOR + MERGE + generateRandomString());
    Preconditions.checkArgument(!fs.exists(mergePath), new IOException("Intermediate merge directory already exists..."));
    Path outputPath = new Path(outputFile);
    if (deleteDestinationFileIfExist) {
        if (fs.exists(outputPath)) {
            // carefully remove the destination file (non-recursive delete)
            fs.delete(outputPath, false);
            sLogger.info("Warning: removing destination file since it already exists...");
        }
    } else {
        Preconditions.checkArgument(!fs.exists(outputPath), new IOException("Destination file already exists..."));
    }
    FileInputFormat.setInputPaths(conf, inputPath);
    FileOutputFormat.setOutputPath(conf, mergePath);
    FileOutputFormat.setCompressOutput(conf, true);
    try {
        long startTime = System.currentTimeMillis();
        RunningJob job = JobClient.runJob(conf);
        sLogger.info("Merge Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");
        fs.rename(new Path(mergePath.toString() + Path.SEPARATOR + "part-00000"), outputPath);
        if (deleteSource) {
            for (FileStatus fileStatus : fs.globStatus(inputPath)) {
                fs.deleteOnExit(fileStatus.getPath());
            }
        }
    } finally {
        fs.delete(mergePath, true);
    }
    sLogger.info("Successfully merge " + inputFiles.toString() + " to " + outputFile);
    return outputPath;
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) RunningJob(org.apache.hadoop.mapred.RunningJob) IOException(java.io.IOException) JobConf(org.apache.hadoop.mapred.JobConf)
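
This example combines two RunningJob idioms: the blocking JobClient.runJob(JobConf) call, and forcing a single reducer so the merged output lands in exactly one part file that can then be renamed into place. A minimal sketch of that core pattern, assuming the caller has already configured the input/output formats and key/value classes as above (the class name MergeSketch and the helper mergeToSingleFile are hypothetical):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;

public class MergeSketch {
    // Hypothetical helper: run an identity map-reduce with a single reducer so that
    // all input records end up in one part file, then move that file to the target path.
    // Input/output formats and key/value classes are assumed to be set on conf already.
    static Path mergeToSingleFile(JobConf conf, Path input, Path tmpOut, Path target) throws IOException {
        conf.setMapperClass(IdentityMapper.class);
        conf.setReducerClass(IdentityReducer.class);
        // one reducer => exactly one output file (part-00000)
        conf.setNumReduceTasks(1);
        FileInputFormat.setInputPaths(conf, input);
        FileOutputFormat.setOutputPath(conf, tmpOut);
        // runJob blocks until the job finishes and returns the completed RunningJob
        RunningJob job = JobClient.runJob(conf);
        FileSystem fs = FileSystem.get(conf);
        fs.rename(new Path(tmpOut, "part-00000"), target);
        return target;
    }
}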

Example 47 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project hive by apache.

the class CompactorMR method launchCompactionJob.

private void launchCompactionJob(JobConf job, Path baseDir, CompactionType compactionType, StringableList dirsToSearch, List<AcidUtils.ParsedDelta> parsedDeltas, int curDirNumber, int obsoleteDirNumber, HiveConf hiveConf, TxnStore txnHandler, long id) throws IOException {
    job.setBoolean(IS_MAJOR, compactionType == CompactionType.MAJOR);
    if (dirsToSearch == null) {
        dirsToSearch = new StringableList();
    }
    StringableList deltaDirs = new StringableList();
    long minTxn = Long.MAX_VALUE;
    long maxTxn = Long.MIN_VALUE;
    for (AcidUtils.ParsedDelta delta : parsedDeltas) {
        LOG.debug("Adding delta " + delta.getPath() + " to directories to search");
        dirsToSearch.add(delta.getPath());
        deltaDirs.add(delta.getPath());
        minTxn = Math.min(minTxn, delta.getMinTransaction());
        maxTxn = Math.max(maxTxn, delta.getMaxTransaction());
    }
    if (baseDir != null)
        job.set(BASE_DIR, baseDir.toString());
    job.set(DELTA_DIRS, deltaDirs.toString());
    job.set(DIRS_TO_SEARCH, dirsToSearch.toString());
    job.setLong(MIN_TXN, minTxn);
    job.setLong(MAX_TXN, maxTxn);
    if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)) {
        mrJob = job;
    }
    LOG.info("Submitting " + compactionType + " compaction job '" + job.getJobName() + "' to " + job.getQueueName() + " queue.  " + "(current delta dirs count=" + curDirNumber + ", obsolete delta dirs count=" + obsoleteDirNumber + ". TxnIdRange[" + minTxn + "," + maxTxn + "]");
    RunningJob rj = new JobClient(job).submitJob(job);
    LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID() + " compaction ID=" + id);
    txnHandler.setHadoopJobId(rj.getID().toString(), id);
    rj.waitForCompletion();
}
Also used : RunningJob(org.apache.hadoop.mapred.RunningJob) JobClient(org.apache.hadoop.mapred.JobClient) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils)
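
Here the job is submitted asynchronously with JobClient.submitJob rather than the blocking JobClient.runJob, so the Hadoop job id can be recorded externally (via txnHandler.setHadoopJobId) before the method blocks on waitForCompletion. A minimal sketch of that submit-record-wait pattern, with hypothetical class and method names:

import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class SubmitAndTrackSketch {
    // Hypothetical sketch: submit asynchronously, expose the Hadoop job id to an
    // external tracker (as CompactorMR does), then block until the job finishes.
    static void submitAndWait(JobConf job) throws IOException {
        RunningJob rj = new JobClient(job).submitJob(job); // returns immediately
        String hadoopJobId = rj.getID().toString();        // e.g. "job_<cluster>_<seq>"
        System.out.println("Submitted " + job.getJobName() + " as " + hadoopJobId);
        rj.waitForCompletion();                            // block until done
        if (!rj.isSuccessful()) {
            throw new IOException("Job " + hadoopJobId + " failed");
        }
    }
}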

Example 48 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project hive by apache.

the class PartialScanTask method execute.

/**
 * Start a new map-reduce job to do partial scan to calculate Stats,
 * almost the same as BlockMergeTask or ExecDriver.
 */
@Override
public int execute(DriverContext driverContext) {
    HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, CombineHiveInputFormat.class.getName());
    success = true;
    HiveFileFormatUtils.prepareJobOutput(job);
    job.setOutputFormat(HiveOutputFormatImpl.class);
    job.setMapperClass(work.getMapperClass());
    Context ctx = driverContext.getCtx();
    boolean ctxCreated = false;
    try {
        if (ctx == null) {
            ctx = new Context(job);
            ctxCreated = true;
        }
    } catch (IOException e) {
        e.printStackTrace();
        console.printError("Error launching map-reduce job", "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return 5;
    }
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    if (work.getNumMapTasks() != null) {
        job.setNumMapTasks(work.getNumMapTasks());
    }
    // zero reducers
    job.setNumReduceTasks(0);
    if (work.getMinSplitSize() != null) {
        HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize().longValue());
    }
    if (work.getInputformat() != null) {
        HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work.getInputformat());
    }
    String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
    LOG.info("Using " + inpFormat);
    try {
        job.setInputFormat(JavaUtils.loadClass(inpFormat));
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    int returnVal = 0;
    RunningJob rj = null;
    boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
    String jobName = null;
    if (noName && this.getQueryPlan() != null) {
        int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
        jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
    }
    if (noName) {
        // This is for a special case to ensure unit tests pass
        job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
    }
    // pass aggregation key to mapper
    HiveConf.setVar(job, HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX, work.getAggKey());
    job.set(StatsSetupConst.STATS_TMP_LOC, work.getStatsTmpDir());
    try {
        addInputPaths(job, work);
        MapredWork mrWork = new MapredWork();
        mrWork.setMapWork(work);
        Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpPath());
        // remove the password from the conf file so that the job tracker doesn't
        // show it in its logs
        String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
        if (pwd != null) {
            HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
        }
        JobClient jc = new JobClient(job);
        String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
        if (!addedJars.isEmpty()) {
            job.set("tmpjars", addedJars);
        }
        // make this client wait if the job tracker is not behaving well.
        Throttle.checkJobTracker(job, LOG);
        if (work.isGatheringStats()) {
            // initialize stats publishing table
            StatsPublisher statsPublisher;
            StatsFactory factory = StatsFactory.newFactory(job);
            if (factory != null) {
                statsPublisher = factory.getStatsPublisher();
                StatsCollectionContext sc = new StatsCollectionContext(job);
                sc.setStatsTmpDir(work.getStatsTmpDir());
                if (!statsPublisher.init(sc)) {
                    // creating stats table if not exists
                    if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
                        throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
                    }
                }
            }
        }
        // Finally SUBMIT the JOB!
        rj = jc.submitJob(job);
        this.jobID = rj.getJobID();
        returnVal = jobExecHelper.progress(rj, jc, ctx);
        success = (returnVal == 0);
    } catch (Exception e) {
        e.printStackTrace();
        setException(e);
        String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
        if (rj != null) {
            mesg = "Ended Job = " + rj.getJobID() + mesg;
        } else {
            mesg = "Job Submission failed" + mesg;
        }
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils
        console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        success = false;
        returnVal = 1;
    } finally {
        try {
            if (ctxCreated) {
                ctx.clear();
            }
            if (rj != null) {
                if (returnVal != 0) {
                    rj.killJob();
                }
            }
        } catch (Exception e) {
            LOG.warn("Failed in cleaning up ", e);
        } finally {
            HadoopJobExecHelper.runningJobs.remove(rj);
        }
    }
    return (returnVal);
}
Also used : StatsCollectionContext(org.apache.hadoop.hive.ql.stats.StatsCollectionContext) Context(org.apache.hadoop.hive.ql.Context) DriverContext(org.apache.hadoop.hive.ql.DriverContext) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) CombineHiveInputFormat(org.apache.hadoop.hive.ql.io.CombineHiveInputFormat) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) IOException(java.io.IOException) JobClient(org.apache.hadoop.mapred.JobClient) StatsPublisher(org.apache.hadoop.hive.ql.stats.StatsPublisher) StatsFactory(org.apache.hadoop.hive.ql.stats.StatsFactory) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) RunningJob(org.apache.hadoop.mapred.RunningJob)
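
The RunningJob lifecycle in this task is: submit via JobClient.submitJob, monitor progress until completion, and kill the job in the finally block if the task is exiting with an error. A stripped-down sketch of that lifecycle, assuming a preconfigured JobConf; Hive's jobExecHelper.progress(...) does much richer console reporting than the simple loop shown here, and the class and method names are hypothetical:

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class MonitorAndKillSketch {
    // Hypothetical sketch of the submit / monitor / kill-on-error lifecycle used above.
    static int runMonitored(JobConf conf) {
        RunningJob rj = null;
        int returnVal = 0;
        try {
            JobClient jc = new JobClient(conf);
            rj = jc.submitJob(conf);
            while (!rj.isComplete()) {
                System.out.printf("map %.0f%% reduce %.0f%%%n",
                        rj.mapProgress() * 100, rj.reduceProgress() * 100);
                Thread.sleep(1000);
            }
            returnVal = rj.isSuccessful() ? 0 : 1;
        } catch (Exception e) {
            returnVal = 1;
        } finally {
            try {
                // mirror the cleanup above: kill the job if we are exiting with an error
                if (rj != null && returnVal != 0) {
                    rj.killJob();
                }
            } catch (Exception e) {
                // best-effort cleanup; nothing more to do
            }
        }
        return returnVal;
    }
}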

Example 49 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project hadoop by apache.

the class TestMultithreadedMapRunner method run.

private void run(boolean ioEx, boolean rtEx) throws Exception {
    Path inDir = new Path("testing/mt/input");
    Path outDir = new Path("testing/mt/output");
    // Hack for local FS that does not have the concept of a 'mounting point'
    if (isLocalFS()) {
        String localPathRoot = System.getProperty("test.build.data", "/tmp").replace(' ', '+');
        inDir = new Path(localPathRoot, inDir);
        outDir = new Path(localPathRoot, outDir);
    }
    JobConf conf = createJobConf();
    FileSystem fs = FileSystem.get(conf);
    fs.delete(outDir, true);
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    {
        DataOutputStream file = fs.create(new Path(inDir, "part-0"));
        file.writeBytes("a\nb\n\nc\nd\ne");
        file.close();
    }
    conf.setJobName("mt");
    conf.setInputFormat(TextInputFormat.class);
    conf.setOutputKeyClass(LongWritable.class);
    conf.setOutputValueClass(Text.class);
    conf.setMapOutputKeyClass(LongWritable.class);
    conf.setMapOutputValueClass(Text.class);
    conf.setOutputFormat(TextOutputFormat.class);
    conf.setOutputKeyClass(LongWritable.class);
    conf.setOutputValueClass(Text.class);
    conf.setMapperClass(IDMap.class);
    conf.setReducerClass(IDReduce.class);
    FileInputFormat.setInputPaths(conf, inDir);
    FileOutputFormat.setOutputPath(conf, outDir);
    conf.setMapRunnerClass(MultithreadedMapRunner.class);
    conf.setInt(MultithreadedMapper.NUM_THREADS, 2);
    if (ioEx) {
        conf.setBoolean("multithreaded.ioException", true);
    }
    if (rtEx) {
        conf.setBoolean("multithreaded.runtimeException", true);
    }
    JobClient jc = new JobClient(conf);
    RunningJob job = jc.submitJob(conf);
    while (!job.isComplete()) {
        Thread.sleep(100);
    }
    if (job.isSuccessful()) {
        assertFalse(ioEx || rtEx);
    } else {
        assertTrue(ioEx || rtEx);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DataOutputStream(java.io.DataOutputStream) FileSystem(org.apache.hadoop.fs.FileSystem) RunningJob(org.apache.hadoop.mapred.RunningJob) IOException(java.io.IOException) JobConf(org.apache.hadoop.mapred.JobConf) JobClient(org.apache.hadoop.mapred.JobClient)
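
Instead of calling waitForCompletion, the test polls RunningJob.isComplete() in a sleep loop and then inspects isSuccessful(), which keeps control of the waiting thread and the sleep interval. A minimal sketch of that polling idiom (class and method names are hypothetical):

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class PollSketch {
    // Hypothetical sketch: submit, poll for completion, report success.
    static boolean runAndPoll(JobConf conf) throws Exception {
        RunningJob job = new JobClient(conf).submitJob(conf);
        while (!job.isComplete()) {
            Thread.sleep(100);
        }
        return job.isSuccessful();
    }
}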

Example 50 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project hadoop by apache.

the class TestEncryptedShuffle method encryptedShuffleWithCerts.

private void encryptedShuffleWithCerts(boolean useClientCerts) throws Exception {
    try {
        Configuration conf = new Configuration();
        String keystoresDir = new File(BASEDIR).getAbsolutePath();
        String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
        KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, useClientCerts);
        conf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true);
        startCluster(conf);
        FileSystem fs = FileSystem.get(getJobConf());
        Path inputDir = new Path("input");
        fs.mkdirs(inputDir);
        Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
        writer.write("hello");
        writer.close();
        Path outputDir = new Path("output", "output");
        JobConf jobConf = new JobConf(getJobConf());
        jobConf.setInt("mapred.map.tasks", 1);
        jobConf.setInt("mapred.map.max.attempts", 1);
        jobConf.setInt("mapred.reduce.max.attempts", 1);
        jobConf.set("mapred.input.dir", inputDir.toString());
        jobConf.set("mapred.output.dir", outputDir.toString());
        JobClient jobClient = new JobClient(jobConf);
        RunningJob runJob = jobClient.submitJob(jobConf);
        runJob.waitForCompletion();
        Assert.assertTrue(runJob.isComplete());
        Assert.assertTrue(runJob.isSuccessful());
    } finally {
        stopCluster();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) RunningJob(org.apache.hadoop.mapred.RunningJob) OutputStreamWriter(java.io.OutputStreamWriter) File(java.io.File) JobConf(org.apache.hadoop.mapred.JobConf) JobClient(org.apache.hadoop.mapred.JobClient) FileWriter(java.io.FileWriter) Writer(java.io.Writer)
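
From the RunningJob point of view, encrypted shuffle is purely a configuration concern; the submission code is the ordinary submitJob / waitForCompletion / status-check sequence. A minimal sketch of that sequence, assuming the SSL keystores and cluster setup shown above are already in place (class and method names are hypothetical):

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapreduce.MRConfig;

public class EncryptedShuffleSketch {
    // Hypothetical sketch: enable SSL for the shuffle, then submit and wait.
    static void runWithEncryptedShuffle(JobConf jobConf) throws Exception {
        jobConf.setBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY, true); // requires SSL keystores to be set up
        RunningJob runJob = new JobClient(jobConf).submitJob(jobConf);
        runJob.waitForCompletion(); // blocks; does not throw on job failure
        if (!runJob.isComplete() || !runJob.isSuccessful()) {
            throw new IllegalStateException("encrypted-shuffle job failed");
        }
    }
}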

Aggregations

RunningJob (org.apache.hadoop.mapred.RunningJob): 93
JobConf (org.apache.hadoop.mapred.JobConf): 65
Path (org.apache.hadoop.fs.Path): 49
JobClient (org.apache.hadoop.mapred.JobClient): 33
IOException (java.io.IOException): 28
FileSystem (org.apache.hadoop.fs.FileSystem): 28
DMLConfig (org.apache.sysml.conf.DMLConfig): 27
Group (org.apache.hadoop.mapred.Counters.Group): 26
Counters (org.apache.hadoop.mapred.Counters): 17
Configuration (org.apache.hadoop.conf.Configuration): 14
MatrixChar_N_ReducerGroups (org.apache.sysml.runtime.matrix.mapred.MRJobConfiguration.MatrixChar_N_ReducerGroups): 13
InputInfo (org.apache.sysml.runtime.matrix.data.InputInfo): 10
DMLRuntimeException (org.apache.sysml.runtime.DMLRuntimeException): 8
File (java.io.File): 6
TaggedMatrixBlock (org.apache.sysml.runtime.matrix.data.TaggedMatrixBlock): 6
DataOutputStream (java.io.DataOutputStream): 5
URI (java.net.URI): 5
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 5
Context (org.apache.hadoop.hive.ql.Context): 5
Text (org.apache.hadoop.io.Text): 5