
Example 11 with ClusterStatus

Use of org.apache.hadoop.mapred.ClusterStatus in project hadoop by apache.

The run method of the Sort class.

/**
   * The main driver for sort program.
   * Invoke this method to submit the map/reduce job.
   * @throws IOException When there is communication problems with the 
   *                     job tracker.
   */
public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    JobClient client = new JobClient(conf);
    ClusterStatus cluster = client.getClusterStatus();
    int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
    String sort_reduces = conf.get(REDUCES_PER_HOST);
    if (sort_reduces != null) {
        num_reduces = cluster.getTaskTrackers() * Integer.parseInt(sort_reduces);
    }
    Class<? extends InputFormat> inputFormatClass = SequenceFileInputFormat.class;
    Class<? extends OutputFormat> outputFormatClass = SequenceFileOutputFormat.class;
    Class<? extends WritableComparable> outputKeyClass = BytesWritable.class;
    Class<? extends Writable> outputValueClass = BytesWritable.class;
    List<String> otherArgs = new ArrayList<String>();
    InputSampler.Sampler<K, V> sampler = null;
    for (int i = 0; i < args.length; ++i) {
        try {
            if ("-r".equals(args[i])) {
                num_reduces = Integer.parseInt(args[++i]);
            } else if ("-inFormat".equals(args[i])) {
                inputFormatClass = Class.forName(args[++i]).asSubclass(InputFormat.class);
            } else if ("-outFormat".equals(args[i])) {
                outputFormatClass = Class.forName(args[++i]).asSubclass(OutputFormat.class);
            } else if ("-outKey".equals(args[i])) {
                outputKeyClass = Class.forName(args[++i]).asSubclass(WritableComparable.class);
            } else if ("-outValue".equals(args[i])) {
                outputValueClass = Class.forName(args[++i]).asSubclass(Writable.class);
            } else if ("-totalOrder".equals(args[i])) {
                double pcnt = Double.parseDouble(args[++i]);
                int numSamples = Integer.parseInt(args[++i]);
                int maxSplits = Integer.parseInt(args[++i]);
                if (maxSplits <= 0) {
                    maxSplits = Integer.MAX_VALUE;
                }
                sampler = new InputSampler.RandomSampler<K, V>(pcnt, numSamples, maxSplits);
            } else {
                otherArgs.add(args[i]);
            }
        } catch (NumberFormatException except) {
            System.out.println("ERROR: Integer expected instead of " + args[i]);
            return printUsage();
        } catch (ArrayIndexOutOfBoundsException except) {
            System.out.println("ERROR: Required parameter missing from " + args[i - 1]);
            // print the usage message and return an error code
            return printUsage();
        }
    }
    // Set user-supplied (possibly default) job configs
    job = Job.getInstance(conf);
    job.setJobName("sorter");
    job.setJarByClass(Sort.class);
    job.setMapperClass(Mapper.class);
    job.setReducerClass(Reducer.class);
    job.setNumReduceTasks(num_reduces);
    job.setInputFormatClass(inputFormatClass);
    job.setOutputFormatClass(outputFormatClass);
    job.setOutputKeyClass(outputKeyClass);
    job.setOutputValueClass(outputValueClass);
    // Make sure there are exactly 2 parameters left.
    if (otherArgs.size() != 2) {
        System.out.println("ERROR: Wrong number of parameters: " + otherArgs.size() + " instead of 2.");
        return printUsage();
    }
    FileInputFormat.setInputPaths(job, otherArgs.get(0));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(1)));
    if (sampler != null) {
        System.out.println("Sampling input to effect total-order sort...");
        job.setPartitionerClass(TotalOrderPartitioner.class);
        Path inputDir = FileInputFormat.getInputPaths(job)[0];
        FileSystem fs = inputDir.getFileSystem(conf);
        inputDir = inputDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
        Path partitionFile = new Path(inputDir, "_sortPartitioning");
        TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
        InputSampler.<K, V>writePartitionFile(job, sampler);
        URI partitionUri = new URI(partitionFile.toString() + "#" + "_sortPartitioning");
        job.addCacheFile(partitionUri);
    }
    System.out.println("Running on " + cluster.getTaskTrackers() + " nodes to sort from " + FileInputFormat.getInputPaths(job)[0] + " into " + FileOutputFormat.getOutputPath(job) + " with " + num_reduces + " reduces.");
    Date startTime = new Date();
    System.out.println("Job started: " + startTime);
    int ret = job.waitForCompletion(true) ? 0 : 1;
    Date end_time = new Date();
    System.out.println("Job ended: " + end_time);
    System.out.println("The job took " + (end_time.getTime() - startTime.getTime()) / 1000 + " seconds.");
    return ret;
}
Also used: SequenceFileOutputFormat (org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat), InputSampler (org.apache.hadoop.mapreduce.lib.partition.InputSampler), Configuration (org.apache.hadoop.conf.Configuration), Writable (org.apache.hadoop.io.Writable), BytesWritable (org.apache.hadoop.io.BytesWritable), JobClient (org.apache.hadoop.mapred.JobClient), URI (java.net.URI), FileSystem (org.apache.hadoop.fs.FileSystem), Path (org.apache.hadoop.fs.Path), SequenceFileInputFormat (org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat), FileOutputFormat (org.apache.hadoop.mapreduce.lib.output.FileOutputFormat), ClusterStatus (org.apache.hadoop.mapred.ClusterStatus)
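
For context, a minimal sketch of how a driver like this is typically launched (assuming Sort implements org.apache.hadoop.util.Tool, as the Hadoop example programs do; SortLauncher is a hypothetical wrapper class):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class SortLauncher {
    public static void main(String[] args) throws Exception {
        // ToolRunner strips the generic Hadoop options (-D, -fs, -jt, ...)
        // from args before delegating the remaining arguments to Sort.run().
        int exitCode = ToolRunner.run(new Configuration(), new Sort(), args);
        System.exit(exitCode);
    }
}

Running it through ToolRunner is what lets users override REDUCES_PER_HOST and other configuration on the command line without touching the driver code.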

Example 12 with ClusterStatus

Use of org.apache.hadoop.mapred.ClusterStatus in project hadoop by apache.

The update method of the StressJobFactory class.

/**
   * STRESS mode: once notified by the StatsCollector, read the collected
   * cluster metrics and update the current loadStatus with the new load
   * status of the JobTracker.
   *
   * @param item the latest cluster statistics
   */
@Override
public void update(Statistics.ClusterStats item) {
    ClusterStatus clusterStatus = item.getStatus();
    try {
        // update the max cluster map/reduce task capacity
        loadStatus.updateMapCapacity(clusterStatus.getMaxMapTasks());
        loadStatus.updateReduceCapacity(clusterStatus.getMaxReduceTasks());
        int numTrackers = clusterStatus.getTaskTrackers();
        int jobLoad = (int) (maxJobTrackerRatio * numTrackers) - item.getNumRunningJob();
        loadStatus.updateJobLoad(jobLoad);
    } catch (Exception e) {
        LOG.error("Couldn't get the new Status", e);
    }
}
Also used: ClusterStatus (org.apache.hadoop.mapred.ClusterStatus), IOException (java.io.IOException)
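
The update itself is plain arithmetic over the ClusterStatus snapshot. A minimal self-contained sketch of the same bookkeeping (LoadTracker and its fields are hypothetical stand-ins for Gridmix's internal LoadStatus; the ClusterStatus getters are the real API):

import org.apache.hadoop.mapred.ClusterStatus;

// Hypothetical holder mirroring the updates made in update() above.
class LoadTracker {
    int mapCapacity;
    int reduceCapacity;
    int jobLoad;

    void refresh(ClusterStatus status, int numRunningJobs, double maxJobTrackerRatio) {
        mapCapacity = status.getMaxMapTasks();
        reduceCapacity = status.getMaxReduceTasks();
        // Positive jobLoad: the factory may submit more jobs;
        // zero or negative: the cluster is already saturated.
        jobLoad = (int) (maxJobTrackerRatio * status.getTaskTrackers()) - numRunningJobs;
    }
}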

Example 13 with ClusterStatus

Use of org.apache.hadoop.mapred.ClusterStatus in project incubator-systemml by apache.

The analyzeHadoopCluster method of the InfrastructureAnalyzer class.

/**
	 * Analyzes properties of the Hadoop cluster and its configuration.
	 */
private static void analyzeHadoopCluster() {
    try {
        JobConf job = ConfigurationManager.getCachedJobConf();
        JobClient client = new JobClient(job);
        ClusterStatus stat = client.getClusterStatus();
        // if in cluster mode
        if (stat != null) {
            // analyze cluster status
            _remotePar = stat.getTaskTrackers();
            _remoteParMap = stat.getMaxMapTasks();
            _remoteParReduce = stat.getMaxReduceTasks();
            // analyze pure configuration properties
            analyzeHadoopConfiguration();
        }
    } catch (IOException e) {
        throw new RuntimeException("Unable to analyze infrastructure.", e);
    }
}
Also used: IOException (java.io.IOException), JobConf (org.apache.hadoop.mapred.JobConf), JobClient (org.apache.hadoop.mapred.JobClient), ClusterStatus (org.apache.hadoop.mapred.ClusterStatus)
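
As a standalone illustration of the API all three examples share, a minimal sketch that connects to the cluster and prints the same three figures SystemML caches (ClusterProbe is a hypothetical class name; the JobClient and ClusterStatus calls are the real mapred API):

import java.io.IOException;

import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class ClusterProbe {
    public static void main(String[] args) throws IOException {
        // JobConf picks up the cluster settings found on the classpath
        // (e.g. mapred-site.xml).
        JobClient client = new JobClient(new JobConf());
        ClusterStatus stat = client.getClusterStatus();
        System.out.println("task trackers    : " + stat.getTaskTrackers());
        System.out.println("max map slots    : " + stat.getMaxMapTasks());
        System.out.println("max reduce slots : " + stat.getMaxReduceTasks());
    }
}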

Aggregations

ClusterStatus (org.apache.hadoop.mapred.ClusterStatus): 13 uses
JobClient (org.apache.hadoop.mapred.JobClient): 11 uses
Configuration (org.apache.hadoop.conf.Configuration): 7 uses
Path (org.apache.hadoop.fs.Path): 7 uses
Date (java.util.Date): 6 uses
IOException (java.io.IOException): 4 uses
JobConf (org.apache.hadoop.mapred.JobConf): 4 uses
FileOutputFormat (org.apache.hadoop.mapreduce.lib.output.FileOutputFormat): 4 uses
SequenceFileOutputFormat (org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat): 4 uses
ArrayList (java.util.ArrayList): 3 uses
BytesWritable (org.apache.hadoop.io.BytesWritable): 2 uses
Writable (org.apache.hadoop.io.Writable): 2 uses
SequenceFileInputFormat (org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat): 2 uses
URI (java.net.URI): 1 use
FileSystem (org.apache.hadoop.fs.FileSystem): 1 use
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 1 use
AuthorizationException (org.apache.hadoop.hive.ql.metadata.AuthorizationException): 1 use
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 1 use
Job (org.apache.hadoop.mapreduce.Job): 1 use
OutputFormat (org.apache.hadoop.mapreduce.OutputFormat): 1 use