Use of org.apache.hadoop.mapred.ClusterStatus in project hadoop by apache.
The class Sort, method run().
/**
 * The main driver for the sort program.
 * Invoke this method to submit the map/reduce job.
 * @throws IOException when there are communication problems with the
 *         job tracker.
 */
public int run(String[] args) throws Exception {
  Configuration conf = getConf();
  JobClient client = new JobClient(conf);
  ClusterStatus cluster = client.getClusterStatus();
  // By default, use 90% of the cluster's maximum reduce-task capacity.
  int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
  // If REDUCES_PER_HOST is set, derive the reduce count from the number
  // of task trackers instead.
  String sort_reduces = conf.get(REDUCES_PER_HOST);
  if (sort_reduces != null) {
    num_reduces = cluster.getTaskTrackers() * Integer.parseInt(sort_reduces);
  }
  Class<? extends InputFormat> inputFormatClass = SequenceFileInputFormat.class;
  Class<? extends OutputFormat> outputFormatClass = SequenceFileOutputFormat.class;
  Class<? extends WritableComparable> outputKeyClass = BytesWritable.class;
  Class<? extends Writable> outputValueClass = BytesWritable.class;
  List<String> otherArgs = new ArrayList<String>();
  InputSampler.Sampler<K, V> sampler = null;
  for (int i = 0; i < args.length; ++i) {
    try {
      if ("-r".equals(args[i])) {
        num_reduces = Integer.parseInt(args[++i]);
      } else if ("-inFormat".equals(args[i])) {
        inputFormatClass = Class.forName(args[++i]).asSubclass(InputFormat.class);
      } else if ("-outFormat".equals(args[i])) {
        outputFormatClass = Class.forName(args[++i]).asSubclass(OutputFormat.class);
      } else if ("-outKey".equals(args[i])) {
        outputKeyClass = Class.forName(args[++i]).asSubclass(WritableComparable.class);
      } else if ("-outValue".equals(args[i])) {
        outputValueClass = Class.forName(args[++i]).asSubclass(Writable.class);
      } else if ("-totalOrder".equals(args[i])) {
        double pcnt = Double.parseDouble(args[++i]);
        int numSamples = Integer.parseInt(args[++i]);
        int maxSplits = Integer.parseInt(args[++i]);
        if (maxSplits <= 0) {
          maxSplits = Integer.MAX_VALUE;
        }
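        // RandomSampler(freq, numSamples, maxSplitsSampled): freq is the
        // probability of selecting each record, numSamples caps the total
        // number of samples, and maxSplitsSampled caps how many input
        // splits are examined.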
        sampler = new InputSampler.RandomSampler<K, V>(pcnt, numSamples, maxSplits);
      } else {
        otherArgs.add(args[i]);
      }
    } catch (NumberFormatException except) {
      System.out.println("ERROR: Integer expected instead of " + args[i]);
      return printUsage();
    } catch (ArrayIndexOutOfBoundsException except) {
      System.out.println("ERROR: Required parameter missing from " + args[i - 1]);
      // exits
      return printUsage();
    }
  }
  // Set user-supplied (possibly default) job configs.
  job = Job.getInstance(conf);
  job.setJobName("sorter");
  job.setJarByClass(Sort.class);
  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);
  job.setNumReduceTasks(num_reduces);
  job.setInputFormatClass(inputFormatClass);
  job.setOutputFormatClass(outputFormatClass);
  job.setOutputKeyClass(outputKeyClass);
  job.setOutputValueClass(outputValueClass);
  // Make sure there are exactly 2 parameters left: the input and output paths.
  if (otherArgs.size() != 2) {
    System.out.println("ERROR: Wrong number of parameters: " +
        otherArgs.size() + " instead of 2.");
    return printUsage();
  }
  FileInputFormat.setInputPaths(job, otherArgs.get(0));
  FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(1)));
  if (sampler != null) {
    System.out.println("Sampling input to effect total-order sort...");
    job.setPartitionerClass(TotalOrderPartitioner.class);
    Path inputDir = FileInputFormat.getInputPaths(job)[0];
    FileSystem fs = inputDir.getFileSystem(conf);
    inputDir = inputDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
    Path partitionFile = new Path(inputDir, "_sortPartitioning");
    TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
    InputSampler.<K, V>writePartitionFile(job, sampler);
    // The URI fragment names the symlink under which the cached partition
    // file appears in each task's working directory.
    URI partitionUri = new URI(partitionFile.toString() + "#" + "_sortPartitioning");
    job.addCacheFile(partitionUri);
  }
System.out.println("Running on " + cluster.getTaskTrackers() + " nodes to sort from " + FileInputFormat.getInputPaths(job)[0] + " into " + FileOutputFormat.getOutputPath(job) + " with " + num_reduces + " reduces.");
  Date startTime = new Date();
  System.out.println("Job started: " + startTime);
  int ret = job.waitForCompletion(true) ? 0 : 1;
  Date end_time = new Date();
  System.out.println("Job ended: " + end_time);
  System.out.println("The job took " +
      (end_time.getTime() - startTime.getTime()) / 1000 + " seconds.");
  return ret;
}
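For context, a minimal sketch of how this driver is typically invoked. It assumes the surrounding Sort<K, V> class extends Configured and implements Tool, as the Hadoop example does; the SortDriver class name and argument values here are illustrative, not part of the snippet above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.util.ToolRunner;

public class SortDriver {
  public static void main(String[] args) throws Exception {
    // ToolRunner parses the generic Hadoop options (-D, -fs, -jt, ...)
    // before handing the remaining arguments to Sort.run(String[]).
    int res = ToolRunner.run(new Configuration(),
        new Sort<BytesWritable, BytesWritable>(), args);
    System.exit(res);
  }
}

A shell invocation might then look like: hadoop jar <examples-jar> sort -r 20 -totalOrder 0.1 10000 10 <input-dir> <output-dir>, where the numbers are illustrative sampling parameters.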
Use of org.apache.hadoop.mapred.ClusterStatus in project hadoop by apache.
The class StressJobFactory, method update().
/**
 * STRESS mode: once notified by the StatsCollector, collect the cluster
 * metrics and update the current loadStatus with the new load status of
 * the JobTracker.
 *
 * @param item the latest cluster statistics
 */
@Override
public void update(Statistics.ClusterStats item) {
  ClusterStatus clusterStatus = item.getStatus();
  try {
    // Update the maximum cluster map/reduce task capacity.
    loadStatus.updateMapCapacity(clusterStatus.getMaxMapTasks());
    loadStatus.updateReduceCapacity(clusterStatus.getMaxReduceTasks());
    // The job load is the headroom between the number of jobs the cluster
    // may run (maxJobTrackerRatio per tracker) and the number now running.
    int numTrackers = clusterStatus.getTaskTrackers();
    int jobLoad = (int) (maxJobTrackerRatio * numTrackers) - item.getNumRunningJob();
    loadStatus.updateJobLoad(jobLoad);
  } catch (Exception e) {
    LOG.error("Couldn't get the new Status", e);
  }
}
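To make the jobLoad arithmetic concrete, here is a small self-contained sketch; maxJobTrackerRatio and both counts are assumed, illustrative values, not figures from the snippet above.

public class JobLoadExample {
  public static void main(String[] args) {
    double maxJobTrackerRatio = 1.5; // assumed: max concurrent jobs per tracker
    int numTrackers = 40;            // assumed: trackers reported by ClusterStatus
    int numRunningJobs = 35;         // assumed: jobs currently running

    // Headroom: how many more jobs may be submitted before the cluster
    // is considered saturated; a non-positive value signals overload.
    int jobLoad = (int) (maxJobTrackerRatio * numTrackers) - numRunningJobs;
    System.out.println("jobLoad = " + jobLoad); // (int)(1.5 * 40) - 35 = 25
  }
}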
Use of org.apache.hadoop.mapred.ClusterStatus in project incubator-systemml by apache.
The class InfrastructureAnalyzer, method analyzeHadoopCluster().
/**
 * Analyzes properties of the Hadoop cluster and its configuration.
 */
private static void analyzeHadoopCluster() {
  try {
    JobConf job = ConfigurationManager.getCachedJobConf();
    JobClient client = new JobClient(job);
    ClusterStatus stat = client.getClusterStatus();
    if (stat != null) { // if in cluster mode
      // analyze cluster status
      _remotePar = stat.getTaskTrackers();
      _remoteParMap = stat.getMaxMapTasks();
      _remoteParReduce = stat.getMaxReduceTasks();
      // analyze pure configuration properties
      analyzeHadoopConfiguration();
    }
  } catch (IOException e) {
    throw new RuntimeException("Unable to analyze infrastructure.", e);
  }
}
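For reference, a minimal standalone sketch that reads the same three ClusterStatus fields directly; the ClusterProbe class name is hypothetical, and in local mode the reported values describe the local job runner rather than a real cluster.

import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class ClusterProbe {
  public static void main(String[] args) throws Exception {
    JobClient client = new JobClient(new JobConf());
    ClusterStatus stat = client.getClusterStatus();
    // The same degree-of-parallelism figures the analyzer caches above.
    System.out.println("task trackers    = " + stat.getTaskTrackers());
    System.out.println("max map tasks    = " + stat.getMaxMapTasks());
    System.out.println("max reduce tasks = " + stat.getMaxReduceTasks());
  }
}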