Use of org.apache.hadoop.io.WritableComparable in project hive by apache.
The class CommonMergeJoinOperator, method compareKeysMany:
@SuppressWarnings("rawtypes")
private int compareKeysMany(WritableComparator[] comparators, final List<Object> k1, final List<Object> k2) {
    // invariant: k1.size == k2.size
    int ret = 0;
    final int size = k1.size();
    for (int i = 0; i < size; i++) {
        WritableComparable key_1 = (WritableComparable) k1.get(i);
        WritableComparable key_2 = (WritableComparable) k2.get(i);
        ret = compareKey(comparators, i, key_1, key_2, nullsafes != null ? nullsafes[i] : false);
        if (ret != 0) {
            return ret;
        }
    }
    return ret;
}
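The per-column work is delegated to compareKey, which consults the column's WritableComparator and a null-safe flag. As a rough standalone illustration of the idea (this is not Hive's actual compareKey; the way nulls rank against non-nulls below is an assumption made for the sketch), a column-by-column comparison of composite keys might look like this:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

public class KeyCompareSketch {

    // Hypothetical per-column compare: a null-safe column treats (null, null) as equal.
    @SuppressWarnings({ "rawtypes", "unchecked" })
    static int compareColumn(WritableComparable a, WritableComparable b, boolean nullSafe) {
        if (a == null && b == null) {
            return nullSafe ? 0 : -1; // assumption: non-null-safe nulls never compare equal
        }
        if (a == null) {
            return -1;
        }
        if (b == null) {
            return 1;
        }
        return a.compareTo(b);
    }

    // Compare composite keys column by column, stopping at the first difference,
    // mirroring the early return in compareKeysMany above.
    @SuppressWarnings("rawtypes")
    static int compareKeys(List<? extends WritableComparable> k1, List<? extends WritableComparable> k2) {
        for (int i = 0; i < k1.size(); i++) {
            int c = compareColumn(k1.get(i), k2.get(i), false);
            if (c != 0) {
                return c;
            }
        }
        return 0;
    }

    public static void main(String[] args) {
        List<Text> a = Arrays.asList(new Text("k1"), new Text("x"));
        List<Text> b = Arrays.asList(new Text("k1"), new Text("y"));
        System.out.println(compareKeys(a, b)); // negative: a sorts before b
    }
}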
Use of org.apache.hadoop.io.WritableComparable in project hive by apache.
The class TypedBytesWritableInput, method readSortedMap:
public SortedMapWritable readSortedMap(SortedMapWritable mw) throws IOException {
    if (mw == null) {
        mw = new SortedMapWritable();
    }
    int length = in.readMapHeader();
    for (int i = 0; i < length; i++) {
        WritableComparable key = (WritableComparable) read();
        Writable value = read();
        mw.put(key, value);
    }
    return mw;
}
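The snippet consumes typed-bytes framing (readMapHeader followed by one typed key and one typed value per entry) into a SortedMapWritable. For a quick, self-contained look at the Writable side of that map, it can also be round-tripped through the plain Writable contract; this sketch assumes nothing about the typed-bytes stream and uses only standard Hadoop I/O:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;

public class SortedMapRoundTrip {
    public static void main(String[] args) throws IOException {
        SortedMapWritable out = new SortedMapWritable();
        out.put(new Text("a"), new IntWritable(1));
        out.put(new Text("b"), new IntWritable(2));

        // Serialize with the Writable contract.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        out.write(new DataOutputStream(buf));

        // Deserialize into a fresh instance, much as readSortedMap allocates one when mw == null.
        SortedMapWritable in = new SortedMapWritable();
        in.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(in.size()); // 2
    }
}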
Use of org.apache.hadoop.io.WritableComparable in project hadoop-book by elephantscale.
The class Sort, method run:
/**
 * The main driver for the sort program.
 * Invoke this method to submit the map/reduce job.
 * @throws IOException When there are communication problems with the
 * job tracker.
 */
public int run(String[] args) throws Exception {
    JobConf jobConf = new JobConf(getConf(), Sort.class);
    jobConf.setJobName("sorter");
    jobConf.setMapperClass(IdentityMapper.class);
    jobConf.setReducerClass(IdentityReducer.class);
    JobClient client = new JobClient(jobConf);
    ClusterStatus cluster = client.getClusterStatus();
    int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
    String sort_reduces = jobConf.get("test.sort.reduces_per_host");
    if (sort_reduces != null) {
        num_reduces = cluster.getTaskTrackers() * Integer.parseInt(sort_reduces);
    }
    Class<? extends InputFormat> inputFormatClass = SequenceFileInputFormat.class;
    Class<? extends OutputFormat> outputFormatClass = SequenceFileOutputFormat.class;
    Class<? extends WritableComparable> outputKeyClass = BytesWritable.class;
    Class<? extends Writable> outputValueClass = BytesWritable.class;
    List<String> otherArgs = new ArrayList<String>();
    InputSampler.Sampler<K, V> sampler = null;
    for (int i = 0; i < args.length; ++i) {
        try {
            if ("-m".equals(args[i])) {
                jobConf.setNumMapTasks(Integer.parseInt(args[++i]));
            } else if ("-r".equals(args[i])) {
                num_reduces = Integer.parseInt(args[++i]);
            } else if ("-inFormat".equals(args[i])) {
                inputFormatClass = Class.forName(args[++i]).asSubclass(InputFormat.class);
            } else if ("-outFormat".equals(args[i])) {
                outputFormatClass = Class.forName(args[++i]).asSubclass(OutputFormat.class);
            } else if ("-outKey".equals(args[i])) {
                outputKeyClass = Class.forName(args[++i]).asSubclass(WritableComparable.class);
            } else if ("-outValue".equals(args[i])) {
                outputValueClass = Class.forName(args[++i]).asSubclass(Writable.class);
            } else if ("-totalOrder".equals(args[i])) {
                double pcnt = Double.parseDouble(args[++i]);
                int numSamples = Integer.parseInt(args[++i]);
                int maxSplits = Integer.parseInt(args[++i]);
                if (0 >= maxSplits) {
                    maxSplits = Integer.MAX_VALUE;
                }
                sampler = new InputSampler.RandomSampler<K, V>(pcnt, numSamples, maxSplits);
            } else {
                otherArgs.add(args[i]);
            }
        } catch (NumberFormatException except) {
            System.out.println("ERROR: Integer expected instead of " + args[i]);
            return printUsage();
        } catch (ArrayIndexOutOfBoundsException except) {
            System.out.println("ERROR: Required parameter missing from " + args[i - 1]);
            // exits
            return printUsage();
        }
    }
    // Set user-supplied (possibly default) job configs
    jobConf.setNumReduceTasks(num_reduces);
    jobConf.setInputFormat(inputFormatClass);
    jobConf.setOutputFormat(outputFormatClass);
    jobConf.setOutputKeyClass(outputKeyClass);
    jobConf.setOutputValueClass(outputValueClass);
    // Make sure there are exactly 2 parameters left.
    if (otherArgs.size() != 2) {
        System.out.println("ERROR: Wrong number of parameters: " + otherArgs.size() + " instead of 2.");
        return printUsage();
    }
    FileInputFormat.setInputPaths(jobConf, otherArgs.get(0));
    FileOutputFormat.setOutputPath(jobConf, new Path(otherArgs.get(1)));
    if (sampler != null) {
        System.out.println("Sampling input to effect total-order sort...");
        jobConf.setPartitionerClass(TotalOrderPartitioner.class);
        Path inputDir = FileInputFormat.getInputPaths(jobConf)[0];
        inputDir = inputDir.makeQualified(inputDir.getFileSystem(jobConf));
        Path partitionFile = new Path(inputDir, "_sortPartitioning");
        TotalOrderPartitioner.setPartitionFile(jobConf, partitionFile);
        InputSampler.<K, V>writePartitionFile(jobConf, sampler);
        URI partitionUri = new URI(partitionFile.toString() + "#" + "_sortPartitioning");
        DistributedCache.addCacheFile(partitionUri, jobConf);
        DistributedCache.createSymlink(jobConf);
    }
    System.out.println("Running on " + cluster.getTaskTrackers() + " nodes to sort from " + FileInputFormat.getInputPaths(jobConf)[0] + " into " + FileOutputFormat.getOutputPath(jobConf) + " with " + num_reduces + " reduces.");
    Date startTime = new Date();
    System.out.println("Job started: " + startTime);
    jobResult = JobClient.runJob(jobConf);
    Date end_time = new Date();
    System.out.println("Job ended: " + end_time);
    System.out.println("The job took " + (end_time.getTime() - startTime.getTime()) / 1000 + " seconds.");
    return 0;
}
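A driver like this is normally launched through ToolRunner so that generic Hadoop options are parsed before run(String[]) sees the remaining arguments. A minimal sketch of such an entry point, assuming Sort implements Tool (its use of getConf() suggests it does; the book's own class may already define an equivalent main):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class SortDriver {
    public static void main(String[] args) throws Exception {
        // ToolRunner strips generic options (-D, -fs, -jt, -files, ...) and passes
        // the rest to Sort.run(String[]), which parses -m, -r, -totalOrder, etc.
        int exitCode = ToolRunner.run(new Configuration(), new Sort(), args);
        System.exit(exitCode);
    }
}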
Use of org.apache.hadoop.io.WritableComparable in project hadoop-book by elephantscale.
The class Join, method run:
/**
 * The main driver for the join program. Invoke this method to submit the
 * map/reduce job.
 *
 * @throws IOException When there are communication problems with the job
 * tracker.
 */
@Override
public int run(String[] args) throws Exception {
    JobConf jobConf = new JobConf(getConf(), Sort.class);
    jobConf.setJobName("join");
    jobConf.setMapperClass(IdentityMapper.class);
    jobConf.setReducerClass(IdentityReducer.class);
    JobClient client = new JobClient(jobConf);
    ClusterStatus cluster = client.getClusterStatus();
    int num_maps = cluster.getTaskTrackers() * jobConf.getInt("test.sort.maps_per_host", 10);
    int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
    String sort_reduces = jobConf.get("test.sort.reduces_per_host");
    if (sort_reduces != null) {
        num_reduces = cluster.getTaskTrackers() * Integer.parseInt(sort_reduces);
    }
    Class<? extends InputFormat> inputFormatClass = SequenceFileInputFormat.class;
    Class<? extends OutputFormat> outputFormatClass = SequenceFileOutputFormat.class;
    Class<? extends WritableComparable> outputKeyClass = BytesWritable.class;
    Class<? extends Writable> outputValueClass = TupleWritable.class;
    String op = "inner";
    List<String> otherArgs = new ArrayList<String>();
    for (int i = 0; i < args.length; ++i) {
        try {
            if ("-m".equals(args[i])) {
                num_maps = Integer.parseInt(args[++i]);
            } else if ("-r".equals(args[i])) {
                num_reduces = Integer.parseInt(args[++i]);
            } else if ("-inFormat".equals(args[i])) {
                inputFormatClass = Class.forName(args[++i]).asSubclass(InputFormat.class);
            } else if ("-outFormat".equals(args[i])) {
                outputFormatClass = Class.forName(args[++i]).asSubclass(OutputFormat.class);
            } else if ("-outKey".equals(args[i])) {
                outputKeyClass = Class.forName(args[++i]).asSubclass(WritableComparable.class);
            } else if ("-outValue".equals(args[i])) {
                outputValueClass = Class.forName(args[++i]).asSubclass(Writable.class);
            } else if ("-joinOp".equals(args[i])) {
                op = args[++i];
            } else {
                otherArgs.add(args[i]);
            }
        } catch (NumberFormatException except) {
            System.out.println("ERROR: Integer expected instead of " + args[i]);
            return printUsage();
        } catch (ArrayIndexOutOfBoundsException except) {
            System.out.println("ERROR: Required parameter missing from " + args[i - 1]);
            // exits
            return printUsage();
        }
    }
    // Set user-supplied (possibly default) job configs
    jobConf.setNumMapTasks(num_maps);
    jobConf.setNumReduceTasks(num_reduces);
    if (otherArgs.size() < 2) {
        System.out.println("ERROR: Wrong number of parameters: ");
        return printUsage();
    }
    FileOutputFormat.setOutputPath(jobConf, new Path(otherArgs.remove(otherArgs.size() - 1)));
    List<Path> plist = new ArrayList<Path>(otherArgs.size());
    for (String s : otherArgs) {
        plist.add(new Path(s));
    }
    jobConf.setInputFormat(CompositeInputFormat.class);
    jobConf.set("mapred.join.expr", CompositeInputFormat.compose(op, inputFormatClass, plist.toArray(new Path[0])));
    jobConf.setOutputFormat(outputFormatClass);
    jobConf.setOutputKeyClass(outputKeyClass);
    jobConf.setOutputValueClass(outputValueClass);
    Date startTime = new Date();
    System.out.println("Job started: " + startTime);
    JobClient.runJob(jobConf);
    Date end_time = new Date();
    System.out.println("Job ended: " + end_time);
    System.out.println("The job took " + (end_time.getTime() - startTime.getTime()) / 1000 + " seconds.");
    return 0;
}
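The join expression handed to mapred.join.expr is built by CompositeInputFormat.compose. A small standalone sketch of what that expression looks like for an inner join over two hypothetical SequenceFile inputs (the paths are placeholders):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.join.CompositeInputFormat;

public class ComposeDemo {
    public static void main(String[] args) {
        String expr = CompositeInputFormat.compose(
                "inner", SequenceFileInputFormat.class,
                new Path("/data/left"), new Path("/data/right"));
        // Produces an expression of roughly this shape:
        // inner(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,"/data/left"),
        //       tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,"/data/right"))
        System.out.println(expr);
    }
}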
Use of org.apache.hadoop.io.WritableComparable in project SQLWindowing by hbutani.
The class CompositeDataType, method define:
public static CompositeDataType define(StructObjectInspector OI) throws WindowingException {
    List<? extends StructField> fields = OI.getAllStructFieldRefs();
    @SuppressWarnings("unchecked")
    DataType<? extends WritableComparable>[] elementTypes =
            (DataType<? extends WritableComparable>[]) new DataType[fields.size()];
    int i = 0;
    for (StructField f : fields) {
        ObjectInspector fOI = f.getFieldObjectInspector();
        if (fOI.getCategory() != Category.PRIMITIVE) {
            throw new WindowingException("Cannot handle non-primitive fields for partitioning/sorting");
        }
        PrimitiveObjectInspector pOI = (PrimitiveObjectInspector) fOI;
        switch (pOI.getPrimitiveCategory()) {
            case BOOLEAN:
                elementTypes[i] = BOOLEAN;
                break;
            case DOUBLE:
                elementTypes[i] = DOUBLE;
                break;
            case BYTE:
                elementTypes[i] = BYTE;
                break;
            case FLOAT:
                elementTypes[i] = FLOAT;
                break;
            case INT:
                elementTypes[i] = INT;
                break;
            case LONG:
                elementTypes[i] = LONG;
                break;
            case SHORT:
                elementTypes[i] = SHORT;
                break;
            case STRING:
                elementTypes[i] = TEXT;
                break;
            default:
                throw new WindowingException(Utils.sprintf("Cannot handle datatype %s for partitioning/sorting", pOI.toString()));
        }
        i++;
    }
    return new CompositeDataType(",", elementTypes);
}
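The switch maps each Hive primitive category onto one of SQLWindowing's DataType constants, each wrapping a WritableComparable implementation used for partitioning and sorting keys. A rough standalone sketch of the same idea as a lookup table; the classes SQLWindowing actually wraps may differ, and the BYTE and SHORT categories (which use Hive-specific writables in org.apache.hadoop.hive.serde2.io) are omitted to keep the sketch small:

import java.util.EnumMap;
import java.util.Map;

import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

@SuppressWarnings("rawtypes")
public class PrimitiveKeyTypes {
    // Maps a Hive primitive category to the Hadoop WritableComparable one might sort with.
    static final Map<PrimitiveCategory, Class<? extends WritableComparable>> KEY_TYPES =
            new EnumMap<>(PrimitiveCategory.class);

    static {
        KEY_TYPES.put(PrimitiveCategory.BOOLEAN, BooleanWritable.class);
        KEY_TYPES.put(PrimitiveCategory.DOUBLE, DoubleWritable.class);
        KEY_TYPES.put(PrimitiveCategory.FLOAT, FloatWritable.class);
        KEY_TYPES.put(PrimitiveCategory.INT, IntWritable.class);
        KEY_TYPES.put(PrimitiveCategory.LONG, LongWritable.class);
        KEY_TYPES.put(PrimitiveCategory.STRING, Text.class);
    }
}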