Use of org.apache.hadoop.io.WritableComparable in project hadoop by apache.
Class TestJoinProperties, method testFormat.
@SuppressWarnings("unchecked")
public int testFormat(Configuration conf, int tupleSize, boolean firstTuple,
    boolean secondTuple, TestType ttype) throws Exception {
  Job job = Job.getInstance(conf);
  CompositeInputFormat format = new CompositeInputFormat();
  int count = 0;
  // Walk every split produced by the composite join and validate each record.
  for (InputSplit split : (List<InputSplit>) format.getSplits(job)) {
    TaskAttemptContext context =
        MapReduceTestUtil.createDummyMapTaskAttemptContext(conf);
    RecordReader reader = format.createRecordReader(split, context);
    MapContext mcontext = new MapContextImpl(conf, context.getTaskAttemptID(),
        reader, null, null, MapReduceTestUtil.createDummyReporter(), split);
    reader.initialize(split, mcontext);
    WritableComparable key = null;
    Writable value = null;
    while (reader.nextKeyValue()) {
      key = (WritableComparable) reader.getCurrentKey();
      value = (Writable) reader.getCurrentValue();
      validateKeyValue(key, value, tupleSize, firstTuple, secondTuple, ttype);
      count++;
    }
  }
  return count;
}
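The join expression that drives this test is read from the job configuration; the snippet above only consumes it. As a point of reference, here is a minimal sketch of how such an expression is usually composed with CompositeInputFormat.compose (the paths and the "outer" join op are illustrative assumptions, not values from the original test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat;

public class ComposeJoinExprSketch {
  // Builds a Configuration carrying an outer-join expression over two
  // hypothetical SequenceFile inputs; a method like testFormat(conf, ...)
  // could then be invoked with it.
  public static Configuration buildConf() {
    Configuration conf = new Configuration();
    conf.set(CompositeInputFormat.JOIN_EXPR,
        CompositeInputFormat.compose("outer", SequenceFileInputFormat.class,
            new Path("/tmp/join/a"), new Path("/tmp/join/b")));
    return conf;
  }
}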
Use of org.apache.hadoop.io.WritableComparable in project hadoop by apache.
Class MapFileOutputFormat, method getRecordWriter.
public RecordWriter<WritableComparable<?>, Writable> getRecordWriter(
    TaskAttemptContext context) throws IOException {
  Configuration conf = context.getConfiguration();
  CompressionCodec codec = null;
  CompressionType compressionType = CompressionType.NONE;
  if (getCompressOutput(context)) {
    // find the kind of compression to do
    compressionType = SequenceFileOutputFormat.getOutputCompressionType(context);
    // find the right codec
    Class<?> codecClass = getOutputCompressorClass(context, DefaultCodec.class);
    codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
  }
  Path file = getDefaultWorkFile(context, "");
  FileSystem fs = file.getFileSystem(conf);
  // ignore the progress parameter, since MapFile is local
  final MapFile.Writer out = new MapFile.Writer(conf, fs, file.toString(),
      context.getOutputKeyClass().asSubclass(WritableComparable.class),
      context.getOutputValueClass().asSubclass(Writable.class),
      compressionType, codec, context);
  return new RecordWriter<WritableComparable<?>, Writable>() {

    public void write(WritableComparable<?> key, Writable value)
        throws IOException {
      out.append(key, value);
    }

    public void close(TaskAttemptContext context) throws IOException {
      out.close();
    }
  };
}
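For context, a minimal sketch of a driver configuring a job so that this getRecordWriter is exercised, including the compression settings it reads back through getCompressOutput, getOutputCompressionType, and getOutputCompressorClass. The output path, key/value classes, and codec below are illustrative assumptions:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class MapFileJobSketch {
  public static void configure(Job job) throws Exception {
    job.setOutputFormatClass(MapFileOutputFormat.class);
    // Keys must be WritableComparable and values Writable, as enforced by the
    // asSubclass() calls in getRecordWriter above.
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    // These settings are what the method above reads back when building the
    // MapFile.Writer.
    FileOutputFormat.setCompressOutput(job, true);
    SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
    FileOutputFormat.setOutputCompressorClass(job, DefaultCodec.class);
    FileOutputFormat.setOutputPath(job, new Path("/tmp/mapfile-out"));
  }
}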
Use of org.apache.hadoop.io.WritableComparable in project Plume by tdunning.
Class MSCRReducer, method reduce.
@SuppressWarnings("unchecked")
protected void reduce(final PlumeObject arg0, java.lang.Iterable<PlumeObject> values,
    Reducer<PlumeObject, PlumeObject, NullWritable, NullWritable>.Context arg2)
    throws IOException, InterruptedException {
  PCollection col = mscr.getChannelByNumber().get(arg0.sourceId);
  OutputChannel oC = mscr.getOutputChannels().get(col);
  if (oC.reducer != null) {
    // apply reducer
    ParallelDo pDo = oC.reducer;
    // TODO how to check / report this
    DoFn reducer = pDo.getFunction();
    List<WritableComparable> vals = Lists.newArrayList();
    for (PlumeObject val : values) {
      vals.add(val.obj);
    }
    reducer.process(Pair.create(arg0.obj, vals), new EmitFn() {

      @Override
      public void emit(Object v) {
        try {
          if (v instanceof Pair) {
            Pair p = (Pair) v;
            mos.write(arg0.sourceId + "", p.getKey(), p.getValue());
          } else {
            mos.write(arg0.sourceId + "", NullWritable.get(), (WritableComparable) v);
          }
        } catch (Exception e) {
          // TODO How to report this
          e.printStackTrace();
        }
      }
    });
  } else {
    // direct writing - write all key, value pairs
    for (PlumeObject val : values) {
      if (oC.output instanceof PTable) {
        mos.write(arg0.sourceId + "", arg0.obj, val.obj);
      } else {
        mos.write(arg0.sourceId + "", NullWritable.get(), val.obj);
      }
    }
  }
}
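The mos field written to above is not declared in this snippet. Assuming it is an org.apache.hadoop.mapreduce.lib.output.MultipleOutputs (an assumption about Plume's reducer, not something shown here), its usual lifecycle in a reducer looks roughly like this sketch:

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

// Sketch of the assumed lifecycle of a MultipleOutputs field like 'mos'.
// Named outputs (here the literal "0") must be registered in the driver via
// MultipleOutputs.addNamedOutput(job, "0", ...) before they can be written to.
public class MultipleOutputsLifecycleSketch
    extends Reducer<Text, IntWritable, NullWritable, NullWritable> {

  private MultipleOutputs<NullWritable, NullWritable> mos;

  @Override
  protected void setup(Context context) {
    mos = new MultipleOutputs<NullWritable, NullWritable>(context);
  }

  @Override
  protected void reduce(Text key, Iterable<IntWritable> values, Context context)
      throws IOException, InterruptedException {
    for (IntWritable v : values) {
      // Route each value to the named output, keyed by NullWritable as in
      // the non-pair branch of MSCRReducer.reduce above.
      mos.write("0", NullWritable.get(), v);
    }
  }

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    mos.close();
  }
}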
Use of org.apache.hadoop.io.WritableComparable in project accumulo by apache.
Class MultiReader, method seek.
public synchronized boolean seek(WritableComparable key) throws IOException {
  PriorityBuffer reheap = new PriorityBuffer(heap.size());
  boolean result = false;
  for (Object obj : heap) {
    Index index = (Index) obj;
    try {
      WritableComparable found = index.reader.getClosest(key, index.value, true);
      if (found != null && found.equals(key)) {
        result = true;
      }
    } catch (EOFException ex) {
      // thrown if key is beyond all data in the map
    }
    index.cached = false;
    reheap.add(index);
  }
  heap = reheap;
  return result;
}
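MultiReader.seek relies on MapFile.Reader.getClosest to position each underlying reader. A standalone sketch of that call against a single MapFile (the path and key are hypothetical) shows the same lookup and exact-match check:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

public class GetClosestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical MapFile directory with Text keys and Text values.
    MapFile.Reader reader = new MapFile.Reader(new Path("/tmp/example-mapfile"), conf);
    Text value = new Text();
    Text wanted = new Text("row-42");
    // before = true: return the entry at the key, or the one just before it
    // when the key is absent; seek() above treats only an exact hit as success.
    WritableComparable found = reader.getClosest(wanted, value, true);
    System.out.println("exact match: " + (found != null && found.equals(wanted)));
    reader.close();
  }
}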
Use of org.apache.hadoop.io.WritableComparable in project Plume by tdunning.
Class MSCRCombiner, method reduce.
@SuppressWarnings("unchecked")
protected void reduce(final PlumeObject arg0, java.lang.Iterable<PlumeObject> values,
    Reducer<PlumeObject, PlumeObject, PlumeObject, PlumeObject>.Context context)
    throws IOException, InterruptedException {
  PCollection col = mscr.getChannelByNumber().get(arg0.sourceId);
  OutputChannel oC = mscr.getOutputChannels().get(col);
  if (oC.combiner != null) {
    // Apply combiner function for this channel
    List<WritableComparable> vals = Lists.newArrayList();
    for (PlumeObject val : values) {
      vals.add(val.obj);
    }
    WritableComparable result =
        (WritableComparable) oC.combiner.getCombiner().combine(vals);
    context.write(arg0, new PlumeObject(result, arg0.sourceId));
  } else {
    // direct writing - write all key, value pairs
    for (PlumeObject val : values) {
      context.write(arg0, val);
    }
  }
}
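For a concrete picture of what combine(vals) is expected to return, here is a minimal fold over a list of WritableComparable values. This is not Plume's CombinerFn interface, just an illustrative sum that assumes every value is an IntWritable:

import java.util.List;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.WritableComparable;

public class SumCombineSketch {
  // Folds all values for one key into a single WritableComparable,
  // assuming every element is an IntWritable.
  public static WritableComparable combine(List<WritableComparable> vals) {
    int sum = 0;
    for (WritableComparable v : vals) {
      sum += ((IntWritable) v).get();
    }
    return new IntWritable(sum);
  }
}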