Use of org.apache.hadoop.io.WritableComparable in project SQLWindowing by hbutani.
The class CompositeDataType, method readFields.
@SuppressWarnings("unchecked")
public void readFields(String s) throws IOException {
    ArrayList<DataType<? extends WritableComparable>> types = new ArrayList<DataType<? extends WritableComparable>>();
    // the second character of the serialized form is the element separator
    String elemSep = s.substring(1, 2);
    // drop the two leading characters and the trailing one, keeping only the payload
    s = s.substring(2, s.length() - 1);
    FieldIterator it = new FieldIterator(s, elemSep, TYPE_BEGIN, TYPE_END);
    fieldSep = new Text(it.next());
    while (it.hasNext()) {
        String stype = it.next();
        if (nameToTypeMap.containsKey(stype))
            types.add(nameToTypeMap.get(stype));
        else {
            // unknown name: treat it as a nested composite type and parse recursively
            CompositeDataType ctype = new CompositeDataType();
            ctype.readFields(stype);
            types.add(ctype);
        }
    }
    elementTypes = types.toArray((DataType<? extends WritableComparable>[]) Array.newInstance(DataType.class, types.size()));
}
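The SQLWindowing variant above reads its type description from a String rather than the usual DataInput stream. For reference, the standard contract behind every snippet on this page is the WritableComparable interface itself: write/readFields for serialization plus compareTo for the shuffle sort. A minimal sketch of a custom key (the LongKey class is hypothetical, not taken from SQLWindowing):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class LongKey implements WritableComparable<LongKey> {

    private long value;

    public LongKey() {
        // Hadoop instantiates keys reflectively, so a no-arg constructor is required
    }

    public LongKey(long value) {
        this.value = value;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(value);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        value = in.readLong();
    }

    @Override
    public int compareTo(LongKey other) {
        // defines the sort order of keys during the shuffle
        return Long.compare(value, other.value);
    }

    @Override
    public int hashCode() {
        // used by HashPartitioner to assign keys to reducers
        return Long.hashCode(value);
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof LongKey && ((LongKey) o).value == value;
    }
}

A key like this can be set directly as a map output key class; the framework orders records by compareTo before they reach the reducer.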
Use of org.apache.hadoop.io.WritableComparable in project Plume by tdunning.
The class MSCRCombiner, method reduce.
@SuppressWarnings("unchecked")
protected void reduce(final PlumeObject arg0, java.lang.Iterable<PlumeObject> values, Reducer<PlumeObject, PlumeObject, PlumeObject, PlumeObject>.Context context) throws IOException, InterruptedException {
    PCollection col = mscr.getChannelByNumber().get(arg0.sourceId);
    OutputChannel oC = mscr.getOutputChannels().get(col);
    if (oC.combiner != null) {
        // Apply the combiner function for this channel
        List<WritableComparable> vals = Lists.newArrayList();
        for (PlumeObject val : values) {
            vals.add(val.obj);
        }
        WritableComparable result = (WritableComparable) oC.combiner.getCombiner().combine(vals);
        context.write(arg0, new PlumeObject(result, arg0.sourceId));
    } else {
        // direct writing - pass all key/value pairs through unchanged
        for (PlumeObject val : values) {
            context.write(arg0, val);
        }
    }
}
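MSCRCombiner looks up the combine function per output channel at runtime and works on untyped WritableComparable values. For comparison, a conventional Hadoop combiner is bound to one key/value type at compile time; a minimal sketch (the class name and type choices are illustrative, not part of Plume):

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class SumCombiner extends Reducer<Text, LongWritable, Text, LongWritable> {

    private final LongWritable result = new LongWritable();

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        long sum = 0;
        for (LongWritable value : values) {
            sum += value.get();
        }
        result.set(sum);
        // a combiner may run zero or more times between map and reduce,
        // so its output types must match its input types
        context.write(key, result);
    }
}

It would be registered with job.setCombinerClass(SumCombiner.class) on a new-API Job. The same constraint explains why MSCRCombiner re-wraps the combined value in a PlumeObject before writing it out.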
Use of org.apache.hadoop.io.WritableComparable in project hadoop by apache.
The class MapFileOutputFormat, method getRecordWriter.
public RecordWriter<WritableComparable, Writable> getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException {
    // get the path of the temporary output file
    Path file = FileOutputFormat.getTaskOutputPath(job, name);
    FileSystem fs = file.getFileSystem(job);
    CompressionCodec codec = null;
    CompressionType compressionType = CompressionType.NONE;
    if (getCompressOutput(job)) {
        // find the kind of compression to do
        compressionType = SequenceFileOutputFormat.getOutputCompressionType(job);
        // find the right codec
        Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, DefaultCodec.class);
        codec = ReflectionUtils.newInstance(codecClass, job);
    }
    // ignore the progress parameter, since MapFile is local
    final MapFile.Writer out = new MapFile.Writer(job, fs, file.toString(), job.getOutputKeyClass().asSubclass(WritableComparable.class), job.getOutputValueClass().asSubclass(Writable.class), compressionType, codec, progress);
    return new RecordWriter<WritableComparable, Writable>() {

        public void write(WritableComparable key, Writable value) throws IOException {
            out.append(key, value);
        }

        public void close(Reporter reporter) throws IOException {
            out.close();
        }
    };
}
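A sketch of how a job might be configured to use this output format through the old mapred API; the driver class, job name, paths, and key/value classes are illustrative, and the mapper/reducer setup is omitted:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapFileOutputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;

public class MapFileJobDriver {

    public static void main(String[] args) throws Exception {
        JobConf job = new JobConf(MapFileJobDriver.class);
        job.setJobName("mapfile-output-example");
        // mapper/reducer setup omitted for brevity; the output key class must
        // implement WritableComparable because MapFile.Writer expects keys in
        // sorted order, which the shuffle guarantees within each reduce task
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setOutputFormat(MapFileOutputFormat.class);
        // optional compression, read back via getOutputCompressionType(...) above
        FileOutputFormat.setCompressOutput(job, true);
        SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        JobClient.runJob(job);
    }
}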
Use of org.apache.hadoop.io.WritableComparable in project hadoop by apache.
The class TestPipeApplication, method testApplication.
/**
 * Tests org.apache.hadoop.mapred.pipes.Application.
 * Exercises internal functions: MessageType.REGISTER_COUNTER, INCREMENT_COUNTER, STATUS, PROGRESS...
 *
 * @throws Throwable
 */
@Test
public void testApplication() throws Throwable {
    JobConf conf = new JobConf();
    RecordReader<FloatWritable, NullWritable> rReader = new Reader();
    // client for test
    File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationStub");
    TestTaskReporter reporter = new TestTaskReporter();
    File[] psw = cleanTokenPasswordFile();
    try {
        conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
        conf.set(MRJobConfig.CACHE_LOCALFILES, fCommand.getAbsolutePath());
        // token for authorization
        Token<AMRMTokenIdentifier> token = new Token<AMRMTokenIdentifier>("user".getBytes(), "password".getBytes(), new Text("kind"), new Text("service"));
        TokenCache.setJobToken(token, conf.getCredentials());
        FakeCollector output = new FakeCollector(new Counters.Counter(), new Progress());
        FileSystem fs = new RawLocalFileSystem();
        fs.initialize(FsConstants.LOCAL_FS_URI, conf);
        Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf, fs.create(new Path(workSpace.getAbsolutePath() + File.separator + "outfile")), IntWritable.class, Text.class, null, null, true);
        output.setWriter(wr);
        conf.set(Submitter.PRESERVE_COMMANDFILE, "true");
        initStdOut(conf);
        Application<WritableComparable<IntWritable>, Writable, IntWritable, Text> application = new Application<WritableComparable<IntWritable>, Writable, IntWritable, Text>(conf, rReader, output, reporter, IntWritable.class, Text.class);
        application.getDownlink().flush();
        application.getDownlink().mapItem(new IntWritable(3), new Text("txt"));
        application.getDownlink().flush();
        application.waitForFinish();
        wr.close();
        // test getDownlink().mapItem();
        String stdOut = readStdOut(conf);
        assertTrue(stdOut.contains("key:3"));
        assertTrue(stdOut.contains("value:txt"));
        // the reporter's counter and status should have been sent;
        // test MessageType.REGISTER_COUNTER and INCREMENT_COUNTER
        assertEquals(1.0, reporter.getProgress(), 0.01);
        assertNotNull(reporter.getCounter("group", "name"));
        // test status MessageType.STATUS
        assertEquals(reporter.getStatus(), "PROGRESS");
        stdOut = readFile(new File(workSpace.getAbsolutePath() + File.separator + "outfile"));
        // check MessageType.PROGRESS
        assertEquals(0.55f, rReader.getProgress(), 0.001);
        application.getDownlink().close();
        // test MessageType.OUTPUT
        Entry<IntWritable, Text> entry = output.getCollect().entrySet().iterator().next();
        assertEquals(123, entry.getKey().get());
        assertEquals("value", entry.getValue().toString());
        try {
            // try to abort
            application.abort(new Throwable());
            fail();
        } catch (IOException e) {
            // abort is expected to fail with this message
            assertEquals("pipe child exception", e.getMessage());
        }
    } finally {
        if (psw != null) {
            // remove password files
            for (File file : psw) {
                file.deleteOnExit();
            }
        }
    }
}
Use of org.apache.hadoop.io.WritableComparable in project hive by apache.
The class HiveInputFormat, method getInputFormatFromCache.
/**
 * NOTE: this no longer wraps the IF for LLAP. Call wrapForLlap manually if needed.
 */
public static InputFormat<WritableComparable, Writable> getInputFormatFromCache(Class inputFormatClass, JobConf job) throws IOException {
    InputFormat<WritableComparable, Writable> instance = inputFormats.get(inputFormatClass);
    if (instance == null) {
        try {
            instance = (InputFormat<WritableComparable, Writable>) ReflectionUtil.newInstance(inputFormatClass, job);
            // HBase input formats are not thread safe today. See HIVE-8808.
            String inputFormatName = inputFormatClass.getName().toLowerCase();
            if (!inputFormatName.contains("hbase")) {
                inputFormats.put(inputFormatClass, instance);
            }
        } catch (Exception e) {
            throw new IOException("Cannot create an instance of InputFormat class " + inputFormatClass.getName() + " as specified in mapredWork!", e);
        }
    }
    return instance;
}
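A sketch of how such an InputFormat<WritableComparable, Writable> instance is typically consumed through the old mapred API: splits first, then a RecordReader per split. The TextInputFormat choice and the standalone driver class are illustrative, not taken from Hive:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;

public class InputFormatScan {

    @SuppressWarnings({ "unchecked", "rawtypes" })
    public static void main(String[] args) throws Exception {
        JobConf job = new JobConf();
        FileInputFormat.setInputPaths(job, new Path(args[0]));

        // in Hive this instance would come from getInputFormatFromCache(...)
        InputFormat<WritableComparable, Writable> inputFormat = (InputFormat) new TextInputFormat();
        ((TextInputFormat) inputFormat).configure(job);

        for (InputSplit split : inputFormat.getSplits(job, 1)) {
            RecordReader<WritableComparable, Writable> reader =
                    inputFormat.getRecordReader(split, job, Reporter.NULL);
            WritableComparable key = reader.createKey();
            Writable value = reader.createValue();
            // key/value objects are reused across calls to next()
            while (reader.next(key, value)) {
                System.out.println(key + "\t" + value);
            }
            reader.close();
        }
    }
}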