
Example 11 with WritableComparable

use of org.apache.hadoop.io.WritableComparable in project Plume by tdunning.

the class MSCRReducer method reduce.

@SuppressWarnings("unchecked")
protected void reduce(final PlumeObject arg0, java.lang.Iterable<PlumeObject> values, Reducer<PlumeObject, PlumeObject, NullWritable, NullWritable>.Context arg2) throws IOException, InterruptedException {
    PCollection col = mscr.getChannelByNumber().get(arg0.sourceId);
    OutputChannel oC = mscr.getOutputChannels().get(col);
    if (oC.reducer != null) {
        // apply reducer
        ParallelDo pDo = oC.reducer;
        // TODO how to check / report this
        DoFn reducer = pDo.getFunction();
        List<WritableComparable> vals = Lists.newArrayList();
        for (PlumeObject val : values) {
            vals.add(val.obj);
        }
        reducer.process(Pair.create(arg0.obj, vals), new EmitFn() {

            @Override
            public void emit(Object v) {
                try {
                    if (v instanceof Pair) {
                        Pair p = (Pair) v;
                        mos.write(arg0.sourceId + "", p.getKey(), p.getValue());
                    } else {
                        mos.write(arg0.sourceId + "", NullWritable.get(), (WritableComparable) v);
                    }
                } catch (Exception e) {
                    // TODO How to report this
                    e.printStackTrace();
                }
            }
        });
    } else {
        // direct writing - write all key, value pairs
        for (PlumeObject val : values) {
            if (oC.output instanceof PTable) {
                mos.write(arg0.sourceId + "", arg0.obj, val.obj);
            } else {
                mos.write(arg0.sourceId + "", NullWritable.get(), val.obj);
            }
        }
    }
}
Also used : ParallelDo(com.tdunning.plume.local.lazy.op.ParallelDo) PlumeObject(com.tdunning.plume.local.lazy.MapRedExecutor.PlumeObject) IOException(java.io.IOException) PTable(com.tdunning.plume.PTable) PCollection(com.tdunning.plume.PCollection) DoFn(com.tdunning.plume.DoFn) EmitFn(com.tdunning.plume.EmitFn) WritableComparable(org.apache.hadoop.io.WritableComparable) OutputChannel(com.tdunning.plume.local.lazy.MSCR.OutputChannel) Pair(com.tdunning.plume.Pair)
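
Shuffle keys in Hadoop must be both serializable and ordered, which is exactly what org.apache.hadoop.io.WritableComparable expresses; arg0.obj and the emitted values above all carry that type. A minimal sketch of a custom key, assuming only the standard Hadoop API (the class name SourceIdKey is hypothetical, not part of Plume):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;

// Hypothetical key type; illustrates the WritableComparable contract only.
public class SourceIdKey implements WritableComparable<SourceIdKey> {

    private int sourceId;

    // Hadoop instantiates keys reflectively, so a no-arg constructor is required
    public SourceIdKey() {
    }

    public SourceIdKey(int sourceId) {
        this.sourceId = sourceId;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(sourceId); // serialize fields in a fixed order
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        sourceId = in.readInt(); // deserialize in exactly the same order
    }

    @Override
    public int compareTo(SourceIdKey other) {
        return Integer.compare(sourceId, other.sourceId); // defines the shuffle sort order
    }
}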

Example 12 with WritableComparable

use of org.apache.hadoop.io.WritableComparable in project hive by apache.

the class HCatOutputFormatWriter method write.

@Override
public void write(Iterator<HCatRecord> recordItr) throws HCatException {
    int id = sp.getId();
    setVarsInConf(id);
    HCatOutputFormat outFormat = new HCatOutputFormat();
    TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(conf, new TaskAttemptID(ShimLoader.getHadoopShims().getHCatShim().createTaskID(), id));
    OutputCommitter committer = null;
    RecordWriter<WritableComparable<?>, HCatRecord> writer;
    try {
        committer = outFormat.getOutputCommitter(cntxt);
        committer.setupTask(cntxt);
        writer = outFormat.getRecordWriter(cntxt);
        while (recordItr.hasNext()) {
            HCatRecord rec = recordItr.next();
            writer.write(null, rec);
        }
        writer.close(cntxt);
        if (committer.needsTaskCommit(cntxt)) {
            committer.commitTask(cntxt);
        }
    } catch (IOException e) {
        if (null != committer) {
            try {
                committer.abortTask(cntxt);
            } catch (IOException e1) {
                throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
            }
        }
        throw new HCatException("Failed while writing", e);
    } catch (InterruptedException e) {
        if (null != committer) {
            try {
                committer.abortTask(cntxt);
            } catch (IOException e1) {
                throw new HCatException(ErrorType.ERROR_INTERNAL_EXCEPTION, e1);
            }
        }
        throw new HCatException("Failed while writing", e);
    }
}
Also used : OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) WritableComparable(org.apache.hadoop.io.WritableComparable) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) HCatException(org.apache.hive.hcatalog.common.HCatException) HCatOutputFormat(org.apache.hive.hcatalog.mapreduce.HCatOutputFormat) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) IOException(java.io.IOException) HCatRecord(org.apache.hive.hcatalog.data.HCatRecord)
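
The two identical catch blocks above follow the standard OutputCommitter protocol: abort the task attempt on any failure, and commit only when needsTaskCommit agrees. A condensed sketch of that pattern, assuming the same imports as the example (writeAll is a hypothetical helper, not HCatalog API):

// Condensed sketch of the commit/abort pattern above; writeAll is hypothetical.
void writeAll(HCatOutputFormat outFormat, TaskAttemptContext cntxt, Iterator<HCatRecord> recordItr) throws Exception {
    OutputCommitter committer = outFormat.getOutputCommitter(cntxt);
    committer.setupTask(cntxt);
    try {
        RecordWriter<WritableComparable<?>, HCatRecord> writer = outFormat.getRecordWriter(cntxt);
        while (recordItr.hasNext()) {
            // HCatOutputFormat takes the record as the value; the key is ignored
            writer.write(null, recordItr.next());
        }
        writer.close(cntxt);
        if (committer.needsTaskCommit(cntxt)) {
            // publish this attempt's output
            committer.commitTask(cntxt);
        }
    } catch (IOException | InterruptedException e) {
        // discard partial output before rethrowing
        committer.abortTask(cntxt);
        throw e;
    }
}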

Example 13 with WritableComparable

use of org.apache.hadoop.io.WritableComparable in project hive by apache.

the class HCatInputFormatReader method read.

@Override
public Iterator<HCatRecord> read() throws HCatException {
    HCatInputFormat inpFmt = new HCatInputFormat();
    RecordReader<WritableComparable, HCatRecord> rr;
    try {
        TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(conf, new TaskAttemptID());
        rr = inpFmt.createRecordReader(split, cntxt);
        rr.initialize(split, cntxt);
    } catch (IOException e) {
        throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
        throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    }
    return new HCatRecordItr(rr);
}
Also used : WritableComparable(org.apache.hadoop.io.WritableComparable) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) HCatException(org.apache.hive.hcatalog.common.HCatException) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) IOException(java.io.IOException) HCatInputFormat(org.apache.hive.hcatalog.mapreduce.HCatInputFormat) HCatRecord(org.apache.hive.hcatalog.data.HCatRecord)
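
HCatRecordItr's source is not shown here. A plausible adapter from RecordReader to java.util.Iterator, written as a sketch only (the class name and the lazy-advance bookkeeping are assumptions, not the real implementation):

import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;

// Illustrative only: advances the RecordReader lazily and caches the result.
class RecordReaderIterator implements Iterator<HCatRecord> {

    private final RecordReader<WritableComparable, HCatRecord> rr;
    private Boolean advanced; // null = not advanced yet; caches nextKeyValue()

    RecordReaderIterator(RecordReader<WritableComparable, HCatRecord> rr) {
        this.rr = rr;
    }

    @Override
    public boolean hasNext() {
        if (advanced == null) {
            try {
                advanced = rr.nextKeyValue(); // move to the next record once
            } catch (IOException | InterruptedException e) {
                throw new RuntimeException(e);
            }
        }
        return advanced;
    }

    @Override
    public HCatRecord next() {
        if (!hasNext()) {
            throw new NoSuchElementException();
        }
        advanced = null; // force a fresh advance on the next call
        try {
            return rr.getCurrentValue();
        } catch (IOException | InterruptedException e) {
            throw new RuntimeException(e);
        }
    }
}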

Example 14 with WritableComparable

use of org.apache.hadoop.io.WritableComparable in project hive by apache.

the class SMBMapJoinOperator method compareKeys.

private int compareKeys(List<Object> k1, List<Object> k2) {
    int ret = 0;
    // do the join keys have different sizes?
    ret = k1.size() - k2.size();
    if (ret != 0) {
        return ret;
    }
    for (int i = 0; i < k1.size(); i++) {
        WritableComparable key_1 = (WritableComparable) k1.get(i);
        WritableComparable key_2 = (WritableComparable) k2.get(i);
        if (key_1 == null && key_2 == null) {
            // both keys are null: equal for a null-safe key, otherwise treat k1 as smaller
            return nullsafes != null && nullsafes[i] ? 0 : -1;
        } else if (key_1 == null) {
            return -1;
        } else if (key_2 == null) {
            return 1;
        }
        ret = WritableComparator.get(key_1.getClass()).compare(key_1, key_2);
        if (ret != 0) {
            return ret;
        }
    }
    return ret;
}
Also used : WritableComparable(org.apache.hadoop.io.WritableComparable)
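
WritableComparator.get(cls) returns the comparator registered for a WritableComparable class, falling back to a generic deserialize-then-compareTo comparator. A minimal usage sketch with stock Hadoop types:

IntWritable a = new IntWritable(1);
IntWritable b = new IntWritable(2);
// negative result: a sorts before b; IntWritable registers its own raw-byte comparator
int cmp = WritableComparator.get(IntWritable.class).compare(a, b);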

Example 15 with WritableComparable

use of org.apache.hadoop.io.WritableComparable in project hive by apache.

the class TestE2EScenarios method copyTable.

private void copyTable(String in, String out) throws IOException, InterruptedException {
    Job ijob = new Job();
    Job ojob = new Job();
    HCatInputFormat inpy = new HCatInputFormat();
    inpy.setInput(ijob, null, in);
    HCatOutputFormat oupy = new HCatOutputFormat();
    oupy.setOutput(ojob, OutputJobInfo.create(null, out, new HashMap<String, String>()));
    // Test HCatContext
    System.err.println("HCatContext INSTANCE is present : " + HCatContext.INSTANCE.getConf().isPresent());
    if (HCatContext.INSTANCE.getConf().isPresent()) {
        System.err.println("HCatContext tinyint->int promotion says " + HCatContext.INSTANCE.getConf().get().getBoolean(HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION, HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT));
    }
    HCatSchema tableSchema = inpy.getTableSchema(ijob.getConfiguration());
    System.err.println("Copying from [" + in + "] to [" + out + "] with schema : " + tableSchema.toString());
    oupy.setSchema(ojob, tableSchema);
    oupy.checkOutputSpecs(ojob);
    OutputCommitter oc = oupy.getOutputCommitter(createTaskAttemptContext(ojob.getConfiguration()));
    oc.setupJob(ojob);
    for (InputSplit split : inpy.getSplits(ijob)) {
        TaskAttemptContext rtaskContext = createTaskAttemptContext(ijob.getConfiguration());
        TaskAttemptContext wtaskContext = createTaskAttemptContext(ojob.getConfiguration());
        RecordReader<WritableComparable, HCatRecord> rr = inpy.createRecordReader(split, rtaskContext);
        rr.initialize(split, rtaskContext);
        OutputCommitter taskOc = oupy.getOutputCommitter(wtaskContext);
        taskOc.setupTask(wtaskContext);
        RecordWriter<WritableComparable<?>, HCatRecord> rw = oupy.getRecordWriter(wtaskContext);
        while (rr.nextKeyValue()) {
            rw.write(rr.getCurrentKey(), rr.getCurrentValue());
        }
        rw.close(wtaskContext);
        taskOc.commitTask(wtaskContext);
        rr.close();
    }
    oc.commitJob(ojob);
}
Also used : OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) HCatSchema(org.apache.hive.hcatalog.data.schema.HCatSchema) HashMap(java.util.HashMap) WritableComparable(org.apache.hadoop.io.WritableComparable) HCatOutputFormat(org.apache.hive.hcatalog.mapreduce.HCatOutputFormat) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) Job(org.apache.hadoop.mapreduce.Job) InputSplit(org.apache.hadoop.mapreduce.InputSplit) HCatInputFormat(org.apache.hive.hcatalog.mapreduce.HCatInputFormat) HCatRecord(org.apache.hive.hcatalog.data.HCatRecord)
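
copyTable leans on a createTaskAttemptContext helper defined elsewhere in the test. Judging from the shim calls in Example 12, it plausibly wraps the HCatShim factory like this (a reconstruction, not the test's actual code):

// Reconstruction only: mirrors the ShimLoader calls shown in Example 12.
private TaskAttemptContext createTaskAttemptContext(Configuration conf) {
    return ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(conf,
            new TaskAttemptID(ShimLoader.getHadoopShims().getHCatShim().createTaskID(), 0));
}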

Aggregations

WritableComparable (org.apache.hadoop.io.WritableComparable): 34
IOException (java.io.IOException): 14
Writable (org.apache.hadoop.io.Writable): 14
Path (org.apache.hadoop.fs.Path): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 11
JobConf (org.apache.hadoop.mapred.JobConf): 6
CompressionCodec (org.apache.hadoop.io.compress.CompressionCodec): 5
ArrayList (java.util.ArrayList): 4
IntWritable (org.apache.hadoop.io.IntWritable): 4
NullWritable (org.apache.hadoop.io.NullWritable): 4
SequenceFile (org.apache.hadoop.io.SequenceFile): 4
TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext): 4
PCollection (com.tdunning.plume.PCollection): 3
OutputChannel (com.tdunning.plume.local.lazy.MSCR.OutputChannel): 3
PlumeObject (com.tdunning.plume.local.lazy.MapRedExecutor.PlumeObject): 3
HashMap (java.util.HashMap): 3
BytesWritable (org.apache.hadoop.io.BytesWritable): 3
FloatWritable (org.apache.hadoop.io.FloatWritable): 3
HCatRecord (org.apache.hive.hcatalog.data.HCatRecord): 3
DoFn (com.tdunning.plume.DoFn): 2