Example 1 with PlumeObject

Use of com.tdunning.plume.local.lazy.MapRedExecutor.PlumeObject in project Plume by tdunning.

The map method of the class MSCRMapper. It matches the incoming FileSplit to one of the MSCR's inputs, then runs the input's downstream operations on each record, tagging everything it emits with the destination's numbered channel.

@SuppressWarnings("unchecked")
protected void map(WritableComparable key, WritableComparable value, final Mapper<WritableComparable, WritableComparable, PlumeObject, PlumeObject>.Context context) throws IOException, InterruptedException {
    LazyCollection<?> l = null;
    FileSplit fS = FileInputSplitWrapper.getFileInputSplit(context);
    // Get LazyCollection for this input (according to FileSplit)
    for (PCollection<?> input : mscr.getInputs()) {
        LazyCollection<?> thisL = (LazyCollection<?>) input;
        if (thisL.getFile() == null) {
            // Convention for intermediate results
            thisL.setFile(tmpFolder + "/" + thisL.getPlumeId());
        }
        if (fS.getPath().toString().startsWith(thisL.getFile()) || fS.getPath().toString().startsWith("file:" + thisL.getFile())) {
            l = thisL;
            break;
        }
    }
    if (l == null) {
        throw new RuntimeException("Unable to match input split with any MSCR input");
    }
    // If this collection is a table -> process Pair, otherwise process value
    PCollectionType type = l.getType();
    Object toProcess = value;
    if (type instanceof PTableType) {
        toProcess = Pair.create(key, value);
    }
    for (DeferredOp op : l.getDownOps()) {
        if (op instanceof MultipleParallelDo) {
            MultipleParallelDo mPDo = (MultipleParallelDo) op;
            for (Object entry : mPDo.getDests().entrySet()) {
                Map.Entry<PCollection, DoFn> en = (Map.Entry<PCollection, DoFn>) entry;
                LazyCollection<?> lCol = (LazyCollection<?>) en.getKey();
                DeferredOp childOp = null;
                if (lCol.getDownOps() != null && lCol.getDownOps().size() > 0) {
                    childOp = lCol.getDownOps().get(0);
                }
                final Integer channel;
                if (childOp != null && childOp instanceof Flatten) {
                    channel = mscr.getNumberedChannels().get(((Flatten) childOp).getDest());
                } else if (childOp != null && childOp instanceof GroupByKey) {
                    channel = mscr.getNumberedChannels().get(((GroupByKey) childOp).getOrigin());
                } else {
                    // bypass channel?
                    channel = mscr.getNumberedChannels().get(en.getKey());
                }
                if (channel == null) {
                    // This record is not for this MSCR - skip it entirely
                    return;
                }
                // Call parallelDo function
                en.getValue().process(toProcess, new EmitFn() {

                    @Override
                    public void emit(Object v) {
                        try {
                            if (v instanceof Pair) {
                                Pair p = (Pair) v;
                                context.write(new PlumeObject((WritableComparable) p.getKey(), channel), new PlumeObject((WritableComparable) p.getValue(), channel));
                            } else {
                                context.write(new PlumeObject((WritableComparable) v, channel), new PlumeObject((WritableComparable) v, channel));
                            }
                        } catch (Exception e) {
                            // TODO How to report this
                            e.printStackTrace();
                        }
                    }
                });
            }
        } else {
            if (op instanceof Flatten) {
                l = (LazyCollection) ((Flatten) op).getDest();
            }
            int channel = mscr.getNumberedChannels().get(l);
            if (toProcess instanceof Pair) {
                context.write(new PlumeObject(key, channel), new PlumeObject(value, channel));
            } else {
                context.write(new PlumeObject(value, channel), new PlumeObject(value, channel));
            }
        }
    }
}
Also used: MultipleParallelDo (com.tdunning.plume.local.lazy.op.MultipleParallelDo), GroupByKey (com.tdunning.plume.local.lazy.op.GroupByKey), PlumeObject (com.tdunning.plume.local.lazy.MapRedExecutor.PlumeObject), PTableType (com.tdunning.plume.types.PTableType), Flatten (com.tdunning.plume.local.lazy.op.Flatten), PCollectionType (com.tdunning.plume.types.PCollectionType), FileSplit (org.apache.hadoop.mapreduce.lib.input.FileSplit), DeferredOp (com.tdunning.plume.local.lazy.op.DeferredOp), IOException (java.io.IOException), PCollection (com.tdunning.plume.PCollection), DoFn (com.tdunning.plume.DoFn), EmitFn (com.tdunning.plume.EmitFn), Map (java.util.Map), Pair (com.tdunning.plume.Pair)
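
The mapper never writes raw Hadoop keys and values; every record is wrapped in a PlumeObject carrying the numbered channel (sourceId) that routes it through the MSCR. The call sites above and in the reducer below (new PlumeObject(writable, channel), arg0.sourceId, val.obj) imply a small wrapper type. The following is a minimal sketch of that shape, assuming a standard WritableComparable implementation; it illustrates the idea and is not the actual class from MapRedExecutor (in particular, real serialization must also record the wrapped object's class):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;

// Hypothetical sketch: a WritableComparable bundled with the MSCR
// channel number that routes it. Names mirror the fields used above.
public class PlumeObjectSketch implements WritableComparable<PlumeObjectSketch> {

    WritableComparable obj;  // the wrapped key or value
    int sourceId;            // numbered channel within the MSCR

    public PlumeObjectSketch() {
        // no-arg constructor required by Hadoop's reflection-based deserialization
    }

    public PlumeObjectSketch(WritableComparable obj, int sourceId) {
        this.obj = obj;
        this.sourceId = sourceId;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(sourceId);
        // a real implementation must also write obj's class name so that
        // readFields can re-instantiate it; elided in this sketch
        obj.write(out);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        sourceId = in.readInt();
        // obj = <instantiate the recorded class>; obj.readFields(in);
    }

    @Override
    @SuppressWarnings("unchecked")
    public int compareTo(PlumeObjectSketch other) {
        // order by channel first so one channel's records group together
        if (sourceId != other.sourceId) {
            return Integer.compare(sourceId, other.sourceId);
        }
        return obj.compareTo(other.obj);
    }
}

Comparing on sourceId first keeps all records for one channel together during the shuffle, which is what lets a single reduce task serve several logical output channels.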

Example 2 with PlumeObject

Use of com.tdunning.plume.local.lazy.MapRedExecutor.PlumeObject in project Plume by tdunning.

The reduce method of the class MSCRReducer. It looks up the output channel for the key's source id, then either applies the channel's reducer function to the grouped values or, when no reducer is configured, writes the key/value pairs through directly.

@SuppressWarnings("unchecked")
protected void reduce(final PlumeObject arg0, Iterable<PlumeObject> values, Reducer<PlumeObject, PlumeObject, NullWritable, NullWritable>.Context arg2) throws IOException, InterruptedException {
    PCollection col = mscr.getChannelByNumber().get(arg0.sourceId);
    OutputChannel oC = mscr.getOutputChannels().get(col);
    if (oC.reducer != null) {
        // apply reducer
        ParallelDo pDo = oC.reducer;
        // TODO how to check / report this
        DoFn reducer = pDo.getFunction();
        List<WritableComparable> vals = Lists.newArrayList();
        for (PlumeObject val : values) {
            vals.add(val.obj);
        }
        reducer.process(Pair.create(arg0.obj, vals), new EmitFn() {

            @Override
            public void emit(Object v) {
                try {
                    if (v instanceof Pair) {
                        Pair p = (Pair) v;
                        mos.write(arg0.sourceId + "", p.getKey(), p.getValue());
                    } else {
                        mos.write(arg0.sourceId + "", NullWritable.get(), (WritableComparable) v);
                    }
                } catch (Exception e) {
                    // TODO How to report this
                    e.printStackTrace();
                }
            }
        });
    } else {
        // direct writing - write all key, value pairs
        for (PlumeObject val : values) {
            if (oC.output instanceof PTable) {
                mos.write(arg0.sourceId + "", arg0.obj, val.obj);
            } else {
                mos.write(arg0.sourceId + "", NullWritable.get(), val.obj);
            }
        }
    }
}
Also used: ParallelDo (com.tdunning.plume.local.lazy.op.ParallelDo), PlumeObject (com.tdunning.plume.local.lazy.MapRedExecutor.PlumeObject), IOException (java.io.IOException), PTable (com.tdunning.plume.PTable), PCollection (com.tdunning.plume.PCollection), DoFn (com.tdunning.plume.DoFn), EmitFn (com.tdunning.plume.EmitFn), WritableComparable (org.apache.hadoop.io.WritableComparable), OutputChannel (com.tdunning.plume.local.lazy.MSCR.OutputChannel), Pair (com.tdunning.plume.Pair)
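
The reducer writes through mos, a Hadoop MultipleOutputs instance, using the channel number as the named output. For mos.write(arg0.sourceId + "", ...) to resolve, the driver must register one named output per channel before submitting the job. A minimal sketch of that setup, assuming the standard org.apache.hadoop.mapreduce.lib.output.MultipleOutputs API; the channel count, output format, and key/value types here are illustrative, not taken from MapRedExecutor:

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class ChannelOutputsSketch {

    // Hypothetical driver-side setup: one named output per MSCR channel,
    // named by the channel number so the reducer's sourceId + "" matches it.
    static void registerChannels(Job job, int numChannels) {
        for (int channel = 0; channel < numChannels; channel++) {
            MultipleOutputs.addNamedOutput(job,
                Integer.toString(channel),       // matches sourceId + "" in the reducer
                SequenceFileOutputFormat.class,  // assumed output format
                NullWritable.class,              // key type, illustrative
                WritableComparable.class);       // value type, illustrative
        }
    }
}

The reducer side would then create mos as new MultipleOutputs(context) in setup() and call mos.close() in cleanup(); named outputs must be alphanumeric, which plain channel numbers satisfy.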

Example 3 with PlumeObject

Use of com.tdunning.plume.local.lazy.MapRedExecutor.PlumeObject in project Plume by tdunning.

The reduce method of the class MSCRCombiner. When the key's channel has a combiner, it folds all values for the key into a single combined value; otherwise it passes the pairs through unchanged.

@SuppressWarnings("unchecked")
protected void reduce(final PlumeObject arg0, Iterable<PlumeObject> values, Reducer<PlumeObject, PlumeObject, PlumeObject, PlumeObject>.Context context) throws IOException, InterruptedException {
    PCollection col = mscr.getChannelByNumber().get(arg0.sourceId);
    OutputChannel oC = mscr.getOutputChannels().get(col);
    if (oC.combiner != null) {
        // Apply combiner function for this channel
        List<WritableComparable> vals = Lists.newArrayList();
        for (PlumeObject val : values) {
            vals.add(val.obj);
        }
        WritableComparable result = (WritableComparable) oC.combiner.getCombiner().combine(vals);
        context.write(arg0, new PlumeObject(result, arg0.sourceId));
    } else {
        // direct writing - write all key, value pairs
        for (PlumeObject val : values) {
            context.write(arg0, val);
        }
    }
}
Also used: PCollection (com.tdunning.plume.PCollection), WritableComparable (org.apache.hadoop.io.WritableComparable), PlumeObject (com.tdunning.plume.local.lazy.MapRedExecutor.PlumeObject), OutputChannel (com.tdunning.plume.local.lazy.MSCR.OutputChannel)
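
The call oC.combiner.getCombiner().combine(vals) implies a combiner abstraction that folds a list of values for one key into a single value. A minimal sketch under that assumption; the interface name and signature below are inferred from this call site, not taken from the Plume sources:

import java.util.List;
import org.apache.hadoop.io.IntWritable;

// Inferred shape of the combiner used above: many values in, one value out.
interface CombinerFnSketch<T> {
    T combine(List<T> values);
}

// Illustrative instance: sum IntWritable counts, word-count style.
class SumCombiner implements CombinerFnSketch<IntWritable> {
    @Override
    public IntWritable combine(List<IntWritable> values) {
        int sum = 0;
        for (IntWritable v : values) {
            sum += v.get();
        }
        return new IntWritable(sum);
    }
}

Because Hadoop may run a combiner zero, one, or several times per key, any function plugged in here has to be associative and commutative; summing satisfies both.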

Aggregations

PCollection (com.tdunning.plume.PCollection): 3 uses
PlumeObject (com.tdunning.plume.local.lazy.MapRedExecutor.PlumeObject): 3 uses
DoFn (com.tdunning.plume.DoFn): 2 uses
EmitFn (com.tdunning.plume.EmitFn): 2 uses
Pair (com.tdunning.plume.Pair): 2 uses
OutputChannel (com.tdunning.plume.local.lazy.MSCR.OutputChannel): 2 uses
IOException (java.io.IOException): 2 uses
WritableComparable (org.apache.hadoop.io.WritableComparable): 2 uses
PTable (com.tdunning.plume.PTable): 1 use
DeferredOp (com.tdunning.plume.local.lazy.op.DeferredOp): 1 use
Flatten (com.tdunning.plume.local.lazy.op.Flatten): 1 use
GroupByKey (com.tdunning.plume.local.lazy.op.GroupByKey): 1 use
MultipleParallelDo (com.tdunning.plume.local.lazy.op.MultipleParallelDo): 1 use
ParallelDo (com.tdunning.plume.local.lazy.op.ParallelDo): 1 use
PCollectionType (com.tdunning.plume.types.PCollectionType): 1 use
PTableType (com.tdunning.plume.types.PTableType): 1 use
Map (java.util.Map): 1 use
FileSplit (org.apache.hadoop.mapreduce.lib.input.FileSplit): 1 use