
Example 1 with BatchJoiner

Use of co.cask.cdap.etl.api.batch.BatchJoiner in project cdap by caskdata.

The class SparkPipelineRunner, method runPipeline.

public void runPipeline(PipelinePhase pipelinePhase, String sourcePluginType, JavaSparkExecutionContext sec, Map<String, Integer> stagePartitions, PipelinePluginContext pluginContext) throws Exception {
    MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(sec.getWorkflowToken(), sec.getRuntimeArguments(), sec.getLogicalStartTime(), sec, sec.getNamespace());
    Map<String, SparkCollection<Object>> stageDataCollections = new HashMap<>();
    Map<String, SparkCollection<ErrorRecord<Object>>> stageErrorCollections = new HashMap<>();
    // should never happen, but removes warning
    if (pipelinePhase.getDag() == null) {
        throw new IllegalStateException("Pipeline phase has no connections.");
    }
    for (String stageName : pipelinePhase.getDag().getTopologicalOrder()) {
        StageInfo stageInfo = pipelinePhase.getStage(stageName);
        //noinspection ConstantConditions
        String pluginType = stageInfo.getPluginType();
        // don't want to do an additional filter for stages that can emit errors,
        // but aren't connected to an ErrorTransform
        boolean hasErrorOutput = false;
        Set<String> outputs = pipelinePhase.getStageOutputs(stageInfo.getName());
        for (String output : outputs) {
            //noinspection ConstantConditions
            if (ErrorTransform.PLUGIN_TYPE.equals(pipelinePhase.getStage(output).getPluginType())) {
                hasErrorOutput = true;
                break;
            }
        }
        SparkCollection<Object> stageData = null;
        Map<String, SparkCollection<Object>> inputDataCollections = new HashMap<>();
        Set<String> stageInputs = stageInfo.getInputs();
        for (String inputStageName : stageInputs) {
            inputDataCollections.put(inputStageName, stageDataCollections.get(inputStageName));
        }
        // initialize the stageRDD as the union of all input RDDs.
        if (!inputDataCollections.isEmpty()) {
            Iterator<SparkCollection<Object>> inputCollectionIter = inputDataCollections.values().iterator();
            stageData = inputCollectionIter.next();
            // don't union input records if we're joining or if we're processing errors
            while (!BatchJoiner.PLUGIN_TYPE.equals(pluginType) && !ErrorTransform.PLUGIN_TYPE.equals(pluginType) && inputCollectionIter.hasNext()) {
                stageData = stageData.union(inputCollectionIter.next());
            }
        }
        SparkCollection<ErrorRecord<Object>> stageErrors = null;
        PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageInfo, sec);
        if (stageData == null) {
            // stageData is null only for stages with no inputs; it is non-null in all of the other else-if conditions
            if (sourcePluginType.equals(pluginType)) {
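                // the source emits output records and error records in one combined stream;
                // the Boolean in each Tuple2 flags which of the two kinds the record is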
                SparkCollection<Tuple2<Boolean, Object>> combinedData = getSource(stageInfo);
                if (hasErrorOutput) {
                    // need to cache, otherwise the stage can be computed twice, once for output and once for errors.
                    combinedData.cache();
                    stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
                }
                stageData = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
            } else {
                throw new IllegalStateException(String.format("Stage '%s' has no input and is not a source.", stageName));
            }
        } else if (BatchSink.PLUGIN_TYPE.equals(pluginType)) {
            stageData.store(stageInfo, Compat.convert(new BatchSinkFunction(pluginFunctionContext)));
        } else if (Transform.PLUGIN_TYPE.equals(pluginType)) {
            SparkCollection<Tuple2<Boolean, Object>> combinedData = stageData.transform(stageInfo);
            if (hasErrorOutput) {
                // need to cache, otherwise the stage can be computed twice, once for output and once for errors.
                combinedData.cache();
                stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
            }
            stageData = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
        } else if (ErrorTransform.PLUGIN_TYPE.equals(pluginType)) {
            // union all the errors coming into this stage
            SparkCollection<ErrorRecord<Object>> inputErrors = null;
            for (String inputStage : stageInputs) {
                SparkCollection<ErrorRecord<Object>> inputErrorsFromStage = stageErrorCollections.get(inputStage);
                if (inputErrorsFromStage == null) {
                    continue;
                }
                if (inputErrors == null) {
                    inputErrors = inputErrorsFromStage;
                } else {
                    inputErrors = inputErrors.union(inputErrorsFromStage);
                }
            }
            if (inputErrors != null) {
                SparkCollection<Tuple2<Boolean, Object>> combinedData = inputErrors.flatMap(stageInfo, Compat.convert(new ErrorTransformFunction<>(pluginFunctionContext)));
                if (hasErrorOutput) {
                    // need to cache, otherwise the stage can be computed twice, once for output and once for errors.
                    combinedData.cache();
                    stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
                }
                stageData = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
            }
        } else if (SparkCompute.PLUGIN_TYPE.equals(pluginType)) {
            SparkCompute<Object, Object> sparkCompute = pluginContext.newPluginInstance(stageName, macroEvaluator);
            stageData = stageData.compute(stageInfo, sparkCompute);
        } else if (SparkSink.PLUGIN_TYPE.equals(pluginType)) {
            SparkSink<Object> sparkSink = pluginContext.newPluginInstance(stageName, macroEvaluator);
            stageData.store(stageInfo, sparkSink);
        } else if (BatchAggregator.PLUGIN_TYPE.equals(pluginType)) {
            Integer partitions = stagePartitions.get(stageName);
            SparkCollection<Tuple2<Boolean, Object>> combinedData = stageData.aggregate(stageInfo, partitions);
            if (hasErrorOutput) {
                // need to cache, otherwise the stage can be computed twice, once for output and once for errors.
                combinedData.cache();
                stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
            }
            stageData = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
        } else if (BatchJoiner.PLUGIN_TYPE.equals(pluginType)) {
            BatchJoiner<Object, Object, Object> joiner = pluginContext.newPluginInstance(stageName, macroEvaluator);
            BatchJoinerRuntimeContext joinerRuntimeContext = pluginFunctionContext.createBatchRuntimeContext();
            joiner.initialize(joinerRuntimeContext);
            Map<String, SparkPairCollection<Object, Object>> preJoinStreams = new HashMap<>();
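            // key each input collection by its join key so the per-stage collections can be joined below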
            for (Map.Entry<String, SparkCollection<Object>> inputStreamEntry : inputDataCollections.entrySet()) {
                String inputStage = inputStreamEntry.getKey();
                SparkCollection<Object> inputStream = inputStreamEntry.getValue();
                preJoinStreams.put(inputStage, addJoinKey(stageInfo, inputStage, inputStream));
            }
            Set<String> remainingInputs = new HashSet<>();
            remainingInputs.addAll(inputDataCollections.keySet());
            Integer numPartitions = stagePartitions.get(stageName);
            SparkPairCollection<Object, List<JoinElement<Object>>> joinedInputs = null;
            // inner join on required inputs
            for (final String inputStageName : joiner.getJoinConfig().getRequiredInputs()) {
                SparkPairCollection<Object, Object> preJoinCollection = preJoinStreams.get(inputStageName);
                if (joinedInputs == null) {
                    joinedInputs = preJoinCollection.mapValues(new InitialJoinFunction<>(inputStageName));
                } else {
                    JoinFlattenFunction<Object> joinFlattenFunction = new JoinFlattenFunction<>(inputStageName);
                    joinedInputs = numPartitions == null ? joinedInputs.join(preJoinCollection).mapValues(joinFlattenFunction) : joinedInputs.join(preJoinCollection, numPartitions).mapValues(joinFlattenFunction);
                }
                remainingInputs.remove(inputStageName);
            }
            // outer join on non-required inputs
            boolean isFullOuter = joinedInputs == null;
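            // if there were no required inputs, the optional inputs are combined with full outer joins;
            // otherwise each optional input is left-outer-joined onto the already-joined required inputs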
            for (final String inputStageName : remainingInputs) {
                SparkPairCollection<Object, Object> preJoinStream = preJoinStreams.get(inputStageName);
                if (joinedInputs == null) {
                    joinedInputs = preJoinStream.mapValues(new InitialJoinFunction<>(inputStageName));
                } else {
                    if (isFullOuter) {
                        OuterJoinFlattenFunction<Object> flattenFunction = new OuterJoinFlattenFunction<>(inputStageName);
                        joinedInputs = numPartitions == null ? joinedInputs.fullOuterJoin(preJoinStream).mapValues(flattenFunction) : joinedInputs.fullOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
                    } else {
                        LeftJoinFlattenFunction<Object> flattenFunction = new LeftJoinFlattenFunction<>(inputStageName);
                        joinedInputs = numPartitions == null ? joinedInputs.leftOuterJoin(preJoinStream).mapValues(flattenFunction) : joinedInputs.leftOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
                    }
                }
            }
            // should never happen, but removes warnings
            if (joinedInputs == null) {
                throw new IllegalStateException("There are no inputs into join stage " + stageName);
            }
            stageData = mergeJoinResults(stageInfo, joinedInputs).cache();
        } else if (Windower.PLUGIN_TYPE.equals(pluginType)) {
            Windower windower = pluginContext.newPluginInstance(stageName, macroEvaluator);
            stageData = stageData.window(stageInfo, windower);
        } else {
            throw new IllegalStateException(String.format("Stage %s is of unsupported plugin type %s.", stageName, pluginType));
        }
        if (shouldCache(pipelinePhase, stageInfo)) {
            stageData = stageData.cache();
            if (stageErrors != null) {
                stageErrors = stageErrors.cache();
            }
        }
        stageDataCollections.put(stageName, stageData);
        stageErrorCollections.put(stageName, stageErrors);
    }
}
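
The BatchJoiner branch above is driven entirely by the plugin's contract: getJoinConfig() declares which input stages are required, joinOn() supplies the join key for every record of every input stage, and merge() combines the JoinElements collected for one key. The minimal sketch below shows what such a plugin can look like, assuming that standard joinOn/getJoinConfig/merge contract; the stage names, the key field, and the omitted @Plugin/@Name annotations are illustrative and not taken from this page.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import co.cask.cdap.api.data.format.StructuredRecord;
import co.cask.cdap.etl.api.JoinConfig;
import co.cask.cdap.etl.api.JoinElement;
import co.cask.cdap.etl.api.batch.BatchJoiner;

// Illustrative joiner: a required "customers" input is joined with any other (optional)
// inputs on a shared "customer_id" field; merge() simply collects the matched records.
public class CustomerOrderJoiner extends BatchJoiner<Object, StructuredRecord, List<StructuredRecord>> {

    @Override
    public JoinConfig getJoinConfig() {
        // only "customers" is required; the runner outer-joins every remaining input
        return new JoinConfig(Collections.singletonList("customers"));
    }

    @Override
    public Object joinOn(String stageName, StructuredRecord record) {
        // every input stage is assumed to expose the same key field
        return record.get("customer_id");
    }

    @Override
    public List<StructuredRecord> merge(Object joinKey, Iterable<JoinElement<StructuredRecord>> joinRow) {
        // one JoinElement per matched input record, tagged with the stage it came from
        List<StructuredRecord> merged = new ArrayList<>();
        for (JoinElement<StructuredRecord> element : joinRow) {
            merged.add(element.getInputRecord());
        }
        return merged;
    }
}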

Example 2 with BatchJoiner

Use of co.cask.cdap.etl.api.batch.BatchJoiner in project cdap by caskdata.

The class ETLMapReduce, method initialize.

@Override
public void initialize() throws Exception {
    MapReduceContext context = getContext();
    Map<String, String> properties = context.getSpecification().getProperties();
    if (Boolean.valueOf(properties.get(Constants.STAGE_LOGGING_ENABLED))) {
        LogStageInjector.start();
    }
    CompositeFinisher.Builder finishers = CompositeFinisher.builder();
    Job job = context.getHadoopJob();
    Configuration hConf = job.getConfiguration();
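    // speculative execution is disabled, typically so duplicate task attempts cannot write sink output twice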
    hConf.setBoolean("mapreduce.map.speculative", false);
    hConf.setBoolean("mapreduce.reduce.speculative", false);
    // plugin name -> runtime args for that plugin
    Map<String, Map<String, String>> runtimeArgs = new HashMap<>();
    MacroEvaluator evaluator = new DefaultMacroEvaluator(context.getWorkflowToken(), context.getRuntimeArguments(), context.getLogicalStartTime(), context, context.getNamespace());
    BatchPhaseSpec phaseSpec = GSON.fromJson(properties.get(Constants.PIPELINEID), BatchPhaseSpec.class);
    for (Map.Entry<String, String> pipelineProperty : phaseSpec.getPipelineProperties().entrySet()) {
        hConf.set(pipelineProperty.getKey(), pipelineProperty.getValue());
    }
    PipelinePhase phase = phaseSpec.getPhase();
    PipelinePluginInstantiator pluginInstantiator = new PipelinePluginInstantiator(context, mrMetrics, phaseSpec);
    Map<String, String> inputAliasToStage = new HashMap<>();
    for (String sourceName : phaseSpec.getPhase().getSources()) {
        try {
            BatchConfigurable<BatchSourceContext> batchSource = pluginInstantiator.newPluginInstance(sourceName, evaluator);
            StageInfo stageInfo = phaseSpec.getPhase().getStage(sourceName);
            MapReduceBatchContext sourceContext = new MapReduceBatchContext(context, mrMetrics, stageInfo);
            batchSource.prepareRun(sourceContext);
            runtimeArgs.put(sourceName, sourceContext.getRuntimeArguments());
            for (String inputAlias : sourceContext.getInputNames()) {
                inputAliasToStage.put(inputAlias, sourceName);
            }
            finishers.add(batchSource, sourceContext);
        } catch (Exception e) {
            // Catch the Exception to generate a User Error Log for the Pipeline
            PIPELINE_LOG.error("Failed to initialize batch source '{}' with the error: {}. Please review your pipeline " + "configuration and check the system logs for more details.", sourceName, Throwables.getRootCause(e).getMessage(), Throwables.getRootCause(e));
            throw e;
        }
    }
    hConf.set(INPUT_ALIAS_KEY, GSON.toJson(inputAliasToStage));
    Map<String, SinkOutput> sinkOutputs = new HashMap<>();
    for (StageInfo stageInfo : Sets.union(phase.getStagesOfType(Constants.CONNECTOR_TYPE), phase.getStagesOfType(BatchSink.PLUGIN_TYPE))) {
        String sinkName = stageInfo.getName();
        // todo: add a better way to get info for all sinks
        if (!phase.getSinks().contains(sinkName)) {
            continue;
        }
        try {
            BatchConfigurable<BatchSinkContext> batchSink = pluginInstantiator.newPluginInstance(sinkName, evaluator);
            MapReduceBatchContext sinkContext = new MapReduceBatchContext(context, mrMetrics, stageInfo);
            batchSink.prepareRun(sinkContext);
            runtimeArgs.put(sinkName, sinkContext.getRuntimeArguments());
            finishers.add(batchSink, sinkContext);
            sinkOutputs.put(sinkName, new SinkOutput(sinkContext.getOutputNames(), stageInfo.getErrorDatasetName()));
        } catch (Exception e) {
            // Catch the Exception to generate a User Error Log for the Pipeline
            PIPELINE_LOG.error("Failed to initialize batch sink '{}' with the error: {}. Please review your pipeline " + "configuration and check the system logs for more details.", sinkName, Throwables.getRootCause(e).getMessage(), Throwables.getRootCause(e));
            throw e;
        }
    }
    finisher = finishers.build();
    hConf.set(SINK_OUTPUTS_KEY, GSON.toJson(sinkOutputs));
    // setup time partition for each error dataset
    for (StageInfo stageInfo : Sets.union(phase.getStagesOfType(Transform.PLUGIN_TYPE), phase.getStagesOfType(BatchSink.PLUGIN_TYPE))) {
        if (stageInfo.getErrorDatasetName() != null) {
            Map<String, String> args = new HashMap<>();
            args.put(FileSetProperties.OUTPUT_PROPERTIES_PREFIX + "avro.schema.output.key", Constants.ERROR_SCHEMA.toString());
            TimePartitionedFileSetArguments.setOutputPartitionTime(args, context.getLogicalStartTime());
            context.addOutput(Output.ofDataset(stageInfo.getErrorDatasetName(), args));
        }
    }
    job.setMapperClass(ETLMapper.class);
    Set<StageInfo> reducers = phaseSpec.getPhase().getStagesOfType(BatchAggregator.PLUGIN_TYPE, BatchJoiner.PLUGIN_TYPE);
    if (!reducers.isEmpty()) {
        job.setReducerClass(ETLReducer.class);
        String reducerName = reducers.iterator().next().getName();
        StageInfo stageInfo = phase.getStage(reducerName);
        Class<?> outputKeyClass;
        Class<?> outputValClass;
        try {
            if (!phaseSpec.getPhase().getStagesOfType(BatchAggregator.PLUGIN_TYPE).isEmpty()) {
                BatchAggregator aggregator = pluginInstantiator.newPluginInstance(reducerName, evaluator);
                DefaultAggregatorContext aggregatorContext = new DefaultAggregatorContext(context, mrMetrics, stageInfo);
                aggregator.prepareRun(aggregatorContext);
                finishers.add(aggregator, aggregatorContext);
                if (aggregatorContext.getNumPartitions() != null) {
                    job.setNumReduceTasks(aggregatorContext.getNumPartitions());
                }
                outputKeyClass = aggregatorContext.getGroupKeyClass();
                outputValClass = aggregatorContext.getGroupValueClass();
                if (outputKeyClass == null) {
                    outputKeyClass = TypeChecker.getGroupKeyClass(aggregator);
                }
                if (outputValClass == null) {
                    outputValClass = TypeChecker.getGroupValueClass(aggregator);
                }
                hConf.set(MAP_KEY_CLASS, outputKeyClass.getName());
                hConf.set(MAP_VAL_CLASS, outputValClass.getName());
                job.setMapOutputKeyClass(getOutputKeyClass(reducerName, outputKeyClass));
                job.setMapOutputValueClass(getOutputValClass(reducerName, outputValClass));
            } else {
                // reducer type is joiner
                BatchJoiner batchJoiner = pluginInstantiator.newPluginInstance(reducerName, evaluator);
                DefaultJoinerContext joinerContext = new DefaultJoinerContext(context, mrMetrics, stageInfo);
                batchJoiner.prepareRun(joinerContext);
                finishers.add(batchJoiner, joinerContext);
                if (joinerContext.getNumPartitions() != null) {
                    job.setNumReduceTasks(joinerContext.getNumPartitions());
                }
                outputKeyClass = joinerContext.getJoinKeyClass();
                Class<?> inputRecordClass = joinerContext.getJoinInputRecordClass();
                if (outputKeyClass == null) {
                    outputKeyClass = TypeChecker.getJoinKeyClass(batchJoiner);
                }
                if (inputRecordClass == null) {
                    inputRecordClass = TypeChecker.getJoinInputRecordClass(batchJoiner);
                }
                hConf.set(MAP_KEY_CLASS, outputKeyClass.getName());
                hConf.set(MAP_VAL_CLASS, inputRecordClass.getName());
                job.setMapOutputKeyClass(getOutputKeyClass(reducerName, outputKeyClass));
                getOutputValClass(reducerName, inputRecordClass);
                // for joiner plugin map output is tagged with stageName
                job.setMapOutputValueClass(TaggedWritable.class);
            }
        } catch (Exception e) {
            // Catch the Exception to generate a User Error Log for the Pipeline
            PIPELINE_LOG.error("Failed to initialize pipeline stage '{}' with the error: {}. Please review your pipeline " + "configuration and check the system logs for more details.", reducerName, Throwables.getRootCause(e).getMessage(), Throwables.getRootCause(e));
            throw e;
        }
    } else {
        job.setNumReduceTasks(0);
    }
    hConf.set(RUNTIME_ARGS_KEY, GSON.toJson(runtimeArgs));
}
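
In the joiner branch above, the map output value class is TaggedWritable because a reduce-side join needs to know which input stage each record came from once all records that share a key meet in the reducer. A minimal, framework-free sketch of that bucketing step follows; Tagged and ReduceSideJoinSketch are illustrative names, not CDAP classes.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative tagged record: the payload plus the name of the stage it came from.
final class Tagged<T> {
    final String stageName;
    final T record;

    Tagged(String stageName, T record) {
        this.stageName = stageName;
        this.record = record;
    }
}

final class ReduceSideJoinSketch {
    // Given all tagged records that share one join key (what a reducer sees for a single
    // key), bucket them by originating stage so a joiner can merge the groups.
    static <T> Map<String, List<T>> bucketByStage(Iterable<Tagged<T>> valuesForOneKey) {
        Map<String, List<T>> byStage = new HashMap<>();
        for (Tagged<T> value : valuesForOneKey) {
            byStage.computeIfAbsent(value.stageName, s -> new ArrayList<>()).add(value.record);
        }
        return byStage;
    }
}

Because Hadoop hands the reducer a single value iterator per key, the stage tag is the only way to rebuild the per-input groups that a joiner's merge step expects.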

Example 3 with BatchJoiner

Use of co.cask.cdap.etl.api.batch.BatchJoiner in project cdap by caskdata.

The class ETLSpark, method initialize.

@Override
public void initialize() throws Exception {
    SparkClientContext context = getContext();
    cleanupFiles = new ArrayList<>();
    CompositeFinisher.Builder finishers = CompositeFinisher.builder();
    SparkConf sparkConf = new SparkConf();
    sparkConf.set("spark.driver.extraJavaOptions", "-XX:MaxPermSize=256m");
    sparkConf.set("spark.executor.extraJavaOptions", "-XX:MaxPermSize=256m");
    sparkConf.set("spark.speculation", "false");
    context.setSparkConf(sparkConf);
    Map<String, String> properties = context.getSpecification().getProperties();
    BatchPhaseSpec phaseSpec = GSON.fromJson(properties.get(Constants.PIPELINEID), BatchPhaseSpec.class);
    for (Map.Entry<String, String> pipelineProperty : phaseSpec.getPipelineProperties().entrySet()) {
        sparkConf.set(pipelineProperty.getKey(), pipelineProperty.getValue());
    }
    MacroEvaluator evaluator = new DefaultMacroEvaluator(context.getWorkflowToken(), context.getRuntimeArguments(), context.getLogicalStartTime(), context, context.getNamespace());
    SparkBatchSourceFactory sourceFactory = new SparkBatchSourceFactory();
    SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
    Map<String, Integer> stagePartitions = new HashMap<>();
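    // stage name -> number of partitions requested by aggregator/joiner stages; written to the config file below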
    PluginContext pluginContext = new SparkPipelinePluginContext(context, context.getMetrics(), phaseSpec.isStageLoggingEnabled(), phaseSpec.isProcessTimingEnabled());
    for (StageInfo stageInfo : phaseSpec.getPhase()) {
        String stageName = stageInfo.getName();
        String pluginType = stageInfo.getPluginType();
        if (BatchSource.PLUGIN_TYPE.equals(pluginType)) {
            BatchConfigurable<BatchSourceContext> batchSource = pluginContext.newPluginInstance(stageName, evaluator);
            BatchSourceContext sourceContext = new SparkBatchSourceContext(sourceFactory, context, stageInfo);
            batchSource.prepareRun(sourceContext);
            finishers.add(batchSource, sourceContext);
        } else if (BatchSink.PLUGIN_TYPE.equals(pluginType)) {
            BatchConfigurable<BatchSinkContext> batchSink = pluginContext.newPluginInstance(stageName, evaluator);
            BatchSinkContext sinkContext = new SparkBatchSinkContext(sinkFactory, context, null, stageInfo);
            batchSink.prepareRun(sinkContext);
            finishers.add(batchSink, sinkContext);
        } else if (SparkSink.PLUGIN_TYPE.equals(pluginType)) {
            BatchConfigurable<SparkPluginContext> sparkSink = pluginContext.newPluginInstance(stageName, evaluator);
            SparkPluginContext sparkPluginContext = new BasicSparkPluginContext(context, stageInfo);
            sparkSink.prepareRun(sparkPluginContext);
            finishers.add(sparkSink, sparkPluginContext);
        } else if (BatchAggregator.PLUGIN_TYPE.equals(pluginType)) {
            BatchAggregator aggregator = pluginContext.newPluginInstance(stageName, evaluator);
            DefaultAggregatorContext aggregatorContext = new DefaultAggregatorContext(context, stageInfo);
            aggregator.prepareRun(aggregatorContext);
            finishers.add(aggregator, aggregatorContext);
            stagePartitions.put(stageName, aggregatorContext.getNumPartitions());
        } else if (BatchJoiner.PLUGIN_TYPE.equals(pluginType)) {
            BatchJoiner joiner = pluginContext.newPluginInstance(stageName, evaluator);
            DefaultJoinerContext sparkJoinerContext = new DefaultJoinerContext(context, stageInfo);
            joiner.prepareRun(sparkJoinerContext);
            finishers.add(joiner, sparkJoinerContext);
            stagePartitions.put(stageName, sparkJoinerContext.getNumPartitions());
        }
    }
    File configFile = File.createTempFile("HydratorSpark", ".config");
    cleanupFiles.add(configFile);
    try (Writer writer = Files.newBufferedWriter(configFile.toPath(), StandardCharsets.UTF_8)) {
        SparkBatchSourceSinkFactoryInfo sourceSinkInfo = new SparkBatchSourceSinkFactoryInfo(sourceFactory, sinkFactory, stagePartitions);
        writer.write(GSON.toJson(sourceSinkInfo));
    }
    finisher = finishers.build();
    context.localize("HydratorSpark.config", configFile.toURI());
}
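
The last few lines show the handoff used throughout this method: results computed during initialize() (the source/sink factories and the per-stage partition counts) are serialized to a temp file with Gson and localized as HydratorSpark.config so the Spark program can read them back at runtime. A self-contained sketch of that round trip is below; PartitionSettings is an illustrative stand-in for SparkBatchSourceSinkFactoryInfo.

import java.io.File;
import java.io.Reader;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;

import com.google.gson.Gson;

// Illustrative container for the settings handed from initialize() to the Spark program.
final class PartitionSettings {
    Map<String, Integer> stagePartitions = new HashMap<>();
}

final class ConfigHandoffSketch {
    private static final Gson GSON = new Gson();

    public static void main(String[] args) throws Exception {
        PartitionSettings settings = new PartitionSettings();
        settings.stagePartitions.put("joinStage", 10);

        // driver side: serialize the settings to a temp file that the job can localize
        File configFile = File.createTempFile("HydratorSparkSketch", ".config");
        try (Writer writer = Files.newBufferedWriter(configFile.toPath(), StandardCharsets.UTF_8)) {
            writer.write(GSON.toJson(settings));
        }

        // program side: read the localized file back into the same structure
        try (Reader reader = Files.newBufferedReader(configFile.toPath(), StandardCharsets.UTF_8)) {
            PartitionSettings loaded = GSON.fromJson(reader, PartitionSettings.class);
            System.out.println(loaded.stagePartitions);
        }
    }
}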

Example 4 with BatchJoiner

Use of co.cask.cdap.etl.api.batch.BatchJoiner in project cdap by caskdata.

The class MapReduceTransformExecutorFactory, method getTransformation.

@SuppressWarnings("unchecked")
@Override
protected TrackedTransform getTransformation(StageInfo stageInfo) throws Exception {
    DefaultMacroEvaluator macroEvaluator = new DefaultMacroEvaluator(taskContext.getWorkflowToken(), taskContext.getRuntimeArguments(), taskContext.getLogicalStartTime(), taskContext, taskContext.getNamespace());
    String stageName = stageInfo.getName();
    String pluginType = stageInfo.getPluginType();
    StageMetrics stageMetrics = new DefaultStageMetrics(metrics, stageName);
    if (BatchAggregator.PLUGIN_TYPE.equals(pluginType)) {
        BatchAggregator<?, ?, ?> batchAggregator = pluginInstantiator.newPluginInstance(stageName, macroEvaluator);
        BatchRuntimeContext runtimeContext = createRuntimeContext(stageInfo);
        batchAggregator.initialize(runtimeContext);
        if (isMapPhase) {
            return getTrackedEmitKeyStep(new MapperAggregatorTransformation(batchAggregator, mapOutputKeyClassName, mapOutputValClassName), stageMetrics, taskContext.getDataTracer(stageName));
        } else {
            return getTrackedAggregateStep(new ReducerAggregatorTransformation(batchAggregator, mapOutputKeyClassName, mapOutputValClassName), stageMetrics, taskContext.getDataTracer(stageName));
        }
    } else if (BatchJoiner.PLUGIN_TYPE.equals(pluginType)) {
        BatchJoiner<?, ?, ?> batchJoiner = pluginInstantiator.newPluginInstance(stageName, macroEvaluator);
        BatchJoinerRuntimeContext runtimeContext = createRuntimeContext(stageInfo);
        batchJoiner.initialize(runtimeContext);
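        // the same joiner instance serves both phases: the mapper-side transformation keys and tags
        // records, while the reducer-side transformation merges the JoinElements collected per key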
        if (isMapPhase) {
            return getTrackedEmitKeyStep(new MapperJoinerTransformation(batchJoiner, mapOutputKeyClassName, mapOutputValClassName), stageMetrics, taskContext.getDataTracer(stageName));
        } else {
            return getTrackedMergeStep(new ReducerJoinerTransformation(batchJoiner, mapOutputKeyClassName, mapOutputValClassName, runtimeContext.getInputSchemas().size()), stageMetrics, taskContext.getDataTracer(stageName));
        }
    }
    Transformation transformation = getInitializedTransformation(stageInfo);
    boolean isLimitingSource = taskContext.getDataTracer(stageName).isEnabled() && BatchSource.PLUGIN_TYPE.equals(pluginType) && isMapPhase;
    return new TrackedTransform(isLimitingSource ? new LimitingTransform(transformation, numberOfRecordsPreview) : transformation, stageMetrics, taskContext.getDataTracer(stageName));
}
