Search in sources:

Example 6 with RecordInfo

Use of io.cdap.cdap.etl.common.RecordInfo in project cdap by cdapio.

The class SparkPipelineRunner, method runPipeline.

public void runPipeline(PhaseSpec phaseSpec, String sourcePluginType, JavaSparkExecutionContext sec, Map<String, Integer> stagePartitions, PluginContext pluginContext, Map<String, StageStatisticsCollector> collectors, Set<String> uncombinableSinks, boolean consolidateStages, boolean cacheFunctions) throws Exception {
    PipelinePhase pipelinePhase = phaseSpec.getPhase();
    BasicArguments arguments = new BasicArguments(sec);
    FunctionCache.Factory functionCacheFactory = FunctionCache.Factory.newInstance(cacheFunctions);
    MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(arguments, sec.getLogicalStartTime(), sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
    Map<String, EmittedRecords> emittedRecords = new HashMap<>();
    // should never happen, but removes warning
    if (pipelinePhase.getDag() == null) {
        throw new IllegalStateException("Pipeline phase has no connections.");
    }
    Set<String> uncombinableStages = new HashSet<>(uncombinableSinks);
    for (String uncombinableType : UNCOMBINABLE_PLUGIN_TYPES) {
        pipelinePhase.getStagesOfType(uncombinableType).stream().map(StageSpec::getName).forEach(s -> uncombinableStages.add(s));
    }
    CombinerDag groupedDag = new CombinerDag(pipelinePhase.getDag(), uncombinableStages);
    Map<String, Set<String>> groups = consolidateStages ? groupedDag.groupNodes() : Collections.emptyMap();
    if (!groups.isEmpty()) {
        LOG.debug("Stage consolidation is on.");
        int groupNum = 1;
        for (Set<String> group : groups.values()) {
            LOG.debug("Group{}: {}", groupNum, group);
            groupNum++;
        }
    }
    Set<String> branchers = new HashSet<>();
    for (String stageName : groupedDag.getNodes()) {
        if (groupedDag.getNodeOutputs(stageName).size() > 1) {
            branchers.add(stageName);
        }
    }
    Set<String> shufflers = pipelinePhase.getStagesOfType(BatchAggregator.PLUGIN_TYPE).stream().map(StageSpec::getName).collect(Collectors.toSet());
    Collection<Runnable> sinkRunnables = new ArrayList<>();
    for (String stageName : groupedDag.getTopologicalOrder()) {
        if (groups.containsKey(stageName)) {
            sinkRunnables.add(handleGroup(sec, phaseSpec, groups.get(stageName), groupedDag.getNodeInputs(stageName), emittedRecords, collectors));
            continue;
        }
        StageSpec stageSpec = pipelinePhase.getStage(stageName);
        String pluginType = stageSpec.getPluginType();
        EmittedRecords.Builder emittedBuilder = EmittedRecords.builder();
        // don't want to do an additional filter for stages that can emit errors,
        // but aren't connected to an ErrorTransform
        // similarly, don't want to do an additional filter for alerts when the stage isn't connected to
        // an AlertPublisher
        boolean hasErrorOutput = false;
        boolean hasAlertOutput = false;
        Set<String> outputs = pipelinePhase.getStageOutputs(stageName);
        for (String output : outputs) {
            String outputPluginType = pipelinePhase.getStage(output).getPluginType();
            // noinspection ConstantConditions
            if (ErrorTransform.PLUGIN_TYPE.equals(outputPluginType)) {
                hasErrorOutput = true;
            } else if (AlertPublisher.PLUGIN_TYPE.equals(outputPluginType)) {
                hasAlertOutput = true;
            }
        }
        SparkCollection<Object> stageData = null;
        Map<String, SparkCollection<Object>> inputDataCollections = new HashMap<>();
        Set<String> stageInputs = pipelinePhase.getStageInputs(stageName);
        for (String inputStageName : stageInputs) {
            StageSpec inputStageSpec = pipelinePhase.getStage(inputStageName);
            if (inputStageSpec == null) {
                // means the input to this stage is in a separate phase. For example, it is an action.
                continue;
            }
            String port = null;
            // look up which output port of the input stage feeds this stage, unless either stage
            // is a connector (connectors do not use output ports)
            if (!Constants.Connector.PLUGIN_TYPE.equals(inputStageSpec.getPluginType()) && !Constants.Connector.PLUGIN_TYPE.equals(pluginType)) {
                port = inputStageSpec.getOutputPorts().get(stageName).getPort();
            }
            SparkCollection<Object> inputRecords = port == null ? emittedRecords.get(inputStageName).outputRecords : emittedRecords.get(inputStageName).outputPortRecords.get(port);
            inputDataCollections.put(inputStageName, inputRecords);
        }
        // initialize stageData as the union of all input collections.
        if (!inputDataCollections.isEmpty()) {
            Iterator<SparkCollection<Object>> inputCollectionIter = inputDataCollections.values().iterator();
            stageData = inputCollectionIter.next();
            // don't union input records if we're joining or processing errors
            while (!BatchJoiner.PLUGIN_TYPE.equals(pluginType) && !ErrorTransform.PLUGIN_TYPE.equals(pluginType) && inputCollectionIter.hasNext()) {
                stageData = stageData.union(inputCollectionIter.next());
            }
        }
        boolean isConnectorSource = Constants.Connector.PLUGIN_TYPE.equals(pluginType) && pipelinePhase.getSources().contains(stageName);
        boolean isConnectorSink = Constants.Connector.PLUGIN_TYPE.equals(pluginType) && pipelinePhase.getSinks().contains(stageName);
        StageStatisticsCollector collector = collectors.get(stageName) == null ? new NoopStageStatisticsCollector() : collectors.get(stageName);
        PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
        if (stageData == null) {
            // this check is nested inside the stageData null check to avoid warnings about stageData
            // possibly being null in the other else-if conditions
            if (sourcePluginType.equals(pluginType) || isConnectorSource) {
                SparkCollection<RecordInfo<Object>> combinedData = getSource(stageSpec, functionCacheFactory, collector);
                emittedBuilder = addEmitted(emittedBuilder, pipelinePhase, stageSpec, combinedData, groupedDag, branchers, shufflers, hasErrorOutput, hasAlertOutput);
            } else {
                throw new IllegalStateException(String.format("Stage '%s' has no input and is not a source.", stageName));
            }
        } else if (BatchSink.PLUGIN_TYPE.equals(pluginType) || isConnectorSink) {
            sinkRunnables.add(stageData.createStoreTask(stageSpec, new BatchSinkFunction(pluginFunctionContext, functionCacheFactory.newCache())));
        } else if (SparkSink.PLUGIN_TYPE.equals(pluginType)) {
            SparkSink<Object> sparkSink = pluginContext.newPluginInstance(stageName, macroEvaluator);
            sinkRunnables.add(stageData.createStoreTask(stageSpec, sparkSink));
        } else if (AlertPublisher.PLUGIN_TYPE.equals(pluginType)) {
            // union all the alerts coming into this stage
            SparkCollection<Alert> inputAlerts = null;
            for (String inputStage : stageInputs) {
                SparkCollection<Alert> inputAlertsFromStage = emittedRecords.get(inputStage).alertRecords;
                if (inputAlertsFromStage == null) {
                    continue;
                }
                if (inputAlerts == null) {
                    inputAlerts = inputAlertsFromStage;
                } else {
                    inputAlerts = inputAlerts.union(inputAlertsFromStage);
                }
            }
            if (inputAlerts != null) {
                inputAlerts.publishAlerts(stageSpec, collector);
            }
        } else if (ErrorTransform.PLUGIN_TYPE.equals(pluginType)) {
            // union all the errors coming into this stage
            SparkCollection<ErrorRecord<Object>> inputErrors = null;
            for (String inputStage : stageInputs) {
                SparkCollection<ErrorRecord<Object>> inputErrorsFromStage = emittedRecords.get(inputStage).errorRecords;
                if (inputErrorsFromStage == null) {
                    continue;
                }
                if (inputErrors == null) {
                    inputErrors = inputErrorsFromStage;
                } else {
                    inputErrors = inputErrors.union(inputErrorsFromStage);
                }
            }
            if (inputErrors != null) {
                SparkCollection<RecordInfo<Object>> combinedData = inputErrors.flatMap(stageSpec, new ErrorTransformFunction<Object, Object>(pluginFunctionContext, functionCacheFactory.newCache()));
                emittedBuilder = addEmitted(emittedBuilder, pipelinePhase, stageSpec, combinedData, groupedDag, branchers, shufflers, hasErrorOutput, hasAlertOutput);
            }
        } else {
            Object plugin = pluginContext.newPluginInstance(stageName, macroEvaluator);
            Optional<EmittedRecords.Builder> declarativeBuilder = tryRelationalTransform(pipelinePhase, groupedDag, branchers, shufflers, stageName, stageSpec, emittedBuilder, hasErrorOutput, hasAlertOutput, stageData, inputDataCollections, plugin);
            if (declarativeBuilder.isPresent()) {
                emittedBuilder = declarativeBuilder.get();
            } else {
                emittedBuilder = transform(emittedBuilder, stagePartitions, pipelinePhase, functionCacheFactory, groupedDag, branchers, shufflers, stageName, stageSpec, pluginType, hasErrorOutput, hasAlertOutput, stageData, inputDataCollections, collector, pluginFunctionContext, plugin);
            }
        }
        emittedRecords.put(stageName, emittedBuilder.build());
    }
    boolean shouldWriteInParallel = Boolean.parseBoolean(sec.getRuntimeArguments().get("pipeline.spark.parallel.sinks.enabled"));
    if (!shouldWriteInParallel) {
        for (Runnable runnable : sinkRunnables) {
            runnable.run();
        }
        return;
    }
    Collection<Future> sinkFutures = new ArrayList<>(sinkRunnables.size());
    ExecutorService executorService = Executors.newFixedThreadPool(sinkRunnables.size(), new ThreadFactoryBuilder().setNameFormat("pipeline-sink-task").build());
    for (Runnable runnable : sinkRunnables) {
        sinkFutures.add(executorService.submit(runnable));
    }
    Throwable error = null;
    for (Future future : sinkFutures) {
        try {
            future.get();
        } catch (ExecutionException e) {
            error = e.getCause();
            break;
        } catch (InterruptedException e) {
            break;
        }
    }
    executorService.shutdownNow();
    if (error != null) {
        throw Throwables.propagate(error);
    }
}
Also used: DefaultMacroEvaluator(io.cdap.cdap.etl.common.DefaultMacroEvaluator) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ArrayList(java.util.ArrayList) PluginFunctionContext(io.cdap.cdap.etl.spark.function.PluginFunctionContext) StageSpec(io.cdap.cdap.etl.proto.v2.spec.StageSpec) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) ExecutionException(java.util.concurrent.ExecutionException) FunctionCache(io.cdap.cdap.etl.spark.function.FunctionCache) NoopStageStatisticsCollector(io.cdap.cdap.etl.common.NoopStageStatisticsCollector) RecordInfo(io.cdap.cdap.etl.common.RecordInfo) CombinerDag(io.cdap.cdap.etl.planner.CombinerDag) BatchSinkFunction(io.cdap.cdap.etl.spark.function.BatchSinkFunction) StageStatisticsCollector(io.cdap.cdap.etl.common.StageStatisticsCollector) PipelinePhase(io.cdap.cdap.etl.common.PipelinePhase) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) ErrorRecord(io.cdap.cdap.etl.api.ErrorRecord)
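
The tail of runPipeline gates parallel sink execution on the pipeline.spark.parallel.sinks.enabled runtime argument: sink tasks either run sequentially or are submitted to a fixed thread pool, and the first failure is propagated after the pool is shut down. Below is a minimal, self-contained sketch of that submit-and-propagate pattern using only plain JDK executors; the class and method names are illustrative and not CDAP APIs.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelSinkSketch {

    // Runs each sink task on its own thread and rethrows the first failure, mirroring the tail of
    // runPipeline above. Names are hypothetical; plain JDK types stand in for the CDAP classes.
    static void runSinks(Collection<Runnable> sinkRunnables, boolean parallel) {
        if (!parallel) {
            // sequential fallback, as when pipeline.spark.parallel.sinks.enabled is false
            for (Runnable runnable : sinkRunnables) {
                runnable.run();
            }
            return;
        }
        ExecutorService executor = Executors.newFixedThreadPool(sinkRunnables.size());
        List<Future<?>> futures = new ArrayList<>(sinkRunnables.size());
        for (Runnable runnable : sinkRunnables) {
            futures.add(executor.submit(runnable));
        }
        Throwable error = null;
        try {
            for (Future<?> future : futures) {
                try {
                    // block until this sink task finishes
                    future.get();
                } catch (ExecutionException e) {
                    // remember the first failure and stop waiting on the rest
                    error = e.getCause();
                    break;
                } catch (InterruptedException e) {
                    // restore the interrupt flag and stop waiting
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        } finally {
            // cancel any sink tasks that are still queued or running
            executor.shutdownNow();
        }
        if (error != null) {
            throw new RuntimeException(error);
        }
    }
}

One difference worth noting: the sketch restores the thread's interrupt flag on InterruptedException, which the loop in the method above does not.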

Example 7 with RecordInfo

Use of io.cdap.cdap.etl.common.RecordInfo in project cdap by cdapio.

The class RDDCollection, method reduceDatasetAggregate.

/**
 * Performs the reduce aggregation using the Dataset API. This allows Spark to apply optimizations
 * that are not available when working at the RDD level.
 */
private <GROUP_KEY, AGG_VALUE> SparkCollection<RecordInfo<Object>> reduceDatasetAggregate(StageSpec stageSpec, @Nullable Integer partitions, StageStatisticsCollector collector) {
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
    DatasetAggregationGetKeyFunction<GROUP_KEY, T, AGG_VALUE> groupByFunction = new DatasetAggregationGetKeyFunction<>(pluginFunctionContext, functionCacheFactory.newCache());
    DatasetAggregationReduceFunction<T, AGG_VALUE> reduceFunction = new DatasetAggregationReduceFunction<>(pluginFunctionContext, functionCacheFactory.newCache());
    DatasetAggregationFinalizeFunction<GROUP_KEY, T, AGG_VALUE, ?> postFunction = new DatasetAggregationFinalizeFunction<>(pluginFunctionContext, functionCacheFactory.newCache());
    MapFunction<Tuple2<GROUP_KEY, DatasetAggregationAccumulator<T, AGG_VALUE>>, GROUP_KEY> keyFromTuple = Tuple2::_1;
    MapFunction<Tuple2<GROUP_KEY, DatasetAggregationAccumulator<T, AGG_VALUE>>, DatasetAggregationAccumulator<T, AGG_VALUE>> valueFromTuple = Tuple2::_2;
    Dataset<T> dataset = sqlContext.createDataset(rdd.rdd(), objectEncoder());
    Dataset<RecordInfo<Object>> groupedDataset = dataset.flatMap(groupByFunction, tupleEncoder()).groupByKey(keyFromTuple, objectEncoder()).mapValues(valueFromTuple, objectEncoder()).reduceGroups(reduceFunction).flatMap(postFunction, objectEncoder());
    if (!ignorePartitionsDuringDatasetAggregation && partitions != null) {
        groupedDataset = groupedDataset.coalesce(partitions);
    }
    return wrap(groupedDataset.toJavaRDD());
}
Also used: DatasetAggregationGetKeyFunction(io.cdap.cdap.etl.spark.function.DatasetAggregationGetKeyFunction) RecordInfo(io.cdap.cdap.etl.common.RecordInfo) PluginFunctionContext(io.cdap.cdap.etl.spark.function.PluginFunctionContext) Tuple2(scala.Tuple2) DatasetAggregationFinalizeFunction(io.cdap.cdap.etl.spark.function.DatasetAggregationFinalizeFunction) DatasetAggregationReduceFunction(io.cdap.cdap.etl.spark.function.DatasetAggregationReduceFunction) DatasetAggregationAccumulator(io.cdap.cdap.etl.spark.function.DatasetAggregationAccumulator)
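
The Javadoc above notes that the Dataset API lets Spark optimize the aggregation. Here is a standalone word-count sketch of the same typed groupByKey / mapValues / reduceGroups chain, using only stock Spark SQL APIs; the class name and sample data are illustrative and not taken from CDAP.

import java.util.Arrays;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.ReduceFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

public class DatasetReduceSketch {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
            .appName("dataset-reduce-sketch")
            .master("local[*]")
            .getOrCreate();

        Dataset<String> words = spark.createDataset(
            Arrays.asList("a", "b", "a", "c", "b", "a"), Encoders.STRING());

        // group by the word itself, map each record to a count of 1, then reduce within each group
        MapFunction<String, String> keyFn = word -> word;
        MapFunction<String, Integer> valueFn = word -> 1;
        ReduceFunction<Integer> sum = (x, y) -> x + y;

        Dataset<Tuple2<String, Integer>> counts = words
            .groupByKey(keyFn, Encoders.STRING())
            .mapValues(valueFn, Encoders.INT())
            .reduceGroups(sum);

        counts.show();
        spark.stop();
    }
}

The CDAP method above follows the same shape, but keys, reduces, and finalizes through plugin functions and then flatMaps the reduced groups back into RecordInfo values.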

Example 8 with RecordInfo

Use of io.cdap.cdap.etl.common.RecordInfo in project cdap by cdapio.

The class SparkStreamingPipelineRunner, method getSource.

@Override
protected SparkCollection<RecordInfo<Object>> getSource(StageSpec stageSpec, FunctionCache.Factory functionCacheFactory, StageStatisticsCollector collector) throws Exception {
    StreamingSource<Object> source;
    if (checkpointsDisabled) {
        PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
        source = pluginFunctionContext.createPlugin();
    } else {
        // check for macros in any StreamingSource. If checkpoints are enabled,
        // SparkStreaming will serialize all InputDStreams created in the checkpoint, which means
        // the InputDStream is deserialized directly from the checkpoint instead of instantiated through CDAP.
        // This means there isn't any way for us to perform macro evaluation on sources when they are loaded from
        // checkpoints. We can work around this in all other pipeline stages by dynamically instantiating the
        // plugin in all DStream functions, but can't for InputDStreams because the InputDStream constructor
        // adds itself to the context dag. Yay for constructors with global side effects.
        // TODO: (HYDRATOR-1030) figure out how to do this at configure time instead of run time
        MacroEvaluator macroEvaluator = new ErrorMacroEvaluator("Due to spark limitations, macro evaluation is not allowed in streaming sources when checkpointing " + "is enabled.");
        PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(), spec.isStageLoggingEnabled(), spec.isProcessTimingEnabled());
        source = pluginContext.newPluginInstance(stageSpec.getName(), macroEvaluator);
    }
    DataTracer dataTracer = sec.getDataTracer(stageSpec.getName());
    StreamingContext sourceContext = new DefaultStreamingContext(stageSpec, sec, streamingContext);
    JavaDStream<Object> javaDStream = source.getStream(sourceContext);
    if (dataTracer.isEnabled()) {
        // transform() creates a new function for each RDD, so the limit is applied per RDD rather than to the entire DStream.
        javaDStream = javaDStream.transform(new LimitingFunction<>(spec.getNumOfRecordsPreview()));
    }
    JavaDStream<RecordInfo<Object>> outputDStream = javaDStream.transform(new CountingTransformFunction<>(stageSpec.getName(), sec.getMetrics(), "records.out", dataTracer)).map(new WrapOutputTransformFunction<>(stageSpec.getName()));
    return new DStreamCollection<>(sec, functionCacheFactory, outputDStream);
}
Also used: DStreamCollection(io.cdap.cdap.etl.spark.streaming.DStreamCollection) PairDStreamCollection(io.cdap.cdap.etl.spark.streaming.PairDStreamCollection) StreamingContext(io.cdap.cdap.etl.api.streaming.StreamingContext) JavaStreamingContext(org.apache.spark.streaming.api.java.JavaStreamingContext) DefaultStreamingContext(io.cdap.cdap.etl.spark.streaming.DefaultStreamingContext) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) SparkPipelinePluginContext(io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext) PluginContext(io.cdap.cdap.api.plugin.PluginContext) RecordInfo(io.cdap.cdap.etl.common.RecordInfo) CountingTransformFunction(io.cdap.cdap.etl.spark.streaming.function.CountingTransformFunction) PluginFunctionContext(io.cdap.cdap.etl.spark.function.PluginFunctionContext) DataTracer(io.cdap.cdap.api.preview.DataTracer) LimitingFunction(io.cdap.cdap.etl.spark.streaming.function.preview.LimitingFunction)
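
The comment in getSource points out that a transform-based limit applies per RDD rather than to the whole DStream. The following is a small sketch of that per-RDD limiting pattern on a JavaDStream, independent of CDAP's LimitingFunction; the names, batch interval, and sample data are illustrative.

import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class PerRddLimitSketch {

    // Keeps at most 'limit' records of each batch RDD. Because transform() runs once per RDD,
    // the cap applies to every batch independently, not to the DStream as a whole.
    static <T> JavaDStream<T> limitPerRdd(JavaDStream<T> stream, long limit) {
        return stream.transform((JavaRDD<T> rdd) ->
            rdd.zipWithIndex()
               .filter(t -> t._2() < limit)
               .map(t -> t._1()));
    }

    public static void main(String[] args) throws InterruptedException {
        SparkConf conf = new SparkConf().setAppName("per-rdd-limit-sketch").setMaster("local[2]");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1));

        // a one-batch queue stream, just enough to exercise the transform
        Queue<JavaRDD<Integer>> queue = new LinkedList<>();
        queue.add(jssc.sparkContext().parallelize(Arrays.asList(1, 2, 3, 4, 5)));
        JavaDStream<Integer> numbers = jssc.queueStream(queue);

        limitPerRdd(numbers, 2).print();

        jssc.start();
        jssc.awaitTerminationOrTimeout(5000);
        jssc.stop();
    }
}

In the CDAP code above the same idea is packaged as LimitingFunction, parameterized by the preview record count from spec.getNumOfRecordsPreview().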

Example 10 with RecordInfo

Use of io.cdap.cdap.etl.common.RecordInfo in project cdap by caskdata.

The class BatchSparkPipelineDriver, method getSource.

@Override
protected SparkCollection<RecordInfo<Object>> getSource(StageSpec stageSpec, FunctionCache.Factory functionCacheFactory, StageStatisticsCollector collector) {
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
    FlatMapFunction<Tuple2<Object, Object>, RecordInfo<Object>> sourceFunction = new BatchSourceFunction(pluginFunctionContext, functionCacheFactory.newCache());
    this.functionCacheFactory = functionCacheFactory;
    return new RDDCollection<>(sec, functionCacheFactory, jsc, new SQLContext(jsc), datasetContext, sinkFactory, sourceFactory.createRDD(sec, jsc, stageSpec.getName(), Object.class, Object.class).flatMap(sourceFunction));
}
Also used: PluginFunctionContext(io.cdap.cdap.etl.spark.function.PluginFunctionContext) BatchSourceFunction(io.cdap.cdap.etl.spark.function.BatchSourceFunction) RecordInfo(io.cdap.cdap.etl.common.RecordInfo) Tuple2(scala.Tuple2) SQLContext(org.apache.spark.sql.SQLContext)
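
getSource builds the source collection by flat-mapping the raw (key, value) pairs of the source RDD through BatchSourceFunction, which emits RecordInfo wrappers. Below is a simplified sketch of that shape, with a hypothetical TaggedRecord wrapper standing in for RecordInfo; none of these names are CDAP classes.

import java.io.Serializable;
import java.util.Collections;
import java.util.Iterator;
import org.apache.spark.api.java.function.FlatMapFunction;
import scala.Tuple2;

// A hypothetical stand-in for RecordInfo: it just tags a value with the stage that emitted it.
class TaggedRecord<T> implements Serializable {
    final String stageName;
    final T value;

    TaggedRecord(String stageName, T value) {
        this.stageName = stageName;
        this.value = value;
    }
}

// Rough shape of a source flat-map function in this style: ignore the key of the key/value pair
// produced by the underlying input format and wrap the value for downstream stages.
// Usage (sketch): JavaRDD<TaggedRecord<Object>> records = sourceRdd.flatMap(new WrapSourceFunction("mySource"));
class WrapSourceFunction implements FlatMapFunction<Tuple2<Object, Object>, TaggedRecord<Object>> {
    private final String stageName;

    WrapSourceFunction(String stageName) {
        this.stageName = stageName;
    }

    @Override
    public Iterator<TaggedRecord<Object>> call(Tuple2<Object, Object> keyValue) {
        return Collections.singletonList(new TaggedRecord<>(stageName, keyValue._2())).iterator();
    }
}

The CDAP version layers plugin invocation and metrics on top of this, as the PluginFunctionContext and FunctionCache constructor arguments above suggest.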

Aggregations

RecordInfo (io.cdap.cdap.etl.common.RecordInfo): 14 usages
PluginFunctionContext (io.cdap.cdap.etl.spark.function.PluginFunctionContext): 12 usages
Tuple2 (scala.Tuple2): 8 usages
MacroEvaluator (io.cdap.cdap.api.macro.MacroEvaluator): 4 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 2 usages
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 2 usages
PluginContext (io.cdap.cdap.api.plugin.PluginContext): 2 usages
DataTracer (io.cdap.cdap.api.preview.DataTracer): 2 usages
Alert (io.cdap.cdap.etl.api.Alert): 2 usages
ErrorRecord (io.cdap.cdap.etl.api.ErrorRecord): 2 usages
StreamingContext (io.cdap.cdap.etl.api.streaming.StreamingContext): 2 usages
BasicArguments (io.cdap.cdap.etl.common.BasicArguments): 2 usages
DefaultMacroEvaluator (io.cdap.cdap.etl.common.DefaultMacroEvaluator): 2 usages
NoopStageStatisticsCollector (io.cdap.cdap.etl.common.NoopStageStatisticsCollector): 2 usages
PipelinePhase (io.cdap.cdap.etl.common.PipelinePhase): 2 usages
StageStatisticsCollector (io.cdap.cdap.etl.common.StageStatisticsCollector): 2 usages
CombinerDag (io.cdap.cdap.etl.planner.CombinerDag): 2 usages
StageSpec (io.cdap.cdap.etl.proto.v2.spec.StageSpec): 2 usages
AggregatorAggregateFunction (io.cdap.cdap.etl.spark.function.AggregatorAggregateFunction): 2 usages
AggregatorFinalizeFunction (io.cdap.cdap.etl.spark.function.AggregatorFinalizeFunction): 2 usages