Search in sources:

Example 1 with BasicArguments

use of io.cdap.cdap.etl.common.BasicArguments in project cdap by caskdata.

the class SmartWorkflow method updateTokenWithTriggeringProperties.

private void updateTokenWithTriggeringProperties(TriggeringScheduleInfo scheduleInfo, TriggeringPropertyMapping propertiesMapping, WorkflowToken token) {
    List<ProgramStatusTriggerInfo> programStatusTriggerInfos = new ArrayList<>();
    for (TriggerInfo info : scheduleInfo.getTriggerInfos()) {
        if (info instanceof ProgramStatusTriggerInfo) {
            programStatusTriggerInfos.add((ProgramStatusTriggerInfo) info);
        }
    }
    // If there is no ProgramStatusTriggerInfo, there is no need to override the existing runtime arguments
    if (programStatusTriggerInfos.isEmpty()) {
        return;
    }
    // Currently only expecting one trigger in a schedule
    ProgramStatusTriggerInfo triggerInfo = programStatusTriggerInfos.get(0);
    BasicArguments triggeringArguments = new BasicArguments(triggerInfo.getWorkflowToken(), triggerInfo.getRuntimeArguments());
    // Get the value of every triggering pipeline argument specified in the propertiesMapping and write it to the workflow token
    List<ArgumentMapping> argumentMappings = propertiesMapping.getArguments();
    for (ArgumentMapping mapping : argumentMappings) {
        String sourceKey = mapping.getSource();
        if (sourceKey == null) {
            LOG.warn("The name of argument from the triggering pipeline cannot be null, " + "skip this argument mapping: '{}'.", mapping);
            continue;
        }
        String value = triggeringArguments.get(sourceKey);
        if (value == null) {
            LOG.warn("Runtime argument '{}' is not found in run '{}' of the triggering pipeline '{}' " + "in namespace '{}' ", sourceKey, triggerInfo.getRunId(), triggerInfo.getApplicationName(), triggerInfo.getNamespace());
            continue;
        }
        // Use the argument name in the triggering pipeline if target is not specified
        String targetKey = mapping.getTarget() == null ? sourceKey : mapping.getTarget();
        token.put(targetKey, value);
    }
    // Get the resolved plugin properties map from the triggering pipeline's workflow token in triggeringArguments
    Map<String, Map<String, String>> resolvedProperties = GSON.fromJson(triggeringArguments.get(RESOLVED_PLUGIN_PROPERTIES_MAP), STAGE_PROPERTIES_MAP);
    for (PluginPropertyMapping mapping : propertiesMapping.getPluginProperties()) {
        String stageName = mapping.getStageName();
        if (stageName == null) {
            LOG.warn("The name of the stage cannot be null in plugin property mapping, skip this mapping: '{}'.", mapping);
            continue;
        }
        Map<String, String> pluginProperties = resolvedProperties.get(stageName);
        if (pluginProperties == null) {
            LOG.warn("No plugin properties can be found with stage name '{}' in triggering pipeline '{}' " + "in namespace '{}' ", mapping.getStageName(), triggerInfo.getApplicationName(), triggerInfo.getNamespace());
            continue;
        }
        String sourceKey = mapping.getSource();
        if (sourceKey == null) {
            LOG.warn("The name of argument from the triggering pipeline cannot be null, " + "skip this argument mapping: '{}'.", mapping);
            continue;
        }
        String value = pluginProperties.get(sourceKey);
        if (value == null) {
            LOG.warn("No property with name '{}' can be found in plugin '{}' of the triggering pipeline '{}' " + "in namespace '{}' ", sourceKey, stageName, triggerInfo.getApplicationName(), triggerInfo.getNamespace());
            continue;
        }
        // Use the argument name in the triggering pipeline if target is not specified
        String targetKey = mapping.getTarget() == null ? sourceKey : mapping.getTarget();
        token.put(targetKey, value);
    }
}
Also used: ArgumentMapping(io.cdap.cdap.etl.proto.v2.ArgumentMapping) ProgramStatusTriggerInfo(io.cdap.cdap.api.schedule.ProgramStatusTriggerInfo) ArrayList(java.util.ArrayList) TriggerInfo(io.cdap.cdap.api.schedule.TriggerInfo) PluginPropertyMapping(io.cdap.cdap.etl.proto.v2.PluginPropertyMapping) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap)
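A minimal sketch (not part of the CDAP sources) of the lookup pattern above: a BasicArguments built from the triggering run's workflow token and runtime arguments answers get() for values from either source, which is what lets the method resolve both plain runtime arguments and the resolved-properties map stored in the token. The helper name is illustrative only.

private static String resolveTriggeringArgument(ProgramStatusTriggerInfo triggerInfo, String key) {
    // Combine the triggering run's workflow token and runtime arguments into one lookup,
    // exactly as updateTokenWithTriggeringProperties does before reading mapped keys
    BasicArguments triggeringArguments =
        new BasicArguments(triggerInfo.getWorkflowToken(), triggerInfo.getRuntimeArguments());
    return triggeringArguments.get(key);
}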

Example 2 with BasicArguments

use of io.cdap.cdap.etl.common.BasicArguments in project cdap by caskdata.

the class BatchSparkPipelineDriver method run.

@Override
public void run(DatasetContext context) throws Exception {
    BatchPhaseSpec phaseSpec = GSON.fromJson(sec.getSpecification().getProperty(Constants.PIPELINEID), BatchPhaseSpec.class);
    Path configFile = sec.getLocalizationContext().getLocalFile("HydratorSpark.config").toPath();
    try (BufferedReader reader = Files.newBufferedReader(configFile, StandardCharsets.UTF_8)) {
        String object = reader.readLine();
        SparkBatchSourceSinkFactoryInfo sourceSinkInfo = GSON.fromJson(object, SparkBatchSourceSinkFactoryInfo.class);
        sourceFactory = sourceSinkInfo.getSparkBatchSourceFactory();
        sinkFactory = sourceSinkInfo.getSparkBatchSinkFactory();
        stagePartitions = sourceSinkInfo.getStagePartitions();
    }
    datasetContext = context;
    PipelinePluginContext pluginContext = new PipelinePluginContext(sec.getPluginContext(), sec.getMetrics(), phaseSpec.isStageLoggingEnabled(), phaseSpec.isProcessTimingEnabled());
    Map<String, StageStatisticsCollector> collectors = new HashMap<>();
    if (phaseSpec.pipelineContainsCondition()) {
        Iterator<StageSpec> iterator = phaseSpec.getPhase().iterator();
        while (iterator.hasNext()) {
            StageSpec spec = iterator.next();
            collectors.put(spec.getName(), new SparkStageStatisticsCollector(jsc));
        }
    }
    boolean isSuccessful = true;
    try {
        PipelinePluginInstantiator pluginInstantiator = new PipelinePluginInstantiator(pluginContext, sec.getMetrics(), phaseSpec, new SingleConnectorFactory());
        boolean shouldConsolidateStages = Boolean.parseBoolean(sec.getRuntimeArguments().getOrDefault(Constants.CONSOLIDATE_STAGES, Boolean.TRUE.toString()));
        boolean shouldCacheFunctions = Boolean.parseBoolean(sec.getRuntimeArguments().getOrDefault(Constants.CACHE_FUNCTIONS, Boolean.TRUE.toString()));
        boolean isPreviewEnabled = phaseSpec.getPhase().size() == 0 || sec.getDataTracer(phaseSpec.getPhase().iterator().next().getName()).isEnabled();
        // Initialize SQL engine instance if needed.
        if (!isPreviewEnabled && phaseSpec.getSQLEngineStageSpec() != null) {
            String sqlEngineStage = SQLEngineUtils.buildStageName(phaseSpec.getSQLEngineStageSpec().getPlugin().getName());
            // Instantiate SQL engine and prepare run.
            try {
                MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(new BasicArguments(sec), sec.getLogicalStartTime(), sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
                Object instance = pluginInstantiator.newPluginInstance(sqlEngineStage, macroEvaluator);
                sqlEngineAdapter = new BatchSQLEngineAdapter((SQLEngine<?, ?, ?, ?>) instance, sec, jsc, collectors);
                sqlEngineAdapter.prepareRun();
            } catch (InstantiationException ie) {
                LOG.error("Could not create plugin instance for SQLEngine class", ie);
            } finally {
                if (sqlEngineAdapter == null) {
                    LOG.warn("Could not instantiate SQLEngine instance for Transformation Pushdown");
                }
            }
        }
        runPipeline(phaseSpec, BatchSource.PLUGIN_TYPE, sec, stagePartitions, pluginInstantiator, collectors, sinkFactory.getUncombinableSinks(), shouldConsolidateStages, shouldCacheFunctions);
    } catch (Throwable t) {
        // Mark this execution as not successful.
        isSuccessful = false;
        // Rethrow
        throw t;
    } finally {
        updateWorkflowToken(sec.getWorkflowToken(), collectors);
        // Close the SQL engine adapter if needed
        if (sqlEngineAdapter != null) {
            sqlEngineAdapter.onRunFinish(isSuccessful);
            sqlEngineAdapter.close();
        }
    }
}
Also used: Path(java.nio.file.Path) DefaultMacroEvaluator(io.cdap.cdap.etl.common.DefaultMacroEvaluator) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) HashMap(java.util.HashMap) SingleConnectorFactory(io.cdap.cdap.etl.batch.connector.SingleConnectorFactory) SparkStageStatisticsCollector(io.cdap.cdap.etl.spark.SparkStageStatisticsCollector) StageStatisticsCollector(io.cdap.cdap.etl.common.StageStatisticsCollector) SQLEngine(io.cdap.cdap.etl.api.engine.sql.SQLEngine) StageSpec(io.cdap.cdap.etl.proto.v2.spec.StageSpec) BufferedReader(java.io.BufferedReader) BatchPhaseSpec(io.cdap.cdap.etl.batch.BatchPhaseSpec) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) PipelinePluginInstantiator(io.cdap.cdap.etl.batch.PipelinePluginInstantiator) PipelinePluginContext(io.cdap.cdap.etl.common.plugin.PipelinePluginContext)
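The construction new BasicArguments(sec) in the try block above is the pattern Examples 2 through 5 share: the Spark execution context's arguments are wrapped in BasicArguments and handed to a DefaultMacroEvaluator so that plugin instantiation can resolve macros. A minimal sketch (not part of the CDAP sources), assuming sec is the JavaSparkExecutionContext used by these drivers and with an illustrative helper name:

private Object instantiateWithMacros(JavaSparkExecutionContext sec,
                                     PipelinePluginInstantiator pluginInstantiator,
                                     String stageName) throws Exception {
    // Macro values are looked up from the runtime arguments carried by the execution context
    MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(new BasicArguments(sec),
        sec.getLogicalStartTime(), sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
    // Let the instantiator evaluate macros in the stage's plugin properties while creating the plugin
    return pluginInstantiator.newPluginInstance(stageName, macroEvaluator);
}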

Example 3 with BasicArguments

use of io.cdap.cdap.etl.common.BasicArguments in project cdap by caskdata.

the class StreamingMultiSinkFunction method call.

@Override
public void call(JavaRDD<RecordInfo<Object>> data, Time batchTime) throws Exception {
    long logicalStartTime = batchTime.milliseconds();
    MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime, sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
    PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(), phaseSpec.isStageLoggingEnabled(), phaseSpec.isProcessTimingEnabled());
    SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
    PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
    Map<String, SubmitterLifecycle<?>> stages = createStages(evaluator);
    // call prepareRun() on all the stages in the group
    // need to call it in an order that guarantees that inputs are called before outputs
    // this is because plugins can call getArguments().set() in the prepareRun() method,
    // which downstream stages should be able to read
    List<String> traversalOrder = new ArrayList<>(group.size());
    for (String stageName : phaseSpec.getPhase().getDag().getTopologicalOrder()) {
        if (group.contains(stageName)) {
            traversalOrder.add(stageName);
        }
    }
    for (String stageName : traversalOrder) {
        SubmitterLifecycle<?> plugin = stages.get(stageName);
        StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
        try {
            prepareRun(pipelineRuntime, sinkFactory, stageSpec, plugin);
        } catch (Exception e) {
            LOG.error("Error preparing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
            return;
        }
    }
    // run the actual transforms and sinks in this group
    boolean ranSuccessfully = true;
    try {
        MultiSinkFunction multiSinkFunction = new MultiSinkFunction(sec, phaseSpec, group, collectors);
        Set<String> outputNames = sinkFactory.writeCombinedRDD(data.flatMapToPair(multiSinkFunction), sec, sinkNames);
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext context) throws Exception {
                for (String outputName : outputNames) {
                    ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE, null, () -> context.getDataset(outputName));
                }
            }
        });
    } catch (Exception e) {
        LOG.error("Error writing to sinks {} for the batch for time {}.", sinkNames, logicalStartTime, e);
        ranSuccessfully = false;
    }
    // run onRunFinish() for each sink
    for (String stageName : traversalOrder) {
        SubmitterLifecycle<?> plugin = stages.get(stageName);
        StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
        try {
            onRunFinish(pipelineRuntime, sinkFactory, stageSpec, plugin, ranSuccessfully);
        } catch (Exception e) {
            LOG.warn("Unable to execute onRunFinish for sink {}", stageName, e);
        }
    }
}
Also used: SubmitterLifecycle(io.cdap.cdap.etl.api.SubmitterLifecycle) DefaultMacroEvaluator(io.cdap.cdap.etl.common.DefaultMacroEvaluator) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) PipelineRuntime(io.cdap.cdap.etl.common.PipelineRuntime) SparkPipelineRuntime(io.cdap.cdap.etl.spark.SparkPipelineRuntime) SparkPipelinePluginContext(io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext) PluginContext(io.cdap.cdap.api.plugin.PluginContext) ArrayList(java.util.ArrayList) MultiSinkFunction(io.cdap.cdap.etl.spark.function.MultiSinkFunction) TransactionFailureException(org.apache.tephra.TransactionFailureException) SparkBatchSinkFactory(io.cdap.cdap.etl.spark.batch.SparkBatchSinkFactory) TxRunnable(io.cdap.cdap.api.TxRunnable) StageSpec(io.cdap.cdap.etl.proto.v2.spec.StageSpec) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) DatasetContext(io.cdap.cdap.api.data.DatasetContext)

Example 4 with BasicArguments

use of io.cdap.cdap.etl.common.BasicArguments in project cdap by caskdata.

the class StreamingBatchSinkFunction method call.

@Override
public void call(JavaRDD<T> data, Time batchTime) throws Exception {
    final long logicalStartTime = batchTime.milliseconds();
    MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime, sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
    PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(), stageSpec.isStageLoggingEnabled(), stageSpec.isProcessTimingEnabled());
    final SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
    final String stageName = stageSpec.getName();
    final BatchSink<Object, Object, Object> batchSink = pluginContext.newPluginInstance(stageName, evaluator);
    final PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
    boolean isPrepared = false;
    boolean isDone = false;
    try {
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext datasetContext) throws Exception {
                SparkBatchSinkContext sinkContext = new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
                batchSink.prepareRun(sinkContext);
            }
        });
        isPrepared = true;
        PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, pipelineRuntime.getArguments().asMap(), batchTime.milliseconds(), new NoopStageStatisticsCollector());
        Set<String> outputNames = sinkFactory.writeFromRDD(data.flatMapToPair(new BatchSinkFunction<T, Object, Object>(pluginFunctionContext, functionCache)), sec, stageName);
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext context) throws Exception {
                for (String outputName : outputNames) {
                    ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE, null, () -> context.getDataset(outputName));
                }
            }
        });
        isDone = true;
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext datasetContext) throws Exception {
                SparkBatchSinkContext sinkContext = new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
                batchSink.onRunFinish(true, sinkContext);
            }
        });
    } catch (Exception e) {
        LOG.error("Error writing to sink {} for the batch for time {}.", stageName, logicalStartTime, e);
    } finally {
        if (isPrepared && !isDone) {
            sec.execute(new TxRunnable() {

                @Override
                public void run(DatasetContext datasetContext) throws Exception {
                    SparkBatchSinkContext sinkContext = new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
                    batchSink.onRunFinish(false, sinkContext);
                }
            });
        }
    }
}
Also used: NoopStageStatisticsCollector(io.cdap.cdap.etl.common.NoopStageStatisticsCollector) DefaultMacroEvaluator(io.cdap.cdap.etl.common.DefaultMacroEvaluator) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) PipelineRuntime(io.cdap.cdap.etl.common.PipelineRuntime) SparkPipelineRuntime(io.cdap.cdap.etl.spark.SparkPipelineRuntime) SparkPipelinePluginContext(io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext) PluginContext(io.cdap.cdap.api.plugin.PluginContext) SparkBatchSinkContext(io.cdap.cdap.etl.spark.batch.SparkBatchSinkContext) BatchSinkFunction(io.cdap.cdap.etl.spark.function.BatchSinkFunction) PluginFunctionContext(io.cdap.cdap.etl.spark.function.PluginFunctionContext) SparkBatchSinkFactory(io.cdap.cdap.etl.spark.batch.SparkBatchSinkFactory) TxRunnable(io.cdap.cdap.api.TxRunnable) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) DatasetContext(io.cdap.cdap.api.data.DatasetContext)

Example 5 with BasicArguments

use of io.cdap.cdap.etl.common.BasicArguments in project cdap by caskdata.

the class StreamingSparkSinkFunction method call.

@Override
public void call(JavaRDD<T> data, Time batchTime) throws Exception {
    if (data.isEmpty()) {
        return;
    }
    final long logicalStartTime = batchTime.milliseconds();
    MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime, sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
    final PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(), stageSpec.isStageLoggingEnabled(), stageSpec.isProcessTimingEnabled());
    final PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, batchTime.milliseconds());
    final String stageName = stageSpec.getName();
    final SparkSink<T> sparkSink = pluginContext.newPluginInstance(stageName, evaluator);
    boolean isPrepared = false;
    boolean isDone = false;
    try {
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext datasetContext) throws Exception {
                SparkPluginContext context = new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
                sparkSink.prepareRun(context);
            }
        });
        isPrepared = true;
        final SparkExecutionPluginContext sparkExecutionPluginContext = new SparkStreamingExecutionContext(sec, JavaSparkContext.fromSparkContext(data.rdd().context()), logicalStartTime, stageSpec);
        final JavaRDD<T> countedRDD = data.map(new CountingFunction<T>(stageName, sec.getMetrics(), "records.in", null)).cache();
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext context) throws Exception {
                sparkSink.run(sparkExecutionPluginContext, countedRDD);
            }
        });
        isDone = true;
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext datasetContext) throws Exception {
                SparkPluginContext context = new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
                sparkSink.onRunFinish(true, context);
            }
        });
    } catch (Exception e) {
        LOG.error("Error while executing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
    } finally {
        if (isPrepared && !isDone) {
            sec.execute(new TxRunnable() {

                @Override
                public void run(DatasetContext datasetContext) throws Exception {
                    SparkPluginContext context = new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
                    sparkSink.onRunFinish(false, context);
                }
            });
        }
    }
}
Also used: DefaultMacroEvaluator(io.cdap.cdap.etl.common.DefaultMacroEvaluator) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) SparkPipelineRuntime(io.cdap.cdap.etl.spark.SparkPipelineRuntime) PipelineRuntime(io.cdap.cdap.etl.common.PipelineRuntime) SparkPipelinePluginContext(io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext) PluginContext(io.cdap.cdap.api.plugin.PluginContext) BasicSparkPluginContext(io.cdap.cdap.etl.spark.batch.BasicSparkPluginContext) SparkExecutionPluginContext(io.cdap.cdap.etl.api.batch.SparkExecutionPluginContext) SparkPluginContext(io.cdap.cdap.etl.api.batch.SparkPluginContext) SparkStreamingExecutionContext(io.cdap.cdap.etl.spark.streaming.SparkStreamingExecutionContext) CountingFunction(io.cdap.cdap.etl.spark.function.CountingFunction) TxRunnable(io.cdap.cdap.api.TxRunnable) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) DatasetContext(io.cdap.cdap.api.data.DatasetContext)

Aggregations

BasicArguments (io.cdap.cdap.etl.common.BasicArguments)14
MacroEvaluator (io.cdap.cdap.api.macro.MacroEvaluator)13
DefaultMacroEvaluator (io.cdap.cdap.etl.common.DefaultMacroEvaluator)13
StageSpec (io.cdap.cdap.etl.proto.v2.spec.StageSpec)6
PluginContext (io.cdap.cdap.api.plugin.PluginContext)5
PipelineRuntime (io.cdap.cdap.etl.common.PipelineRuntime)5
SparkPipelinePluginContext (io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext)5
HashMap (java.util.HashMap)5
Map (java.util.Map)5
MacroParserOptions (io.cdap.cdap.api.macro.MacroParserOptions)4
BatchPhaseSpec (io.cdap.cdap.etl.batch.BatchPhaseSpec)4
OAuthMacroEvaluator (io.cdap.cdap.etl.common.OAuthMacroEvaluator)4
SecureStoreMacroEvaluator (io.cdap.cdap.etl.common.SecureStoreMacroEvaluator)4
TxRunnable (io.cdap.cdap.api.TxRunnable)3
DatasetContext (io.cdap.cdap.api.data.DatasetContext)3
PipelinePluginContext (io.cdap.cdap.etl.common.plugin.PipelinePluginContext)3
SparkPipelineRuntime (io.cdap.cdap.etl.spark.SparkPipelineRuntime)3
ImmutableMap (com.google.common.collect.ImmutableMap)2
Gson (com.google.gson.Gson)2
GsonBuilder (com.google.gson.GsonBuilder)2