Example 6 with DefaultStageMetrics

Use of io.cdap.cdap.etl.common.DefaultStageMetrics in project cdap by cdapio.

The class PipelinePluginContext, method wrapPlugin.

private Object wrapPlugin(String pluginId, Object plugin) {
    Caller caller = getCaller(pluginId);
    StageMetrics stageMetrics = new DefaultStageMetrics(metrics, pluginId);
    OperationTimer operationTimer = processTimingEnabled ? new MetricsOperationTimer(stageMetrics) : NoOpOperationTimer.INSTANCE;
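    // Dispatch on the plugin's type; each wrapper routes calls through the
    // Caller and, for record-processing plugins, the OperationTimer so
    // per-stage timing metrics can be emitted.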
    if (plugin instanceof Action) {
        return new WrappedAction((Action) plugin, caller);
    } else if (plugin instanceof BatchSource) {
        return new WrappedBatchSource<>((BatchSource) plugin, caller, operationTimer);
    } else if (plugin instanceof BatchSink) {
        return new WrappedBatchSink<>((BatchSink) plugin, caller, operationTimer);
    } else if (plugin instanceof ErrorTransform) {
        return new WrappedErrorTransform<>((ErrorTransform) plugin, caller, operationTimer);
    } else if (plugin instanceof Transform) {
        return new WrappedTransform<>((Transform) plugin, caller, operationTimer);
    } else if (plugin instanceof BatchReducibleAggregator) {
        return new WrappedReduceAggregator<>((BatchReducibleAggregator) plugin, caller, operationTimer);
    } else if (plugin instanceof BatchAggregator) {
        return new WrappedBatchAggregator<>((BatchAggregator) plugin, caller, operationTimer);
    } else if (plugin instanceof BatchJoiner) {
        return new WrappedBatchJoiner<>((BatchJoiner) plugin, caller, operationTimer);
    } else if (plugin instanceof PostAction) {
        return new WrappedPostAction((PostAction) plugin, caller);
    } else if (plugin instanceof SplitterTransform) {
        return new WrappedSplitterTransform<>((SplitterTransform) plugin, caller, operationTimer);
    }
    return wrapUnknownPlugin(pluginId, plugin, caller);
}
Also used: Action(io.cdap.cdap.etl.api.action.Action) PostAction(io.cdap.cdap.etl.api.batch.PostAction) BatchSource(io.cdap.cdap.etl.api.batch.BatchSource) BatchAggregator(io.cdap.cdap.etl.api.batch.BatchAggregator) BatchSink(io.cdap.cdap.etl.api.batch.BatchSink) DefaultStageMetrics(io.cdap.cdap.etl.common.DefaultStageMetrics) StageMetrics(io.cdap.cdap.etl.api.StageMetrics) BatchReducibleAggregator(io.cdap.cdap.etl.api.batch.BatchReducibleAggregator) SplitterTransform(io.cdap.cdap.etl.api.SplitterTransform) BatchJoiner(io.cdap.cdap.etl.api.batch.BatchJoiner) ErrorTransform(io.cdap.cdap.etl.api.ErrorTransform) Transform(io.cdap.cdap.etl.api.Transform)
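
Every wrapper above shares the DefaultStageMetrics built for its stage, so any metric a plugin emits is scoped to that stage. A minimal sketch of that usage, assuming only the count(String, int) method from the StageMetrics interface; the literal metric name stands in for Constants.Metrics.RECORDS_IN:

import io.cdap.cdap.api.metrics.Metrics;
import io.cdap.cdap.etl.api.StageMetrics;
import io.cdap.cdap.etl.common.DefaultStageMetrics;

public class StageMetricsSketch {

    public static void emitInputCount(Metrics metrics, String stageName, int numRecords) {
        // DefaultStageMetrics scopes every emitted metric to the given stage,
        // so different stages in the same pipeline report independent counters.
        StageMetrics stageMetrics = new DefaultStageMetrics(metrics, stageName);
        stageMetrics.count("records.in", numRecords);
    }
}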

Example 7 with DefaultStageMetrics

Use of io.cdap.cdap.etl.common.DefaultStageMetrics in project cdap by cdapio.

The class BatchSQLEngineAdapter, method write.

/**
 * Try to write the output directly to the SQLEngineOutput registered by this engine.
 *
 * @param datasetName     dataset to write
 * @param sqlEngineOutput output instance created by this engine
 * @return a {@link SQLEngineJob} containing a Boolean that indicates whether the write operation succeeded.
 */
public SQLEngineJob<Boolean> write(String datasetName, SQLEngineOutput sqlEngineOutput) {
    String outputStageName = sqlEngineOutput.getStageName();
    SQLEngineWriteJobKey writeJobKey = new SQLEngineWriteJobKey(datasetName, outputStageName, SQLEngineJobType.WRITE);
    // Run write job
    return runJob(writeJobKey, () -> {
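        // Ensure the dataset produced by the upstream stage is available in
        // the engine before attempting the write.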
        getDatasetForStage(datasetName);
        LOG.debug("Attempting write for dataset {} into {}", datasetName, sqlEngineOutput);
        SQLWriteResult writeResult = sqlEngine.write(new SQLWriteRequest(datasetName, sqlEngineOutput));
        LOG.debug("Write dataset {} into {} was {}", datasetName, sqlEngineOutput, writeResult.isSuccessful() ? "completed" : "refused");
        // If the result was successful, add stage metrics.
        if (writeResult.isSuccessful()) {
            DefaultStageMetrics stageMetrics = new DefaultStageMetrics(metrics, outputStageName);
            StageStatisticsCollector statisticsCollector = statsCollectors.get(outputStageName);
            countRecordsIn(writeResult.getNumRecords(), statisticsCollector, stageMetrics);
            countRecordsOut(writeResult.getNumRecords(), statisticsCollector, stageMetrics);
        }
        return writeResult.isSuccessful();
    });
}
Also used: SQLWriteRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLWriteRequest) StageStatisticsCollector(io.cdap.cdap.etl.common.StageStatisticsCollector) SQLEngineWriteJobKey(io.cdap.cdap.etl.engine.SQLEngineWriteJobKey) SQLWriteResult(io.cdap.cdap.etl.api.engine.sql.request.SQLWriteResult) DefaultStageMetrics(io.cdap.cdap.etl.common.DefaultStageMetrics)
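
countRecordsIn and countRecordsOut are not shown in this example; a hedged sketch of the StageMetrics side of that bookkeeping (the real helpers also update the optional StageStatisticsCollector, and the literal metric names stand in for the Constants.Metrics values):

import io.cdap.cdap.api.metrics.Metrics;
import io.cdap.cdap.etl.api.StageMetrics;
import io.cdap.cdap.etl.common.DefaultStageMetrics;

public class WriteCountSketch {

    public static void recordSuccessfulWrite(Metrics metrics, String outputStageName, int numRecords) {
        StageMetrics stageMetrics = new DefaultStageMetrics(metrics, outputStageName);
        // A direct engine write moves the same records into and out of the
        // output stage, so both counters receive the same delta.
        stageMetrics.count("records.in", numRecords);
        stageMetrics.count("records.out", numRecords);
    }
}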

Example 8 with DefaultStageMetrics

Use of io.cdap.cdap.etl.common.DefaultStageMetrics in project cdap by cdapio.

The class BaseRDDCollection, method publishAlerts.

@Override
public void publishAlerts(StageSpec stageSpec, StageStatisticsCollector collector) throws Exception {
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
    AlertPublisher alertPublisher = pluginFunctionContext.createPlugin();
    PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec);
    AlertPublisherContext alertPublisherContext = new DefaultAlertPublisherContext(pipelineRuntime, stageSpec, sec.getMessagingContext(), sec.getAdmin());
    alertPublisher.initialize(alertPublisherContext);
    StageMetrics stageMetrics = new DefaultStageMetrics(sec.getMetrics(), stageSpec.getName());
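    // Wrap the collected alerts so that each one consumed by the publisher
    // increments the stage's records-in metric.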
    TrackedIterator<Alert> trackedAlerts = new TrackedIterator<>(((JavaRDD<Alert>) rdd).collect().iterator(), stageMetrics, Constants.Metrics.RECORDS_IN);
    alertPublisher.publish(trackedAlerts);
    alertPublisher.destroy();
}
Also used: PluginFunctionContext(io.cdap.cdap.etl.spark.function.PluginFunctionContext) AlertPublisher(io.cdap.cdap.etl.api.AlertPublisher) PipelineRuntime(io.cdap.cdap.etl.common.PipelineRuntime) SparkPipelineRuntime(io.cdap.cdap.etl.spark.SparkPipelineRuntime) TrackedIterator(io.cdap.cdap.etl.common.TrackedIterator) Alert(io.cdap.cdap.etl.api.Alert) AlertPublisherContext(io.cdap.cdap.etl.api.AlertPublisherContext) DefaultAlertPublisherContext(io.cdap.cdap.etl.common.DefaultAlertPublisherContext) StageMetrics(io.cdap.cdap.etl.api.StageMetrics) DefaultStageMetrics(io.cdap.cdap.etl.common.DefaultStageMetrics) JavaRDD(org.apache.spark.api.java.JavaRDD)
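
TrackedIterator is what ties the alert stream to the stage metrics: each element pulled through it increments the configured metric before reaching the publisher. The same pattern on a plain list, with a literal metric name standing in for Constants.Metrics.RECORDS_IN:

import java.util.Arrays;
import java.util.Iterator;
import io.cdap.cdap.etl.api.StageMetrics;
import io.cdap.cdap.etl.common.TrackedIterator;

public class TrackedIteratorSketch {

    public static Iterator<String> trackedAlerts(StageMetrics stageMetrics) {
        Iterator<String> alerts = Arrays.asList("alert-1", "alert-2").iterator();
        // Every next() on the returned iterator bumps "records.in" for the
        // stage before handing the element to the caller.
        return new TrackedIterator<>(alerts, stageMetrics, "records.in");
    }
}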

Example 9 with DefaultStageMetrics

Use of io.cdap.cdap.etl.common.DefaultStageMetrics in project cdap by cdapio.

The class BatchSQLEngineAdapter, method tryRelationalTransform.

/**
 * This method is called when an engine is present and willing to try performing a relational transform.
 *
 * @param stageSpec stage specification
 * @param transform transform plugin
 * @param input     input collections
 * @return the resulting collection, or an empty optional if the transform can't be done with this engine
 */
public Optional<SQLEngineJob<SQLDataset>> tryRelationalTransform(StageSpec stageSpec, RelationalTransform transform, Map<String, SparkCollection<Object>> input) {
    String stageName = stageSpec.getName();
    Map<String, Relation> inputRelations = input.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> sqlEngine.getRelation(new SQLRelationDefinition(e.getKey(), stageSpec.getInputSchemas().get(e.getKey())))));
    BasicRelationalTransformContext pluginContext = new BasicRelationalTransformContext(getSQLRelationalEngine(), inputRelations, stageSpec.getInputSchemas(), stageSpec.getOutputSchema());
    if (!transform.transform(pluginContext)) {
        // Plugin was not able to do a relational transform with this engine
        return Optional.empty();
    }
    if (pluginContext.getOutputRelation() == null) {
        // Plugin reported a successful transformation but did not set an output
        throw new IllegalStateException("Plugin " + transform + " did not produce a relational output");
    }
    if (!pluginContext.getOutputRelation().isValid()) {
        // The output is an invalid relation; some of the transforms are probably not supported by the engine
        return Optional.empty();
    }
    // Ensure input and output schemas for this stage are supported by the engine
    if (stageSpec.getInputSchemas().values().stream().anyMatch(s -> !sqlEngine.supportsInputSchema(s))) {
        return Optional.empty();
    }
    if (!sqlEngine.supportsOutputSchema(stageSpec.getOutputSchema())) {
        return Optional.empty();
    }
    // Validate transformation definition with engine
    SQLTransformDefinition transformDefinition = new SQLTransformDefinition(stageName, pluginContext.getOutputRelation(), stageSpec.getOutputSchema(), Collections.emptyMap(), Collections.emptyMap());
    if (!sqlEngine.canTransform(transformDefinition)) {
        return Optional.empty();
    }
    return Optional.of(runJob(stageSpec.getName(), SQLEngineJobType.EXECUTE, () -> {
        // Push all input stages that need to be pushed to execute this transformation
        input.forEach((name, collection) -> {
            if (!exists(name)) {
                push(name, stageSpec.getInputSchemas().get(name), collection);
            }
        });
        // Initialize metrics collector
        DefaultStageMetrics stageMetrics = new DefaultStageMetrics(metrics, stageName);
        StageStatisticsCollector statisticsCollector = statsCollectors.get(stageName);
        // Collect input datasets and execute transformation
        Map<String, SQLDataset> inputDatasets = input.keySet().stream().collect(Collectors.toMap(Function.identity(), this::getDatasetForStage));
        // Count input records
        for (SQLDataset inputDataset : inputDatasets.values()) {
            countRecordsIn(inputDataset, statisticsCollector, stageMetrics);
        }
        // Execute transform
        SQLTransformRequest sqlContext = new SQLTransformRequest(inputDatasets, stageSpec.getName(), pluginContext.getOutputRelation(), stageSpec.getOutputSchema());
        SQLDataset transformed = sqlEngine.transform(sqlContext);
        // Count output records
        countRecordsOut(transformed, statisticsCollector, stageMetrics);
        return transformed;
    }));
}
Also used: RelationalTransform(io.cdap.cdap.etl.api.relational.RelationalTransform) SQLTransformRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLTransformRequest) DataFrames(io.cdap.cdap.api.spark.sql.DataFrames) Relation(io.cdap.cdap.etl.api.relational.Relation) SQLPullRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLPullRequest) LoggerFactory(org.slf4j.LoggerFactory) Constants(io.cdap.cdap.etl.common.Constants) StructuredRecord(io.cdap.cdap.api.data.format.StructuredRecord) SQLEngineJob(io.cdap.cdap.etl.engine.SQLEngineJob) SQLRelationDefinition(io.cdap.cdap.etl.api.engine.sql.request.SQLRelationDefinition) PullCapability(io.cdap.cdap.etl.api.engine.sql.capability.PullCapability) JavaSparkExecutionContext(io.cdap.cdap.api.spark.JavaSparkExecutionContext) StageMetrics(io.cdap.cdap.etl.api.StageMetrics) Map(java.util.Map) Objects(com.google.common.base.Objects) SQLPullDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLPullDataset) StructType(org.apache.spark.sql.types.StructType) SQLJoinDefinition(io.cdap.cdap.etl.api.engine.sql.request.SQLJoinDefinition) Threads(org.apache.twill.common.Threads) CancellationException(java.util.concurrent.CancellationException) Engine(io.cdap.cdap.etl.api.relational.Engine) Collection(java.util.Collection) SQLTransformDefinition(io.cdap.cdap.etl.api.engine.sql.request.SQLTransformDefinition) Set(java.util.Set) CompletionException(java.util.concurrent.CompletionException) Metrics(io.cdap.cdap.api.metrics.Metrics) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) RecordCollection(io.cdap.cdap.etl.api.engine.sql.dataset.RecordCollection) SQLPushDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLPushDataset) StageStatisticsCollector(io.cdap.cdap.etl.common.StageStatisticsCollector) SparkCollection(io.cdap.cdap.etl.spark.SparkCollection) List(java.util.List) DefaultStageMetrics(io.cdap.cdap.etl.common.DefaultStageMetrics) SQLDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) SQLWriteRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLWriteRequest) PushCapability(io.cdap.cdap.etl.api.engine.sql.capability.PushCapability) Optional(java.util.Optional) TransformToPairFunction(io.cdap.cdap.etl.spark.function.TransformToPairFunction) SparkRecordCollection(io.cdap.cdap.etl.api.sql.engine.dataset.SparkRecordCollection) TransformFromPairFunction(io.cdap.cdap.etl.spark.function.TransformFromPairFunction) Dataset(org.apache.spark.sql.Dataset) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) SQLPushRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLPushRequest) SQLDatasetConsumer(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDatasetConsumer) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) Function(java.util.function.Function) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) SQLEngineJobKey(io.cdap.cdap.etl.engine.SQLEngineJobKey) SQLEngineWriteJobKey(io.cdap.cdap.etl.engine.SQLEngineWriteJobKey) HashSet(java.util.HashSet) SQLEngineJobType(io.cdap.cdap.etl.engine.SQLEngineJobType) SQLDatasetProducer(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDatasetProducer) JavaRDD(org.apache.spark.api.java.JavaRDD) ExecutorService(java.util.concurrent.ExecutorService) Nullable(javax.annotation.Nullable) SQLWriteResult(io.cdap.cdap.etl.api.engine.sql.request.SQLWriteResult) Logger(org.slf4j.Logger) JoinStage(io.cdap.cdap.etl.api.join.JoinStage) SQLEngineException(io.cdap.cdap.etl.api.engine.sql.SQLEngineException) SQLJoinRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLJoinRequest) SQLContext(org.apache.spark.sql.SQLContext) Row(org.apache.spark.sql.Row) Schema(io.cdap.cdap.api.data.schema.Schema) JavaPairRDD(org.apache.spark.api.java.JavaPairRDD) Closeable(java.io.Closeable) SQLEngineOutput(io.cdap.cdap.etl.api.engine.sql.SQLEngineOutput) SQLEngine(io.cdap.cdap.etl.api.engine.sql.SQLEngine) JoinDefinition(io.cdap.cdap.etl.api.join.JoinDefinition) StageSpec(io.cdap.cdap.etl.proto.v2.spec.StageSpec) Collections(java.util.Collections) SparkRecordCollectionImpl(io.cdap.cdap.etl.api.sql.engine.dataset.SparkRecordCollectionImpl)
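
For the plugin side of this handshake, here is a hedged sketch of a RelationalTransform that passes its single input through unchanged. The context accessors used below (getInputRelationNames, getInputRelation, setOutputRelation) are assumptions about the RelationalTransformContext API, made only to illustrate the contract the adapter checks:

import io.cdap.cdap.etl.api.relational.Relation;
import io.cdap.cdap.etl.api.relational.RelationalTransform;
import io.cdap.cdap.etl.api.relational.RelationalTransformContext;

public class PassThroughRelationalTransform implements RelationalTransform {

    @Override
    public boolean transform(RelationalTransformContext context) {
        // Assumed accessors: look up the single input relation by name.
        String inputName = context.getInputRelationNames().iterator().next();
        Relation input = context.getInputRelation(inputName);
        // The adapter above requires a non-null, valid output relation;
        // otherwise it throws or falls back to the non-SQL execution path.
        context.setOutputRelation(input);
        // Returning true signals that the plugin attempted a relational
        // transform; returning false makes tryRelationalTransform return empty.
        return true;
    }
}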

Example 10 with DefaultStageMetrics

Use of io.cdap.cdap.etl.common.DefaultStageMetrics in project cdap by cdapio.

The class MapReduceTransformExecutorFactory, method getTransformation.

@SuppressWarnings("unchecked")
@Override
protected <IN, OUT> TrackedTransform<IN, OUT> getTransformation(StageSpec stageSpec) throws Exception {
    String stageName = stageSpec.getName();
    String pluginType = stageSpec.getPluginType();
    StageMetrics stageMetrics = new DefaultStageMetrics(metrics, stageName);
    TaskAttemptContext taskAttemptContext = (TaskAttemptContext) taskContext.getHadoopContext();
    StageStatisticsCollector collector = collectStageStatistics ? new MapReduceStageStatisticsCollector(stageName, taskAttemptContext) : new NoopStageStatisticsCollector();
    if (BatchAggregator.PLUGIN_TYPE.equals(pluginType)) {
        Object plugin = pluginInstantiator.newPluginInstance(stageName, macroEvaluator);
        BatchAggregator<?, ?, ?> batchAggregator;
        if (plugin instanceof BatchReducibleAggregator) {
            BatchReducibleAggregator<?, ?, ?, ?> reducibleAggregator = (BatchReducibleAggregator<?, ?, ?, ?>) plugin;
            batchAggregator = new AggregatorBridge<>(reducibleAggregator);
        } else {
            batchAggregator = (BatchAggregator<?, ?, ?>) plugin;
        }
        BatchRuntimeContext runtimeContext = createRuntimeContext(stageSpec);
        batchAggregator.initialize(runtimeContext);
        if (isMapPhase) {
            return getTrackedEmitKeyStep(new MapperAggregatorTransformation(batchAggregator, mapOutputKeyClassName, mapOutputValClassName), stageMetrics, getDataTracer(stageName), collector);
        } else {
            return getTrackedAggregateStep(new ReducerAggregatorTransformation(batchAggregator, mapOutputKeyClassName, mapOutputValClassName), stageMetrics, getDataTracer(stageName), collector);
        }
    } else if (BatchJoiner.PLUGIN_TYPE.equals(pluginType)) {
        Object plugin = pluginInstantiator.newPluginInstance(stageName, macroEvaluator);
        BatchJoiner<?, ?, ?> batchJoiner;
        Set<String> filterNullKeyStages = new HashSet<>();
        if (plugin instanceof BatchAutoJoiner) {
            BatchAutoJoiner autoJoiner = (BatchAutoJoiner) plugin;
            FailureCollector failureCollector = new LoggingFailureCollector(stageName, stageSpec.getInputSchemas());
            DefaultAutoJoinerContext context = DefaultAutoJoinerContext.from(stageSpec.getInputSchemas(), failureCollector);
            // definition will be non-null because PipelinePhasePreparer validates it at the start of the run
            JoinDefinition joinDefinition = autoJoiner.define(context);
            JoinCondition condition = joinDefinition.getCondition();
            // should never happen as it's checked at deployment time, but add this to be safe.
            if (condition.getOp() != JoinCondition.Op.KEY_EQUALITY) {
                failureCollector.addFailure(String.format("Join stage '%s' uses a %s condition, which is not supported with the MapReduce engine.", stageName, condition.getOp()), "Switch to a different execution engine.");
            }
            failureCollector.getOrThrowException();
            batchJoiner = new JoinerBridge(stageName, autoJoiner, joinDefinition);
            // this is the same as filtering out records that have a null key if they are from an optional stage
            if (condition.getOp() == JoinCondition.Op.KEY_EQUALITY && !((JoinCondition.OnKeys) condition).isNullSafe()) {
                filterNullKeyStages = joinDefinition.getStages().stream().filter(s -> !s.isRequired()).map(JoinStage::getStageName).collect(Collectors.toSet());
            }
        } else {
            batchJoiner = (BatchJoiner<?, ?, ?>) plugin;
        }
        BatchJoinerRuntimeContext runtimeContext = createRuntimeContext(stageSpec);
        batchJoiner.initialize(runtimeContext);
        if (isMapPhase) {
            return getTrackedEmitKeyStep(new MapperJoinerTransformation(batchJoiner, mapOutputKeyClassName, mapOutputValClassName, filterNullKeyStages), stageMetrics, getDataTracer(stageName), collector);
        } else {
            return getTrackedMergeStep(new ReducerJoinerTransformation(batchJoiner, mapOutputKeyClassName, mapOutputValClassName, runtimeContext.getInputSchemas().size()), stageMetrics, getDataTracer(stageName), collector);
        }
    }
    return super.getTransformation(stageSpec);
}
Also used: LoggingFailureCollector(io.cdap.cdap.etl.validation.LoggingFailureCollector) Set(java.util.Set) HashSet(java.util.HashSet) DefaultAutoJoinerContext(io.cdap.cdap.etl.common.DefaultAutoJoinerContext) BatchAutoJoiner(io.cdap.cdap.etl.api.batch.BatchAutoJoiner) BatchRuntimeContext(io.cdap.cdap.etl.api.batch.BatchRuntimeContext) JoinDefinition(io.cdap.cdap.etl.api.join.JoinDefinition) StageMetrics(io.cdap.cdap.etl.api.StageMetrics) DefaultStageMetrics(io.cdap.cdap.etl.common.DefaultStageMetrics) BatchReducibleAggregator(io.cdap.cdap.etl.api.batch.BatchReducibleAggregator) JoinerBridge(io.cdap.cdap.etl.common.plugin.JoinerBridge) BatchJoinerRuntimeContext(io.cdap.cdap.etl.api.batch.BatchJoinerRuntimeContext) NoopStageStatisticsCollector(io.cdap.cdap.etl.common.NoopStageStatisticsCollector) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) BatchJoiner(io.cdap.cdap.etl.api.batch.BatchJoiner) JoinCondition(io.cdap.cdap.etl.api.join.JoinCondition) StageStatisticsCollector(io.cdap.cdap.etl.common.StageStatisticsCollector) FailureCollector(io.cdap.cdap.etl.api.FailureCollector)
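
The joiner branch leans on the FailureCollector pattern: failures accumulate together with a corrective action, and getOrThrowException() raises once at the end. A condensed sketch of that flow, passing an empty input-schema map to the constructor for brevity:

import java.util.Collections;
import io.cdap.cdap.etl.api.FailureCollector;
import io.cdap.cdap.etl.validation.LoggingFailureCollector;

public class FailureCollectorSketch {

    public static void validateJoinCondition(String stageName, boolean isKeyEquality) {
        FailureCollector failureCollector = new LoggingFailureCollector(stageName, Collections.emptyMap());
        if (!isKeyEquality) {
            // Each failure carries a message plus a corrective action; execution
            // continues so multiple problems can be reported in one exception.
            failureCollector.addFailure(
                String.format("Join stage '%s' uses an unsupported condition.", stageName),
                "Switch to a different execution engine.");
        }
        // Throws a single aggregated exception if any failures were recorded.
        failureCollector.getOrThrowException();
    }
}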

Aggregations

DefaultStageMetrics (io.cdap.cdap.etl.common.DefaultStageMetrics): 20 usages
StageMetrics (io.cdap.cdap.etl.api.StageMetrics): 16 usages
StageStatisticsCollector (io.cdap.cdap.etl.common.StageStatisticsCollector): 12 usages
Alert (io.cdap.cdap.etl.api.Alert): 6 usages
AlertPublisher (io.cdap.cdap.etl.api.AlertPublisher): 6 usages
AlertPublisherContext (io.cdap.cdap.etl.api.AlertPublisherContext): 6 usages
DefaultAlertPublisherContext (io.cdap.cdap.etl.common.DefaultAlertPublisherContext): 6 usages
NoopStageStatisticsCollector (io.cdap.cdap.etl.common.NoopStageStatisticsCollector): 6 usages
PipelineRuntime (io.cdap.cdap.etl.common.PipelineRuntime): 6 usages
TrackedIterator (io.cdap.cdap.etl.common.TrackedIterator): 6 usages
MacroEvaluator (io.cdap.cdap.api.macro.MacroEvaluator): 4 usages
BatchJoiner (io.cdap.cdap.etl.api.batch.BatchJoiner): 4 usages
BatchReducibleAggregator (io.cdap.cdap.etl.api.batch.BatchReducibleAggregator): 4 usages
PostAction (io.cdap.cdap.etl.api.batch.PostAction): 4 usages
SQLWriteRequest (io.cdap.cdap.etl.api.engine.sql.request.SQLWriteRequest): 4 usages
SQLWriteResult (io.cdap.cdap.etl.api.engine.sql.request.SQLWriteResult): 4 usages
SQLDataset (io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset): 3 usages
JoinDefinition (io.cdap.cdap.etl.api.join.JoinDefinition): 3 usages
StageSpec (io.cdap.cdap.etl.proto.v2.spec.StageSpec): 3 usages
ArrayList (java.util.ArrayList): 3 usages