
Example 1 with SQLEngineJob

Use of io.cdap.cdap.etl.engine.SQLEngineJob in project cdap by caskdata.

In class BatchSQLEngineAdapter, method pull.

/**
 * Creates a new job to pull a Spark collection from the SQL engine.
 *
 * @param job the job representing the compute stage for the dataset we need to pull.
 * @return a job representing this pull operation.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public <T> SQLEngineJob<JavaRDD<T>> pull(SQLEngineJob<SQLDataset> job) {
    // If this job already exists, return the existing instance.
    SQLEngineJobKey jobKey = new SQLEngineJobKey(job.getDatasetName(), SQLEngineJobType.PULL);
    if (jobs.containsKey(jobKey)) {
        return (SQLEngineJob<JavaRDD<T>>) jobs.get(jobKey);
    }
    CompletableFuture<JavaRDD<T>> future = new CompletableFuture<>();
    Runnable pullTask = () -> {
        try {
            LOG.debug("Starting pull for dataset '{}'", job.getDatasetName());
            waitForJobAndThrowException(job);
            JavaRDD<T> result = pullInternal(job.waitFor());
            LOG.debug("Completed pull for dataset '{}'", job.getDatasetName());
            future.complete(result);
        } catch (Throwable t) {
            future.completeExceptionally(t);
        }
    };
    executorService.submit(pullTask);
    SQLEngineJob<JavaRDD<T>> pullJob = new SQLEngineJob<>(jobKey, future);
    jobs.put(jobKey, pullJob);
    return pullJob;
}
Also used: CompletableFuture(java.util.concurrent.CompletableFuture) SQLEngineJobKey(io.cdap.cdap.etl.engine.SQLEngineJobKey) SQLEngineJob(io.cdap.cdap.etl.engine.SQLEngineJob) JavaRDD(org.apache.spark.api.java.JavaRDD)
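
Both pull and push memoize one CompletableFuture per job key, so repeated requests for the same dataset share a single task. A minimal, self-contained sketch of that cache-or-submit pattern follows; the class and method names here are illustrative, not CDAP APIs:

import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class JobCache {
    // One future per job key, mirroring the 'jobs' map in the adapter.
    private final Map<String, CompletableFuture<String>> jobs = new ConcurrentHashMap<>();
    private final ExecutorService executor = Executors.newCachedThreadPool();

    CompletableFuture<String> submitOnce(String jobKey, Callable<String> work) {
        // computeIfAbsent guarantees a single task per key, which is the
        // intent of the containsKey check in pull() above.
        return jobs.computeIfAbsent(jobKey, key -> {
            CompletableFuture<String> future = new CompletableFuture<>();
            executor.submit(() -> {
                try {
                    future.complete(work.call());
                } catch (Throwable t) {
                    // Failures are delivered to every waiter on the future.
                    future.completeExceptionally(t);
                }
            });
            return future;
        });
    }
}

Note that the sketch uses computeIfAbsent for an atomic check-and-insert, whereas the adapter code separates containsKey and put.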

Example 2 with SQLEngineJob

Use of io.cdap.cdap.etl.engine.SQLEngineJob in project cdap by caskdata.

In class BatchSQLEngineAdapter, method push.

/**
 * Creates a new job to push a SparkCollection into the SQL engine.
 *
 * @param datasetName the name of the dataset to push
 * @param schema      the schema for this dataset
 * @param collection  the Spark collection containing the dataset to push
 * @return a job representing this push operation.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
protected SQLEngineJob<SQLDataset> push(String datasetName, Schema schema, SparkCollection<?> collection) {
    // If this job already exists, return the existing instance.
    SQLEngineJobKey jobKey = new SQLEngineJobKey(datasetName, SQLEngineJobType.PUSH);
    if (jobs.containsKey(jobKey)) {
        return (SQLEngineJob<SQLDataset>) jobs.get(jobKey);
    }
    CompletableFuture<SQLDataset> future = new CompletableFuture<>();
    Runnable pushTask = () -> {
        try {
            LOG.debug("Starting push for dataset '{}'", datasetName);
            SQLDataset result = pushInternal(datasetName, schema, collection);
            LOG.debug("Completed push for dataset '{}'", datasetName);
            future.complete(result);
        } catch (Throwable t) {
            future.completeExceptionally(t);
        }
    };
    executorService.submit(pushTask);
    SQLEngineJob<SQLDataset> job = new SQLEngineJob<>(jobKey, future);
    jobs.put(jobKey, job);
    return job;
}
Also used: CompletableFuture(java.util.concurrent.CompletableFuture) SQLEngineJobKey(io.cdap.cdap.etl.engine.SQLEngineJobKey) SQLDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) SQLEngineJob(io.cdap.cdap.etl.engine.SQLEngineJob)
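
Because the worker captures failures with completeExceptionally instead of letting them escape the executor thread, callers that later block on the job see the failure wrapped. A small standalone illustration of how such a failure surfaces (not CDAP code):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class FailurePropagation {
    public static void main(String[] args) {
        CompletableFuture<String> future = new CompletableFuture<>();
        // Simulate a failed push task.
        future.completeExceptionally(new IllegalStateException("push failed"));
        try {
            future.join();
        } catch (CompletionException e) {
            // join() wraps the original throwable; getCause() recovers it.
            System.out.println("cause: " + e.getCause().getMessage());
        }
    }
}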

Example 3 with SQLEngineJob

Use of io.cdap.cdap.etl.engine.SQLEngineJob in project cdap by caskdata.

In class BatchSQLEngineAdapter, method getDatasetForStage.

/**
 * Fetches the dataset for an input stage by waiting on the push or execute
 * job that was registered for that stage.
 *
 * @param stageName the name of the input stage
 * @return the SQL dataset produced by that stage's push or execute job
 */
private SQLDataset getDatasetForStage(String stageName) {
    // Wait for the previous push or execute jobs to complete
    SQLEngineJobKey pushJobKey = new SQLEngineJobKey(stageName, SQLEngineJobType.PUSH);
    SQLEngineJobKey execJobKey = new SQLEngineJobKey(stageName, SQLEngineJobType.EXECUTE);
    if (jobs.containsKey(pushJobKey)) {
        SQLEngineJob<SQLDataset> job = (SQLEngineJob<SQLDataset>) jobs.get(pushJobKey);
        waitForJobAndThrowException(job);
        return job.waitFor();
    } else if (jobs.containsKey(execJobKey)) {
        SQLEngineJob<SQLDataset> job = (SQLEngineJob<SQLDataset>) jobs.get(execJobKey);
        waitForJobAndThrowException(job);
        return job.waitFor();
    } else {
        throw new IllegalArgumentException("No SQL Engine job exists for stage " + stageName);
    }
}
Also used: SQLEngineJobKey(io.cdap.cdap.etl.engine.SQLEngineJobKey) SQLDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) SQLEngineJob(io.cdap.cdap.etl.engine.SQLEngineJob)
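
The lookup order matters: a PUSH job for the stage takes precedence over an EXECUTE job. The same first-match lookup can be written generically; a sketch with illustrative names, not part of the adapter:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;

class FirstMatch {
    /** Returns the value of the first key present in the map, if any. */
    static <K, V> Optional<V> firstPresent(Map<K, V> map, List<K> keysInOrder) {
        for (K key : keysInOrder) {
            if (map.containsKey(key)) {
                return Optional.of(map.get(key));
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        Map<String, String> jobs = Map.of("stage:PUSH", "pushed dataset");
        // Prints Optional[pushed dataset]; the PUSH key wins when both exist.
        System.out.println(firstPresent(jobs, Arrays.asList("stage:PUSH", "stage:EXECUTE")));
    }
}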

Example 4 with SQLEngineJob

Use of io.cdap.cdap.etl.engine.SQLEngineJob in project cdap by caskdata.

In class BatchSQLEngineAdapter, method tryRelationalTransform.

/**
 * This method is called when an engine is present and willing to attempt a relational transform.
 *
 * @param stageSpec stage specification
 * @param transform transform plugin
 * @param input     input collections
 * @return the resulting job, or an empty Optional if the transform cannot be performed by this engine
 */
public Optional<SQLEngineJob<SQLDataset>> tryRelationalTransform(StageSpec stageSpec, RelationalTransform transform, Map<String, SparkCollection<Object>> input) {
    String stageName = stageSpec.getName();
    Map<String, Relation> inputRelations = input.entrySet().stream()
        .collect(Collectors.toMap(
            Map.Entry::getKey,
            e -> sqlEngine.getRelation(
                new SQLRelationDefinition(e.getKey(), stageSpec.getInputSchemas().get(e.getKey())))));
    BasicRelationalTransformContext pluginContext = new BasicRelationalTransformContext(getSQLRelationalEngine(), inputRelations, stageSpec.getInputSchemas(), stageSpec.getOutputSchema());
    if (!transform.transform(pluginContext)) {
        // Plugin was not able to perform a relational transform with this engine
        return Optional.empty();
    }
    if (pluginContext.getOutputRelation() == null) {
        // Plugin reported the transformation as successful but did not set an output
        throw new IllegalStateException("Plugin " + transform + " did not produce a relational output");
    }
    if (!pluginContext.getOutputRelation().isValid()) {
        // The output was set to an invalid relation, probably because some transforms are not supported by the engine
        return Optional.empty();
    }
    // Ensure input and output schemas for this stage are supported by the engine
    if (stageSpec.getInputSchemas().values().stream().anyMatch(s -> !sqlEngine.supportsInputSchema(s))) {
        return Optional.empty();
    }
    if (!sqlEngine.supportsOutputSchema(stageSpec.getOutputSchema())) {
        return Optional.empty();
    }
    // Validate transformation definition with engine
    SQLTransformDefinition transformDefinition = new SQLTransformDefinition(
        stageName, pluginContext.getOutputRelation(), stageSpec.getOutputSchema(),
        Collections.emptyMap(), Collections.emptyMap());
    if (!sqlEngine.canTransform(transformDefinition)) {
        return Optional.empty();
    }
    return Optional.of(runJob(stageSpec.getName(), SQLEngineJobType.EXECUTE, () -> {
        // Push all stages that need to be pushed to execute this aggregation
        input.forEach((name, collection) -> {
            if (!exists(name)) {
                push(name, stageSpec.getInputSchemas().get(name), collection);
            }
        });
        // Initialize metrics collector
        DefaultStageMetrics stageMetrics = new DefaultStageMetrics(metrics, stageName);
        StageStatisticsCollector statisticsCollector = statsCollectors.get(stageName);
        // Collect input datasets and execute transformation
        Map<String, SQLDataset> inputDatasets = input.keySet().stream()
            .collect(Collectors.toMap(Function.identity(), this::getDatasetForStage));
        // Count input records
        for (SQLDataset inputDataset : inputDatasets.values()) {
            countRecordsIn(inputDataset, statisticsCollector, stageMetrics);
        }
        // Execute transform
        SQLTransformRequest sqlContext = new SQLTransformRequest(
            inputDatasets, stageSpec.getName(), pluginContext.getOutputRelation(), stageSpec.getOutputSchema());
        SQLDataset transformed = sqlEngine.transform(sqlContext);
        // Count output records
        countRecordsOut(transformed, statisticsCollector, stageMetrics);
        return transformed;
    }));
}
Also used: RelationalTransform(io.cdap.cdap.etl.api.relational.RelationalTransform) SQLTransformRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLTransformRequest) DataFrames(io.cdap.cdap.api.spark.sql.DataFrames) Relation(io.cdap.cdap.etl.api.relational.Relation) SQLPullRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLPullRequest) LoggerFactory(org.slf4j.LoggerFactory) Constants(io.cdap.cdap.etl.common.Constants) StructuredRecord(io.cdap.cdap.api.data.format.StructuredRecord) SQLEngineJob(io.cdap.cdap.etl.engine.SQLEngineJob) SQLRelationDefinition(io.cdap.cdap.etl.api.engine.sql.request.SQLRelationDefinition) PullCapability(io.cdap.cdap.etl.api.engine.sql.capability.PullCapability) JavaSparkExecutionContext(io.cdap.cdap.api.spark.JavaSparkExecutionContext) StageMetrics(io.cdap.cdap.etl.api.StageMetrics) Map(java.util.Map) Objects(com.google.common.base.Objects) SQLPullDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLPullDataset) StructType(org.apache.spark.sql.types.StructType) SQLJoinDefinition(io.cdap.cdap.etl.api.engine.sql.request.SQLJoinDefinition) Threads(org.apache.twill.common.Threads) CancellationException(java.util.concurrent.CancellationException) Engine(io.cdap.cdap.etl.api.relational.Engine) Collection(java.util.Collection) SQLTransformDefinition(io.cdap.cdap.etl.api.engine.sql.request.SQLTransformDefinition) Set(java.util.Set) CompletionException(java.util.concurrent.CompletionException) Metrics(io.cdap.cdap.api.metrics.Metrics) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) RecordCollection(io.cdap.cdap.etl.api.engine.sql.dataset.RecordCollection) SQLPushDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLPushDataset) StageStatisticsCollector(io.cdap.cdap.etl.common.StageStatisticsCollector) SparkCollection(io.cdap.cdap.etl.spark.SparkCollection) List(java.util.List) DefaultStageMetrics(io.cdap.cdap.etl.common.DefaultStageMetrics) SQLDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) SQLWriteRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLWriteRequest) PushCapability(io.cdap.cdap.etl.api.engine.sql.capability.PushCapability) Optional(java.util.Optional) TransformToPairFunction(io.cdap.cdap.etl.spark.function.TransformToPairFunction) SparkRecordCollection(io.cdap.cdap.etl.api.sql.engine.dataset.SparkRecordCollection) TransformFromPairFunction(io.cdap.cdap.etl.spark.function.TransformFromPairFunction) Dataset(org.apache.spark.sql.Dataset) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) SQLPushRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLPushRequest) SQLDatasetConsumer(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDatasetConsumer) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) Function(java.util.function.Function) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) SQLEngineJobKey(io.cdap.cdap.etl.engine.SQLEngineJobKey) SQLEngineWriteJobKey(io.cdap.cdap.etl.engine.SQLEngineWriteJobKey) HashSet(java.util.HashSet) SQLEngineJobType(io.cdap.cdap.etl.engine.SQLEngineJobType) SQLDatasetProducer(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDatasetProducer) JavaRDD(org.apache.spark.api.java.JavaRDD) ExecutorService(java.util.concurrent.ExecutorService) Nullable(javax.annotation.Nullable) SQLWriteResult(io.cdap.cdap.etl.api.engine.sql.request.SQLWriteResult) Logger(org.slf4j.Logger) JoinStage(io.cdap.cdap.etl.api.join.JoinStage) SQLEngineException(io.cdap.cdap.etl.api.engine.sql.SQLEngineException) SQLJoinRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLJoinRequest) SQLContext(org.apache.spark.sql.SQLContext) Row(org.apache.spark.sql.Row) Schema(io.cdap.cdap.api.data.schema.Schema) JavaPairRDD(org.apache.spark.api.java.JavaPairRDD) Closeable(java.io.Closeable) SQLEngineOutput(io.cdap.cdap.etl.api.engine.sql.SQLEngineOutput) SQLEngine(io.cdap.cdap.etl.api.engine.sql.SQLEngine) JoinDefinition(io.cdap.cdap.etl.api.join.JoinDefinition) StageSpec(io.cdap.cdap.etl.proto.v2.spec.StageSpec) Collections(java.util.Collections) SparkRecordCollectionImpl(io.cdap.cdap.etl.api.sql.engine.dataset.SparkRecordCollectionImpl)
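tryRelationalTransform builds both of its maps with Collectors.toMap: once to wrap each input stage in a Relation keyed by stage name, and once to resolve each stage name to its SQLDataset with Function.identity(). The same shape in isolation, with illustrative names:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ToMapPattern {
    public static void main(String[] args) {
        List<String> stageNames = Arrays.asList("parse", "aggregate");
        // Key each entry by the stage name itself and derive the value from
        // the key -- the same shape as the inputDatasets map above.
        Map<String, Integer> lengths = stageNames.stream()
            .collect(Collectors.toMap(Function.identity(), String::length));
        System.out.println(lengths);
    }
}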

Aggregations

SQLEngineJob (io.cdap.cdap.etl.engine.SQLEngineJob) 4
SQLEngineJobKey (io.cdap.cdap.etl.engine.SQLEngineJobKey) 4
SQLDataset (io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) 3
CompletableFuture (java.util.concurrent.CompletableFuture) 2
Objects (com.google.common.base.Objects) 1
StructuredRecord (io.cdap.cdap.api.data.format.StructuredRecord) 1
Schema (io.cdap.cdap.api.data.schema.Schema) 1
Metrics (io.cdap.cdap.api.metrics.Metrics) 1
JavaSparkExecutionContext (io.cdap.cdap.api.spark.JavaSparkExecutionContext) 1
DataFrames (io.cdap.cdap.api.spark.sql.DataFrames) 1
StageMetrics (io.cdap.cdap.etl.api.StageMetrics) 1
SQLEngine (io.cdap.cdap.etl.api.engine.sql.SQLEngine) 1
SQLEngineException (io.cdap.cdap.etl.api.engine.sql.SQLEngineException) 1
SQLEngineOutput (io.cdap.cdap.etl.api.engine.sql.SQLEngineOutput) 1
PullCapability (io.cdap.cdap.etl.api.engine.sql.capability.PullCapability) 1
PushCapability (io.cdap.cdap.etl.api.engine.sql.capability.PushCapability) 1
RecordCollection (io.cdap.cdap.etl.api.engine.sql.dataset.RecordCollection) 1
SQLDatasetConsumer (io.cdap.cdap.etl.api.engine.sql.dataset.SQLDatasetConsumer) 1
SQLDatasetProducer (io.cdap.cdap.etl.api.engine.sql.dataset.SQLDatasetProducer) 1
SQLPullDataset (io.cdap.cdap.etl.api.engine.sql.dataset.SQLPullDataset) 1