
Example 1 with SQLDataset

use of io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset in project cdap by caskdata.

the class BatchSparkPipelineDriver method handleAutoJoin.

@Override
@SuppressWarnings("unchecked")
protected SparkCollection<Object> handleAutoJoin(String stageName, JoinDefinition joinDefinition, Map<String, SparkCollection<Object>> inputDataCollections, @Nullable Integer numPartitions) {
    if (sqlEngineAdapter != null && canJoinOnSQLEngine(stageName, joinDefinition, inputDataCollections)) {
        // Push each input collection into the SQL engine so the join can execute there,
        // skipping collections that are already backed by the engine.
        for (JoinStage joinStage : joinDefinition.getStages()) {
            String joinStageName = joinStage.getStageName();
            // If the input collection is already a SQL Engine collection, there's no need to push.
            if (inputDataCollections.get(joinStageName) instanceof SQLBackedCollection) {
                continue;
            }
            SparkCollection<Object> collection = inputDataCollections.get(joinStage.getStageName());
            SQLEngineJob<SQLDataset> pushJob = sqlEngineAdapter.push(joinStageName, joinStage.getSchema(), collection);
            inputDataCollections.put(joinStageName, new SQLEngineCollection<>(sec, functionCacheFactory, jsc, new SQLContext(jsc), datasetContext, sinkFactory, collection, joinStageName, sqlEngineAdapter, pushJob));
        }
    }
    return super.handleAutoJoin(stageName, joinDefinition, inputDataCollections, numPartitions);
}
Also used : JoinStage(io.cdap.cdap.etl.api.join.JoinStage) SQLDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) SQLContext(org.apache.spark.sql.SQLContext)
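
The loop above substitutes each plain Spark collection with an engine-backed wrapper while leaving inputs that are already on the SQL engine untouched. The sketch below isolates that "wrap unless already wrapped" substitution using only JDK types; DataCollection, EngineBackedCollection, and WrappedCollection are illustrative stand-ins, not CDAP classes.

import java.util.HashMap;
import java.util.Map;

public class WrapInputsSketch {

    interface DataCollection { }

    // Marker for inputs that already live on the engine, mirroring SQLBackedCollection.
    interface EngineBackedCollection extends DataCollection { }

    static final class LocalCollection implements DataCollection { }

    static final class WrappedCollection implements EngineBackedCollection {
        final DataCollection original;
        WrappedCollection(DataCollection original) { this.original = original; }
    }

    // Replace every plain input with an engine-backed wrapper, skipping inputs
    // that are already wrapped, just like the for loop in handleAutoJoin.
    static void wrapAll(Map<String, DataCollection> inputs) {
        for (Map.Entry<String, DataCollection> e : inputs.entrySet()) {
            if (e.getValue() instanceof EngineBackedCollection) {
                continue; // already on the engine, nothing to push
            }
            e.setValue(new WrappedCollection(e.getValue()));
        }
    }

    public static void main(String[] args) {
        Map<String, DataCollection> inputs = new HashMap<>();
        inputs.put("users", new LocalCollection());
        inputs.put("orders", new WrappedCollection(new LocalCollection()));
        wrapAll(inputs);
        inputs.forEach((k, v) -> System.out.println(k + " -> " + v.getClass().getSimpleName()));
    }
}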

Example 2 with SQLDataset

use of io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset in project cdap by caskdata.

the class BatchSQLEngineAdapter method push.

/**
 * Creates a new job to push a SparkCollection into the SQL engine.
 *
 * @param datasetName the name of the dataset to push
 * @param schema      the schema for this dataset
 * @param collection  the Spark collection containing the dataset to push
 * @return Job representing this Push operation.
 */
@SuppressWarnings("unchecked,raw")
protected SQLEngineJob<SQLDataset> push(String datasetName, Schema schema, SparkCollection<?> collection) {
    // If this job already exists, return the existing instance.
    SQLEngineJobKey jobKey = new SQLEngineJobKey(datasetName, SQLEngineJobType.PUSH);
    if (jobs.containsKey(jobKey)) {
        return (SQLEngineJob<SQLDataset>) jobs.get(jobKey);
    }
    CompletableFuture<SQLDataset> future = new CompletableFuture<>();
    Runnable pushTask = () -> {
        try {
            LOG.debug("Starting push for dataset '{}'", datasetName);
            SQLDataset result = pushInternal(datasetName, schema, collection);
            LOG.debug("Completed push for dataset '{}'", datasetName);
            future.complete(result);
        } catch (Throwable t) {
            future.completeExceptionally(t);
        }
    };
    executorService.submit(pushTask);
    SQLEngineJob<SQLDataset> job = new SQLEngineJob<>(jobKey, future);
    jobs.put(jobKey, job);
    return job;
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) SQLEngineJobKey(io.cdap.cdap.etl.engine.SQLEngineJobKey) SQLDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) SQLEngineJob(io.cdap.cdap.etl.engine.SQLEngineJob)
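
push() implements a memoized asynchronous job: the first request for a given key schedules a background task that completes a CompletableFuture, and later requests for the same key get the cached job back. Below is a minimal JDK-only sketch of that pattern; JobCacheSketch and its names are illustrative, not the CDAP API. It uses computeIfAbsent to keep the check-then-insert atomic, whereas the containsKey/get sequence above assumes callers are serialized.

import java.util.Map;
import java.util.concurrent.*;

public class JobCacheSketch {
    private final Map<String, CompletableFuture<String>> jobs = new ConcurrentHashMap<>();
    private final ExecutorService executor = Executors.newFixedThreadPool(2);

    CompletableFuture<String> submit(String key, Callable<String> work) {
        // One task per key, even under concurrent callers.
        return jobs.computeIfAbsent(key, k -> {
            CompletableFuture<String> future = new CompletableFuture<>();
            executor.submit(() -> {
                try {
                    future.complete(work.call());
                } catch (Throwable t) {
                    future.completeExceptionally(t); // surface failures to all waiters
                }
            });
            return future;
        });
    }

    public static void main(String[] args) throws Exception {
        JobCacheSketch cache = new JobCacheSketch();
        CompletableFuture<String> a = cache.submit("push:users", () -> "pushed users");
        CompletableFuture<String> b = cache.submit("push:users", () -> "never runs");
        System.out.println(a.get() + ", same job: " + (a == b)); // pushed users, same job: true
        cache.executor.shutdown();
    }
}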

Example 3 with SQLDataset

use of io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset in project cdap by caskdata.

the class BatchSQLEngineAdapter method joinInternal.

/**
 * Join implementation. This method has blocking calls and should be executed in a separate thread.
 *
 * @param joinRequest the Join Request
 * @return the {@link SQLDataset} resulting from the join operation.
 * @throws SQLEngineException if any of the preceding jobs fails.
 */
private SQLDataset joinInternal(SQLJoinRequest joinRequest) throws SQLEngineException {
    String datasetName = joinRequest.getDatasetName();
    DefaultStageMetrics stageMetrics = new DefaultStageMetrics(metrics, datasetName);
    StageStatisticsCollector statisticsCollector = statsCollectors.get(datasetName);
    // Count input metrics for each of the preceding stages.
    for (SQLDataset inputDataset : joinRequest.getInputDatasets()) {
        countRecordsIn(inputDataset, statisticsCollector, stageMetrics);
    }
    // Execute Join job.
    SQLDataset joinDataset = (SQLDataset) sqlEngine.join(joinRequest);
    // Count output rows and complete future.
    countRecordsOut(joinDataset, statisticsCollector, stageMetrics);
    return joinDataset;
}
Also used : SQLDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) StageStatisticsCollector(io.cdap.cdap.etl.common.StageStatisticsCollector) DefaultStageMetrics(io.cdap.cdap.etl.common.DefaultStageMetrics)
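
joinInternal() brackets one blocking call with record-count metrics: each input is counted before the join runs and the output is counted afterwards. A minimal sketch of that metering-around-a-blocking-call shape, assuming stand-in types (Dataset, recordsIn, recordsOut rather than CDAP's StageMetrics helpers):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class MeteredJoinSketch {
    static final AtomicLong recordsIn = new AtomicLong();
    static final AtomicLong recordsOut = new AtomicLong();

    record Dataset(String name, long numRows) { }

    static Dataset meteredJoin(List<Dataset> inputs) {
        for (Dataset in : inputs) {
            recordsIn.addAndGet(in.numRows()); // analogous to countRecordsIn(...)
        }
        Dataset joined = blockingJoin(inputs); // blocking, so run on a worker thread
        recordsOut.addAndGet(joined.numRows()); // analogous to countRecordsOut(...)
        return joined;
    }

    static Dataset blockingJoin(List<Dataset> inputs) {
        long rows = inputs.stream().mapToLong(Dataset::numRows).min().orElse(0);
        return new Dataset("joined", rows); // placeholder result
    }

    public static void main(String[] args) {
        meteredJoin(List.of(new Dataset("users", 10), new Dataset("orders", 4)));
        System.out.println("in=" + recordsIn + " out=" + recordsOut);
    }
}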

Example 4 with SQLDataset

use of io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset in project cdap by caskdata.

the class BatchSQLEngineAdapter method pullInternal.

/**
 * Pull implementation. This method has blocking calls and should be executed in a separate thread.
 *
 * @param dataset the dataset to pull.
 * @return {@link JavaRDD} representing the records contained in this dataset.
 * @throws SQLEngineException if the pull process fails.
 */
@SuppressWarnings("unchecked,raw")
private <T> JavaRDD<T> pullInternal(SQLDataset dataset) throws SQLEngineException {
    // Create pull operation for this dataset and wait until completion
    SQLPullRequest pullRequest = new SQLPullRequest(dataset);
    // Check whether any of the engine's pull capabilities can produce records for this
    // request; if so, process it using a producer.
    for (PullCapability capability : sqlEngine.getPullCapabilities()) {
        SQLDatasetProducer producer = sqlEngine.getProducer(pullRequest, capability);
        // If a producer is able to produce records for this pull request, extract the RDD from this request.
        if (producer != null) {
            RecordCollection recordCollection = producer.produce(dataset);
            // If the collection that was generated is not an instance of SparkRecordCollection, skip it.
            if (recordCollection instanceof SparkRecordCollection) {
                Schema schema = dataset.getSchema();
                return (JavaRDD<T>) ((SparkRecordCollection) recordCollection).getDataFrame().javaRDD().map(r -> DataFrames.fromRow((Row) r, schema));
            }
        }
    }
    // If no capabilities could be used to produce records, proceed using the Pull Provider.
    SQLPullDataset<StructuredRecord, ?, ?> sqlPullDataset = sqlEngine.getPullProvider(pullRequest);
    // Run operation to read from the InputFormatProvider supplied by this operation.
    ClassLoader classLoader = Objects.firstNonNull(Thread.currentThread().getContextClassLoader(), getClass().getClassLoader());
    JavaPairRDD pairRDD = RDDUtils.readUsingInputFormat(jsc, sqlPullDataset, classLoader, Object.class, Object.class);
    return pairRDD.flatMap(new TransformFromPairFunction(sqlPullDataset.fromKeyValue()));
}
Also used : SQLPullRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLPullRequest) SQLPullDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLPullDataset) SQLDatasetProducer(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDatasetProducer) PullCapability(io.cdap.cdap.etl.api.engine.sql.capability.PullCapability) RecordCollection(io.cdap.cdap.etl.api.engine.sql.dataset.RecordCollection) SparkRecordCollection(io.cdap.cdap.etl.api.sql.engine.dataset.SparkRecordCollection) SQLEngineException(io.cdap.cdap.etl.api.engine.sql.SQLEngineException) StructuredRecord(io.cdap.cdap.api.data.format.StructuredRecord) Schema(io.cdap.cdap.api.data.schema.Schema) DataFrames(io.cdap.cdap.api.spark.sql.DataFrames) Row(org.apache.spark.sql.Row) JavaRDD(org.apache.spark.api.java.JavaRDD) JavaPairRDD(org.apache.spark.api.java.JavaPairRDD) Objects(com.google.common.base.Objects) TransformFromPairFunction(io.cdap.cdap.etl.spark.function.TransformFromPairFunction)
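
pullInternal() probes the engine's optional fast paths first and only falls back to the generic pull provider when no capability yields a usable result. The sketch below reduces that control flow to plain JDK types; Records and the capability functions are hypothetical stand-ins for SQLDatasetProducer and PullCapability.

import java.util.List;
import java.util.Optional;
import java.util.function.Function;

public class PullFallbackSketch {

    record Records(String source) { }

    static Records pull(String dataset, List<Function<String, Optional<Records>>> capabilities) {
        // Fast paths: each capability may or may not know how to produce records.
        for (Function<String, Optional<Records>> capability : capabilities) {
            Optional<Records> produced = capability.apply(dataset);
            if (produced.isPresent()) {
                return produced.get();
            }
        }
        // Fallback: the generic pull path always works, but may be slower.
        return new Records("generic-pull:" + dataset);
    }

    public static void main(String[] args) {
        Records r = pull("users", List.of(
            name -> Optional.empty(),                        // capability that declines
            name -> Optional.of(new Records("fast:" + name)) // capability that handles it
        ));
        System.out.println(r.source()); // prints "fast:users"
    }
}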

Example 5 with SQLDataset

use of io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset in project cdap by caskdata.

the class BatchSQLEngineAdapter method join.

/**
 * Executes a Join operation in the SQL engine.
 *
 * @param datasetName    the dataset name to use to store the result of the join operation
 * @param joinDefinition the Join Definition
 * @return Job representing this join operation
 */
@SuppressWarnings("unchecked,raw")
public SQLEngineJob<SQLDataset> join(String datasetName, JoinDefinition joinDefinition) {
    return runJob(datasetName, SQLEngineJobType.EXECUTE, () -> {
        Collection<SQLDataset> inputDatasets = getJoinInputDatasets(joinDefinition);
        SQLJoinRequest joinRequest = new SQLJoinRequest(datasetName, joinDefinition, inputDatasets);
        if (!sqlEngine.canJoin(joinRequest)) {
            throw new IllegalArgumentException("Unable to execute this join in the SQL engine");
        }
        return joinInternal(joinRequest);
    });
}
Also used : SQLDataset(io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) SQLJoinRequest(io.cdap.cdap.etl.api.engine.sql.request.SQLJoinRequest)
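
join() runs its support check inside the job body, so an unsupported join surfaces through the returned job rather than at call time. A minimal sketch of that validate-then-run shape, assuming a hypothetical runIfSupported helper in place of runJob:

import java.util.concurrent.CompletableFuture;
import java.util.function.Predicate;
import java.util.function.Supplier;

public class ValidateThenRunSketch {

    // Hypothetical analogue of runJob: schedule the work, but run the
    // support check inside the job so failures complete the future.
    static <T> CompletableFuture<T> runIfSupported(String request,
                                                   Predicate<String> canHandle,
                                                   Supplier<T> work) {
        return CompletableFuture.supplyAsync(() -> {
            if (!canHandle.test(request)) {
                // Mirrors the IllegalArgumentException thrown when sqlEngine.canJoin(...) is false.
                throw new IllegalArgumentException("Unable to execute this request in the SQL engine");
            }
            return work.get();
        });
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<String> result =
            runIfSupported("join users,orders", r -> r.startsWith("join"), () -> "joined");
        System.out.println(result.get()); // prints "joined"
    }
}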

Aggregations

SQLDataset (io.cdap.cdap.etl.api.engine.sql.dataset.SQLDataset) 8
SQLEngineJob (io.cdap.cdap.etl.engine.SQLEngineJob) 4
SQLEngineJobKey (io.cdap.cdap.etl.engine.SQLEngineJobKey) 4
SQLJoinRequest (io.cdap.cdap.etl.api.engine.sql.request.SQLJoinRequest) 3
JoinStage (io.cdap.cdap.etl.api.join.JoinStage) 3
SparkRecordCollection (io.cdap.cdap.etl.api.sql.engine.dataset.SparkRecordCollection) 3
DefaultStageMetrics (io.cdap.cdap.etl.common.DefaultStageMetrics) 3
StageStatisticsCollector (io.cdap.cdap.etl.common.StageStatisticsCollector) 3
Row (org.apache.spark.sql.Row) 3
Objects (com.google.common.base.Objects) 2
StructuredRecord (io.cdap.cdap.api.data.format.StructuredRecord) 2
Schema (io.cdap.cdap.api.data.schema.Schema) 2
Metrics (io.cdap.cdap.api.metrics.Metrics) 2
JavaSparkExecutionContext (io.cdap.cdap.api.spark.JavaSparkExecutionContext) 2
DataFrames (io.cdap.cdap.api.spark.sql.DataFrames) 2
StageMetrics (io.cdap.cdap.etl.api.StageMetrics) 2
SQLEngine (io.cdap.cdap.etl.api.engine.sql.SQLEngine) 2
SQLEngineException (io.cdap.cdap.etl.api.engine.sql.SQLEngineException) 2
SQLEngineOutput (io.cdap.cdap.etl.api.engine.sql.SQLEngineOutput) 2
PullCapability (io.cdap.cdap.etl.api.engine.sql.capability.PullCapability) 2