
Example 11 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by cdapio.

From class HiveExploreStructuredRecordTestRun, method testObjectMappedTable:

@Test
public void testObjectMappedTable() throws Exception {
    // Add an ObjectMappedTable instance
    final DatasetId datasetId = NAMESPACE_ID.dataset("person");
    datasetFramework.addInstance(ObjectMappedTable.class.getName(), datasetId,
        ObjectMappedTableProperties.builder()
            .setType(Person.class)
            .setRowKeyExploreName("id")
            .setRowKeyExploreType(Schema.Type.STRING)
            .build());
    // Insert data using SQL
    String command = String.format(
        "INSERT into %s (id, firstname, lastname, age) VALUES (\"%s\", \"%s\", \"%s\", %d)",
        getDatasetHiveName(datasetId), "bobby", "Bobby", "Bob", 15);
    ExploreExecutionResult result = exploreClient.submit(NAMESPACE_ID, command).get();
    Assert.assertEquals(QueryStatus.OpStatus.FINISHED, result.getStatus().getStatus());
    transactional.execute(new TxRunnable() {

        @Override
        public void run(DatasetContext context) throws Exception {
            // Read the data back via dataset directly
            ObjectMappedTable<Person> objTable = context.getDataset(datasetId.getDataset());
            Person person = objTable.read("bobby");
            Assert.assertNotNull(person);
            Assert.assertEquals("Bobby", person.getFirstName());
            Assert.assertEquals("Bob", person.getLastName());
            Assert.assertEquals(15, person.getAge());
        }
    });
    // Delete the dataset, which also drops the Hive table.
    datasetFramework.deleteInstance(datasetId);
}
Also used: TxRunnable(io.cdap.cdap.api.TxRunnable) ObjectMappedTable(io.cdap.cdap.api.dataset.lib.ObjectMappedTable) DatasetContext(io.cdap.cdap.api.data.DatasetContext) ExploreExecutionResult(io.cdap.cdap.explore.client.ExploreExecutionResult) DatasetId(io.cdap.cdap.proto.id.DatasetId) Test(org.junit.Test)
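Since TxRunnable declares a single run(DatasetContext) method, the anonymous class above can also be written as a Java 8+ lambda. A minimal sketch of the same read in lambda form (illustrative, not the project's own code):

transactional.execute(context -> {
    // Same read as above, in lambda form
    ObjectMappedTable<Person> objTable = context.getDataset(datasetId.getDataset());
    Person person = objTable.read("bobby");
    Assert.assertEquals("Bobby", person.getFirstName());
});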

Example 12 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by cdapio.

From class HiveExploreStructuredRecordTestRun, method start:

@BeforeClass
public static void start() throws Exception {
    initialize(tmpFolder);
    DatasetModuleId moduleId = NAMESPACE_ID.datasetModule("email");
    datasetFramework.addModule(moduleId, new EmailTableDefinition.EmailTableModule());
    datasetFramework.addInstance("email", MY_TABLE, DatasetProperties.EMPTY);
    transactional = Transactions.createTransactional(new MultiThreadDatasetCache(
        new SystemDatasetInstantiator(datasetFramework), transactionSystemClient,
        NAMESPACE_ID, Collections.<String, String>emptyMap(), null, null));
    transactional.execute(new TxRunnable() {

        @Override
        public void run(DatasetContext context) throws Exception {
            // Access the dataset instance to perform data operations
            EmailTableDefinition.EmailTable table = context.getDataset(MY_TABLE.getDataset());
            Assert.assertNotNull(table);
            table.writeEmail("email1", "this is the subject", "this is the body", "sljackson@boss.com");
        }
    });
    datasetFramework.addModule(NAMESPACE_ID.datasetModule("TableWrapper"), new TableWrapperDefinition.Module());
}
Also used: DatasetModuleId(io.cdap.cdap.proto.id.DatasetModuleId) MultiThreadDatasetCache(io.cdap.cdap.data2.dataset2.MultiThreadDatasetCache) SystemDatasetInstantiator(io.cdap.cdap.data.dataset.SystemDatasetInstantiator) TxRunnable(io.cdap.cdap.api.TxRunnable) TableWrapperDefinition(io.cdap.cdap.explore.service.datasets.TableWrapperDefinition) EmailTableDefinition(io.cdap.cdap.explore.service.datasets.EmailTableDefinition) DatasetContext(io.cdap.cdap.api.data.DatasetContext) BeforeClass(org.junit.BeforeClass)
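Transactional.execute runs the whole TxRunnable as a single transaction: if run() throws, the transaction is aborted and its writes are rolled back, surfacing to the caller as a TransactionFailureException. A sketch of that contract against the same email table; the "email2" row and the forced failure are illustrative only:

try {
    transactional.execute(context -> {
        EmailTableDefinition.EmailTable table = context.getDataset(MY_TABLE.getDataset());
        table.writeEmail("email2", "subject", "body", "someone@example.com");
        // Throwing here aborts the transaction, so "email2" is never committed
        throw new IllegalStateException("force rollback");
    });
} catch (TransactionFailureException e) {
    // The write above was rolled back
}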

Example 13 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by cdapio.

From class StreamingMultiSinkFunction, method call:

@Override
public void call(JavaRDD<RecordInfo<Object>> data, Time batchTime) throws Exception {
    long logicalStartTime = batchTime.milliseconds();
    MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
        sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
    PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
        phaseSpec.isStageLoggingEnabled(), phaseSpec.isProcessTimingEnabled());
    SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
    PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
    Map<String, SubmitterLifecycle<?>> stages = createStages(evaluator);
    // call prepareRun() on all the stages in the group
    // need to call it in an order that guarantees that inputs are called before outputs
    // this is because plugins can call getArguments().set() in the prepareRun() method,
    // which downstream stages should be able to read
    List<String> traversalOrder = new ArrayList<>(group.size());
    for (String stageName : phaseSpec.getPhase().getDag().getTopologicalOrder()) {
        if (group.contains(stageName)) {
            traversalOrder.add(stageName);
        }
    }
    for (String stageName : traversalOrder) {
        SubmitterLifecycle<?> plugin = stages.get(stageName);
        StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
        try {
            prepareRun(pipelineRuntime, sinkFactory, stageSpec, plugin);
        } catch (Exception e) {
            LOG.error("Error preparing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
            return;
        }
    }
    // run the actual transforms and sinks in this group
    boolean ranSuccessfully = true;
    try {
        MultiSinkFunction multiSinkFunction = new MultiSinkFunction(sec, phaseSpec, group, collectors);
        Set<String> outputNames = sinkFactory.writeCombinedRDD(
            data.flatMapToPair(multiSinkFunction), sec, sinkNames);
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext context) throws Exception {
                for (String outputName : outputNames) {
                    ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE,
                        null, () -> context.getDataset(outputName));
                }
            }
        });
    } catch (Exception e) {
        LOG.error("Error writing to sinks {} for the batch for time {}.", sinkNames, logicalStartTime, e);
        ranSuccessfully = false;
    }
    // run onRunFinish() for each sink
    for (String stageName : traversalOrder) {
        SubmitterLifecycle<?> plugin = stages.get(stageName);
        StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
        try {
            onRunFinish(pipelineRuntime, sinkFactory, stageSpec, plugin, ranSuccessfully);
        } catch (Exception e) {
            LOG.warn("Unable to execute onRunFinish for sink {}", stageName, e);
        }
    }
}
Also used: SubmitterLifecycle(io.cdap.cdap.etl.api.SubmitterLifecycle) DefaultMacroEvaluator(io.cdap.cdap.etl.common.DefaultMacroEvaluator) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) PipelineRuntime(io.cdap.cdap.etl.common.PipelineRuntime) SparkPipelineRuntime(io.cdap.cdap.etl.spark.SparkPipelineRuntime) SparkPipelinePluginContext(io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext) PluginContext(io.cdap.cdap.api.plugin.PluginContext) ArrayList(java.util.ArrayList) MultiSinkFunction(io.cdap.cdap.etl.spark.function.MultiSinkFunction) TransactionFailureException(org.apache.tephra.TransactionFailureException) SparkBatchSinkFactory(io.cdap.cdap.etl.spark.batch.SparkBatchSinkFactory) TxRunnable(io.cdap.cdap.api.TxRunnable) StageSpec(io.cdap.cdap.etl.proto.v2.spec.StageSpec) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) DatasetContext(io.cdap.cdap.api.data.DatasetContext)
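The traversal-order loop above filters the DAG's topological order down to the stages in this group, so upstream stages have prepareRun() called before the downstream stages that read their arguments. The same filter can be written with streams; a small sketch assuming Java 8+ and an import of java.util.stream.Collectors:

// Equivalent to the loop over getTopologicalOrder() above
List<String> traversalOrder = phaseSpec.getPhase().getDag().getTopologicalOrder().stream()
    .filter(group::contains)
    .collect(Collectors.toList());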

Example 14 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by cdapio.

From class StreamingSparkSinkFunction, method call:

@Override
public void call(JavaRDD<T> data, Time batchTime) throws Exception {
    if (data.isEmpty()) {
        return;
    }
    final long logicalStartTime = batchTime.milliseconds();
    MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
        sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
    final PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(),
        sec.getMetrics(), stageSpec.isStageLoggingEnabled(), stageSpec.isProcessTimingEnabled());
    final PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, batchTime.milliseconds());
    final String stageName = stageSpec.getName();
    final SparkSink<T> sparkSink = pluginContext.newPluginInstance(stageName, evaluator);
    boolean isPrepared = false;
    boolean isDone = false;
    try {
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext datasetContext) throws Exception {
                SparkPluginContext context = new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
                sparkSink.prepareRun(context);
            }
        });
        isPrepared = true;
        final SparkExecutionPluginContext sparkExecutionPluginContext = new SparkStreamingExecutionContext(
            sec, JavaSparkContext.fromSparkContext(data.rdd().context()), logicalStartTime, stageSpec);
        final JavaRDD<T> countedRDD = data.map(
            new CountingFunction<T>(stageName, sec.getMetrics(), "records.in", null)).cache();
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext context) throws Exception {
                sparkSink.run(sparkExecutionPluginContext, countedRDD);
            }
        });
        isDone = true;
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext datasetContext) throws Exception {
                SparkPluginContext context = new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
                sparkSink.onRunFinish(true, context);
            }
        });
    } catch (Exception e) {
        LOG.error("Error while executing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
    } finally {
        if (isPrepared && !isDone) {
            sec.execute(new TxRunnable() {

                @Override
                public void run(DatasetContext datasetContext) throws Exception {
                    SparkPluginContext context = new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
                    sparkSink.onRunFinish(false, context);
                }
            });
        }
    }
}
Also used: DefaultMacroEvaluator(io.cdap.cdap.etl.common.DefaultMacroEvaluator) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) SparkPipelineRuntime(io.cdap.cdap.etl.spark.SparkPipelineRuntime) PipelineRuntime(io.cdap.cdap.etl.common.PipelineRuntime) SparkPipelinePluginContext(io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext) PluginContext(io.cdap.cdap.api.plugin.PluginContext) BasicSparkPluginContext(io.cdap.cdap.etl.spark.batch.BasicSparkPluginContext) SparkExecutionPluginContext(io.cdap.cdap.etl.api.batch.SparkExecutionPluginContext) SparkPluginContext(io.cdap.cdap.etl.api.batch.SparkPluginContext) SparkStreamingExecutionContext(io.cdap.cdap.etl.spark.streaming.SparkStreamingExecutionContext) CountingFunction(io.cdap.cdap.etl.spark.function.CountingFunction) TxRunnable(io.cdap.cdap.api.TxRunnable) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) DatasetContext(io.cdap.cdap.api.data.DatasetContext)
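The isPrepared and isDone flags encode the sink lifecycle contract: onRunFinish(false, context) must run exactly when prepareRun() succeeded but run() did not complete. A condensed sketch of the same sequence using lambdas, with the variables defined above (illustrative only):

boolean isPrepared = false;
boolean isDone = false;
try {
    sec.execute(ctx -> sparkSink.prepareRun(
        new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, ctx, sec.getAdmin())));
    isPrepared = true;
    sec.execute(ctx -> sparkSink.run(sparkExecutionPluginContext, countedRDD));
    isDone = true;
    sec.execute(ctx -> sparkSink.onRunFinish(true,
        new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, ctx, sec.getAdmin())));
} finally {
    // Compensate only if prepareRun() succeeded but run() did not complete
    if (isPrepared && !isDone) {
        sec.execute(ctx -> sparkSink.onRunFinish(false,
            new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, ctx, sec.getAdmin())));
    }
}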

Example 15 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by cdapio.

From class DynamicSparkCompute, method lazyInit:

// When checkpointing is enabled and Spark is loading DStream operations from an existing
// checkpoint, delegate will be null and initialize() will not have been called, so we need
// to instantiate the delegate and initialize it.
private void lazyInit(final JavaSparkContext jsc) throws Exception {
    if (delegate == null) {
        PluginFunctionContext pluginFunctionContext = dynamicDriverContext.getPluginFunctionContext();
        delegate = pluginFunctionContext.createPlugin();
        final StageSpec stageSpec = pluginFunctionContext.getStageSpec();
        final JavaSparkExecutionContext sec = dynamicDriverContext.getSparkExecutionContext();
        Transactionals.execute(sec, new TxRunnable() {

            @Override
            public void run(DatasetContext datasetContext) throws Exception {
                PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec);
                SparkExecutionPluginContext sparkPluginContext = new BasicSparkExecutionPluginContext(
                    sec, jsc, datasetContext, pipelineRuntime, stageSpec);
                delegate.initialize(sparkPluginContext);
            }
        }, Exception.class);
    }
}
Also used: BasicSparkExecutionPluginContext(io.cdap.cdap.etl.spark.batch.BasicSparkExecutionPluginContext) PluginFunctionContext(io.cdap.cdap.etl.spark.function.PluginFunctionContext) SparkExecutionPluginContext(io.cdap.cdap.etl.api.batch.SparkExecutionPluginContext) SparkPipelineRuntime(io.cdap.cdap.etl.spark.SparkPipelineRuntime) PipelineRuntime(io.cdap.cdap.etl.common.PipelineRuntime) TxRunnable(io.cdap.cdap.api.TxRunnable) StageSpec(io.cdap.cdap.etl.proto.v2.spec.StageSpec) JavaSparkExecutionContext(io.cdap.cdap.api.spark.JavaSparkExecutionContext) DatasetContext(io.cdap.cdap.api.data.DatasetContext)
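Unlike Transactional.execute, the Transactionals.execute helper used here takes an exception class: a failure cause of that type thrown from run() is propagated to the caller directly rather than arriving wrapped in a TransactionFailureException. A minimal sketch of that contract, assuming the helper rethrows causes of the given type and wraps anything else unchecked; the IOException is simulated, standing in for a failure from delegate.initialize():

try {
    // Cast makes the lambda's target type explicit; requires import java.io.IOException
    Transactionals.execute(sec, (TxRunnable) ctx -> {
        throw new IOException("simulated failure inside the transaction");
    }, IOException.class);
} catch (IOException e) {
    // Arrives unwrapped because IOException.class was passed above
}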

Aggregations

Usage counts across all indexed examples:

TxRunnable (io.cdap.cdap.api.TxRunnable): 32
DatasetContext (io.cdap.cdap.api.data.DatasetContext): 30
PipelineRuntime (io.cdap.cdap.etl.common.PipelineRuntime): 10
SparkPipelineRuntime (io.cdap.cdap.etl.spark.SparkPipelineRuntime): 10
JavaSparkContext (org.apache.spark.api.java.JavaSparkContext): 8
MacroEvaluator (io.cdap.cdap.api.macro.MacroEvaluator): 6
PluginContext (io.cdap.cdap.api.plugin.PluginContext): 6
SparkExecutionPluginContext (io.cdap.cdap.etl.api.batch.SparkExecutionPluginContext): 6
BasicArguments (io.cdap.cdap.etl.common.BasicArguments): 6
DefaultMacroEvaluator (io.cdap.cdap.etl.common.DefaultMacroEvaluator): 6
StageSpec (io.cdap.cdap.etl.proto.v2.spec.StageSpec): 6
TransactionFailureException (org.apache.tephra.TransactionFailureException): 6
Put (io.cdap.cdap.api.dataset.table.Put): 5
Table (io.cdap.cdap.api.dataset.table.Table): 5
SparkPipelinePluginContext (io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext): 5
IOException (java.io.IOException): 5
AtomicReference (java.util.concurrent.atomic.AtomicReference): 5
NoopStageStatisticsCollector (io.cdap.cdap.etl.common.NoopStageStatisticsCollector): 4
DataSetException (io.cdap.cdap.api.dataset.DataSetException): 3
FileSet (io.cdap.cdap.api.dataset.lib.FileSet): 3