Use of io.cdap.cdap.api.data.DatasetContext in project cdap by caskdata.
In class NoSQLTransactionals, method createTransactional.
/**
* Create a transactional for an entity table. The regular {@link io.cdap.cdap.api.Transactionals} class cannot be
* used due to cyclic dependency between dataset service and NoSQL StructuredTable.
*
* @param txClient transaction client
* @param datasetSupplier supplies the dataset for the entity table
* @return transactional for the entity table
*/
public static Transactional createTransactional(TransactionSystemClient txClient,
                                                TableDatasetSupplier datasetSupplier) {
  return new Transactional() {
    @Override
    public void execute(io.cdap.cdap.api.TxRunnable runnable) throws TransactionFailureException {
      TransactionContext txContext = new TransactionContext(txClient);
      try (EntityTableDatasetContext datasetContext = new EntityTableDatasetContext(txContext, datasetSupplier)) {
        txContext.start();
        finishExecute(txContext, datasetContext, runnable);
      } catch (Exception e) {
        Throwables.propagateIfPossible(e, TransactionFailureException.class);
      }
    }

    @Override
    public void execute(int timeout, io.cdap.cdap.api.TxRunnable runnable) throws TransactionFailureException {
      TransactionContext txContext = new TransactionContext(txClient);
      try (EntityTableDatasetContext datasetContext = new EntityTableDatasetContext(txContext, datasetSupplier)) {
        txContext.start(timeout);
        finishExecute(txContext, datasetContext, runnable);
      } catch (Exception e) {
        Throwables.propagateIfPossible(e, TransactionFailureException.class);
      }
    }

    private void finishExecute(TransactionContext txContext, DatasetContext dsContext,
                               io.cdap.cdap.api.TxRunnable runnable) throws TransactionFailureException {
      try {
        runnable.run(dsContext);
      } catch (Exception e) {
        txContext.abort(new TransactionFailureException("Exception raised from TxRunnable.run() " + runnable, e));
      }
      // The call to txContext.abort above always throws an exception,
      // so we only reach this point if runnable.run() returned normally.
      txContext.finish();
    }
  };
}
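A minimal usage sketch of the returned Transactional follows (illustrative only, not part of the CDAP sources shown here): the caller supplies a TxRunnable, the entity table is exposed through the DatasetContext, and the transaction commits when the runnable returns normally. The variable names and the dataset name "entity.table" are assumptions.

// Usage sketch; execute() throws TransactionFailureException, so real callers handle or declare it.
Transactional transactional = NoSQLTransactionals.createTransactional(txClient, datasetSupplier);
transactional.execute(context -> {
  // "entity.table" is a hypothetical name; the actual table comes from the TableDatasetSupplier
  Dataset entityTable = context.getDataset("entity.table");
  // reads and writes go here; the transaction commits when this runnable returns without throwing
});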
Use of io.cdap.cdap.api.data.DatasetContext in project cdap by caskdata.
In class HiveExploreStructuredRecordTestRun, method start.
@BeforeClass
public static void start() throws Exception {
  initialize(tmpFolder);
  DatasetModuleId moduleId = NAMESPACE_ID.datasetModule("email");
  datasetFramework.addModule(moduleId, new EmailTableDefinition.EmailTableModule());
  datasetFramework.addInstance("email", MY_TABLE, DatasetProperties.EMPTY);
  transactional = Transactions.createTransactional(
    new MultiThreadDatasetCache(new SystemDatasetInstantiator(datasetFramework), transactionSystemClient,
                                NAMESPACE_ID, Collections.<String, String>emptyMap(), null, null));
  transactional.execute(new TxRunnable() {
    @Override
    public void run(DatasetContext context) throws Exception {
      // Accessing dataset instance to perform data operations
      EmailTableDefinition.EmailTable table = context.getDataset(MY_TABLE.getDataset());
      Assert.assertNotNull(table);
      table.writeEmail("email1", "this is the subject", "this is the body", "sljackson@boss.com");
    }
  });
  datasetFramework.addModule(NAMESPACE_ID.datasetModule("TableWrapper"), new TableWrapperDefinition.Module());
}
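The same Transactional can be reused for further work; every execute() call runs its TxRunnable in its own transaction and commits when run() returns normally. A small follow-up sketch using only the classes shown above (the second email row is illustrative, not part of the test):

transactional.execute(new TxRunnable() {
  @Override
  public void run(DatasetContext context) throws Exception {
    EmailTableDefinition.EmailTable table = context.getDataset(MY_TABLE.getDataset());
    // runs in a separate transaction from the write above
    table.writeEmail("email2", "second subject", "second body", "someone@example.com");
  }
});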
Use of io.cdap.cdap.api.data.DatasetContext in project cdap by caskdata.
In class TxLookupProvider, method executeLookup.
@Nullable
private <T, R> R executeLookup(final String table, final Map<String, String> arguments, final Function<Lookup<T>, R> func) {
  final AtomicReference<R> result = new AtomicReference<>();
  Transactionals.execute(tx, new TxRunnable() {
    @Override
    public void run(DatasetContext context) throws Exception {
      Lookup<T> lookup = getLookup(table, context.getDataset(table, arguments));
      result.set(func.apply(lookup));
    }
  });
  return result.get();
}
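Because TxRunnable.run() returns void, the helper carries the result out through an AtomicReference that is set inside the transaction and read after Transactionals.execute returns. A hypothetical call site could look like the sketch below, assuming the Lookup interface exposes a single-key lookup(String) method returning T:

// lookupSingle is illustrative and not part of the TxLookupProvider class shown above
@Nullable
private <T> T lookupSingle(String table, Map<String, String> arguments, String key) {
  return executeLookup(table, arguments, (Lookup<T> lookup) -> lookup.lookup(key));
}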
Use of io.cdap.cdap.api.data.DatasetContext in project cdap by caskdata.
In class DStreamCollection, method compute.
@Override
public <U> SparkCollection<U> compute(StageSpec stageSpec, SparkCompute<T, U> compute) throws Exception {
  SparkCompute<T, U> wrappedCompute =
    new DynamicSparkCompute<>(new DynamicDriverContext(stageSpec, sec, new NoopStageStatisticsCollector()), compute);
  Transactionals.execute(sec, new TxRunnable() {
    @Override
    public void run(DatasetContext datasetContext) throws Exception {
      PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec);
      SparkExecutionPluginContext sparkPluginContext =
        new BasicSparkExecutionPluginContext(sec, JavaSparkContext.fromSparkContext(stream.context().sparkContext()),
                                             datasetContext, pipelineRuntime, stageSpec);
      wrappedCompute.initialize(sparkPluginContext);
    }
  }, Exception.class);
  return wrap(stream.transform(new ComputeTransformFunction<>(sec, stageSpec, wrappedCompute)));
}
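The trailing Exception.class argument selects the Transactionals.execute overload that rethrows a matching exception raised inside the runnable rather than wrapping it, which is why compute() can simply declare throws Exception. Since TxRunnable has a single abstract method, the same call can be written with a lambda; the sketch below reuses the variables defined above and is not a separate implementation:

Transactionals.execute(sec, (TxRunnable) datasetContext -> {
  // build the plugin context and initialize the wrapped compute inside the transaction
  SparkExecutionPluginContext sparkPluginContext =
    new BasicSparkExecutionPluginContext(sec, JavaSparkContext.fromSparkContext(stream.context().sparkContext()),
                                         datasetContext, new SparkPipelineRuntime(sec), stageSpec);
  wrappedCompute.initialize(sparkPluginContext);
}, Exception.class);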
Use of io.cdap.cdap.api.data.DatasetContext in project cdap by caskdata.
In class StreamingMultiSinkFunction, method call.
@Override
public void call(JavaRDD<RecordInfo<Object>> data, Time batchTime) throws Exception {
  long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
                                                       sec.getSecureStore(), sec.getServiceDiscoverer(),
                                                       sec.getNamespace());
  PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                               phaseSpec.isStageLoggingEnabled(),
                                                               phaseSpec.isProcessTimingEnabled());
  SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
  PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
  Map<String, SubmitterLifecycle<?>> stages = createStages(evaluator);

  // call prepareRun() on all the stages in the group
  // need to call it in an order that guarantees that inputs are called before outputs
  // this is because plugins can call getArguments().set() in the prepareRun() method,
  // which downstream stages should be able to read
  List<String> traversalOrder = new ArrayList<>(group.size());
  for (String stageName : phaseSpec.getPhase().getDag().getTopologicalOrder()) {
    if (group.contains(stageName)) {
      traversalOrder.add(stageName);
    }
  }
  for (String stageName : traversalOrder) {
    SubmitterLifecycle<?> plugin = stages.get(stageName);
    StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
    try {
      prepareRun(pipelineRuntime, sinkFactory, stageSpec, plugin);
    } catch (Exception e) {
      LOG.error("Error preparing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
      return;
    }
  }

  // run the actual transforms and sinks in this group
  boolean ranSuccessfully = true;
  try {
    MultiSinkFunction multiSinkFunction = new MultiSinkFunction(sec, phaseSpec, group, collectors);
    Set<String> outputNames = sinkFactory.writeCombinedRDD(data.flatMapToPair(multiSinkFunction), sec, sinkNames);
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        for (String outputName : outputNames) {
          ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE, null,
                                           () -> context.getDataset(outputName));
        }
      }
    });
  } catch (Exception e) {
    LOG.error("Error writing to sinks {} for the batch for time {}.", sinkNames, logicalStartTime, e);
    ranSuccessfully = false;
  }

  // run onRunFinish() for each sink
  for (String stageName : traversalOrder) {
    SubmitterLifecycle<?> plugin = stages.get(stageName);
    StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
    try {
      onRunFinish(pipelineRuntime, sinkFactory, stageSpec, plugin, ranSuccessfully);
    } catch (Exception e) {
      LOG.warn("Unable to execute onRunFinish for sink {}", stageName, e);
    }
  }
}
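The lineage registration runs as one short transaction through sec.execute(): each output is registered for WRITE access, and the supplier passed to ExternalDatasets.registerLineage defers context.getDataset(outputName) until it is actually invoked. Written with a lambda, the same block reads as follows (sketch only; outputNames as computed above):

sec.execute(context -> {
  for (String outputName : outputNames) {
    // the supplier () -> context.getDataset(outputName) resolves the dataset lazily, inside the transaction
    ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE, null,
                                     () -> context.getDataset(outputName));
  }
});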