Use of co.cask.cdap.api.data.DatasetContext in project cdap by caskdata.
The class DefaultStore, method setWorkerInstances.
@Override
public void setWorkerInstances(final ProgramId id, final int instances) {
  Preconditions.checkArgument(instances > 0, "Cannot change number of worker instances to %s", instances);
  Transactions.executeUnchecked(transactional, new TxRunnable() {
    @Override
    public void run(DatasetContext context) throws Exception {
      AppMetadataStore metaStore = getAppMetadataStore(context);
      ApplicationSpecification appSpec = getAppSpecOrFail(metaStore, id);
      WorkerSpecification workerSpec = getWorkerSpecOrFail(id, appSpec);
      WorkerSpecification newSpecification = new WorkerSpecification(workerSpec.getClassName(), workerSpec.getName(),
                                                                     workerSpec.getDescription(), workerSpec.getProperties(),
                                                                     workerSpec.getDatasets(), workerSpec.getResources(),
                                                                     instances);
      ApplicationSpecification newAppSpec = replaceWorkerInAppSpec(appSpec, id, newSpecification);
      metaStore.updateAppSpec(id.getNamespace(), id.getApplication(), id.getVersion(), newAppSpec);
    }
  });
  LOG.trace("Setting program instances: namespace: {}, application: {}, worker: {}, new instances count: {}",
            id.getNamespaceId(), id.getApplication(), id.getProgram(), instances);
}
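TxRunnable has a single run(DatasetContext) method, so the same transactional update can also be written with a Java 8 lambda. The snippet below is only a sketch of that variant: it assumes the same transactional field and the DefaultStore helpers used above (getAppMetadataStore, getAppSpecOrFail, getWorkerSpecOrFail, replaceWorkerInAppSpec) and is not the actual DefaultStore code.

// Sketch: hypothetical lambda form of the same update (not the actual DefaultStore code).
public void setWorkerInstances(ProgramId id, int instances) {
  Preconditions.checkArgument(instances > 0, "Cannot change number of worker instances to %s", instances);
  Transactions.executeUnchecked(transactional, (DatasetContext context) -> {
    AppMetadataStore metaStore = getAppMetadataStore(context);
    ApplicationSpecification appSpec = getAppSpecOrFail(metaStore, id);
    WorkerSpecification workerSpec = getWorkerSpecOrFail(id, appSpec);
    WorkerSpecification newSpec = new WorkerSpecification(workerSpec.getClassName(), workerSpec.getName(),
                                                          workerSpec.getDescription(), workerSpec.getProperties(),
                                                          workerSpec.getDatasets(), workerSpec.getResources(), instances);
    metaStore.updateAppSpec(id.getNamespace(), id.getApplication(), id.getVersion(),
                            replaceWorkerInAppSpec(appSpec, id, newSpec));
  });
}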
Use of co.cask.cdap.api.data.DatasetContext in project cdap by caskdata.
The class DStreamCollection, method compute.
@Override
public <U> SparkCollection<U> compute(final StageInfo stageInfo, SparkCompute<T, U> compute) throws Exception {
  final SparkCompute<T, U> wrappedCompute =
    new DynamicSparkCompute<>(new DynamicDriverContext(stageInfo, sec), compute);
  Transactionals.execute(sec, new TxRunnable() {
    @Override
    public void run(DatasetContext datasetContext) throws Exception {
      SparkExecutionPluginContext sparkPluginContext =
        new BasicSparkExecutionPluginContext(sec, JavaSparkContext.fromSparkContext(stream.context().sparkContext()),
                                             datasetContext, stageInfo);
      wrappedCompute.initialize(sparkPluginContext);
    }
  }, Exception.class);
  return wrap(stream.transform(new ComputeTransformFunction<>(sec, stageInfo, wrappedCompute)));
}
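The same Transactionals.execute(sec, TxRunnable, Exception.class) form can be used from driver code wherever a JavaSparkExecutionContext is available, to do arbitrary dataset work inside a short transaction. A minimal sketch, assuming a JavaSparkExecutionContext named sec as above; the "stats" KeyValueTable is a hypothetical dataset, not part of the pipeline shown.

// Sketch: obtain a dataset through the DatasetContext and write to it inside the transaction.
// "stats" is a hypothetical KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable);
// Bytes is co.cask.cdap.api.common.Bytes.
Transactionals.execute(sec, new TxRunnable() {
  @Override
  public void run(DatasetContext datasetContext) throws Exception {
    KeyValueTable stats = datasetContext.getDataset("stats");
    stats.write("initialized", Bytes.toBytes(System.currentTimeMillis()));
  }
}, Exception.class);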
Use of co.cask.cdap.api.data.DatasetContext in project cdap by caskdata.
The class DynamicSparkCompute, method lazyInit.
// when checkpointing is enabled, and Spark is loading DStream operations from an existing checkpoint,
// delegate will be null and the initialize() method won't have been called. So we need to instantiate
// the delegate and initialize it.
private void lazyInit(final JavaSparkContext jsc) throws Exception {
  if (delegate == null) {
    PluginFunctionContext pluginFunctionContext = dynamicDriverContext.getPluginFunctionContext();
    delegate = pluginFunctionContext.createPlugin();
    final StageInfo stageInfo = pluginFunctionContext.getStageInfo();
    final JavaSparkExecutionContext sec = dynamicDriverContext.getSparkExecutionContext();
    Transactionals.execute(sec, new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkExecutionPluginContext sparkPluginContext =
          new BasicSparkExecutionPluginContext(sec, jsc, datasetContext, stageInfo);
        delegate.initialize(sparkPluginContext);
      }
    }, Exception.class);
  }
}
Use of co.cask.cdap.api.data.DatasetContext in project cdap by caskdata.
The class MockAction, method run.
@Override
public void run(ActionContext context) throws Exception {
  context.execute(new TxRunnable() {
    @Override
    public void run(DatasetContext context) throws Exception {
      Table table = context.getDataset(config.tableName);
      Put put = new Put(config.rowKey);
      put.add(config.columnKey, config.value);
      table.put(put);
    }
  });
  // Set the same value in the arguments as well.
  context.getArguments().set(config.rowKey + config.columnKey, config.value);
}
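The config object above supplies the table name, row key, column key, and value. A sketch of what such a plugin config might look like, inferred only from the fields used in run(); the actual MockAction config class may differ.

// Hypothetical config shape, inferred from the usages above (sketch only).
public static class Config extends PluginConfig {
  private String tableName;  // name of the Table dataset to write to
  private String rowKey;     // row to write
  private String columnKey;  // column to write
  private String value;      // value to store
}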
Use of co.cask.cdap.api.data.DatasetContext in project cdap by caskdata.
The class NotificationTest, method useTransactionTest.
@Test
public void useTransactionTest() throws Exception {
  // Perform admin operations to create the dataset instance; keyValueTable is a system dataset module.
  namespaceAdmin.create(new NamespaceMeta.Builder().setName(namespace).build());
  DatasetId myTableInstance = namespace.dataset("myTable");
  dsFramework.addInstance("keyValueTable", myTableInstance, DatasetProperties.EMPTY);
  final CountDownLatch receivedLatch = new CountDownLatch(1);
  Assert.assertTrue(feedManager.createFeed(FEED1_INFO));
  try {
    Cancellable cancellable = notificationService.subscribe(FEED1, new NotificationHandler<String>() {
      private int received = 0;

      @Override
      public Type getNotificationType() {
        return String.class;
      }

      @Override
      public void received(final String notification, NotificationContext notificationContext) {
        notificationContext.execute(new TxRunnable() {
          @Override
          public void run(DatasetContext context) throws Exception {
            KeyValueTable table = context.getDataset("myTable");
            table.write("foo", String.format("%s-%d", notification, received++));
            receivedLatch.countDown();
          }
        }, TxRetryPolicy.maxRetries(5));
      }
    });
    // Short delay to let the subscriber set up the subscription.
    TimeUnit.MILLISECONDS.sleep(500);
    try {
      notificationService.publish(FEED1, "foobar");
      // Wait for the subscriber to receive that notification.
      Assert.assertTrue(receivedLatch.await(5, TimeUnit.SECONDS));
      // Read the KeyValueTable for the value updated by the subscriber.
      // We need to poll a couple of times because the subscriber's transaction may not
      // have been committed yet by the time the received() method returns.
      final KeyValueTable table = dsFramework.getDataset(myTableInstance, DatasetDefinition.NO_ARGUMENTS, null);
      Assert.assertNotNull(table);
      final TransactionContext txContext = new TransactionContext(txClient, table);
      Tasks.waitFor(true, new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          txContext.start();
          try {
            return "foobar-0".equals(Bytes.toString(table.read("foo")));
          } finally {
            txContext.finish();
          }
        }
      }, 5, TimeUnit.SECONDS);
    } finally {
      cancellable.cancel();
    }
  } finally {
    dsFramework.deleteInstance(myTableInstance);
    feedManager.deleteFeed(FEED1);
    namespaceAdmin.delete(namespace);
  }
}
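All of the examples above share the same shape: datasets are requested from the DatasetContext passed to TxRunnable.run, and every read or write happens before run returns, so the work commits as a single transaction. A minimal sketch of that shape follows; the dataset name and the transactional field are placeholders, not taken from any of the snippets above.

// Sketch of the common pattern (placeholder names only).
Transactions.executeUnchecked(transactional, new TxRunnable() {
  @Override
  public void run(DatasetContext context) throws Exception {
    KeyValueTable table = context.getDataset("someTable");
    table.write("someKey", "someValue");
  }
});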