Use of co.cask.cdap.app.runtime.Arguments in project cdap by caskdata:
class DistributedMapReduceTaskContextProvider, method createInjector.
/**
 * Builds the Guice injector for a MapReduce task runtime.
 *
 * <p>Reads the program options embedded in the Hadoop configuration and wires the
 * distributed program runnable module with the program id, run id, instance id and
 * Kerberos principal taken from the system arguments.
 *
 * @param cConf the CDAP configuration
 * @param hConf the Hadoop configuration carrying the serialized MapReduce context
 * @return the injector for the task runtime
 */
private static Injector createInjector(CConfiguration cConf, Configuration hConf) {
  MapReduceContextConfig contextConfig = new MapReduceContextConfig(hConf);
  Arguments systemArgs = contextConfig.getProgramOptions().getArguments();
  // principal will be null if running on a kerberos distributed cluster
  String principal = systemArgs.getOption(ProgramOptionConstants.PRINCIPAL);
  String runId = systemArgs.getOption(ProgramOptionConstants.RUN_ID);
  String instanceId = systemArgs.getOption(ProgramOptionConstants.INSTANCE_ID);
  DistributedProgramRunnableModule runnableModule = new DistributedProgramRunnableModule(cConf, hConf);
  return Guice.createInjector(
    runnableModule.createModule(contextConfig.getProgramId(), runId, instanceId, principal));
}
Use of co.cask.cdap.app.runtime.Arguments in project cdap by caskdata:
class SparkProgramRunner, method run.
/**
 * Runs the given Spark program and returns a {@link ProgramController} for it.
 *
 * <p>Validates the program metadata (application spec, program type, Spark spec, host),
 * builds the {@link SparkRuntimeContext}, instantiates the user's {@link Spark} class and
 * submits it either locally or to the cluster. On failure, every resource registered in
 * {@code closeables} is closed before the error is propagated.
 *
 * @param program the Spark program to run
 * @param options runtime options; system arguments must carry the host name, and may carry
 *                the scheduler queue and workflow information
 * @return a controller for the started Spark job
 */
@Override
public ProgramController run(Program program, ProgramOptions options) {
  // Get the RunId first. It is used for the creation of the ClassLoader closing thread.
  Arguments arguments = options.getArguments();
  RunId runId = ProgramRunners.getRunId(options);
  // Resources are added with addFirst so closeAll releases them in reverse creation order.
  Deque<Closeable> closeables = new LinkedList<>();
  try {
    // Extract and verify parameters
    ApplicationSpecification appSpec = program.getApplicationSpecification();
    Preconditions.checkNotNull(appSpec, "Missing application specification.");
    ProgramType processorType = program.getType();
    Preconditions.checkNotNull(processorType, "Missing processor type.");
    Preconditions.checkArgument(processorType == ProgramType.SPARK, "Only Spark process type is supported.");
    SparkSpecification spec = appSpec.getSpark().get(program.getName());
    Preconditions.checkNotNull(spec, "Missing SparkSpecification for %s", program.getName());
    // Consistency fix: reuse the arguments extracted above instead of re-calling
    // options.getArguments() (same object, as established at the top of this method).
    String host = arguments.getOption(ProgramOptionConstants.HOST);
    Preconditions.checkArgument(host != null, "No hostname is provided");
    // Get the WorkflowProgramInfo if it is started by Workflow
    WorkflowProgramInfo workflowInfo = WorkflowProgramInfo.create(arguments);
    DatasetFramework programDatasetFramework = workflowInfo == null
      ? datasetFramework
      : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo, appSpec);
    // Setup dataset framework context, if required
    if (programDatasetFramework instanceof ProgramContextAware) {
      ProgramId programId = program.getId();
      ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programId.run(runId)));
    }
    PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
    if (pluginInstantiator != null) {
      closeables.addFirst(pluginInstantiator);
    }
    SparkRuntimeContext runtimeContext =
      new SparkRuntimeContext(new Configuration(hConf), program, options, cConf, host, txClient,
                              programDatasetFramework, discoveryServiceClient, metricsCollectionService,
                              streamAdmin, workflowInfo, pluginInstantiator, secureStore, secureStoreManager,
                              authorizationEnforcer, authenticationContext, messagingService);
    closeables.addFirst(runtimeContext);
    Spark spark;
    try {
      spark = new InstantiatorFactory(false).get(TypeToken.of(program.<Spark>getMainClass())).create();
    } catch (Exception e) {
      LOG.error("Failed to instantiate Spark class for {}", spec.getClassName(), e);
      throw Throwables.propagate(e);
    }
    // Consistency fix: use the local 'arguments' for the scheduler queue as well.
    SparkSubmitter submitter = SparkRuntimeContextConfig.isLocal(hConf)
      ? new LocalSparkSubmitter()
      : new DistributedSparkSubmitter(hConf, locationFactory, host, runtimeContext,
                                      arguments.getOption(Constants.AppFabric.APP_SCHEDULER_QUEUE));
    Service sparkRuntimeService =
      new SparkRuntimeService(cConf, spark, getPluginArchive(options), runtimeContext, submitter);
    sparkRuntimeService.addListener(
      createRuntimeServiceListener(program.getId(), runId, arguments, options.getUserArguments(),
                                   closeables, runtimeStore),
      Threads.SAME_THREAD_EXECUTOR);
    ProgramController controller = new SparkProgramController(sparkRuntimeService, runtimeContext);
    LOG.debug("Starting Spark Job. Context: {}", runtimeContext);
    if (SparkRuntimeContextConfig.isLocal(hConf) || UserGroupInformation.isSecurityEnabled()) {
      sparkRuntimeService.start();
    } else {
      // Non-secure distributed cluster: start the service as the configured HDFS user.
      ProgramRunners.startAsUser(cConf.get(Constants.CFG_HDFS_USER), sparkRuntimeService);
    }
    return controller;
  } catch (Throwable t) {
    // Release anything registered so far before propagating the failure.
    closeAll(closeables);
    throw Throwables.propagate(t);
  }
}
Use of co.cask.cdap.app.runtime.Arguments in project cdap by caskdata:
class FlowTest, method testFlowPendingMetric.
/**
 * Verifies the "pending events" queue metrics of PendingMetricTestApp's flow.
 *
 * <p>The source emits 4 events to two downstream flowlets (forward-one receives ints;
 * forward-two receives both ints and strings), which feed a sink. Each flowlet appears to
 * be gated on a marker file in the temp folder (see the createNewFile() calls below) —
 * creating the file lets that flowlet drain its queue. The test checks the pending counts
 * at each stage via waitForPending().
 */
@Test
public void testFlowPendingMetric() throws Exception {
final ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(PendingMetricTestApp.class, TEMP_FOLDER_SUPPLIER);
File tempFolder = TEMP_FOLDER_SUPPLIER.get();
ProgramController controller = null;
// Start only the flow; "count" = 4 events, "temp" = folder holding the marker files.
for (ProgramDescriptor programDescriptor : app.getPrograms()) {
// running mapreduce is out of scope of this tests (there's separate unit-test for that)
if (programDescriptor.getProgramId().getType() == ProgramType.FLOW) {
Arguments args = new BasicArguments(ImmutableMap.of("temp", tempFolder.getAbsolutePath(), "count", "4"));
controller = AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), args, TEMP_FOLDER_SUPPLIER);
}
}
Assert.assertNotNull(controller);
// Metric tag sets for each producer -> queue -> consumer edge; null acts as a wildcard.
Map<String, String> tagsForSourceToOne = metricTagsForQueue("source", "ints", "forward-one");
Map<String, String> tagsForSourceToTwo = metricTagsForQueue("source", null, "forward-two");
Map<String, String> tagsForSourceToTwoInts = metricTagsForQueue("source", "ints", "forward-two");
Map<String, String> tagsForSourceToTwoStrings = metricTagsForQueue("source", "strings", "forward-two");
Map<String, String> tagsForOneToSink = metricTagsForQueue("forward-one", "queue", "sink");
Map<String, String> tagsForTwoToSink = metricTagsForQueue("forward-two", "queue", "sink");
Map<String, String> tagsForAllToOne = metricTagsForQueue(null, null, "forward-one");
Map<String, String> tagsForAllToTwo = metricTagsForQueue(null, null, "forward-two");
Map<String, String> tagsForAllToSink = metricTagsForQueue(null, null, "sink");
Map<String, String> tagsForAll = metricTagsForQueue(null, null, null);
try {
// source emits 4, then forward-one reads 1, hence 3 should be pending
// wait a little longer as flow needs to start
waitForPending(tagsForSourceToOne, 3, 5000);
// wait a little longer as flow needs to start
waitForPending(tagsForAllToOne, 3, 100);
// forward-two receives each of the 4 as a string and an int, but could have read 1 at most per each queue
// so there should be either 3 + 4 = 7 pending or 3 + 3 = 6 pending, or 4 + 4 = 8 pending
// but we don't know whether the queue pending count will be 4, 3 or 3, 4 or 3, 3 or 4, 4
// NOTE(review): the 4-arg waitForPending overloads below appear to accept a range
// (expected, alternative) rather than a single count — confirm against its declaration.
long intPending = waitForPending(tagsForSourceToTwoInts, 3, 4L, 1000);
long stringPending = waitForPending(tagsForSourceToTwoStrings, 3, 4L, 1000);
long totalPending = intPending + stringPending;
Assert.assertTrue(String.format("Expected the pending events count to be 6, 7 or 8. But it was %d", totalPending), totalPending == 6 || totalPending == 7 || totalPending == 8);
waitForPending(tagsForSourceToTwo, 7, 6L, 500);
waitForPending(tagsForAllToTwo, 7, 6L, 100);
// neither one nor two have emitted, so the total pending should be = 12 - 1 (forward-one) - 1 or 2 (forward-two)
// => 10 or 9 events
waitForPending(tagsForAll, 10, 9L, 100);
// kick on forward-one, it should now consume all its events
Assert.assertTrue(new File(tempFolder, "one").createNewFile());
waitForPending(tagsForSourceToOne, 0, 2000);
waitForPending(tagsForAllToOne, 0, 100);
// sink has received 4 but started to read 1, so it has 3 pending
waitForPending(tagsForOneToSink, 3, 1000);
waitForPending(tagsForAllToSink, 3, 100);
// kick-off forward-two, it should now consume all its integer and string events
Assert.assertTrue(new File(tempFolder, "two-i").createNewFile());
Assert.assertTrue(new File(tempFolder, "two-s").createNewFile());
// pending events for all of forward-two's queues should go to zero
waitForPending(tagsForSourceToTwoInts, 0, 2000);
waitForPending(tagsForSourceToTwoStrings, 0, 1000);
waitForPending(tagsForSourceToTwo, 0, 1000);
waitForPending(tagsForAllToTwo, 0, 100);
// but now sink should have 8 more events waiting
waitForPending(tagsForOneToSink, 3, 1000);
waitForPending(tagsForTwoToSink, 8, 1000);
waitForPending(tagsForAllToSink, 11, 100);
// kick off sink, its pending events should now go to zero
Assert.assertTrue(new File(tempFolder, "three").createNewFile());
waitForPending(tagsForOneToSink, 0, 2000);
waitForPending(tagsForTwoToSink, 0, 2000);
waitForPending(tagsForAllToSink, 0, 100);
} finally {
// controller is non-null here: assertNotNull above would have failed before the try block.
controller.stop();
}
}
Aggregations