Use of io.cdap.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata: class AbstractProgramRuntimeServiceTest, method testScopingRuntimeArguments.
@Test
public void testScopingRuntimeArguments() throws Exception {
Map<ProgramId, Arguments> argumentsMap = new ConcurrentHashMap<>();
ProgramRunnerFactory runnerFactory = createProgramRunnerFactory(argumentsMap);
final Program program = createDummyProgram();
final ProgramRuntimeService runtimeService = new AbstractProgramRuntimeService(CConfiguration.create(), runnerFactory, null, new NoOpProgramStateWriter(), null) {
@Override
public ProgramLiveInfo getLiveInfo(ProgramId programId) {
return new ProgramLiveInfo(programId, "runtime") {
};
}
@Override
protected Program createProgram(CConfiguration cConf, ProgramRunner programRunner, ProgramDescriptor programDescriptor, ArtifactDetail artifactDetail, File tempDir) throws IOException {
return program;
}
@Override
protected ArtifactDetail getArtifactDetail(ArtifactId artifactId) throws IOException, ArtifactNotFoundException {
io.cdap.cdap.api.artifact.ArtifactId id = new io.cdap.cdap.api.artifact.ArtifactId("dummy", new ArtifactVersion("1.0"), ArtifactScope.USER);
return new ArtifactDetail(new ArtifactDescriptor(NamespaceId.DEFAULT.getEntityName(), id, Locations.toLocation(TEMP_FOLDER.newFile())), new ArtifactMeta(ArtifactClasses.builder().build()));
}
};
runtimeService.startAndWait();
try {
ProgramDescriptor descriptor = new ProgramDescriptor(program.getId(), null, NamespaceId.DEFAULT.artifact("test", "1.0"));
// Set of scopes to test
String programScope = program.getType().getScope();
String clusterName = "c1";
List<String> scopes = Arrays.asList("cluster.*.", "cluster." + clusterName + ".", "cluster." + clusterName + ".app.*.", "app.*.", "app." + program.getApplicationId() + ".", "app." + program.getApplicationId() + "." + programScope + ".*.", "app." + program.getApplicationId() + "." + programScope + "." + program.getName() + ".", programScope + ".*.", programScope + "." + program.getName() + ".", "");
for (String scope : scopes) {
ProgramOptions programOptions = new SimpleProgramOptions(program.getId(), new BasicArguments(Collections.singletonMap(Constants.CLUSTER_NAME, clusterName)), new BasicArguments(Collections.singletonMap(scope + "size", Integer.toString(scope.length()))));
final ProgramController controller = runtimeService.run(descriptor, programOptions, RunIds.generate()).getController();
Tasks.waitFor(ProgramController.State.COMPLETED, controller::getState, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
// The program should receive the argument with the scope prefix stripped
Arguments args = argumentsMap.get(program.getId());
Assert.assertNotNull(args);
Assert.assertEquals(scope.length(), Integer.parseInt(args.getOption("size")));
}
} finally {
runtimeService.stopAndWait();
}
}
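The scopes exercised above work because CDAP strips matching scope prefixes before handing arguments to the program. Below is a minimal sketch of that resolution using the public RuntimeArguments.extractScope helper; the application name "dummyApp" and the key names are made up for illustration, and the sketch assumes the documented behavior that a scoped value overrides the unscoped one.
Map<String, String> userArgs = new HashMap<>();
// scoped value: applies only to the application "dummyApp"
userArgs.put("app.dummyApp.size", "10");
// unscoped value: the global default
userArgs.put("size", "5");
// extracting the "app" scope for "dummyApp" strips the prefix and lets the scoped value win
Map<String, String> resolved = RuntimeArguments.extractScope("app", "dummyApp", userArgs);
Arguments arguments = new BasicArguments(resolved);
// arguments.getOption("size") now returns "10"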
Use of io.cdap.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata: class OpenCloseDataSetTest, method testDataSetsAreClosed.
@Test(timeout = 120000)
public void testDataSetsAreClosed() throws Exception {
final String tableName = "foo";
TrackingTable.resetTracker();
ApplicationWithPrograms app = AppFabricTestHelper.deployApplicationWithManager(DummyAppWithTrackingTable.class, TEMP_FOLDER_SUPPLIER);
List<ProgramController> controllers = Lists.newArrayList();
// start the programs
for (ProgramDescriptor programDescriptor : app.getPrograms()) {
if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
continue;
}
// Start service with 1 thread
Map<String, String> args = Collections.singletonMap(SystemArguments.SERVICE_THREADS, "1");
controllers.add(AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(args), TEMP_FOLDER_SUPPLIER));
}
DiscoveryServiceClient discoveryServiceClient = AppFabricTestHelper.getInjector().getInstance(DiscoveryServiceClient.class);
Discoverable discoverable = new RandomEndpointStrategy(() -> discoveryServiceClient.discover(String.format("%s.%s.%s.%s", ProgramType.SERVICE.getDiscoverableTypeName(), DefaultId.NAMESPACE.getEntityName(), "dummy", "DummyService"))).pick(5, TimeUnit.SECONDS);
Assert.assertNotNull(discoverable);
// write some data to the tracking table through the service
for (int i = 0; i < 4; i++) {
String msg = "x" + i;
URL url = URIScheme.createURI(discoverable, "v3/namespaces/default/apps/dummy/services/DummyService/methods/%s", msg).toURL();
HttpRequests.execute(HttpRequest.put(url).build(), new DefaultHttpRequestConfig(false));
}
// get the number of writes to the foo table
Assert.assertEquals(4, TrackingTable.getTracker(tableName, "write"));
// only 2 "open" calls should be tracked:
// 1. the service has started with one instance (service is loaded lazily on 1st request)
// 2. DatasetSystemMetadataWriter also instantiates the dataset because it needs to add some system tags
// for the dataset
Assert.assertEquals(2, TrackingTable.getTracker(tableName, "open"));
// now query data from the service
URL url = URIScheme.createURI(discoverable, "v3/namespaces/default/apps/dummy/services/DummyService/methods/x1").toURL();
HttpResponse response = HttpRequests.execute(HttpRequest.get(url).build(), new DefaultHttpRequestConfig(false));
String responseContent = new Gson().fromJson(response.getResponseBodyAsString(), String.class);
Assert.assertEquals("x1", responseContent);
// the dataset must now have recorded a read operation
Assert.assertEquals(1, TrackingTable.getTracker(tableName, "read"));
// since the same service instance is used, there shouldn't be any new open
Assert.assertEquals(2, TrackingTable.getTracker(tableName, "open"));
// The dataset that was instantiated by the DatasetSystemMetadataWriter should have been closed
Assert.assertEquals(1, TrackingTable.getTracker(tableName, "close"));
// stop all programs, they should both close the data set foo
for (ProgramController controller : controllers) {
controller.stop().get();
}
int timesOpened = TrackingTable.getTracker(tableName, "open");
Assert.assertTrue(timesOpened >= 2);
Assert.assertEquals(timesOpened, TrackingTable.getTracker(tableName, "close"));
// now start the m/r job
ProgramController controller = null;
for (ProgramDescriptor programDescriptor : app.getPrograms()) {
if (programDescriptor.getProgramId().getType().equals(ProgramType.MAPREDUCE)) {
controller = AppFabricTestHelper.submit(app, programDescriptor.getSpecification().getClassName(), new BasicArguments(), TEMP_FOLDER_SUPPLIER);
}
}
Assert.assertNotNull(controller);
while (!controller.getState().equals(ProgramController.State.COMPLETED)) {
TimeUnit.MILLISECONDS.sleep(100);
}
// M/r job is done, one mapper and the m/r client should have opened and closed the data set foo
// we don't know the exact number of times opened, but it is at least once, and it must be closed the same number
// of times.
Assert.assertTrue(timesOpened < TrackingTable.getTracker(tableName, "open"));
Assert.assertEquals(TrackingTable.getTracker(tableName, "open"), TrackingTable.getTracker(tableName, "close"));
Assert.assertTrue(0 < TrackingTable.getTracker("bar", "open"));
Assert.assertEquals(TrackingTable.getTracker("bar", "open"), TrackingTable.getTracker("bar", "close"));
}
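The assertions above rely on TrackingTable keeping static per-table, per-operation counters. The following is a minimal sketch of that counting pattern; the class OperationTracker is a hypothetical stand-in, not the actual CDAP test helper.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public final class OperationTracker {
  // one counter per (table, operation) pair, e.g. ("foo", "open")
  private static final Map<String, AtomicInteger> COUNTERS = new ConcurrentHashMap<>();

  public static void track(String table, String op) {
    COUNTERS.computeIfAbsent(table + ":" + op, k -> new AtomicInteger()).incrementAndGet();
  }

  public static int getTracker(String table, String op) {
    AtomicInteger counter = COUNTERS.get(table + ":" + op);
    return counter == null ? 0 : counter.get();
  }

  public static void resetTracker() {
    COUNTERS.clear();
  }
}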
Use of io.cdap.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata: class DefaultRuntimeJob, method run.
@Override
public void run(RuntimeJobEnvironment runtimeJobEnv) throws Exception {
// Setup process wide settings
Thread.setDefaultUncaughtExceptionHandler(new UncaughtExceptionHandler());
SLF4JBridgeHandler.removeHandlersForRootLogger();
SLF4JBridgeHandler.install();
// Get Program Options
ProgramOptions programOpts = readJsonFile(new File(DistributedProgramRunner.PROGRAM_OPTIONS_FILE_NAME), ProgramOptions.class);
ProgramRunId programRunId = programOpts.getProgramId().run(ProgramRunners.getRunId(programOpts));
ProgramId programId = programRunId.getParent();
Arguments systemArgs = programOpts.getArguments();
// Setup logging context for the program
LoggingContextAccessor.setLoggingContext(LoggingContextHelper.getLoggingContextWithRunId(programRunId, systemArgs.asMap()));
// Get the cluster launch type
Cluster cluster = GSON.fromJson(systemArgs.getOption(ProgramOptionConstants.CLUSTER), Cluster.class);
// Get App spec
ApplicationSpecification appSpec = readJsonFile(new File(DistributedProgramRunner.APP_SPEC_FILE_NAME), ApplicationSpecification.class);
ProgramDescriptor programDescriptor = new ProgramDescriptor(programId, appSpec);
// Create injector and get program runner
Injector injector = Guice.createInjector(createModules(runtimeJobEnv, createCConf(runtimeJobEnv, programOpts), programRunId, programOpts));
CConfiguration cConf = injector.getInstance(CConfiguration.class);
// Initialize log appender
LogAppenderInitializer logAppenderInitializer = injector.getInstance(LogAppenderInitializer.class);
logAppenderInitializer.initialize();
SystemArguments.setLogLevel(programOpts.getUserArguments(), logAppenderInitializer);
ProxySelector oldProxySelector = ProxySelector.getDefault();
RuntimeMonitors.setupMonitoring(injector, programOpts);
Deque<Service> coreServices = createCoreServices(injector, systemArgs, cluster);
startCoreServices(coreServices);
// regenerate app spec
ConfiguratorFactory configuratorFactory = injector.getInstance(ConfiguratorFactory.class);
try {
Map<String, String> systemArguments = new HashMap<>(programOpts.getArguments().asMap());
File pluginDir = new File(programOpts.getArguments().getOption(ProgramOptionConstants.PLUGIN_DIR, DistributedProgramRunner.PLUGIN_DIR));
// create a directory to hold the plugin artifacts fetched while regenerating the app spec
DirUtils.mkdirs(pluginDir);
if (!programOpts.getArguments().hasOption(ProgramOptionConstants.PLUGIN_DIR)) {
systemArguments.put(ProgramOptionConstants.PLUGIN_DIR, DistributedProgramRunner.PLUGIN_DIR);
}
// remember the file names in the artifact folder before app regeneration
List<String> pluginFiles = DirUtils.listFiles(pluginDir, File::isFile).stream().map(File::getName).collect(Collectors.toList());
ApplicationSpecification generatedAppSpec = regenerateAppSpec(systemArguments, programOpts.getUserArguments().asMap(), programId, appSpec, programDescriptor, configuratorFactory);
appSpec = generatedAppSpec != null ? generatedAppSpec : appSpec;
programDescriptor = new ProgramDescriptor(programDescriptor.getProgramId(), appSpec);
List<String> pluginFilesAfter = DirUtils.listFiles(pluginDir, File::isFile).stream().map(File::getName).collect(Collectors.toList());
if (pluginFilesAfter.isEmpty()) {
systemArguments.remove(ProgramOptionConstants.PLUGIN_DIR);
}
// if the plugin files changed, drop the plugin archive option so it gets recreated from the plugin directory
if (!pluginFiles.equals(pluginFilesAfter)) {
systemArguments.remove(ProgramOptionConstants.PLUGIN_ARCHIVE);
}
// update program options
programOpts = new SimpleProgramOptions(programOpts.getProgramId(), new BasicArguments(systemArguments), programOpts.getUserArguments(), programOpts.isDebug());
} catch (Exception e) {
LOG.warn("Failed to regenerate the app spec for program {}, using the existing app spec", programId, e);
}
ProgramStateWriter programStateWriter = injector.getInstance(ProgramStateWriter.class);
RuntimeClientService runtimeClientService = injector.getInstance(RuntimeClientService.class);
CompletableFuture<ProgramController.State> programCompletion = new CompletableFuture<>();
try {
ProgramRunner programRunner = injector.getInstance(ProgramRunnerFactory.class).create(programId.getType());
// Create and run the program. The program files should be present in the current working directory.
try (Program program = createProgram(cConf, programRunner, programDescriptor, programOpts)) {
ProgramController controller = programRunner.run(program, programOpts);
controllerFuture.complete(controller);
runtimeClientService.onProgramStopRequested(controller::stop);
controller.addListener(new AbstractListener() {
@Override
public void completed() {
programCompletion.complete(ProgramController.State.COMPLETED);
}
@Override
public void killed() {
// Write an extra state to make sure there is always a terminal state even if the
// program run itself failed to write out the state.
programStateWriter.killed(programRunId);
programCompletion.complete(ProgramController.State.KILLED);
}
@Override
public void error(Throwable cause) {
// Write an extra state to make sure there is always a terminal state even if the
// program run itself failed to write out the state.
programStateWriter.error(programRunId, cause);
programCompletion.completeExceptionally(cause);
}
}, Threads.SAME_THREAD_EXECUTOR);
if (stopRequested) {
controller.stop();
}
// Block on the completion
programCompletion.get();
} finally {
if (programRunner instanceof Closeable) {
Closeables.closeQuietly((Closeable) programRunner);
}
}
} catch (Throwable t) {
controllerFuture.completeExceptionally(t);
if (!programCompletion.isDone()) {
// Log here so that the error still gets sent to the program's log collection.
// Only log if the program completion is not done;
// otherwise the program runner itself should have logged the error.
LOG.error("Failed to execute program {}", programRunId, t);
// If the program completion is not done, this exception is due to a failure in
// launching the program itself. Write out an extra error state to make sure the
// program state gets transitioned to a terminal state.
programStateWriter.error(programRunId, t);
}
throw t;
} finally {
stopCoreServices(coreServices, logAppenderInitializer);
ProxySelector.setDefault(oldProxySelector);
Authenticator.setDefault(null);
runCompletedLatch.countDown();
}
}
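The run method above loads ProgramOptions and ApplicationSpecification from JSON files through a readJsonFile helper. Below is a minimal sketch of what such a helper could look like, assuming a shared GSON instance configured with the codecs those types require; this is an illustration, not the actual CDAP implementation.
private static <T> T readJsonFile(File file, Class<T> type) throws IOException {
  // deserialize the whole file as UTF-8 JSON into the requested type
  try (Reader reader = new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8)) {
    return GSON.fromJson(reader, type);
  }
}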
Use of io.cdap.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata: class RemoteExecutionTwillRunnerService, method createControllerIfNeeded.
/**
* Creates a {@link TwillController} based on the given {@link RunRecordDetail} if it should be monitored by this
* service.
*/
private boolean createControllerIfNeeded(RunRecordDetail runRecordDetail) {
// Program runs in the PENDING or STARTING state will eventually start, and their controllers will be created later.
if (runRecordDetail.getStatus() != ProgramRunStatus.RUNNING && runRecordDetail.getStatus() != ProgramRunStatus.SUSPENDED) {
LOG.debug("Skip creating controller for run {} with status {}", runRecordDetail.getProgramRunId(), runRecordDetail.getStatus());
return false;
}
Map<String, String> systemArgs = runRecordDetail.getSystemArgs();
try {
ClusterMode clusterMode = ClusterMode.valueOf(systemArgs.getOrDefault(ProgramOptionConstants.CLUSTER_MODE, ClusterMode.ON_PREMISE.name()));
if (clusterMode != ClusterMode.ISOLATED) {
LOG.debug("Ignore run {} of non supported cluster mode {}", runRecordDetail.getProgramRunId(), clusterMode);
return false;
}
} catch (IllegalArgumentException e) {
LOG.warn("Ignore run record with an invalid cluster mode", e);
return false;
}
ProgramOptions programOpts = new SimpleProgramOptions(runRecordDetail.getProgramRunId().getParent(), new BasicArguments(runRecordDetail.getSystemArgs()), new BasicArguments(runRecordDetail.getUserArgs()));
// Create a controller via the controller factory.
// Since no startup wait is needed, the timeout is arbitrarily short
new ControllerFactory(runRecordDetail.getProgramRunId(), programOpts).create(null, 5, TimeUnit.SECONDS);
return true;
}
Use of io.cdap.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata: class DistributedWorkflowProgramRunner, method setupLaunchConfig.
@Override
protected void setupLaunchConfig(ProgramLaunchConfig launchConfig, Program program, ProgramOptions options, CConfiguration cConf, Configuration hConf, File tempDir) throws IOException {
WorkflowSpecification spec = program.getApplicationSpecification().getWorkflows().get(program.getName());
List<ClassAcceptor> acceptors = new ArrayList<>();
acceptors.add(launchConfig.getClassAcceptor());
// Only MapReduce and Spark nodes are of interest,
// because CUSTOM_ACTION types run inside the Workflow driver
Set<SchedulableProgramType> runnerTypes = EnumSet.of(SchedulableProgramType.MAPREDUCE, SchedulableProgramType.SPARK);
Iterable<ScheduleProgramInfo> programInfos = spec.getNodeIdMap().values().stream().filter(WorkflowActionNode.class::isInstance).map(WorkflowActionNode.class::cast).map(WorkflowActionNode::getProgram).filter(programInfo -> runnerTypes.contains(programInfo.getProgramType()))::iterator;
// Can't use Stream.forEach as we want to preserve the IOException being thrown
for (ScheduleProgramInfo programInfo : programInfos) {
ProgramType programType = ProgramType.valueOfSchedulableType(programInfo.getProgramType());
ProgramRunner runner = programRunnerFactory.create(programType);
try {
if (runner instanceof DistributedProgramRunner) {
// Call setupLaunchConfig with the corresponding program.
// Need to construct a new ProgramOptions with the user arguments scoped to the given program
ProgramId programId = program.getId().getParent().program(programType, programInfo.getProgramName());
Map<String, String> programUserArgs = RuntimeArguments.extractScope(programId.getType().getScope(), programId.getProgram(), options.getUserArguments().asMap());
ProgramOptions programOptions = new SimpleProgramOptions(programId, options.getArguments(), new BasicArguments(programUserArgs));
((DistributedProgramRunner) runner).setupLaunchConfig(launchConfig, Programs.create(cConf, program, programId, runner), programOptions, cConf, hConf, tempDir);
acceptors.add(launchConfig.getClassAcceptor());
}
} finally {
if (runner instanceof Closeable) {
Closeables.closeQuietly((Closeable) runner);
}
}
}
// Set the class acceptor
launchConfig.setClassAcceptor(new AndClassAcceptor(acceptors));
// Find out the default resource requirements based on the programs inside the workflow.
// Give the Workflow driver at least 768 MB of container memory.
Map<String, Resources> runnablesResources = Maps.transformValues(launchConfig.getRunnables(), this::getResources);
Resources defaultResources = maxResources(new Resources(768), findDriverResources(spec.getNodes(), runnablesResources));
// Clear and set the runnable for the workflow driver.
launchConfig.clearRunnables();
// Extract scoped runtime arguments that are meant only for the workflow, not for its child nodes
Map<String, String> runtimeArgs = RuntimeArguments.extractScope("task", "workflow", options.getUserArguments().asMap());
launchConfig.addRunnable(spec.getName(), new WorkflowTwillRunnable(spec.getName()), 1, runtimeArgs, defaultResources, 0);
}