Use of io.cdap.cdap.app.program.Program in project cdap by caskdata.
The class AbstractProgramRuntimeService, method run():
@Override
public final RuntimeInfo run(ProgramDescriptor programDescriptor, ProgramOptions options, RunId runId) {
  ProgramId programId = programDescriptor.getProgramId();
  ProgramRunId programRunId = programId.run(runId);
  ClusterMode clusterMode = ProgramRunners.getClusterMode(options);

  // Creates the ProgramRunner based on the cluster mode
  ProgramRunner runner = (clusterMode == ClusterMode.ON_PREMISE
    ? programRunnerFactory
    : Optional.ofNullable(remoteProgramRunnerFactory).orElseThrow(UnsupportedOperationException::new))
    .create(programId.getType());

  File tempDir = createTempDirectory(programId, runId);
  AtomicReference<Runnable> cleanUpTaskRef = new AtomicReference<>(createCleanupTask(tempDir, runner));
  DelayedProgramController controller = new DelayedProgramController(programRunId);
  RuntimeInfo runtimeInfo = createRuntimeInfo(controller, programId, () -> cleanUpTaskRef.get().run());
  updateRuntimeInfo(runtimeInfo);

  executor.execute(() -> {
    try {
      // Get the artifact details and save it into the program options.
      ArtifactId artifactId = programDescriptor.getArtifactId();
      ArtifactDetail artifactDetail = getArtifactDetail(artifactId);
      ApplicationSpecification appSpec = programDescriptor.getApplicationSpecification();
      ProgramDescriptor newProgramDescriptor = programDescriptor;

      boolean isPreview = Boolean.valueOf(
        options.getArguments().getOption(ProgramOptionConstants.IS_PREVIEW, "false"));
      // For preview we already have a resolved app spec, so there is no need to regenerate it
      if (!isPreview && appSpec != null && ClusterMode.ON_PREMISE.equals(clusterMode)) {
        try {
          ApplicationSpecification generatedAppSpec =
            regenerateAppSpec(artifactDetail, programId, artifactId, appSpec, options);
          appSpec = generatedAppSpec != null ? generatedAppSpec : appSpec;
          newProgramDescriptor = new ProgramDescriptor(programDescriptor.getProgramId(), appSpec);
        } catch (Exception e) {
          LOG.warn("Failed to regenerate the app spec for program {}, using the existing app spec", programId);
        }
      }

      ProgramOptions runtimeProgramOptions = updateProgramOptions(
        artifactId, programId, options, runId, clusterMode,
        Iterables.getFirst(artifactDetail.getMeta().getClasses().getApps(), null));

      // Take a snapshot of all the plugin artifacts used by the program
      ProgramOptions optionsWithPlugins = createPluginSnapshot(
        runtimeProgramOptions, programId, tempDir, newProgramDescriptor.getApplicationSpecification());

      // Create and run the program
      Program executableProgram = createProgram(cConf, runner, newProgramDescriptor, artifactDetail, tempDir);
      cleanUpTaskRef.set(createCleanupTask(cleanUpTaskRef.get(), executableProgram));
      controller.setProgramController(runner.run(executableProgram, optionsWithPlugins));
    } catch (Exception e) {
      controller.failed(e);
      programStateWriter.error(programRunId, e);
      LOG.error("Exception while trying to run program", e);
    }
  });

  return runtimeInfo;
}
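In practice a caller gets the RuntimeInfo back immediately while the launch proceeds on the executor thread; the test at the end of this page uses exactly this shape. A minimal caller sketch, assuming a runtimeService instance and a prepared descriptor:

// Minimal caller sketch; runtimeService and descriptor are assumed to exist.
RunId runId = RunIds.generate();
RuntimeInfo info = runtimeService.run(descriptor, new SimpleProgramOptions(descriptor.getProgramId()), runId);
// run() returns before the program actually starts: the DelayedProgramController
// stands in until setProgramController() is invoked from the executor thread.
ProgramController controller = info.getController();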
Use of io.cdap.cdap.app.program.Program in project cdap by caskdata.
The class AbstractProgramRuntimeService, method list():
@Override
public Map<RunId, RuntimeInfo> list(ProgramType type) {
  Map<RunId, RuntimeInfo> result = new HashMap<>();
  Lock lock = runtimeInfosLock.readLock();
  lock.lock();
  try {
    result.putAll(runtimeInfos.row(type));
  } finally {
    lock.unlock();
  }

  // Add any missing RuntimeInfo from the remote twill runner
  if (remoteTwillRunnerService == null) {
    return Collections.unmodifiableMap(result);
  }

  for (TwillRunner.LiveInfo liveInfo : remoteTwillRunnerService.lookupLive()) {
    ProgramId programId = TwillAppNames.fromTwillAppName(liveInfo.getApplicationName(), false);
    if (programId == null || !programId.getType().equals(type)) {
      continue;
    }
    for (TwillController controller : liveInfo.getControllers()) {
      // For the remote twill runner, the twill run id and the cdap run id are the same
      RunId runId = controller.getRunId();
      if (result.computeIfAbsent(runId, rid -> createRuntimeInfo(programId, runId, controller)) == null) {
        LOG.warn("Unable to create runtime info for program {} with run id {}", programId, runId);
      }
    }
  }
  return Collections.unmodifiableMap(result);
}
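The copy-under-read-lock step at the top of list() can be isolated into a small sketch. The field names below mirror the excerpt, but the exact field types (a Guava Table guarded by a ReentrantReadWriteLock) are assumptions, not confirmed by the snippet:

// Sketch of the snapshot pattern, under assumed field types.
private final Table<ProgramType, RunId, RuntimeInfo> runtimeInfos = HashBasedTable.create();
private final ReadWriteLock runtimeInfosLock = new ReentrantReadWriteLock();

private Map<RunId, RuntimeInfo> snapshot(ProgramType type) {
  Lock lock = runtimeInfosLock.readLock();
  lock.lock();
  try {
    // Copy while holding the read lock so concurrent writers cannot mutate mid-iteration;
    // the caller then works on the copy without holding any lock.
    return new HashMap<>(runtimeInfos.row(type));
  } finally {
    lock.unlock();
  }
}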
Use of io.cdap.cdap.app.program.Program in project cdap by caskdata.
The class DefaultProgramWorkflowRunner, method create():
@Override
public Runnable create(String name) {
  ProgramRunner programRunner = programRunnerFactory.create(programType);
  try {
    ProgramId programId = workflowProgram.getId().getParent().program(programType, name);
    Program program = Programs.create(cConf, workflowProgram, programId, programRunner);
    return getProgramRunnable(name, programRunner, program);
  } catch (Exception e) {
    closeProgramRunner(programRunner);
    throw Throwables.propagate(e);
  }
}
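Note the acquire-or-release shape: if anything fails after the ProgramRunner is created, it is closed before the exception propagates, so no runner leaks. As a hedged usage sketch, a workflow driver would call create() once per node and execute the returned Runnable; the instance and node name below are illustrative, not taken from the source:

// Hypothetical driver snippet; workflowRunner and "DataCleansing" are illustrative.
Runnable node = workflowRunner.create("DataCleansing");
node.run(); // runs the inner program synchronously as one workflow node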
Use of io.cdap.cdap.app.program.Program in project cdap by caskdata.
The class MapReduceTaskContextProvider, method createCacheLoader():
/**
 * Creates a {@link CacheLoader} for the task context cache.
 */
private CacheLoader<ContextCacheKey, BasicMapReduceTaskContext> createCacheLoader(final Injector injector) {
  DiscoveryServiceClient discoveryServiceClient = injector.getInstance(DiscoveryServiceClient.class);
  DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
  SecureStore secureStore = injector.getInstance(SecureStore.class);
  SecureStoreManager secureStoreManager = injector.getInstance(SecureStoreManager.class);
  MessagingService messagingService = injector.getInstance(MessagingService.class);
  // Multiple instances of BasicMapReduceTaskContext can share the same program.
  AtomicReference<Program> programRef = new AtomicReference<>();
  MetadataReader metadataReader = injector.getInstance(MetadataReader.class);
  MetadataPublisher metadataPublisher = injector.getInstance(MetadataPublisher.class);
  FieldLineageWriter fieldLineageWriter = injector.getInstance(FieldLineageWriter.class);
  RemoteClientFactory remoteClientFactory = injector.getInstance(RemoteClientFactory.class);

  return new CacheLoader<ContextCacheKey, BasicMapReduceTaskContext>() {
    @Override
    public BasicMapReduceTaskContext load(ContextCacheKey key) throws Exception {
      TaskAttemptID taskAttemptId = key.getTaskAttemptID();
      // taskAttemptId could be null if used from a org.apache.hadoop.mapreduce.Partitioner or
      // from a org.apache.hadoop.io.RawComparator, in which case we can get the JobId from the conf.
      // Note that the JobId isn't in the conf for the OutputCommitter#setupJob method, in which case
      // we use the taskAttemptId.
      Path txFile = MainOutputCommitter.getTxFile(key.getConfiguration(),
                                                  taskAttemptId != null ? taskAttemptId.getJobID() : null);
      FileSystem fs = txFile.getFileSystem(key.getConfiguration());
      Transaction transaction = null;
      if (fs.exists(txFile)) {
        try (FSDataInputStream txFileInputStream = fs.open(txFile)) {
          transaction = new TransactionCodec().decode(ByteStreams.toByteArray(txFileInputStream));
        }
      }

      MapReduceContextConfig contextConfig = new MapReduceContextConfig(key.getConfiguration());
      MapReduceClassLoader classLoader = MapReduceClassLoader.getFromConfiguration(key.getConfiguration());

      Program program = programRef.get();
      if (program == null) {
        // Creation of the program is relatively cheap, so just create and do compare-and-set.
        programRef.compareAndSet(null, createProgram(contextConfig, classLoader.getProgramClassLoader()));
        program = programRef.get();
      }

      WorkflowProgramInfo workflowInfo = contextConfig.getWorkflowProgramInfo();
      DatasetFramework programDatasetFramework = workflowInfo == null
        ? datasetFramework
        : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo,
                                                                   program.getApplicationSpecification());
      // Setup dataset framework context, if required
      if (programDatasetFramework instanceof ProgramContextAware) {
        ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(contextConfig.getProgramOptions()));
        ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
      }

      MapReduceSpecification spec = program.getApplicationSpecification().getMapReduce().get(program.getName());

      MetricsCollectionService metricsCollectionService = null;
      MapReduceMetrics.TaskType taskType = null;
      String taskId = null;
      ProgramOptions options = contextConfig.getProgramOptions();

      // taskAttemptId can be null, e.g. when invoked from a org.apache.hadoop.io.RawComparator
      if (taskAttemptId != null) {
        taskId = taskAttemptId.getTaskID().toString();
        if (MapReduceMetrics.TaskType.hasType(taskAttemptId.getTaskType())) {
          taskType = MapReduceMetrics.TaskType.from(taskAttemptId.getTaskType());
          // if this is not for a mapper or a reducer, we don't need the metrics collection service
          metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
          options = new SimpleProgramOptions(options.getProgramId(), options.getArguments(),
                                             new BasicArguments(RuntimeArguments.extractScope(
                                               "task", taskType.toString().toLowerCase(),
                                               contextConfig.getProgramOptions().getUserArguments().asMap())),
                                             options.isDebug());
        }
      }

      CConfiguration cConf = injector.getInstance(CConfiguration.class);
      TransactionSystemClient txClient = injector.getInstance(TransactionSystemClient.class);
      NamespaceQueryAdmin namespaceQueryAdmin = injector.getInstance(NamespaceQueryAdmin.class);
      return new BasicMapReduceTaskContext(
        program, options, cConf, taskType, taskId, spec, workflowInfo, discoveryServiceClient,
        metricsCollectionService, txClient, transaction, programDatasetFramework,
        classLoader.getPluginInstantiator(), contextConfig.getLocalizedResources(),
        secureStore, secureStoreManager, accessEnforcer, authenticationContext, messagingService,
        mapReduceClassLoader, metadataReader, metadataPublisher, namespaceQueryAdmin,
        fieldLineageWriter, remoteClientFactory);
    }
  };
}
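A minimal sketch of how such a CacheLoader is typically plugged into a Guava LoadingCache; the cache sizing and expiry values here are assumptions for illustration, not taken from the source:

// Assumed wiring; maximumSize/expireAfterAccess values are illustrative only.
LoadingCache<ContextCacheKey, BasicMapReduceTaskContext> taskContexts =
  CacheBuilder.newBuilder()
    .maximumSize(100)
    .expireAfterAccess(10, TimeUnit.MINUTES)
    .build(createCacheLoader(injector));

// Later, from a mapper or reducer: get-or-load the per-task context.
BasicMapReduceTaskContext context = taskContexts.getUnchecked(cacheKey);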
Use of io.cdap.cdap.app.program.Program in project cdap by caskdata.
The class AbstractProgramRuntimeServiceTest, method testDeadlock():
@Test(timeout = 5000)
public void testDeadlock() throws IOException, ExecutionException, InterruptedException, TimeoutException {
  // This test is for the race condition described in CDAP-3579: if a program finishes so fast
  // that AbstractProgramRuntimeService is still inside the run method, it holds the object lock,
  // causing the callback from the listener to block forever.
  ProgramRunnerFactory runnerFactory = createProgramRunnerFactory();
  Program program = createDummyProgram();
  ProgramRuntimeService runtimeService =
    new TestProgramRuntimeService(CConfiguration.create(), runnerFactory, program, null, null);
  runtimeService.startAndWait();
  try {
    ProgramDescriptor descriptor =
      new ProgramDescriptor(program.getId(), null, NamespaceId.DEFAULT.artifact("test", "1.0"));
    ProgramController controller =
      runtimeService.run(descriptor, new SimpleProgramOptions(program.getId()), RunIds.generate()).getController();
    Tasks.waitFor(ProgramController.State.COMPLETED, controller::getState,
                  5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    Tasks.waitFor(true, () -> runtimeService.list(ProgramType.WORKER).isEmpty(),
                  5, TimeUnit.SECONDS, 100, TimeUnit.MICROSECONDS);
  } finally {
    runtimeService.stopAndWait();
  }
}
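To make the hazard concrete, here is a deliberately broken sketch of the anti-pattern the test guards against; the class is illustrative only and is not CDAP's actual implementation:

import java.util.HashMap;
import java.util.Map;

// Anti-pattern sketch: run() holds the object lock while waiting on a completion
// callback that needs the same lock, so a program that finishes instantly deadlocks.
class NaiveRuntimeService {
  private final Map<String, String> running = new HashMap<>();

  synchronized String run(String programId) throws InterruptedException {
    Thread completionCallback = new Thread(() -> onCompleted(programId));
    completionCallback.start();  // the "program" completes immediately on another thread
    completionCallback.join();   // waits for the callback, which waits for this lock: deadlock
    running.put(programId, "RUNNING");
    return programId;
  }

  synchronized void onCompleted(String programId) {
    running.remove(programId);   // never reached: blocked on the instance lock held by run()
  }
}

AbstractProgramRuntimeService sidesteps this by performing the actual launch on a separate executor and returning a DelayedProgramController up front, so run() releases its lock before any listener can fire.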