Use of io.cdap.cdap.api.spark.SparkSpecification in project cdap by cdapio.
The class SparkProgramRunner, method run:
@Override
public ProgramController run(Program program, ProgramOptions options) {
  LOG.trace("Starting Spark program {} with SparkProgramRunner of ClassLoader {}",
            program.getId(), getClass().getClassLoader());
  // Get the RunId first. It is used for the creation of the ClassLoader closing thread.
  Arguments arguments = options.getArguments();
  RunId runId = ProgramRunners.getRunId(options);
  Deque<Closeable> closeables = new LinkedList<>();
  try {
    // Extract and verify parameters
    ApplicationSpecification appSpec = program.getApplicationSpecification();
    Preconditions.checkNotNull(appSpec, "Missing application specification.");
    ProgramType processorType = program.getType();
    Preconditions.checkNotNull(processorType, "Missing processor type.");
    Preconditions.checkArgument(processorType == ProgramType.SPARK,
                                "Only Spark process type is supported.");
    SparkSpecification spec = appSpec.getSpark().get(program.getName());
    Preconditions.checkNotNull(spec, "Missing SparkSpecification for %s", program.getName());
    String host = options.getArguments().getOption(ProgramOptionConstants.HOST);
    Preconditions.checkArgument(host != null, "No hostname is provided");
    // Get the WorkflowProgramInfo if it is started by Workflow
    WorkflowProgramInfo workflowInfo = WorkflowProgramInfo.create(arguments);
    DatasetFramework programDatasetFramework = workflowInfo == null
      ? datasetFramework
      : NameMappedDatasetFramework.createFromWorkflowProgramInfo(datasetFramework, workflowInfo, appSpec);
    // Setup dataset framework context, if required
    if (programDatasetFramework instanceof ProgramContextAware) {
      ProgramId programId = program.getId();
      ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programId.run(runId)));
    }
    PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
    if (pluginInstantiator != null) {
      closeables.addFirst(pluginInstantiator);
    }
    SparkRuntimeContext runtimeContext = new SparkRuntimeContext(
      new Configuration(hConf), program, options, cConf, host, txClient,
      programDatasetFramework, metricsCollectionService, workflowInfo, pluginInstantiator,
      secureStore, secureStoreManager, accessEnforcer, authenticationContext,
      messagingService, serviceAnnouncer, pluginFinder, locationFactory,
      metadataReader, metadataPublisher, namespaceQueryAdmin, fieldLineageWriter,
      remoteClientFactory, () -> { });
    closeables.addFirst(runtimeContext);
    Spark spark;
    try {
      spark = new InstantiatorFactory(false).get(TypeToken.of(program.<Spark>getMainClass())).create();
    } catch (Exception e) {
      LOG.error("Failed to instantiate Spark class for {}", spec.getClassName(), e);
      throw Throwables.propagate(e);
    }
    boolean isLocal = SparkRuntimeContextConfig.isLocal(options);
    SparkSubmitter submitter;
    // If no MasterEnvironment is available (or master-env submission is disabled),
    // fall back to the local or distributed Spark submitter
    MasterEnvironment masterEnv = MasterEnvironments.getMasterEnvironment();
    if (masterEnv != null && cConf.getBoolean(Constants.Environment.PROGRAM_SUBMISSION_MASTER_ENV_ENABLED, true)) {
      submitter = new MasterEnvironmentSparkSubmitter(cConf, locationFactory, host, runtimeContext, masterEnv);
    } else {
      submitter = isLocal
        ? new LocalSparkSubmitter()
        : new DistributedSparkSubmitter(hConf, locationFactory, host, runtimeContext,
                                        options.getArguments().getOption(Constants.AppFabric.APP_SCHEDULER_QUEUE));
    }
    Service sparkRuntimeService = new SparkRuntimeService(cConf, spark, getPluginArchive(options),
                                                          runtimeContext, submitter, locationFactory,
                                                          isLocal, fieldLineageWriter, masterEnv);
    sparkRuntimeService.addListener(createRuntimeServiceListener(closeables), Threads.SAME_THREAD_EXECUTOR);
    ProgramController controller = new SparkProgramController(sparkRuntimeService, runtimeContext);
    LOG.debug("Starting Spark Job. Context: {}", runtimeContext);
    if (isLocal || UserGroupInformation.isSecurityEnabled()) {
      sparkRuntimeService.start();
    } else {
      ProgramRunners.startAsUser(cConf.get(Constants.CFG_HDFS_USER), sparkRuntimeService);
    }
    return controller;
  } catch (Throwable t) {
    closeAllQuietly(closeables);
    throw Throwables.propagate(t);
  }
}
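Cleanup here hinges on createRuntimeServiceListener: every Closeable pushed onto the deque is closed once the runtime service stops, whether it terminates normally or fails. A minimal sketch of such a listener, assuming Guava's Service.Listener and the closeAllQuietly helper seen above (the actual CDAP implementation may differ):

private Service.Listener createRuntimeServiceListener(final Deque<Closeable> closeables) {
  return new Service.Listener() {
    @Override
    public void terminated(Service.State from) {
      // Normal completion: release the runtime context, plugin instantiator, etc.
      closeAllQuietly(closeables);
    }

    @Override
    public void failed(Service.State from, Throwable failure) {
      // Failure path gets the same cleanup, so nothing leaks on error
      closeAllQuietly(closeables);
    }
  };
}

Registering it with Threads.SAME_THREAD_EXECUTOR (as done above) runs the callbacks synchronously on the thread that drives the service state change, so cleanup is complete before the controller observes termination.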
Use of io.cdap.cdap.api.spark.SparkSpecification in project cdap by cdapio.
The class DistributedSparkProgramRunner, method validateOptions:
@Override
protected void validateOptions(Program program, ProgramOptions options) {
  super.validateOptions(program, options);
  // Extract and verify parameters
  ApplicationSpecification appSpec = program.getApplicationSpecification();
  Preconditions.checkNotNull(appSpec, "Missing application specification for %s", program.getId());
  ProgramType processorType = program.getType();
  Preconditions.checkNotNull(processorType, "Missing processor type for %s", program.getId());
  Preconditions.checkArgument(processorType == ProgramType.SPARK,
                              "Only SPARK process type is supported. Program type is %s for %s",
                              processorType, program.getId());
  SparkSpecification spec = appSpec.getSpark().get(program.getName());
  Preconditions.checkNotNull(spec, "Missing SparkSpecification for %s", program.getId());
}
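These Guava Preconditions calls substitute the %s placeholders in the message template only when the check actually fails. A small illustration of the failure mode, with a made-up program id:

// Hypothetical illustration: checkNotNull throws NullPointerException with the
// formatted template when the reference is null.
SparkSpecification spec = null;
Preconditions.checkNotNull(spec, "Missing SparkSpecification for %s", "ns1.app1.spark.MySpark");
// -> java.lang.NullPointerException: Missing SparkSpecification for ns1.app1.spark.MySpark

checkArgument behaves the same way but throws IllegalArgumentException, which is why the process-type check above reports both the offending type and the program id.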
Use of io.cdap.cdap.api.spark.SparkSpecification in project cdap by cdapio.
The class SparkSpecificationCodec, method deserialize:
@Override
public SparkSpecification deserialize(JsonElement json, Type typeOfT,
                                      JsonDeserializationContext context) throws JsonParseException {
  JsonObject jsonObj = json.getAsJsonObject();
  String className = jsonObj.get("className").getAsString();
  String name = jsonObj.get("name").getAsString();
  String description = jsonObj.get("description").getAsString();
  Map<String, Plugin> plugins = deserializeMap(jsonObj.get("plugins"), context, Plugin.class);
  String mainClassName = jsonObj.has("mainClassName") ? jsonObj.get("mainClassName").getAsString() : null;
  Set<String> datasets = deserializeSet(jsonObj.get("datasets"), context, String.class);
  Map<String, String> properties = deserializeMap(jsonObj.get("properties"), context, String.class);
  Resources clientResources = deserializeResources(jsonObj, "client", context);
  Resources driverResources = deserializeResources(jsonObj, "driver", context);
  Resources executorResources = deserializeResources(jsonObj, "executor", context);
  List<SparkHttpServiceHandlerSpecification> handlers =
    deserializeList(jsonObj.get("handlers"), context, SparkHttpServiceHandlerSpecification.class);
  return new SparkSpecification(className, name, description, mainClassName, datasets, properties,
                                clientResources, driverResources, executorResources, handlers, plugins);
}
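A Gson codec registered for SparkSpecification typically implements the serializer side as well, writing the same field names the deserializer reads. A minimal sketch of that counterpart, assuming the standard SparkSpecification getters and delegating collection handling to the context; the resource field names ("clientResources" etc.) are assumed to mirror what deserializeResources reads, and the real SparkSpecificationCodec may use its own helper methods instead:

@Override
public JsonElement serialize(SparkSpecification spec, Type typeOfSrc, JsonSerializationContext context) {
  JsonObject jsonObj = new JsonObject();
  jsonObj.addProperty("className", spec.getClassName());
  jsonObj.addProperty("name", spec.getName());
  jsonObj.addProperty("description", spec.getDescription());
  // mainClassName is optional, matching the jsonObj.has("mainClassName") check on read
  if (spec.getMainClassName() != null) {
    jsonObj.addProperty("mainClassName", spec.getMainClassName());
  }
  jsonObj.add("plugins", context.serialize(spec.getPlugins()));
  jsonObj.add("datasets", context.serialize(spec.getDatasets()));
  jsonObj.add("properties", context.serialize(spec.getProperties()));
  jsonObj.add("clientResources", context.serialize(spec.getClientResources()));
  jsonObj.add("driverResources", context.serialize(spec.getDriverResources()));
  jsonObj.add("executorResources", context.serialize(spec.getExecutorResources()));
  jsonObj.add("handlers", context.serialize(spec.getHandlers()));
  return jsonObj;
}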
Use of io.cdap.cdap.api.spark.SparkSpecification in project cdap by caskdata.
The class DefaultSparkConfigurer, method createSpecification:
public SparkSpecification createSpecification() {
  Set<String> datasets = new HashSet<>();
  // Grab all @Property and @Dataset fields
  Reflections.visit(spark, spark.getClass(),
                    new PropertyFieldExtractor(properties), new DataSetFieldExtractor(datasets));
  return new SparkSpecification(spark.getClass().getName(), name, description, mainClassName,
                                datasets, properties, clientResources, driverResources,
                                executorResources, getHandlers(), getPlugins());
}
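The two visitors scan the Spark program instance for annotated fields: PropertyFieldExtractor fills the properties map and DataSetFieldExtractor fills the datasets set. A hypothetical program showing the kind of fields they pick up; the class, dataset name, and main class are made up, and the annotation behavior is assumed from the CDAP API:

public class WordCountSpark extends AbstractSpark {

  // Collected into the properties map by PropertyFieldExtractor
  @Property
  private String stopWordsFile;

  // Collected into the datasets set by DataSetFieldExtractor
  @UseDataSet("wordCounts")
  private KeyValueTable wordCounts;

  @Override
  protected void configure() {
    setName("WordCountSpark");
    setDescription("Counts words with Spark");
    setMainClassName("com.example.WordCountProgram"); // hypothetical main class
  }
}

Under these assumptions, the SparkSpecification created above would carry "wordCounts" in its datasets set and a "stopWordsFile" entry in its properties map, alongside the name, description, and main class set in configure().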