Use of co.cask.cdap.api.Resources in project cdap by caskdata.
The class WorkerSpecificationCodec, method serialize.
@Override
public JsonElement serialize(WorkerSpecification spec, Type typeOfSrc, JsonSerializationContext context) {
  JsonObject object = new JsonObject();
  object.addProperty("className", spec.getClassName());
  object.addProperty("name", spec.getName());
  object.addProperty("description", spec.getDescription());
  object.add("properties", serializeMap(spec.getProperties(), context, String.class));
  object.add("resources", context.serialize(spec.getResources(), Resources.class));
  object.add("datasets", serializeSet(spec.getDatasets(), context, String.class));
  object.addProperty("instances", spec.getInstances());
  return object;
}
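For context, a serializer like this only takes effect once it is registered as a Gson type adapter. The following is a minimal sketch of that wiring, assuming a no-argument WorkerSpecificationCodec constructor and a workerSpec variable in scope (neither is shown in the source):

// Minimal sketch, not from the CDAP source: registering the codec with Gson.
Gson gson = new GsonBuilder()
  // Route WorkerSpecification serialization through the codec above.
  .registerTypeAdapter(WorkerSpecification.class, new WorkerSpecificationCodec())
  .create();
// The resulting JSON object carries the "className", "name", "description",
// "properties", "resources", "datasets" and "instances" fields set above.
String json = gson.toJson(workerSpec, WorkerSpecification.class);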
Use of co.cask.cdap.api.Resources in project cdap by caskdata.
The class DistributedSparkProgramRunner, method setupLaunchConfig.
@Override
protected void setupLaunchConfig(LaunchConfig launchConfig, Program program, ProgramOptions options, CConfiguration cConf, Configuration hConf, File tempDir) throws IOException {
  // Update the container hConf
  hConf.setBoolean(SparkRuntimeContextConfig.HCONF_ATTR_CLUSTER_MODE, true);
  hConf.set("hive.metastore.token.signature", HiveAuthFactory.HS2_CLIENT_TOKEN);
  if (SecurityUtil.isKerberosEnabled(cConf)) {
    // Need to divide the interval by 0.8 because Spark logic has a 0.8 discount on the interval.
    // If we don't offset it, it will look for the new credentials too soon.
    // Also add 5 seconds to the interval to give the master time to push the changes to the Spark client container.
    hConf.setLong(SparkRuntimeContextConfig.HCONF_ATTR_CREDENTIALS_UPDATE_INTERVAL_MS,
                  (long) ((secureStoreRenewer.getUpdateInterval() + 5000) / 0.8));
  }
  // Set up the launch config
  ApplicationSpecification appSpec = program.getApplicationSpecification();
  SparkSpecification spec = appSpec.getSpark().get(program.getName());
  Map<String, String> clientArgs = RuntimeArguments.extractScope("task", "client", options.getUserArguments().asMap());
  Resources resources = SystemArguments.getResources(clientArgs, spec.getClientResources());
  // Add the runnable. Only one instance for the Spark client.
  launchConfig.addRunnable(spec.getName(), new SparkTwillRunnable(spec.getName()), resources, 1);
  // Add extra resources, classpath, dependencies, env and set up the ClassAcceptor
  Map<String, LocalizeResource> localizeResources = new HashMap<>();
  Map<String, String> extraEnv = new HashMap<>(SparkPackageUtils.getSparkClientEnv());
  SparkPackageUtils.prepareSparkResources(sparkCompat, locationFactory, tempDir, localizeResources, extraEnv);
  // Add the MapReduce resources and path as well for the InputFormat/OutputFormat classes
  MapReduceContainerHelper.localizeFramework(hConf, localizeResources);
  extraEnv.put(Constants.SPARK_COMPAT_ENV, sparkCompat.getCompat());
  launchConfig.addExtraResources(localizeResources)
    .addExtraDependencies(SparkProgramRuntimeProvider.class)
    .addExtraEnv(extraEnv)
    .addExtraClasspath(MapReduceContainerHelper.addMapReduceClassPath(hConf, new ArrayList<String>()))
    .setClassAcceptor(createBundlerClassAcceptor());
}
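To make the credentials-interval adjustment concrete, here is the arithmetic with a hypothetical renewer interval of 60 seconds (the value is illustrative only, not from the source):

// Illustration of the adjustment above, with a made-up 60s renewer interval.
long updateIntervalMs = 60_000L;
long adjustedMs = (long) ((updateIntervalMs + 5000) / 0.8); // 81,250 ms
// Spark multiplies the configured interval by 0.8 before acting on it, so
// dividing by 0.8 restores the intended period; the extra 5 seconds gives the
// master time to push refreshed credentials to the Spark client container.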
Use of co.cask.cdap.api.Resources in project cdap by caskdata.
The class DistributedMapReduceProgramRunner, method setupLaunchConfig.
@Override
protected void setupLaunchConfig(LaunchConfig launchConfig, Program program, ProgramOptions options, CConfiguration cConf, Configuration hConf, File tempDir) throws IOException {
  ApplicationSpecification appSpec = program.getApplicationSpecification();
  MapReduceSpecification spec = appSpec.getMapReduce().get(program.getName());
  // Get the resources for the container that runs the mapred client that will launch the actual mapred job.
  Map<String, String> clientArgs = RuntimeArguments.extractScope("task", "client", options.getUserArguments().asMap());
  Resources resources = SystemArguments.getResources(clientArgs, spec.getDriverResources());
  // Add the runnable. Only one instance for the MR driver.
  launchConfig.addRunnable(spec.getName(), new MapReduceTwillRunnable(spec.getName()), resources, 1)
    .addExtraResources(MapReduceContainerHelper.localizeFramework(hConf, new HashMap<String, LocalizeResource>()))
    .addExtraClasspath(MapReduceContainerHelper.addMapReduceClassPath(hConf, new ArrayList<String>()))
    .addExtraDependencies(YarnClientProtocolProvider.class);
}
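As a hedged illustration of the "task"/"client" scope extraction used by both program runners, assuming extractScope follows the usual CDAP scoping convention (scoped keys have their prefix stripped and override unscoped counterparts; key names and values below are made up):

// Sketch with hypothetical arguments: keys prefixed "task.client." have the
// prefix stripped so they apply specifically to the client container.
Map<String, String> userArgs = ImmutableMap.of(
  "task.client.system.resources.memory", "2048",
  "system.resources.cores", "2");
Map<String, String> clientArgs = RuntimeArguments.extractScope("task", "client", userArgs);
// clientArgs => {system.resources.memory=2048, system.resources.cores=2}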
Use of co.cask.cdap.api.Resources in project cdap by caskdata.
The class SystemArgumentsTest, method testSystemResources.
@Test
public void testSystemResources() {
  Resources defaultResources = new Resources();
  // Nothing specified
  Resources resources = SystemArguments.getResources(ImmutableMap.<String, String>of(), defaultResources);
  Assert.assertEquals(defaultResources, resources);
  // Specify memory
  resources = SystemArguments.getResources(ImmutableMap.of("system.resources.memory", "10"), defaultResources);
  Assert.assertEquals(new Resources(10), resources);
  // Specify cores
  resources = SystemArguments.getResources(ImmutableMap.of("system.resources.cores", "8"), defaultResources);
  Assert.assertEquals(new Resources(defaultResources.getMemoryMB(), 8), resources);
  // Specify both memory and cores
  resources = SystemArguments.getResources(ImmutableMap.of("system.resources.memory", "10", "system.resources.cores", "8"), defaultResources);
  Assert.assertEquals(new Resources(10, 8), resources);
  // Specify invalid memory
  resources = SystemArguments.getResources(ImmutableMap.of("system.resources.memory", "-10"), defaultResources);
  Assert.assertEquals(defaultResources, resources);
  // Specify invalid cores
  resources = SystemArguments.getResources(ImmutableMap.of("system.resources.cores", "abc"), defaultResources);
  Assert.assertEquals(defaultResources, resources);
  // Specify invalid memory and valid cores
  resources = SystemArguments.getResources(ImmutableMap.of("system.resources.memory", "xyz", "system.resources.cores", "8"), defaultResources);
  Assert.assertEquals(new Resources(defaultResources.getMemoryMB(), 8), resources);
  // Specify valid memory and invalid cores
  resources = SystemArguments.getResources(ImmutableMap.of("system.resources.memory", "10", "system.resources.cores", "-8"), defaultResources);
  Assert.assertEquals(new Resources(10, defaultResources.getVirtualCores()), resources);
  // Specify invalid memory and invalid cores
  resources = SystemArguments.getResources(ImmutableMap.of("system.resources.memory", "-1", "system.resources.cores", "-8"), defaultResources);
  Assert.assertEquals(defaultResources, resources);
}
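The assertions above imply that memory and cores fall back to the defaults independently whenever a value is missing or does not parse as a positive integer. A rough reimplementation of that resolution logic, written here only to spell out the behavior (this is not the actual SystemArguments code):

// Illustrative only: each field falls back to its default independently.
static Resources resolveResources(Map<String, String> args, Resources defaults) {
  int memoryMB = parsePositive(args.get("system.resources.memory"), defaults.getMemoryMB());
  int cores = parsePositive(args.get("system.resources.cores"), defaults.getVirtualCores());
  return new Resources(memoryMB, cores);
}

// Hypothetical helper: accept only values that parse to a positive integer.
// Integer.parseInt(null) also throws NumberFormatException, so a missing key
// falls through to the default as well.
static int parsePositive(String value, int defaultValue) {
  try {
    int parsed = Integer.parseInt(value);
    return parsed > 0 ? parsed : defaultValue;
  } catch (NumberFormatException e) {
    return defaultValue;
  }
}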
Use of co.cask.cdap.api.Resources in project cdap by caskdata.
The class ServiceSpecificationCodec, method serialize.
@Override
public JsonElement serialize(ServiceSpecification spec, Type typeOfSrc, JsonSerializationContext context) {
  JsonObject object = new JsonObject();
  object.addProperty("className", spec.getClassName());
  object.addProperty("name", spec.getName());
  object.addProperty("description", spec.getDescription());
  object.add("handlers", serializeMap(spec.getHandlers(), context, HttpServiceHandlerSpecification.class));
  object.add("resources", context.serialize(spec.getResources(), Resources.class));
  object.addProperty("instances", spec.getInstances());
  return object;
}
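Assuming this codec is registered with Gson in the same way as the worker codec sketched earlier, the serialized form would look roughly as follows (all field names inside "resources" and all values are hypothetical):

// Hypothetical shape of the JSON produced by serialize above:
// {
//   "className": "com.example.MyService",
//   "name": "MyService",
//   "description": "Example service",
//   "handlers": { ... },                    // one entry per HTTP handler spec
//   "resources": { "virtualCores": 1, "memoryMB": 512 },
//   "instances": 1
// }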