Use of io.cdap.cdap.api.Resources in project cdap by cdapio.
The class ServiceSpecificationCodec, method decodeOldSpec.
private ServiceSpecification decodeOldSpec(JsonObject json) {
  String className = json.get("classname").getAsString();
  TwillSpecification twillSpec =
    twillSpecificationAdapter.fromJson(json.get("spec").getAsString()).getTwillSpecification();
  Map<String, HttpServiceHandlerSpecification> handlers = Maps.newHashMap();
  RuntimeSpecification handlerSpec = twillSpec.getRunnables().get(twillSpec.getName());
  Map<String, String> configs = handlerSpec.getRunnableSpecification().getConfigs();

  // Get the class names of all handlers. They are stored in the handler runnable spec configs.
  List<String> handlerClasses = GSON.fromJson(configs.get("service.runnable.handlers"),
                                              new TypeToken<List<String>>() { }.getType());
  List<JsonObject> handlerSpecs = GSON.fromJson(configs.get("service.runnable.handler.spec"),
                                                new TypeToken<List<JsonObject>>() { }.getType());

  for (int i = 0; i < handlerClasses.size(); i++) {
    String handlerClass = handlerClasses.get(i);
    JsonObject spec = handlerSpecs.get(i);
    Map<String, String> properties = GSON.fromJson(spec.get("properties"),
                                                   new TypeToken<Map<String, String>>() { }.getType());
    // Reconstruct the HttpServiceSpecification. However, there is no way to determine the datasets
    // or endpoints, as they are not recorded in the old spec. That's fine, since the spec is only
    // used to load data from MDS during redeploy.
    handlers.put(spec.get("name").getAsString(),
                 new HttpServiceHandlerSpecification(handlerClass,
                                                     spec.get("name").getAsString(),
                                                     spec.get("description").getAsString(),
                                                     properties,
                                                     Collections.emptySet(),
                                                     Collections.emptyList()));
  }

  ResourceSpecification resourceSpec = handlerSpec.getResourceSpecification();
  return new ServiceSpecification(className, twillSpec.getName(), twillSpec.getName(), handlers,
                                  new Resources(resourceSpec.getMemorySize(), resourceSpec.getVirtualCores()),
                                  resourceSpec.getInstances(), Collections.emptyMap());
}
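The handler class names and specs are read back with plain Gson TypeToken deserialization. As a minimal, standalone sketch of that step (the class names below are hypothetical, not taken from a real spec):

  // Sketch: deserializing a JSON array of handler class names, as stored under
  // the "service.runnable.handlers" config key. The class names here are made up.
  Gson gson = new Gson();
  String handlersJson = "[\"com.example.PingHandler\", \"com.example.StatusHandler\"]";
  List<String> handlerClasses = gson.fromJson(handlersJson, new TypeToken<List<String>>() { }.getType());
  // handlerClasses -> [com.example.PingHandler, com.example.StatusHandler]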
Use of io.cdap.cdap.api.Resources in project cdap by cdapio.
The class ServiceSpecificationCodec, method serialize.
@Override
public JsonElement serialize(ServiceSpecification spec, Type typeOfSrc, JsonSerializationContext context) {
  JsonObject object = new JsonObject();
  object.addProperty("className", spec.getClassName());
  object.addProperty("name", spec.getName());
  object.addProperty("description", spec.getDescription());
  object.add("plugins", serializeMap(spec.getPlugins(), context, Plugin.class));
  object.add("handlers", serializeMap(spec.getHandlers(), context, HttpServiceHandlerSpecification.class));
  object.add("resources", context.serialize(spec.getResources(), Resources.class));
  object.addProperty("instances", spec.getInstances());
  object.add("properties", serializeMap(spec.getProperties(), context, String.class));
  return object;
}
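This method implements Gson's JsonSerializer contract, so the codec only takes effect once it is registered on a GsonBuilder. The exact wiring inside CDAP is not shown in this snippet; a typical registration would look roughly like the following sketch (it assumes the codec has a no-argument constructor):

  // Sketch only: registering the codec so serialize() above is invoked for ServiceSpecification.
  // Assumes ServiceSpecificationCodec can be constructed with no arguments; CDAP's actual setup may differ.
  Gson gson = new GsonBuilder()
    .registerTypeAdapter(ServiceSpecification.class, new ServiceSpecificationCodec())
    .create();
  String json = gson.toJson(spec, ServiceSpecification.class);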
Use of io.cdap.cdap.api.Resources in project cdap by cdapio.
The class DistributedWorkflowProgramRunner, method findDriverResources.
/**
 * Returns the {@link Resources} requirement for the workflow runnable, deduced from the
 * Spark or MapReduce driver resources requirements.
 */
private Resources findDriverResources(Collection<WorkflowNode> nodes, Map<String, Resources> runnablesResources) {
  // Find the resource requirements for the workflow based on the nodes' memory requirements
  Resources resources = new Resources();
  for (WorkflowNode node : nodes) {
    switch (node.getType()) {
      case ACTION:
        String programName = ((WorkflowActionNode) node).getProgram().getProgramName();
        Resources runnableResources = runnablesResources.get(programName);
        if (runnableResources != null) {
          resources = maxResources(resources, runnableResources);
        }
        break;
      case FORK:
        Resources forkResources = ((WorkflowForkNode) node).getBranches().stream()
          .map(branch -> findDriverResources(branch, runnablesResources))
          .reduce(this::mergeForkResources)
          .orElse(resources);
        resources = maxResources(resources, forkResources);
        break;
      case CONDITION:
        Resources branchesResources = maxResources(
          findDriverResources(((WorkflowConditionNode) node).getIfBranch(), runnablesResources),
          findDriverResources(((WorkflowConditionNode) node).getElseBranch(), runnablesResources));
        resources = maxResources(resources, branchesResources);
        break;
      default:
        // This shouldn't happen unless a new node type is added
        LOG.warn("Ignoring unsupported Workflow node type {}", node.getType());
    }
  }
  return resources;
}
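The helpers maxResources and mergeForkResources are not shown in this snippet; mergeForkResources combines the requirements of fork branches that run in parallel. A plausible shape for maxResources, assuming Resources exposes getMemoryMB() and getVirtualCores() getters, is a per-dimension maximum:

  // Sketch only: per-dimension maximum of two resource requirements.
  // Assumes Resources exposes getMemoryMB() and getVirtualCores(); the real helper is not shown above.
  private Resources maxResources(Resources left, Resources right) {
    return new Resources(Math.max(left.getMemoryMB(), right.getMemoryMB()),
                         Math.max(left.getVirtualCores(), right.getVirtualCores()));
  }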
Use of io.cdap.cdap.api.Resources in project cdap by cdapio.
The class PipelineSpecGeneratorTest, method testInputSchemasWithDifferentName.
@Test
public void testInputSchemasWithDifferentName() {
  ETLBatchConfig etlConfig = ETLBatchConfig.builder()
    .addStage(new ETLStage("s1", MOCK_SOURCE))
    .addStage(new ETLStage("s2", MOCK_SOURCE2))
    .addStage(new ETLStage("sink", MOCK_SINK))
    .addConnection("s1", "sink")
    .addConnection("s2", "sink")
    .setNumOfRecordsPreview(100)
    .build();
  Map<String, String> emptyMap = Collections.emptyMap();
  PipelineSpec expected = BatchPipelineSpec.builder()
    .addStage(StageSpec.builder("s1", new PluginSpec(BatchSource.PLUGIN_TYPE, "mocksource", emptyMap, ARTIFACT_ID))
                .addOutput(SCHEMA_A, "sink")
                .build())
    .addStage(StageSpec.builder("s2", new PluginSpec(BatchSource.PLUGIN_TYPE, "mocksource2", emptyMap, ARTIFACT_ID))
                .addOutput(SCHEMA_A2, "sink")
                .build())
    .addStage(StageSpec.builder("sink", new PluginSpec(BatchSink.PLUGIN_TYPE, "mocksink", emptyMap, ARTIFACT_ID))
                .addInputSchemas(ImmutableMap.of("s1", SCHEMA_A, "s2", SCHEMA_A2))
                .setErrorSchema(SCHEMA_A)
                .build())
    .addConnections(etlConfig.getConnections())
    .setResources(etlConfig.getResources())
    .setDriverResources(new Resources(1024, 1))
    .setClientResources(new Resources(1024, 1))
    .setStageLoggingEnabled(etlConfig.isStageLoggingEnabled())
    .setNumOfRecordsPreview(etlConfig.getNumOfRecordsPreview())
    .build();
  PipelineSpec actual = specGenerator.generateSpec(etlConfig);
  Assert.assertEquals(expected, actual);
}
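The expected spec pins both the driver and client resources to new Resources(1024, 1). Assuming the (memoryMB, virtualCores) argument order used throughout this page, that corresponds to 1 GB of memory and a single virtual core:

  // Defaults asserted above, assuming the Resources(memoryMB, virtualCores) argument order:
  Resources defaultDriver = new Resources(1024, 1);  // 1024 MB, 1 vcore
  Resources defaultClient = new Resources(1024, 1);  // same default for the client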
Use of io.cdap.cdap.api.Resources in project cdap by cdapio.
The class ETLBatchConfigTest, method testUpgrade.
@Test
public void testUpgrade() throws Exception {
  final ArtifactSelectorConfig artifact = new ArtifactSelectorConfig("SYSTEM", "universal", "1.0.0");
  ETLStage source = new ETLStage("DataGenerator", ImmutableMap.of("p1", "v1"), null);
  io.cdap.cdap.etl.proto.v1.ETLStage sourceNew = new io.cdap.cdap.etl.proto.v1.ETLStage(
    "DataGenerator.1", new Plugin(source.getName(), source.getProperties(), artifact),
    source.getErrorDatasetName());
  ETLStage transform1 = new ETLStage("Script", ImmutableMap.of("script", "something"), null);
  io.cdap.cdap.etl.proto.v1.ETLStage transform1New = new io.cdap.cdap.etl.proto.v1.ETLStage(
    "Script.2", new Plugin(transform1.getName(), transform1.getProperties(), artifact),
    transform1.getErrorDatasetName());
  ETLStage transform2 = new ETLStage("Script", null, null);
  io.cdap.cdap.etl.proto.v1.ETLStage transform2New = new io.cdap.cdap.etl.proto.v1.ETLStage(
    "Script.3", new Plugin(transform2.getName(), transform2.getProperties(), artifact),
    transform2.getErrorDatasetName());
  ETLStage transform3 = new ETLStage("Validator", ImmutableMap.of("p1", "v1", "p2", "v2"), "errorDS");
  io.cdap.cdap.etl.proto.v1.ETLStage transform3New = new io.cdap.cdap.etl.proto.v1.ETLStage(
    "Validator.4", new Plugin(transform3.getName(), transform3.getProperties(), artifact),
    transform3.getErrorDatasetName());
  ETLStage sink1 = new ETLStage("Table", ImmutableMap.of("rowkey", "xyz"), null);
  io.cdap.cdap.etl.proto.v1.ETLStage sink1New = new io.cdap.cdap.etl.proto.v1.ETLStage(
    "Table.5", new Plugin(sink1.getName(), sink1.getProperties(), artifact),
    sink1.getErrorDatasetName());
  ETLStage sink2 = new ETLStage("HDFS", ImmutableMap.of("name", "abc"), null);
  io.cdap.cdap.etl.proto.v1.ETLStage sink2New = new io.cdap.cdap.etl.proto.v1.ETLStage(
    "HDFS.6", new Plugin(sink2.getName(), sink2.getProperties(), artifact),
    sink2.getErrorDatasetName());
  ETLStage action = new ETLStage("Email", ImmutableMap.of("email", "slj@example.com"), null);
  io.cdap.cdap.etl.proto.v1.ETLStage actionNew = new io.cdap.cdap.etl.proto.v1.ETLStage(
    "Email.1", new Plugin(action.getName(), action.getProperties(), artifact),
    action.getErrorDatasetName());

  List<Connection> connections = new ArrayList<>();
  connections.add(new Connection(sourceNew.getName(), transform1New.getName()));
  connections.add(new Connection(transform1New.getName(), transform2New.getName()));
  connections.add(new Connection(transform2New.getName(), transform3New.getName()));
  connections.add(new Connection(transform3New.getName(), sink1New.getName()));
  connections.add(new Connection(transform3New.getName(), sink2New.getName()));

  String schedule = "*/5 * * * *";
  Resources resources = new Resources(1024, 1);
  ETLBatchConfig config = new ETLBatchConfig(schedule, source, ImmutableList.of(sink1, sink2),
                                             ImmutableList.of(transform1, transform2, transform3),
                                             resources, ImmutableList.of(action));
  io.cdap.cdap.etl.proto.v1.ETLBatchConfig configNew = io.cdap.cdap.etl.proto.v1.ETLBatchConfig.builder(schedule)
    .setSource(sourceNew)
    .addSink(sink1New)
    .addSink(sink2New)
    .addTransform(transform1New)
    .addTransform(transform2New)
    .addTransform(transform3New)
    .addConnections(connections)
    .setResources(resources)
    .setDriverResources(resources)
    .addAction(actionNew)
    .build();
  Assert.assertEquals(configNew, config.upgrade(new UpgradeContext() {

    @Nullable
    @Override
    public ArtifactSelectorConfig getPluginArtifact(String pluginType, String pluginName) {
      return new ArtifactSelectorConfig(ArtifactScope.SYSTEM.name(), "universal", "1.0.0");
    }
  }));
}
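Both tests compare entire config/spec objects with Assert.assertEquals, which only holds if Resources (and the objects that embed it) use value equality. A minimal sanity check along those lines, assuming Resources compares memory and cores in equals():

  // Sketch: the assertions above rely on Resources implementing value equality.
  Assert.assertEquals(new Resources(1024, 1), new Resources(1024, 1));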