Example use of alien4cloud.paas.wf.TopologyContext in the alien4cloud project: class TopologyCompositionService, method processTopologyComposition.
public void processTopologyComposition(Topology topology) {
    // Collect every embedded (substituted) topology template reachable from this topology.
    Deque<CompositionCouple> compositionStack = new ArrayDeque<>();
    recursivelyBuildSubstitutionStack(topology, compositionStack, "");
    if (compositionStack.isEmpty()) {
        // No composition involved: nothing to substitute, workflows stay untouched.
        return;
    }
    // Walk the stack deepest-first so nested compositions are substituted before their parents.
    for (Iterator<CompositionCouple> deepestFirst = compositionStack.descendingIterator(); deepestFirst.hasNext();) {
        processComposition(deepestFirst.next());
    }
    if (log.isDebugEnabled()) {
        log.debug(String.format("Topology composition has been processed for topology <%s> substituting %d embeded topologies", topology.getId(), compositionStack.size()));
    }
    // Standard workflows are reinitialized whenever a composition has been processed.
    // TODO: find a better way to manage this
    TopologyContext context = workflowBuilderService.buildTopologyContext(topology);
    workflowBuilderService.reinitWorkflow(INSTALL, context, false);
    workflowBuilderService.reinitWorkflow(START, context, false);
    workflowBuilderService.reinitWorkflow(STOP, context, false);
    workflowBuilderService.reinitWorkflow(UNINSTALL, context, false);
    workflowBuilderService.postProcessTopologyWorkflows(context);
}
Example use of alien4cloud.paas.wf.TopologyContext in the alien4cloud project: class ArchiveIndexer, method indexTopology.
private void indexTopology(final ArchiveRoot archiveRoot, List<ParsingError> parsingErrors, String archiveName, String archiveVersion) {
    final Topology topology = archiveRoot.getTopology();
    if (topology == null || topology.isEmpty()) {
        // The archive carries no topology template: nothing to index.
        return;
    }
    topology.setTags(archiveRoot.getArchive().getTags());
    if (archiveRoot.hasToscaTypes()) {
        // The archive also declares types; assume the embedded topology uses them,
        // so the topology must depend on this CSAR itself.
        topology.getDependencies()
                .add(new CSARDependency(archiveRoot.getArchive().getName(), archiveRoot.getArchive().getVersion(), archiveRoot.getArchive().getHash()));
    }
    // Build a cached context so workflow initialization can resolve TOSCA elements.
    TopologyContext context = workflowBuilderService.buildCachedTopologyContext(new TopologyContext() {
        @Override
        public String getDSLVersion() {
            return archiveRoot.getArchive().getToscaDefinitionsVersion();
        }

        @Override
        public Topology getTopology() {
            return topology;
        }

        @Override
        public <T extends AbstractToscaType> T findElement(Class<T> clazz, String elementId) {
            return ToscaContext.get(clazz, elementId);
        }
    });
    workflowBuilderService.initWorkflows(context);
    parsingErrors.add(new ParsingError(ParsingErrorLevel.INFO, ErrorCode.TOPOLOGY_DETECTED, "", null, "A topology template has been detected", null, archiveName));
    topologyServiceCore.saveTopology(topology);
    topologySubstitutionService.updateSubstitutionType(topology, archiveRoot.getArchive());
}
Example use of alien4cloud.paas.wf.TopologyContext in the alien4cloud project: class ReplaceNodeProcessor, method process.
@Override
@Override
public void process(Csar csar, Topology topology, ReplaceNodeOperation operation) {
    // Replaces the node template named by the operation with a template of the new type,
    // carrying over its name, tags and relationships, and rebuilds the impacted workflow steps.
    Map<String, NodeTemplate> nodeTemplates = TopologyUtils.getNodeTemplates(topology);
    NodeTemplate oldNodeTemplate = TopologyUtils.getNodeTemplate(topology.getId(), operation.getNodeName(), nodeTemplates);
    String[] splittedId = operation.getNewTypeId().split(":");
    // When we replace the target of a relationship, stash the relationships contained in the
    // source nodes while the target node is removed and re-added.
    Map<String, Map<String, RelationshipTemplate>> relationshipsSwapped = removeTheRelationshipsOnSource(topology, csar, operation.getNodeName());
    // Unload the old type and remove the old node from the topology and the workflows.
    topologyService.unloadType(topology, oldNodeTemplate.getType());
    nodeTemplates.remove(oldNodeTemplate.getName());
    workflowBuilderService.removeNode(topology, csar, oldNodeTemplate.getName());
    // Build the replacement template from the new type; loading the type updates the
    // topology's dependencies.
    NodeType newType = toscaTypeSearchService.findOrFail(NodeType.class, splittedId[0], splittedId[1]);
    newType = topologyService.loadType(topology, newType);
    NodeTemplate newNodeTemplate = TemplateBuilder.buildNodeTemplate(newType, oldNodeTemplate, false);
    // Keep the original identity. Note: operation.getNodeName() equals oldNodeTemplate.getName()
    // (oldNodeTemplate was looked up by that name), so a single setName call is enough — the
    // former redundant setName(operation.getNodeName()) was dead code and has been removed.
    newNodeTemplate.setName(oldNodeTemplate.getName());
    newNodeTemplate.setTags(oldNodeTemplate.getTags());
    newNodeTemplate.setRelationships(oldNodeTemplate.getRelationships());
    // Put the new template in the topology.
    nodeTemplates.put(oldNodeTemplate.getName(), newNodeTemplate);
    // When replacing a node with another, some relationships targeting capabilities or requirements
    // may be impacted and moved to another capability/requirement name.
    updateRelationshipsCapabilitiesRelationships(topology, newNodeTemplate);
    // FIXME we should remove outputs/inputs, others here ?
    if (topology.getSubstitutionMapping() != null) {
        removeNodeTemplateSubstitutionTargetMapEntry(oldNodeTemplate.getName(), topology.getSubstitutionMapping().getCapabilities());
        removeNodeTemplateSubstitutionTargetMapEntry(oldNodeTemplate.getName(), topology.getSubstitutionMapping().getRequirements());
    }
    log.debug("Replacing the node template[ {} ] with [ {} ] bound to the node type [ {} ] on the topology [ {} ] .", oldNodeTemplate.getName(), oldNodeTemplate.getName(), operation.getNewTypeId(), topology.getId());
    // Add the new node back to the workflows.
    TopologyContext topologyContext = workflowBuilderService.buildTopologyContext(topology, csar);
    workflowBuilderService.addNode(topologyContext, oldNodeTemplate.getName());
    // Restore the relationships previously swapped out of the source nodes.
    addTheRelationshipsOnSource(topology, relationshipsSwapped);
    // Register the relationships from the new node in the workflows.
    safe(newNodeTemplate.getRelationships()).forEach((relationshipId, relationshipTemplate) -> workflowBuilderService.addRelationship(topologyContext, newNodeTemplate.getName(), relationshipId));
    // Register the relationships targeting the new node in the workflows.
    TopologyUtils.getTargetRelationships(oldNodeTemplate.getName(), nodeTemplates).forEach(relationshipEntry -> workflowBuilderService.addRelationship(topologyContext, relationshipEntry.getSource().getName(), relationshipEntry.getRelationshipId()));
    if (!operation.isSkipAutoCompletion()) {
        danglingRequirementService.addDanglingRequirements(topology, topologyContext, newNodeTemplate, null);
    }
}
Example use of alien4cloud.paas.wf.TopologyContext in the alien4cloud project: class WorkflowPostProcessor, method processWorkflows.
/**
* Process workflows of a topology
*
* @param topology the topology to process workflow
* @param topologyNode the yaml node of the topology
*/
public void processWorkflows(Topology topology, Node topologyNode) {
    // Build a cached context used to validate and normalize any workflows declared on the topology.
    final TopologyContext context = workflowBuilderService.buildCachedTopologyContext(new TopologyContext() {
        @Override
        public String getDSLVersion() {
            return ParsingContextExecution.getDefinitionVersion();
        }

        @Override
        public Topology getTopology() {
            return topology;
        }

        @Override
        public <T extends AbstractToscaType> T findElement(Class<T> elementClass, String elementId) {
            return ToscaContext.get(elementClass, elementId);
        }
    });
    // Steps declaring several activities are first split into single-activity steps.
    splitMultipleActivitiesSteps(context);
    finalizeParsedWorkflows(context, topologyNode);
}
Aggregations