Use of org.alien4cloud.tosca.model.templates.Topology in project alien4cloud by alien4cloud.
The class PostMatchingNodeSetupModifier, method doMergeNode.
@Override
protected boolean doMergeNode(Topology topology, FlowExecutionContext context, String nodeTemplateId, NodePropsOverride nodePropsOverride) {
    final ConfigChanged configChanged = new ConfigChanged();
    // Run the super method first; this processes the node template properties.
    configChanged.changed = super.doMergeNode(topology, context, nodeTemplateId, nodePropsOverride);
    // Then process capabilities.
    NodeTemplate nodeTemplate = topology.getNodeTemplates().get(nodeTemplateId);
    Iterator<Entry<String, NodeCapabilitiesPropsOverride>> capabilitiesOverrideIter = safe(nodePropsOverride.getCapabilities()).entrySet().iterator();
    while (capabilitiesOverrideIter.hasNext()) {
        Entry<String, NodeCapabilitiesPropsOverride> overrideCapabilityProperties = capabilitiesOverrideIter.next();
        Capability capability = safe(nodeTemplate.getCapabilities()).get(overrideCapabilityProperties.getKey());
        if (capability == null) {
            // Cleanup logic: the matched node no longer has this capability, so drop the override.
            configChanged.changed = true;
            capabilitiesOverrideIter.remove();
        } else {
            // Merge logic.
            // When the selected node has changed, we may need to clean up properties that were defined
            // but no longer exist on the capability.
            CapabilityType capabilityType = ToscaContext.get(CapabilityType.class, capability.getType());
            capability.setProperties(mergeProperties(overrideCapabilityProperties.getValue().getProperties(), capability.getProperties(),
                    capabilityType.getProperties(), s -> {
                        configChanged.changed = true;
                        context.log().info("The property [" + s + "] previously specified to configure capability ["
                                + overrideCapabilityProperties.getKey() + "] of node [" + nodeTemplateId
                                + "] cannot be set anymore as it is already specified by the matched location resource or in the topology.");
                    }));
        }
    }
    return configChanged.changed;
}
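The callback passed to mergeProperties above cannot simply assign to a local boolean, because Java lambdas may only capture effectively final locals; the ConfigChanged object is a small mutable holder that works around this. Below is a minimal, self-contained sketch of the same pattern; ConfigChangedFlag and mergeWithReport are hypothetical names for illustration only, not alien4cloud API.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

// Minimal illustration of the "mutable holder" pattern used above: a lambda passed to a
// merge routine cannot assign to a local boolean, so the flag lives in a small holder object.
public class MutableFlagSketch {

    static class ConfigChangedFlag {
        boolean changed;
    }

    // Keeps entries of "overrides" that also exist in "allowed"; reports every dropped key.
    static Map<String, String> mergeWithReport(Map<String, String> overrides, Map<String, String> allowed,
            Consumer<String> droppedKeyReporter) {
        Map<String, String> result = new HashMap<>();
        overrides.forEach((key, value) -> {
            if (allowed.containsKey(key)) {
                result.put(key, value);
            } else {
                droppedKeyReporter.accept(key);
            }
        });
        return result;
    }

    public static void main(String[] args) {
        final ConfigChangedFlag flag = new ConfigChangedFlag();
        Map<String, String> overrides = Map.of("mem_size", "4 GB", "obsolete_prop", "x");
        Map<String, String> allowed = Map.of("mem_size", "");

        Map<String, String> merged = mergeWithReport(overrides, allowed, key -> {
            // The lambda mutates the holder's field, not the (effectively final) local variable.
            flag.changed = true;
            System.out.println("Dropped override for property [" + key + "]");
        });

        System.out.println("changed=" + flag.changed + ", merged=" + merged);
    }
}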
Use of org.alien4cloud.tosca.model.templates.Topology in project alien4cloud by alien4cloud.
The class WorkflowsBuilderService, method removeRelationship.
public void removeRelationship(Topology topology, Csar csar, String sourceNodeId, String relationshipName, RelationshipTemplate relationshipTemplate) {
    TopologyContext topologyContext = buildTopologyContext(topology, csar);
    topologyContext.getTopology().getWorkflows().putAll(topologyContext.getTopology().getUnprocessedWorkflows());
    NodeTemplate sourceNode = topology.getNodeTemplates().get(sourceNodeId);
    String targetNodeId = relationshipTemplate.getTarget();
    NodeTemplate targetNode = topologyContext.getTopology().getNodeTemplates().get(targetNodeId);
    for (Workflow wf : topology.getWorkflows().values()) {
        AbstractWorkflowBuilder builder = getWorkflowBuilder(topologyContext.getDSLVersion(), wf);
        // Collect all relationships between the two nodes, in both directions.
        Map<String, RelationshipTemplate> sourceRelationships = sourceNode.getRelationships().entrySet().stream()
                .filter(relationshipEntry -> relationshipEntry.getValue().getTarget().equals(targetNodeId))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        Map<String, RelationshipTemplate> targetRelationships = AlienUtils.safe(targetNode.getRelationships()).entrySet().stream()
                .filter(relationshipEntry -> relationshipEntry.getValue().getTarget().equals(sourceNodeId))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        // Remove them all from the workflow, then re-add every relationship except the one being deleted,
        // so only the named relationship ends up removed.
        builder.removeRelationships(wf, sourceNodeId, sourceRelationships, targetNodeId, targetRelationships);
        sourceRelationships.entrySet().stream().filter(entry -> !entry.getKey().equals(relationshipName))
                .forEach(entry -> builder.addRelationship(wf, sourceNode.getName(), sourceNode, entry.getKey(), entry.getValue(), topologyContext));
        targetRelationships.forEach((id, relationship) -> builder.addRelationship(wf, targetNodeId, targetNode, id, relationship, topologyContext));
        WorkflowUtils.fillHostId(wf, topologyContext);
    }
    postProcessTopologyWorkflows(topologyContext);
    debugWorkflow(topologyContext.getTopology());
}
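Both stream pipelines above use the same idiom: filter a map's entries by a predicate on the value, then rebuild a map with Collectors.toMap. Here is a self-contained sketch of that idiom; the Relationship record and the sample data are hypothetical stand-ins for RelationshipTemplate.

import java.util.Map;
import java.util.stream.Collectors;

// Minimal illustration of the stream idiom used above: filter a map's entries by a predicate
// on the value, then collect the survivors back into a map.
public class RelationshipFilterSketch {

    record Relationship(String target) {
    }

    public static void main(String[] args) {
        Map<String, Relationship> relationships = Map.of(
                "hostedOnCompute", new Relationship("Compute"),
                "connectsToDb", new Relationship("Database"));

        String targetNodeId = "Compute";

        // Keep only the relationships pointing at the given target node.
        Map<String, Relationship> toTarget = relationships.entrySet().stream()
                .filter(entry -> entry.getValue().target().equals(targetNodeId))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        System.out.println(toTarget); // {hostedOnCompute=Relationship[target=Compute]}
    }
}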
Use of org.alien4cloud.tosca.model.templates.Topology in project alien4cloud by alien4cloud.
The class TopologyCompositionService, method recursivelyBuildSubstitutionStack.
/**
 * Deeply explore this topology to detect whether some type must be substituted by the corresponding topology template content, and feed the {@link Deque}. <br>
 * Also renames the nodes by prefixing all the node names.
 */
private void recursivelyBuildSubstitutionStack(Topology topology, Deque<CompositionCouple> stack, String prefix) {
    if (topology == null || topology.getNodeTemplates() == null || topology.getNodeTemplates().isEmpty()) {
        return;
    }
    for (Entry<String, NodeTemplate> nodeEntry : topology.getNodeTemplates().entrySet()) {
        String nodeName = nodeEntry.getKey();
        String type = nodeEntry.getValue().getType();
        // FIXME use tosca context, beware of child topologies (dependencies to use? conflicts?)
        NodeType nodeType = csarRepoSearchService.getRequiredElementInDependencies(NodeType.class, type, topology.getDependencies());
        if (nodeType.getSubstitutionTopologyId() != null) {
            // This node type is a proxy for a topology template.
            Topology child = topologyServiceCore.getOrFail(nodeType.getSubstitutionTopologyId());
            CompositionCouple couple = new CompositionCouple(topology, child, nodeName, nodeName + "_");
            renameNodes(couple);
            stack.offer(couple);
            recursivelyBuildSubstitutionStack(child, stack, nodeName + "_");
        }
    }
}
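The method above is a depth-first traversal that feeds a Deque with one composition couple per substituted node, extending the name prefix at each level. The following simplified sketch shows the same traversal shape under assumed types; Template, Couple and the sample data are hypothetical stand-ins for Topology, CompositionCouple and the CSAR repository lookups (and, unlike the original, it accumulates the full prefix).

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Map;

// Simplified sketch of the traversal pattern above: walk a composition tree depth-first,
// queue one "couple" per substituted node, and extend the name prefix at each level.
public class SubstitutionStackSketch {

    record Template(String name, Map<String, Template> substitutedNodes) {
    }

    record Couple(Template parent, Template child, String nodeName, String prefix) {
    }

    static void buildStack(Template template, Deque<Couple> stack, String prefix) {
        if (template == null || template.substitutedNodes().isEmpty()) {
            return;
        }
        for (Map.Entry<String, Template> entry : template.substitutedNodes().entrySet()) {
            String nodeName = entry.getKey();
            Template child = entry.getValue();
            String childPrefix = prefix + nodeName + "_";
            // One couple per node that must be replaced by its child template's content.
            stack.offer(new Couple(template, child, nodeName, childPrefix));
            buildStack(child, stack, childPrefix);
        }
    }

    public static void main(String[] args) {
        Template leaf = new Template("leaf", Map.of());
        Template middle = new Template("middle", Map.of("db", leaf));
        Template root = new Template("root", Map.of("app", middle));

        Deque<Couple> stack = new ArrayDeque<>();
        buildStack(root, stack, "");
        stack.forEach(couple -> System.out.println(couple.nodeName() + " -> prefix " + couple.prefix()));
    }
}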
Use of org.alien4cloud.tosca.model.templates.Topology in project alien4cloud by alien4cloud.
The class ManagedServiceResourceService, method create.
/**
 * Create a service resource associated with the given environment.
 *
 * @param serviceName The name of the service as it should appear.
 * @param environmentId The environment to create a service for; the service version will be the environment's currently associated topology version.
 * @param fromRuntime Whether the service should be created from the runtime topology related to the environment.
 * @return the id of the created service
 *
 * @throws AlreadyExistException if a service with the given name, or related to the given environment, already exists
 * @throws alien4cloud.exception.NotFoundException if <b>fromRuntime</b> is set to true but the environment is not deployed
 * @throws MissingSubstitutionException if the topology related to the environment does not define a substitution type
 */
public synchronized String create(String serviceName, String environmentId, boolean fromRuntime) {
    ApplicationEnvironment environment = checkAndGetApplicationEnvironment(environmentId);
    // Check that a service does not already exist for this environment.
    if (alienDAO.buildQuery(ServiceResource.class).setFilters(fromKeyValueCouples("environmentId", environmentId)).count() > 0) {
        throw new AlreadyExistException(
                "A service resource for environment <" + environmentId + "> and version <" + environment.getTopologyVersion() + "> already exists.");
    }
    Topology topology;
    String state = ToscaNodeLifecycleConstants.INITIAL;
    Deployment deployment = null;
    if (fromRuntime) {
        deployment = deploymentService.getActiveDeploymentOrFail(environmentId);
        topology = deploymentRuntimeStateService.getRuntimeTopology(deployment.getId());
        DeploymentStatus currentStatus = deploymentRuntimeStateService.getDeploymentStatus(deployment);
        state = managedServiceResourceEventService.getInstanceStateFromDeploymentStatus(currentStatus);
        if (state == null) {
            // We need a valid deployment state to expose the deployment as a service.
            throw new InvalidDeploymentStatusException(
                    "Creating a service out of a running deployment is possible only when its status is one of [DEPLOYED, FAILURE, UNDEPLOYED]; current was <"
                            + currentStatus + ">",
                    currentStatus);
        }
    } else {
        topology = topologyServiceCore.getOrFail(Csar.createId(environment.getApplicationId(), environment.getTopologyVersion()));
    }
    if (topology.getSubstitutionMapping() == null) {
        throw new MissingSubstitutionException("Substitution is required to expose a topology.");
    }
    // The elementId of the type created out of the substitution is currently the archive name.
    String serviceId = serviceResourceService.create(serviceName, environment.getTopologyVersion(), topology.getArchiveName(), environment.getTopologyVersion(), environmentId);
    // Update the service relationship definitions from the topology substitution.
    updateServiceRelationship(serviceId, topology);
    if (fromRuntime) {
        managedServiceResourceEventService.updateRunningService((DeploymentTopology) topology, serviceResourceService.getOrFail(serviceId), deployment, state);
    }
    ServiceResource serviceResource = serviceResourceService.getOrFail(serviceId);
    // Trigger a ManagedServiceCreatedEvent.
    publisher.publishEvent(new ManagedServiceCreatedEvent(this, serviceResource));
    return serviceId;
}
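The first thing create does is a uniqueness guard: a synchronized method plus an existence check, so only one service resource can ever be created per environment. Below is a stand-alone sketch of that guard, using an in-memory map instead of the DAO count query and a hypothetical exception type in place of AlreadyExistException.

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

// Stand-alone sketch of the guard used at the top of create(): refuse to create a second
// service for the same environment. Everything here (the registry map, the exception type)
// is hypothetical; alien4cloud does this with a DAO count query and AlreadyExistException.
public class ServicePerEnvironmentGuardSketch {

    static class ServiceAlreadyExistsException extends RuntimeException {
        ServiceAlreadyExistsException(String message) {
            super(message);
        }
    }

    private final Map<String, String> serviceIdByEnvironmentId = new ConcurrentHashMap<>();

    // synchronized so two concurrent callers cannot both pass the existence check.
    public synchronized String create(String serviceName, String environmentId) {
        if (serviceIdByEnvironmentId.containsKey(environmentId)) {
            throw new ServiceAlreadyExistsException(
                    "Cannot create <" + serviceName + ">: a service resource for environment <" + environmentId + "> already exists.");
        }
        String serviceId = UUID.randomUUID().toString();
        serviceIdByEnvironmentId.put(environmentId, serviceId);
        return serviceId;
    }

    public static void main(String[] args) {
        ServicePerEnvironmentGuardSketch guard = new ServicePerEnvironmentGuardSketch();
        System.out.println(guard.create("my-service", "env-1")); // succeeds
        try {
            guard.create("another-service", "env-1"); // rejected: same environment
        } catch (ServiceAlreadyExistsException e) {
            System.out.println(e.getMessage());
        }
    }
}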
Use of org.alien4cloud.tosca.model.templates.Topology in project alien4cloud by alien4cloud.
The class InputService, method onCopyConfiguration.
@EventListener
@Order(10)
public void onCopyConfiguration(OnDeploymentConfigCopyEvent onDeploymentConfigCopyEvent) {
    ApplicationEnvironment source = onDeploymentConfigCopyEvent.getSourceEnvironment();
    ApplicationEnvironment target = onDeploymentConfigCopyEvent.getTargetEnvironment();
    DeploymentInputs deploymentInputs = deploymentConfigurationDao.findById(DeploymentInputs.class,
            AbstractDeploymentConfig.generateId(source.getTopologyVersion(), source.getId()));
    if (deploymentInputs == null || MapUtils.isEmpty(deploymentInputs.getInputs())) {
        // Nothing to copy
        return;
    }
    Topology topology = topologyServiceCore.getOrFail(Csar.createId(target.getApplicationId(), target.getTopologyVersion()));
    if (MapUtils.isNotEmpty(topology.getInputs())) {
        Map<String, PropertyDefinition> inputsDefinitions = topology.getInputs();
        Map<String, AbstractPropertyValue> inputsToCopy = deploymentInputs.getInputs().entrySet().stream()
                .filter(inputEntry -> inputsDefinitions.containsKey(inputEntry.getKey()))
                .filter(inputEntry -> {
                    // Copy only inputs which satisfy the new input definition
                    try {
                        if (!(inputEntry.getValue() instanceof FunctionPropertyValue)) {
                            ConstraintPropertyService.checkPropertyConstraint(inputEntry.getKey(), PropertyService.asPropertyValue(inputEntry.getValue()),
                                    inputsDefinitions.get(inputEntry.getKey()));
                        }
                        return true;
                    } catch (ConstraintValueDoNotMatchPropertyTypeException | ConstraintViolationException e) {
                        return false;
                    }
                }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        if (MapUtils.isNotEmpty(inputsToCopy)) {
            DeploymentInputs targetDeploymentInputs = deploymentConfigurationDao.findById(DeploymentInputs.class,
                    AbstractDeploymentConfig.generateId(target.getTopologyVersion(), target.getId()));
            if (targetDeploymentInputs == null) {
                targetDeploymentInputs = new DeploymentInputs(target.getTopologyVersion(), target.getId());
            }
            // Copy inputs from original topology
            targetDeploymentInputs.setInputs(inputsToCopy);
            deploymentConfigurationDao.save(targetDeploymentInputs);
        }
    }
}
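The core of the copy above is a stream filter whose predicate treats a thrown constraint exception as "do not copy this input". The sketch below shows that pattern in isolation; the validator and its max-length constraint are hypothetical stand-ins for ConstraintPropertyService.checkPropertyConstraint.

import java.util.Map;
import java.util.stream.Collectors;

// Minimal sketch of the filtering pattern above: keep only the entries whose value passes a
// validation routine that signals failure by throwing a checked exception.
public class InputCopyFilterSketch {

    static class ConstraintViolationException extends Exception {
        ConstraintViolationException(String message) {
            super(message);
        }
    }

    // Hypothetical validator: rejects values longer than the allowed length.
    static void checkConstraint(String name, String value, int maxLength) throws ConstraintViolationException {
        if (value.length() > maxLength) {
            throw new ConstraintViolationException("Input [" + name + "] violates max_length=" + maxLength);
        }
    }

    public static void main(String[] args) {
        Map<String, String> previousInputs = Map.of("region", "eu-west-1", "description", "a value that is far too long");
        Map<String, Integer> definitions = Map.of("region", 20, "description", 10);

        Map<String, String> inputsToCopy = previousInputs.entrySet().stream()
                .filter(entry -> definitions.containsKey(entry.getKey()))
                .filter(entry -> {
                    try {
                        checkConstraint(entry.getKey(), entry.getValue(), definitions.get(entry.getKey()));
                        return true; // keep inputs that still satisfy the definition
                    } catch (ConstraintViolationException e) {
                        return false; // silently drop inputs that no longer fit
                    }
                }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

        System.out.println(inputsToCopy); // {region=eu-west-1}
    }
}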