Use of org.alien4cloud.alm.deployment.configuration.flow.FlowExecutionContext in project yorc-a4c-plugin (by ystia): class OpenStackBSComputeWFModifier, method doProcess.
/**
 * Reorders the "install" and "uninstall" workflow steps of every Yorc OpenStack
 * BlockStorage node relative to the Compute node it is attached to, so the generated
 * workflows match Yorc's expectations: on install the compute step runs before the
 * block-storage step; on uninstall the block-storage step runs before the compute step.
 * <p>
 * The reordering is done edge-by-edge (remove the old edge, then reconnect in the
 * opposite direction) rather than with a swap operation, because a swap could disturb
 * unrelated edges of the workflow graph.
 * <p>
 * NOTE(review): assumes the topology always contains "install" and "uninstall"
 * workflows — TODO confirm; {@code getWorkflows().get(...)} would return null otherwise.
 *
 * @param topology the matched topology to mutate in place
 * @param context  flow execution context, used here only for user-facing logging
 */
private void doProcess(Topology topology, FlowExecutionContext context) {
// Csar identity is required by the workflow edit processors below.
Csar csar = new Csar(topology.getArchiveName(), topology.getArchiveVersion());
Workflow installWF = topology.getWorkflows().get("install");
Workflow uninstallWF = topology.getWorkflows().get("uninstall");
Set<NodeTemplate> bsSet = TopologyNavigationUtil.getNodesOfType(topology, YORC_OPENSTACK_BS_TYPE, true);
// Let's process all BS
bsSet.forEach(bs -> safe(bs.getRelationships()).forEach((rn, rt) -> {
// Only relationships targeting an Attachment capability link a BS to its Compute.
if ("tosca.capabilities.Attachment".equals(rt.getRequirementType())) {
// Attachment found
context.getLog().info("Found a BlockStorage <{}> with an attachment on <{}>. Let's swap their workflow steps to match Yorc " + "expectations.", bs.getName(), rt.getTarget());
String computeNodeName = rt.getTarget();
// Now lets locate corresponding wf steps in install wf
for (Map.Entry<String, WorkflowStep> workflowStepEntry : installWF.getSteps().entrySet()) {
// Find the step that operates on this block-storage node...
if (workflowStepEntry.getValue().getTarget().equals(bs.getName())) {
// ...then look among its predecessors for the attached compute's step.
for (String precedingStepName : workflowStepEntry.getValue().getPrecedingSteps()) {
WorkflowStep precedingStep = installWF.getSteps().get(precedingStepName);
if (precedingStep.getTarget().equals(computeNodeName)) {
// We do not use swap operation here as it may mess up other workflow edges
// First remove the edge between steps
RemoveEdgeOperation removeEdgeOperation = new RemoveEdgeOperation();
removeEdgeOperation.setWorkflowName(installWF.getName());
removeEdgeOperation.setFromStepId(precedingStepName);
removeEdgeOperation.setToStepId(workflowStepEntry.getKey());
log.debug("Swapping {} with target {}", precedingStepName, workflowStepEntry.getKey());
removeEdgeProcessor.process(csar, topology, removeEdgeOperation);
// Then reconnect them in the right sequence
ConnectStepFromOperation connectStepFromOperation = new ConnectStepFromOperation();
connectStepFromOperation.setWorkflowName(installWF.getName());
connectStepFromOperation.setFromStepIds(new String[] { workflowStepEntry.getKey() });
connectStepFromOperation.setToStepId(precedingStepName);
connectStepFromProcessor.process(csar, topology, connectStepFromOperation);
// Only the first compute predecessor is swapped; stop scanning predecessors.
break;
}
}
// BS step found and handled; no need to scan the remaining install steps.
break;
}
}
// Now lets locate corresponding wf steps in uninstall wf
// Mirror of the loop above, but on uninstall the edge direction is reversed:
// we follow onSuccess successors instead of preceding steps.
for (Map.Entry<String, WorkflowStep> workflowStepEntry : uninstallWF.getSteps().entrySet()) {
if (workflowStepEntry.getValue().getTarget().equals(bs.getName())) {
for (String onSuccessStepName : workflowStepEntry.getValue().getOnSuccess()) {
WorkflowStep onSuccessStep = uninstallWF.getSteps().get(onSuccessStepName);
if (onSuccessStep.getTarget().equals(computeNodeName)) {
// We do not use swap operation here as it may mess up other workflow edges
// First remove the edge between steps
RemoveEdgeOperation removeEdgeOperation = new RemoveEdgeOperation();
removeEdgeOperation.setWorkflowName(uninstallWF.getName());
removeEdgeOperation.setFromStepId(workflowStepEntry.getKey());
removeEdgeOperation.setToStepId(onSuccessStepName);
log.debug("Swapping {} with target {}", onSuccessStepName, workflowStepEntry.getKey());
removeEdgeProcessor.process(csar, topology, removeEdgeOperation);
// Then reconnect them in the right sequence
ConnectStepFromOperation connectStepFromOperation = new ConnectStepFromOperation();
connectStepFromOperation.setWorkflowName(uninstallWF.getName());
connectStepFromOperation.setFromStepIds(new String[] { onSuccessStepName });
connectStepFromOperation.setToStepId(workflowStepEntry.getKey());
connectStepFromProcessor.process(csar, topology, connectStepFromOperation);
break;
}
}
break;
}
}
// Start & Stop make no sense for those kind of nodes in Yorc as those operations are not implemented.
// Do not change those WFs
}
}));
}
Use of org.alien4cloud.alm.deployment.configuration.flow.FlowExecutionContext in project alien4cloud: class PostMatchingNodeSetupModifier, method doMergeNode.
@Override
protected boolean doMergeNode(Topology topology, FlowExecutionContext context, String nodeTemplateId, NodePropsOverride nodePropsOverride) {
/**
 * Merges user-defined property overrides into a matched node: first delegates to the
 * parent implementation for node-template properties, then walks every capability
 * override. Overrides referencing a capability that no longer exists on the node are
 * dropped; the rest are merged against the capability type definition.
 * Returns true when anything changed (including dropped or rejected properties).
 */
final ConfigChanged tracker = new ConfigChanged();
// Node-template properties are handled by the parent class.
tracker.changed = super.doMergeNode(topology, context, nodeTemplateId, nodePropsOverride);
// Capability-level overrides are handled here.
NodeTemplate template = topology.getNodeTemplates().get(nodeTemplateId);
for (Iterator<Entry<String, NodeCapabilitiesPropsOverride>> it = safe(nodePropsOverride.getCapabilities()).entrySet().iterator(); it.hasNext();) {
Entry<String, NodeCapabilitiesPropsOverride> capOverride = it.next();
Capability cap = safe(template.getCapabilities()).get(capOverride.getKey());
if (cap == null) {
// The matched node no longer exposes this capability: drop the stale override.
tracker.changed = true;
it.remove();
} else {
// The capability still exists: merge overrides against its type definition.
// A node change may leave overrides for properties the capability no longer accepts.
CapabilityType capType = ToscaContext.get(CapabilityType.class, cap.getType());
cap.setProperties(mergeProperties(capOverride.getValue().getProperties(), cap.getProperties(), capType.getProperties(), s -> {
tracker.changed = true;
context.log().info("The property [" + s + "] previously specified to configure capability [" + capOverride.getKey() + "] of node [" + nodeTemplateId + "] cannot be set anymore as it is already specified by the matched location resource or in the topology.");
}));
}
}
return tracker.changed;
}
Use of org.alien4cloud.alm.deployment.configuration.flow.FlowExecutionContext in project alien4cloud: class DeploymentTopologyDTOBuilder, method prepareDeployment.
@Override
@ToscaContextual
public DeploymentTopologyDTO prepareDeployment(Topology topology, Application application, ApplicationEnvironment environment, ApplicationTopologyVersion topologyVersion, IDeploymentConfigAction deploymentConfigAction) {
/**
 * Applies the given configuration action to the topology, then runs the full
 * deployment flow and builds the resulting deployment topology DTO.
 */
// Apply the caller-supplied configuration update before computing the flow.
deploymentConfigAction.execute(application, environment, topologyVersion, topology);
// Execute the deployment flow and turn its context into the DTO returned to callers.
FlowExecutionContext flowContext = flowExecutor.executeDeploymentFlow(topology, application, environment);
return build(flowContext);
}
Use of org.alien4cloud.alm.deployment.configuration.flow.FlowExecutionContext in project alien4cloud: class NodeMatchingSubstitutionService, method onCopyConfiguration.
// FIXME fix this, synch with org.alien4cloud.alm.deployment.configuration.services.PolicyMatchingSubstitutionService#onCopyConfiguration
@EventListener
// Process this after location matching copy (first element).
@Order(30)
/**
 * Copies node-matching substitutions from a source environment's deployment
 * configuration to a target environment's, keeping only substitutions that are still
 * valid candidates on the target (as computed by re-running the matching flow).
 *
 * @param onDeploymentConfigCopyEvent event carrying the source and target environments
 */
public void onCopyConfiguration(OnDeploymentConfigCopyEvent onDeploymentConfigCopyEvent) {
ApplicationEnvironment source = onDeploymentConfigCopyEvent.getSourceEnvironment();
ApplicationEnvironment target = onDeploymentConfigCopyEvent.getTargetEnvironment();
DeploymentMatchingConfiguration sourceConfiguration = deploymentConfigurationDao.findById(DeploymentMatchingConfiguration.class, AbstractDeploymentConfig.generateId(source.getTopologyVersion(), source.getId()));
DeploymentMatchingConfiguration targetConfiguration = deploymentConfigurationDao.findById(DeploymentMatchingConfiguration.class, AbstractDeploymentConfig.generateId(target.getTopologyVersion(), target.getId()));
if (sourceConfiguration == null || MapUtils.isEmpty(sourceConfiguration.getLocationGroups()) || targetConfiguration == null || MapUtils.isEmpty(targetConfiguration.getLocationGroups())) {
// Nothing to copy
return;
}
// We have to execute a piece of the deployment flow to find out matching candidates so we copy only required inputs
Topology topology = topologyServiceCore.getOrFail(Csar.createId(target.getApplicationId(), target.getTopologyVersion()));
if (MapUtils.isNotEmpty(topology.getNodeTemplates())) {
Application application = applicationService.getOrFail(target.getApplicationId());
FlowExecutionContext executionContext = new FlowExecutionContext(deploymentConfigurationDao, topology, new EnvironmentContext(application, target));
flowExecutor.execute(topology, getMatchingFlow(), executionContext);
// Guard with safe(): the cache entry may be absent if the matching flow aborted early,
// in which case no substitution is considered valid on the target.
Map<String, Set<String>> locResTemplateIdsPerNodeIds = safe((Map<String, Set<String>>) executionContext.getExecutionCache().get(FlowExecutionContext.SELECTED_MATCH_NODE_LOCATION_TEMPLATE_BY_NODE_ID_MAP));
// Update the substitution on the target if available substitution is always compatible
Map<String, String> validOnNewEnvSubstitutedNodes = safe(sourceConfiguration.getMatchedLocationResources()).entrySet().stream().filter(entry -> locResTemplateIdsPerNodeIds.containsKey(entry.getKey()) && locResTemplateIdsPerNodeIds.get(entry.getKey()).contains(entry.getValue())).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
if (MapUtils.isNotEmpty(validOnNewEnvSubstitutedNodes)) {
if (targetConfiguration.getMatchedLocationResources() == null) {
targetConfiguration.setMatchedLocationResources(Maps.newHashMap());
}
// Fix: matched nodes configuration needs the same null guard as matched location
// resources above, otherwise the put() in the forEach below throws an NPE on a
// freshly created target configuration.
if (targetConfiguration.getMatchedNodesConfiguration() == null) {
targetConfiguration.setMatchedNodesConfiguration(Maps.newHashMap());
}
validOnNewEnvSubstitutedNodes.forEach((key, value) -> {
targetConfiguration.getMatchedLocationResources().put(key, value);
// Copy properties set on the node to the new one
targetConfiguration.getMatchedNodesConfiguration().put(key, safe(sourceConfiguration.getMatchedNodesConfiguration()).get(key));
});
deploymentConfigurationDao.save(targetConfiguration);
}
}
}
Use of org.alien4cloud.alm.deployment.configuration.flow.FlowExecutionContext in project alien4cloud: class PolicyMatchingReplaceModifier, method process.
@Override
/**
 * After the standard policy-matching replacement, registers the topology modifier
 * implementation declared by each matched policy type (via its "a4c_policy_impl" tag,
 * format {@code plugin_id:plugin_bean:injection_phase}) into the execution cache,
 * grouped by injection phase. Policies with a missing or invalid declaration are
 * reported through the context log and skipped.
 *
 * @param topology the topology whose policies are inspected
 * @param context  flow execution context; its cache accumulates per-phase modifiers
 */
public void process(Topology topology, FlowExecutionContext context) {
super.process(topology, context);
for (PolicyTemplate policyTemplate : safe(topology.getPolicies()).values()) {
PolicyType policyType = ToscaContext.getOrFail(PolicyType.class, policyTemplate.getType());
String policyImplMeta = TagUtil.getTagValue(policyType.getTags(), "a4c_policy_impl");
if (policyImplMeta == null) {
context.log().warn("Matched policy {} for {} does not define an alien topology modifier implementation, it may not be taken in account.", policyTemplate.getType(), policyTemplate.getName());
continue;
}
String[] policyImpl = policyImplMeta.split(":");
if (policyImpl.length != 3) {
context.log().error("Matched policy {} for policy {} defines an invalid modifier implementation {}, format should be policy_plugin_id:policy_plugin_bean:injection_phase", policyTemplate.getType(), policyTemplate.getName(), policyImplMeta);
// Fix: without this continue, execution fell through and policyImpl[2] below threw
// ArrayIndexOutOfBoundsException for any malformed a4c_policy_impl tag.
continue;
}
try {
ITopologyModifier modifier = pluginModifierRegistry.getPluginBean(policyImpl[0], policyImpl[1]);
List<ITopologyModifier> phaseModifiers = (List<ITopologyModifier>) context.getExecutionCache().computeIfAbsent(policyImpl[2], s -> Lists.<ITopologyModifier>newArrayList());
// No need to add a modifier more than once for a phase
if (!phaseModifiers.contains(modifier)) {
phaseModifiers.add(modifier);
}
} catch (MissingPluginException e) {
context.log().error("Implementation specified for policy type {} that refers to plugin bean {}, {} cannot be found.", policyTemplate.getType(), policyImpl[0], policyImpl[1]);
}
}
}
Aggregations