Use of org.opentosca.toscana.plugins.kubernetes.util.NodeStack in project TOSCAna by StuPro-TOSCAna.
From the class TestNodeStacks, method getLampNodeStacks:
public static Set<NodeStack> getLampNodeStacks(Log log) {
    // Build the effective model for the LAMP test CSAR and index its nodes by name.
    Map<String, RootNode> nodeMap =
        new EffectiveModelFactory().create(TestCsars.VALID_LAMP_NO_INPUT_TEMPLATE, log).getNodeMap();

    KubernetesNodeContainer serverContainer = new KubernetesNodeContainer(nodeMap.get("server"));
    // NOTE(review): this looks like a boolean query whose result is discarded —
    // confirm the call has a side effect (cf. activateParentComputeNode used elsewhere).
    serverContainer.hasParentComputeNode();

    // Stack order matters to NodeStack construction: application, web server, compute.
    List<KubernetesNodeContainer> containers = new LinkedList<>();
    containers.add(new KubernetesNodeContainer(nodeMap.get("my_app")));
    containers.add(new KubernetesNodeContainer(nodeMap.get("apache_web_server")));
    containers.add(serverContainer);

    NodeStack webAppStack = new NodeStack(containers);
    // Manually set the docker image tag (used for testing the ResourceFileCreator)
    webAppStack.setDockerImageTag("my-app");
    return Sets.newHashSet(webAppStack);
}
Use of org.opentosca.toscana.plugins.kubernetes.util.NodeStack in project TOSCAna by StuPro-TOSCAna.
From the class Pod, method getPods:
/**
 * Groups the given NodeStacks into Pods, one Pod per hosting Compute node.
 *
 * @param stacks the NodeStacks to group into Pods
 * @return a list containing the given NodeStacks grouped into Pods
 */
public static List<Pod> getPods(Collection<NodeStack> stacks) {
    // Group the stacks by the Compute node they run on.
    Map<Compute, List<NodeStack>> stackMap = new HashMap<>();
    for (NodeStack stack : stacks) {
        // computeIfAbsent returns the (possibly freshly created) list, so the
        // original's separate get() lookup was redundant — chain the add directly.
        stackMap.computeIfAbsent(stack.getComputeNode(), k -> new ArrayList<>()).add(stack);
    }
    // Wrap each group of stacks in a Pod bound to its Compute node.
    List<Pod> pods = new ArrayList<>(stackMap.size());
    stackMap.forEach((computeNode, groupedStacks) -> pods.add(new Pod(groupedStacks, computeNode)));
    return pods;
}
Use of org.opentosca.toscana.plugins.kubernetes.util.NodeStack in project TOSCAna by StuPro-TOSCAna.
From the class DockerfileBuildingVisitor, method handleDefault:
/**
 * Implements the default node transformation behaviour: the execution of the
 * node's lifecycle scripts provides the functionality that is expected from the node.
 *
 * @param node              the node whose lifecycle operations are added to the Dockerfile
 * @param ignoredLifecycles names of lifecycle operations that must not be executed
 * @throws UnsupportedOperationException if copying the node's artifacts fails
 */
private void handleDefault(RootNode node, String[] ignoredLifecycles) {
try {
// Remembers the original private address of a stack's Compute node so it
// can be restored after the lifecycle scripts have been added (see below).
Map<NodeStack, String> address = new HashMap<>();
// Add the ports exposed by the node to the ports list
node.getCapabilities().forEach(e -> {
try {
if (e instanceof EndpointCapability) {
if (((EndpointCapability) e).getPort().isPresent()) {
ports.add(((EndpointCapability) e).getPort().get().port);
}
}
} catch (Exception ex) {
// Best-effort: a single unreadable port must not abort the whole transformation.
logger.warn("Failed reading Port from node {}", node.getEntityName(), ex);
}
});
// If this node connects to a node hosted on the same Compute node, the target is
// not reachable under the Compute node's own name/address from inside the image
// build. We therefore have to set this to 127.0.0.1 ('localhost' causes issues too)
// while the lifecycle scripts run, and restore the original address afterwards.
for (Requirement e : node.getRequirements()) {
if (e.getRelationship().isPresent() && e.getRelationship().get() instanceof ConnectsTo) {
for (Object o : e.getFulfillers()) {
if (o instanceof RootNode) {
// Find the stack in the connection graph that contains the fulfilling node.
NodeStack targetStack = this.connectionGraph.vertexSet().stream().filter(ek -> ek.hasNode((RootNode) o)).findFirst().orElse(null);
// NOTE(review): identity comparison (==) — assumes Compute nodes are shared
// instances across stacks rather than equal-but-distinct objects; confirm.
if (targetStack != null && targetStack.getComputeNode() == this.stack.getComputeNode()) {
// Save the original address (may be absent → null) before overwriting it.
address.put(this.stack, this.stack.getComputeNode().getPrivateAddress().orElse(null));
this.stack.getComputeNode().setPrivateAddress(IPV4_LOCAL_ADDRESS);
}
}
}
}
}
// Add the scripts from the lifecycle to the Dockerfile
addLifecycleOperationsToDockerfile(node.getEntityName(), node.getStandardLifecycle(), ignoredLifecycles);
// Reset to original address
address.forEach((k, v) -> {
k.getComputeNode().setPrivateAddress(v);
});
} catch (IOException e) {
throw new UnsupportedOperationException("Transformation failed while copying artifacts", e);
}
}
Use of org.opentosca.toscana.plugins.kubernetes.util.NodeStack in project TOSCAna by StuPro-TOSCAna.
From the class CloudFoundryLifecycle, method prepare:
@Override
public void prepare() {
    logger.info("Begin preparation for transformation to Cloud Foundry");

    // Let the PrepareVisitor process every node of the model first.
    PrepareVisitor prepareVisitor = new PrepareVisitor(logger);
    context.getModel().getNodes().forEach(node -> node.accept(prepareVisitor));

    logger.debug("Collecting Compute Nodes in topology");
    ComputeNodeFindingVisitor computeFinder = new ComputeNodeFindingVisitor();
    for (RootNode node : model.getNodes()) {
        node.accept(computeFinder);
        nodes.put(node.getEntityName(), new KubernetesNodeContainer(node));
    }
    computeFinder.getComputeNodes()
        .forEach(found -> computeNodes.add(nodes.get(found.getEntityName())));

    logger.debug("Finding top Level Nodes");
    graph = model.getTopology();
    // The finder yields RootNodes; narrow them to Compute before determining top-level nodes.
    List<Compute> computes = computeFinder.getComputeNodes().stream()
        .map(Compute.class::cast)
        .collect(Collectors.toList());
    Set<RootNode> topLevelNodes = determineTopLevelNodes(
        context.getModel(),
        computes,
        node -> nodes.get(node.getEntityName()).activateParentComputeNode());

    logger.debug("Building complete Topology stacks");
    this.stacks.addAll(buildTopologyStacks(model, topLevelNodes, nodes));

    // TODO: check how many different applications there are and fill list with them
    // probably there must be a combination of application and set of nodes
    applications = new ArrayList<>();
    int appNumber = 1;
    for (NodeStack stack : stacks) {
        // One Application per NodeStack, numbered starting at 1.
        Application application = new Application(appNumber++, context);
        application.setProvider(provider);
        application.setConnection(connection);
        application.setName(stack.getStackName());
        application.addStack(stack);
        applications.add(application);
    }
}
Aggregations