Use of com.woorea.openstack.heat.model.UpdateStackParam in project so by onap.
The class MsoHeatUtilsWithUpdate, method updateStack.
/**
* Update a Stack in the specified cloud location and tenant. The Heat template and parameter map are passed in as
* arguments, along with the cloud access credentials. It is expected that parameters have been validated and
* contain at minimum the required parameters for the given template with no extra (undefined) parameters.
*
* The Stack name supplied by the caller must be unique in the scope of this tenant. However, it should also be
* globally unique, as it will be the identifier for the resource going forward in Inventory. The latter is managed
* by the higher levels invoking this function.
*
* The caller may choose to let this function poll Openstack for completion of the stack update, or may handle
* polling itself via separate calls to query the status. In either case, a StackInfo object will be returned
* containing the current status. When polling is enabled, a status of UPDATED is expected. When not polling, a
* status of UPDATING is expected.
*
* An error will be thrown if the requested Stack does not exist in the specified Tenant and Cloud.
*
* @param cloudSiteId The cloud identifier (may be a region) in which the stack resides
* @param cloudOwner The owner of the cloud site
* @param tenantId The Openstack ID of the tenant in which to update the Stack
* @param stackName The name of the stack to update
* @param heatTemplate The Heat template
* @param stackInputs A map of key/value inputs
* @param pollForCompletion Indicator that polling should be handled in Java vs. in the client
* @param timeoutMinutes Timeout (in minutes) passed to Heat and used to bound polling for completion
* @param environment An optional yaml-format string to specify environmental parameters
* @param files a Map<String, Object> for listing child template IDs
* @param heatFiles a Map<String, Object> for listing get_file entries (fileName, fileBody)
* @return A StackInfo object
* @throws MsoException Thrown if the Openstack API call returns an exception.
*/
public StackInfo updateStack(String cloudSiteId, String cloudOwner, String tenantId, String stackName, String heatTemplate, Map<String, Object> stackInputs, boolean pollForCompletion, int timeoutMinutes, String environment, Map<String, Object> files, Map<String, Object> heatFiles) throws MsoException {
boolean heatEnvtVariable = true;
if (environment == null || "".equalsIgnoreCase(environment.trim())) {
heatEnvtVariable = false;
}
boolean haveFiles = true;
if (files == null || files.isEmpty()) {
haveFiles = false;
}
boolean haveHeatFiles = true;
if (heatFiles == null || heatFiles.isEmpty()) {
haveHeatFiles = false;
}
Heat heatClient = getHeatClient(cloudSiteId, tenantId);
// Perform a query first to get the current status
Stack heatStack = queryHeatStack(heatClient, stackName);
if (heatStack == null || "DELETE_COMPLETE".equals(heatStack.getStackStatus())) {
// Not found (or already deleted). Throw MsoStackNotFound, since there is nothing to update.
throw new MsoStackNotFound(stackName, tenantId, cloudSiteId);
}
// Use canonical name "<stack name>/<stack-id>" to update the stack.
// Otherwise, update by name returns a 302 redirect.
// NOTE: This is specific to the v1 Orchestration API.
String canonicalName = heatStack.getStackName() + "/" + heatStack.getId();
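// e.g. canonicalName = "exampleStack/b4071cc2-2b77-4e5a-9a2f-0a3c0f0d1e2f" (hypothetical name and UUID)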
logger.debug("Ready to Update Stack ({}) with input params: {}", canonicalName, stackInputs);
// force entire stackInput object to generic Map<String, Object> for openstack compatibility
ObjectMapper mapper = new ObjectMapper();
Map<String, Object> normalized = new HashMap<>();
try {
normalized = mapper.readValue(mapper.writeValueAsString(stackInputs), new TypeReference<HashMap<String, Object>>() {
});
} catch (IOException e1) {
logger.debug("could not map json", e1);
}
// Build up the stack update parameters
// Disable auto-rollback, because error reason is lost. Always rollback in the code.
UpdateStackParam stack = new UpdateStackParam();
stack.setTimeoutMinutes(timeoutMinutes);
stack.setParameters(normalized);
stack.setTemplate(heatTemplate);
stack.setDisableRollback(true);
// TJM add envt to stack
if (heatEnvtVariable) {
stack.setEnvironment(environment);
}
// and then add to stack (both are part of "files:" being added to stack)
if (haveFiles && haveHeatFiles) {
// Let's do this here - not in the bean
logger.debug("Found files AND heatFiles - combine and add!");
Map<String, Object> combinedFiles = new HashMap<>();
combinedFiles.putAll(files);
combinedFiles.putAll(heatFiles);
stack.setFiles(combinedFiles);
} else {
// Handle case where we have one or neither
if (haveFiles) {
stack.setFiles(files);
}
if (haveHeatFiles) {
// setFiles method modified to handle adding a map.
stack.setFiles(heatFiles);
}
}
try {
// Execute the actual Openstack command to update the Heat stack
OpenStackRequest<Void> request = heatClient.getStacks().update(canonicalName, stack);
executeAndRecordOpenstackRequest(request);
} catch (OpenStackBaseException e) {
// Since this came on the 'Update Stack' command, nothing was changed in the cloud.
// Rethrow the error as an MSO exception.
throw heatExceptionToMsoException(e, UPDATE_STACK);
} catch (RuntimeException e) {
// Catch-all
throw runtimeExceptionToMsoException(e, UPDATE_STACK);
}
// If client has requested a final response, poll for stack completion
Stack updateStack = null;
if (pollForCompletion) {
// Set a time limit on overall polling.
// Use the resource (template) timeout for Openstack (expressed in minutes)
// and add one poll interval to give Openstack a chance to fail on its own.
int createPollInterval = Integer.parseInt(this.environment.getProperty(createPollIntervalProp, CREATE_POLL_INTERVAL_DEFAULT));
int pollTimeout = (timeoutMinutes * 60) + createPollInterval;
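// For example (hypothetical values): timeoutMinutes = 120 and createPollInterval = 15 give
// pollTimeout = 120 * 60 + 15 = 7215 seconds; each loop iteration below subtracts createPollInterval.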
boolean loopAgain = true;
while (loopAgain) {
try {
updateStack = queryHeatStack(heatClient, canonicalName);
logger.debug("{} ({}) ", updateStack.getStackStatus(), canonicalName);
try {
logger.debug("Current stack {}" + this.getOutputsAsStringBuilderWithUpdate(heatStack).toString());
} catch (Exception e) {
logger.debug("an error occurred trying to print out the current outputs of the stack", e);
}
if ("UPDATE_IN_PROGRESS".equals(updateStack.getStackStatus())) {
// Sleep and try again unless timeout has been reached
if (pollTimeout <= 0) {
// Note that this should not occur, since there is a timeout specified
// in the Openstack call.
logger.error("{} Cloud site: {} Tenant: {} Stack: {} Stack status: {} {} Update stack timeout", MessageEnum.RA_UPDATE_STACK_TIMEOUT, cloudSiteId, tenantId, stackName, updateStack.getStackStatus(), ErrorCode.AvailabilityError.getValue());
loopAgain = false;
} else {
try {
Thread.sleep(createPollInterval * 1000L);
} catch (InterruptedException e) {
// If we are interrupted, we should stop ASAP.
loopAgain = false;
// Set again the interrupted flag
Thread.currentThread().interrupt();
}
}
pollTimeout -= createPollInterval;
logger.debug("pollTimeout remaining: {}", pollTimeout);
} else {
loopAgain = false;
}
} catch (MsoException e) {
// Cannot query the stack. Something is wrong.
// TODO: No way to roll back the stack at this point. What to do?
e.addContext(UPDATE_STACK);
throw e;
}
}
if (!"UPDATE_COMPLETE".equals(updateStack.getStackStatus())) {
logger.error("{} Stack status: {} Stack status reason: {} {} Update Stack error", MessageEnum.RA_UPDATE_STACK_ERR, updateStack.getStackStatus(), updateStack.getStackStatusReason(), ErrorCode.DataError.getValue());
// TODO: No way to roll back the stack at this point. What to do?
// Throw a 'special case' of MsoOpenstackException to report the Heat status
MsoOpenstackException me = null;
if ("UPDATE_IN_PROGRESS".equals(updateStack.getStackStatus())) {
me = new MsoOpenstackException(0, "", "Stack Update Timeout");
} else {
String error = "Stack error (" + updateStack.getStackStatus() + "): " + updateStack.getStackStatusReason();
me = new MsoOpenstackException(0, "", error);
}
me.addContext(UPDATE_STACK);
throw me;
}
} else {
// Return the current status.
updateStack = queryHeatStack(heatClient, canonicalName);
if (updateStack != null) {
logger.debug("UpdateStack, status = {}", updateStack.getStackStatus());
} else {
logger.debug("UpdateStack, stack not found");
}
}
return new StackInfoMapper(updateStack).map();
}
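As a usage illustration (not part of the project source; every name, ID and template body below is hypothetical, and the imports are the same java.util and MSO types used in the listing above), a caller holding an MsoHeatUtilsWithUpdate instance might drive updateStack roughly as follows:
// Hypothetical caller sketch, assuming 'heatUtils' is an injected MsoHeatUtilsWithUpdate bean
// and the template/environment strings have already been loaded by the caller.
public StackInfo updateExampleStack(MsoHeatUtilsWithUpdate heatUtils, String heatTemplateBody, String environmentYaml, String childTemplateBody) throws MsoException {
Map<String, Object> stackInputs = new HashMap<>();
stackInputs.put("vnf_name", "exampleVnf");
// Child templates referenced by the main template go in 'files'; get_file content goes in 'heatFiles'.
// updateStack merges both into the single Heat "files:" section, as shown above.
Map<String, Object> files = new HashMap<>();
files.put("nested.yaml", childTemplateBody);
Map<String, Object> heatFiles = new HashMap<>();
heatFiles.put("userdata.txt", "#!/bin/bash\necho hello\n");
// Poll for completion, with an example 120-minute resource timeout.
return heatUtils.updateStack("RegionOne", "CloudOwner", "tenant-id-123", "exampleStack", heatTemplateBody, stackInputs, true, 120, environmentYaml, files, heatFiles);
}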