Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class SRDFDeviceController, method createNonCGSRDFVolumes.
protected void createNonCGSRDFVolumes(Workflow workflow, String waitFor, List<VolumeDescriptor> sourceDescriptors, Map<URI, Volume> uriVolumeMap) {
    for (VolumeDescriptor sourceDescriptor : sourceDescriptors) {
        Volume source = uriVolumeMap.get(sourceDescriptor.getVolumeURI());
        // this will be null for normal use cases except vpool change
        URI vpoolChangeUri = getVirtualPoolChangeVolume(sourceDescriptors);
        log.info("VPoolChange URI {}", vpoolChangeUri);
        StringSet srdfTargets = source.getSrdfTargets();
        for (String targetStr : srdfTargets) {
            URI targetURI = URI.create(targetStr);
            Volume target = uriVolumeMap.get(targetURI);
            RemoteDirectorGroup group = dbClient.queryObject(RemoteDirectorGroup.class, target.getSrdfGroup());
            StorageSystem system = dbClient.queryObject(StorageSystem.class, group.getSourceStorageSystemUri());
            Workflow.Method createMethod = createSRDFVolumePairMethod(system.getId(), source.getId(), targetURI, vpoolChangeUri);
            Workflow.Method rollbackMethod = rollbackSRDFLinkMethod(system.getId(), source.getId(), targetURI, false);
            // Ensure CreateElementReplica steps are executed sequentially (CQ613404)
            waitFor = workflow.createStep(CREATE_SRDF_MIRRORS_STEP_GROUP, CREATE_SRDF_MIRRORS_STEP_DESC, waitFor,
                    system.getId(), system.getSystemType(), getClass(), createMethod, rollbackMethod, null);
        }
    }
}
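The create and rollback methods passed to createStep are produced by small factory helpers. A minimal sketch of such a helper is shown below, assuming (as in the vCenter example later on this page) that Workflow.Method simply packages the name of the controller method to dispatch together with its arguments; the method-name strings used here are illustrative, not the exact CoprHD names.

// Illustrative sketch of a step-method factory such as createSRDFVolumePairMethod.
// The string must name a controller method the workflow engine can invoke;
// "createSRDFVolumePairStep" is an assumed name for illustration only.
public Workflow.Method createSRDFVolumePairMethod(URI systemURI, URI sourceURI, URI targetURI, URI vpoolChangeUri) {
    return new Workflow.Method("createSRDFVolumePairStep", systemURI, sourceURI, targetURI, vpoolChangeUri);
}

// The rollback counterpart follows the same pattern, pointing at a method that
// would undo a half-created SRDF pair if a later step fails ("rollbackSRDFLinkStep" is assumed).
public Workflow.Method rollbackSRDFLinkMethod(URI systemURI, URI sourceURI, URI targetURI, boolean isGroupRollback) {
    return new Workflow.Method("rollbackSRDFLinkStep", systemURI, sourceURI, targetURI, isGroupRollback);
}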
Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class FileDeviceController, method acquireStepLock.
/**
 * Acquire the workflow distributed owner lock for a step.
 * This method acquires the lock at a particular workflow step. Currently the lock is acquired
 * only for VNX File systems.
 * The lock is released after the step completes (either failure or success).
 *
 * @param storageObj
 *            Storage system whose native GUID is used to generate the key for the lock
 * @param opId
 *            The workflow step id for which the lock is acquired
 */
public void acquireStepLock(StorageSystem storageObj, String opId) {
    Workflow workflow = _workflowService.getWorkflowFromStepId(opId);
    if (workflow != null && storageObj.deviceIsType(Type.vnxfile)) {
        List<String> lockKeys = new ArrayList<String>();
        lockKeys.add(storageObj.getNativeGuid());
        boolean lockAcquired = _workflowService.acquireWorkflowStepLocks(opId, lockKeys,
                LockTimeoutValue.get(LockType.FILE_OPERATIONS));
        if (!lockAcquired) {
            throw DeviceControllerException.exceptions.failedToAcquireWorkflowLock(lockKeys.toString(), "Timeout in Acquiring Lock");
        }
    }
}
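A hypothetical usage sketch follows, showing how a VNX File step method might call acquireStepLock before touching the array; everything other than acquireStepLock itself (the caller's name, signature, and body) is assumed for illustration.

// Hypothetical caller: a file-operation step serializing work against one VNX File array.
// The lock keys on the array's native GUID, so concurrent steps for the same array queue up,
// and the WorkflowService releases the lock when this step finishes (success or failure).
public void doFileOperationStep(URI storageURI, URI fsURI, String opId) throws ControllerException {
    StorageSystem storageObj = _dbClient.queryObject(StorageSystem.class, storageURI);
    acquireStepLock(storageObj, opId);
    // ... perform the device operation for fsURI here ...
}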
Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class FileDeviceController, method createExpandFileshareStep.
/**
 * Creates the expand file system step.
 *
 * @param workflow The workflow to which the step is added
 * @param waitFor The step or step group to wait on before executing
 * @param fileDescriptors The file descriptors to evaluate for expansion
 * @param taskId The task id
 * @return The step id that subsequent steps should wait on
 */
private String createExpandFileshareStep(Workflow workflow, String waitFor, List<FileDescriptor> fileDescriptors, String taskId) {
    _log.info("START Expand file system");
    Map<URI, Long> filesharesToExpand = new HashMap<URI, Long>();
    for (FileDescriptor descriptor : fileDescriptors) {
        // Grab the fileshare, let's see if an expand is really needed
        FileShare fileShare = _dbClient.queryObject(FileShare.class, descriptor.getFsURI());
        // new size > existing fileshare's provisioned capacity, otherwise we can ignore.
        if (fileShare.getCapacity() != null && fileShare.getCapacity().longValue() != 0
                && descriptor.getFileSize() > fileShare.getCapacity().longValue()) {
            filesharesToExpand.put(fileShare.getId(), descriptor.getFileSize());
        }
    }
    Workflow.Method expandMethod = null;
    for (Map.Entry<URI, Long> entry : filesharesToExpand.entrySet()) {
        _log.info("Creating WF step for Expand FileShare for {}", entry.getKey().toString());
        FileShare fileShareToExpand = _dbClient.queryObject(FileShare.class, entry.getKey());
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, fileShareToExpand.getStorageDevice());
        Long fileSize = entry.getValue();
        expandMethod = expandFileSharesMethod(storage.getId(), fileShareToExpand.getId(), fileSize);
        waitFor = workflow.createStep(EXPAND_FILESYSTEMS_STEP, String.format("Expand FileShare %s", fileShareToExpand), waitFor,
                storage.getId(), storage.getSystemType(), getClass(), expandMethod, null, null);
        _log.info("Creating workflow step {}", EXPAND_FILESYSTEMS_STEP);
    }
    return waitFor;
}
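expandFileSharesMethod presumably follows the same factory pattern as the other Workflow.Method builders on this page; a minimal sketch, with an assumed target method name, might look like this:

// Sketch of the factory used above. "expandFS" is an assumed name for the controller
// method that the workflow engine would invoke to perform the actual expansion.
private Workflow.Method expandFileSharesMethod(URI storageURI, URI fileShareURI, long newSize) {
    return new Workflow.Method("expandFS", storageURI, fileShareURI, newSize);
}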
Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class FileDeviceController, method createReduceFileshareStep.
/**
 * Creates the reduce file system step.
 *
 * @param workflow The workflow to which the step is added
 * @param waitFor The step or step group to wait on before executing
 * @param fileDescriptors The file descriptors to evaluate for reduction
 * @param taskId The task id
 * @return The step id that subsequent steps should wait on
 */
private String createReduceFileshareStep(Workflow workflow, String waitFor, List<FileDescriptor> fileDescriptors, String taskId) {
    _log.info("START Reduce file system");
    Map<URI, Long> filesharesToReduce = new HashMap<URI, Long>();
    for (FileDescriptor descriptor : fileDescriptors) {
        FileShare fileShare = _dbClient.queryObject(FileShare.class, descriptor.getFsURI());
        if (fileShare.getCapacity() != null && fileShare.getCapacity().longValue() != 0) {
            filesharesToReduce.put(fileShare.getId(), descriptor.getFileSize());
        }
    }
    Workflow.Method reduceMethod = null;
    for (Map.Entry<URI, Long> entry : filesharesToReduce.entrySet()) {
        _log.info("Creating WF step for Reduce FileShare for {}", entry.getKey().toString());
        FileShare fileShareToReduce = _dbClient.queryObject(FileShare.class, entry.getKey());
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, fileShareToReduce.getStorageDevice());
        Long fileSize = entry.getValue();
        reduceMethod = reduceFileSharesMethod(storage.getId(), fileShareToReduce.getId(), fileSize);
        waitFor = workflow.createStep(REDUCE_FILESYSTEMS_STEP, String.format("Reduce FileShare %s", fileShareToReduce), waitFor,
                storage.getId(), storage.getSystemType(), getClass(), reduceMethod, null, null);
        _log.info("Creating workflow step {}", REDUCE_FILESYSTEMS_STEP);
    }
    return waitFor;
}
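Both builders return the id of the last step they created (or the incoming waitFor when no step was needed), so a caller can chain them. A hypothetical assembly sketch, with an invented orchestration method name, is:

// Hypothetical orchestration sketch: chaining the expand and reduce builders so that
// the reduce steps wait on the expand steps. The enclosing method name is invented.
public String addStepsForResizeFileShares(Workflow workflow, String waitFor,
        List<FileDescriptor> fileDescriptors, String taskId) {
    waitFor = createExpandFileshareStep(workflow, waitFor, fileDescriptors, taskId);
    waitFor = createReduceFileshareStep(workflow, waitFor, fileDescriptors, taskId);
    return waitFor;
}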
Use of com.emc.storageos.workflow.Workflow in project coprhd-controller by CoprHD.
The class VcenterControllerImpl, method createOrUpdateVcenterCluster.
private void createOrUpdateVcenterCluster(boolean createCluster, AsyncTask task, URI clusterUri, URI[] addHostUris, URI[] removeHostUris, URI[] volumeUris) throws InternalException {
    TaskCompleter completer = null;
    try {
        _log.info("createOrUpdateVcenterCluster " + createCluster + " " + task + " " + clusterUri + " " + addHostUris + " " + removeHostUris);
        if (task == null) {
            _log.error("AsyncTask is null");
            throw new Exception("AsyncTask is null");
        }
        URI vcenterDataCenterId = task._id;
        VcenterDataCenter vcenterDataCenter = _dbClient.queryObject(VcenterDataCenter.class, vcenterDataCenterId);
        if (clusterUri == null) {
            _log.error("Cluster URI is null");
            throw new Exception("Cluster URI is null");
        }
        Cluster cluster = _dbClient.queryObject(Cluster.class, clusterUri);
        Vcenter vcenter = _dbClient.queryObject(Vcenter.class, vcenterDataCenter.getVcenter());
        _log.info("Request to create or update cluster " + vcenter.getIpAddress() + "/" + vcenterDataCenter.getLabel() + "/" + cluster.getLabel());
        Collection<Host> addHosts = new ArrayList<Host>();
        if (addHostUris == null || addHostUris.length == 0) {
            _log.info("Add host URIs is null or empty - Cluster will be created without hosts");
        } else {
            for (URI hostUri : addHostUris) {
                _log.info("createOrUpdateVcenterCluster " + clusterUri + " with add host " + hostUri);
            }
            addHosts = _dbClient.queryObject(Host.class, addHostUris);
        }
        Collection<Host> removeHosts = new ArrayList<Host>();
        if (removeHostUris == null || removeHostUris.length == 0) {
            _log.info("Remove host URIs is null or empty - Cluster will have no removed hosts");
        } else {
            for (URI hostUri : removeHostUris) {
                _log.info("createOrUpdateVcenterCluster " + clusterUri + " with remove host " + hostUri);
            }
            removeHosts = _dbClient.queryObject(Host.class, removeHostUris);
        }
        Collection<Volume> volumes = new ArrayList<Volume>();
        if (volumeUris == null || volumeUris.length == 0) {
            _log.info("Volume URIs is null or empty - Cluster will be created without datastores");
        } else {
            for (URI volumeUri : volumeUris) {
                _log.info("createOrUpdateVcenterCluster " + clusterUri + " with volume " + volumeUri);
            }
            volumes = _dbClient.queryObject(Volume.class, volumeUris);
        }
        completer = new VcenterClusterCompleter(vcenterDataCenterId, task._opId, OperationTypeEnum.CREATE_UPDATE_VCENTER_CLUSTER, "VCENTER_CONTROLLER");
        Workflow workflow = _workflowService.getNewWorkflow(this, "CREATE_UPDATE_VCENTER_CLUSTER_WORKFLOW", true, task._opId);
        String clusterStep = workflow.createStep("CREATE_UPDATE_VCENTER_CLUSTER_STEP",
                String.format("vCenter cluster operation in vCenter datacenter %s", vcenterDataCenterId), null,
                vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(),
                new Workflow.Method("createUpdateVcenterClusterOperation", createCluster, vcenter.getId(), vcenterDataCenter.getId(), cluster.getId()),
                null, null);
        String lastStep = clusterStep;
        if (!removeHosts.isEmpty()) {
            for (Host host : removeHosts) {
                String hostStep = workflow.createStep("VCENTER_CLUSTER_REMOVE_HOST",
                        String.format("vCenter cluster remove host operation %s", host.getId()), clusterStep,
                        vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(),
                        new Workflow.Method("vcenterClusterRemoveHostOperation", vcenter.getId(), vcenterDataCenter.getId(), cluster.getId(), host.getId()),
                        null, null);
                // add host will wait on last of these
                lastStep = hostStep;
            }
        }
        if (!addHosts.isEmpty()) {
            for (Host host : addHosts) {
                String hostStep = workflow.createStep("VCENTER_CLUSTER_ADD_HOST",
                        String.format("vCenter cluster add host operation %s", host.getId()), lastStep,
                        vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(),
                        new Workflow.Method("vcenterClusterAddHostOperation", vcenter.getId(), vcenterDataCenter.getId(), cluster.getId(), host.getId()),
                        null, null);
            }
        }
        workflow.executePlan(completer, "Success");
    } catch (Exception e) {
        _log.error("createOrUpdateVcenterCluster caught an exception.", e);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        completer.error(_dbClient, serviceError);
    }
}
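The Workflow.Method names above ("createUpdateVcenterClusterOperation", "vcenterClusterAddHostOperation", and so on) refer to controller methods the workflow engine invokes when the corresponding step runs, typically with the step id appended as the final argument. A rough sketch of what one such step method could look like follows; the body and the use of the WorkflowStepCompleter helpers are assumptions about the implementation, not the actual CoprHD code.

// Rough sketch of a step operation method; the real implementation differs.
// The engine is assumed to pass the arguments given to Workflow.Method plus the step id.
public void vcenterClusterAddHostOperation(URI vcenterId, URI vcenterDataCenterId, URI clusterId, URI hostId, String stepId) {
    try {
        WorkflowStepCompleter.stepExecuting(stepId);
        // ... look up the host and add it to the vCenter cluster via the vSphere API ...
        WorkflowStepCompleter.stepSucceded(stepId);
    } catch (Exception e) {
        _log.error("vcenterClusterAddHostOperation caught an exception.", e);
        WorkflowStepCompleter.stepFailed(stepId, DeviceControllerException.errors.jobFailed(e));
    }
}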