Use of com.emc.storageos.volumecontroller.impl.smis.SmisStorageDevice in project coprhd-controller by CoprHD.
The class VmaxMaskingOrchestrator, method changePortGroup.
@Override
public void changePortGroup(URI storageURI, URI exportGroupURI, URI portGroupURI, List<URI> exportMaskURIs, boolean waitForApproval, String token) {
ExportChangePortGroupCompleter taskCompleter = null;
try {
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
StoragePortGroup portGroup = _dbClient.queryObject(StoragePortGroup.class, portGroupURI);
taskCompleter = new ExportChangePortGroupCompleter(storageURI, exportGroupURI, token, portGroupURI);
logExportGroup(exportGroup, storageURI);
String workflowKey = "changePortGroup";
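// Guard against re-entry: if a workflow was already created for this token and key, there is nothing more to do.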
if (_workflowService.hasWorkflowBeenCreated(token, workflowKey)) {
return;
}
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), workflowKey, false, token);
if (CollectionUtils.isEmpty(exportMaskURIs)) {
_log.info("No export masks to change");
taskCompleter.ready(_dbClient);
return;
}
List<ExportMask> exportMasks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
String previousStep = null;
Set<URI> hostURIs = new HashSet<URI>();
for (ExportMask oldMask : exportMasks) {
// create a new masking view using the new port group
SmisStorageDevice device = (SmisStorageDevice) getDevice();
oldMask = device.refreshExportMask(storage, oldMask);
StringSet existingInits = oldMask.getExistingInitiators();
StringMap existingVols = oldMask.getExistingVolumes();
if (!CollectionUtils.isEmpty(existingInits)) {
String error = String.format("The export mask %s has unmanaged initiators %s", oldMask.getMaskName(), Joiner.on(',').join(existingInits));
_log.error(error);
ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
taskCompleter.error(_dbClient, serviceError);
return;
}
if (!CollectionUtils.isEmpty(existingVols)) {
String error = String.format("The export mask %s has unmanaged volumes %s", oldMask.getMaskName(), Joiner.on(',').join(existingVols.keySet()));
_log.error(error);
ServiceError serviceError = DeviceControllerException.errors.changePortGroupValidationError(error);
taskCompleter.error(_dbClient, serviceError);
return;
}
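// Gather the mask's initiator port names and volumes, then compute export path parameters constrained to the ports of the new port group.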
InitiatorHelper initiatorHelper = new InitiatorHelper(StringSetUtil.stringSetToUriList(oldMask.getInitiators())).process(exportGroup);
List<String> initiatorNames = initiatorHelper.getPortNames();
List<URI> volumes = StringSetUtil.stringSetToUriList(oldMask.getVolumes().keySet());
ExportPathParams pathParams = _blockScheduler.calculateExportPathParamForVolumes(volumes, 0, storageURI, exportGroupURI);
pathParams.setStoragePorts(portGroup.getStoragePorts());
List<Initiator> initiators = ExportUtils.getExportMaskInitiators(oldMask, _dbClient);
List<URI> initURIs = new ArrayList<URI>();
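// Collect the hosts behind the mask's initiators so host rescan steps can be generated later, and keep the initiator URIs for the add-volumes rollback method.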
for (Initiator init : initiators) {
if (!NullColumnValueGetter.isNullURI(init.getHost())) {
hostURIs.add(init.getHost());
}
initURIs.add(init.getId());
}
// Get impacted export groups
List<ExportGroup> impactedExportGroups = ExportMaskUtils.getExportGroups(_dbClient, oldMask);
List<URI> exportGroupURIs = URIUtil.toUris(impactedExportGroups);
_log.info("changePortGroup: exportMask {}, impacted export groups: {}", oldMask.getMaskName(), Joiner.on(',').join(exportGroupURIs));
Map<URI, List<URI>> assignments = _blockScheduler.assignStoragePorts(storage, exportGroup, initiators, null, pathParams, volumes, _networkDeviceController, exportGroup.getVirtualArray(), token);
// Trying to find if there is existing export mask or masking view for the same host and using the new
// port group. If found one, add the volumes in the current export mask to the new one; otherwise, create
// a new export mask/masking view, with the same storage group, initiator group and the new port group.
// then delete the current export mask.
ExportMask newMask = device.findExportMasksForPortGroupChange(storage, initiatorNames, portGroupURI);
Map<URI, Integer> volumesToAdd = StringMapUtil.stringMapToVolumeMap(oldMask.getVolumes());
if (newMask != null) {
updateZoningMap(exportGroup, newMask, true);
_log.info(String.format("adding these volumes %s to mask %s", Joiner.on(",").join(volumesToAdd.keySet()), newMask.getMaskName()));
previousStep = generateZoningAddVolumesWorkflow(workflow, previousStep, exportGroup, Arrays.asList(newMask), new ArrayList<URI>(volumesToAdd.keySet()));
String addVolumeStep = workflow.createStepId();
ExportTaskCompleter exportTaskCompleter = new ExportMaskAddVolumeCompleter(exportGroupURI, newMask.getId(), volumesToAdd, addVolumeStep);
exportTaskCompleter.setExportGroups(exportGroupURIs);
Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportGroupAddVolumes", storageURI, exportGroupURI, newMask.getId(), volumesToAdd, null, exportTaskCompleter);
Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupAddVolumes", storageURI, exportGroupURI, exportGroupURIs, newMask.getId(), volumesToAdd, initURIs, addVolumeStep);
previousStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK, String.format("Adding volumes to mask %s (%s)", newMask.getMaskName(), newMask.getId().toString()), previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class, maskingExecuteMethod, maskingRollbackMethod, addVolumeStep);
previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, storage, exportGroup, newMask, volumesToAdd, null);
} else {
// We don't find existing export mask /masking view, we will create a new one.
// first, to construct the new export mask name, if the export mask has the original name, then
// append the new port group name to the current export mask name; if the export mask already has the current
// port group name appended, then remove the current port group name, and append the new one.
String oldName = oldMask.getMaskName();
URI oldPGURI = oldMask.getPortGroup();
if (oldPGURI != null) {
StoragePortGroup oldPG = _dbClient.queryObject(StoragePortGroup.class, oldPGURI);
if (oldPG != null) {
String pgName = oldPG.getLabel();
if (oldName.endsWith(pgName)) {
oldName = oldName.replaceAll(pgName, "");
}
}
}
String maskName = null;
if (oldName.endsWith("_")) {
maskName = String.format("%s%s", oldName, portGroup.getLabel());
} else {
maskName = String.format("%s_%s", oldName, portGroup.getLabel());
}
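// No matching mask was found: initialize a new export mask with the assigned ports, associate it with the new port group, and record its volumes as user-created.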
newMask = ExportMaskUtils.initializeExportMask(storage, exportGroup, initiators, volumesToAdd, getStoragePortsInPaths(assignments), assignments, maskName, _dbClient);
newMask.setPortGroup(portGroupURI);
List<BlockObject> vols = new ArrayList<BlockObject>();
for (URI boURI : volumesToAdd.keySet()) {
BlockObject bo = BlockObject.fetch(_dbClient, boURI);
vols.add(bo);
}
newMask.addToUserCreatedVolumes(vols);
_dbClient.updateObject(newMask);
_log.info(String.format("Creating new exportMask %s", maskName));
// Make a new TaskCompleter for the exportStep. It has only one subtask.
// This is due to existing requirements in the doExportGroupCreate completion
// logic.
String maskingStep = workflow.createStepId();
ExportTaskCompleter exportTaskCompleter = new ExportMaskChangePortGroupAddMaskCompleter(newMask.getId(), exportGroupURI, maskingStep);
exportTaskCompleter.setExportGroups(exportGroupURIs);
Workflow.Method maskingExecuteMethod = new Workflow.Method("doExportChangePortGroupAddPaths", storageURI, exportGroupURI, newMask.getId(), oldMask.getId(), portGroupURI, exportTaskCompleter);
Workflow.Method maskingRollbackMethod = new Workflow.Method("rollbackExportGroupCreate", storageURI, exportGroupURI, newMask.getId(), maskingStep);
maskingStep = workflow.createStep(EXPORT_GROUP_MASKING_TASK, String.format("Create export mask(%s) to use port group %s", newMask.getMaskName(), portGroup.getNativeGuid()), previousStep, storageURI, storage.getSystemType(), MaskingWorkflowEntryPoints.class, maskingExecuteMethod, maskingRollbackMethod, maskingStep);
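// Add a zoning step to create the SAN zones for the new masking view.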
String zoningStep = workflow.createStepId();
List<URI> masks = new ArrayList<URI>();
masks.add(newMask.getId());
previousStep = generateZoningCreateWorkflow(workflow, maskingStep, exportGroup, masks, volumesToAdd, zoningStep);
}
}
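// Rescan the affected hosts so they discover the paths exposed through the new port group.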
previousStep = _wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
if (waitForApproval) {
// Insert a step that will be suspended. When it resumes, it will re-acquire the lock keys,
// which are released when the workflow suspends.
List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), StringSetUtil.stringSetToUriList(exportGroup.getInitiators()), storageURI);
String suspendMessage = "Adjust/rescan host/cluster paths. Press \"Resume\" to start removal of unnecessary paths. " + "\"Rollback\" will terminate the order and roll back.";
Workflow.Method method = WorkflowService.acquireWorkflowLocksMethod(lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
Workflow.Method rollbackNull = Workflow.NULL_METHOD;
previousStep = workflow.createStep("AcquireLocks", "Suspending for user verification of host/cluster connectivity.", previousStep, storage.getId(), storage.getSystemType(), WorkflowService.class, method, rollbackNull, waitForApproval, null);
workflow.setSuspendedStepMessage(previousStep, suspendMessage);
}
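// Once the new masks are in place (and any suspend step has resumed), delete the original export masks that used the old port group.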
for (ExportMask exportMask : exportMasks) {
previousStep = generateChangePortGroupDeleteMaskWorkflowstep(storageURI, exportGroup, exportMask, previousStep, workflow);
}
_wfUtils.generateHostRescanWorkflowSteps(workflow, hostURIs, previousStep);
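// Execute the plan only if steps were actually added; otherwise mark the task as ready.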
if (!workflow.getAllStepStatus().isEmpty()) {
_log.info("The change port group workflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
// update ExportChangePortGroupCompleter with affected export groups
Set<URI> affectedExportGroups = new HashSet<URI>();
for (ExportMask mask : exportMasks) {
List<ExportGroup> assocExportGroups = ExportMaskUtils.getExportGroups(_dbClient, mask);
for (ExportGroup eg : assocExportGroups) {
affectedExportGroups.add(eg.getId());
}
}
taskCompleter.setAffectedExportGroups(affectedExportGroups);
workflow.executePlan(taskCompleter, "Change port group successfully.");
_workflowService.markWorkflowBeenCreated(token, workflowKey);
} else {
taskCompleter.ready(_dbClient);
}
} catch (Exception e) {
_log.error("Export change port group Orchestration failed.", e);
if (taskCompleter != null) {
ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
taskCompleter.error(_dbClient, serviceError);
}
}
}
Use of com.emc.storageos.volumecontroller.impl.smis.SmisStorageDevice in project coprhd-controller by CoprHD.
The class SmisAbstractCreateVolumeJob, method addVolumesToConsistencyGroup.
/**
* This method will redirect to the appropriate SmisStorageDevice object to make the
* call to add the volumes to the consistency group. This operation should be done
* after the volumes have been successfully created (i.e., there's a deviceNativeId
* for each volume).
*
* @param jobContext [required] - JobContext object
* @param volumesIds [required] - Volumes to add
* @throws DeviceControllerException
*/
private void addVolumesToConsistencyGroup(JobContext jobContext, List<URI> volumesIds) throws DeviceControllerException {
if (volumesIds == null || volumesIds.isEmpty()) {
return;
}
try {
final DbClient dbClient = jobContext.getDbClient();
// Get volumes from database
final List<Volume> volumes = dbClient.queryObject(Volume.class, volumesIds);
// All the volumes will be in the same consistency group
final URI consistencyGroupId = volumes.get(0).getConsistencyGroup();
BlockConsistencyGroup consistencyGroup = null;
if (consistencyGroupId != null) {
// Get consistency group and storage system from database
consistencyGroup = dbClient.queryObject(BlockConsistencyGroup.class, consistencyGroupId);
}
if (consistencyGroup == null) {
_log.info(String.format("Skipping step addVolumesToConsistencyGroup: volumes %s do not reference a consistency group.", volumesIds.toString()));
return;
}
final StorageSystem storage = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
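// Resolve the SmisStorageDevice bean registered for this storage system; it will make the actual SMI-S call.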
final SmisStorageDevice storageDevice = (SmisStorageDevice) ControllerServiceImpl.getBean(SmisCommandHelper.getSmisStorageDeviceName(storage));
// Add the new volumes to the consistency group unless:
// 1. The volume is a RP+VPlex target/journal backing volume
// 2. The volume does not have a ReplicationGroupInstance field
List<Volume> volumesToAddToCG = new ArrayList<Volume>();
String rgName = null;
for (URI volumeId : volumesIds) {
Volume volume = dbClient.queryObject(Volume.class, volumeId);
if (!RPHelper.isAssociatedToRpVplexType(volume, dbClient, PersonalityTypes.TARGET, PersonalityTypes.METADATA) && NullColumnValueGetter.isNotNullValue(volume.getReplicationGroupInstance())) {
rgName = volume.getReplicationGroupInstance();
volumesToAddToCG.add(volume);
} else {
_log.info(String.format("Skipping step addVolumesToConsistencyGroup: Volume %s (%s) does not reference an existing consistency group on array %s.", volume.getLabel(), volume.getId(), volume.getStorageController()));
}
}
if (volumesToAddToCG.isEmpty()) {
_log.info("Skipping step addVolumesToConsistencyGroup: Volumes are not part of a consistency group");
return;
}
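// Delegate to the SMI-S device to add the qualifying volumes to the consistency group on the array.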
storageDevice.addVolumesToConsistencyGroup(storage, consistencyGroup, volumesToAddToCG, rgName, getTaskCompleter());
} catch (Exception e) {
_log.error("Problem making SMI-S call: ", e);
ServiceError error = DeviceControllerErrors.smis.unableToCallStorageProvider(e.getMessage());
getTaskCompleter().error(jobContext.getDbClient(), error);
}
}