Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
The class ScaleIOMaskingOrchestrator, method exportGroupCreate.
@Override
public void exportGroupCreate(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, Map<URI, Integer> volumeMap, String token) throws Exception {
ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
try {
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
// Set up workflow steps.
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupCreate", true, token);
// Create a mapping of ExportMasks to Add Volumes to or
// add to a list of new Exports to create
Map<URI, Map<URI, Integer>> exportMaskToVolumesToAdd = new HashMap<>();
List<URI> newInitiators = new ArrayList<>();
List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
for (Initiator initiator : initiators) {
List<ExportMask> exportMasks = ExportUtils.getInitiatorExportMasks(initiator, _dbClient);
if (exportMasks == null || exportMasks.isEmpty()) {
newInitiators.add(initiator.getId());
} else {
for (ExportMask exportMask : exportMasks) {
exportMaskToVolumesToAdd.put(exportMask.getId(), volumeMap);
}
}
}
Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, newInitiators);
log.info(String.format("Need to create ExportMasks for these compute resources %s", Joiner.on(',').join(computeResourceToInitiators.entrySet())));
// Create new ExportMasks for the compute resources, since there aren't any already existing ExportMasks for them.
for (Map.Entry<String, List<URI>> toCreate : computeResourceToInitiators.entrySet()) {
generateExportMaskCreateWorkflow(workflow, null, storage, exportGroup, toCreate.getValue(), volumeMap, token);
}
log.info(String.format("Need to add volumes for these ExportMasks %s", exportMaskToVolumesToAdd.entrySet()));
// Add the requested volumes to the ExportMasks that already exist for these initiators.
for (Map.Entry<URI, Map<URI, Integer>> toAddVolumes : exportMaskToVolumesToAdd.entrySet()) {
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toAddVolumes.getKey());
generateExportMaskAddVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, toAddVolumes.getValue(), null);
}
String successMessage = String.format("ExportGroup successfully applied for StorageArray %s", storage.getLabel());
workflow.executePlan(taskCompleter, successMessage);
} else {
taskCompleter.ready(_dbClient);
}
} catch (DeviceControllerException dex) {
taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupCreate", dex.getMessage()));
} catch (Exception ex) {
_log.error("ExportGroup Orchestration failed.", ex);
taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupCreate", ex.getMessage()));
}
}
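The loop above splits the requested initiators into two buckets: initiators with no ExportMask at all, for which new masks are created per compute resource, and existing masks that only need the requested volumes added. A minimal sketch of that routing decision as a standalone helper (the helper name and its placement are hypothetical, not part of the CoprHD source):
// Hypothetical helper: route one initiator either to the "needs a new mask" list,
// or queue the requested volumes against every mask it already belongs to.
private void routeInitiator(Initiator initiator,
        Map<URI, Integer> volumeMap,
        List<URI> initiatorsNeedingNewMask,
        Map<URI, Map<URI, Integer>> masksNeedingVolumes) {
    List<ExportMask> masks = ExportUtils.getInitiatorExportMasks(initiator, _dbClient);
    if (masks == null || masks.isEmpty()) {
        initiatorsNeedingNewMask.add(initiator.getId());
    } else {
        for (ExportMask mask : masks) {
            masksNeedingVolumes.put(mask.getId(), volumeMap);
        }
    }
}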
Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
The class ScaleIOMaskingOrchestrator, method exportGroupRemoveInitiators.
@Override
public void exportGroupRemoveInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
/*
* foreach ScaleIO volume in ExportGroup
* foreach initiator in list
* if volume not used in another ExportGroup with same initiator
* scli unmap --volume volid --sdc initiator.sdcid
*/
ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
try {
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
if (initiatorURIs != null && !initiatorURIs.isEmpty() && exportGroup != null && exportGroup.getExportMasks() != null) {
// Set up workflow steps.
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupRemoveInitiators", true, token);
// Create a mapping of ExportMask URI to initiators to remove
Map<URI, List<URI>> exportToInitiatorsToRemove = new HashMap<>();
Map<URI, List<URI>> exportToVolumesToRemove = new HashMap<>();
Map<URI, Integer> volumeMap = null;
List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup);
for (ExportMask exportMask : exportMasks) {
if (exportMask == null) {
continue;
}
for (URI initiatorURI : initiatorURIs) {
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
if (initiator == null || !exportMask.hasInitiator(initiatorURI.toString())) {
continue;
}
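// If this ExportGroup is the only one referencing the initiator, the initiator itself
// can be removed from the mask; otherwise only remove the volumes that no other
// ExportGroup exports through this initiator.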
if (ExportUtils.getInitiatorExportGroups(initiator, _dbClient).size() == 1) {
List<URI> initiators = exportToInitiatorsToRemove.get(exportMask.getId());
if (initiators == null) {
initiators = new ArrayList<>();
exportToInitiatorsToRemove.put(exportMask.getId(), initiators);
}
initiators.add(initiatorURI);
} else {
if (volumeMap == null) {
volumeMap = ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
}
List<URI> volumeURIs = exportToVolumesToRemove.get(exportMask.getId());
if (volumeURIs == null) {
volumeURIs = new ArrayList<>();
exportToVolumesToRemove.put(exportMask.getId(), volumeURIs);
}
for (URI volumeURI : volumeMap.keySet()) {
// Only add to the remove list for the ExportMask if
// the EM is not being shared with another ExportGroup
Integer count = ExportUtils.getNumberOfExportGroupsWithVolume(initiator, volumeURI, _dbClient);
if (count == 1) {
volumeURIs.add(volumeURI);
}
}
}
}
}
// Generate the remove initiators steps for the entries that were determined above
for (Map.Entry<URI, List<URI>> toRemoveInits : exportToInitiatorsToRemove.entrySet()) {
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toRemoveInits.getKey());
if (exportMask != null) {
List<URI> removeInitURIs = toRemoveInits.getValue();
List<String> exportMaskInitiatorURIs = new ArrayList<>(exportMask.getInitiators());
for (URI uri : removeInitURIs) {
exportMaskInitiatorURIs.remove(uri.toString());
}
if (exportMaskInitiatorURIs.isEmpty()) {
log.info(String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
generateExportMaskDeleteWorkflow(workflow, null, storage, exportGroup, exportMask, null, null, null);
} else {
log.info(String.format("Adding step to remove initiators %s from ExportMask %s", Joiner.on(',').join(removeInitURIs), exportMask.getMaskName()));
generateExportMaskRemoveInitiatorsWorkflow(workflow, null, storage, exportGroup, exportMask, null, removeInitURIs, true);
}
}
}
// Generate the remove volumes steps for initiators that are being removed from an ExportGroup that contains more than one host/initiator
for (Map.Entry<URI, List<URI>> toRemoveVols : exportToVolumesToRemove.entrySet()) {
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toRemoveVols.getKey());
List<URI> removeVolumeURIs = toRemoveVols.getValue();
if (exportMask != null && !removeVolumeURIs.isEmpty()) {
List<String> exportMaskVolumeURIs = new ArrayList<>(exportMask.getVolumes().keySet());
for (URI uri : removeVolumeURIs) {
exportMaskVolumeURIs.remove(uri.toString());
}
if (exportMaskVolumeURIs.isEmpty()) {
log.info(String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
generateExportMaskDeleteWorkflow(workflow, null, storage, exportGroup, exportMask, null, null, null);
} else {
log.info(String.format("Adding step to remove volumes %s from ExportMask %s", Joiner.on(',').join(removeVolumeURIs), exportMask.getMaskName()));
generateExportMaskRemoveVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, removeVolumeURIs, null, null);
}
}
}
String successMessage = String.format("ExportGroup remove initiators successfully applied for StorageArray %s", storage.getLabel());
workflow.executePlan(taskCompleter, successMessage);
} else {
taskCompleter.ready(_dbClient);
}
} catch (DeviceControllerException dex) {
taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupRemoveInitiators", dex.getMessage()));
} catch (Exception ex) {
_log.error("ExportGroup Orchestration failed.", ex);
taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation("exportGroupRemoveInitiators", ex.getMessage()));
}
}
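Both accumulation maps in the method above use a get-then-put pattern keyed by the ExportMask URI. If this were being reworked, Map.computeIfAbsent would express the same bookkeeping more compactly; a minimal sketch, not the project's code:
// Equivalent accumulation using computeIfAbsent (sketch only).
exportToInitiatorsToRemove
        .computeIfAbsent(exportMask.getId(), key -> new ArrayList<>())
        .add(initiatorURI);
exportToVolumesToRemove
        .computeIfAbsent(exportMask.getId(), key -> new ArrayList<>())
        .add(volumeURI);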
Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
The class VPlexHDSMaskingOrchestrator, method createOrAddVolumesToExportMask.
/**
* Create an ExportMask on the HDS array if it does not exist. Otherwise, just add the indicated
* volumes to the ExportMask.
*/
@Override
public void createOrAddVolumesToExportMask(URI arrayURI, URI exportGroupURI, URI exportMaskURI, Map<URI, Integer> volumeMap, List<URI> initiatorURIs, TaskCompleter completer, String stepId) {
try {
WorkflowStepCompleter.stepExecuting(stepId);
StorageSystem array = _dbClient.queryObject(StorageSystem.class, arrayURI);
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
// If the ExportMask isn't found or has been deleted, fail and ask the user to retry.
if (exportMask == null || exportMask.getInactive()) {
_log.info(String.format("ExportMask %s deleted or inactive, failing", exportMaskURI));
ServiceError svcerr = VPlexApiException.errors.createBackendExportMaskDeleted(exportMaskURI.toString(), arrayURI.toString());
WorkflowStepCompleter.stepFailed(stepId, svcerr);
return;
}
// Protect concurrent operations by locking on the {host, array} tuple.
// Lock will be released when workflow step completes.
List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroupType.Host, StringSetUtil.stringSetToUriList(exportMask.getInitiators()), arrayURI);
getWorkflowService().acquireWorkflowStepLocks(stepId, lockKeys, LockTimeoutValue.get(LockType.VPLEX_BACKEND_EXPORT));
// Fetch the Initiators
List<Initiator> initiators = new ArrayList<Initiator>();
for (String initiatorId : exportMask.getInitiators()) {
Initiator initiator = _dbClient.queryObject(Initiator.class, URI.create(initiatorId));
if (initiator != null) {
initiators.add(initiator);
}
}
// Refresh the ExportMask
BlockStorageDevice device = _blockController.getDevice(array.getSystemType());
exportMask = refreshExportMask(array, device, exportMask);
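// The refreshed mask reflects what is actually configured on the array, so the
// create-vs-add decision below is made against current hardware state rather than
// possibly stale database state.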
if (!exportMask.hasAnyVolumes()) {
// We are creating this ExportMask on the hardware! (Maybe not the first time though...)
// Fetch the targets
List<URI> targets = new ArrayList<URI>();
for (String targetId : exportMask.getStoragePorts()) {
targets.add(URI.create(targetId));
}
device.doExportCreate(array, exportMask, volumeMap, initiators, targets, completer);
} else {
device.doExportAddVolumes(array, exportMask, initiators, volumeMap, completer);
}
} catch (Exception ex) {
_log.error("Failed to create or add volumes to export mask for hds: ", ex);
VPlexApiException vplexex = DeviceControllerExceptions.vplex.addStepsForCreateVolumesFailed(ex);
WorkflowStepCompleter.stepFailed(stepId, vplexex);
}
}
Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
The class VPlexHDSMaskingOrchestrator, method deleteOrRemoveVolumesFromExportMask.
@Override
public void deleteOrRemoveVolumesFromExportMask(URI arrayURI, URI exportGroupURI, URI exportMaskURI, List<URI> volumes, List<URI> initiatorURIs, String stepId) {
ExportTaskCompleter completer = null;
try {
completer = new ExportMaskOnlyRemoveVolumeCompleter(exportGroupURI, exportMaskURI, volumes, stepId);
WorkflowStepCompleter.stepExecuting(stepId);
StorageSystem array = _dbClient.queryObject(StorageSystem.class, arrayURI);
BlockStorageDevice device = _blockController.getDevice(array.getSystemType());
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
// If the exportMask isn't found, or has been deleted, nothing to do.
if (exportMask == null || exportMask.getInactive()) {
_log.info(String.format("ExportMask %s inactive, returning success", exportMaskURI));
completer.ready(_dbClient);
return;
}
// Protect concurrent operations by locking on the {host, array} tuple.
// Lock will be released when workflow step completes.
List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroupType.Host, StringSetUtil.stringSetToUriList(exportMask.getInitiators()), arrayURI);
getWorkflowService().acquireWorkflowStepLocks(stepId, lockKeys, LockTimeoutValue.get(LockType.VPLEX_BACKEND_EXPORT));
// Refresh the ExportMask
exportMask = refreshExportMask(array, device, exportMask);
// Determine if we're deleting the last volume in the mask.
StringMap maskVolumesMap = exportMask.getVolumes();
Set<String> remainingVolumes = new HashSet<String>();
List<URI> passedVolumesInMask = new ArrayList<>(volumes);
if (maskVolumesMap != null) {
remainingVolumes.addAll(maskVolumesMap.keySet());
}
for (URI volume : volumes) {
remainingVolumes.remove(volume.toString());
// Proactively drop any passed-in volumes that are no longer in the mask; the array-level
// validation rejects a request to remove a volume that is not actually present.
if ((maskVolumesMap != null) && (!maskVolumesMap.keySet().contains(volume.toString()))) {
passedVolumesInMask.remove(volume);
}
}
_log.info("passedVolumesInMask : {}", passedVolumesInMask);
_log.info("remainingVolumes : {}", remainingVolumes);
// None of the volumes is in the export mask, so we are done.
if (passedVolumesInMask.isEmpty()) {
_log.info("None of these volumes {} are in export mask {}", volumes, exportMask.forDisplay());
completer.ready(_dbClient);
return;
}
// If it is last volume and there are no existing volumes, delete the ExportMask.
if (remainingVolumes.isEmpty() && !exportMask.hasAnyExistingVolumes()) {
device.doExportDelete(array, exportMask, passedVolumesInMask, initiatorURIs, completer);
} else {
List<Initiator> initiators = null;
if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
}
device.doExportRemoveVolumes(array, exportMask, passedVolumesInMask, initiators, completer);
}
} catch (Exception ex) {
_log.error("Failed to delete or remove volumes to export mask for hds: ", ex);
VPlexApiException vplexex = DeviceControllerExceptions.vplex.addStepsForCreateVolumesFailed(ex);
completer.error(_dbClient, vplexex);
}
}
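The volume bookkeeping in this method, and in the nearly identical VMAX variant that follows, could be expressed as two small pure helpers. A minimal sketch with hypothetical names (using java.net.URI and the java.util collections), shown only to make the rule explicit:
// volumesInMask: the requested volumes that are actually present in the mask.
static List<URI> volumesInMask(Set<String> maskVolumeIds, List<URI> requested) {
    List<URI> present = new ArrayList<>();
    for (URI volume : requested) {
        if (maskVolumeIds.contains(volume.toString())) {
            present.add(volume);
        }
    }
    return present;
}
// remainingAfterRemoval: the mask's volumes that would be left after the removal.
static Set<String> remainingAfterRemoval(Set<String> maskVolumeIds, List<URI> requested) {
    Set<String> remaining = new HashSet<>(maskVolumeIds);
    for (URI volume : requested) {
        remaining.remove(volume.toString());
    }
    return remaining;
}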
Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
The class VPlexVmaxMaskingOrchestrator, method deleteOrRemoveVolumesFromExportMask.
@Override
public void deleteOrRemoveVolumesFromExportMask(URI arrayURI, URI exportGroupURI, URI exportMaskURI, List<URI> volumes, List<URI> initiatorURIs, String stepId) {
ExportTaskCompleter completer = null;
try {
completer = new ExportMaskOnlyRemoveVolumeCompleter(exportGroupURI, exportMaskURI, volumes, stepId);
WorkflowStepCompleter.stepExecuting(stepId);
StorageSystem array = _dbClient.queryObject(StorageSystem.class, arrayURI);
BlockStorageDevice device = _blockController.getDevice(array.getSystemType());
ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskURI);
// If the exportMask isn't found, or has been deleted, nothing to do.
if (exportMask == null || exportMask.getInactive()) {
_log.info(String.format("ExportMask %s inactive, returning success", exportMaskURI));
completer.ready(_dbClient);
return;
}
// Protect concurrent operations by locking on the {host, array} tuple.
// Lock will be released when workflow step completes.
List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroupType.Host, StringSetUtil.stringSetToUriList(exportMask.getInitiators()), arrayURI);
getWorkflowService().acquireWorkflowStepLocks(stepId, lockKeys, LockTimeoutValue.get(LockType.VPLEX_BACKEND_EXPORT));
// Refresh the ExportMask
exportMask = refreshExportMask(array, device, exportMask);
// Determine if we're deleting the last volume in the mask.
StringMap maskVolumesMap = exportMask.getVolumes();
Set<String> remainingVolumes = new HashSet<String>();
List<URI> passedVolumesInMask = new ArrayList<>(volumes);
if (maskVolumesMap != null) {
remainingVolumes.addAll(maskVolumesMap.keySet());
}
for (URI volume : volumes) {
remainingVolumes.remove(volume.toString());
// Proactively drop any passed-in volumes that are no longer in the mask; the array-level
// validation rejects a request to remove a volume that is not actually present.
if ((maskVolumesMap != null) && (!maskVolumesMap.keySet().contains(volume.toString()))) {
passedVolumesInMask.remove(volume);
}
}
InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_043);
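// Test hook: injects an artificial failure at this point when the corresponding
// failure-injection property is configured; otherwise it is a no-op.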
// None of the volumes is in the export mask, so we are done.
if (passedVolumesInMask.isEmpty()) {
_log.info("None of these volumes {} are in export mask {}", volumes, exportMask.forDisplay());
completer.ready(_dbClient);
return;
}
// If it is last volume and there are no existing volumes, delete the ExportMask.
if (remainingVolumes.isEmpty() && !exportMask.hasAnyExistingVolumes()) {
device.doExportDelete(array, exportMask, passedVolumesInMask, initiatorURIs, completer);
} else {
List<Initiator> initiators = null;
if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
}
device.doExportRemoveVolumes(array, exportMask, passedVolumesInMask, initiators, completer);
}
} catch (Exception ex) {
_log.error("Failed to delete or remove volumes to export mask for vmax: ", ex);
VPlexApiException vplexex = DeviceControllerExceptions.vplex.addStepsForCreateVolumesFailed(ex);
completer.error(_dbClient, vplexex);
}
}
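With helpers like the ones sketched after the HDS variant, the tail of both deleteOrRemoveVolumesFromExportMask methods reduces to a single decision: nothing present in the mask means the step is already done; an empty remainder with no externally created volumes means the whole mask can be deleted; otherwise only the volumes actually in the mask are removed. A compacted sketch under those assumptions, not the project's code:
// Shared decision at the end of both delete/remove methods (sketch only).
Set<String> maskVolumeIds = exportMask.getVolumes() != null
        ? exportMask.getVolumes().keySet() : Collections.<String>emptySet();
List<URI> present = volumesInMask(maskVolumeIds, volumes);
Set<String> remaining = remainingAfterRemoval(maskVolumeIds, volumes);
if (present.isEmpty()) {
    completer.ready(_dbClient); // nothing left to do
} else if (remaining.isEmpty() && !exportMask.hasAnyExistingVolumes()) {
    device.doExportDelete(array, exportMask, present, initiatorURIs, completer);
} else {
    List<Initiator> initiators = (initiatorURIs == null || initiatorURIs.isEmpty())
            ? null : _dbClient.queryObject(Initiator.class, initiatorURIs);
    device.doExportRemoveVolumes(array, exportMask, present, initiators, completer);
}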
Aggregations