Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class HDSMaskingOrchestrator, method exportGroupAddVolumes.
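This method first tries to place the requested volumes into export masks that already exist for the group's hosts, adding each volume only where it is missing; if no existing mask can absorb them, it falls back to creating a new mask per compute resource. Zoning steps are generated alongside the masking steps.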
@Override
public void exportGroupAddVolumes(URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        BlockStorageDevice device = getDevice();
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        boolean anyVolumesAdded = false;
        boolean createdNewMask = false;
        if (exportGroup != null && exportGroup.getExportMasks() != null) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddVolumes", true, token);
            List<ExportMask> exportMasksToZoneAddVolumes = new ArrayList<ExportMask>();
            List<URI> volumesToZoneAddVolumes = new ArrayList<URI>();
            List<URI> exportMasksToZoneCreate = new ArrayList<URI>();
            Map<URI, Integer> volumesToZoneCreate = new HashMap<URI, Integer>();
            // Add the volumes to all the ExportMasks that are contained in the
            // ExportGroup. The volumes should be added only if they don't
            // already exist in the ExportMask.
            Collection<URI> initiatorURIs = Collections2.transform(exportGroup.getInitiators(), CommonTransformerFunctions.FCTN_STRING_TO_URI);
            List<URI> hostURIs = new ArrayList<URI>();
            Map<String, URI> portNameToInitiatorURI = new HashMap<String, URI>();
            List<String> portNames = new ArrayList<String>();
            processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);
            // We always want to have the full list of initiators for the hosts involved in
            // this export. This will allow the export operation to always find any
            // existing exports for a given host.
            queryHostInitiatorsAndAddToList(portNames, portNameToInitiatorURI, initiatorURIs, hostURIs);
            Map<String, Set<URI>> foundMatches = device.findExportMasks(storage, portNames, false);
            Set<String> checkMasks = mergeWithExportGroupMaskURIs(exportGroup, foundMatches.values());
            Set<String> storagePortURIsAssociatedWithVArrayAndStorageArray = ExportMaskUtils.getStoragePortUrisAssociatedWithVarrayAndStorageArray(storageURI, exportGroup.getVirtualArray(), _dbClient);
            for (String maskURIStr : checkMasks) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, URI.create(maskURIStr));
                // Check if there are any storage ports in the mask which are part
                // of the varray; if none are found, discard this mask.
                if (Sets.intersection(storagePortURIsAssociatedWithVArrayAndStorageArray, exportMask.getStoragePorts()).isEmpty()) {
                    continue;
                }
                _log.info(String.format("Checking mask %s", exportMask.getMaskName()));
                if (!exportMask.getInactive() && exportMask.getStorageDevice().equals(storageURI)) {
                    // Refresh the mask at the BlockStorageDevice level, so that it
                    // has up-to-date info from the array.
                    exportMask = device.refreshExportMask(storage, exportMask);
                    Map<URI, Integer> volumesToAdd = getVolumesToAdd(volumeMap, exportMask, exportGroup, token);
                    // Unable to get volumesToAdd due to an error condition, so return.
                    if (null == volumesToAdd) {
                        return;
                    }
                    _log.info(String.format("Mask %s, adding volumes %s", exportMask.getMaskName(), Joiner.on(',').join(volumesToAdd.entrySet())));
                    if (volumesToAdd.size() > 0) {
                        exportMasksToZoneAddVolumes.add(exportMask);
                        volumesToZoneAddVolumes.addAll(volumesToAdd.keySet());
                        // Make sure the zoning map is getting updated for user-created masks.
                        updateZoningMap(exportGroup, exportMask, true);
                        generateExportMaskAddVolumesWorkflow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, exportMask, volumesToAdd, null);
                        anyVolumesAdded = true;
                        // If the mask is not yet associated with the ExportGroup, associate it.
                        if (!exportGroup.hasMask(exportMask.getId())) {
                            exportGroup.addExportMask(exportMask.getId());
                            _dbClient.updateObject(exportGroup);
                        }
                    }
                }
            }
            if (!anyVolumesAdded) {
                // Create a new mask only if there are no existing
                // masks and if there are initiators for the export.
                if (!ExportMaskUtils.hasExportMaskForStorageAndVArray(_dbClient, exportGroup, storageURI) && exportGroup.hasInitiators()) {
                    _log.info("No existing masks to which the requested volumes can be added. Creating a new mask");
                    List<URI> initiators = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
                    Map<String, List<URI>> hostInitiatorMap = mapInitiatorsToComputeResource(exportGroup, initiators);
                    if (!hostInitiatorMap.isEmpty()) {
                        for (Map.Entry<String, List<URI>> resourceEntry : hostInitiatorMap.entrySet()) {
                            String computeKey = resourceEntry.getKey();
                            List<URI> computeInitiatorURIs = resourceEntry.getValue();
                            _log.info(String.format("New export masks for %s", computeKey));
                            GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
                            exportMasksToZoneCreate.add(result.getMaskURI());
                            volumesToZoneCreate.putAll(volumeMap);
                        }
                        createdNewMask = true;
                    }
                }
            }
            if (!exportMasksToZoneAddVolumes.isEmpty()) {
                generateZoningAddVolumesWorkflow(workflow, null, exportGroup, exportMasksToZoneAddVolumes, volumesToZoneAddVolumes);
            }
            if (!exportMasksToZoneCreate.isEmpty()) {
                generateZoningCreateWorkflow(workflow, null, exportGroup, exportMasksToZoneCreate, volumesToZoneCreate);
            }
            String successMessage = String.format("Successfully added volumes to export on StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            if (exportGroup.hasInitiators()) {
                _log.info("There are no masks for this export. Need to create anew.");
                List<URI> initiatorURIs = new ArrayList<URI>();
                for (String initiatorURIStr : exportGroup.getInitiators()) {
                    initiatorURIs.add(URI.create(initiatorURIStr));
                }
                // Invoke the export group create operation, which should in turn
                // create workflow operations to create the export for the newly
                // added volume(s).
                exportGroupCreate(storageURI, exportGroupURI, initiatorURIs, volumeMap, token);
                anyVolumesAdded = true;
            } else {
                _log.warn("There are no initiators for export group: " + exportGroup.getLabel());
            }
        }
        if (!anyVolumesAdded && !createdNewMask) {
            taskCompleter.ready(_dbClient);
            _log.info("No volumes pushed to array because either they already exist or there were no initiators added to the export yet.");
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        // TODO: add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
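Stripped of the workflow generation, the ExportGroup bookkeeping above is a query-mutate-update cycle against the DbClient. A minimal sketch of that cycle, reusing only calls shown in the snippet; the helper name associateMaskIfAbsent and its placement inside the orchestrator are hypothetical, not part of CoprHD:

    // Minimal sketch of the ExportGroup bookkeeping used above; the helper
    // name associateMaskIfAbsent is hypothetical, not part of CoprHD.
    private void associateMaskIfAbsent(URI exportGroupURI, URI maskURI) {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        if (exportGroup != null && !exportGroup.hasMask(maskURI)) {
            exportGroup.addExportMask(maskURI);  // mutate the in-memory model
            _dbClient.updateObject(exportGroup); // persist the change
        }
    }

The same pattern (query, null-guard, mutate, updateObject) recurs in every snippet on this page.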
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class HDSMaskingOrchestrator, method exportGroupCreate.
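This is the create path that exportGroupAddVolumes above falls back to when the group has no masks yet; the Javadoc below describes the "adoption" behavior for masking components that already exist on the array.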
/**
 * Create storage-level masking components to support the requested
 * ExportGroup object. This operation is flexible enough to take into
 * account initiators that already exist in some StorageGroup. In such
 * a case, the underlying masking component will be "adopted" by the
 * ExportGroup. Further operations against the "adopted" mask will only
 * allow for addition and removal of those initiators/volumes that were
 * added by a Bourne request. Existing initiators/volumes will be
 * maintained.
 *
 * @param storageURI
 *            - URI referencing underlying storage array
 * @param exportGroupURI
 *            - URI referencing Bourne-level masking, ExportGroup
 * @param initiatorURIs
 *            - List of Initiator URIs
 * @param volumeMap
 *            - Map of Volume URIs to requested Integer HLU values
 * @param token
 *            - Identifier for operation
 * @throws Exception
 */
@Override
public void exportGroupCreate(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        BlockStorageDevice device = getDevice();
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            _log.info("export_create: initiator list non-empty");
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupCreate", true, token);
            // Create two steps: one for zoning, one for the ExportGroup actions.
            // The zoning step is not specific to a single NetworkSystem, as it
            // looks at all the initiators and targets and computes the zones
            // required (which might be on multiple NetworkSystems).
            boolean createdSteps = determineExportGroupCreateSteps(workflow, null, device, storage, exportGroup, initiatorURIs, volumeMap, false, token);
            String zoningStep = generateDeviceSpecificZoningCreateWorkflow(workflow, EXPORT_GROUP_MASKING_TASK, exportGroup, null, volumeMap);
            if (createdSteps && null != zoningStep) {
                // Execute the plan and allow the WorkflowExecutor to fire the
                // taskCompleter.
                String successMessage = String.format("ExportGroup successfully applied for StorageArray %s", storage.getLabel());
                workflow.executePlan(taskCompleter, successMessage);
            }
        } else {
            _log.info("export_create: initiator list is empty");
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        if (taskCompleter != null) {
            taskCompleter.error(_dbClient, DeviceControllerException.errors.vmaxExportGroupCreateError(dex.getMessage()));
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        // TODO: add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
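Both orchestrator methods shown so far share the same completer lifecycle: construct an ExportOrchestrationTask, let workflow.executePlan fire it on success (or call ready when there is nothing to do), and route exceptions to taskCompleter.error. A condensed sketch of that skeleton, with the real step generation elided behind a hypothetical buildSteps helper:

    // Condensed completer-lifecycle skeleton shared by these entry points;
    // buildSteps is a hypothetical placeholder for the real step generation.
    public void exportOperation(URI exportGroupURI, String token) throws Exception {
        ExportOrchestrationTask taskCompleter = null;
        try {
            taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportOperation", true, token);
            boolean createdSteps = buildSteps(workflow); // hypothetical
            if (createdSteps) {
                // WorkflowExecutor fires the completer when the plan finishes.
                workflow.executePlan(taskCompleter, "export operation succeeded");
            } else {
                taskCompleter.ready(_dbClient); // nothing to do
            }
        } catch (Exception ex) {
            if (taskCompleter != null) {
                taskCompleter.error(_dbClient, DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex));
            }
        }
    }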
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class MaskingWorkflowEntryPoints, method doExportGroupRemoveVolumesCleanup.
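This cleanup step runs after volume removal: it drops the removed volumes from the ExportGroup and, for internally created groups that no longer contain any masks, marks the group itself for deletion.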
public void doExportGroupRemoveVolumesCleanup(URI storageURI, URI exportGroupURI, List<URI> volumeURIs, List<URI> initiatorURIs, TaskCompleter taskCompleter, String token) throws ControllerException {
    String call = String.format("doExportGroupRemoveVolumesCleanup(%s, %s, [%s], [%s], %s)", storageURI.toString(), exportGroupURI.toString(), volumeURIs != null ? Joiner.on(',').join(volumeURIs) : "No Volumes", initiatorURIs != null ? Joiner.on(',').join(initiatorURIs) : "No Initiators", taskCompleter.getOpId());
    try {
        WorkflowStepCompleter.stepExecuting(token);
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        exportGroup.removeVolumes(volumeURIs);
        // If this is an internal export group and no masks remain in the
        // export group, delete the export group automatically.
        if (exportGroup.checkInternalFlags(Flag.INTERNAL_OBJECT) && (exportGroup.getExportMasks() == null || exportGroup.getExportMasks().isEmpty())) {
            _dbClient.markForDeletion(exportGroup);
        } else {
            _dbClient.updateObject(exportGroup);
        }
        taskCompleter.ready(_dbClient);
        _log.info(String.format("%s end", call));
    } catch (final Exception e) {
        _log.info(call + " encountered an exception", e);
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        taskCompleter.error(_dbClient, serviceError);
    }
}
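The delete-or-update decision at the end can be read in isolation: only groups flagged INTERNAL_OBJECT are deleted automatically, and only once their last mask is gone. A sketch of that decision under those assumptions (the helper name cleanupGroup is hypothetical):

    // Sketch of the delete-or-update decision; cleanupGroup is hypothetical.
    private void cleanupGroup(ExportGroup exportGroup) {
        boolean internal = exportGroup.checkInternalFlags(Flag.INTERNAL_OBJECT);
        boolean empty = exportGroup.getExportMasks() == null || exportGroup.getExportMasks().isEmpty();
        if (internal && empty) {
            _dbClient.markForDeletion(exportGroup); // auto-delete internal groups once empty
        } else {
            _dbClient.updateObject(exportGroup);    // otherwise just persist the removal
        }
    }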
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class BlockDeviceExportController, method exportGroupChangePortGroup.
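This controller-level entry point builds a master workflow for moving an export group's masks to a new port group, taking host/storage locks before generating any steps.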
@Override
public void exportGroupChangePortGroup(URI systemURI, URI exportGroupURI, URI newPortGroupURI, List<URI> exportMaskURIs, boolean waitForApproval, String opId) {
    _log.info("Received request for change port group. Creating master workflow.");
    ExportTaskCompleter taskCompleter = new ExportOrchestrationUpdateTaskCompleter(exportGroupURI, opId);
    Workflow workflow = null;
    try {
        workflow = _wfUtils.newWorkflow("exportChangePortGroup", false, opId);
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        if (exportGroup == null || exportGroup.getExportMasks() == null) {
            _log.info("No export group or export mask");
            taskCompleter.ready(_dbClient);
            return;
        }
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, systemURI);
        if (exportMasks == null || exportMasks.isEmpty()) {
            _log.info(String.format("No export mask found for this system %s", systemURI));
            taskCompleter.ready(_dbClient);
            return;
        }
        // Acquire all necessary locks for the workflow:
        // for each export group, lock the initiators' hosts and the storage array keys.
        List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient, ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), StringSetUtil.stringSetToUriList(exportGroup.getInitiators()), systemURI);
        boolean acquiredLocks = _wfUtils.getWorkflowService().acquireWorkflowLocks(workflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
        if (!acquiredLocks) {
            _log.error("Change port group could not acquire locks");
            ServiceError serviceError = DeviceControllerException.errors.jobFailedOpMsg("change port group", "Could not acquire workflow lock");
            taskCompleter.error(_dbClient, serviceError);
            return;
        }
        _wfUtils.generateExportGroupChangePortWorkflow(workflow, "change port group", exportGroupURI, newPortGroupURI, exportMaskURIs, waitForApproval);
        if (!workflow.getAllStepStatus().isEmpty()) {
            _log.info("The updateExportWorkflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
            workflow.executePlan(taskCompleter, "Update the export group on all storage systems successfully.");
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception e) {
        _log.error("Unexpected exception: ", e);
        if (workflow != null) {
            _wfUtils.getWorkflowService().releaseAllWorkflowLocks(workflow);
        }
        ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
        taskCompleter.error(_dbClient, serviceError);
    }
}
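The locking idiom here is worth isolating: compute lock keys from the export group's type and initiators, abort the task if the locks cannot be taken, and release them if a later step throws. A trimmed sketch using only the calls from the snippet, with step generation elided behind a hypothetical generateSteps:

    // Trimmed sketch of the lock-acquire / release pattern; generateSteps is hypothetical.
    List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient,
            ExportGroup.ExportGroupType.valueOf(exportGroup.getType()),
            StringSetUtil.stringSetToUriList(exportGroup.getInitiators()), systemURI);
    boolean acquiredLocks = _wfUtils.getWorkflowService().acquireWorkflowLocks(
            workflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
    if (!acquiredLocks) {
        // Fail the task rather than proceed unlocked.
        taskCompleter.error(_dbClient, DeviceControllerException.errors.jobFailedOpMsg(
                "change port group", "Could not acquire workflow lock"));
        return;
    }
    try {
        generateSteps(workflow); // hypothetical
        workflow.executePlan(taskCompleter, "done");
    } catch (Exception e) {
        // Release whatever was taken before surfacing the error.
        _wfUtils.getWorkflowService().releaseAllWorkflowLocks(workflow);
        taskCompleter.error(_dbClient, DeviceControllerException.errors.jobFailed(e));
    }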
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class CephMaskingOrchestrator, method exportGroupDelete.
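The Ceph variant of export group delete walks each mask and either deletes the whole mask (when no other export group shares it) or removes only the volumes that belong exclusively to this group.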
@Override
public void exportGroupDelete(URI storageURI, URI exportGroupURI, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        if (masks != null && !masks.isEmpty()) {
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupDelete", true, token, taskCompleter);
            Map<URI, Integer> volumeMap = ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
            List<URI> volumeURIs = new ArrayList<>(volumeMap.keySet());
            List<URI> initiatorURIs = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
            Map<URI, Map<URI, Integer>> exportMaskToVolumeCount = ExportMaskUtils.mapExportMaskToVolumeShareCount(_dbClient, volumeURIs, initiatorURIs);
            for (ExportMask exportMask : masks) {
                List<URI> exportGroupURIs = new ArrayList<>();
                if (!ExportUtils.isExportMaskShared(_dbClient, exportMask.getId(), exportGroupURIs)) {
                    _log.info(String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
                    generateExportMaskDeleteWorkflow(workflow, null, storage, exportGroup, exportMask, null, null, taskCompleter);
                } else {
                    Map<URI, Integer> volumeToExportGroupCount = exportMaskToVolumeCount.get(exportMask.getId());
                    List<URI> volumesToRemove = new ArrayList<>();
                    for (URI uri : volumeMap.keySet()) {
                        if (volumeToExportGroupCount == null) {
                            continue;
                        }
                        // Remove the volume only if it is not shared with
                        // more than 1 ExportGroup.
                        Integer numExportGroupsVolumeIsIn = volumeToExportGroupCount.get(uri);
                        if (numExportGroupsVolumeIsIn != null && numExportGroupsVolumeIsIn == 1) {
                            volumesToRemove.add(uri);
                        }
                    }
                    if (!volumesToRemove.isEmpty()) {
                        _log.info(String.format("Adding step to remove volumes %s from ExportMask %s", Joiner.on(',').join(volumesToRemove), exportMask.getMaskName()));
                        generateExportMaskRemoveVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, volumesToRemove, null, taskCompleter);
                    }
                }
            }
            String successMessage = String.format("ExportGroup delete successfully completed for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupDelete", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupDelete", ex.getMessage()));
    }
}
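The per-volume filter inside the shared-mask branch can be factored out: a volume is safe to remove from a shared mask only when the share-count map says it belongs to exactly one export group. A sketch of that filter as a standalone helper (the name volumesSafeToRemove is hypothetical):

    // Sketch of the shared-mask volume filter; volumesSafeToRemove is hypothetical.
    private List<URI> volumesSafeToRemove(Map<URI, Integer> volumeMap, Map<URI, Integer> volumeToExportGroupCount) {
        List<URI> volumesToRemove = new ArrayList<>();
        if (volumeToExportGroupCount == null) {
            return volumesToRemove; // no share counts known for this mask
        }
        for (URI uri : volumeMap.keySet()) {
            Integer count = volumeToExportGroupCount.get(uri);
            // Keep the volume in the mask if any other ExportGroup still references it.
            if (count != null && count == 1) {
                volumesToRemove.add(uri);
            }
        }
        return volumesToRemove;
    }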