use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class VNXeMaskingOrchestrator method exportGroupDelete.
@Override
public void exportGroupDelete(URI storageURI, URI exportGroupURI, String token) throws Exception {
    try {
        _log.info(String.format("exportGroupDelete start - Array: %s ExportMask: %s",
                storageURI.toString(), exportGroupURI.toString()));
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        TaskCompleter taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        if (exportGroup == null || exportGroup.getInactive()
                || ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI).isEmpty()) {
            taskCompleter.ready(_dbClient);
            return;
        }
        /**
         * If no export mask is found, nothing to be done. Task will be marked
         * complete by the last real export mask delete completion.
         */
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                "exportGroupDelete", true, token);
        String deleteStep = null;
        for (ExportMask exportMask : exportMasks) {
            refreshExportMask(storage, getDevice(), exportMask);
            deleteStep = generateExportMaskDeleteWorkflow(workflow, deleteStep, storage, exportGroup,
                    exportMask, null, null, null);
        }
        generateZoningDeleteWorkflow(workflow, deleteStep, exportGroup, exportMasks);
        String successMessage = String.format("Export was successfully removed from StorageArray %s",
                storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
        _log.info(String.format("exportGroupDelete end - Array: %s ExportMask: %s",
                storageURI.toString(), exportGroupURI.toString()));
    } catch (Exception e) {
        throw DeviceControllerException.exceptions.exportGroupDeleteFailed(e);
    }
}
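Every usage collected on this page follows the same completer lifecycle: build an ExportOrchestrationTask for the ExportGroup and task token, call ready() when there is nothing to do, hand the completer to Workflow.executePlan() when steps were generated, and report failures through error(). The following is a minimal sketch of that pattern only; the class name, the injected dbClient/workflowService fields, and the import paths are assumed from the usual coprhd-controller layout and are not part of the source shown above.

import java.net.URI;

import com.emc.storageos.db.client.DbClient;
import com.emc.storageos.db.client.model.ExportGroup;
import com.emc.storageos.exceptions.DeviceControllerException;
import com.emc.storageos.svcs.errorhandling.model.ServiceError;
import com.emc.storageos.volumecontroller.TaskCompleter;
import com.emc.storageos.volumecontroller.impl.block.MaskingWorkflowEntryPoints;
import com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask;
import com.emc.storageos.workflow.Workflow;
import com.emc.storageos.workflow.WorkflowService;

// Hypothetical sketch of the completer lifecycle shared by the usages on this page.
class CompleterLifecycleSketch {
    private DbClient dbClient;               // assumed injected
    private WorkflowService workflowService; // assumed injected

    void run(URI exportGroupURI, String token) {
        TaskCompleter completer = new ExportOrchestrationTask(exportGroupURI, token);
        try {
            ExportGroup group = dbClient.queryObject(ExportGroup.class, exportGroupURI);
            if (group == null || group.getInactive()) {
                completer.ready(dbClient); // nothing to do: finish the task immediately
                return;
            }
            Workflow workflow = workflowService.getNewWorkflow(
                    MaskingWorkflowEntryPoints.getInstance(), "exportGroupDelete", true, token);
            // ... add export-mask and zoning steps here, as in the method above ...
            // The workflow executor fires the completer when the plan finishes.
            workflow.executePlan(completer, "Export successfully removed");
        } catch (Exception e) {
            ServiceError error = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
            completer.error(dbClient, error); // surface the failure on the same completer
        }
    }
}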
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class VNXeMaskingOrchestrator method exportGroupCreate.
/**
 * Create storage level masking components to support the requested
 * ExportGroup object. An ExportMask will be created for each host.
 *
 * @param storageURI
 *            - URI referencing underlying storage array
 * @param exportGroupURI
 *            - URI referencing Bourne-level masking, ExportGroup
 * @param initiatorURIs
 *            - List of Initiator URIs
 * @param volumeMap
 *            - Map of Volume URIs to requested Integer HLU values
 * @param token
 *            - Identifier for the operation
 * @throws Exception
 */
@Override
public void exportGroupCreate(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs,
        Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = null;
    try {
        BlockStorageDevice device = getDevice();
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            _log.info("export_create: initiator list non-empty");
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                    "exportGroupCreate", true, token);
            // Create two steps: one for zoning, one for the ExportGroup actions.
            // The zoning step is not specific to a single NetworkSystem, as it looks at
            // all the initiators and targets and computes the zones required (which
            // might be on multiple NetworkSystems).
            boolean createdSteps = determineExportGroupCreateSteps(workflow, null, device, storage,
                    exportGroup, initiatorURIs, volumeMap, token);
            String zoningStep = generateZoningCreateWorkflow(workflow, EXPORT_GROUP_MASKING_TASK,
                    exportGroup, null, volumeMap);
            if (createdSteps) {
                // Execute the plan and allow the WorkflowExecutor to fire the
                // taskCompleter.
                String successMessage = String.format(
                        "ExportGroup successfully applied for StorageArray %s", storage.getLabel());
                workflow.executePlan(taskCompleter, successMessage);
            }
        } else {
            _log.info("export_create: initiator list is empty");
            taskCompleter.ready(_dbClient);
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        // TODO add service code here
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
            taskCompleter.error(_dbClient, serviceError);
        }
    }
}
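The Javadoc above describes the shape of the arguments; a hypothetical caller would assemble them roughly as follows. The URNs, the HLU value, the task token, and the helper method name are placeholders for illustration; only the exportGroupCreate signature comes from the method above, and the import path for VNXeMaskingOrchestrator is assumed from the usual coprhd-controller layout.

import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.emc.storageos.volumecontroller.impl.block.VNXeMaskingOrchestrator;

// Hypothetical caller-side sketch for exportGroupCreate.
void invokeExportGroupCreate(VNXeMaskingOrchestrator orchestrator) throws Exception {
    URI storageURI = URI.create("urn:storageos:StorageSystem:example:");
    URI exportGroupURI = URI.create("urn:storageos:ExportGroup:example:");
    List<URI> initiatorURIs = Arrays.asList(URI.create("urn:storageos:Initiator:example:"));

    // Map each volume URI to its requested HLU (assuming the Integer value is the HLU;
    // a negative value is commonly used to let the array auto-assign one).
    Map<URI, Integer> volumeMap = new HashMap<>();
    volumeMap.put(URI.create("urn:storageos:Volume:example:"), 1);

    String token = "task-id-from-api-layer"; // opaque operation identifier
    orchestrator.exportGroupCreate(storageURI, exportGroupURI, initiatorURIs, volumeMap, token);
}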
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class VNXeMaskingOrchestrator method exportGroupRemoveInitiators.
@Override
public void exportGroupRemoveInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs,
        String token) throws Exception {
    ExportTaskCompleter taskCompleter = null;
    try {
        List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
        _log.info(String.format("exportRemoveInitiator start - Array: %s ExportMask: %s Initiator: %s",
                storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(initiatorURIs)));
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        // Set up workflow steps.
        Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                "exportGroupRemoveInitiators", true, token);
        /**
         * The export mask must exist since both the volume and the initiator exist.
         */
        Map<ExportMask, List<Initiator>> exportMasksMap = getInitiatorExportMasks(initiators, _dbClient,
                exportGroup, storageURI);
        Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
        for (Entry<ExportMask, List<Initiator>> entry : exportMasksMap.entrySet()) {
            ExportMask mask = entry.getKey();
            List<Initiator> inits = entry.getValue();
            List<URI> initURIList = new ArrayList<URI>();
            for (Initiator init : inits) {
                initURIList.add(init.getId());
            }
            maskToInitiatorsMap.put(mask.getId(), initURIList);
        }
        String deleteStep = null;
        for (ExportMask exportMask : exportMasksMap.keySet()) {
            refreshExportMask(storage, getDevice(), exportMask);
            List<Initiator> inits = exportMasksMap.get(exportMask);
            if (exportMask.getInitiators().size() == inits.size() && exportMask.getVolumes() != null) {
                _log.info(String.format("deleting the exportMask: %s", exportMask.getId().toString()));
                // The initiator list (initiatorURIs) needs to be provided when deleting an export
                // mask as a result of removing its last initiators.
                deleteStep = generateExportMaskDeleteWorkflow(workflow, deleteStep, storage, exportGroup,
                        exportMask, null, initiatorURIs, null);
            } else {
                Collection<URI> volumeURIs = Collections2.transform(exportMask.getVolumes().keySet(),
                        CommonTransformerFunctions.FCTN_STRING_TO_URI);
                generateExportMaskRemoveInitiatorsWorkflow(workflow, deleteStep, storage, exportGroup,
                        exportMask, new ArrayList<URI>(volumeURIs), initiatorURIs, true);
            }
            _log.info(String.format("exportRemoveInitiator end - Array: %s ExportMask: %s",
                    storageURI.toString(), exportGroupURI.toString()));
        }
        generateZoningRemoveInitiatorsWorkflow(workflow, deleteStep, exportGroup, maskToInitiatorsMap);
        String successMessage = String.format("Initiators successfully removed from export StorageArray %s",
                storage.getLabel());
        workflow.executePlan(taskCompleter, successMessage);
    } catch (Exception e) {
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
            taskCompleter.error(_dbClient, serviceError);
        } else {
            throw DeviceControllerException.exceptions.exportGroupRemoveInitiatorsFailed(e);
        }
    }
}
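The branch inside the loop above encodes one decision per mask: when the initiators being removed account for all of the mask's initiators (and the mask still holds volumes), the whole ExportMask is deleted; otherwise only the given initiators are pulled out. A sketch of that predicate in isolation follows; the helper name is invented for illustration, while getInitiators() and getVolumes() are the accessors used above.

import java.util.List;

import com.emc.storageos.db.client.model.ExportMask;
import com.emc.storageos.db.client.model.Initiator;

// Hypothetical helper mirroring the decision in exportGroupRemoveInitiators:
// delete the mask only when the initiators to remove cover every initiator it
// contains and it still has volumes to clean up.
static boolean shouldDeleteWholeMask(ExportMask exportMask, List<Initiator> initiatorsToRemove) {
    return exportMask.getInitiators().size() == initiatorsToRemove.size()
            && exportMask.getVolumes() != null;
}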
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class ScaleIOMaskingOrchestrator method exportGroupDelete.
@Override
public void exportGroupDelete(URI storageURI, URI exportGroupURI, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        if (masks != null && !masks.isEmpty()) {
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                    "exportGroupDelete", true, token);
            Map<URI, Integer> volumeMap = ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
            List<URI> volumeURIs = new ArrayList<>(volumeMap.keySet());
            List<URI> initiatorURIs = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
            Map<URI, Map<URI, Integer>> exportMaskToVolumeCount =
                    ExportMaskUtils.mapExportMaskToVolumeShareCount(_dbClient, volumeURIs, initiatorURIs);
            for (ExportMask exportMask : masks) {
                List<URI> exportGroupURIs = new ArrayList<>();
                if (!ExportUtils.isExportMaskShared(_dbClient, exportMask.getId(), exportGroupURIs)) {
                    log.info(String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
                    generateExportMaskDeleteWorkflow(workflow, null, storage, exportGroup, exportMask,
                            null, null, null);
                } else {
                    Map<URI, Integer> volumeToExportGroupCount = exportMaskToVolumeCount.get(exportMask.getId());
                    List<URI> volumesToRemove = new ArrayList<>();
                    for (URI uri : volumeMap.keySet()) {
                        if (volumeToExportGroupCount == null) {
                            continue;
                        }
                        // Remove the volume only if it is not shared with
                        // more than 1 ExportGroup
                        Integer numExportGroupsVolumeIsIn = volumeToExportGroupCount.get(uri);
                        if (numExportGroupsVolumeIsIn != null && numExportGroupsVolumeIsIn == 1) {
                            volumesToRemove.add(uri);
                        }
                    }
                    if (!volumesToRemove.isEmpty()) {
                        log.info(String.format("Adding step to remove volumes %s from ExportMask %s",
                                Joiner.on(',').join(volumesToRemove), exportMask.getMaskName()));
                        generateExportMaskRemoveVolumesWorkflow(workflow, null, storage, exportGroup,
                                exportMask, volumesToRemove, null, null);
                    }
                }
            }
            String successMessage = String.format(
                    "ExportGroup delete successfully completed for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
                "exportGroupDelete", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
                "exportGroupDelete", ex.getMessage()));
    }
}
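The shared-mask branch above removes a volume from the mask only when mapExportMaskToVolumeShareCount reports that the volume belongs to exactly one ExportGroup, so deleting this ExportGroup cannot strand a volume that another group still exports. That filter can be read in isolation as follows; the method name is invented for illustration, and the counts map stands in for one entry of exportMaskToVolumeCount.

import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Hypothetical restatement of the volume filter used above: keep a volume for
// removal only when it is referenced by exactly one ExportGroup.
static List<URI> volumesSafeToRemove(Map<URI, Integer> volumeMap, Map<URI, Integer> volumeToExportGroupCount) {
    List<URI> volumesToRemove = new ArrayList<>();
    if (volumeToExportGroupCount == null) {
        return volumesToRemove;
    }
    for (URI volumeURI : volumeMap.keySet()) {
        Integer groupCount = volumeToExportGroupCount.get(volumeURI);
        if (groupCount != null && groupCount == 1) {
            volumesToRemove.add(volumeURI);
        }
    }
    return volumesToRemove;
}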
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
the class ScaleIOMaskingOrchestrator method exportGroupAddInitiators.
@Override
public void exportGroupAddInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs,
        String token) throws Exception {
    /*
     * foreach ExportGroup.volume
     *     if ScaleIO volume
     *         foreach initiator
     *             scli map --volume volid --sdc initiator.sdcid
     */
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                    "exportGroupAddInitiators", true, token);
            Map<String, URI> portNameToInitiatorURI = new HashMap<>();
            List<URI> hostURIs = new ArrayList<>();
            List<String> portNames = new ArrayList<>();
            // Populate the portNames and the mapping of the portNames to Initiator URIs
            processInitiators(exportGroup, initiatorURIs, portNames, portNameToInitiatorURI, hostURIs);
            Map<URI, Integer> volumesToAdd = ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
            List<URI> initiatorURIsToPlace = new ArrayList<>(initiatorURIs);
            Map<String, List<URI>> computeResourceToInitiators =
                    mapInitiatorsToComputeResource(exportGroup, initiatorURIs);
            Set<URI> partialMasks = new HashSet<>();
            Map<String, Set<URI>> initiatorToExport = determineInitiatorToExportMaskPlacements(exportGroup,
                    storageURI, computeResourceToInitiators, Collections.EMPTY_MAP, portNameToInitiatorURI,
                    partialMasks);
            Map<URI, List<URI>> exportToInitiators = toExportMaskToInitiatorURIs(initiatorToExport,
                    portNameToInitiatorURI);
            for (Map.Entry<URI, List<URI>> toAddInitiators : exportToInitiators.entrySet()) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toAddInitiators.getKey());
                if (exportMask == null || exportMask.getInactive()) {
                    continue;
                }
                for (URI toAddInitiator : toAddInitiators.getValue()) {
                    if (!exportMask.hasInitiator(toAddInitiator.toString())) {
                        log.info(String.format("Add step to add initiator %s to ExportMask %s",
                                toAddInitiator.toString(), exportMask.getMaskName()));
                        generateExportMaskAddInitiatorsWorkflow(workflow, null, storage, exportGroup,
                                exportMask, toAddInitiators.getValue(), null, null);
                    } else if (volumesToAdd != null && volumesToAdd.size() > 0) {
                        log.info(String.format("Add step to add volumes %s to ExportMask %s",
                                Joiner.on(',').join(volumesToAdd.entrySet()), exportMask.getMaskName()));
                        generateExportMaskAddVolumesWorkflow(workflow, null, storage, exportGroup,
                                exportMask, volumesToAdd, null);
                    }
                    initiatorURIsToPlace.remove(toAddInitiator);
                }
            }
            // If any initiators could not be placed into an existing ExportMask
            // previously, add them now by creating new ExportMasks for their
            // compute resources.
            if (!initiatorURIsToPlace.isEmpty() && volumesToAdd != null) {
                Map<String, List<URI>> newComputeResources =
                        mapInitiatorsToComputeResource(exportGroup, initiatorURIsToPlace);
                log.info(String.format("Need to create ExportMasks for these compute resources %s",
                        Joiner.on(',').join(newComputeResources.entrySet())));
                for (Map.Entry<String, List<URI>> toCreate : newComputeResources.entrySet()) {
                    generateExportMaskCreateWorkflow(workflow, null, storage, exportGroup,
                            toCreate.getValue(), volumesToAdd, null);
                }
            }
            String successMessage = String.format(
                    "ExportGroup add initiators successfully applied for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
                "exportGroupAddInitiators", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.scaleio.encounteredAnExceptionFromScaleIOOperation(
                "exportGroupAddInitiators", ex.getMessage()));
    }
}
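Several of the helpers used above (mapInitiatorsToComputeResource, determineInitiatorToExportMaskPlacements) work off a grouping of the requested initiators by the host they belong to. The following is a simplified, hypothetical version of that grouping only, assuming Initiator exposes its owning host via getHost(); the helper name and the use of the host URI string as the grouping key are illustrative, not the actual CoprHD implementation, which also handles cluster exports and other cases.

import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.emc.storageos.db.client.model.Initiator;

// Hypothetical sketch of grouping initiators by compute resource (host), the shape
// of data the placement helpers above consume.
static Map<String, List<URI>> groupInitiatorsByHost(List<Initiator> initiators) {
    Map<String, List<URI>> computeResourceToInitiators = new HashMap<>();
    for (Initiator initiator : initiators) {
        String hostKey = String.valueOf(initiator.getHost()); // assumes getHost() returns the host URI
        computeResourceToInitiators
                .computeIfAbsent(hostKey, key -> new ArrayList<>())
                .add(initiator.getId());
    }
    return computeResourceToInitiators;
}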