Example usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the project coprhd-controller by CoprHD, from the class VNXeMaskingOrchestrator, method exportGroupAddInitiators.
@Override
public void exportGroupAddInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
    // Adds the given initiators to the export group on the given array.
    // Initiators whose host already owns an export mask are added to that mask;
    // initiators belonging to hosts with no mask get a brand-new mask (plus zoning).
    TaskCompleter taskCompleter = null;
    try {
        _log.info(String.format("exportAddInitiator start - Array: %s ExportMask: " + "%s Initiator: %s",
                storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(initiatorURIs)));
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        // Group the new initiators by their owning compute resource (host).
        Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorURIs);
        _log.info("initiators : {}", Joiner.on(",").join(computeResourceToInitiators.entrySet()));
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        Map<URI, Integer> volumes = selectExportMaskVolumes(exportGroup, storageURI);
        _log.info("Volumes : {}", Joiner.on(",").join(volumes.keySet()));
        if (!CollectionUtils.isEmpty(exportMasks)) {
            // Refresh all export masks so DB state reflects what is on the array.
            for (ExportMask exportMask : exportMasks) {
                refreshExportMask(storage, getDevice(), exportMask);
            }
            // find the export mask which has the same Host name as the initiator
            // Add the initiator to that export mask
            // Set up workflow steps.
            _log.info("Creating AddInitiators workFlow");
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                    "exportGroupAddInitiators", true, token, taskCompleter);
            // irrespective of cluster name, host will be always present
            Map<String, URI> hostToEMaskGroup = ExportMaskUtils.mapHostToExportMask(_dbClient, exportGroup, storage.getId());
            _log.info("hostsToExportMask : {}", Joiner.on(",").join(hostToEMaskGroup.entrySet()));
            // if export masks are found for the Host, then add initiators to the export mask
            Map<URI, List<URI>> masksToInitiators = new HashMap<URI, List<URI>>();
            String addIniStep = null;
            // BUG FIX: iterate over a snapshot of the key set. The loop body removes
            // entries from computeResourceToInitiators (below), and removing from a map
            // while iterating its live keySet() throws ConcurrentModificationException.
            for (String computeKey : new ArrayList<String>(computeResourceToInitiators.keySet())) {
                URI exportMaskUri = hostToEMaskGroup.get(computeKey);
                if (null != exportMaskUri) {
                    _log.info("Processing export mask {}", exportMaskUri);
                    ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskUri);
                    // Only touch masks that belong to the array we are operating on.
                    if (exportMask.getStorageDevice().equals(storageURI)) {
                        _log.info("Processing export mask {} with expected storage {}", exportMaskUri, storageURI);
                        // AddInitiatorWorkFlow
                        masksToInitiators.put(exportMaskUri, computeResourceToInitiators.get(computeKey));
                        // all masks will be always created by system = true, hence port allocation will happen
                        // NOTE(review): the full initiatorURIs list is passed here rather than the
                        // host-specific computeResourceToInitiators.get(computeKey) — confirm intended.
                        addIniStep = generateExportMaskAddInitiatorsWorkflow(workflow, null, storage, exportGroup, exportMask, initiatorURIs, null, token);
                        // Handled: the remaining entries get new masks created below.
                        computeResourceToInitiators.remove(computeKey);
                    }
                    // NOTE(review): this zoning-step generation sits inside the loop, so it may
                    // run once per compute resource with a growing masksToInitiators map —
                    // confirm the workflow tolerates repeated generation.
                    if (!masksToInitiators.isEmpty()) {
                        generateZoningAddInitiatorsWorkflow(workflow, addIniStep, exportGroup, masksToInitiators);
                    }
                }
            }
            _log.info("Left out initiators : {}", Joiner.on(",").join(computeResourceToInitiators.entrySet()));
            // left out initiator's Host which doesn't have any export mask.
            Map<URI, Map<URI, Integer>> zoneNewMasksToVolumeMap = new HashMap<URI, Map<URI, Integer>>();
            if (!computeResourceToInitiators.isEmpty()) {
                for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
                    String computeKey = resourceEntry.getKey();
                    List<URI> computeInitiatorURIs = resourceEntry.getValue();
                    _log.info(String.format("New export masks for %s", computeKey));
                    GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow,
                            EXPORT_GROUP_ZONING_TASK, storage, exportGroup, computeInitiatorURIs, volumes, token);
                    zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumes);
                }
                if (!zoneNewMasksToVolumeMap.isEmpty()) {
                    List<URI> exportMaskList = new ArrayList<URI>();
                    exportMaskList.addAll(zoneNewMasksToVolumeMap.keySet());
                    // Union of all volumes across the newly created masks for zoning.
                    Map<URI, Integer> overallVolumeMap = new HashMap<URI, Integer>();
                    for (Map<URI, Integer> oneVolumeMap : zoneNewMasksToVolumeMap.values()) {
                        overallVolumeMap.putAll(oneVolumeMap);
                    }
                    generateZoningCreateWorkflow(workflow, null, exportGroup, exportMaskList, overallVolumeMap);
                }
            }
            String successMessage = String.format("Initiators successfully added to export StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            _log.info("export_initiator_add: first initiator, creating a new export");
            // No existing export masks available inexport Group
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                    "exportGroupCreate", true, token, taskCompleter);
            List<URI> exportMasksToZoneCreate = new ArrayList<URI>();
            Map<URI, Integer> volumesToZoneCreate = new HashMap<URI, Integer>();
            for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
                String computeKey = resourceEntry.getKey();
                List<URI> computeInitiatorURIs = resourceEntry.getValue();
                _log.info(String.format("New export masks for %s", computeKey));
                GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow,
                        EXPORT_GROUP_ZONING_TASK, storage, exportGroup, computeInitiatorURIs, volumes, token);
                exportMasksToZoneCreate.add(result.getMaskURI());
                volumesToZoneCreate.putAll(volumes);
            }
            if (!exportMasksToZoneCreate.isEmpty()) {
                generateZoningCreateWorkflow(workflow, null, exportGroup, exportMasksToZoneCreate, volumesToZoneCreate);
            }
            String successMessage = String.format("Initiators successfully added to export StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        }
        _log.info(String.format("exportAddInitiator end - Array: %s ExportMask: %s " + "Initiator: %s",
                storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(initiatorURIs)));
    } catch (Exception e) {
        if (taskCompleter != null) {
            // Fail the orchestration task so the caller sees the error.
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
            taskCompleter.error(_dbClient, serviceError);
        } else {
            // Completer was never created; surface the failure directly.
            throw DeviceControllerException.exceptions.exportGroupAddInitiatorsFailed(e);
        }
    }
}
Example usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the project coprhd-controller by CoprHD, from the class VNXeMaskingOrchestrator, method exportGroupAddVolumes.
@Override
public void exportGroupAddVolumes(URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token) throws Exception {
    // Adds the given volume->HLU map to the export group on the given array.
    // With existing masks: zoning step first, then chained add-volume steps per mask.
    // Without masks: delegate to exportGroupCreate when initiators exist, else no-op.
    ExportTaskCompleter completer = null;
    try {
        _log.info(String.format("exportAddVolume start - Array: %s ExportMask: %s Volume: %s",
                storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(volumeMap.entrySet())));
        ExportGroup group = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem array = _dbClient.queryObject(StorageSystem.class, storageURI);
        completer = new ExportOrchestrationTask(exportGroupURI, token);
        List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, group, storageURI);
        if (CollectionUtils.isEmpty(masks)) {
            // The group exists but no volume was ever exported through it.
            if (group.getInitiators() == null || group.getInitiators().isEmpty()) {
                _log.info("export_volume_add: adding volume, no initiators yet");
                completer.ready(_dbClient);
            } else {
                _log.info("export_volume_add: adding volume, creating a new export");
                List<URI> initiatorList = new ArrayList<URI>();
                for (String initiatorId : group.getInitiators()) {
                    initiatorList.add(URI.create(initiatorId));
                }
                exportGroupCreate(storageURI, exportGroupURI, initiatorList, volumeMap, token);
            }
        } else {
            // Refresh every mask so DB state matches the array before planning steps.
            for (ExportMask mask : masks) {
                refreshExportMask(array, getDevice(), mask);
            }
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                    "exportGroupAddVolumes - Added volumes to existing mask", true, token);
            List<URI> volumeList = new ArrayList<URI>(volumeMap.keySet());
            Collection<URI> initiators = Collections2.transform(group.getInitiators(), CommonTransformerFunctions.FCTN_STRING_TO_URI);
            findAndUpdateFreeHLUsForClusterExport(array, group, new ArrayList<URI>(initiators), volumeMap);
            // Chain each mask's add-volumes step behind the previous one;
            // the first hangs off the zoning step.
            String previousStep = generateZoningAddVolumesWorkflow(workflow, null, group, masks, volumeList);
            for (ExportMask mask : masks) {
                previousStep = generateExportMaskAddVolumesWorkflow(workflow, previousStep, array, group, mask, volumeMap, null);
            }
            workflow.executePlan(completer,
                    String.format("Volumes successfully added to export on StorageArray %s", array.getLabel()));
        }
        _log.info(String.format("exportAddVolume end - Array: %s ExportMask: %s Volume: %s",
                storageURI.toString(), exportGroupURI.toString(), volumeMap.toString()));
    } catch (Exception e) {
        if (completer != null) {
            completer.error(_dbClient, DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e));
        } else {
            throw DeviceControllerException.exceptions.exportGroupAddVolumesFailed(e);
        }
    }
}
Example usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the project coprhd-controller by CoprHD, from the class VPlexDeviceController, method exportGroupChangePathParams.
@Override
public void exportGroupChangePathParams(URI storageURI, URI exportGroupURI, URI blockObjectURI, String token) throws Exception {
    // Rebuilds the export path parameters for one block object across the group's masks.
    ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroupURI, token);
    ExportPathUpdater pathUpdater = new ExportPathUpdater(_dbClient);
    try {
        final String workflowKey = "exportGroupChangePathParams";
        // Idempotency guard: do nothing if this workflow already exists for the task.
        if (_workflowService.hasWorkflowBeenCreated(token, workflowKey)) {
            return;
        }
        Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), workflowKey, true, token);
        ExportGroup group = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem array = _dbClient.queryObject(StorageSystem.class, storageURI);
        BlockObject blockObject = Volume.fetchExportMaskBlockObject(_dbClient, blockObjectURI);
        _log.info(String.format("Changing path parameters for volume %s (%s)", blockObject.getLabel(), blockObject.getId()));
        pathUpdater.generateExportGroupChangePathParamsWorkflow(workflow, _blockScheduler, this, array, group, blockObject, token);
        if (workflow.getAllStepStatus().isEmpty()) {
            // Nothing to do — complete immediately.
            completer.ready(_dbClient);
        } else {
            _log.info("The changePathParams workflow has {} steps. Starting the workflow.", workflow.getAllStepStatus().size());
            workflow.executePlan(completer, "Update the export group on all export masks successfully.");
            _workflowService.markWorkflowBeenCreated(token, workflowKey);
        }
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        failStep(completer, token, DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex));
    }
}
Example usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the project coprhd-controller by CoprHD, from the class VPlexDeviceController, method exportGroupUpdate.
@Override
public void exportGroupUpdate(URI storageURI, URI exportGroupURI, Workflow storageWorkflow, String token) throws Exception {
    // Executes a pre-built update workflow against the export group,
    // wiring it to this service and an orchestration completer.
    TaskCompleter completer = null;
    try {
        _log.info(String.format("exportGroupUpdate start - Array: %s ExportGroup: %s", storageURI.toString(), exportGroupURI.toString()));
        ExportGroup group = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem array = _dbClient.queryObject(StorageSystem.class, storageURI);
        completer = new ExportOrchestrationTask(exportGroupURI, token);
        String successMessage = String.format("ExportGroup %s successfully updated for StorageArray %s",
                group.getLabel(), array.getLabel());
        // The caller-supplied workflow must run under our workflow service.
        storageWorkflow.setService(_workflowService);
        storageWorkflow.executePlan(completer, successMessage);
    } catch (Exception ex) {
        _log.error("ExportGroupUpdate Orchestration failed.", ex);
        if (completer != null) {
            failStep(completer, token, DeviceControllerException.errors.jobFailed(ex));
        } else {
            throw DeviceControllerException.exceptions.exportGroupUpdateFailed(ex);
        }
    }
}
Example usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the project coprhd-controller by CoprHD, from the class VNXeMaskingOrchestrator, method exportGroupRemoveVolumes.
@Override
public void exportGroupRemoveVolumes(URI storageURI, URI exportGroupURI, List<URI> volumes, String token) throws Exception {
    // Removes the given volumes from the export group on the given array.
    // A mask that would be left empty is deleted entirely; otherwise only the
    // volumes are removed from it. Zoning steps follow the mask steps.
    ExportTaskCompleter completer = null;
    try {
        _log.info(String.format("exportRemoveVolume start - Array: %s ExportMask: %s Volume: %s",
                storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(volumes)));
        ExportGroup group = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem array = _dbClient.queryObject(StorageSystem.class, storageURI);
        completer = new ExportOrchestrationTask(exportGroupURI, token);
        List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, group, storageURI);
        if (masks == null || masks.isEmpty()) {
            // Nothing exported yet — just drop the volumes from the group record.
            _log.info("export_volume_remove: no export (initiator should be empty)");
            group.removeVolumes(volumes);
            _dbClient.updateObject(group);
            completer.ready(_dbClient);
        } else {
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(),
                    "exportGroupRemoveVolumes", true, token);
            List<ExportMask> masksToDelete = new ArrayList<ExportMask>();
            List<ExportMask> masksToTrim = new ArrayList<ExportMask>();
            for (ExportMask mask : masks) {
                refreshExportMask(array, getDevice(), mask);
                // Work out which volumes would survive in this mask after removal.
                Set<String> survivors = new HashSet<String>();
                if (mask.getVolumes() != null) {
                    survivors.addAll(mask.getVolumes().keySet());
                }
                for (URI volume : volumes) {
                    survivors.remove(volume.toString());
                }
                // Emptied masks are deleted outright; the rest only lose volumes.
                (survivors.isEmpty() ? masksToDelete : masksToTrim).add(mask);
            }
            if (!masksToDelete.isEmpty()) {
                String lastDeleteStep = null;
                for (ExportMask mask : masksToDelete) {
                    lastDeleteStep = generateExportMaskDeleteWorkflow(workflow, lastDeleteStep, array, group, mask, null, null, null);
                }
                generateZoningDeleteWorkflow(workflow, lastDeleteStep, group, masksToDelete);
            }
            if (!masksToTrim.isEmpty()) {
                String lastRemoveStep = null;
                for (ExportMask mask : masksToTrim) {
                    lastRemoveStep = generateExportMaskRemoveVolumesWorkflow(workflow, lastRemoveStep, array, group, mask, volumes, null, null);
                }
                // NOTE(review): zoning removal is not chained behind the remove-volume
                // steps (predecessor is null), unlike the delete branch — confirm intended.
                generateZoningRemoveVolumesWorkflow(workflow, null, group, masksToTrim, volumes);
            }
            workflow.executePlan(completer,
                    String.format("Volumes successfully unexported from StorageArray %s", array.getLabel()));
        }
        _log.info(String.format("exportRemoveVolume end - Array: %s ExportMask: %s Volume: %s",
                storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(volumes)));
    } catch (Exception e) {
        if (completer != null) {
            completer.error(_dbClient, DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e));
        } else {
            throw DeviceControllerException.exceptions.exportRemoveVolumes(e);
        }
    }
}
Aggregations