Usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the CoprHD project coprhd-controller.
Class XtremIOMaskingOrchestrator, method exportGroupAddVolumes:
/**
 * Adds the volumes in {@code volumeMap} to the export group on an XtremIO array.
 *
 * Three paths, decided by what already exists:
 *  1. Export masks already exist for this array: add the volumes to each matching
 *     mask (zoning step first, then per-mask add-volumes steps).
 *  2. No masks but the export group has initiators: create new masks per compute
 *     resource, then a single zoning-create step.
 *  3. No masks and no initiators: nothing to do on the array; complete immediately.
 *
 * @param storageURI     URI of the XtremIO storage system
 * @param exportGroupURI URI of the export group being modified
 * @param volumeMap      volume URI -> requested HLU
 * @param token          task token used for the orchestration completer
 * @throws Exception rethrown only when the failure occurs before the task
 *         completer is constructed; afterwards errors are reported via the completer
 */
@Override
public void exportGroupAddVolumes(URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token) throws Exception {
    // Completer stays null until created; the catch block uses that to decide
    // between completer.error(...) and rethrowing.
    ExportTaskCompleter taskCompleter = null;
    try {
        log.info(String.format("exportAddVolume start - Array: %s ExportGroup: %s Volume: %s", storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(volumeMap.entrySet())));
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        // Refresh the array-side view of masks before deciding what to build.
        refreshExportMask(storage, getDevice(), null);
        List<URI> initiatorURIs = null;
        if (exportGroup.getInitiators() != null && !exportGroup.getInitiators().isEmpty()) {
            // Export group stores initiators as strings; convert to URIs.
            Collection<URI> initiators = Collections2.transform(exportGroup.getInitiators(), CommonTransformerFunctions.FCTN_STRING_TO_URI);
            initiatorURIs = new ArrayList<URI>(initiators);
            // Resolves HLU conflicts across the cluster before steps are generated.
            findAndUpdateFreeHLUsForClusterExport(storage, exportGroup, initiatorURIs, volumeMap);
        }
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        if (exportMasks != null && !exportMasks.isEmpty()) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddVolumes", true, token);
            // For each export mask in export group, invoke add Volumes if export Mask belongs to the same storage Array
            List<ExportMask> masks = new ArrayList<ExportMask>();
            for (ExportMask exportMask : exportMasks) {
                if (exportMask.getStorageDevice().equals(storageURI)) {
                    log.info("export_volume_add: adding volume to an existing export");
                    masks.add(exportMask);
                }
            }
            List<URI> volumeURIs = new ArrayList<URI>();
            volumeURIs.addAll(volumeMap.keySet());
            // Zoning first; the per-mask add-volume steps below depend on it.
            String zoningStep = generateZoningAddVolumesWorkflow(workflow, null, exportGroup, masks, volumeURIs);
            for (ExportMask mask : masks) {
                List<URI> initiators = StringSetUtil.stringSetToUriList(mask.getInitiators());
                generateExportMaskAddVolumesWorkflow(workflow, zoningStep, storage, exportGroup, mask, volumeMap, initiators);
            }
            String successMessage = String.format("Volumes successfully added to export on StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            if (initiatorURIs != null) {
                log.info("export_volume_add: adding volume, creating a new export");
                // Group Initiators by compute and invoke create Mask
                Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddVolumes - Create a new mask", true, token);
                List<URI> exportMasksToZoneCreate = new ArrayList<URI>();
                Map<URI, Integer> volumesToZoneCreate = new HashMap<URI, Integer>();
                Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorURIs);
                // One new mask per compute resource (host/cluster grouping).
                for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
                    String computeKey = resourceEntry.getKey();
                    List<URI> computeInitiatorURIs = resourceEntry.getValue();
                    log.info(String.format("New export masks for %s", computeKey));
                    // Mask-create steps wait on the zoning task created below
                    // (EXPORT_GROUP_ZONING_TASK is the shared zoning step id).
                    GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, computeInitiatorURIs, volumeMap, token);
                    exportMasksToZoneCreate.add(result.getMaskURI());
                    volumesToZoneCreate.putAll(volumeMap);
                }
                if (!exportMasksToZoneCreate.isEmpty()) {
                    generateZoningCreateWorkflow(workflow, null, exportGroup, exportMasksToZoneCreate, volumesToZoneCreate);
                }
                String successMessage = String.format("Initiators successfully added to export StorageArray %s", storage.getLabel());
                workflow.executePlan(taskCompleter, successMessage);
            } else {
                log.info("export_volume_add: adding volume, no initiators yet");
                // Nothing to export yet; mark the task done.
                taskCompleter.ready(_dbClient);
            }
        }
        log.info(String.format("exportAddVolume end - Array: %s ExportGroup: %s Volume: %s", storageURI.toString(), exportGroupURI.toString(), volumeMap.toString()));
    } catch (Exception e) {
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
            taskCompleter.error(_dbClient, serviceError);
        } else {
            // Failure before the completer existed: surface it to the caller.
            throw DeviceControllerException.exceptions.exportGroupAddVolumesFailed(e);
        }
    }
}
Usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the CoprHD project coprhd-controller.
Class XtremIOMaskingOrchestrator, method exportGroupAddInitiators:
/**
 * Adds the given initiators to the export group on an XtremIO array.
 *
 * If export masks already exist, initiators are routed to the mask belonging to
 * their host; hosts with no mask get a new mask created for the currently
 * exported volumes. If no masks exist at all, new masks are created for every
 * compute resource.
 *
 * @param storageURI     URI of the XtremIO storage system
 * @param exportGroupURI URI of the export group being modified
 * @param initiatorURIs  initiators to add
 * @param token          task token used for the orchestration completer
 * @throws Exception rethrown only when the failure occurs before the task
 *         completer is constructed; afterwards errors are reported via the completer
 */
@Override
public void exportGroupAddInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
    TaskCompleter taskCompleter = null;
    try {
        log.info(String.format("exportAddInitiator start - Array: %s ExportGroup: " + "%s Initiator: %s", storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(initiatorURIs)));
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        refreshExportMask(storage, getDevice(), null);
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorURIs);
        log.info("initiators : {}", Joiner.on(",").join(computeResourceToInitiators.entrySet()));
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        Map<URI, Integer> volumes = selectExportMaskVolumes(exportGroup, storageURI);
        log.info("Volumes : {}", Joiner.on(",").join(volumes.keySet()));
        if (exportMasks != null && !exportMasks.isEmpty()) {
            // find the export mask which has the same Host name as the initiator
            // Add the initiator to that export mask
            // Set up workflow steps.
            log.info("Creating AddInitiators workFlow");
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddInitiators", true, token);
            // irrespective of cluster name, host will be always present
            Map<String, URI> hostToEMaskGroup = ExportMaskUtils.mapHostToExportMask(_dbClient, exportGroup, storage.getId());
            log.info("InitiatorsToHost : {}", Joiner.on(",").join(hostToEMaskGroup.entrySet()));
            // if export masks are found for the Host, then add initiators to the export mask
            Map<URI, List<URI>> masksToInitiators = new HashMap<URI, List<URI>>();
            String addIniStep = null;
            // FIX: iterate over a snapshot of the key set. The original iterated
            // computeResourceToInitiators.keySet() directly while calling remove()
            // inside the loop, which throws ConcurrentModificationException on the
            // first matching mask (HashMap iterators are fail-fast).
            for (String computeKey : new ArrayList<String>(computeResourceToInitiators.keySet())) {
                URI exportMaskUri = hostToEMaskGroup.get(computeKey);
                if (null != exportMaskUri) {
                    log.info("Processing export mask {}", exportMaskUri);
                    ExportMask exportMask = _dbClient.queryObject(ExportMask.class, exportMaskUri);
                    if (exportMask.getStorageDevice().equals(storageURI)) {
                        log.info("Processing export mask {} with expected storage {}", exportMaskUri, storageURI);
                        // AddInitiatorWorkFlow
                        masksToInitiators.put(exportMaskUri, computeResourceToInitiators.get(computeKey));
                        List<URI> volumesInMask = ExportMaskUtils.getUserAddedVolumeURIs(exportMask);
                        // all masks will be always created by system = true, hence port allocation will happen
                        addIniStep = generateExportMaskAddInitiatorsWorkflow(workflow, null, storage, exportGroup, exportMask, initiatorURIs, new HashSet<URI>(volumesInMask), token);
                        // Handled: drop from the map so only unmatched hosts get
                        // new masks below.
                        computeResourceToInitiators.remove(computeKey);
                    }
                }
            }
            // FIX: generate the zoning step once, after all masks are collected.
            // The original invoked this inside the loop, creating a duplicate
            // zoning step for every compute resource that had an existing mask.
            if (!masksToInitiators.isEmpty()) {
                generateZoningAddInitiatorsWorkflow(workflow, addIniStep, exportGroup, masksToInitiators);
            }
            log.info("Left out initiators : {}", Joiner.on(",").join(computeResourceToInitiators.entrySet()));
            // left out initiator's Host which doesn't have any export mask.
            Map<URI, Map<URI, Integer>> zoneNewMasksToVolumeMap = new HashMap<URI, Map<URI, Integer>>();
            if (!computeResourceToInitiators.isEmpty()) {
                for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
                    String computeKey = resourceEntry.getKey();
                    List<URI> computeInitiatorURIs = resourceEntry.getValue();
                    log.info(String.format("New export masks for %s", computeKey));
                    GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, computeInitiatorURIs, volumes, token);
                    zoneNewMasksToVolumeMap.put(result.getMaskURI(), volumes);
                }
                if (!zoneNewMasksToVolumeMap.isEmpty()) {
                    List<URI> exportMaskList = new ArrayList<URI>();
                    exportMaskList.addAll(zoneNewMasksToVolumeMap.keySet());
                    Map<URI, Integer> overallVolumeMap = new HashMap<URI, Integer>();
                    for (Map<URI, Integer> oneVolumeMap : zoneNewMasksToVolumeMap.values()) {
                        overallVolumeMap.putAll(oneVolumeMap);
                    }
                    generateZoningCreateWorkflow(workflow, null, exportGroup, exportMaskList, overallVolumeMap);
                }
            }
            String successMessage = String.format("Initiators successfully added to export StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            log.info("export_initiator_add: first initiator, creating a new export");
            // No existing export masks available in export Group
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupCreate", true, token);
            List<URI> exportMasksToZoneCreate = new ArrayList<URI>();
            Map<URI, Integer> volumesToZoneCreate = new HashMap<URI, Integer>();
            for (Map.Entry<String, List<URI>> resourceEntry : computeResourceToInitiators.entrySet()) {
                String computeKey = resourceEntry.getKey();
                List<URI> computeInitiatorURIs = resourceEntry.getValue();
                log.info(String.format("New export masks for %s", computeKey));
                GenExportMaskCreateWorkflowResult result = generateExportMaskCreateWorkflow(workflow, EXPORT_GROUP_ZONING_TASK, storage, exportGroup, computeInitiatorURIs, volumes, token);
                exportMasksToZoneCreate.add(result.getMaskURI());
                volumesToZoneCreate.putAll(volumes);
            }
            if (!exportMasksToZoneCreate.isEmpty()) {
                generateZoningCreateWorkflow(workflow, null, exportGroup, exportMasksToZoneCreate, volumesToZoneCreate);
            }
            String successMessage = String.format("Initiators successfully added to export StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        }
        log.info(String.format("exportAddInitiator end - Array: %s ExportGroup: %s " + "Initiator: %s", storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(initiatorURIs)));
    } catch (Exception e) {
        log.info("Error", e);
        if (taskCompleter != null) {
            ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
            taskCompleter.error(_dbClient, serviceError);
        } else {
            // Failure before the completer existed: surface it to the caller.
            throw DeviceControllerException.exceptions.exportGroupAddInitiatorsFailed(e);
        }
    }
}
Usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the CoprHD project coprhd-controller.
Class RPDeviceController, method exportGroupUpdate:
/**
 * Completes an export group update against an RP (RecoverPoint) protection system
 * by executing the caller-built workflow.
 *
 * @param storageURI      URI of the protection system
 * @param exportGroupURI  URI of the export group being updated
 * @param storageWorkflow pre-assembled workflow to execute; this method only wires
 *                        in the workflow service and runs the plan
 * @param token           task token used for the orchestration completer
 * @throws Exception rethrown when the failure occurs before the task completer
 *         is constructed; afterwards errors are reported via the completer
 */
@Override
public void exportGroupUpdate(URI storageURI, URI exportGroupURI, Workflow storageWorkflow, String token) throws Exception {
    TaskCompleter taskCompleter = null;
    try {
        _log.info(String.format("exportGroupUpdate start - Array: %s ExportMask: %s", storageURI.toString(), exportGroupURI.toString()));
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        ProtectionSystem storage = _dbClient.queryObject(ProtectionSystem.class, storageURI);
        taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
        String successMessage = String.format("ExportGroup %s successfully updated for StorageArray %s", exportGroup.getLabel(), storage.getLabel());
        storageWorkflow.setService(_workflowService);
        storageWorkflow.executePlan(taskCompleter, successMessage);
    } catch (InternalException e) {
        _log.error("Operation failed with Exception: ", e);
        if (taskCompleter != null) {
            taskCompleter.error(_dbClient, e);
        } else {
            // FIX: the original swallowed failures that occurred before the
            // completer was created (e.g. in queryObject), leaving the task
            // pending forever. Rethrow so the caller can fail the request,
            // matching the pattern used by the other orchestrators.
            throw e;
        }
    } catch (Exception e) {
        _log.error("Operation failed with Exception: ", e);
        if (taskCompleter != null) {
            taskCompleter.error(_dbClient, DeviceControllerException.errors.jobFailed(e));
        } else {
            // FIX: same as above — do not silently drop the failure.
            throw e;
        }
    }
}
Usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the CoprHD project coprhd-controller.
Class CephMaskingOrchestrator, method exportGroupCreate:
/**
 * Creates an export group on a Ceph storage system.
 *
 * Initiators already present in some export mask get the volumes added to those
 * masks; initiators with no mask at all get new masks created, grouped by compute
 * resource. With no initiators there is nothing to do and the task completes.
 *
 * @param storageURI     URI of the Ceph storage system
 * @param exportGroupURI URI of the export group being created
 * @param initiatorURIs  initiators to export to (may be null/empty)
 * @param volumeMap      volume URI -> requested HLU
 * @param token          task token used for the orchestration completer
 * @throws Exception declared by the interface; failures here are reported via the completer
 */
@Override
public void exportGroupCreate(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            // Set up working flow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupCreate", true, token, taskCompleter);
            // Create a mapping of ExportMasks to Add Volumes to or
            // add to a list of new Exports to create
            Map<URI, Map<URI, Integer>> exportMaskToVolumesToAdd = new HashMap<>();
            List<URI> newInitiators = new ArrayList<>();
            List<Initiator> initiators = _dbClient.queryObject(Initiator.class, initiatorURIs);
            for (Initiator initiator : initiators) {
                // NOTE(review): getInitiatorExportMasks is not visibly filtered by
                // storageURI here — confirm masks from other arrays cannot appear.
                List<ExportMask> exportMasks = ExportUtils.getInitiatorExportMasks(initiator, _dbClient);
                if (exportMasks == null || exportMasks.isEmpty()) {
                    // No mask yet for this initiator: a new mask will be created below.
                    newInitiators.add(initiator.getId());
                } else {
                    // Existing masks: schedule the requested volumes to be added.
                    for (ExportMask exportMask : exportMasks) {
                        exportMaskToVolumesToAdd.put(exportMask.getId(), volumeMap);
                    }
                }
            }
            Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, newInitiators);
            _log.info(String.format("Need to create ExportMasks for these compute resources %s", Joiner.on(',').join(computeResourceToInitiators.entrySet())));
            // there aren't any already existing ExportMask for them
            for (Map.Entry<String, List<URI>> toCreate : computeResourceToInitiators.entrySet()) {
                generateExportMaskCreateWorkflow(workflow, null, storage, exportGroup, toCreate.getValue(), volumeMap, token);
            }
            _log.info(String.format("Need to add volumes for these ExportMasks %s", exportMaskToVolumesToAdd.entrySet()));
            // concept ExportMasks.
            for (Map.Entry<URI, Map<URI, Integer>> toAddVolumes : exportMaskToVolumesToAdd.entrySet()) {
                ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toAddVolumes.getKey());
                generateExportMaskAddVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, toAddVolumes.getValue(), null);
            }
            String successMessage = String.format("ExportGroup successfully applied for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            // No initiators: nothing to export; mark the task done.
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupCreate", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupCreate", ex.getMessage()));
    }
}
Usage of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in the CoprHD project coprhd-controller.
Class CephMaskingOrchestrator, method exportGroupAddVolumes:
/**
 * Adds volumes to an existing export group on a Ceph storage system.
 *
 * Volumes are added to every existing mask that has initiators; initiators not
 * covered by any existing mask get new masks created, grouped by compute
 * resource. With no initiators in the group the task completes immediately.
 *
 * @param storageURI     URI of the Ceph storage system
 * @param exportGroupURI URI of the export group being modified
 * @param volumeMap      volume URI -> requested HLU
 * @param token          task token used for the orchestration completer
 * @throws Exception declared by the interface; failures here are reported via the completer
 */
@Override
public void exportGroupAddVolumes(URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        List<URI> initiatorURIs = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
        if (initiatorURIs != null && !initiatorURIs.isEmpty()) {
            // Set up workflow steps.
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddVolumes", true, token, taskCompleter);
            // Create a mapping of ExportMasks to Add Volumes to or
            // add to a list of new Exports to create
            Map<URI, Map<URI, Integer>> exportMaskToVolumesToAdd = new HashMap<>();
            // Start with all initiators; remove those already placed in a mask.
            List<URI> initiatorsToPlace = new ArrayList<>(initiatorURIs);
            // Need to figure out which ExportMasks to add volumes to.
            for (ExportMask exportMask : ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI)) {
                if (exportMask.hasAnyInitiators()) {
                    exportMaskToVolumesToAdd.put(exportMask.getId(), volumeMap);
                    for (String uriString : exportMask.getInitiators()) {
                        URI initiatorURI = URI.create(uriString);
                        initiatorsToPlace.remove(initiatorURI);
                    }
                }
            }
            // ExportMask that need to be newly created because we just added
            // volumes from 'storage' StorageSystem to this ExportGroup
            Map<String, List<URI>> computeResourceToInitiators = mapInitiatorsToComputeResource(exportGroup, initiatorsToPlace);
            if (!computeResourceToInitiators.isEmpty()) {
                _log.info(String.format("Need to create ExportMasks for these compute resources %s", Joiner.on(',').join(computeResourceToInitiators.entrySet())));
                for (Map.Entry<String, List<URI>> toCreate : computeResourceToInitiators.entrySet()) {
                    generateExportMaskCreateWorkflow(workflow, null, storage, exportGroup, toCreate.getValue(), volumeMap, token);
                }
            }
            // We already know about the ExportMask, so we just add volumes to it
            if (!exportMaskToVolumesToAdd.isEmpty()) {
                _log.info(String.format("Need to add volumes for these ExportMasks %s", exportMaskToVolumesToAdd.entrySet()));
                for (Map.Entry<URI, Map<URI, Integer>> toAddVolumes : exportMaskToVolumesToAdd.entrySet()) {
                    ExportMask exportMask = _dbClient.queryObject(ExportMask.class, toAddVolumes.getKey());
                    generateExportMaskAddVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, toAddVolumes.getValue(), null);
                }
            }
            String successMsgTemplate = "ExportGroup add volumes successfully applied for StorageArray %s";
            workflow.executePlan(taskCompleter, String.format(successMsgTemplate, storage.getLabel()));
        } else {
            // No initiators in the group: nothing to do on the array.
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupAddVolumes", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupAddVolumes", ex.getMessage()));
    }
}
Aggregations