use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
In class AbstractBasicMaskingOrchestrator, method exportGroupUpdate:
@Override
public void exportGroupUpdate(URI storageURI, URI exportGroupURI, Workflow storageWorkflow, String token) throws Exception {
TaskCompleter taskCompleter = null;
try {
_log.info(String.format("exportGroupUpdate start - Array: %s ExportGroup: %s", storageURI.toString(), exportGroupURI.toString()));
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
String successMessage = String.format("ExportGroup %s successfully updated for StorageArray %s", exportGroup.getLabel(), storage.getLabel());
storageWorkflow.setService(_workflowService);
storageWorkflow.executePlan(taskCompleter, successMessage);
} catch (Exception ex) {
_log.error("ExportGroupUpdate Orchestration failed.", ex);
if (taskCompleter != null) {
ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
taskCompleter.error(_dbClient, serviceError);
} else {
throw DeviceControllerException.exceptions.exportGroupUpdateFailed(ex);
}
}
}
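The method above shows the completion pattern that recurs in every snippet on this page: build the ExportOrchestrationTask up front, hand it to executePlan() on the happy path, and route any failure through the completer so the task is never left dangling. The fragment below is a minimal sketch of that pattern only, assuming the CoprHD controller types shown above (ExportOrchestrationTask, TaskCompleter, Workflow, ServiceError, DeviceControllerException, DbClient) are on the classpath; the method name runExportWorkflow is hypothetical and imports for the CoprHD types are omitted.

    // Hypothetical helper distilling the completer pattern used in exportGroupUpdate.
    // It would live inside an orchestrator-like class that has access to a DbClient.
    void runExportWorkflow(URI exportGroupURI, String token, Workflow workflow, DbClient dbClient) throws Exception {
        TaskCompleter completer = null;
        try {
            completer = new ExportOrchestrationTask(exportGroupURI, token);
            // executePlan() drives the workflow steps and completes the task on success.
            workflow.executePlan(completer, "export workflow completed");
        } catch (Exception ex) {
            if (completer != null) {
                // Convert the exception into a ServiceError so the task is failed cleanly.
                ServiceError error = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
                completer.error(dbClient, error);
            } else {
                // No completer was created yet; surface the failure to the caller instead.
                throw DeviceControllerException.exceptions.exportGroupUpdateFailed(ex);
            }
        }
    }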
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
In class AbstractDefaultMaskingOrchestrator, method getVolumesToAdd:
/**
 * Determines which of the requested volumes are not already present in the
 * export mask. It also validates that a requested HLU is not already in use
 * by a pre-existing volume in the mask.
 *
 * @param volumeMap
 *            map of volume URI to requested HLU
 * @param exportMask
 *            export mask to check against
 * @param exportGroup
 *            export group
 * @param token
 *            step ID
 * @return map of volume URI to HLU for the volumes that need to be added, or
 *         null if a requested HLU is already taken by an existing volume in the mask
 */
protected Map<URI, Integer> getVolumesToAdd(Map<URI, Integer> volumeMap, ExportMask exportMask, ExportGroup exportGroup, String token) {
Map<URI, Integer> volumesToAdd = new HashMap<URI, Integer>();
for (URI boURI : volumeMap.keySet()) {
BlockObject bo = BlockObject.fetch(_dbClient, boURI);
if (!exportMask.hasExistingVolume(bo.getWWN()) && !exportMask.hasUserAddedVolume(bo.getWWN())) {
URI thisVol = bo.getId();
Integer hlu = volumeMap.get(thisVol);
volumesToAdd.put(thisVol, hlu);
}
// Check if the requested HLU for the volume is
// already taken by a pre-existing volume.
Integer requestedHLU = volumeMap.get(bo.getId());
StringMap existingVolumesInMask = exportMask.getExistingVolumes();
if (existingVolumesInMask != null && requestedHLU.intValue() != ExportGroup.LUN_UNASSIGNED && !ExportGroup.LUN_UNASSIGNED_DECIMAL_STR.equals(requestedHLU.toString()) && existingVolumesInMask.containsValue(requestedHLU.toString())) {
ExportOrchestrationTask completer = new ExportOrchestrationTask(exportGroup.getId(), token);
ServiceError serviceError = DeviceControllerException.errors.exportHasExistingVolumeWithRequestedHLU(boURI.toString(), requestedHLU.toString());
completer.error(_dbClient, serviceError);
return null;
}
}
return volumesToAdd;
}
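The HLU-conflict test in this method compares the requested HLU against the values of the mask's existing-volumes map (volume WWN mapped to HLU string). Below is a standalone sketch of that test using a plain java.util.Map in place of the ExportMask StringMap; the class and method names are illustrative only, and LUN_UNASSIGNED is assumed to be -1 here, mirroring what ExportGroup.LUN_UNASSIGNED represents.

    import java.util.HashMap;
    import java.util.Map;

    public class HluConflictCheck {
        static final int LUN_UNASSIGNED = -1; // assumption: mirrors ExportGroup.LUN_UNASSIGNED

        // True when a specific HLU was requested and a pre-existing volume already uses it.
        static boolean hluConflictsWithExistingVolume(Integer requestedHlu, Map<String, String> existingVolumesInMask) {
            if (requestedHlu == null || requestedHlu == LUN_UNASSIGNED) {
                return false; // no explicit HLU requested, nothing to collide with
            }
            return existingVolumesInMask != null
                    && existingVolumesInMask.containsValue(requestedHlu.toString());
        }

        public static void main(String[] args) {
            Map<String, String> existing = new HashMap<>();
            existing.put("60000970000192602573533030314233", "2"); // WWN -> HLU already on the array
            System.out.println(hluConflictsWithExistingVolume(2, existing)); // true  -> getVolumesToAdd would fail the task
            System.out.println(hluConflictsWithExistingVolume(5, existing)); // false -> volume can be added with HLU 5
        }
    }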
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
In class AbstractMaskingFirstOrchestrator, method exportGroupAddVolumes:
@Override
public void exportGroupAddVolumes(URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token) throws Exception {
ExportTaskCompleter taskCompleter = null;
try {
_log.info(String.format("exportAddVolume START - Array: %s ExportMask: %s Volume: %s", storageURI.toString(), exportGroupURI.toString(), Joiner.on(',').join(volumeMap.entrySet())));
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
createWorkFlowAndSubmitForAddVolumes(storageURI, exportGroupURI, volumeMap, token, taskCompleter, exportGroup, storage);
_log.info(String.format("exportAddVolume END - Array: %s ExportMask: %s Volume: %s", storageURI.toString(), exportGroupURI.toString(), volumeMap.toString()));
} catch (Exception e) {
if (taskCompleter != null) {
ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
taskCompleter.error(_dbClient, serviceError);
} else {
throw DeviceControllerException.exceptions.exportGroupAddVolumesFailed(e);
}
}
}
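The START/END log lines above render the volume map with Guava's Joiner, which prints each entry as "uri=hlu". The short, self-contained example below illustrates that rendering; the volume URIs are made-up placeholders, not real ViPR ids.

    import java.net.URI;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import com.google.common.base.Joiner;

    public class JoinerDemo {
        public static void main(String[] args) {
            Map<URI, Integer> volumeMap = new LinkedHashMap<>();
            volumeMap.put(URI.create("urn:storageos:Volume:vol-1"), 1);
            volumeMap.put(URI.create("urn:storageos:Volume:vol-2"), 2);
            // Prints: urn:storageos:Volume:vol-1=1,urn:storageos:Volume:vol-2=2
            System.out.println(Joiner.on(',').join(volumeMap.entrySet()));
        }
    }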
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
In class VmaxMaskingOrchestrator, method exportGroupRemoveInitiators:
@Override
public void exportGroupRemoveInitiators(URI storageURI, URI exportGroupURI, List<URI> initiatorURIs, String token) throws Exception {
BlockStorageDevice device = getDevice();
ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
StringBuffer errorMessage = new StringBuffer();
logExportGroup(exportGroup, storageURI);
try {
// Set up workflow steps.
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupRemoveInitiators", true, token);
Initiator firstInitiator = _dbClient.queryObject(Initiator.class, initiatorURIs.get(0));
// No need to perform orchestrator-level validation for VPLEX/RP; hence ignoring validation for VPLEX/RP initiators.
boolean isValidationNeeded = validatorConfig.isValidationEnabled() && !VPlexControllerUtils.isVplexInitiator(firstInitiator, _dbClient) && !ExportUtils.checkIfInitiatorsForRP(Arrays.asList(firstInitiator));
_log.info("Orchestration level validation needed : {}", isValidationNeeded);
InitiatorHelper initiatorHelper = new InitiatorHelper(initiatorURIs).process(exportGroup);
// Populate a map of volumes on the storage device associated with this ExportGroup
List<BlockObject> blockObjects = new ArrayList<BlockObject>();
if (exportGroup != null) {
for (Map.Entry<String, String> entry : exportGroup.getVolumes().entrySet()) {
URI boURI = URI.create(entry.getKey());
BlockObject bo = BlockObject.fetch(_dbClient, boURI);
if (bo.getStorageController().equals(storageURI)) {
blockObjects.add(bo);
}
}
}
Map<URI, Boolean> initiatorIsPartOfFullListFlags = flagInitiatorsThatArePartOfAFullList(exportGroup, initiatorURIs);
List<String> initiatorNames = new ArrayList<String>();
for (URI initiatorURI : initiatorURIs) {
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
String normalizedName = Initiator.normalizePort(initiator.getInitiatorPort());
initiatorNames.add(normalizedName);
}
_log.info("Normalized initiator names :{}", initiatorNames);
device.findExportMasks(storage, initiatorNames, false);
boolean anyOperationsToDo = false;
Map<URI, ExportMask> refreshedMasks = new HashMap<URI, ExportMask>();
if (exportGroup != null && exportGroup.getExportMasks() != null) {
// There were some exports out there that already have some or all of the
// initiators that we are attempting to remove. We need to only
// remove the volumes that the user added to these masks
Map<String, Set<URI>> matchingExportMaskURIs = getInitiatorToExportMaskMap(exportGroup);
// This loop will determine a list of volumes to update per export mask
Map<URI, List<URI>> existingMasksToRemoveInitiator = new HashMap<URI, List<URI>>();
Map<URI, List<URI>> existingMasksToRemoveVolumes = new HashMap<URI, List<URI>>();
for (Map.Entry<String, Set<URI>> entry : matchingExportMaskURIs.entrySet()) {
URI initiatorURI = initiatorHelper.getPortNameToInitiatorURI().get(entry.getKey());
if (initiatorURI == null || !initiatorURIs.contains(initiatorURI)) {
// Entry key points to an initiator that was not passed in the remove request
continue;
}
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
// Get a list of the ExportMasks that were matched to the initiator
// go through the initiators and figure out the proper initiator and volume ramifications
// to the existing masks.
List<URI> exportMaskURIs = new ArrayList<URI>();
exportMaskURIs.addAll(entry.getValue());
List<ExportMask> masks = _dbClient.queryObject(ExportMask.class, exportMaskURIs);
_log.info(String.format("initiator %s masks {%s}", initiator.getInitiatorPort(), Joiner.on(',').join(exportMaskURIs)));
for (ExportMask mask : masks) {
if (mask == null || mask.getInactive() || !mask.getStorageDevice().equals(storageURI)) {
continue;
}
if (!refreshedMasks.containsKey(mask.getId())) {
// refresh the export mask always
mask = device.refreshExportMask(storage, mask);
refreshedMasks.put(mask.getId(), mask);
}
_log.info(String.format("mask %s has initiator %s", mask.getMaskName(), initiator.getInitiatorPort()));
/**
 * If the user asked to remove a host from a cluster:
 * 1. Check if the export mask is shared across other export groups; if not, remove the host.
 * 2. If shared, check whether all of the host's initiators are being asked to be removed.
 * 3. If yes, check if at least one of the other sharing export groups is EXCLUSIVE.
 * 4. If yes, then remove the shared volumes.
 *
 * In all other cases, remove the initiators.
 */
List<ExportGroup> otherExportGroups = ExportUtils.getOtherExportGroups(exportGroup, mask, _dbClient);
if (!otherExportGroups.isEmpty() && initiatorIsPartOfFullListFlags.get(initiatorURI) && ExportUtils.exportMaskHasBothExclusiveAndSharedVolumes(exportGroup, otherExportGroups, mask)) {
if (!exportGroup.forInitiator()) {
List<URI> removeVolumesList = existingMasksToRemoveVolumes.get(mask.getId());
if (removeVolumesList == null) {
removeVolumesList = new ArrayList<URI>();
existingMasksToRemoveVolumes.put(mask.getId(), removeVolumesList);
}
for (String volumeIdStr : exportGroup.getVolumes().keySet()) {
URI egVolumeID = URI.create(volumeIdStr);
if (mask.getUserAddedVolumes().containsValue(volumeIdStr) && !removeVolumesList.contains(egVolumeID)) {
removeVolumesList.add(egVolumeID);
}
}
} else {
// Just a reminder to the world in the case where Initiator is used in this odd situation.
_log.info("Removing volumes from an Initiator type export group as part of an initiator removal is not supported.");
}
} else {
_log.info(String.format("We can remove initiator %s from mask %s", initiator.getInitiatorPort(), mask.getMaskName()));
List<URI> initiators = existingMasksToRemoveInitiator.get(mask.getId());
if (initiators == null) {
initiators = new ArrayList<URI>();
existingMasksToRemoveInitiator.put(mask.getId(), initiators);
}
if (!initiators.contains(initiator.getId())) {
initiators.add(initiator.getId());
}
}
}
}
Set<URI> masksGettingRemoved = new HashSet<URI>();
// In this loop we are trying to remove those initiators that exist
// on a mask that ViPR created.
String previousStep = null;
for (Map.Entry<URI, List<URI>> entry : existingMasksToRemoveInitiator.entrySet()) {
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
List<URI> initiatorsToRemove = entry.getValue();
List<URI> initiatorsToRemoveOnStorage = new ArrayList<URI>();
for (URI initiatorURI : initiatorsToRemove) {
Initiator initiator = _dbClient.queryObject(Initiator.class, initiatorURI);
// COP-28729 - We can allow removing the initiator or host if the shared mask doesn't have any existing volumes.
// Shared masks will have at least one unmanaged volume.
String err = ExportUtils.getExportMasksSharingInitiatorAndHasUnManagedVolumes(_dbClient, initiator, mask, existingMasksToRemoveInitiator.keySet());
if (err != null) {
errorMessage.append(err);
}
initiatorsToRemoveOnStorage.add(initiatorURI);
}
// CTRL-8846 fix : Compare against all the initiators
Set<String> allMaskInitiators = ExportUtils.getExportMaskAllInitiatorPorts(mask, _dbClient);
List<Initiator> removableInitiatorList = _dbClient.queryObject(Initiator.class, initiatorsToRemove);
List<String> portNames = new ArrayList<>(Collections2.transform(removableInitiatorList, CommonTransformerFunctions.fctnInitiatorToPortName()));
allMaskInitiators.removeAll(portNames);
if (allMaskInitiators.isEmpty()) {
masksGettingRemoved.add(mask.getId());
// For this case, we are attempting to remove all the
// initiators in the mask. This means that we will have to delete the
// export mask itself.
_log.info(String.format("mask %s has removed all initiators, mask will be deleted from the array. ", mask.getMaskName()));
List<ExportMask> exportMasks = new ArrayList<ExportMask>();
exportMasks.add(mask);
previousStep = generateExportMaskDeleteWorkflow(workflow, previousStep, storage, exportGroup, mask, getExpectedVolumes(mask), getExpectedInitiators(mask), null);
previousStep = generateZoningDeleteWorkflow(workflow, previousStep, exportGroup, exportMasks);
anyOperationsToDo = true;
} else {
_log.info(String.format("mask %s - going to remove the " + "following initiators %s. ", mask.getMaskName(), Joiner.on(',').join(initiatorsToRemove)));
Map<URI, List<URI>> maskToInitiatorsMap = new HashMap<URI, List<URI>>();
maskToInitiatorsMap.put(mask.getId(), initiatorsToRemove);
ExportMaskRemoveInitiatorCompleter exportTaskCompleter = new ExportMaskRemoveInitiatorCompleter(exportGroupURI, mask.getId(), initiatorsToRemove, null);
previousStep = generateExportMaskRemoveInitiatorsWorkflow(workflow, previousStep, storage, exportGroup, mask, getExpectedVolumes(mask), initiatorsToRemoveOnStorage, true, exportTaskCompleter);
previousStep = generateZoningRemoveInitiatorsWorkflow(workflow, previousStep, exportGroup, maskToInitiatorsMap);
anyOperationsToDo = true;
}
}
// Now remove the volumes that were flagged earlier from the remaining masks,
// for the storage array and ExportGroup.
for (Map.Entry<URI, List<URI>> entry : existingMasksToRemoveVolumes.entrySet()) {
if (masksGettingRemoved.contains(entry.getKey())) {
_log.info("Mask {} is getting removed, no need to remove volumes from it", entry.getKey().toString());
continue;
}
ExportMask mask = _dbClient.queryObject(ExportMask.class, entry.getKey());
List<URI> volumesToRemove = entry.getValue();
List<URI> initiatorsToRemove = existingMasksToRemoveInitiator.get(mask.getId());
if (initiatorsToRemove != null) {
Set<String> initiatorsInExportMask = ExportUtils.getExportMaskAllInitiatorPorts(mask, _dbClient);
List<Initiator> removableInitiatorList = _dbClient.queryObject(Initiator.class, initiatorsToRemove);
List<String> portNames = new ArrayList<>(Collections2.transform(removableInitiatorList, CommonTransformerFunctions.fctnInitiatorToPortName()));
initiatorsInExportMask.removeAll(portNames);
if (!initiatorsInExportMask.isEmpty()) {
// There are still some initiators in this ExportMask
_log.info(String.format("ExportMask %s would have remaining initiators {%s} that require access to {%s}. " + "Not going to remove any of the volumes", mask.getMaskName(), Joiner.on(',').join(initiatorsInExportMask), Joiner.on(", ").join(volumesToRemove)));
continue;
}
}
Collection<String> volumesToRemoveURIStrings = Collections2.transform(volumesToRemove, CommonTransformerFunctions.FCTN_URI_TO_STRING);
List<String> exportMaskVolumeURIStrings = new ArrayList<String>(mask.getVolumes().keySet());
exportMaskVolumeURIStrings.removeAll(volumesToRemoveURIStrings);
boolean hasExistingVolumes = !CollectionUtils.isEmpty(mask.getExistingVolumes());
List<? extends BlockObject> boList = BlockObject.fetchAll(_dbClient, volumesToRemove);
if (!hasExistingVolumes && exportMaskVolumeURIStrings.isEmpty()) {
_log.info(String.format("All the volumes (%s) from mask %s will be removed, so will have to remove the whole mask. ", Joiner.on(", ").join(volumesToRemove), mask.getMaskName()));
errorMessage.append(String.format("Mask %s would have deleted from array ", mask.forDisplay()));
// Order matters! Above this would be any remove initiators that would impact other masking views.
// Be sure to always remove anything inside the mask before removing the mask itself.
previousStep = generateExportMaskDeleteWorkflow(workflow, previousStep, storage, exportGroup, mask, getExpectedVolumes(mask), getExpectedInitiators(mask), null);
previousStep = generateZoningDeleteWorkflow(workflow, previousStep, exportGroup, Arrays.asList(mask));
anyOperationsToDo = true;
} else {
ExportTaskCompleter completer = new ExportRemoveVolumesOnAdoptedMaskCompleter(exportGroupURI, mask.getId(), volumesToRemove, token);
_log.info(String.format("A subset of volumes will be removed from mask %s: %s. ", mask.getMaskName(), Joiner.on(",").join(volumesToRemove)));
errorMessage.append(String.format("A subset of volumes will be removed from mask %s: %s. ", mask.forDisplay(), Joiner.on(", ").join(Collections2.transform(boList, CommonTransformerFunctions.fctnDataObjectToForDisplay()))));
List<ExportMask> masks = new ArrayList<ExportMask>();
masks.add(mask);
previousStep = generateExportMaskRemoveVolumesWorkflow(workflow, previousStep, storage, exportGroup, mask, volumesToRemove, getExpectedInitiators(mask), completer);
previousStep = generateZoningRemoveVolumesWorkflow(workflow, previousStep, exportGroup, masks, volumesToRemove);
anyOperationsToDo = true;
}
}
}
_log.warn("Error Message {}", errorMessage);
if (isValidationNeeded && StringUtils.hasText(errorMessage)) {
throw DeviceControllerException.exceptions.removeInitiatorValidationError(Joiner.on(", ").join(initiatorNames), storage.getLabel(), errorMessage.toString());
}
if (anyOperationsToDo) {
String successMessage = String.format("Successfully removed exports for initiators on StorageArray %s", storage.getLabel());
workflow.executePlan(taskCompleter, successMessage);
} else {
taskCompleter.ready(_dbClient);
}
} catch (Exception ex) {
_log.error("ExportGroup remove initiator Orchestration failed.", ex);
if (taskCompleter != null) {
ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
taskCompleter.error(_dbClient, serviceError);
}
}
}
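The CTRL-8846 comparison in this method decides between deleting a whole mask and merely shrinking it: take every initiator port currently in the mask, subtract the normalized ports being removed, and delete the mask only when nothing would remain. The self-contained sketch below reproduces that decision on plain collections; the class and method names are illustrative, not part of the CoprHD API.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class MaskRemovalDecision {
        /** Returns true when removing the given ports would leave the mask with no initiators. */
        static boolean maskWouldBeEmpty(Set<String> allMaskInitiatorPorts, Set<String> portsToRemove) {
            Set<String> remaining = new HashSet<>(allMaskInitiatorPorts);
            remaining.removeAll(portsToRemove);
            return remaining.isEmpty();
        }

        public static void main(String[] args) {
            Set<String> maskPorts = new HashSet<>(Arrays.asList("1000000000000001", "1000000000000002"));
            Set<String> removing = new HashSet<>(Arrays.asList("1000000000000001"));
            // false: one initiator port would remain, so only a remove-initiators step is generated.
            System.out.println(maskWouldBeEmpty(maskPorts, removing));
            removing.add("1000000000000002");
            // true: the mask would be emptied, so a delete-mask (and zoning delete) step is generated instead.
            System.out.println(maskWouldBeEmpty(maskPorts, removing));
        }
    }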
use of com.emc.storageos.volumecontroller.impl.block.taskcompleter.ExportOrchestrationTask in project coprhd-controller by CoprHD.
In class VmaxMaskingOrchestrator, method exportGroupAddVolumes:
@Override
public void exportGroupAddVolumes(URI storageURI, URI exportGroupURI, Map<URI, Integer> volumeMap, String token) throws Exception {
ExportOrchestrationTask taskCompleter = null;
try {
BlockStorageDevice device = getDevice();
taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
logExportGroup(exportGroup, storageURI);
// Exceptions to this are documented in the logic.
if (exportGroup != null && exportGroup.getExportMasks() != null) {
// Set up workflow steps.
Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupAddVolumes", true, token);
Collection<URI> initiatorIds = Collections2.transform(StringSetUtil.get(exportGroup.getInitiators()), CommonTransformerFunctions.FCTN_STRING_TO_URI);
if (!determineExportGroupCreateSteps(workflow, null, device, storage, exportGroup, new ArrayList<URI>(initiatorIds), volumeMap, true, token)) {
throw DeviceControllerException.exceptions.exportGroupCreateFailed(new Exception("Export Group Add Volume Failed"));
}
String successMessage = String.format("Successfully added volumes to export on StorageArray %s", storage.getLabel());
workflow.executePlan(taskCompleter, successMessage);
} else {
if (exportGroup.hasInitiators()) {
_log.info("There are no masks for this export. Need to create anew.");
List<URI> initiatorURIs = new ArrayList<URI>();
for (String initiatorURIStr : exportGroup.getInitiators()) {
initiatorURIs.add(URI.create(initiatorURIStr));
}
// Invoke the export group create operation,
// which should in turn create a workflow operations to
// create the export for the newly added volume(s).
exportGroupCreate(storageURI, exportGroupURI, initiatorURIs, volumeMap, token);
} else {
_log.warn("There are no initiators for export group: " + exportGroup.getLabel());
// Additional logic to ensure the task is closed out in the case where no workflow was really generated.
taskCompleter.ready(_dbClient);
_log.info("No volumes pushed to array because either they already exist " + "or there were no initiators added to the export yet.");
}
}
} catch (Exception ex) {
_log.error("ExportGroup Orchestration failed.", ex);
if (taskCompleter != null) {
ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(ex.getMessage(), ex);
taskCompleter.error(_dbClient, serviceError);
}
}
}
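This method converts the export group's initiator id strings to URIs in two different ways: a lazy Guava view (Collections2.transform with FCTN_STRING_TO_URI) on the path where masks already exist, and a plain loop before delegating to exportGroupCreate. The self-contained sketch below shows both forms side by side; the initiator id strings are made-up placeholders.

    import java.net.URI;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;
    import com.google.common.collect.Collections2;

    public class InitiatorIdConversion {
        public static void main(String[] args) {
            List<String> initiatorIdStrings = Arrays.asList(
                    "urn:storageos:Initiator:init-1", "urn:storageos:Initiator:init-2");

            // Lazy view, equivalent in spirit to Collections2.transform(..., FCTN_STRING_TO_URI).
            Collection<URI> asView = Collections2.transform(initiatorIdStrings, URI::create);

            // Eager copy, equivalent to the explicit for-loop branch above.
            List<URI> asList = new ArrayList<>();
            for (String id : initiatorIdStrings) {
                asList.add(URI.create(id));
            }

            System.out.println(asView); // [urn:storageos:Initiator:init-1, urn:storageos:Initiator:init-2]
            System.out.println(asList);
        }
    }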