Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
In the class CephMaskingOrchestrator, the method exportGroupDelete:
@Override
public void exportGroupDelete(URI storageURI, URI exportGroupURI, String token) throws Exception {
    ExportOrchestrationTask taskCompleter = new ExportOrchestrationTask(exportGroupURI, token);
    try {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageURI);
        List<ExportMask> masks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, storageURI);
        if (masks != null && !masks.isEmpty()) {
            Workflow workflow = _workflowService.getNewWorkflow(MaskingWorkflowEntryPoints.getInstance(), "exportGroupDelete", true, token, taskCompleter);
            Map<URI, Integer> volumeMap = ExportUtils.getExportGroupVolumeMap(_dbClient, storage, exportGroup);
            List<URI> volumeURIs = new ArrayList<>(volumeMap.keySet());
            List<URI> initiatorURIs = StringSetUtil.stringSetToUriList(exportGroup.getInitiators());
            Map<URI, Map<URI, Integer>> exportMaskToVolumeCount = ExportMaskUtils.mapExportMaskToVolumeShareCount(_dbClient, volumeURIs, initiatorURIs);
            for (ExportMask exportMask : masks) {
                List<URI> exportGroupURIs = new ArrayList<>();
                if (!ExportUtils.isExportMaskShared(_dbClient, exportMask.getId(), exportGroupURIs)) {
                    _log.info(String.format("Adding step to delete ExportMask %s", exportMask.getMaskName()));
                    generateExportMaskDeleteWorkflow(workflow, null, storage, exportGroup, exportMask, null, null, taskCompleter);
                } else {
                    Map<URI, Integer> volumeToExportGroupCount = exportMaskToVolumeCount.get(exportMask.getId());
                    List<URI> volumesToRemove = new ArrayList<>();
                    for (URI uri : volumeMap.keySet()) {
                        if (volumeToExportGroupCount == null) {
                            continue;
                        }
                        // Remove the volume only if it is not shared with
                        // more than 1 ExportGroup
                        Integer numExportGroupsVolumeIsIn = volumeToExportGroupCount.get(uri);
                        if (numExportGroupsVolumeIsIn != null && numExportGroupsVolumeIsIn == 1) {
                            volumesToRemove.add(uri);
                        }
                    }
                    if (!volumesToRemove.isEmpty()) {
                        _log.info(String.format("Adding step to remove volumes %s from ExportMask %s", Joiner.on(',').join(volumesToRemove), exportMask.getMaskName()));
                        generateExportMaskRemoveVolumesWorkflow(workflow, null, storage, exportGroup, exportMask, volumesToRemove, null, taskCompleter);
                    }
                }
            }
            String successMessage = String.format("ExportGroup delete successfully completed for StorageArray %s", storage.getLabel());
            workflow.executePlan(taskCompleter, successMessage);
        } else {
            taskCompleter.ready(_dbClient);
        }
    } catch (DeviceControllerException dex) {
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupDelete", dex.getMessage()));
    } catch (Exception ex) {
        _log.error("ExportGroup Orchestration failed.", ex);
        taskCompleter.error(_dbClient, DeviceControllerErrors.ceph.operationFailed("exportGroupDelete", ex.getMessage()));
    }
}
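The shared-mask branch above removes a volume only when exactly one ExportGroup still references it. A minimal standalone sketch of that selection rule, using plain collections instead of the CoprHD types (the class and method names below are illustrative, not part of the project):

import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class SharedMaskVolumeFilter {

    /**
     * Returns the volumes that are referenced by exactly one ExportGroup and are
     * therefore safe to remove from a mask shared by several groups.
     *
     * @param exportGroupVolumes  volumes of the ExportGroup being deleted
     * @param volumeToGroupCount  per-volume count of ExportGroups that still use it
     */
    static List<URI> selectRemovableVolumes(Iterable<URI> exportGroupVolumes,
                                            Map<URI, Integer> volumeToGroupCount) {
        List<URI> removable = new ArrayList<>();
        if (volumeToGroupCount == null) {
            return removable; // nothing known about sharing, so remove nothing
        }
        for (URI volume : exportGroupVolumes) {
            Integer groups = volumeToGroupCount.get(volume);
            if (groups != null && groups == 1) {
                removable.add(volume);
            }
        }
        return removable;
    }
}

Given volumeMap.keySet() and the per-mask count map from the snippet, the result corresponds to the volumesToRemove list built in the inner loop.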
Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
In the class ExternalDeviceExportOperations, the method removeVolumes:
@Override
public void removeVolumes(StorageSystem storage, URI exportMaskUri, List<URI> volumeUris, List<com.emc.storageos.db.client.model.Initiator> initiatorList, TaskCompleter taskCompleter) throws DeviceControllerException {
    log.info("{} removeVolumes START...", storage.getSerialNumber());
    try {
        log.info("removeVolumes: Export mask id: {}", exportMaskUri);
        log.info("removeVolumes: volumes: {}", Joiner.on(',').join(volumeUris));
        if (initiatorList != null) {
            log.info("removeVolumes: impacted initiators: {}", Joiner.on(",").join(initiatorList));
        }
        BlockStorageDriver driver = externalDevice.getDriver(storage.getSystemType());
        ExportMask exportMask = (ExportMask) dbClient.queryObject(exportMaskUri);
        StringSet maskInitiators = exportMask.getInitiators();
        List<String> maskInitiatorList = new ArrayList<>();
        for (String initiatorUri : maskInitiators) {
            maskInitiatorList.add(initiatorUri);
        }
        log.info("Export mask existing initiators: {} ", Joiner.on(",").join(maskInitiatorList));
        // Prepare the volumes to be removed from the export mask for the driver call.
        List<StorageVolume> driverVolumes = new ArrayList<>();
        prepareVolumes(storage, volumeUris, driverVolumes);
        // Prepare initiators
        Set<com.emc.storageos.db.client.model.Initiator> initiators = ExportMaskUtils.getInitiatorsForExportMask(dbClient, exportMask, null);
        List<Initiator> driverInitiators = new ArrayList<>();
        // Get the export group URI from the task completer
        URI exportGroupUri = taskCompleter.getId();
        ExportGroup exportGroup = (ExportGroup) dbClient.queryObject(exportGroupUri);
        prepareInitiators(initiators, exportGroup.forCluster(), driverInitiators);
        // Ready to call the driver
        DriverTask task = driver.unexportVolumesFromInitiators(driverInitiators, driverVolumes);
        // TODO: need to implement support for the async case.
        if (task.getStatus() == DriverTask.TaskStatus.READY) {
            String msg = String.format("Removed volumes from export: %s.", task.getMessage());
            log.info(msg);
            taskCompleter.ready(dbClient);
        } else {
            String errorMsg = String.format("Failed to remove volumes from export mask: %s .", task.getMessage());
            log.error(errorMsg);
            ServiceError serviceError = ExternalDeviceException.errors.deleteVolumesFromExportMaskFailed("removeVolumes", errorMsg);
            taskCompleter.error(dbClient, serviceError);
        }
    } catch (Exception ex) {
        log.error("Problem in removeVolumes: ", ex);
        String errorMsg = String.format("Failed to remove volumes from export mask: %s .", ex.getMessage());
        log.error(errorMsg);
        ServiceError serviceError = ExternalDeviceException.errors.deleteVolumesFromExportMaskFailed("removeVolumes", errorMsg);
        taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} removeVolumes END...", storage.getSerialNumber());
}
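The TODO above flags asynchronous driver tasks as unsupported; only the immediate READY/failed outcome is handled. A rough sketch of how a polling wait could look, assuming a simplified task surface modeled on the getStatus()/getMessage() calls shown in the snippet (DriverTaskPoller and its nested Task type are hypothetical, not the external-device SDK contract):

import java.util.concurrent.TimeUnit;

final class DriverTaskPoller {

    // Simplified stand-in for the driver task surface used in the snippet.
    interface Task {
        Status getStatus();
        String getMessage();
        enum Status { READY, FAILED, PROVISIONING }
    }

    /**
     * Polls a driver task until it leaves the PROVISIONING state or the timeout expires.
     * Returns true only when the task ends in READY.
     */
    static boolean waitForCompletion(Task task, long timeoutSeconds) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSeconds);
        while (task.getStatus() == Task.Status.PROVISIONING) {
            if (System.nanoTime() > deadline) {
                return false; // treat a timeout as a failure and let the caller report it
            }
            TimeUnit.SECONDS.sleep(5); // re-check at a coarse interval
        }
        return task.getStatus() == Task.Status.READY;
    }
}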
Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
In the class ExternalDeviceUnManagedVolumeDiscoverer, the method determineUnManagedExportMasksForExportInfo:
/**
 * This method processes hostToVolumeExportInfoMap to find out which existing unmanaged masks have to be updated,
 * and which unmanaged masks have to be created anew for this export info. It also identifies hosts with unsupported
 * export info data (exported host volumes are not seen through the same set of initiators and the same set of storage
 * ports --- which would require more than one mask per host) and adds these hosts to the invalidExportHosts set.
 *
 * @param storageSystem [IN] storage system being discovered
 * @param hostToVolumeExportInfoMap [IN] map: key --- host FQDN, value --- list of volume export info instances
 * @param invalidExportHosts [IN, OUT] set of invalid hosts, for which we skip export processing for a given array
 * @param dbClient [IN] reference to the db client
 * @param masksToUpdateForVolumes [OUT] map: key --- URI of an existing unmanaged export mask, value --- export info to use
 *            to update the mask
 * @param masksToCreateForVolumes [OUT] list of export info instances for which we need to create new unmanaged masks
 */
private void determineUnManagedExportMasksForExportInfo(com.emc.storageos.db.client.model.StorageSystem storageSystem, Map<String, List<HostExportInfo>> hostToVolumeExportInfoMap, Set<String> invalidExportHosts, DbClient dbClient, Map<URI, HostExportInfo> masksToUpdateForVolumes, List<HostExportInfo> masksToCreateForVolumes) {
    for (Map.Entry<String, List<HostExportInfo>> entry : hostToVolumeExportInfoMap.entrySet()) {
        String hostName = entry.getKey();
        log.info("Processing export info for host {} .", hostName);
        if (invalidExportHosts.contains(hostName)) {
            // skip and continue to the next host.
            log.info("Found host {} in invalid hosts list. We will not process this host export data.", hostName);
            continue;
        }
        List<HostExportInfo> volumeToHostExportInfoList = entry.getValue();
        log.info("Processing export info list {} .", volumeToHostExportInfoList);
        String maskName = getUnManagedMaskName(hostName, storageSystem.getNativeGuid());
        HostExportInfo hostExportInfo = verifyHostExports(volumeToHostExportInfoList);
        if (hostExportInfo == null) {
            // invalid, continue to the next host
            invalidExportHosts.add(hostName);
            log.info("Found export info for host {} invalid. We will not process this host export data.", hostName);
            // check the existing UnManaged export mask for host/array: the mask could have been discovered for volumes on previous
            // pages (all unmanaged masks from the previous discovery have been deactivated at the beginning).
            UnManagedExportMask unManagedMask = getUnManagedExportMask(maskName, dbClient, storageSystem.getId());
            if (unManagedMask != null) {
                log.info("Found existing unmanaged export mask for host {} and array {} --- {} . We will deactivate this mask.", hostName, storageSystem.getNativeId(), unManagedMask);
                removeInvalidMaskDataFromVolumes(unManagedMask, dbClient);
                unManagedMask.setInactive(true);
                dbClient.updateObject(unManagedMask);
            }
            continue;
        }
        log.info("The result export info for host {} and array {} : {} .", hostName, storageSystem.getNativeId(), hostExportInfo);
        // check the existing UnManaged export mask for host/array: the mask could have been discovered for volumes on previous
        // pages (all unmanaged masks from the previous discovery have been deactivated at the beginning).
        UnManagedExportMask unManagedMask = getUnManagedExportMask(maskName, dbClient, storageSystem.getId());
        boolean isValid = true;
        if (unManagedMask != null) {
            log.info("Found existing unmanaged export mask for host {} and array {} --- {} .", hostName, storageSystem.getNativeId(), unManagedMask);
            // check that the existing host/array unmanaged export mask has the same set of initiators and the same
            // set of ports as the newly discovered hostExportInfo
            StringSet storagePortsUris = unManagedMask.getKnownStoragePortUris();
            Set<String> storagePortsNativeIds = new HashSet<>();
            Set<String> initiatorsNativeIds = new HashSet<>();
            for (String portUriString : storagePortsUris) {
                URI portUri = URI.create(portUriString);
                com.emc.storageos.db.client.model.StoragePort port = dbClient.queryObject(com.emc.storageos.db.client.model.StoragePort.class, portUri);
                storagePortsNativeIds.add(port.getNativeId());
            }
            storagePortsNativeIds.addAll(unManagedMask.getUnmanagedStoragePortNetworkIds());
            initiatorsNativeIds.addAll(unManagedMask.getKnownInitiatorNetworkIds());
            initiatorsNativeIds.addAll(unManagedMask.getUnmanagedInitiatorNetworkIds());
            isValid = verifyHostExports(initiatorsNativeIds, storagePortsNativeIds, hostExportInfo);
            if (!isValid) {
                // Invalid, we deactivate the existing unmanaged mask --- make sure we do not discover invalid export
                // masks. We also remove this mask from the "unmanagedExportMasks" set in its unmanaged storage volumes.
                log.info("The result export info for host {} and storage array {} does not comply with existing mask.", hostName, storageSystem.getNativeId());
                removeInvalidMaskDataFromVolumes(unManagedMask, dbClient);
                unManagedMask.setInactive(true);
                dbClient.updateObject(unManagedMask);
            }
        } else {
            // Check if the export mask for host/array is already managed. If the host/array mask is managed, check that hostExportInfo has the same
            // storage ports and the same host initiators as the managed mask. If ports/initiators match between the mask and hostExportInfo, we will
            // process this host export info and create a new UnManagedExportMask for the host.
            log.info("There is no existing unmanaged export mask for host {} and array {} .", hostName, storageSystem.getNativeId());
            List<String> initiatorPorts = new ArrayList<>();
            for (Initiator initiator : hostExportInfo.getInitiators()) {
                initiatorPorts.add(initiator.getPort());
            }
            // We enforce a single export mask per host/array for ingested masks, so if even one initiator port matches, the mask is a match.
            Map<URI, ExportMask> uriToExportMask = ExportMaskUtils.getExportMasksWithInitiatorPorts(dbClient, initiatorPorts);
            // Look for an export mask for the storage system under processing.
            for (ExportMask mask : uriToExportMask.values()) {
                if (URIUtil.identical(mask.getStorageDevice(), storageSystem.getId())) {
                    // found a managed export mask for the storage system and host initiator ---
                    // the mask is already managed.
                    log.info("Found managed export mask for host {} and array {} --- {}." + " We will process this host export data to see if we can add volumes to this mask.", hostName, storageSystem.getNativeId(), mask.getId());
                    // check that this managed mask has the same initiators and ports as in the hostExportInfo
                    StringSet storagePortsUris = mask.getStoragePorts();
                    StringSet initiatorsUris = mask.getInitiators();
                    List<com.emc.storageos.db.client.model.StoragePort> ports = dbClient.queryObjectField(com.emc.storageos.db.client.model.StoragePort.class, "nativeId", StringSetUtil.stringSetToUriList(storagePortsUris));
                    List<com.emc.storageos.db.client.model.Initiator> initiators = dbClient.queryObjectField(com.emc.storageos.db.client.model.Initiator.class, "iniport", StringSetUtil.stringSetToUriList(initiatorsUris));
                    Set<String> maskStoragePortsNativeIds = new HashSet<>();
                    Set<String> maskInitiatorPorts = new HashSet<>();
                    for (com.emc.storageos.db.client.model.StoragePort storagePort : ports) {
                        maskStoragePortsNativeIds.add(storagePort.getNativeId());
                    }
                    for (com.emc.storageos.db.client.model.Initiator initiator : initiators) {
                        maskInitiatorPorts.add(initiator.getInitiatorPort());
                    }
                    log.info("Managed ExportMask {} has the following storage ports {}", mask.getId(), maskStoragePortsNativeIds);
                    log.info("Managed ExportMask {} has the following initiator ports {}", mask.getId(), maskInitiatorPorts);
                    // check that hostExportInfo has the same ports and initiators as the export mask
                    isValid = verifyHostExports(maskInitiatorPorts, maskStoragePortsNativeIds, hostExportInfo);
                    if (isValid) {
                        // we will create an unmanaged mask for this hostExportInfo;
                        // we rely on ingestion to add new volumes to the managed mask.
                        log.info("Managed export mask {} has the same initiators and ports as in hostExportInfo. We will create unmanaged mask for new volumes.", mask.getId());
                        break;
                    } else {
                        log.info("Managed export mask {} has different initiators or ports as those in hostExportInfo.", mask.getId());
                    }
                }
            }
        }
        if (!isValid) {
            // invalid, continue to the next host;
            // add the host to the invalid hosts list, so we do not process any export volume
            // info for this host in the future (for volumes found on next pages).
            log.info("Found export info for host {} invalid. Export info: {}." + " We will not process this host export data.", hostName, hostExportInfo);
            invalidExportHosts.add(hostName);
            continue;
        }
        if (unManagedMask != null) {
            // we will update this mask with additional volumes.
            URI maskId = unManagedMask.getId();
            masksToUpdateForVolumes.put(maskId, hostExportInfo);
        } else {
            // we will create a new unmanaged mask for host/array.
            masksToCreateForVolumes.add(hostExportInfo);
        }
    }
}
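verifyHostExports(volumeToHostExportInfoList) is the step that collapses a host's per-volume export records into a single view, or rejects the host when the records disagree. A simplified, self-contained sketch of that consistency rule (the types and names are illustrative assumptions, not the project's implementation):

import java.util.List;
import java.util.Objects;
import java.util.Set;

final class HostExportConsistency {

    // Minimal stand-in for the initiator/port view carried by one volume's export info.
    static final class VolumeExportView {
        final Set<String> initiatorPorts;
        final Set<String> storagePorts;
        VolumeExportView(Set<String> initiatorPorts, Set<String> storagePorts) {
            this.initiatorPorts = initiatorPorts;
            this.storagePorts = storagePorts;
        }
    }

    /**
     * Returns the common view when every volume on the host is exported through the same
     * initiators and the same storage ports, or null when the views diverge (which would
     * require more than one mask per host and is treated as invalid export data).
     */
    static VolumeExportView verifySingleMaskPerHost(List<VolumeExportView> views) {
        if (views == null || views.isEmpty()) {
            return null;
        }
        VolumeExportView first = views.get(0);
        for (VolumeExportView view : views) {
            if (!Objects.equals(first.initiatorPorts, view.initiatorPorts)
                    || !Objects.equals(first.storagePorts, view.storagePorts)) {
                return null; // inconsistent export data for this host
            }
        }
        return first;
    }
}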
Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
In the class HDSExportOperations, the method removeInitiators:
@Override
public void removeInitiators(StorageSystem storage, URI exportMaskURI, List<URI> volumeURIList, List<Initiator> initiators, List<URI> targets, TaskCompleter taskCompleter) throws DeviceControllerException {
    long startTime = System.currentTimeMillis();
    log.info("{} removeInitiator START...", storage.getSerialNumber());
    try {
        log.info("removeInitiator: Export mask id: {}", exportMaskURI);
        if (volumeURIList != null) {
            log.info("removeInitiator: volumes : {}", Joiner.on(',').join(volumeURIList));
        }
        log.info("removeInitiator: initiators : {}", Joiner.on(',').join(initiators));
        log.info("removeInitiator: targets : {}", Joiner.on(',').join(targets));
        if (null == initiators || initiators.isEmpty()) {
            log.info("No initiators found to remove {}", exportMaskURI);
            taskCompleter.ready(dbClient);
            return;
        }
        ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskURI);
        // Get the context from the task completer, in case this is a rollback.
        boolean isRollback = WorkflowService.getInstance().isStepInRollbackState(taskCompleter.getOpId());
        ExportMaskValidationContext ctx = new ExportMaskValidationContext();
        ctx.setStorage(storage);
        ctx.setExportMask(exportMask);
        ctx.setBlockObjects(volumeURIList, dbClient);
        ctx.setInitiators(initiators);
        // Allow exceptions to be thrown when not rolling back
        ctx.setAllowExceptions(!isRollback);
        AbstractHDSValidator removeInitiatorFromMaskValidator = (AbstractHDSValidator) validator.removeInitiators(ctx);
        removeInitiatorFromMaskValidator.validate();
        HDSApiClient hdsApiClient = hdsApiFactory.getClient(HDSUtils.getHDSServerManagementServerInfo(storage), storage.getSmisUserName(), storage.getSmisPassword());
        HDSApiExportManager exportMgr = hdsApiClient.getHDSApiExportManager();
        String systemObjectID = HDSUtils.getSystemObjectID(storage);
        StringSetMap deviceDataMap = exportMask.getDeviceDataMap();
        if (null != deviceDataMap && !deviceDataMap.isEmpty()) {
            Set<String> hsdObjectIDSet = deviceDataMap.keySet();
            for (String hsdObjectID : hsdObjectIDSet) {
                HostStorageDomain hsd = exportMgr.getHostStorageDomain(systemObjectID, hsdObjectID);
                if (null == hsd) {
                    log.warn("Not able to remove initiators as HSD {} couldn't be found on the array.", hsdObjectID);
                    continue;
                }
                List<String> fcInitiators = getFCInitiatorsExistOnHSD(hsd, initiators);
                List<String> iSCSIInitiators = getISCSIInitiatorsExistOnHSD(hsd, initiators);
                boolean isLastFCInitiator = (fcInitiators.size() == 1 && null != hsd.getWwnList() && hsd.getWwnList().size() == fcInitiators.size());
                boolean isLastISCSIInitiator = (iSCSIInitiators.size() == 1 && null != hsd.getIscsiList() && hsd.getIscsiList().size() == iSCSIInitiators.size());
                // If the initiator is the last one on the HSD, remove the whole HSD
                if (isLastFCInitiator || isLastISCSIInitiator) {
                    exportMgr.deleteHostStorageDomain(systemObjectID, hsd.getObjectID(), storage.getModel());
                    exportMask.getDeviceDataMap().remove(hsd.getObjectID());
                } else {
                    if (null != fcInitiators && !fcInitiators.isEmpty()) {
                        // remove FC initiators from the HSD.
                        exportMgr.deleteWWNsFromHostStorageDomain(systemObjectID, hsd.getObjectID(), fcInitiators, storage.getModel());
                    }
                    if (null != iSCSIInitiators && !iSCSIInitiators.isEmpty()) {
                        // remove iSCSI names from the HSD.
                        exportMgr.deleteISCSIsFromHostStorageDomain(systemObjectID, hsd.getObjectID(), iSCSIInitiators, storage.getModel());
                    }
                }
            }
            dbClient.updateObject(exportMask);
            // update the task status after processing all HSDs.
            taskCompleter.ready(dbClient);
        } else {
            log.info("No Host groups found on exportMask {}", exportMaskURI);
            // No HSDs found in the exportMask.
            taskCompleter.ready(dbClient);
        }
    } catch (Exception e) {
        log.error(String.format("removeInitiator failed - maskURI: %s", exportMaskURI.toString()), e);
        ServiceError serviceError = DeviceControllerException.errors.jobFailedOpMsg(ResourceOperationTypeEnum.DELETE_EXPORT_INITIATOR.getName(), e.getMessage());
        taskCompleter.error(dbClient, serviceError);
    } finally {
        long totalTime = System.currentTimeMillis() - startTime;
        log.info(String.format("removeInitiator took %f seconds", (double) totalTime / (double) 1000));
    }
    log.info("{} removeInitiator END...", storage.getSerialNumber());
}
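The core decision in the loop above is whether removing the given initiators empties a Host Storage Domain, in which case the whole HSD is deleted, or whether only the matching initiators should be removed from it. A generalized sketch of that rule (the HDS code applies it separately to FC WWNs and iSCSI names; the class, enum, and method names here are illustrative):

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

final class HostStorageDomainCleanup {

    enum Action { DELETE_DOMAIN, REMOVE_INITIATORS, NOTHING }

    /**
     * Decides what to do with a Host Storage Domain when a set of initiators is removed:
     * delete the whole domain when the removal would leave it empty, remove only the
     * matching initiators otherwise, or do nothing when none of them belong to the domain.
     */
    static Action decide(Collection<String> domainInitiators, Collection<String> initiatorsToRemove) {
        Set<String> domain = new HashSet<>(domainInitiators);
        Set<String> matching = new HashSet<>(domain);
        matching.retainAll(new HashSet<>(initiatorsToRemove));
        if (matching.isEmpty()) {
            return Action.NOTHING;
        }
        return matching.size() == domain.size() ? Action.DELETE_DOMAIN : Action.REMOVE_INITIATORS;
    }
}

Mapped back to the snippet, DELETE_DOMAIN corresponds to the deleteHostStorageDomain branch, and REMOVE_INITIATORS to the deleteWWNsFromHostStorageDomain / deleteISCSIsFromHostStorageDomain calls.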
Use of com.emc.storageos.db.client.model.ExportMask in project coprhd-controller by CoprHD.
In the class HDSExportOperations, the method removeVolumes:
@Override
public void removeVolumes(StorageSystem storage, URI exportMaskURI, List<URI> volumes, List<Initiator> initiatorList, TaskCompleter taskCompleter) throws DeviceControllerException {
    log.info("{} removeVolumes START...", storage.getSerialNumber());
    try {
        log.info("removeVolumes: Export mask id: {}", exportMaskURI);
        log.info("removeVolumes: volumes: {}", Joiner.on(',').join(volumes));
        if (initiatorList != null) {
            log.info("removeVolumes: impacted initiators: {}", Joiner.on(",").join(initiatorList));
        }
        HDSApiClient hdsApiClient = hdsApiFactory.getClient(HDSUtils.getHDSServerManagementServerInfo(storage), storage.getSmisUserName(), storage.getSmisPassword());
        HDSApiExportManager exportMgr = hdsApiClient.getHDSApiExportManager();
        String systemObjectID = HDSUtils.getSystemObjectID(storage);
        ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskURI);
        if (CollectionUtils.isEmpty(exportMask.getDeviceDataMap())) {
            log.info("HSD's are not found in the exportMask {} device DataMap.", exportMask.getId());
            taskCompleter.ready(dbClient);
            // Nothing to remove; exit early so the completer is not updated again below.
            return;
        }
        // Get the context from the task completer, in case this is a rollback.
        boolean isRollback = WorkflowService.getInstance().isStepInRollbackState(taskCompleter.getOpId());
        ExportMaskValidationContext ctx = new ExportMaskValidationContext();
        ctx.setStorage(storage);
        ctx.setExportMask(exportMask);
        ctx.setBlockObjects(volumes, dbClient);
        ctx.setInitiators(initiatorList);
        // Allow exceptions to be thrown when not rolling back
        ctx.setAllowExceptions(!isRollback);
        AbstractHDSValidator removeVolumeFromMaskValidator = (AbstractHDSValidator) validator.removeVolumes(ctx);
        removeVolumeFromMaskValidator.validate();
        StringSetMap deviceDataMap = exportMask.getDeviceDataMap();
        Set<String> hsdList = deviceDataMap.keySet();
        List<Path> pathObjectIdList = new ArrayList<Path>();
        if (null == hsdList || hsdList.isEmpty()) {
            throw HDSException.exceptions.notAbleToFindHostStorageDomain(systemObjectID);
        }
        if (null != exportMask && !exportMask.getInactive()) {
            for (String hsdObjectId : hsdList) {
                HostStorageDomain hsd = exportMgr.getHostStorageDomain(systemObjectID, hsdObjectId);
                if (null == hsd) {
                    log.warn("Couldn't find the HSD {} to remove volume from ExportMask", hsdObjectId);
                    continue;
                }
                if (null != hsd.getPathList() && !hsd.getPathList().isEmpty()) {
                    pathObjectIdList.addAll(getPathObjectIdsFromHsd(hsd, volumes));
                }
            }
            if (!pathObjectIdList.isEmpty()) {
                hdsApiClient.getHDSBatchApiExportManager().deleteLUNPathsFromStorageSystem(systemObjectID, pathObjectIdList, storage.getModel());
            } else {
                log.info("No volumes found on system: {}", systemObjectID);
            }
        }
        // Update the status after deleting the volume from all HSDs.
        taskCompleter.ready(dbClient);
    } catch (Exception e) {
        log.error(String.format("removeVolume failed - maskURI: %s", exportMaskURI.toString()), e);
        ServiceError serviceError = DeviceControllerException.errors.jobFailedMsg(e.getMessage(), e);
        taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} removeVolumes END...", storage.getSerialNumber());
}
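getPathObjectIdsFromHsd(hsd, volumes) gathers, per HSD, the LUN path identifiers that belong to the volumes being removed so that a single batch delete call can be issued afterwards. A simplified sketch of that selection, with a hypothetical LunPath type standing in for the HDS model classes:

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class LunPathSelection {

    // Illustrative stand-in for one LUN path entry inside a Host Storage Domain.
    static final class LunPath {
        final String volumeId;      // device (volume) the path points at
        final String pathObjectId;  // identifier used to delete the path on the array
        LunPath(String volumeId, String pathObjectId) {
            this.volumeId = volumeId;
            this.pathObjectId = pathObjectId;
        }
    }

    /**
     * Collects, across all Host Storage Domains of a mask, the path object IDs whose
     * volume is in the removal set, so they can be deleted in one batch call.
     */
    static List<String> selectPathsToDelete(Map<String, List<LunPath>> hsdToPaths, Collection<String> volumesToRemove) {
        Set<String> removeSet = new HashSet<>(volumesToRemove);
        List<String> pathIds = new ArrayList<>();
        for (List<LunPath> paths : hsdToPaths.values()) {
            for (LunPath path : paths) {
                if (removeSet.contains(path.volumeId)) {
                    pathIds.add(path.pathObjectId);
                }
            }
        }
        return pathIds;
    }
}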