Use of com.emc.storageos.vplex.api.VPlexApiException in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method storageViewAddStoragePorts.
/**
* Workflow Step to add storage port(s) to Storage View.
* Note arguments (except stepId) must match storageViewAddStoragePortsMethod above.
*
* @param vplexURI
* -- URI of VPlex StorageSystem
* @param exportURI
* -- ExportGroup URI
* @param maskURI
* -- ExportMask URI.
* @param targetURIs
* -- list of targets URIs (VPLEX FE ports) to be added.
* If not null, the targets (VPlex front-end ports) indicated by the targetURIs will be added
* to the Storage View, after verifying that they belong to the zoningMap storage ports.
* If null, then ports are calculated from the zoningMap.
* @param completer the ExportMaskAddInitiatorCompleter
* @param stepId
* -- Workflow step id.
* @throws WorkflowException
*/
public void storageViewAddStoragePorts(URI vplexURI, URI exportURI, URI maskURI, List<URI> targetURIs, ExportMaskAddInitiatorCompleter completer, String stepId) throws DeviceControllerException {
try {
WorkflowStepCompleter.stepExecuting(stepId);
ExportOperationContext context = new VplexExportOperationContext();
// Prime the context object
completer.updateWorkflowStepContext(context);
StorageSystem vplex = getDataObject(StorageSystem.class, vplexURI, _dbClient);
ExportGroup exportGroup = getDataObject(ExportGroup.class, exportURI, _dbClient);
VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplex, _dbClient);
List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, vplexURI);
for (ExportMask exportMask : exportMasks) {
// If a specific ExportMask is to be processed, ignore any others.
if (maskURI != null && !exportMask.getId().equals(maskURI)) {
continue;
}
ArrayList<URI> filteredTargetURIs = new ArrayList<URI>();
// Filter or get targets from the zoning map
if (exportMask.getZoningMap() != null) {
Set<String> zoningMapTargets = BlockStorageScheduler.getTargetIdsFromAssignments(exportMask.getZoningMap());
List<URI> zoningMapTargetURIs = StringSetUtil.stringSetToUriList(zoningMapTargets);
if (targetURIs == null || targetURIs.isEmpty()) {
// Add all storage ports from the zoning map
if (zoningMapTargetURIs != null && !zoningMapTargetURIs.isEmpty()) {
filteredTargetURIs.addAll(zoningMapTargetURIs);
}
} else {
// Log any ports not in the zoning map.
for (URI targetURI : targetURIs) {
filteredTargetURIs.add(targetURI);
if (!zoningMapTargetURIs.contains(targetURI)) {
_log.info(String.format("Target %s not in zoning map", targetURI));
}
}
}
}
// Add new targets if specified
if (!filteredTargetURIs.isEmpty()) {
List<PortInfo> targetPortInfos = new ArrayList<PortInfo>();
List<URI> targetsAddedToStorageView = new ArrayList<URI>();
for (URI target : filteredTargetURIs) {
// Do not try to add a port twice.
if (exportMask.getStoragePorts().contains(target.toString())) {
continue;
}
// Build the PortInfo structure for the port to be added
StoragePort port = getDataObject(StoragePort.class, target, _dbClient);
PortInfo pi = new PortInfo(port.getPortNetworkId().toUpperCase().replaceAll(":", ""), null, port.getPortName(), null);
targetPortInfos.add(pi);
targetsAddedToStorageView.add(target);
}
if (!targetPortInfos.isEmpty()) {
// Add the targets on the VPLEX
client.addTargetsToStorageView(exportMask.getMaskName(), targetPortInfos);
ExportOperationContext.insertContextOperation(completer, VplexExportOperationContext.OPERATION_ADD_TARGETS_TO_STORAGE_VIEW, targetsAddedToStorageView);
// Add the targets to the database.
for (URI target : targetsAddedToStorageView) {
exportMask.addTarget(target);
}
_dbClient.updateObject(exportMask);
}
}
}
InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_083);
completer.ready(_dbClient);
} catch (VPlexApiException vae) {
_log.error("VPlexApiException adding storagePorts to Storage View: " + vae.getMessage(), vae);
failStep(completer, stepId, vae);
} catch (Exception ex) {
_log.error("Exception adding storagePorts to Storage View: " + ex.getMessage(), ex);
String opName = ResourceOperationTypeEnum.ADD_STORAGE_VIEW_STORAGEPORTS.getName();
ServiceError serviceError = VPlexApiException.errors.storageViewAddStoragePortFailed(opName, ex);
failStep(completer, stepId, serviceError);
}
}
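The Javadoc above notes that the arguments (minus stepId) must line up with storageViewAddStoragePortsMethod. As a hedged sketch of what that companion plausibly looks like (its exact signature is not shown here, so treat this as an assumption), it simply packages the step arguments into a Workflow.Method so the workflow engine can dispatch back into this method:
private Workflow.Method storageViewAddStoragePortsMethod(URI vplexURI, URI exportURI, URI maskURI, List<URI> targetURIs, ExportMaskAddInitiatorCompleter completer) {
// The method name string must match the executed step method; the arguments match it
// minus the stepId, which the workflow engine supplies when it invokes the step.
return new Workflow.Method("storageViewAddStoragePorts", vplexURI, exportURI, maskURI, targetURIs, completer);
}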
Use of com.emc.storageos.vplex.api.VPlexApiException in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method invalidateCache.
/**
* Called to invalidate the read cache for a VPLEX volume when
* restoring a snapshot.
*
* @param vplexURI
* The URI of the VPLEX system.
* @param vplexVolumeURI
* The URI of a VPLEX volume.
* @param stepId
* The workflow step identifier.
*/
public void invalidateCache(URI vplexURI, URI vplexVolumeURI, String stepId) {
_log.info("Executing invalidate cache for volume {} on VPLEX {}", vplexVolumeURI, vplexURI);
try {
// Update workflow step.
WorkflowStepCompleter.stepExecuting(stepId);
// Get the API client.
StorageSystem vplexSystem = getDataObject(StorageSystem.class, vplexURI, _dbClient);
VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplexSystem, _dbClient);
_log.info("Got VPLEX API client");
// Get the VPLEX volume.
Volume vplexVolume = getDataObject(Volume.class, vplexVolumeURI, _dbClient);
String vplexVolumeName = vplexVolume.getDeviceLabel();
_log.info("Got VPLEX volumes");
// Invalidate the cache for the volume.
boolean stillInProgress = client.invalidateVirtualVolumeCache(vplexVolumeName);
_log.info("Invalidated the VPLEX volume cache");
// If the invalidation is still in progress, queue a job to monitor it and complete the step when the invalidation completes.
if (stillInProgress) {
CacheStatusTaskCompleter invalidateCompleter = new CacheStatusTaskCompleter(vplexVolumeURI, stepId);
VPlexCacheStatusJob cacheStatusJob = new VPlexCacheStatusJob(invalidateCompleter);
ControllerServiceImpl.enqueueJob(new QueueJob(cacheStatusJob));
_log.info("Queued job to monitor migration progress.");
} else {
// Update workflow step state to success.
WorkflowStepCompleter.stepSucceded(stepId);
_log.info("Updated workflow step state to success");
}
} catch (VPlexApiException vae) {
_log.error("Exception invalidating VPLEX volume cache " + vae.getMessage(), vae);
WorkflowStepCompleter.stepFailed(stepId, vae);
} catch (Exception e) {
_log.error("Exception invalidating VPLEX volume cache " + e.getMessage(), e);
WorkflowStepCompleter.stepFailed(stepId, VPlexApiException.exceptions.failedInvalidatingVolumeCache(vplexVolumeURI.toString(), e));
}
}
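The interesting part of invalidateCache is that the step may finish either synchronously or asynchronously: if the VPLEX reports the invalidation is still running, a VPlexCacheStatusJob is queued and the completer finishes the step later. Below is a minimal, generic, self-contained sketch of that complete-now-or-poll-later pattern, assuming a hypothetical status check; none of it is CoprHD API.
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;
public class CacheInvalidationPattern {
// stillInProgress mirrors the boolean returned by invalidateVirtualVolumeCache above;
// invalidationComplete stands in for whatever status check the queued job performs.
static void completeStep(boolean stillInProgress, BooleanSupplier invalidationComplete) throws InterruptedException {
if (!stillInProgress) {
// Synchronous case: mark the workflow step successful immediately.
System.out.println("Invalidation finished synchronously; step succeeds now.");
return;
}
// Asynchronous case: poll until the invalidation reports done, then complete the step.
while (!invalidationComplete.getAsBoolean()) {
TimeUnit.SECONDS.sleep(10);
}
System.out.println("Invalidation finished; completer marks the step successful.");
}
}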
Use of com.emc.storageos.vplex.api.VPlexApiException in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method detachMirrorDevice.
/**
* This method detaches the mirror device from the VPLEX volume's source device.
* If discard is true it will leave the device and the underlying structure on VPlex.
* True is used when this method is used in the context of deleting the mirror.
* If discard is false it will convert the detached mirror into a virtual volume with
* the same name as the mirror device. False is used in the context of promoting the mirror
* to a vplex volume.
*
* @param vplexURI
* URI of the VPlex StorageSystem
* @param vplexMirrorURI
* URI of the mirror to be detached.
* @param promotedVolumeURI
* URI of the volume created to represent the promoted mirror, or null
* when the mirror is being deleted; it is removed again if the detach fails.
* @param discard
* true or false value, whether to discard the device or not.
* @param stepId
* The stepId used for completion.
*
* @throws WorkflowException
* When an error occurs updating the workflow step
* state.
*/
public void detachMirrorDevice(URI vplexURI, URI vplexMirrorURI, URI promotedVolumeURI, boolean discard, String stepId) throws WorkflowException {
try {
WorkflowStepCompleter.stepExecuting(stepId);
VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplexURI, _dbClient);
VplexMirror vplexMirror = getDataObject(VplexMirror.class, vplexMirrorURI, _dbClient);
Volume sourceVplexVolume = getDataObject(Volume.class, vplexMirror.getSource().getURI(), _dbClient);
if (vplexMirror.getDeviceLabel() != null) {
if (null == sourceVplexVolume.getAssociatedVolumes() || sourceVplexVolume.getAssociatedVolumes().isEmpty()) {
_log.error("VPLEX volume {} has no backend volumes.", sourceVplexVolume.forDisplay());
throw InternalServerErrorException.internalServerErrors.noAssociatedVolumesForVPLEXVolume(sourceVplexVolume.forDisplay());
}
if (sourceVplexVolume.getAssociatedVolumes().size() > 1) {
// Call to detach mirror device from Distributed Virtual Volume
client.detachLocalMirrorFromDistributedVirtualVolume(sourceVplexVolume.getDeviceLabel(), vplexMirror.getDeviceLabel(), discard);
} else {
// Call to detach mirror device from Local Virtual Volume
client.detachMirrorFromLocalVirtualVolume(sourceVplexVolume.getDeviceLabel(), vplexMirror.getDeviceLabel(), discard);
}
// Record VPLEX mirror detach event.
recordBourneVplexMirrorEvent(vplexMirrorURI, OperationTypeEnum.DETACH_VOLUME_MIRROR.getEvType(true), Operation.Status.ready, OperationTypeEnum.DETACH_VOLUME_MIRROR.getDescription());
} else {
_log.info("It seems vplex mirror {} was never created, so move to the next step for cleanup.", vplexMirror.getLabel());
}
WorkflowStepCompleter.stepSucceded(stepId);
} catch (VPlexApiException vae) {
_log.error("Exception detaching Vplex Mirror {} ", vplexMirrorURI + vae.getMessage(), vae);
if (promotedVolumeURI != null) {
// If we are here due to promote mirror action then
// delete the volume that was supposed to be promoted volume.
Volume volume = _dbClient.queryObject(Volume.class, promotedVolumeURI);
_dbClient.removeObject(volume);
}
recordBourneVplexMirrorEvent(vplexMirrorURI, OperationTypeEnum.DETACH_VOLUME_MIRROR.getEvType(true), Operation.Status.error, OperationTypeEnum.DETACH_VOLUME_MIRROR.getDescription());
WorkflowStepCompleter.stepFailed(stepId, vae);
} catch (Exception ex) {
_log.error("Exception detaching Vplex Mirror {} ", vplexMirrorURI + ex.getMessage(), ex);
if (promotedVolumeURI != null) {
// If we are here due to promote mirror action then
// delete the volume that was supposed to be promoted volume.
Volume volume = _dbClient.queryObject(Volume.class, promotedVolumeURI);
_dbClient.removeObject(volume);
}
String opName = ResourceOperationTypeEnum.DETACH_VPLEX_LOCAL_MIRROR.getName();
ServiceError serviceError = VPlexApiException.errors.detachMirrorFailed(opName, ex);
recordBourneVplexMirrorEvent(vplexMirrorURI, OperationTypeEnum.DETACH_VOLUME_MIRROR.getEvType(true), Operation.Status.error, OperationTypeEnum.DETACH_VOLUME_MIRROR.getDescription());
WorkflowStepCompleter.stepFailed(stepId, serviceError);
}
}
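As the Javadoc describes, the same step serves both the delete-mirror and promote-mirror flows, distinguished only by the discard flag and the promoted volume. A hedged sketch of the two parameterizations, assuming the Workflow.Method(name, args...) convention used by this controller; the arguments mirror detachMirrorDevice minus the trailing stepId, and the variable names are illustrative only.
// Deleting a mirror: discard the detached device; later steps clean up the underlying structure.
Workflow.Method detachForDelete = new Workflow.Method("detachMirrorDevice", vplexURI, vplexMirrorURI, null, true);
// Promoting a mirror: keep the device and pass the volume that will represent the promoted mirror.
Workflow.Method detachForPromote = new Workflow.Method("detachMirrorDevice", vplexURI, vplexMirrorURI, promotedVolumeURI, false);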
Use of com.emc.storageos.vplex.api.VPlexApiException in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method migrateVirtualVolume.
/**
* Creates and starts a VPlex data migration for the passed virtual volume
* on the passed VPlex storage system. The passed target is a newly created
* backend volume to which the data will be migrated. The source for the
* data migration is the current backend volume for the virtual volume that
* is in the same varray as the passed target. The method also creates
* a migration job to monitor the progress of the migration. The workflow
* step will complete when the migration completes, at which point the
* migration is automatically committed.
*
* @param vplexURI
* The URI of the VPlex storage system.
* @param virtualVolumeURI
* The URI of the virtual volume.
* @param targetVolumeURI
* The URI of the migration target.
* @param migrationURI
* The URI of the migration.
* @param newNhURI
* The URI of the new varray for the virtual volume
* when a local virtual volume is being migrated to the other
* cluster, or null.
* @param stepId
* The workflow step identifier.
* @throws WorkflowException
*/
public void migrateVirtualVolume(URI vplexURI, URI virtualVolumeURI, URI targetVolumeURI, URI migrationURI, URI newNhURI, String stepId) throws WorkflowException {
_log.info("Migration {} using target {}", migrationURI, targetVolumeURI);
try {
// Update step state to executing.
WorkflowStepCompleter.stepExecuting(stepId);
// Initialize the step data. The step data indicates if we
// successfully started the migration and is used in
// rollback.
_workflowService.storeStepData(stepId, Boolean.FALSE);
// Get the virtual volume.
Volume virtualVolume = getDataObject(Volume.class, virtualVolumeURI, _dbClient);
String virtualVolumeName = virtualVolume.getDeviceLabel();
_log.info("Virtual volume name is {}", virtualVolumeName);
// Setup the native volume info for the migration target.
Volume migrationTarget = getDataObject(Volume.class, targetVolumeURI, _dbClient);
StorageSystem targetStorageSystem = getDataObject(StorageSystem.class, migrationTarget.getStorageController(), _dbClient);
_log.info("Storage system for migration target is {}", migrationTarget.getStorageController());
List<String> itls = VPlexControllerUtils.getVolumeITLs(migrationTarget);
VolumeInfo nativeVolumeInfo = new VolumeInfo(targetStorageSystem.getNativeGuid(), targetStorageSystem.getSystemType(), migrationTarget.getWWN().toUpperCase().replaceAll(":", ""), migrationTarget.getNativeId(), migrationTarget.getThinlyProvisioned().booleanValue(), itls);
// Get the migration associated with the target.
Migration migration = getDataObject(Migration.class, migrationURI, _dbClient);
// Determine the unique name for the migration. We used to identify
// the migration source and target, using the array serial number
// and volume native id, in the migration name. This was fine
// for VPlex extent migration, which has a max length of 63
// for the migration name. However, for remote migrations,
// which require VPlex device migration, the max length is much
// more restrictive, around 20 characters. So, we switched over
// to timestamps.
StringBuilder migrationNameBuilder = new StringBuilder(MIGRATION_NAME_PREFIX);
DateFormat dateFormatter = new SimpleDateFormat(MIGRATION_NAME_DATE_FORMAT);
migrationNameBuilder.append(dateFormatter.format(new Date()));
String migrationName = migrationNameBuilder.toString();
migration.setLabel(migrationName);
_dbClient.updateObject(migration);
_log.info("Migration name is {}", migrationName);
// Get the VPlex API client.
StorageSystem vplexSystem = getDataObject(StorageSystem.class, vplexURI, _dbClient);
VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplexSystem, _dbClient);
_log.info("Got VPlex API client for VPlex {}", vplexURI);
// Get the configured migration speed
String speed = customConfigHandler.getComputedCustomConfigValue(CustomConfigConstants.MIGRATION_SPEED, vplexSystem.getSystemType(), null);
_log.info("Migration speed is {}", speed);
String transferSize = migrationSpeedToTransferSizeMap.get(speed);
// Make a call to the VPlex API client to migrate the virtual
// volume. Note that we need to do a remote migration when a
// local virtual volume is being migrated to the other VPlex
// cluster. If the passed new varray is not null, then
// this is the case.
Boolean isRemoteMigration = newNhURI != null;
// We support both device and extent migrations, however,
// when we don't know anything about the backend volumes
// we must use device migration.
Boolean useDeviceMigration = migration.getSource() == null;
List<VPlexMigrationInfo> migrationInfoList = client.migrateVirtualVolume(migrationName, virtualVolumeName, Arrays.asList(nativeVolumeInfo), isRemoteMigration, useDeviceMigration, true, true, transferSize);
_log.info("Started VPlex migration");
// We store step data indicating that the migration was successfully
// created and started. We will use this to determine the behavior
// on rollback. If we never got to the point that the migration
// was created and started, then there is no rollback to attempt
// on the VPLEX as the migrate API already tried to clean everything
// up on the VPLEX.
_workflowService.storeStepData(stepId, Boolean.TRUE);
// Initialize the migration info in the database.
VPlexMigrationInfo migrationInfo = migrationInfoList.get(0);
migration.setMigrationStatus(VPlexMigrationInfo.MigrationStatus.READY.getStatusValue());
migration.setPercentDone("0");
migration.setStartTime(migrationInfo.getStartTime());
_dbClient.updateObject(migration);
_log.info("Update migration info");
// Create a migration task completer and queue a job to monitor
// the migration progress. The completer will be invoked by the
// job when the migration completes.
MigrationTaskCompleter migrationCompleter = new MigrationTaskCompleter(migrationURI, stepId);
VPlexMigrationJob migrationJob = new VPlexMigrationJob(migrationCompleter);
migrationJob.setTimeoutTimeMsec(MINUTE_TO_MILLISECONDS * Long.valueOf(ControllerUtils.getPropertyValueFromCoordinator(coordinator, CONTROLLER_VPLEX_MIGRATION_TIMEOUT_MINUTES)));
ControllerServiceImpl.enqueueJob(new QueueJob(migrationJob));
_log.info("Queued job to monitor migration progress.");
} catch (VPlexApiException vae) {
_log.error("Exception migrating VPlex virtual volume: " + vae.getMessage(), vae);
WorkflowStepCompleter.stepFailed(stepId, vae);
} catch (Exception ex) {
_log.error("Exception migrating VPlex virtual volume: " + ex.getMessage(), ex);
String opName = ResourceOperationTypeEnum.MIGRATE_VIRTUAL_VOLUME.getName();
ServiceError serviceError = VPlexApiException.errors.migrateVirtualVolume(opName, ex);
WorkflowStepCompleter.stepFailed(stepId, serviceError);
}
}
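The naming comment above is easier to follow with a concrete illustration. The following is a self-contained example of building a timestamp-based migration name; the prefix and date pattern are assumed stand-ins for the MIGRATION_NAME_PREFIX and MIGRATION_NAME_DATE_FORMAT constants, whose real values are not shown here.
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
public class MigrationNameExample {
public static void main(String[] args) {
String prefix = "M_"; // assumed value of MIGRATION_NAME_PREFIX
DateFormat fmt = new SimpleDateFormat("yyMMdd-HHmmss"); // assumed value of MIGRATION_NAME_DATE_FORMAT
String migrationName = prefix + fmt.format(new Date());
// e.g. "M_240311-143052" -- comfortably inside the roughly 20-character limit
// for VPLEX device-migration names, unlike a name that embeds both array
// serial numbers and volume native ids.
System.out.println(migrationName + " (" + migrationName.length() + " chars)");
}
}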
Use of com.emc.storageos.vplex.api.VPlexApiException in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method exportGroupDelete.
/*
* (non-Javadoc)
*
* @see com.emc.storageos.volumecontroller.impl.vplex.VplexController#exportGroupDelete(java.net.URI, java.net.URI,
* java.lang.String)
*/
@Override
public void exportGroupDelete(URI vplex, URI export, String opId) throws ControllerException {
ExportDeleteCompleter completer = null;
try {
_log.info("Entering exportGroupDelete");
WorkflowStepCompleter.stepExecuting(opId);
completer = new ExportDeleteCompleter(export, opId);
StorageSystem vplexSystem = getDataObject(StorageSystem.class, vplex, _dbClient);
ExportGroup exportGroup = null;
try {
exportGroup = getDataObject(ExportGroup.class, export, _dbClient);
} catch (VPlexApiException ve) {
// This exception is caught specifically to handle rollback
// cases. The export group will be marked inactive before this
// method is called, so the lookup always throws this exception in
// rollback scenarios. It is safe to complete here because the storage
// view will already have been deleted by the rollback steps.
completer.ready(_dbClient);
return;
}
_log.info("Attempting to delete ExportGroup " + exportGroup.getGeneratedName() + " on VPLEX " + vplexSystem.getLabel());
Workflow workflow = _workflowService.getNewWorkflow(this, "exportGroupDelete", false, opId);
StringBuffer errorMessages = new StringBuffer();
boolean isValidationNeeded = validatorConfig.isValidationEnabled() && !ExportUtils.checkIfExportGroupIsRP(exportGroup);
_log.info("Orchestration level validation needed : {}", isValidationNeeded);
List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, vplex);
if (exportMasks.isEmpty()) {
throw VPlexApiException.exceptions.exportGroupDeleteFailedNull(vplex.toString());
}
// If none of the ExportGroup's volumes are present in any of its ExportMasks,
// just remove the masks from the group and complete; this can happen when rolling
// back a failed export group creation.
if (!exportGroupMasksContainExportGroupVolume(exportGroup, exportMasks)) {
for (ExportMask exportMask : exportMasks) {
exportGroup.removeExportMask(exportMask.getId());
}
_dbClient.updateObject(exportGroup);
completer.ready(_dbClient);
return;
}
// Add steps to remove exports on the VPlex.
List<URI> exportMaskUris = new ArrayList<URI>();
List<URI> volumeUris = new ArrayList<URI>();
String storageViewStepId = null;
VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplex, _dbClient);
for (ExportMask exportMask : exportMasks) {
if (exportMask.getStorageDevice().equals(vplex)) {
String vplexClusterName = VPlexUtil.getVplexClusterName(exportMask, vplex, client, _dbClient);
VPlexStorageViewInfo storageView = client.getStorageView(vplexClusterName, exportMask.getMaskName());
_log.info("Refreshing ExportMask {}", exportMask.getMaskName());
Map<String, String> targetPortToPwwnMap = VPlexControllerUtils.getTargetPortToPwwnMap(client, vplexClusterName);
VPlexControllerUtils.refreshExportMask(_dbClient, storageView, exportMask, targetPortToPwwnMap, _networkDeviceController);
// assemble a list of other ExportGroups that reference this ExportMask
List<ExportGroup> otherExportGroups = ExportUtils.getOtherExportGroups(exportGroup, exportMask, _dbClient);
boolean existingVolumes = exportMask.hasAnyExistingVolumes();
boolean existingInitiators = exportMask.hasAnyExistingInitiators();
boolean removeVolumes = false;
List<URI> volumeURIList = new ArrayList<URI>();
if (!otherExportGroups.isEmpty()) {
if (exportGroup.getVolumes() != null) {
for (String volUri : exportGroup.getVolumes().keySet()) {
volumeURIList.add(URI.create(volUri));
}
}
volumeURIList = getVolumeListDiff(exportGroup, exportMask, otherExportGroups, volumeURIList);
if (!volumeURIList.isEmpty()) {
removeVolumes = true;
}
} else if (existingVolumes || existingInitiators) {
// The storage view cannot be deleted because the ExportMask still has
// externally created (existing) volumes or initiators.
if (existingVolumes) {
_log.info("Storage view will not be deleted because Export Mask {} has existing volumes: {}", exportMask.getMaskName(), exportMask.getExistingVolumes());
}
if (existingInitiators) {
_log.info("Storage view will not be deleted because Export Mask {} has existing initiators: {}", exportMask.getMaskName(), exportMask.getExistingInitiators());
}
if (exportMask.getUserAddedVolumes() != null && !exportMask.getUserAddedVolumes().isEmpty()) {
StringMap volumes = exportMask.getUserAddedVolumes();
if (volumes != null) {
for (String vol : volumes.values()) {
URI volumeURI = URI.create(vol);
volumeURIList.add(volumeURI);
}
}
if (!volumeURIList.isEmpty()) {
removeVolumes = true;
}
}
} else {
_log.info("creating a deleteStorageView workflow step for " + exportMask.getMaskName());
String exportMaskDeleteStep = workflow.createStepId();
Workflow.Method storageViewExecuteMethod = deleteStorageViewMethod(vplex, exportGroup.getId(), exportMask.getId(), false);
storageViewStepId = workflow.createStep(DELETE_STORAGE_VIEW, String.format("Delete VPLEX Storage View %s for ExportGroup %s", exportMask.getMaskName(), export), storageViewStepId, vplexSystem.getId(), vplexSystem.getSystemType(), this.getClass(), storageViewExecuteMethod, null, exportMaskDeleteStep);
}
if (removeVolumes) {
_log.info("removing volumes: " + volumeURIList);
Workflow.Method method = ExportWorkflowEntryPoints.exportRemoveVolumesMethod(vplexSystem.getId(), export, volumeURIList);
storageViewStepId = workflow.createStep("removeVolumes", String.format("Removing volumes from export on storage array %s (%s)", vplexSystem.getNativeGuid(), vplexSystem.getId().toString()), storageViewStepId, NullColumnValueGetter.getNullURI(), vplexSystem.getSystemType(), ExportWorkflowEntryPoints.class, method, null, null);
}
_log.info("determining which volumes to remove from ExportMask " + exportMask.getMaskName());
exportMaskUris.add(exportMask.getId());
for (URI volumeUri : ExportMaskUtils.getVolumeURIs(exportMask)) {
if (exportGroup.hasBlockObject(volumeUri)) {
volumeUris.add(volumeUri);
_log.info(" this ExportGroup volume is a match: " + volumeUri);
} else {
_log.info(" this ExportGroup volume is not in this export mask, so skipping: " + volumeUri);
}
}
}
}
if (!exportMaskUris.isEmpty()) {
_log.info("exportGroupDelete export mask URIs: " + exportMaskUris);
_log.info("exportGroupDelete volume URIs: " + volumeUris);
String zoningStep = workflow.createStepId();
List<NetworkZoningParam> zoningParams = NetworkZoningParam.convertExportMasksToNetworkZoningParam(export, exportMaskUris, _dbClient);
Workflow.Method zoningExecuteMethod = _networkDeviceController.zoneExportMasksDeleteMethod(zoningParams, volumeUris);
workflow.createStep(ZONING_STEP, String.format("Delete ExportMasks %s for VPlex %s", export, vplex), storageViewStepId, NullColumnValueGetter.getNullURI(), "network-system", _networkDeviceController.getClass(), zoningExecuteMethod, null, zoningStep);
}
String message = errorMessages.toString();
if (isValidationNeeded && !message.isEmpty()) {
_log.error("Error Message {}", errorMessages);
throw DeviceControllerException.exceptions.deleteExportGroupValidationError(exportGroup.forDisplay(), vplexSystem.forDisplay(), message);
}
// Start the workflow
workflow.executePlan(completer, "Successfully deleted ExportMasks for ExportGroup: " + export);
} catch (Exception ex) {
_log.error("Exception deleting ExportGroup: " + ex.getMessage());
String opName = ResourceOperationTypeEnum.DELETE_EXPORT_GROUP.getName();
ServiceError serviceError = VPlexApiException.errors.exportGroupDeleteFailed(opName, ex);
failStep(completer, opId, serviceError);
}
}
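exportGroupDelete bails out early when exportGroupMasksContainExportGroupVolume returns false; that helper is referenced above but not shown. A minimal sketch of what such a check could look like, built only from calls that appear in this method (ExportMaskUtils.getVolumeURIs and ExportGroup.hasBlockObject) and offered as an assumption rather than the project's actual implementation:
private boolean exportGroupMasksContainExportGroupVolume(ExportGroup exportGroup, List<ExportMask> exportMasks) {
// Return true if any mask still carries a block object that belongs to the export group.
for (ExportMask mask : exportMasks) {
for (URI volumeURI : ExportMaskUtils.getVolumeURIs(mask)) {
if (exportGroup.hasBlockObject(volumeURI)) {
return true;
}
}
}
return false;
}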