use of com.emc.storageos.workflow.WorkflowException in project coprhd-controller by CoprHD.
the class ControllerWorkflowCleanupHandler method completeWorkflow.
private void completeWorkflow(URI workflowId) {
    URIQueryResultList stepURIs = new URIQueryResultList();
    dbClient.queryByConstraint(ContainmentConstraint.Factory.getWorkflowWorkflowStepConstraint(workflowId), stepURIs);
    for (URI stepURI : stepURIs) {
        WorkflowStep step = dbClient.queryObject(WorkflowStep.class, stepURI);
        String state = step.getState();
        List<String> activeStepStates = Arrays.asList(StepState.CREATED.toString(), StepState.BLOCKED.toString(),
                StepState.QUEUED.toString(), StepState.EXECUTING.toString());
        if (activeStepStates.contains(state)) {
            WorkflowException ex = WorkflowException.exceptions.workflowTerminatedForFailover(workflowId.toString());
            log.info("Terminate workflow step {}", step.getId());
            WorkflowService.completerStepErrorWithoutRollback(step.getStepId(), ex);
        }
    }
}
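The activeStepStates list above is rebuilt on every loop iteration; a minimal, behavior-equivalent refactoring (an illustrative sketch, not the project's code) would hoist it into a constant with O(1) lookup:

// Illustrative sketch: build the active-state set once; Set.contains is O(1)
// versus List.contains. Assumes java.util.Set and java.util.HashSet are imported.
private static final Set<String> ACTIVE_STEP_STATES = new HashSet<String>(Arrays.asList(
        StepState.CREATED.toString(), StepState.BLOCKED.toString(),
        StepState.QUEUED.toString(), StepState.EXECUTING.toString()));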
use of com.emc.storageos.workflow.WorkflowException in project coprhd-controller by CoprHD.
the class ExportWorkflowUtils method generateExportGroupUpdateWorkflow.
/**
* Creates the workflow for one export mask (storage system) for an update export
* group call. It creates a single step in the main workflow that wraps a workflow
* with necessary steps to:
* <ol>
* <li>add block objects (volumes/snapshots)</li>
* <li>remove volumes</li>
* <li>add initiators</li>
* <li>remove initiators</li>
* </ol>
* The steps are created based on the diff between the current and the requested
* state of the storage system export mask.
*
* @param workflow the main workflow
* @param wfGroupId the workflow group Id, if any
* @param waitFor the id of a step on which this workflow has to wait, if any
* @param exportGroupUri the export group being updated
* @param exportMask the export mask for the storage system
* @param addedBlockObjects the map of block objects to be added
* @param removedBlockObjects the map of block objects to be removed
* @param addedInitiators the new list of initiators to be added
* @param removedInitiators the new list of initiators to be removed
* @param blockStorageControllerUri the block storage controller. This will always
* be used for adding/removing initiators as we
* do not want a protection controller doing this.
* @param workflowList holds workflow and sub-workflow instances so that all locks can be released on failure
* @return the id of the wrapper step that was added to main workflow
* @throws IOException
* @throws WorkflowException
* @throws WorkflowRestartedException
*/
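As the javadoc notes, the steps are derived from the diff between the current and requested export state. A self-contained sketch of how a caller might compute the added/removed block-object maps (these helper names are hypothetical and not part of ExportWorkflowUtils):

// Hypothetical diff helpers: entries present only in 'requested' are additions,
// entries present only in 'current' are removals; HLU values travel with the URIs.
static Map<URI, Integer> computeAdded(Map<URI, Integer> current, Map<URI, Integer> requested) {
    Map<URI, Integer> added = new HashMap<URI, Integer>(requested);
    added.keySet().removeAll(current.keySet());
    return added;
}

static Map<URI, Integer> computeRemoved(Map<URI, Integer> current, Map<URI, Integer> requested) {
    Map<URI, Integer> removed = new HashMap<URI, Integer>(current);
    removed.keySet().removeAll(requested.keySet());
    return removed;
}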
public String generateExportGroupUpdateWorkflow(Workflow workflow, String wfGroupId, String waitFor,
        URI exportGroupUri, ExportMask exportMask, Map<URI, Integer> addedBlockObjects,
        Map<URI, Integer> removedBlockObjects, List<URI> addedInitiators, List<URI> removedInitiators,
        URI blockStorageControllerUri, List<Workflow> workflowList)
        throws IOException, WorkflowException, WorkflowRestartedException {
    // Filter the addedInitiators for non-VPLEX systems by the export group varray.
    ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupUri);
    addedInitiators = ExportUtils.filterNonVplexInitiatorsByExportGroupVarray(exportGroup, addedInitiators, blockStorageControllerUri, _dbClient);
    if (allCollectionsAreEmpty(addedBlockObjects, removedBlockObjects, addedInitiators, removedInitiators)) {
        _log.info(String.format("There is no export update required for %s", blockStorageControllerUri.toString()));
        return null;
    }
    // We would rather the task be the child stepId of the parent workflow's stepId;
    // this helps us preserve parent/child relationships.
    String exportGroupUpdateStepId = workflow.createStepId();
    Workflow storageWorkflow = newWorkflow("storageSystemExportGroupUpdate", false, exportGroupUpdateStepId);
    workflowList.add(storageWorkflow);
    DiscoveredSystemObject storageSystem = getStorageSystem(_dbClient, blockStorageControllerUri);
    String stepId = null;
    // We will lock the host/storage-system pairs necessary for the workflows.
    // There are two possibilities for locking; here we just generate the lock keys.
    List<URI> lockedInitiatorURIs = new ArrayList<URI>();
    lockedInitiatorURIs.addAll(addedInitiators);
    lockedInitiatorURIs.addAll(StringSetUtil.stringSetToUriList(exportGroup.getInitiators()));
    List<String> lockKeys = ControllerLockingUtil.getHostStorageLockKeys(_dbClient,
            ExportGroup.ExportGroupType.valueOf(exportGroup.getType()), lockedInitiatorURIs, blockStorageControllerUri);
    // Generate steps for any initiators that are being added or removed.
    if (addedInitiators != null && !addedInitiators.isEmpty()) {
        stepId = generateExportGroupAddInitiators(storageWorkflow, null, stepId, exportGroupUri, blockStorageControllerUri, addedInitiators);
    }
    if (removedInitiators != null && !removedInitiators.isEmpty()) {
        stepId = generateExportGroupRemoveInitiators(storageWorkflow, null, stepId, exportGroupUri, blockStorageControllerUri, removedInitiators);
    }
    // If this ends up being a problem, we would need to tackle this issue.
    if (removedBlockObjects != null && !removedBlockObjects.isEmpty()) {
        Map<URI, Integer> objectsToRemove = new HashMap<URI, Integer>(removedBlockObjects);
        ProtectionExportController protectionExportController = getProtectionExportController();
        stepId = protectionExportController.addStepsForExportGroupRemoveVolumes(storageWorkflow, null, stepId, exportGroupUri, objectsToRemove, blockStorageControllerUri);
        if (!objectsToRemove.isEmpty()) {
            // Unexport the remaining block objects.
            _log.info(String.format("Generating exportGroupRemoveVolumes step for objects %s associated with storage system [%s]", objectsToRemove, blockStorageControllerUri));
            List<URI> objectsToRemoveList = new ArrayList<URI>(objectsToRemove.keySet());
            stepId = generateExportGroupRemoveVolumes(storageWorkflow, null, stepId, blockStorageControllerUri, exportGroupUri, objectsToRemoveList);
        }
    }
    if (addedBlockObjects != null && !addedBlockObjects.isEmpty()) {
        Map<URI, Integer> objectsToAdd = new HashMap<URI, Integer>(addedBlockObjects);
        ProtectionExportController protectionExportController = getProtectionExportController();
        stepId = protectionExportController.addStepsForExportGroupAddVolumes(storageWorkflow, null, stepId, exportGroupUri, objectsToAdd, blockStorageControllerUri);
        if (!objectsToAdd.isEmpty()) {
            // Export the remaining block objects.
            _log.info(String.format("Generating exportGroupAddVolumes step for objects %s associated with storage system [%s]", objectsToAdd.keySet(), blockStorageControllerUri));
            stepId = generateExportGroupAddVolumes(storageWorkflow, null, stepId, blockStorageControllerUri, exportGroupUri, objectsToAdd);
        }
    }
    boolean addObject = (addedInitiators != null && !addedInitiators.isEmpty())
            || (addedBlockObjects != null && !addedBlockObjects.isEmpty());
    if (exportMask == null && addObject) {
        // Recreate the export mask only for add initiator/volume.
        if (addedInitiators == null) {
            addedInitiators = new ArrayList<URI>();
        }
        if (addedInitiators.isEmpty()) {
            addedInitiators.addAll(getInitiators(exportGroup));
        }
        // Add block volumes already in the export group.
        if (exportGroup.getVolumes() != null) {
            for (String key : exportGroup.getVolumes().keySet()) {
                BlockObject bobject = BlockObject.fetch(_dbClient, URI.create(key));
                if (bobject.getStorageController().equals(blockStorageControllerUri)) {
                    addedBlockObjects.put(URI.create(key), Integer.valueOf(exportGroup.getVolumes().get(key)));
                }
            }
        }
        // Acquire locks for the parent workflow.
        boolean acquiredLocks = getWorkflowService().acquireWorkflowLocks(workflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
        if (!acquiredLocks) {
            throw DeviceControllerException.exceptions.failedToAcquireLock(lockKeys.toString(), "ExportMaskUpdate: " + exportGroup.getLabel());
        }
        Map<URI, Integer> objectsToAdd = new HashMap<URI, Integer>(addedBlockObjects);
        ProtectionExportController protectionController = getProtectionExportController();
        waitFor = protectionController.addStepsForExportGroupCreate(workflow, wfGroupId, waitFor, exportGroupUri, objectsToAdd, blockStorageControllerUri, addedInitiators);
        if (!objectsToAdd.isEmpty()) {
            // There are no export BlockObjects tied to the current storage system that have an associated protection
            // system. We can just create a step to call the block controller directly for export group create.
            _log.info(String.format("Generating exportGroupCreate steps for objects %s associated with storage system [%s]", objectsToAdd, blockStorageControllerUri));
            // Add the new block objects to the existing ones and send all down.
            waitFor = generateExportGroupCreateWorkflow(workflow, wfGroupId, waitFor, blockStorageControllerUri, exportGroupUri, addedBlockObjects, addedInitiators);
        }
        return waitFor;
    }
    try {
        // Acquire locks for the storageWorkflow, which is started just below.
        boolean acquiredLocks = getWorkflowService().acquireWorkflowLocks(storageWorkflow, lockKeys, LockTimeoutValue.get(LockType.EXPORT_GROUP_OPS));
        if (!acquiredLocks) {
            throw DeviceControllerException.exceptions.failedToAcquireLock(lockKeys.toString(), "ExportMaskUpdate: " + exportMask.getMaskName());
        }
        // There will not be a rollback step for the overall update; instead,
        // the code allows the user to retry the update as needed.
        Workflow.Method method = ExportWorkflowEntryPoints.exportGroupUpdateMethod(blockStorageControllerUri, exportGroupUri, storageWorkflow);
        return newWorkflowStep(workflow, wfGroupId, String.format("Updating export (%s) on storage array %s", exportGroupUri, storageSystem.getNativeGuid()), storageSystem, method, null, waitFor, exportGroupUpdateStepId);
    } catch (Exception ex) {
        getWorkflowService().releaseAllWorkflowLocks(storageWorkflow);
        throw ex;
    }
}
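The allCollectionsAreEmpty guard near the top of the method is not shown on this page; a plausible implementation (an assumption, not the actual project code) simply checks every argument for null or emptiness:

// Hypothetical implementation of the emptiness guard used above: true only
// when every supplied map and collection is null or empty.
private static boolean allCollectionsAreEmpty(Map<?, ?> addedObjects, Map<?, ?> removedObjects,
        Collection<?> addedInitiators, Collection<?> removedInitiators) {
    return (addedObjects == null || addedObjects.isEmpty())
            && (removedObjects == null || removedObjects.isEmpty())
            && (addedInitiators == null || addedInitiators.isEmpty())
            && (removedInitiators == null || removedInitiators.isEmpty());
}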
use of com.emc.storageos.workflow.WorkflowException in project coprhd-controller by CoprHD.
the class AbstractConsistencyGroupManager method deleteCG.
/**
* Deletes the consistency group with the passed URI on the VPLEX storage
* system with the passed URI.
*
* @param vplexSystemURI The URI of the VPlex system.
* @param cgUri The URI of the ViPR consistency group.
* @param cgName The name of the VPlex consistency group to delete.
* @param clusterName The name of the VPlex cluster.
* @param setInactive true to mark the CG for deletion.
* @param stepId The workflow step identifier.
*
* @throws WorkflowException When an error occurs updating the workflow step
* state.
*/
public void deleteCG(URI vplexSystemURI, URI cgUri, String cgName, String clusterName, Boolean setInactive, String stepId) throws WorkflowException {
    StorageSystem vplexSystem = null;
    try {
        // Update step state to executing.
        WorkflowStepCompleter.stepExecuting(stepId);
        log.info(String.format("Executing workflow step deleteCG. Storage System: %s, CG Name: %s, Cluster Name: %s", vplexSystemURI, cgName, clusterName));
        vplexSystem = getDataObject(StorageSystem.class, vplexSystemURI, dbClient);
        VPlexApiClient client = getVPlexAPIClient(vplexApiFactory, vplexSystem, dbClient);
        log.info("Got VPlex API client for VPlex system {}", vplexSystemURI);
        InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_085);
        // Make a call to the VPlex API client to delete the consistency group.
        client.deleteConsistencyGroup(cgName);
        log.info(String.format("Deleted consistency group %s", cgName));
        cleanUpVplexCG(vplexSystemURI, cgUri, cgName, setInactive);
        // Update step status to success.
        WorkflowStepCompleter.stepSucceded(stepId);
    } catch (VPlexApiException vae) {
        if (vae.getServiceCode().getCode() == ServiceCode.VPLEX_CG_NOT_FOUND.getCode()) {
            log.info(String.format("Consistency group %s not found on storage system: %s. Assumed already deleted.",
                    clusterName + ":" + cgName,
                    (vplexSystem != null && vplexSystem.forDisplay() != null) ? vplexSystem.forDisplay() : vplexSystemURI));
            cleanUpVplexCG(vplexSystemURI, cgUri, cgName, setInactive);
            WorkflowStepCompleter.stepSucceded(stepId);
        } else {
            log.error("Exception deleting consistency group: " + vae.getMessage(), vae);
            WorkflowStepCompleter.stepFailed(stepId, vae);
        }
    } catch (Exception ex) {
        log.error("Exception deleting consistency group: " + ex.getMessage(), ex);
        String opName = ResourceOperationTypeEnum.DELETE_CONSISTENCY_GROUP.getName();
        ServiceError serviceError = VPlexApiException.errors.deleteCGFailed(opName, ex);
        WorkflowStepCompleter.stepFailed(stepId, serviceError);
    }
}
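The VPLEX_CG_NOT_FOUND branch above is what makes the delete idempotent: a consistency group that is already gone is treated as a successful delete, so the step can safely be retried. The same pattern reduced to its essentials (a sketch reusing the calls shown above):

// Idempotent-delete skeleton: "not found" from the array means the desired
// end state (CG absent) already holds, so report the step as succeeded.
try {
    client.deleteConsistencyGroup(cgName);
    WorkflowStepCompleter.stepSucceded(stepId);
} catch (VPlexApiException vae) {
    if (vae.getServiceCode().getCode() == ServiceCode.VPLEX_CG_NOT_FOUND.getCode()) {
        WorkflowStepCompleter.stepSucceded(stepId); // already deleted elsewhere
    } else {
        WorkflowStepCompleter.stepFailed(stepId, vae);
    }
}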
use of com.emc.storageos.workflow.WorkflowException in project coprhd-controller by CoprHD.
the class VPlexDeviceController method commitMigration.
/**
* Invoked by the migration workflow to commit the migration after it has
* been completed.
*
* @param vplexURI
* The URI of the VPlex storage system.
* @param virtualVolumeURI
* The URI of the virtual volume.
* @param migrationURI
* The URI of the data migration.
* @param rename
* Indicates if the volume should be renamed after commit to
* conform to ViPR standard naming conventions.
* @param newVpoolURI - the new virtual pool for the virtual volume (or null if not changing)
* @param newVarrayURI - the new varray for the virtual volume (or null if not changing)
* @param stepId
* The workflow step identifier.
*
* @throws WorkflowException
*/
public void commitMigration(URI vplexURI, URI virtualVolumeURI, URI migrationURI, Boolean rename, URI newVpoolURI, URI newVarrayURI, String stepId) throws WorkflowException {
    _log.info("Committing migration {}", migrationURI);
    Migration migration = null;
    VPlexApiClient client = null;
    try {
        // Update step state to executing.
        WorkflowStepCompleter.stepExecuting(stepId);
        // Get the migration.
        migration = getDataObject(Migration.class, migrationURI, _dbClient);
        // The migration may already have been committed outside of this
        // workflow, so check the status.
        if (!VPlexMigrationInfo.MigrationStatus.COMMITTED.getStatusValue().equals(migration.getMigrationStatus())) {
            // Get the VPlex API client.
            StorageSystem vplexSystem = getDataObject(StorageSystem.class, vplexURI, _dbClient);
            client = getVPlexAPIClient(_vplexApiFactory, vplexSystem, _dbClient);
            _log.info("Got VPlex API client for system {}", vplexURI);
            // Make a call to the VPlex API client to commit the migration.
            // Note that for ingested VPLEX volumes created outside ViPR, we
            // don't want to update the name.
            List<VPlexMigrationInfo> migrationInfoList = new ArrayList<VPlexMigrationInfo>();
            Volume virtualVolume = getDataObject(Volume.class, virtualVolumeURI, _dbClient);
            try {
                migrationInfoList = client.commitMigrations(virtualVolume.getDeviceLabel(), Arrays.asList(migration.getLabel()), true, true, rename.booleanValue());
                _log.info("Committed migration {}", migration.getLabel());
            } catch (VPlexApiException vae) {
                _log.error("Exception committing VPlex migration: " + vae.getMessage(), vae);
                boolean committed = false;
                // Check the migration status. Maybe it committed even though we had an error.
                VPlexMigrationInfo migrationInfo = client.getMigrationInfo(migration.getLabel());
                if (migrationInfo.getStatus().equalsIgnoreCase(VPlexMigrationInfo.MigrationStatus.COMMITTED.name())) {
                    _log.info("Migration {} has committed despite exception", migration.getLabel());
                    migrationInfoList.clear();
                    migrationInfoList.add(migrationInfo);
                    committed = true;
                } else {
                    _log.info("Migration {} status {}", migration.getLabel(), migrationInfo.getStatus());
                }
                if (!committed) {
                    // This was observed at a customer site (COP-21257).
                    if (vae.getServiceCode() == ServiceCode.VPLEX_API_RESPONSE_TIMEOUT_ERROR) {
                        // We are going to throw an error, but we don't want to roll back completely.
                        _workflowService.setWorkflowRollbackContOnError(stepId, false);
                    }
                    WorkflowStepCompleter.stepFailed(stepId, vae);
                    return;
                }
            }
            // Below this point the migration is committed; there is no turning back.
            // Initialize the migration info in the database.
            migration.setMigrationStatus(VPlexMigrationInfo.MigrationStatus.COMMITTED.getStatusValue());
            _dbClient.updateObject(migration);
            _log.info("Updated migration status to committed");
            // Update the virtual volume native id and associated
            // volumes. Note that we don't update CoS until all
            // commits are successful.
            VPlexVirtualVolumeInfo updatedVirtualVolumeInfo = migrationInfoList.get(0).getVirtualVolumeInfo();
            // Update any properties that were changed after migration, including deviceLabel, nativeGuid, and nativeId.
            // Also, if the updated volume isn't thin-enabled, but it is thin-capable and the target vpool supports thin
            // provisioning, then a call should be made to the VPLEX to flip the thin-enabled flag on for this volume.
            URI targetVolumeUri = migration.getTarget();
            Volume targetVolume = getDataObject(Volume.class, targetVolumeUri, _dbClient);
            if (updatedVirtualVolumeInfo != null) {
                _log.info(String.format("New virtual volume is %s", updatedVirtualVolumeInfo.toString()));
                // If the new virtual volume is thin-capable but thin-enabled is not true,
                // we need to ask the VPLEX to convert it to a thin-enabled volume;
                // this doesn't happen automatically for thick-to-thin data migrations.
                boolean isThinEnabled = updatedVirtualVolumeInfo.isThinEnabled();
                if (!isThinEnabled && VPlexApiConstants.TRUE.equalsIgnoreCase(updatedVirtualVolumeInfo.getThinCapable())) {
                    if (verifyVplexSupportsThinProvisioning(vplexSystem)) {
                        if (null != targetVolume) {
                            _log.info(String.format("Migration target volume is %s", targetVolume.forDisplay()));
                            VirtualPool targetVirtualPool = getDataObject(VirtualPool.class, targetVolume.getVirtualPool(), _dbClient);
                            if (null != targetVirtualPool) {
                                _log.info(String.format("Migration target VirtualPool is %s", targetVirtualPool.forDisplay()));
                                boolean doEnableThin = VirtualPool.ProvisioningType.Thin.toString().equalsIgnoreCase(targetVirtualPool.getSupportedProvisioningType());
                                if (doEnableThin) {
                                    _log.info(String.format("The new VirtualPool is thin; requesting VPLEX to enable thin provisioning on %s", updatedVirtualVolumeInfo.getName()));
                                    isThinEnabled = client.setVirtualVolumeThinEnabled(updatedVirtualVolumeInfo);
                                }
                            }
                        }
                    }
                }
                virtualVolume.setDeviceLabel(updatedVirtualVolumeInfo.getName());
                virtualVolume.setNativeId(updatedVirtualVolumeInfo.getPath());
                virtualVolume.setNativeGuid(updatedVirtualVolumeInfo.getPath());
                virtualVolume.setThinlyProvisioned(isThinEnabled);
            }
            // Note that for ingested volumes, there will be no associated volumes
            // at first.
            StringSet assocVolumes = virtualVolume.getAssociatedVolumes();
            if ((assocVolumes != null) && (!assocVolumes.isEmpty())) {
                // For a distributed volume, there could be multiple
                // migrations. When the first completes, there will be no
                // associated volumes, but when the second completes, there
                // will be. Note also that the migration source could be null.
                URI sourceVolumeUri = migration.getSource();
                if (sourceVolumeUri != null) {
                    assocVolumes.remove(sourceVolumeUri.toString());
                    // Retain any previous RP fields on the new target volume.
                    Volume sourceVolume = getDataObject(Volume.class, sourceVolumeUri, _dbClient);
                    if (sourceVolume != null) {
                        boolean targetUpdated = false;
                        if (NullColumnValueGetter.isNotNullValue(sourceVolume.getRpCopyName())) {
                            targetVolume.setRpCopyName(sourceVolume.getRpCopyName());
                            targetUpdated = true;
                        }
                        if (NullColumnValueGetter.isNotNullValue(sourceVolume.getInternalSiteName())) {
                            targetVolume.setInternalSiteName(sourceVolume.getInternalSiteName());
                            targetUpdated = true;
                        }
                        if (targetUpdated) {
                            _dbClient.updateObject(targetVolume);
                        }
                    }
                }
                assocVolumes.add(migration.getTarget().toString());
            } else {
                // NOTE: Now an ingested volume will have associated volumes.
                // It will no longer be considered an ingested volume.
                assocVolumes = new StringSet();
                assocVolumes.add(migration.getTarget().toString());
                virtualVolume.setAssociatedVolumes(assocVolumes);
            }
            updateMigratedVirtualVolumeVpoolAndVarray(virtualVolume, newVpoolURI, newVarrayURI);
            _dbClient.updateObject(virtualVolume);
            _log.info("Updated virtual volume.");
        } else {
            _log.info("The migration is already committed.");
            // Note that we don't set the device label and native id. If the
            // migration was committed outside of Bourne, the virtual volume
            // will still have the old name. If it was committed through
            // Bourne, these values would already have been updated.
            // Regardless, we have to update the vpool, and we update the
            // associated volumes in case it was committed outside of
            // Bourne.
            associateVplexVolumeWithMigratedTarget(migration, virtualVolumeURI);
            _log.info("Updated virtual volume.");
        }
        // Update the workflow step status.
        StringBuilder successMsgBuilder = new StringBuilder();
        successMsgBuilder.append("VPlex System: ");
        successMsgBuilder.append(vplexURI);
        successMsgBuilder.append(" migration: ");
        successMsgBuilder.append(migrationURI);
        successMsgBuilder.append(" was committed");
        _log.info(successMsgBuilder.toString());
        WorkflowStepCompleter.stepSucceded(stepId);
        _log.info("Updated workflow step state to success");
    } catch (VPlexApiException vae) {
        _log.error("Exception committing VPlex migration: " + vae.getMessage(), vae);
        WorkflowStepCompleter.stepFailed(stepId, vae);
    } catch (Exception ex) {
        _log.error("Exception committing VPlex migration: " + ex.getMessage(), ex);
        String opName = ResourceOperationTypeEnum.COMMIT_VOLUME_MIGRATION.getName();
        ServiceError serviceError = VPlexApiException.errors.commitMigrationFailed(opName, ex);
        WorkflowStepCompleter.stepFailed(stepId, serviceError);
    }
}
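The inner catch block in commitMigration demonstrates a verify-on-error pattern worth calling out: when a long-running array operation throws (for example, on a response timeout), the code re-reads the migration status before declaring failure, because the commit may have completed on the array anyway. Reduced to a sketch using the same client calls as above:

// Sketch of verify-on-error: an exception from commitMigrations does not
// necessarily mean the commit failed on the VPLEX, so re-check before failing.
try {
    client.commitMigrations(virtualVolume.getDeviceLabel(), Arrays.asList(migration.getLabel()), true, true, rename.booleanValue());
} catch (VPlexApiException vae) {
    VPlexMigrationInfo info = client.getMigrationInfo(migration.getLabel());
    if (!VPlexMigrationInfo.MigrationStatus.COMMITTED.name().equalsIgnoreCase(info.getStatus())) {
        throw vae; // the commit genuinely failed; propagate the original error
    }
    // Otherwise the migration committed despite the exception; proceed as success.
}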
use of com.emc.storageos.workflow.WorkflowException in project coprhd-controller by CoprHD.
the class VPlexDeviceController method storageViewAddInitiators.
/**
* Workflow Step to add initiator to Storage View.
* Note arguments (except stepId) must match storageViewAddInitiatorsMethod above.
*
* @param vplexURI
* -- URI of VPlex StorageSystem
* @param exportURI
* -- ExportGroup URI
* @param maskURI
* -- ExportMask URI. Optional.
* If non-null, only the indicated ExportMask will be processed.
* Otherwise, all ExportMasks will be processed.
* @param initiatorURIs
* -- List of initiator URIs to be added.
* @param targetURIs
* -- optional list of additional targets URIs (VPLEX FE ports) to be added.
* If non null, the targets (VPlex front end ports) indicated by the targetURIs will be added
* to the Storage View.
* @param sharedExportMask
* -- true if the ExportMask is shared across hosts.
* @param completer the ExportMaskAddInitiatorCompleter
* @param stepId
* -- Workflow step id.
* @throws DeviceControllerException
*/
public void storageViewAddInitiators(URI vplexURI, URI exportURI, URI maskURI, List<URI> initiatorURIs, List<URI> targetURIs, boolean sharedExportMask, ExportMaskAddInitiatorCompleter completer, String stepId) throws DeviceControllerException {
    try {
        WorkflowStepCompleter.stepExecuting(stepId);
        ExportOperationContext context = new VplexExportOperationContext();
        // Prime the context object.
        completer.updateWorkflowStepContext(context);
        StorageSystem vplex = getDataObject(StorageSystem.class, vplexURI, _dbClient);
        ExportGroup exportGroup = getDataObject(ExportGroup.class, exportURI, _dbClient);
        VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplex, _dbClient);
        List<ExportMask> exportMasks = ExportMaskUtils.getExportMasks(_dbClient, exportGroup, vplexURI);
        for (ExportMask exportMask : exportMasks) {
            // If a specific ExportMask is to be processed, ignore any others.
            if (maskURI != null && !exportMask.getId().equals(maskURI)) {
                continue;
            }
            _log.info("Refreshing ExportMask {}", exportMask.getMaskName());
            String vplexClusterName = VPlexUtil.getVplexClusterName(exportMask, vplexURI, client, _dbClient);
            VPlexStorageViewInfo storageView = client.getStorageView(vplexClusterName, exportMask.getMaskName());
            VPlexControllerUtils.refreshExportMask(_dbClient, storageView, exportMask, VPlexControllerUtils.getTargetPortToPwwnMap(client, vplexClusterName), _networkDeviceController);
            // Determine the hosts of the ExportMask.
            Set<URI> exportMaskHosts = VPlexUtil.getExportMaskHosts(_dbClient, exportMask, sharedExportMask);
            List<Initiator> inits = _dbClient.queryObject(Initiator.class, initiatorURIs);
            if (sharedExportMask) {
                for (Initiator initiator : inits) {
                    URI hostUri = VPlexUtil.getInitiatorHost(initiator);
                    if (null != hostUri) {
                        exportMaskHosts.add(hostUri);
                    }
                }
            }
            // Invoke artificial failure to simulate an invalid storage view name on the VPLEX.
            InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_060);
            // Add new targets if specified.
            if (targetURIs != null && !targetURIs.isEmpty()) {
                List<PortInfo> targetPortInfos = new ArrayList<PortInfo>();
                List<URI> targetsAddedToStorageView = new ArrayList<URI>();
                for (URI target : targetURIs) {
                    // Do not try to add a port twice.
                    if (exportMask.getStoragePorts().contains(target.toString())) {
                        continue;
                    }
                    // Log any ports not listed as a target in the ExportMask's zoning map.
                    Set<String> zoningMapTargets = BlockStorageScheduler.getTargetIdsFromAssignments(exportMask.getZoningMap());
                    if (!zoningMapTargets.contains(target.toString())) {
                        _log.info(String.format("Target %s not in zoning map", target));
                    }
                    // Build the PortInfo structure for the port to be added.
                    StoragePort port = getDataObject(StoragePort.class, target, _dbClient);
                    PortInfo pi = new PortInfo(port.getPortNetworkId().toUpperCase().replaceAll(":", ""), null, port.getPortName(), null);
                    targetPortInfos.add(pi);
                    targetsAddedToStorageView.add(target);
                }
                if (!targetPortInfos.isEmpty()) {
                    // Add the targets on the VPLEX.
                    client.addTargetsToStorageView(exportMask.getMaskName(), targetPortInfos);
                    // Add the targets to the database.
                    for (URI target : targetsAddedToStorageView) {
                        exportMask.addTarget(target);
                    }
                }
            }
            List<PortInfo> initiatorPortInfos = new ArrayList<PortInfo>();
            List<String> initiatorPortWwns = new ArrayList<String>();
            Map<PortInfo, Initiator> portInfosToInitiatorMap = new HashMap<PortInfo, Initiator>();
            for (Initiator initiator : inits) {
                // Only add this initiator if it's for the same host as other initiators in the mask.
                if (!exportMaskHosts.contains(VPlexUtil.getInitiatorHost(initiator))) {
                    continue;
                }
                // Only add this initiator if it's not already in the mask after the refresh.
                if (exportMask.hasInitiator(initiator.getId().toString())) {
                    continue;
                }
                PortInfo portInfo = new PortInfo(initiator.getInitiatorPort().toUpperCase().replaceAll(":", ""), initiator.getInitiatorNode().toUpperCase().replaceAll(":", ""), initiator.getLabel(), getVPlexInitiatorType(initiator));
                initiatorPortInfos.add(portInfo);
                initiatorPortWwns.add(initiator.getInitiatorPort());
                portInfosToInitiatorMap.put(portInfo, initiator);
            }
            if (!initiatorPortInfos.isEmpty()) {
                String lockName = null;
                boolean lockAcquired = false;
                try {
                    StringSet portIds = exportMask.getStoragePorts();
                    StoragePort exportMaskPort = getDataObject(StoragePort.class, URI.create(portIds.iterator().next()), _dbClient);
                    String clusterId = ConnectivityUtil.getVplexClusterOfPort(exportMaskPort);
                    lockName = _vplexApiLockManager.getLockName(vplexURI, clusterId);
                    lockAcquired = _vplexApiLockManager.acquireLock(lockName, LockTimeoutValue.get(LockType.VPLEX_API_LIB));
                    if (!lockAcquired) {
                        throw VPlexApiException.exceptions.couldNotObtainConcurrencyLock(vplex.getLabel());
                    }
                    // Add the initiators to the VPLEX.
                    client.addInitiatorsToStorageView(exportMask.getMaskName(), vplexClusterName, initiatorPortInfos);
                    ExportOperationContext.insertContextOperation(completer, VplexExportOperationContext.OPERATION_ADD_INITIATORS_TO_STORAGE_VIEW, initiatorURIs);
                } finally {
                    if (lockAcquired) {
                        _vplexApiLockManager.releaseLock(lockName);
                    }
                }
            }
        }
        InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_003);
        completer.ready(_dbClient);
    } catch (VPlexApiException vae) {
        _log.error("VPlexApiException adding initiator to Storage View: " + vae.getMessage(), vae);
        failStep(completer, stepId, vae);
    } catch (Exception ex) {
        _log.error("Exception adding initiator to Storage View: " + ex.getMessage(), ex);
        String opName = ResourceOperationTypeEnum.ADD_STORAGE_VIEW_INITIATOR.getName();
        ServiceError serviceError = VPlexApiException.errors.storageViewAddInitiatorFailed(opName, ex);
        failStep(completer, stepId, serviceError);
    }
}
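The normalization expression toUpperCase().replaceAll(":", "") is applied to port and node WWNs in several places above before building PortInfo objects. A small hypothetical helper (not present in the project) would remove the duplication:

// Hypothetical helper: normalize a WWN such as "50:01:43:80:12:ab:cd:ef"
// to the colon-free upper-case form "5001438012ABCDEF".
private static String normalizeWwn(String wwn) {
    return wwn == null ? null : wwn.toUpperCase().replaceAll(":", "");
}

// Example usage in place of the inline expressions:
// new PortInfo(normalizeWwn(initiator.getInitiatorPort()),
//         normalizeWwn(initiator.getInitiatorNode()), initiator.getLabel(),
//         getVPlexInitiatorType(initiator));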