use of com.emc.storageos.svcs.errorhandling.resources.InternalException in project coprhd-controller by CoprHD.
the class RPDeviceController method exportGroupCreate.
/*
* RPDeviceController.exportGroupCreate()
*
* This method is a mini-orchestration of all of the steps necessary to create an export based on
* a Bourne Snapshot object associated with a RecoverPoint bookmark.
*
* This controller does not service block devices for export, only RP bookmark snapshots.
*
* The method is responsible for performing the following steps:
* - Enable image access on the volumes for a specific bookmark.
* - Call the block controller to export the target volume
*
* @param protectionDevice The RP System used to manage the protection
*
* @param exportGroupID The export group URI
*
* @param snapshots snapshot list
*
* @param initiatorURIs The initiators to send to the block controller
*
* @param token The task object
*/
@Override
public void exportGroupCreate(URI protectionDevice, URI exportGroupID, List<URI> initiatorURIs, Map<URI, Integer> snapshots, String token) throws ControllerException {
TaskCompleter taskCompleter = null;
try {
// Grab the RP System information; we'll need it to talk to the RP client
ProtectionSystem rpSystem = getRPSystem(protectionDevice);
taskCompleter = new RPCGExportCompleter(exportGroupID, token);
// Ensure the bookmarks actually exist before creating the export group
searchForBookmarks(protectionDevice, snapshots.keySet());
// Create a new token/taskid and use that in the workflow. Multiple threads entering this method
// might collide with each other's workflows in Cassandra if the taskid is not unique.
String newToken = UUID.randomUUID().toString();
// Set up workflow steps.
Workflow workflow = _workflowService.getNewWorkflow(this, "exportGroupCreate", true, newToken);
// Task 1: Activate the bookmarks
//
// Enable image access on the target volumes
addEnableImageAccessStep(workflow, rpSystem, snapshots, null);
// Task 2: Export volumes
//
// Export the volumes associated with the snapshots to the host
addExportSnapshotSteps(workflow, rpSystem, exportGroupID, snapshots, initiatorURIs);
// Execute the plan and allow the WorkflowExecutor to fire the taskCompleter.
String successMessage = String.format("Workflow of Export Group %s successfully created", exportGroupID);
workflow.executePlan(taskCompleter, successMessage);
} catch (InternalException e) {
_log.error("Operation failed with Exception: ", e);
if (taskCompleter != null) {
taskCompleter.error(_dbClient, e);
}
} catch (Exception e) {
_log.error("Operation failed with Exception: ", e);
if (taskCompleter != null) {
taskCompleter.error(_dbClient, DeviceControllerException.errors.jobFailed(e));
}
}
}
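This method shows the completer pattern used throughout RPDeviceController: create the TaskCompleter up front, do the work, and route any failure into it so the task is never left hanging. Below is a minimal, JDK-only sketch of that shape; the TaskCompleter interface and the printed workflow call are hypothetical stand-ins for the ViPR types, not the real API.

import java.util.UUID;

public class CompleterPatternSketch {
    // Hypothetical stand-in for the ViPR TaskCompleter
    interface TaskCompleter {
        void ready();
        void error(Exception e);
    }

    public static void run(TaskCompleter completer) {
        try {
            // A fresh token per invocation keeps concurrent callers from colliding
            // on the same persisted workflow id (mirrors the UUID.randomUUID() above).
            String newToken = UUID.randomUUID().toString();
            System.out.println("executing workflow plan under token " + newToken);
            completer.ready(); // in the real code, workflow.executePlan(...) fires the completer
        } catch (Exception e) {
            completer.error(e); // never swallow: the task must reflect the failure
        }
    }

    public static void main(String[] args) {
        run(new TaskCompleter() {
            public void ready() { System.out.println("task complete"); }
            public void error(Exception e) { System.out.println("task failed: " + e); }
        });
    }
}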
use of com.emc.storageos.svcs.errorhandling.resources.InternalException in project coprhd-controller by CoprHD.
the class RPDeviceController method enableImageForSnapshots.
/**
* Enable image access for RP snapshots.
*
* @param protectionDevice
* protection system
* @param storageDevice
* storage device of the backing (parent) volume
* @param snapshotList
* list of snapshots to enable
* @param opId
* task ID
* @return true if operation was successful
* @throws ControllerException
* @throws URISyntaxException
*/
private boolean enableImageForSnapshots(URI protectionDevice, URI storageDevice, List<URI> snapshotList, String opId) throws ControllerException, URISyntaxException {
TaskCompleter completer = null;
try {
_log.info("Activating a bookmark on the RP CG(s)");
completer = new BlockSnapshotActivateCompleter(snapshotList, opId);
ProtectionSystem system = null;
try {
system = _dbClient.queryObject(ProtectionSystem.class, protectionDevice);
} catch (DatabaseException e) {
throw DeviceControllerExceptions.recoverpoint.databaseExceptionActivateSnapshot(protectionDevice);
}
// Verify a non-null protection system was returned from the database client.
if (system == null) {
throw DeviceControllerExceptions.recoverpoint.databaseExceptionActivateSnapshot(protectionDevice);
}
// Acquire a workflow lock on the CG in case another operation is still creating the snapshot
if (snapshotList != null && !snapshotList.isEmpty()) {
BlockSnapshot snapshot = _dbClient.queryObject(BlockSnapshot.class, snapshotList.get(0));
Volume parent = _dbClient.queryObject(Volume.class, snapshot.getParent().getURI());
final List<Volume> vplexVolumes = CustomQueryUtility.queryActiveResourcesByConstraint(_dbClient, Volume.class, getVolumesByAssociatedId(parent.getId().toString()));
if (vplexVolumes != null && !vplexVolumes.isEmpty()) {
parent = vplexVolumes.get(0);
}
String lockName = ControllerLockingUtil.getConsistencyGroupStorageKey(_dbClient, parent.getConsistencyGroup(), system.getId());
if (null != lockName) {
List<String> locks = new ArrayList<String>();
locks.add(lockName);
acquireWorkflowLockOrThrow(_workflowService.getWorkflowFromStepId(opId), locks);
}
}
// Keep a mapping of the emNames (bookmark names) to target copy volume WWNs
Map<String, Set<String>> emNamesToVolumeWWNs = new HashMap<String, Set<String>>();
// Keep a mapping of the emNames (bookmark names) to BlockSnapshot objects
Map<String, Set<URI>> emNamesToSnapshots = new HashMap<String, Set<URI>>();
for (URI snapshotID : snapshotList) {
BlockSnapshot snapshot = _dbClient.queryObject(BlockSnapshot.class, snapshotID);
String emName = snapshot.getEmName();
if (NullColumnValueGetter.isNotNullValue(emName)) {
if (!emNamesToVolumeWWNs.containsKey(emName)) {
emNamesToVolumeWWNs.put(emName, new HashSet<String>());
}
if (!emNamesToSnapshots.containsKey(emName)) {
emNamesToSnapshots.put(emName, new HashSet<URI>());
}
emNamesToSnapshots.get(emName).add(snapshotID);
} else {
throw DeviceControllerExceptions.recoverpoint.failedToActivateSnapshotEmNameMissing(snapshotID);
}
// Get the volume associated with this snapshot
Volume volume = _dbClient.queryObject(Volume.class, snapshot.getParent().getURI());
// Fetch the VPLEX volume that is created with this volume as the back-end volume.
if (Volume.checkForVplexBackEndVolume(_dbClient, volume)) {
volume = Volume.fetchVplexVolume(_dbClient, volume);
}
String wwn = null;
// If the personality is TARGET, the enable image access request is part of an export operation, so use this volume's WWN directly.
if (volume.checkPersonality(Volume.PersonalityTypes.TARGET.toString())) {
wwn = RPHelper.getRPWWn(volume.getId(), _dbClient);
} else {
// Now determine the target volume that corresponds to the site of the snapshot
ProtectionSet protectionSet = _dbClient.queryObject(ProtectionSet.class, volume.getProtectionSet());
Volume targetVolume = ProtectionSet.getTargetVolumeFromSourceAndInternalSiteName(_dbClient, protectionSet, volume, snapshot.getEmInternalSiteName());
wwn = RPHelper.getRPWWn(targetVolume.getId(), _dbClient);
}
// Add the volume WWN
emNamesToVolumeWWNs.get(emName).add(wwn);
}
// Now enable image access to that bookmark
RecoverPointClient rp = RPHelper.getRecoverPointClient(system);
// Issue one enable-image-access request per emName, using the set of target volume WWNs corresponding to each emName.
for (Map.Entry<String, Set<String>> emNameEntry : emNamesToVolumeWWNs.entrySet()) {
MultiCopyEnableImageRequestParams request = new MultiCopyEnableImageRequestParams();
request.setVolumeWWNSet(emNameEntry.getValue());
request.setBookmark(emNameEntry.getKey());
MultiCopyEnableImageResponse response = rp.enableImageCopies(request);
if (response == null) {
throw DeviceControllerExceptions.recoverpoint.failedEnableAccessOnRP();
}
}
completer.ready(_dbClient);
return true;
} catch (InternalException e) {
_log.error("Operation failed with Exception: ", e);
if (completer != null) {
completer.error(_dbClient, e);
}
throw e;
} catch (URISyntaxException e) {
_log.error("Operation failed with Exception: ", e);
if (completer != null) {
completer.error(_dbClient, DeviceControllerException.errors.invalidURI(e));
}
throw e;
} catch (Exception e) {
_log.error("Operation failed with Exception: ", e);
if (completer != null) {
completer.error(_dbClient, DeviceControllerException.errors.jobFailed(e));
}
throw e;
}
}
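The containsKey/put sequences that populate emNamesToVolumeWWNs and emNamesToSnapshots above can be written more compactly with Map.computeIfAbsent. A self-contained sketch of that grouping step, using hypothetical (emName, WWN) data in place of the BlockSnapshot lookups:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class BookmarkGroupingSketch {
    public static void main(String[] args) {
        // (emName, volume WWN) pairs standing in for BlockSnapshot records
        String[][] snapshots = { {"bm-1", "wwn-a"}, {"bm-1", "wwn-b"}, {"bm-2", "wwn-c"} };
        Map<String, Set<String>> emNamesToVolumeWWNs = new HashMap<>();
        for (String[] snap : snapshots) {
            // One line replaces the containsKey/put dance in the method above
            emNamesToVolumeWWNs.computeIfAbsent(snap[0], k -> new HashSet<>()).add(snap[1]);
        }
        // The real code then issues one enable-image-access request per entry
        emNamesToVolumeWWNs.forEach((em, wwns) -> System.out.println(em + " -> " + wwns));
    }
}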
use of com.emc.storageos.svcs.errorhandling.resources.InternalException in project coprhd-controller by CoprHD.
the class RPDeviceController method enableImageAccessForCreateReplicaStep.
/**
* Enable image access before create native array replica operation
*
* @param protectionDevice
* RecoverPoint protection system URI
* @param clazz
* type of replica (such as Volume, BlockSnapshot or BlockSnapshotSession)
* @param copyList
* list of replica ids
* @param bookmarkName
* name of the bookmark created for this operation
* @param volumeWWNs
* wwns of volumes that are parents to replica objects
* @param opId
* task ID
* @return true if the operation succeeded, false otherwise
* @throws ControllerException
*/
public boolean enableImageAccessForCreateReplicaStep(URI protectionDevice, Class<? extends DataObject> clazz, List<URI> copyList, String bookmarkName, Set<String> volumeWWNs, String opId) throws ControllerException {
TaskCompleter completer = null;
try {
WorkflowStepCompleter.stepExecuting(opId);
_log.info(String.format("Activating bookmark %s on the RP CG(s)", bookmarkName));
completer = new RPCGCopyVolumeCompleter(clazz, copyList, opId);
// Verify a non-null, active protection system was returned from the database client.
ProtectionSystem system = _dbClient.queryObject(ProtectionSystem.class, protectionDevice);
if (system == null || system.getInactive()) {
throw DeviceControllerExceptions.recoverpoint.databaseExceptionActivateSnapshot(protectionDevice);
}
// enable image access to that bookmark
RecoverPointClient rp = RPHelper.getRecoverPointClient(system);
MultiCopyEnableImageRequestParams request = new MultiCopyEnableImageRequestParams();
request.setVolumeWWNSet(volumeWWNs);
request.setBookmark(bookmarkName);
MultiCopyEnableImageResponse response = rp.enableImageCopies(request);
if (response == null) {
throw DeviceControllerExceptions.recoverpoint.failedEnableAccessOnRP();
}
completer.ready(_dbClient);
// Update the workflow state.
WorkflowStepCompleter.stepSucceded(opId);
return true;
} catch (InternalException e) {
_log.error("Operation failed with Exception: ", e);
if (completer != null) {
completer.error(_dbClient, e);
}
stepFailed(opId, "enableImageAccessStep: Failed to enable image");
return false;
} catch (Exception e) {
_log.error("Operation failed with Exception: ", e);
if (completer != null) {
completer.error(_dbClient, DeviceControllerException.errors.jobFailed(e));
}
stepFailed(opId, "enableImageAccessStep: Failed to enable image");
return false;
}
}
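The method also illustrates the workflow-step contract: mark the step executing, perform the device call, then report success or failure exactly once. A hedged, JDK-only sketch of that lifecycle follows; the StepState enum and runStep helper are illustrative stand-ins, not the ViPR WorkflowStepCompleter API.

public class StepLifecycleSketch {
    enum StepState { EXECUTING, SUCCEEDED, FAILED }

    // Mirrors the shape of enableImageAccessForCreateReplicaStep: executing -> work -> outcome
    static StepState runStep(Runnable deviceWork) {
        StepState state = StepState.EXECUTING; // WorkflowStepCompleter.stepExecuting(opId)
        try {
            deviceWork.run();                  // e.g. rp.enableImageCopies(request)
            state = StepState.SUCCEEDED;       // WorkflowStepCompleter.stepSucceded(opId)
        } catch (Exception e) {
            state = StepState.FAILED;          // stepFailed(opId, ...) and the method returns false
        }
        return state;
    }

    public static void main(String[] args) {
        System.out.println(runStep(() -> System.out.println("enabling image access")));
    }
}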
use of com.emc.storageos.svcs.errorhandling.resources.InternalException in project coprhd-controller by CoprHD.
the class RPDeviceController method performProtectionOperationStep.
/**
* Workflow step to perform RP protection operation
*
* @param protectionSystem
* RP protection system URI
* @param cgId
* consistency group URI
* @param volId
* volume URI (the source volume when copyID is null)
* @param copyID
* copy volume URI; null applies the operation to all copies
* @param pointInTime
* optional point-in-time used for failover
* @param imageAccessMode
* image access mode, used by the CHANGE_ACCESS_MODE operation
* @param op
* protection operation to perform (STOP, START, SYNC, PAUSE, RESUME, FAILOVER, SWAP, ...)
* @param stepId
* workflow step ID
* @return true if the operation succeeded, false otherwise
* @throws ControllerException
*/
public boolean performProtectionOperationStep(URI protectionSystem, URI cgId, URI volId, URI copyID, String pointInTime, String imageAccessMode, String op, String stepId) throws ControllerException {
WorkflowStepCompleter.stepExecuting(stepId);
try {
ProtectionSystem rpSystem = getRPSystem(protectionSystem);
// Take out a workflow step lock on the CG
_workflowService.getWorkflowFromStepId(stepId);
List<String> lockKeys = new ArrayList<String>();
lockKeys.add(ControllerLockingUtil.getConsistencyGroupStorageKey(_dbClient, cgId, rpSystem.getId()));
boolean lockAcquired = _workflowService.acquireWorkflowStepLocks(stepId, lockKeys, LockTimeoutValue.get(LockType.RP_CG));
if (!lockAcquired) {
throw DeviceControllerException.exceptions.failedToAcquireLock(lockKeys.toString(), String.format("failed to get lock while restoring volumes in RP consistency group: %s", cgId.toString()));
}
// Set the protection volume to the source volume if the copyID is null (the operation is
// performed on all copies); otherwise set it to the volume referenced by the copyID (the
// operation is performed on a specific copy).
Volume protectionVolume = (copyID == null) ? _dbClient.queryObject(Volume.class, volId) : _dbClient.queryObject(Volume.class, copyID);
RecoverPointClient rp = RPHelper.getRecoverPointClient(rpSystem);
RecoverPointVolumeProtectionInfo volumeProtectionInfo = rp.getProtectionInfoForVolume(RPHelper.getRPWWn(protectionVolume.getId(), _dbClient));
if (op.equals(STOP)) {
rp.disableProtection(volumeProtectionInfo);
setProtectionSetStatus(volumeProtectionInfo, ProtectionStatus.DISABLED.toString(), rpSystem);
_log.info("doStopProtection {} - complete", rpSystem.getId());
} else if (op.equals(START)) {
rp.enableProtection(volumeProtectionInfo);
setProtectionSetStatus(volumeProtectionInfo, ProtectionStatus.ENABLED.toString(), rpSystem);
} else if (op.equals(SYNC)) {
Set<String> volumeWWNs = new HashSet<String>();
volumeWWNs.add(RPHelper.getRPWWn(protectionVolume.getId(), _dbClient));
// Create and enable a temporary bookmark for this volume
CreateBookmarkRequestParams request = new CreateBookmarkRequestParams();
request.setVolumeWWNSet(volumeWWNs);
request.setBookmark("Sync-Snapshot");
rp.createBookmarks(request);
} else if (op.equals(PAUSE)) {
rp.pauseTransfer(volumeProtectionInfo);
setProtectionSetStatus(volumeProtectionInfo, ProtectionStatus.PAUSED.toString(), rpSystem);
} else if (op.equals(RESUME)) {
rp.resumeTransfer(volumeProtectionInfo);
setProtectionSetStatus(volumeProtectionInfo, ProtectionStatus.ENABLED.toString(), rpSystem);
} else if (op.equals(FAILOVER_TEST)) {
RPCopyRequestParams copyParams = new RPCopyRequestParams();
copyParams.setCopyVolumeInfo(volumeProtectionInfo);
rp.failoverCopyTest(copyParams);
} else if (op.equals(FAILOVER)) {
// If the volume is already in a failed-over state, treat this re-failover request as a failback/cancel (see TODO below).
if (protectionVolume.getLinkStatus() != null && protectionVolume.getLinkStatus().equalsIgnoreCase(Volume.LinkStatus.FAILED_OVER.name())) {
// TODO: ViPR 2.0 needs to support this.
// TODO BEGIN: allow re-failover to perform the same as a failback in 2.0, since the UI
// support will not be there to do a swap or cancel.
// Jira CTRL-2773: Once the UI adds support for /swap and /failover-cancel, we can remove
// this and replace it with an error.
// If protectionVolume is a source, then the "source" sent in must be a target. Verify.
Volume targetVolume = null;
if (protectionVolume.checkPersonality(Volume.PersonalityTypes.SOURCE.toString())) {
targetVolume = _dbClient.queryObject(Volume.class, volId);
} else {
targetVolume = protectionVolume;
}
// Disable the image access that is in effect.
volumeProtectionInfo = rp.getProtectionInfoForVolume(RPHelper.getRPWWn(targetVolume.getId(), _dbClient));
RPCopyRequestParams copyParams = new RPCopyRequestParams();
copyParams.setCopyVolumeInfo(volumeProtectionInfo);
rp.failoverCopyCancel(copyParams);
// Set the flags back to where they belong.
updatePostFailoverCancel(targetVolume);
// TODO END
// Replace with this error: taskCompleter.error(_dbClient, _locker,
// DeviceControllerErrors.recoverpoint.stepFailed("performFailoverOperation: source volume specified
// for failover where target volume specified is not in failover state"));
} else {
// Standard failover case.
RPCopyRequestParams copyParams = new RPCopyRequestParams();
copyParams.setCopyVolumeInfo(volumeProtectionInfo);
if (pointInTime != null) {
// Build a Date reference.
copyParams.setApitTime(TimeUtils.getDateTimestamp(pointInTime));
}
rp.failoverCopy(copyParams);
updatePostFailover(protectionVolume);
}
} else if (op.equals(FAILOVER_CANCEL)) {
// A failover can only be canceled on the failed-over target volume; verify that below.
if (protectionVolume.checkPersonality(Volume.PersonalityTypes.SOURCE.name())) {
throw DeviceControllerExceptions.recoverpoint.failoverWrongTargetSpecified();
} else {
if (protectionVolume.getLinkStatus() != null && protectionVolume.getLinkStatus().equalsIgnoreCase(Volume.LinkStatus.FAILED_OVER.name())) {
// Disable the image access that is in effect.
volumeProtectionInfo = rp.getProtectionInfoForVolume(RPHelper.getRPWWn(protectionVolume.getId(), _dbClient));
RPCopyRequestParams copyParams = new RPCopyRequestParams();
copyParams.setCopyVolumeInfo(volumeProtectionInfo);
rp.failoverCopyCancel(copyParams);
// Set the flags back to where they belong.
updatePostFailoverCancel(protectionVolume);
} else {
// The volume specified is not a failed-over target, so fail the cancel request.
throw DeviceControllerExceptions.recoverpoint.failoverWrongTargetSpecified();
}
}
} else if (op.equals(SWAP)) {
RPCopyRequestParams copyParams = new RPCopyRequestParams();
copyParams.setCopyVolumeInfo(volumeProtectionInfo);
rp.swapCopy(copyParams);
protectionVolume = updatePostSwapPersonalities(protectionVolume);
rp.setCopyAsProduction(copyParams);
// For MetroPoint volumes, rebuild the standby production copy and standby CDP copy after the swap (steps 1-3 below)
if (RPHelper.isMetroPointVolume(_dbClient, protectionVolume)) {
// If the standby production copy already exists, we can skip all the CG reconstruction steps.
if (!rp.doesStandbyProdCopyExist(volumeProtectionInfo)) {
_log.info(String.format("Adding back standby production copy after swap back to original VPlex Metro for Metropoint volume %s (%s)", protectionVolume.getLabel(), protectionVolume.getId().toString()));
List<Volume> standbyLocalCopyVols = RPHelper.getMetropointStandbyCopies(protectionVolume, _dbClient);
CreateCopyParams standbyLocalCopyParams = null;
List<CreateRSetParams> rSets = new ArrayList<CreateRSetParams>();
Set<URI> journalVolumes = new HashSet<URI>();
if (!standbyLocalCopyVols.isEmpty()) {
for (Volume standbyCopyVol : standbyLocalCopyVols) {
// 1. delete the standby CDP copy if it exists
if (rp.doesProtectionVolumeExist(RPHelper.getRPWWn(standbyCopyVol.getId(), _dbClient))) {
RecoverPointVolumeProtectionInfo standbyCdpCopy = rp.getProtectionInfoForVolume(RPHelper.getRPWWn(standbyCopyVol.getId(), _dbClient));
rp.deleteCopy(standbyCdpCopy);
}
// set up volume info for the standby copy volume
CreateVolumeParams vol = new CreateVolumeParams();
vol.setWwn(RPHelper.getRPWWn(standbyCopyVol.getId(), _dbClient));
vol.setInternalSiteName(standbyCopyVol.getInternalSiteName());
vol.setProduction(false);
List<CreateVolumeParams> volumes = new ArrayList<CreateVolumeParams>();
volumes.add(vol);
CreateRSetParams rSet = new CreateRSetParams();
rSet.setName(standbyCopyVol.getRSetName());
rSet.setVolumes(volumes);
rSets.add(rSet);
List<Volume> standbyJournals = RPHelper.findExistingJournalsForCopy(_dbClient, standbyCopyVol.getConsistencyGroup(), standbyCopyVol.getRpCopyName());
// compile a unique set of journal volumes
for (Volume standbyJournal : standbyJournals) {
journalVolumes.add(standbyJournal.getId());
}
}
// prepare journal volumes info
String rpCopyName = null;
List<CreateVolumeParams> journalVols = new ArrayList<CreateVolumeParams>();
for (URI journalVolId : journalVolumes) {
Volume standbyLocalJournal = _dbClient.queryObject(Volume.class, journalVolId);
if (standbyLocalJournal != null) {
_log.info(String.format("Found standby local journal volume %s (%s) for metropoint volume %s (%s)", standbyLocalJournal.getLabel(), standbyLocalJournal.getId().toString(), protectionVolume.getLabel(), protectionVolume.getId().toString()));
rpCopyName = standbyLocalJournal.getRpCopyName();
CreateVolumeParams journalVolParams = new CreateVolumeParams();
journalVolParams.setWwn(RPHelper.getRPWWn(standbyLocalJournal.getId(), _dbClient));
journalVolParams.setInternalSiteName(standbyLocalJournal.getInternalSiteName());
journalVols.add(journalVolParams);
}
}
// if we found any journal volumes, add them to the local copies list
if (!journalVols.isEmpty()) {
standbyLocalCopyParams = new CreateCopyParams();
standbyLocalCopyParams.setName(rpCopyName);
standbyLocalCopyParams.setJournals(journalVols);
} else {
_log.error("no journal volumes found for standby production copy for source volume " + protectionVolume.getLabel());
}
}
String standbyProductionCopyName = RPHelper.getStandbyProductionCopyName(_dbClient, protectionVolume);
// Build standby production journal
if (standbyProductionCopyName != null) {
List<Volume> existingStandbyJournals = RPHelper.findExistingJournalsForCopy(_dbClient, protectionVolume.getConsistencyGroup(), standbyProductionCopyName);
// Get the first standby production journal
Volume standbyProdJournal = existingStandbyJournals.get(0);
if (standbyProdJournal != null) {
_log.info(String.format("Found standby production journal volume %s (%s) for metropoint volume %s (%s)", standbyProdJournal.getLabel(), standbyProdJournal.getId().toString(), protectionVolume.getLabel(), protectionVolume.getId().toString()));
List<CreateVolumeParams> journalVols = new ArrayList<CreateVolumeParams>();
CreateVolumeParams journalVolParams = new CreateVolumeParams();
journalVolParams.setWwn(RPHelper.getRPWWn(standbyProdJournal.getId(), _dbClient));
journalVolParams.setInternalSiteName(standbyProdJournal.getInternalSiteName());
journalVols.add(journalVolParams);
CreateCopyParams standbyProdCopyParams = new CreateCopyParams();
standbyProdCopyParams.setName(standbyProdJournal.getRpCopyName());
standbyProdCopyParams.setJournals(journalVols);
// 2. and 3. add back the standby production copy; add back the standby CDP copy
rp.addStandbyProductionCopy(standbyProdCopyParams, standbyLocalCopyParams, rSets, copyParams);
} else {
_log.error(String.format("Cannot add standby production copy because the standby production journal could not be found for copy %s.", standbyProductionCopyName));
}
}
}
}
// Give RP a few seconds to remove the swapped copy's bookmarks before we clean up the corresponding BlockSnapshot objects from ViPR.
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
// When we perform a swap, the target swap copy will become the production copy and lose all
// its bookmarks so we need to sync with RP.
RPHelper.cleanupSnapshots(_dbClient, rpSystem);
} else if (op.equals(FAILOVER_TEST_CANCEL)) {
RPCopyRequestParams copyParams = new RPCopyRequestParams();
copyParams.setCopyVolumeInfo(volumeProtectionInfo);
rp.failoverCopyTestCancel(copyParams);
} else if (op.equals(CHANGE_ACCESS_MODE)) {
RPCopyRequestParams copyParams = new RPCopyRequestParams();
copyParams.setCopyVolumeInfo(volumeProtectionInfo);
copyParams.setImageAccessMode(imageAccessMode);
if (imageAccessMode != null) {
rp.updateImageAccessMode(copyParams);
// Direct access mode invalidates bookmarks on the copy, so remove any BlockSnapshot objects that reference the target copy being set to direct access mode.
if (Copy.ImageAccessMode.DIRECT_ACCESS.name().equalsIgnoreCase(imageAccessMode)) {
_log.info(String.format("Updated imaged access mode for copy %s at %s to DIRECT_ACCESS. Removing all bookmarks associated with that copy and site.", volumeProtectionInfo.getRpCopyName(), volumeProtectionInfo.getRpSiteName()));
// Wait for RP to remove copy snapshots
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
RPHelper.cleanupSnapshots(_dbClient, rpSystem);
}
} else {
throw DeviceControllerExceptions.recoverpoint.imageAccessModeNotSupported(imageAccessMode);
}
} else {
throw DeviceControllerExceptions.recoverpoint.protectionOperationNotSupported(op);
}
_log.info("performProtectionOperation: after " + op + " operation successful");
WorkflowStepCompleter.stepSucceded(stepId);
return true;
} catch (InternalException e) {
_log.error("Operation failed with Exception: ", e);
return stepFailed(stepId, (ServiceCoded) e, "performProtectionOperation failed.");
} catch (Exception e) {
stepFailed(stepId, e, "performProtectionOperation failed.");
return false;
}
}
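performProtectionOperationStep dispatches on raw op strings through a long if/else chain. One way to make the dispatch table explicit is a switch over an enum; the sketch below is hypothetical, with println bodies standing in for the RecoverPointClient calls, and Op.valueOf playing the role of the protectionOperationNotSupported guard.

public class ProtectionOpDispatchSketch {
    enum Op { STOP, START, SYNC, PAUSE, RESUME, FAILOVER, FAILOVER_CANCEL,
              FAILOVER_TEST, FAILOVER_TEST_CANCEL, SWAP, CHANGE_ACCESS_MODE }

    static void perform(Op op) {
        switch (op) {
            case STOP:   System.out.println("disableProtection, status -> DISABLED"); break;
            case START:  System.out.println("enableProtection, status -> ENABLED");   break;
            case SYNC:   System.out.println("createBookmarks(\"Sync-Snapshot\")");    break;
            case PAUSE:  System.out.println("pauseTransfer, status -> PAUSED");       break;
            case RESUME: System.out.println("resumeTransfer, status -> ENABLED");     break;
            default:     System.out.println("remaining ops mirror the branches above");
        }
    }

    public static void main(String[] args) {
        // Op.valueOf throws IllegalArgumentException for unknown strings, playing the same
        // role as the protectionOperationNotSupported exception in the method above.
        perform(Op.valueOf("SYNC"));
    }
}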
use of com.emc.storageos.svcs.errorhandling.resources.InternalException in project coprhd-controller by CoprHD.
the class RPDeviceController method exportOrchestrationSteps.
/**
* @param volumeDescriptors
* - Volume descriptors
* @param rpSystemId
* - RP system
* @param taskId
* - task ID
* @return - True on success, false otherwise
* @throws InternalException
*/
public boolean exportOrchestrationSteps(List<VolumeDescriptor> volumeDescriptors, URI rpSystemId, String taskId) throws InternalException {
List<URI> volUris = VolumeDescriptor.getVolumeURIs(volumeDescriptors);
RPCGExportOrchestrationCompleter completer = new RPCGExportOrchestrationCompleter(volUris, taskId);
Workflow workflow = null;
boolean lockException = false;
Map<URI, Set<URI>> exportGroupVolumesAdded = new HashMap<URI, Set<URI>>();
exportGroupsCreated = new ArrayList<URI>();
final String COMPUTE_RESOURCE_CLUSTER = "cluster";
try {
final String workflowKey = "rpExportOrchestration";
if (!WorkflowService.getInstance().hasWorkflowBeenCreated(taskId, workflowKey)) {
// Generate the Workflow.
workflow = _workflowService.getNewWorkflow(this, EXPORT_ORCHESTRATOR_WF_NAME, true, taskId);
// waitFor holds the wait-for key returned by the previous workflow step call
String waitFor = null;
ProtectionSystem rpSystem = _dbClient.queryObject(ProtectionSystem.class, rpSystemId);
// Get the CG Params based on the volume descriptors
CGRequestParams params = this.getCGRequestParams(volumeDescriptors, rpSystem);
updateCGParams(params);
_log.info("Start adding RP Export Volumes steps....");
// Get the RP Exports from the CGRequestParams object
Collection<RPExport> rpExports = generateStorageSystemExportMaps(params, volumeDescriptors);
Map<String, Set<URI>> rpSiteInitiatorsMap = getRPSiteInitiators(rpSystem, rpExports);
// Acquire all the RP lock keys needed for export before we start assembling the export groups.
acquireRPLockKeysForExport(taskId, rpExports, rpSiteInitiatorsMap);
// For each RP export, either find an existing export group to reuse or create a new one.
for (RPExport rpExport : rpExports) {
URI storageSystemURI = rpExport.getStorageSystem();
String internalSiteName = rpExport.getRpSite();
URI varrayURI = rpExport.getVarray();
List<URI> volumes = rpExport.getVolumes();
List<URI> initiatorSet = new ArrayList<URI>();
String rpSiteName = (rpSystem.getRpSiteNames() != null) ? rpSystem.getRpSiteNames().get(internalSiteName) : internalSiteName;
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storageSystemURI);
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, varrayURI);
_log.info("--------------------");
_log.info(String.format("RP Export: StorageSystem = [%s] RPSite = [%s] VirtualArray = [%s]", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
boolean isJournalExport = rpExport.getIsJournalExport();
String exportGroupGeneratedName = RPHelper.generateExportGroupName(rpSystem, storageSystem, internalSiteName, varray, isJournalExport);
// Setup the export group - we may or may not need to create it, but we need to have everything ready in case we do
ExportGroup exportGroup = RPHelper.createRPExportGroup(exportGroupGeneratedName, varray, _dbClient.queryObject(Project.class, params.getProject()), 0, isJournalExport);
// Get the initiators of the RP Cluster (all of the RPAs on one side of a configuration)
Map<String, Map<String, String>> rpaWWNs = RPHelper.getRecoverPointClient(rpSystem).getInitiatorWWNs(internalSiteName);
if (rpaWWNs == null || rpaWWNs.isEmpty()) {
throw DeviceControllerExceptions.recoverpoint.noInitiatorsFoundOnRPAs();
}
// Convert to initiator object
List<Initiator> initiators = new ArrayList<Initiator>();
for (String rpaId : rpaWWNs.keySet()) {
for (Map.Entry<String, String> rpaWWN : rpaWWNs.get(rpaId).entrySet()) {
Initiator initiator = ExportUtils.getInitiator(rpaWWN.getKey(), _dbClient);
initiators.add(initiator);
}
}
// We need to find and distill only those RP initiators that correspond to the network of the
// storage system, where that network has front-end ports from the storage system. In certain
// lab environments it is quite possible that there are two networks: one for the storage
// system FE ports and one for the BE ports. In such configs, RP initiators will be spread
// across those two networks. The RP controller does not care about storage system back-end
// ports, so we ignore those initiators that are connected to a network that has only storage
// system back-end port connectivity.
Map<URI, Set<Initiator>> rpNetworkToInitiatorsMap = new HashMap<URI, Set<Initiator>>();
Set<URI> rpSiteInitiatorUris = rpSiteInitiatorsMap.get(internalSiteName);
if (rpSiteInitiatorUris != null) {
for (URI rpSiteInitiatorUri : rpSiteInitiatorUris) {
Initiator rpSiteInitiator = _dbClient.queryObject(Initiator.class, rpSiteInitiatorUri);
URI rpInitiatorNetworkURI = getInitiatorNetwork(exportGroup, rpSiteInitiator);
if (rpInitiatorNetworkURI != null) {
if (rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI) == null) {
rpNetworkToInitiatorsMap.put(rpInitiatorNetworkURI, new HashSet<Initiator>());
}
rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI).add(rpSiteInitiator);
_log.info(String.format("RP Initiator [%s] found on network: [%s]", rpSiteInitiator.getInitiatorPort(), rpInitiatorNetworkURI.toASCIIString()));
} else {
_log.info(String.format("RP Initiator [%s] was not found on any network. Excluding from automated exports", rpSiteInitiator.getInitiatorPort()));
}
}
}
// Compute numPaths. This is how it's done:
// We know the RP site and the Network/TransportZone it is on.
// Determine all the storage ports for the storage array for all the networks they are on.
// Next, if we find the network for the RP site in the above list, return all the storage ports
// corresponding to that.
// For RP we will try and use as many Storage ports as possible.
Map<URI, List<StoragePort>> initiatorPortMap = getInitiatorPortsForArray(rpNetworkToInitiatorsMap, storageSystemURI, varrayURI, rpSiteName);
for (URI networkURI : initiatorPortMap.keySet()) {
for (StoragePort storagePort : initiatorPortMap.get(networkURI)) {
_log.info(String.format("Network : [%s] - Port : [%s]", networkURI.toString(), storagePort.getLabel()));
}
}
int numPaths = computeNumPaths(initiatorPortMap, varrayURI, storageSystem);
_log.info("Total paths = " + numPaths);
// As distilled above, keep only the RP initiators on networks that also have storage ports for this array.
List<Initiator> initiatorList = new ArrayList<Initiator>();
for (URI rpNetworkURI : rpNetworkToInitiatorsMap.keySet()) {
if (initiatorPortMap.containsKey(rpNetworkURI)) {
initiatorList.addAll(rpNetworkToInitiatorsMap.get(rpNetworkURI));
}
}
for (Initiator initiator : initiatorList) {
initiatorSet.add(initiator.getId());
}
// See if the export group already exists
ExportGroup exportGroupInDB = exportGroupExistsInDB(exportGroup);
boolean addExportGroupToDB = false;
if (exportGroupInDB != null) {
exportGroup = exportGroupInDB;
// If the export already exists, check to see if any of the volumes have already been
// exported. There is no need to re-export volumes.
List<URI> volumesToRemove = new ArrayList<URI>();
for (URI volumeURI : volumes) {
if (exportGroup.getVolumes() != null && !exportGroup.getVolumes().isEmpty() && exportGroup.getVolumes().containsKey(volumeURI.toString())) {
_log.info(String.format("Volume [%s] already exported to export group [%s], " + "it will be not be re-exported", volumeURI.toString(), exportGroup.getGeneratedName()));
volumesToRemove.add(volumeURI);
}
}
// Remove volumes if they have already been exported
if (!volumesToRemove.isEmpty()) {
volumes.removeAll(volumesToRemove);
}
// If no volumes are left to export to this export group, nothing else needs to be done here.
if (volumes.isEmpty()) {
_log.info(String.format("No volumes needed to be exported to export group [%s], continue", exportGroup.getGeneratedName()));
continue;
}
} else {
addExportGroupToDB = true;
}
// Add volumes to the export group
Map<URI, Integer> volumesToAdd = new HashMap<URI, Integer>();
for (URI volumeID : volumes) {
exportGroup.addVolume(volumeID, ExportGroup.LUN_UNASSIGNED);
volumesToAdd.put(volumeID, ExportGroup.LUN_UNASSIGNED);
}
// Keep track of volumes added to export group
if (!volumesToAdd.isEmpty()) {
exportGroupVolumesAdded.put(exportGroup.getId(), volumesToAdd.keySet());
}
// Add the compute resource (host or cluster) to the export group, if one was specified for the volume
if (rpExport.getComputeResource() != null) {
URI computeResource = rpExport.getComputeResource();
_log.info(String.format("RP Export: ComputeResource : %s", computeResource.toString()));
if (computeResource.toString().toLowerCase().contains(COMPUTE_RESOURCE_CLUSTER)) {
Cluster cluster = _dbClient.queryObject(Cluster.class, computeResource);
exportGroup.addCluster(cluster);
} else {
Host host = _dbClient.queryObject(Host.class, rpExport.getComputeResource());
exportGroup.addHost(host);
}
}
// Persist the export group
if (addExportGroupToDB) {
exportGroup.addInitiators(initiatorSet);
exportGroup.setNumPaths(numPaths);
_dbClient.createObject(exportGroup);
// Keep track of newly created EGs in case of rollback
exportGroupsCreated.add(exportGroup.getId());
} else {
_dbClient.updateObject(exportGroup);
}
// If the export group already exists, add the volumes to it; otherwise create a brand new
// export group.
StringBuilder buffer = new StringBuilder();
buffer.append(String.format(DASHED_NEWLINE));
if (!addExportGroupToDB) {
buffer.append(String.format("Adding volumes to existing Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
buffer.append(String.format("Export Group name is : [%s]%n", exportGroup.getGeneratedName()));
buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
buffer.append(String.format(DASHED_NEWLINE));
_log.info(buffer.toString());
waitFor = _exportWfUtils.generateExportGroupAddVolumes(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd);
_log.info("Added Export Group add volumes step in workflow");
} else {
buffer.append(String.format("Creating new Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
buffer.append(String.format("Export Group name is: [%s]%n", exportGroup.getGeneratedName()));
buffer.append(String.format("Export Group will have these initiators: [%s]%n", Joiner.on(',').join(initiatorSet)));
buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
buffer.append(String.format(DASHED_NEWLINE));
_log.info(buffer.toString());
String exportStep = workflow.createStepId();
initTaskStatus(exportGroup, exportStep, Operation.Status.pending, "create export");
waitFor = _exportWfUtils.generateExportGroupCreateWorkflow(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd, initiatorSet);
_log.info("Added Export Group create step in workflow. New Export Group Id: " + exportGroup.getId());
}
}
String successMessage = "Export orchestration completed successfully";
// Finish up and execute the plan.
// The Workflow will handle the TaskCompleter
Object[] callbackArgs = new Object[] { volUris };
workflow.executePlan(completer, successMessage, new WorkflowCallback(), callbackArgs, null, null);
// Mark this workflow as created/executed so we don't do it again on retry/resume
WorkflowService.getInstance().markWorkflowBeenCreated(taskId, workflowKey);
}
} catch (LockRetryException ex) {
/**
* This catch block marks the current workflow as completed so that the lock retry will not
* get an exception while creating a new workflow using the same taskid.
*/
_log.warn(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
if (workflow != null && !NullColumnValueGetter.isNullURI(workflow.getWorkflowURI()) && workflow.getWorkflowState() == WorkflowState.CREATED) {
com.emc.storageos.db.client.model.Workflow wf = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow.getWorkflowURI());
if (!wf.getCompleted()) {
_log.error("Marking the status to completed for the newly created workflow {}", wf.getId());
wf.setCompleted(true);
_dbClient.updateObject(wf);
}
}
throw ex;
} catch (Exception ex) {
_log.error("Could not create volumes: " + volUris, ex);
// Rollback ViPR level RP export group changes
rpExportGroupRollback();
if (workflow != null) {
_workflowService.releaseAllWorkflowLocks(workflow);
}
String opName = ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME.getName();
ServiceError serviceError = null;
if (lockException) {
serviceError = DeviceControllerException.errors.createVolumesAborted(volUris.toString(), ex);
} else {
serviceError = DeviceControllerException.errors.createVolumesFailed(volUris.toString(), opName, ex);
}
completer.error(_dbClient, _locker, serviceError);
return false;
}
_log.info("End adding RP Export Volumes steps.");
return true;
}
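Note the hasWorkflowBeenCreated/markWorkflowBeenCreated pair that brackets the method body: on a retry or resume with the same taskId, the orchestration is skipped instead of rebuilt. A minimal, JDK-only sketch of that create-once guard, with a ConcurrentHashMap standing in for the workflow service's persisted state:

import java.util.concurrent.ConcurrentHashMap;

public class WorkflowOnceGuardSketch {
    private final ConcurrentHashMap<String, Boolean> created = new ConcurrentHashMap<>();

    // Runs the orchestration only the first time a (taskId, workflowKey) pair is seen
    boolean runOnce(String taskId, String workflowKey, Runnable orchestration) {
        String key = taskId + ":" + workflowKey;
        if (created.putIfAbsent(key, Boolean.TRUE) != null) {
            return false; // already created on a previous attempt: skip on retry/resume
        }
        orchestration.run();
        return true;
    }

    public static void main(String[] args) {
        WorkflowOnceGuardSketch guard = new WorkflowOnceGuardSketch();
        Runnable orchestrate = () -> System.out.println("building RP export workflow");
        System.out.println(guard.runOnce("task-1", "rpExportOrchestration", orchestrate)); // true
        System.out.println(guard.runOnce("task-1", "rpExportOrchestration", orchestrate)); // false
    }
}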