Use of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method migrateVolumes.
/**
* Deprecating this for now, should be using the migrateVolumes call with the WF passed in from
* the BlockOrchestrator.
*
* {@inheritDoc}
*/
@Override
public void migrateVolumes(URI vplexURI, URI virtualVolumeURI, List<URI> targetVolumeURIs, Map<URI, URI> migrationsMap, Map<URI, URI> poolVolumeMap, URI newCoSURI, URI newNhURI, String successMsg, String failMsg, OperationTypeEnum opType, String opId, String wfStepId) throws ControllerException {
List<URI> migrationURIs = new ArrayList<URI>(migrationsMap.values());
try {
_log.info("VPlex controller migrate volume {} on VPlex {}", virtualVolumeURI, vplexURI);
// Get the VPlex storage system
StorageSystem vplexSystem = getDataObject(StorageSystem.class, vplexURI, _dbClient);
_log.info("Got VPlex system");
// If a workflow step id is passed, then this is being called
// from a step in a "parent" workflow. In that case, this
// sub-workflow takes the name of the step.
String wfId = (wfStepId != null ? wfStepId : opId);
// Get a new workflow to execute the migrations.
Workflow workflow = _workflowService.getNewWorkflow(this, MIGRATE_VOLUMES_WF_NAME, false, wfId);
_log.info("Created new workflow with operation id {}", wfId);
// Create a step to validate the volume and prevent migration if
// the ViPR DB does not properly reflect the actual backend volumes.
// A successful migration will delete the backend source volumes. If
// the ViPR DB does not correctly reflect the actual backend volume,
// we could delete a backend volume used by some other VPLEX volume.
String waitFor = createWorkflowStepToValidateVPlexVolume(workflow, vplexSystem, virtualVolumeURI, null);
// We first need to create steps in the workflow to create the new
// backend volume(s) to which the data for the virtual volume will
// be migrated.
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
Map<URI, StorageSystem> storageSystemMap = new HashMap<URI, StorageSystem>();
Map<URI, Volume> volumeMap = new HashMap<URI, Volume>();
Iterator<URI> storagePoolIter = poolVolumeMap.keySet().iterator();
while (storagePoolIter.hasNext()) {
URI storagePoolURI = storagePoolIter.next();
URI volumeURI = poolVolumeMap.get(storagePoolURI);
_log.info("Creating descriptor for volume{} in pool {}", volumeURI, storagePoolURI);
descriptors.add(createDescriptorForBlockVolumeCreation(storagePoolURI, volumeURI, storageSystemMap, volumeMap));
_log.info("Created descriptor for volume");
}
// Add steps in the block device controller to create the target
// volumes.
waitFor = _blockDeviceController.addStepsForCreateVolumes(workflow, waitFor, descriptors, wfId);
// Set the project and tenant. We prefer a project created for the VPlex system,
// but will fall back to the volume's project if there isn't a project for the VPlex.
Volume firstVolume = volumeMap.values().iterator().next();
Project vplexProject = VPlexUtil.lookupVplexProject(firstVolume, vplexSystem, _dbClient);
URI tenantURI = vplexProject.getTenantOrg().getURI();
_log.info("Project is {}, Tenant is {}", vplexProject.getId(), tenantURI);
// Now we need to do the necessary zoning and export steps to ensure
// the VPlex can see these new backend volumes.
createWorkflowStepsForBlockVolumeExport(workflow, vplexSystem, storageSystemMap, volumeMap, vplexProject.getId(), tenantURI, waitFor);
_log.info("Created workflow steps for volume export.");
// Now make a migration Step for each passed target to which data
// for the passed virtual volume will be migrated. The migrations
// will be done from this controller.
Iterator<URI> targetVolumeIter = targetVolumeURIs.iterator();
while (targetVolumeIter.hasNext()) {
URI targetVolumeURI = targetVolumeIter.next();
_log.info("Target volume is {}", targetVolumeURI);
URI migrationURI = migrationsMap.get(targetVolumeURI);
_log.info("Migration is {}", migrationURI);
String stepId = workflow.createStepId();
_log.info("Migration opId is {}", stepId);
Workflow.Method vplexExecuteMethod = new Workflow.Method(MIGRATE_VIRTUAL_VOLUME_METHOD_NAME, vplexURI, virtualVolumeURI, targetVolumeURI, migrationURI, newNhURI);
Workflow.Method vplexRollbackMethod = new Workflow.Method(RB_MIGRATE_VIRTUAL_VOLUME_METHOD_NAME, vplexURI, migrationURI, stepId);
_log.info("Creating workflow migration step");
workflow.createStep(MIGRATION_CREATE_STEP, String.format("VPlex %s migrating to target volume %s.", vplexSystem.getId().toString(), targetVolumeURI.toString()), EXPORT_STEP, vplexSystem.getId(), vplexSystem.getSystemType(), getClass(), vplexExecuteMethod, vplexRollbackMethod, stepId);
_log.info("Created workflow migration step");
}
// Once the migrations complete, we will commit the migrations.
// So, now we create the steps to commit the migrations.
String waitForStep = MIGRATION_CREATE_STEP;
List<URI> migrationSources = new ArrayList<URI>();
Iterator<URI> migrationsIter = migrationsMap.values().iterator();
while (migrationsIter.hasNext()) {
URI migrationURI = migrationsIter.next();
_log.info("Migration is {}", migrationURI);
Migration migration = getDataObject(Migration.class, migrationURI, _dbClient);
// The migration source volume may be null for ingested volumes
// for which we do not know anything about the backend volumes.
// If we don't know the source, we know we are migrating an
// ingested volume and we will not want to do any renaming
// after the commit, as we do when migrating ViPR-created volumes,
// which adhere to a standard naming convention.
Boolean rename = Boolean.TRUE;
if (migration.getSource() != null) {
migrationSources.add(migration.getSource());
} else {
rename = Boolean.FALSE;
}
_log.info("Added migration source {}", migration.getSource());
String stepId = workflow.createStepId();
_log.info("Commit operation id is {}", stepId);
Workflow.Method vplexExecuteMethod = new Workflow.Method(COMMIT_MIGRATION_METHOD_NAME, vplexURI, virtualVolumeURI, migrationURI, rename, newCoSURI, newNhURI);
Workflow.Method vplexRollbackMethod = new Workflow.Method(RB_COMMIT_MIGRATION_METHOD_NAME, migrationURIs, newCoSURI, newNhURI, stepId);
_log.info("Creating workflow step to commit migration");
waitForStep = workflow.createStep(MIGRATION_COMMIT_STEP, String.format("VPlex %s committing volume migration", vplexSystem.getId().toString()), waitForStep, vplexSystem.getId(), vplexSystem.getSystemType(), getClass(), vplexExecuteMethod, vplexRollbackMethod, stepId);
_log.info("Created workflow step to commit migration");
}
// Create a step that creates a sub workflow to delete the old
// migration source volumes, which are no longer used by the
// virtual volume. We also update the virtual volume CoS. If
// we make it to this step, then all migrations were committed.
// We do this in a sub workflow because we don't want to
// initiate rollback regardless of success or failure.
String stepId = workflow.createStepId();
Workflow.Method vplexExecuteMethod = new Workflow.Method(DELETE_MIGRATION_SOURCES_METHOD, vplexURI, virtualVolumeURI, newCoSURI, newNhURI, migrationSources);
workflow.createStep(DELETE_MIGRATION_SOURCES_STEP, String.format("Creating workflow to delete migration sources"), MIGRATION_COMMIT_STEP, vplexSystem.getId(), vplexSystem.getSystemType(), getClass(), vplexExecuteMethod, null, stepId);
_log.info("Created workflow step to create sub workflow for source deletion");
// Finish up and execute the plan. The Workflow will handle the
// TaskCompleter
List<URI> volumes = new ArrayList<URI>();
volumes.add(virtualVolumeURI);
volumes.addAll(targetVolumeURIs);
TaskCompleter completer = new MigrationWorkflowCompleter(volumes, migrationURIs, opId, wfStepId);
_log.info("Executing workflow plan");
workflow.executePlan(completer, successMsg);
_log.info("Workflow plan executed");
} catch (Exception e) {
_log.error(failMsg, e);
List<URI> volumes = new ArrayList<URI>();
volumes.add(virtualVolumeURI);
volumes.addAll(targetVolumeURIs);
TaskCompleter completer = new MigrationWorkflowCompleter(volumes, migrationURIs, opId, wfStepId);
ServiceError serviceError = VPlexApiException.errors.jobFailed(e);
serviceError.setMessage(failMsg);
failStep(completer, opId, serviceError);
}
}
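The pattern above recurs in every example on this page: each createStep call pairs a Workflow.Method to execute with a Workflow.Method to roll back, and executePlan reports the outcome of the whole plan to a single TaskCompleter. Below is a minimal, self-contained sketch of that execute/rollback/completer shape; SimpleStep, SimplePlan, and SimpleTaskCompleter are hypothetical stand-ins for illustration only, not the real CoprHD Workflow and TaskCompleter classes.

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for com.emc.storageos.volumecontroller.TaskCompleter:
// the whole plan reports to exactly one completer when it finishes or fails.
interface SimpleTaskCompleter {
    void ready(String successMsg);
    void error(Exception cause);
}

// Hypothetical stand-in for a workflow step: an execute action paired with a rollback.
class SimpleStep {
    final String name;
    final Runnable execute;
    final Runnable rollback; // may be null, like the null rollback on the delete-sources step

    SimpleStep(String name, Runnable execute, Runnable rollback) {
        this.name = name;
        this.execute = execute;
        this.rollback = rollback;
    }
}

// Hypothetical stand-in for Workflow.executePlan(): run steps in order and, if any step
// fails, roll back the completed steps in reverse order before signaling the completer.
class SimplePlan {
    private final List<SimpleStep> steps = new ArrayList<>();

    void addStep(SimpleStep step) {
        steps.add(step);
    }

    void executePlan(SimpleTaskCompleter completer, String successMsg) {
        List<SimpleStep> completed = new ArrayList<>();
        try {
            for (SimpleStep step : steps) {
                step.execute.run();
                completed.add(step);
            }
            completer.ready(successMsg);
        } catch (Exception e) {
            for (int i = completed.size() - 1; i >= 0; i--) {
                SimpleStep done = completed.get(i);
                if (done.rollback != null) {
                    done.rollback.run();
                }
            }
            completer.error(e);
        }
    }
}

public class MigrationPlanSketch {
    public static void main(String[] args) {
        SimplePlan plan = new SimplePlan();
        plan.addStep(new SimpleStep("MIGRATION_CREATE_STEP",
                () -> System.out.println("migrate virtual volume to target"),
                () -> System.out.println("cancel migration")));
        plan.addStep(new SimpleStep("MIGRATION_COMMIT_STEP",
                () -> System.out.println("commit migration"),
                () -> System.out.println("roll back commit")));
        plan.addStep(new SimpleStep("DELETE_MIGRATION_SOURCES_STEP",
                () -> System.out.println("delete migration sources"),
                null));
        plan.executePlan(new SimpleTaskCompleter() {
            @Override
            public void ready(String msg) {
                System.out.println("completer ready: " + msg);
            }

            @Override
            public void error(Exception cause) {
                System.out.println("completer error: " + cause);
            }
        }, "Migration workflow succeeded");
    }
}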
Use of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method establishVolumeAndFullCopyGroupRelation.
@Override
public void establishVolumeAndFullCopyGroupRelation(URI storage, URI sourceVolume, URI fullCopy, String opId) throws InternalException {
try {
// Generate the Workflow.
Workflow workflow = _workflowService.getNewWorkflow(this, VOLUME_FULLCOPY_GROUP_RELATION_WF, false, opId);
_log.info("Created establish volume and full copy group relation workflow with operation id {}", opId);
// Get the VPLEX and backend full copy volumes.
Volume fullCopyVolume = getDataObject(Volume.class, fullCopy, _dbClient);
Volume nativeFullCopyVolume = VPlexUtil.getVPLEXBackendVolume(fullCopyVolume, true, _dbClient);
URI nativeSourceVolumeURI = nativeFullCopyVolume.getAssociatedSourceVolume();
URI nativeSystemURI = nativeFullCopyVolume.getStorageController();
StorageSystem nativeSystem = getDataObject(StorageSystem.class, nativeSystemURI, _dbClient);
Workflow.Method establishRelationMethod = new Workflow.Method(VOLUME_FULLCOPY_RELATION_METHOD, nativeSystemURI, nativeSourceVolumeURI, nativeFullCopyVolume.getId());
workflow.createStep(VOLUME_FULLCOPY_GROUP_RELATION_STEP, "create group relation between Volume group and Full copy group", null, nativeSystemURI, nativeSystem.getSystemType(), BlockDeviceController.class, establishRelationMethod, rollbackMethodNullMethod(), null);
TaskCompleter completer = new CloneTaskCompleter(fullCopy, opId);
String successMsg = String.format("Establish volume and full copy %s group relation completed successfully", fullCopy);
FullCopyOperationCompleteCallback wfCompleteCB = new FullCopyOperationCompleteCallback();
workflow.executePlan(completer, successMsg, wfCompleteCB, new Object[] { Arrays.asList(fullCopy) }, null, null);
_log.info("Workflow plan executing");
} catch (Exception e) {
String failMsg = String.format("Establish volume and full copy %s group relation failed", fullCopy);
_log.error(failMsg, e);
TaskCompleter completer = new CloneTaskCompleter(fullCopy, opId);
ServiceCoded sc = VPlexApiException.exceptions.establishVolumeFullCopyGroupRelationFailed(fullCopy.toString(), e);
failStep(completer, opId, sc);
}
}
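This example uses the executePlan overload that also takes a FullCopyOperationCompleteCallback and callback arguments. A minimal sketch of that shape is below, assuming a simplified model in which the callback runs on success before the completer is marked ready; PlanCallback, PlanCompleter, and MiniPlan are hypothetical stand-ins for illustration, not the real CoprHD API.

import java.util.Arrays;
import java.util.List;

public class CallbackPlanSketch {

    // Hypothetical stand-in for a workflow completion callback.
    interface PlanCallback {
        void workflowComplete(Object[] args);
    }

    // Hypothetical stand-in for TaskCompleter.
    interface PlanCompleter {
        void ready(String successMsg);
        void error(Exception cause);
    }

    // Hypothetical stand-in for a workflow plan that accepts a post-success callback.
    static class MiniPlan {
        private final Runnable body;

        MiniPlan(Runnable body) {
            this.body = body;
        }

        void executePlan(PlanCompleter completer, String successMsg, PlanCallback callback, Object[] callbackArgs) {
            try {
                body.run();
                if (callback != null) {
                    callback.workflowComplete(callbackArgs); // e.g. update full copy state before completion
                }
                completer.ready(successMsg);
            } catch (Exception e) {
                completer.error(e);
            }
        }
    }

    public static void main(String[] args) {
        List<String> fullCopies = Arrays.asList("fullCopy-1");
        MiniPlan plan = new MiniPlan(() -> System.out.println("establish group relation step"));
        plan.executePlan(
                new PlanCompleter() {
                    @Override
                    public void ready(String msg) {
                        System.out.println("completer ready: " + msg);
                    }

                    @Override
                    public void error(Exception cause) {
                        System.out.println("completer error: " + cause);
                    }
                },
                "group relation established",
                callbackArgs -> System.out.println("callback with " + Arrays.toString(callbackArgs)),
                new Object[] { fullCopies });
    }
}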
Use of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method restoreFromFullCopy.
/**
* {@inheritDoc}
*/
@Override
public void restoreFromFullCopy(URI vplexURI, List<URI> fullCopyURIs, String opId) throws InternalException {
TaskCompleter completer = null;
try {
completer = new CloneRestoreCompleter(fullCopyURIs, opId);
// Generate the Workflow.
Workflow workflow = _workflowService.getNewWorkflow(this, RESTORE_VOLUME_WF_NAME, false, opId);
_log.info("Created restore volume workflow with operation id {}", opId);
// add CG to taskCompleter
Volume firstFullCopy = getDataObject(Volume.class, fullCopyURIs.get(0), _dbClient);
BlockObject firstSource = BlockObject.fetch(_dbClient, firstFullCopy.getAssociatedSourceVolume());
if (!NullColumnValueGetter.isNullURI(firstSource.getConsistencyGroup())) {
completer.addConsistencyGroupId(firstSource.getConsistencyGroup());
}
// Get the VPLEX and backend full copy volumes.
URI nativeSystemURI = null;
Map<URI, Volume> vplexFullCopyMap = new HashMap<URI, Volume>();
Map<URI, Volume> nativeFullCopyMap = new HashMap<URI, Volume>();
for (URI fullCopyURI : fullCopyURIs) {
Volume fullCopyVolume = getDataObject(Volume.class, fullCopyURI, _dbClient);
vplexFullCopyMap.put(fullCopyURI, fullCopyVolume);
Volume nativeFullCopyVolume = VPlexUtil.getVPLEXBackendVolume(fullCopyVolume, true, _dbClient);
nativeFullCopyMap.put(nativeFullCopyVolume.getId(), nativeFullCopyVolume);
if (nativeSystemURI == null) {
nativeSystemURI = nativeFullCopyVolume.getStorageController();
}
}
// We'll need a list of the native full copy URIs.
List<URI> nativeFullCopyURIs = new ArrayList<URI>(nativeFullCopyMap.keySet());
// Get the native system.
StorageSystem nativeSystem = getDataObject(StorageSystem.class, nativeSystemURI, _dbClient);
// Maps each VPLEX volume that needs to be flushed to its underlying array volume
Map<Volume, Volume> vplexToArrayVolumesToFlush = new HashMap<Volume, Volume>();
for (Volume vplexFullCopyVolume : vplexFullCopyMap.values()) {
Volume fcSourceVolume = getDataObject(Volume.class, vplexFullCopyVolume.getAssociatedSourceVolume(), _dbClient);
Volume arrayVolumeToBeRestored = VPlexUtil.getVPLEXBackendVolume(fcSourceVolume, true, _dbClient);
vplexToArrayVolumesToFlush.put(fcSourceVolume, arrayVolumeToBeRestored);
}
Map<URI, String> vplexVolumeIdToDetachStep = new HashMap<URI, String>();
// Generate pre restore steps
String waitFor = addPreRestoreResyncSteps(workflow, vplexToArrayVolumesToFlush, vplexVolumeIdToDetachStep, null);
// Now create a workflow step to natively restore the backend
// source volumes from the backend full copies. We execute this
// after the invalidate cache steps.
waitFor = createWorkflowStepForRestoreNativeFullCopy(workflow, nativeSystem, nativeFullCopyURIs, waitFor, rollbackMethodNullMethod());
// Generate post restore steps
waitFor = addPostRestoreResyncSteps(workflow, vplexToArrayVolumesToFlush, vplexVolumeIdToDetachStep, waitFor);
// Execute the workflow.
_log.info("Executing workflow plan");
String successMsg = String.format("Restore full copy volumes %s completed successfully", fullCopyURIs);
FullCopyOperationCompleteCallback wfCompleteCB = new FullCopyOperationCompleteCallback();
workflow.executePlan(completer, successMsg, wfCompleteCB, new Object[] { fullCopyURIs }, null, null);
_log.info("Workflow plan executing");
} catch (Exception e) {
String failMsg = String.format("Restore full copy volumes %s failed", fullCopyURIs);
_log.error(failMsg, e);
ServiceCoded sc = VPlexApiException.exceptions.restoreFromFullCopyFailed(fullCopyURIs.toString(), e);
failStep(completer, opId, sc);
}
}
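The comments in restoreFromFullCopy describe a strict ordering: flush the VPLEX cache, restore the backend volumes natively, then resync. That ordering is expressed by chaining waitFor values, where each step-creation call returns an identifier that the next call waits on. A minimal sketch of that chaining is below; OrderedPlan is a hypothetical stand-in, not the real Workflow class.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class WaitForChainingSketch {

    // Hypothetical stand-in for a workflow that records step dependencies.
    static class OrderedPlan {
        private final Map<String, List<String>> dependsOn = new LinkedHashMap<>();

        // Returns the step name so callers can chain it as the next step's waitFor.
        String createStep(String name, String waitFor) {
            List<String> deps = new ArrayList<>();
            if (waitFor != null) {
                deps.add(waitFor);
            }
            dependsOn.put(name, deps);
            return name;
        }

        void print() {
            dependsOn.forEach((step, deps) -> System.out.println(step + " waits for " + deps));
        }
    }

    public static void main(String[] args) {
        OrderedPlan plan = new OrderedPlan();
        String waitFor = plan.createStep("pre-restore-flush", null);
        waitFor = plan.createStep("native-restore-from-full-copy", waitFor);
        plan.createStep("post-restore-resync", waitFor);
        plan.print();
    }
}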
Use of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method relinkTargetsToSnapshotSession.
/**
* {@inheritDoc}
*/
@Override
public void relinkTargetsToSnapshotSession(URI vplexURI, URI tgtSnapSessionURI, List<URI> snapshotURIs, String opId) throws InternalException {
try {
// Create a new Workflow.
Workflow workflow = _workflowService.getNewWorkflow(this, RELINK_SNAPSHOT_SESSION_TARGETS_WF_NAME, false, opId);
_log.info("Created relink snapshot session targets workflow with operation id {}", opId);
// First if this is a group operation, we make sure we only process
// one snapshot per replication group.
List<URI> filteredSnapshotURIs = new ArrayList<URI>();
BlockSnapshotSession tgtSnapSession = _dbClient.queryObject(BlockSnapshotSession.class, tgtSnapSessionURI);
if (tgtSnapSession.hasConsistencyGroup() && NullColumnValueGetter.isNotNullValue(tgtSnapSession.getReplicationGroupInstance())) {
filteredSnapshotURIs.addAll(ControllerUtils.ensureOneSnapshotPerReplicationGroup(snapshotURIs, _dbClient));
} else {
filteredSnapshotURIs.addAll(snapshotURIs);
}
// Now we need to make sure we get all the snapshots in each
// replication group. If a snapshot is not in a replication group,
// this will just add the snapshot.
List<BlockSnapshot> snapshotsToRelink = new ArrayList<BlockSnapshot>();
for (URI filteredSnapshotURI : filteredSnapshotURIs) {
BlockSnapshot snapshot = _dbClient.queryObject(BlockSnapshot.class, filteredSnapshotURI);
snapshotsToRelink.addAll(ControllerUtils.getSnapshotsPartOfReplicationGroup(snapshot, _dbClient));
}
// Get a list of the VPLEX volumes, if any, that are built
// using the snapshot target volumes.
List<Volume> vplexVolumes = VPlexUtil.getVPlexVolumesBuiltOnSnapshots(snapshotsToRelink, _dbClient);
// Create the workflow steps.
if (vplexVolumes.isEmpty()) {
// If there are no VPLEX volumes built on the snapshots to be relinked,
// then we just need a single step to invoke the block device controller to
// relink the snapshots.
createWorkflowStepForRelinkNativeTargets(workflow, tgtSnapSession, snapshotURIs, null, null);
} else {
String waitFor = null;
// Maps each VPLEX volume that needs to be flushed to its underlying array volume
Map<Volume, Volume> vplexToArrayVolumesToFlush = new HashMap<Volume, Volume>();
for (Volume vplexVolume : vplexVolumes) {
Volume arrayVolumeToBeRelinked = VPlexUtil.getVPLEXBackendVolume(vplexVolume, true, _dbClient);
vplexToArrayVolumesToFlush.put(vplexVolume, arrayVolumeToBeRelinked);
}
// Generate pre restore steps
Map<URI, String> vplexVolumeIdToDetachStep = new HashMap<URI, String>();
waitFor = addPreRestoreResyncSteps(workflow, vplexToArrayVolumesToFlush, vplexVolumeIdToDetachStep, waitFor);
// Now create a workflow step to natively relink the snapshots.
// Note that if a snapshot is associated with a CG, then block
// controller will relink all snapshots in the snapshot set. We
// execute this after the invalidate cache.
waitFor = createWorkflowStepForRelinkNativeTargets(workflow, tgtSnapSession, snapshotURIs, waitFor, rollbackMethodNullMethod());
// Generate post restore steps
addPostRestoreResyncSteps(workflow, vplexToArrayVolumesToFlush, vplexVolumeIdToDetachStep, waitFor);
}
// Execute the workflow.
_log.info("Executing workflow plan");
TaskCompleter completer = new BlockSnapshotSessionRelinkTargetsWorkflowCompleter(tgtSnapSessionURI, Boolean.TRUE, opId);
String successMsg = String.format("Relink VPLEX native snapshot session targets %s to session %s " + "completed successfully", snapshotURIs, tgtSnapSessionURI);
workflow.executePlan(completer, successMsg);
_log.info("Workflow plan executing");
} catch (Exception e) {
String failMsg = String.format("Relink VPLEX native snapshot session targets %s to session %s failed", snapshotURIs, tgtSnapSessionURI);
_log.error(failMsg, e);
TaskCompleter completer = new BlockSnapshotSessionRelinkTargetsWorkflowCompleter(tgtSnapSessionURI, Boolean.TRUE, opId);
ServiceError serviceError = VPlexApiException.errors.relinkSnapshotSessionTargetsFailed(snapshotURIs, tgtSnapSessionURI, e);
failStep(completer, opId, serviceError);
}
}
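For group operations this method first reduces the requested snapshots to one per replication group, then expands back to all snapshots in each group. A minimal sketch of the first, filtering half is below; SnapshotInfo and onePerReplicationGroup are hypothetical stand-ins for the real BlockSnapshot model and ControllerUtils.ensureOneSnapshotPerReplicationGroup.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class OnePerGroupSketch {

    // Hypothetical stand-in for the BlockSnapshot model; replicationGroup is null
    // when a snapshot does not belong to a replication group.
    static class SnapshotInfo {
        final String id;
        final String replicationGroup;

        SnapshotInfo(String id, String replicationGroup) {
            this.id = id;
            this.replicationGroup = replicationGroup;
        }
    }

    // Keep the first snapshot seen for each replication group; ungrouped snapshots pass through.
    static List<SnapshotInfo> onePerReplicationGroup(List<SnapshotInfo> snapshots) {
        Map<String, SnapshotInfo> firstPerGroup = new LinkedHashMap<>();
        List<SnapshotInfo> result = new ArrayList<>();
        for (SnapshotInfo snap : snapshots) {
            if (snap.replicationGroup == null) {
                result.add(snap);
            } else if (!firstPerGroup.containsKey(snap.replicationGroup)) {
                firstPerGroup.put(snap.replicationGroup, snap);
                result.add(snap);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<SnapshotInfo> snaps = Arrays.asList(
                new SnapshotInfo("snap-1", "rg-A"),
                new SnapshotInfo("snap-2", "rg-A"),
                new SnapshotInfo("snap-3", "rg-B"),
                new SnapshotInfo("snap-4", null));
        // Prints snap-1, snap-3, snap-4: one snapshot per group, ungrouped snapshots kept as-is.
        for (SnapshotInfo snap : onePerReplicationGroup(snaps)) {
            System.out.println(snap.id);
        }
    }
}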
Use of com.emc.storageos.volumecontroller.TaskCompleter in project coprhd-controller by CoprHD.
The class VPlexDeviceController, method updateVolumeGroup.
@Override
public void updateVolumeGroup(URI vplexURI, ApplicationAddVolumeList addVolList, List<URI> removeVolumeList, URI volumeGroup, String opId) throws InternalException {
_log.info("Update volume group {}", volumeGroup);
TaskCompleter completer = null;
List<URI> addVols = null;
String waitFor = null;
// Get a new workflow to execute the volume group update.
Workflow workflow = _workflowService.getNewWorkflow(this, UPDATE_VOLUMEGROUP_WF_NAME, false, opId);
Set<URI> cgs = new HashSet<URI>();
try {
List<URI> allRemoveBEVolumes = new ArrayList<URI>();
if (removeVolumeList != null && !removeVolumeList.isEmpty()) {
_log.info("Creating steps for removing volumes from the volume group");
for (URI voluri : removeVolumeList) {
Volume vol = getDataObject(Volume.class, voluri, _dbClient);
if (vol == null || vol.getInactive()) {
_log.info(String.format("The volume: %s has been deleted. Skip it.", voluri));
continue;
}
cgs.add(vol.getConsistencyGroup());
StringSet backends = vol.getAssociatedVolumes();
if (backends == null) {
_log.info(String.format("The volume: %s do not have backend volumes. Skip it.", vol.getLabel()));
continue;
}
for (String backendId : backends) {
allRemoveBEVolumes.add(URI.create(backendId));
}
}
}
List<URI> allAddBEVolumes = new ArrayList<URI>();
ApplicationAddVolumeList addBEVolList = new ApplicationAddVolumeList();
if (addVolList != null && addVolList.getVolumes() != null && !addVolList.getVolumes().isEmpty()) {
_log.info("Creating steps for adding volumes to the volume group");
addVols = addVolList.getVolumes();
for (URI addVol : addVols) {
Volume addVplexVol = getDataObject(Volume.class, addVol, _dbClient);
if (addVplexVol == null || addVplexVol.getInactive()) {
_log.info(String.format("The volume: %s has been deleted. Skip it.", addVol));
continue;
}
cgs.add(addVplexVol.getConsistencyGroup());
StringSet backends = addVplexVol.getAssociatedVolumes();
if (backends == null) {
_log.info(String.format("The volume: %s do not have backend volumes. Skip it.", addVol));
continue;
}
for (String backendId : backends) {
URI backUri = URI.create(backendId);
Volume backVol = getDataObject(Volume.class, backUri, _dbClient);
if (backVol != null && !backVol.getInactive()) {
allAddBEVolumes.add(backUri);
}
}
}
}
completer = new VolumeGroupUpdateTaskCompleter(volumeGroup, addVols, removeVolumeList, cgs, opId);
addBEVolList.setVolumes(allAddBEVolumes);
addBEVolList.setReplicationGroupName(addVolList.getReplicationGroupName());
addBEVolList.setConsistencyGroup(addVolList.getConsistencyGroup());
// Add steps for adding and removing the backend volumes.
waitFor = _blockDeviceController.addStepsForUpdateApplication(workflow, addBEVolList, allRemoveBEVolumes, waitFor, opId);
addStepsForImportClonesOfApplicationVolumes(workflow, waitFor, addVolList.getVolumes(), opId);
// Finish up and execute the plan.
_log.info("Executing workflow plan {}", UPDATE_VOLUMEGROUP_WF_NAME);
String successMessage = String.format("Update volume group successful for %s", volumeGroup.toString());
workflow.executePlan(completer, successMessage);
} catch (Exception e) {
_log.error("Exception while updating the volume group", e);
DeviceControllerException ex = DeviceControllerException.exceptions.failedToUpdateVolumesFromAppication(volumeGroup.toString(), e.getMessage());
if (completer != null) {
completer.error(_dbClient, ex);
} else {
throw ex;
}
}
}
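Unlike the other examples, updateVolumeGroup constructs its TaskCompleter partway through the try block, so the catch block signals the completer only when it exists and rethrows otherwise. A minimal sketch of that convention is below; Completer and OperationException are hypothetical stand-ins for TaskCompleter and DeviceControllerException.

public class CompleterErrorHandlingSketch {

    // Hypothetical stand-in for TaskCompleter.
    interface Completer {
        void error(RuntimeException cause);
    }

    // Hypothetical stand-in for DeviceControllerException.
    static class OperationException extends RuntimeException {
        OperationException(String message, Throwable cause) {
            super(message, cause);
        }
    }

    static void updateGroup(boolean failBeforeCompleter) {
        Completer completer = null;
        try {
            if (failBeforeCompleter) {
                throw new IllegalStateException("validation failed before completer creation");
            }
            // The completer is created only after the inputs have been validated.
            completer = cause -> System.out.println("completer.error: " + cause.getMessage());
            throw new IllegalStateException("workflow construction failed");
        } catch (Exception e) {
            OperationException ex = new OperationException("failed to update volume group", e);
            if (completer != null) {
                completer.error(ex); // task status is recorded against the completer
            } else {
                throw ex; // no completer yet, so propagate to the caller
            }
        }
    }

    public static void main(String[] args) {
        updateGroup(false); // completer exists: the error is recorded, nothing escapes
        try {
            updateGroup(true); // completer not yet created: the exception propagates
        } catch (OperationException expected) {
            System.out.println("caller saw: " + expected.getMessage());
        }
    }
}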