Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class ComputeSystemControllerImpl, method addStepsForRemoveHost.
/**
 * Assembles steps to remove a host
 *
 * @param workflow The current workflow
 * @param waitFor The current waitFor
 * @param hostIds List of hosts to remove
 * @param clusterId Cluster ID if this is a clustered host
 * @param vcenterDataCenter vCenter datacenter ID if the host belongs to a vCenter datacenter
 * @param isVcenter Whether the hosts are vCenter-managed
 * @return Next step
 */
public String addStepsForRemoveHost(Workflow workflow, String waitFor, List<URI> hostIds, URI clusterId, URI vcenterDataCenter, boolean isVcenter) {
    List<ExportGroup> exportGroups = getSharedExports(_dbClient, clusterId);
    String newWaitFor = waitFor;
    if (isVcenter && !NullColumnValueGetter.isNullURI(vcenterDataCenter)) {
        Collection<URI> exportIds = Collections2.transform(exportGroups, CommonTransformerFunctions.fctnDataObjectToID());
        Map<URI, Collection<URI>> hostExports = Maps.newHashMap();
        for (URI host : hostIds) {
            hostExports.put(host, exportIds);
        }
        newWaitFor = this.verifyDatastoreForRemoval(hostExports, vcenterDataCenter, newWaitFor, workflow);
        newWaitFor = this.unmountAndDetachVolumes(hostExports, vcenterDataCenter, newWaitFor, workflow);
    }
    for (ExportGroup export : exportGroups) {
        // Export groups flagged as internal objects should be ignored during host operations.
        if (export.checkInternalFlags(Flag.INTERNAL_OBJECT)) {
            _log.info("Skipping remove host step. Export group : {} is an internal object.", export.getId());
            continue;
        }
        newWaitFor = addStepsForRemoveHostFromExport(workflow, newWaitFor, hostIds, export.getId());
    }
    if (isVcenter && !NullColumnValueGetter.isNullURI(vcenterDataCenter)) {
        newWaitFor = this.rescanHostStorage(hostIds, vcenterDataCenter, newWaitFor, workflow);
    }
    return newWaitFor;
}
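Note the waitFor chaining convention used throughout this controller: each step-builder takes the token of the step(s) it must follow, appends its own steps after that token, and returns the token downstream builders should wait on. A minimal illustrative sketch of the pattern follows; the add*Steps helper names are hypothetical placeholders, not actual CoprHD methods.

// Hypothetical sketch of the waitFor chaining convention; the add*Steps helper
// names here are placeholders, not CoprHD APIs.
String buildRemovalSequence(Workflow workflow, String waitFor, List<URI> hostIds) {
    String token = waitFor;
    token = addUnmountSteps(workflow, token, hostIds);  // these steps run after 'waitFor'
    token = addExportSteps(workflow, token, hostIds);   // these run after the unmount steps
    token = addRescanSteps(workflow, token, hostIds);   // these run after the export steps
    return token;                                       // later builders chain from here
}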
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class ComputeSystemControllerImpl, method addStepsForRemoveHostFromExport.
/**
 * Assembles steps to remove a host from an export group
 *
 * @param workflow The current workflow
 * @param waitFor The current waitFor
 * @param hostIds List of hosts to remove
 * @param exportId The ID of the export group to remove the host from
 * @return Next step
 */
public String addStepsForRemoveHostFromExport(Workflow workflow, String waitFor, List<URI> hostIds, URI exportId) {
    ExportGroup export = _dbClient.queryObject(ExportGroup.class, exportId);
    String newWaitFor = waitFor;
    Set<URI> addedClusters = new HashSet<>();
    Set<URI> removedClusters = new HashSet<>();
    Set<URI> addedHosts = new HashSet<>();
    Set<URI> removedHosts = new HashSet<>(hostIds);
    Set<URI> addedInitiators = new HashSet<>();
    Set<URI> removedInitiators = new HashSet<>();
    if (export != null) {
        List<URI> updatedHosts = StringSetUtil.stringSetToUriList(export.getHosts());
        Map<URI, Integer> updatedVolumesMap = StringMapUtil.stringMapToVolumeMap(export.getVolumes());
        for (URI hostId : hostIds) {
            updatedHosts.remove(hostId);
            List<Initiator> hostInitiators = ComputeSystemHelper.queryInitiators(_dbClient, hostId);
            for (Initiator initiator : hostInitiators) {
                removedInitiators.add(initiator.getId());
            }
        }
        // If removing these hosts leaves the export group with no hosts, delete the export
        // group; otherwise update it. This could be raised as an enhancement and revisited later.
        if (updatedHosts.isEmpty()) {
            newWaitFor = workflow.createStep(DELETE_EXPORT_GROUP_STEP,
                    String.format("Deleting export group %s", export.getId()),
                    newWaitFor, export.getId(), export.getId().toString(), this.getClass(),
                    deleteExportGroupMethod(export.getId()), rollbackMethodNullMethod(), null);
        } else {
            newWaitFor = workflow.createStep(UPDATE_EXPORT_GROUP_STEP,
                    String.format("Updating export group %s", export.getId()),
                    newWaitFor, export.getId(), export.getId().toString(), this.getClass(),
                    updateExportGroupMethod(export.getId(),
                            CollectionUtils.isEmpty(export.getInitiators()) ? new HashMap<URI, Integer>() : updatedVolumesMap,
                            addedClusters, removedClusters, addedHosts, removedHosts, addedInitiators, removedInitiators),
                    updateExportGroupRollbackMethod(export.getId()), null);
        }
    }
    return newWaitFor;
}
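The branch above deletes the export group when the last host is being removed and otherwise shrinks it. A minimal standalone sketch of that decision, using only java.util collections (illustrative, not CoprHD API):

import java.net.URI;
import java.util.HashSet;
import java.util.Set;

// Illustrative only: returns true when removing the given hosts would empty the
// export group, i.e. when the group itself should be deleted rather than updated.
static boolean shouldDeleteExportGroup(Set<URI> currentHosts, Set<URI> hostsToRemove) {
    Set<URI> remaining = new HashSet<>(currentHosts); // defensive copy
    remaining.removeAll(hostsToRemove);
    return remaining.isEmpty();
}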
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class ComputeSystemControllerImpl, method attachAndMount.
/**
 * Attaches and mounts every disk and datastore associated with the volumes in the export group.
 * For each volume in the export group, the associated disk is attached to the host and any
 * datastores backed by the volume are mounted to the host.
 *
 * @param exportGroupId
 *            export group that contains volumes
 * @param hostId
 *            host to attach and mount to
 * @param vCenterId
 *            vCenter that the host belongs to
 * @param vcenterDatacenter
 *            vCenter datacenter that the host belongs to
 * @param stepId
 *            the id of the workflow step
 */
public void attachAndMount(URI exportGroupId, URI hostId, URI vCenterId, URI vcenterDatacenter, String stepId) {
    WorkflowStepCompleter.stepExecuting(stepId);
    try {
        // Test mechanism to invoke a failure. No-op on production systems.
        InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_054);
        Host esxHost = _dbClient.queryObject(Host.class, hostId);
        Vcenter vCenter = _dbClient.queryObject(Vcenter.class, vCenterId);
        VcenterDataCenter vCenterDataCenter = _dbClient.queryObject(VcenterDataCenter.class, vcenterDatacenter);
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupId);
        VCenterAPI api = VcenterDiscoveryAdapter.createVCenterAPI(vCenter);
        HostSystem hostSystem = api.findHostSystem(vCenterDataCenter.getLabel(), esxHost.getLabel());
        if (hostSystem == null) {
            _log.info("Not able to find host " + esxHost.getLabel() + " in vCenter. Unable to attach disks and mount datastores");
            WorkflowStepCompleter.stepSucceded(stepId);
            return;
        }
        HostStorageAPI storageAPI = new HostStorageAPI(hostSystem);
        if (exportGroup != null && exportGroup.getVolumes() != null) {
            _log.info("Refreshing storage");
            storageAPI.refreshStorage();
            Set<BlockObject> blockObjects = Sets.newHashSet();
            for (String volume : exportGroup.getVolumes().keySet()) {
                BlockObject blockObject = BlockObject.fetch(_dbClient, URI.create(volume));
                blockObjects.add(blockObject);
                for (HostScsiDisk entry : storageAPI.listScsiDisks()) {
                    if (VolumeWWNUtils.wwnMatches(VMwareUtils.getDiskWwn(entry), blockObject.getWWN())) {
                        if (VMwareUtils.isDiskOff(entry)) {
                            _log.info("Attach SCSI Lun " + entry.getCanonicalName() + " on host " + esxHost.getLabel());
                            storageAPI.attachScsiLun(entry);
                        }
                        // Test mechanism to invoke a failure. No-op on production systems.
                        InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_055);
                        break;
                    }
                }
            }
            int retries = 0;
            while (retries++ < MAXIMUM_RESCAN_ATTEMPTS && !blockObjects.isEmpty()) {
                _log.info("Rescanning VMFS for host " + esxHost.getLabel());
                storageAPI.getStorageSystem().rescanVmfs();
                _log.info("Waiting for {} milliseconds before checking for datastores", RESCAN_DELAY_MS);
                Thread.sleep(RESCAN_DELAY_MS);
                _log.info("Looking for datastores for {} volumes", blockObjects.size());
                Map<String, Datastore> wwnDatastores = getWwnDatastoreMap(hostSystem);
                Iterator<BlockObject> objectIterator = blockObjects.iterator();
                while (objectIterator.hasNext()) {
                    BlockObject blockObject = objectIterator.next();
                    if (blockObject != null) {
                        Datastore datastore = getDatastoreByWwn(wwnDatastores, blockObject.getWWN());
                        if (datastore != null && VMwareUtils.isDatastoreMountedOnHost(datastore, hostSystem)) {
                            _log.info("Datastore {} is already mounted on {}", datastore.getName(), esxHost.getLabel());
                            objectIterator.remove();
                        } else if (datastore != null && !VMwareUtils.isDatastoreMountedOnHost(datastore, hostSystem)) {
                            _log.info("Mounting datastore {} on host {}", datastore.getName(), esxHost.getLabel());
                            storageAPI.mountDatastore(datastore);
                            objectIterator.remove();
                            // Test mechanism to invoke a failure. No-op on production systems.
                            InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_056);
                        }
                    }
                }
            }
        }
        WorkflowStepCompleter.stepSucceded(stepId);
    } catch (Exception ex) {
        _log.error(ex.getMessage(), ex);
        WorkflowStepCompleter.stepFailed(stepId, DeviceControllerException.errors.jobFailed(ex));
    }
}
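The rescan loop above is a bounded retry: rescan VMFS, wait, then remove each volume from the pending set once its datastore is found and mounted, stopping early when the set drains. The same pattern in isolation (an illustrative sketch, not CoprHD code):

import java.util.Set;
import java.util.function.Predicate;

// Illustrative retry skeleton: re-check pending items up to maxAttempts times,
// sleeping between passes, and drop items as soon as they are satisfied.
// Whatever remains after the loop was never found.
static <T> Set<T> retryUntilHandled(Set<T> pending, int maxAttempts, long delayMs,
        Predicate<T> handled) throws InterruptedException {
    int attempts = 0;
    while (attempts++ < maxAttempts && !pending.isEmpty()) {
        Thread.sleep(delayMs);      // give the host time to surface new devices
        pending.removeIf(handled);  // removeIf mutates the set in place
    }
    return pending;
}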
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class RPDeviceController, method addExportRemoveVolumesSteps.
/**
 * Add the steps that will remove the volumes from the export group.
 * TODO: This could stand to be refactored to be simpler.
 *
 * @param workflow
 *            workflow object
 * @param waitFor
 *            step that these steps are dependent on
 * @param filteredSourceVolumeDescriptors
 *            volumes to act on
 * @return "waitFor" step that future steps should wait on
 * @throws InternalException
 */
private String addExportRemoveVolumesSteps(Workflow workflow, String waitFor, List<VolumeDescriptor> filteredSourceVolumeDescriptors) throws InternalException {
    _log.info("Adding steps to remove volumes from export groups.");
    String returnStep = waitFor;
    Set<URI> volumeURIs = RPHelper.getVolumesToDelete(VolumeDescriptor.getVolumeURIs(filteredSourceVolumeDescriptors), _dbClient);
    _log.info(String.format("Following volume(s) will be unexported from their RP export groups : [%s]", Joiner.on("--").join(volumeURIs)));
    Map<URI, RPExport> rpExports = new HashMap<URI, RPExport>();
    for (URI volumeURI : volumeURIs) {
        Volume volume = _dbClient.queryObject(Volume.class, volumeURI);
        if (volume == null) {
            _log.warn("Could not load volume with given URI: " + volumeURI);
            continue;
        }
        // Get the protection system for this volume
        URI rpSystemId = volume.getProtectionController();
        ProtectionSystem rpSystem = null;
        if (rpSystemId != null) {
            rpSystem = _dbClient.queryObject(ProtectionSystem.class, rpSystemId);
            if (rpSystem == null || rpSystem.getInactive()) {
                _log.warn("No protection system information found for volume {}. Volume cannot be removed from RP export groups.", volume.getLabel());
                continue;
            }
        }
        // Get the storage controller URI of the volume
        URI storageURI = volume.getStorageController();
        // Get the vpool of the volume
        VirtualPool virtualPool = _dbClient.queryObject(VirtualPool.class, volume.getVirtualPool());
        if (VirtualPool.isRPVPlexProtectHASide(virtualPool)) {
            _log.info(String.format("RP+VPLEX protect HA Source Volume [%s] to be removed from RP export group.", volume.getLabel()));
            // For RP+VPLEX volumes that protect the HA side, the volume is removed from
            // the HA side export group only.
            if (volume.getAssociatedVolumes() != null && volume.getAssociatedVolumes().size() == 2) {
                for (String associatedVolURI : volume.getAssociatedVolumes()) {
                    Volume associatedVolume = _dbClient.queryObject(Volume.class, URI.create(associatedVolURI));
                    if (associatedVolume.getVirtualArray().toString().equals(virtualPool.getHaVarrayConnectedToRp())) {
                        ExportGroup exportGroup = getExportGroup(rpSystem, volume.getId(), associatedVolume.getVirtualArray(), associatedVolume.getInternalSiteName());
                        if (exportGroup != null) {
                            _log.info(String.format("Removing volume [%s] from RP export group [%s].", volume.getLabel(), exportGroup.getGeneratedName()));
                        }
                        // Assuming we've found the correct Export Group for this volume, let's
                        // then add the information we need to the rpExports map.
                        addExportGroup(rpExports, exportGroup, volumeURI, storageURI);
                        break;
                    }
                }
            }
        } else if (volume.getAssociatedVolumes() != null && volume.getAssociatedVolumes().size() == 2) {
            for (String associatedVolURI : volume.getAssociatedVolumes()) {
                _log.info(String.format("VPLEX %s Volume [%s] to be removed from RP export group.", volume.getPersonality(), associatedVolURI));
                Volume associatedVolume = _dbClient.queryObject(Volume.class, URI.create(associatedVolURI));
                String internalSiteName = associatedVolume.getInternalSiteName();
                URI virtualArray = associatedVolume.getVirtualArray();
                if (!VirtualPool.vPoolSpecifiesMetroPoint(virtualPool)) {
                    // Only MetroPoint associated volumes will have the internalSiteName set. For VPLEX distributed volumes
                    // the parent (virtual volume) internal site name should be used.
                    internalSiteName = volume.getInternalSiteName();
                    // If we are using the parent volume's internal site name, we also need to use the parent volume's virtual array.
                    // Again, only in the case of MetroPoint volumes would we want to use the associated volume's virtual array.
                    virtualArray = volume.getVirtualArray();
                }
                ExportGroup exportGroup = getExportGroup(rpSystem, volume.getId(), virtualArray, internalSiteName);
                if (exportGroup != null) {
                    _log.info(String.format("Removing volume [%s] from RP export group [%s].", volume.getLabel(), exportGroup.getGeneratedName()));
                }
                // Assuming we've found the correct Export Group for this volume, let's
                // then add the information we need to the rpExports map.
                addExportGroup(rpExports, exportGroup, volumeURI, storageURI);
            }
        } else {
            _log.info(String.format("Volume [%s] to be removed from RP export group.", volume.getLabel()));
            // Find the Export Group for this regular RP volume
            ExportGroup exportGroup = getExportGroup(rpSystem, volume.getId(), volume.getVirtualArray(), volume.getInternalSiteName());
            if (exportGroup != null) {
                _log.info(String.format("Removing volume [%s] from RP export group [%s].", volume.getLabel(), exportGroup.getGeneratedName()));
            }
            // Assuming we've found the correct Export Group for this volume, let's
            // then add the information we need to the rpExports map.
            addExportGroup(rpExports, exportGroup, volumeURI, storageURI);
        }
    }
    // Generate the workflow steps for export volume removal and volume deletion
    for (URI exportURI : rpExports.keySet()) {
        _log.info(String.format("RP export group will have these volumes removed: [%s]", Joiner.on(',').join(rpExports.get(exportURI).getVolumes())));
        RPExport rpExport = rpExports.get(exportURI);
        if (!rpExport.getVolumes().isEmpty()) {
            _exportWfUtils.generateExportGroupRemoveVolumes(workflow, STEP_DV_REMOVE_VOLUME_EXPORT, waitFor, rpExport.getStorageSystem(), exportURI, rpExport.getVolumes());
            returnStep = STEP_DV_REMOVE_VOLUME_EXPORT;
        }
    }
    _log.info("Completed adding steps to remove volumes from RP export groups.");
    return returnStep;
}
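The addExportGroup helper is referenced above but not shown in this excerpt. A plausible sketch of what it does, keyed on the export group URI; the RPExport constructor signature here is an assumption, while getVolumes() and getStorageSystem() are the accessors the caller above actually relies on:

// Hypothetical reconstruction of the addExportGroup helper (not the actual
// CoprHD implementation): accumulate volume URIs per export group so that one
// removal step can later be generated per group.
private void addExportGroup(Map<URI, RPExport> rpExports, ExportGroup exportGroup, URI volumeURI, URI storageURI) {
    if (exportGroup == null) {
        return; // no matching export group was found for this volume
    }
    RPExport rpExport = rpExports.get(exportGroup.getId());
    if (rpExport == null) {
        rpExport = new RPExport(storageURI, exportGroup.getId()); // assumed constructor shape
        rpExports.put(exportGroup.getId(), rpExport);
    }
    rpExport.getVolumes().add(volumeURI);
}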
Use of com.emc.storageos.db.client.model.ExportGroup in project coprhd-controller by CoprHD.
The class RPDeviceController, method addExportRemoveVolumeSteps.
/**
 * Add the export remove volume step to the workflow
 *
 * @param workflow
 *            workflow object
 * @param rpSystem
 *            protection system
 * @param exportGroupID
 *            export group
 * @param boIDs
 *            volume/snapshot IDs
 * @throws InternalException
 */
private void addExportRemoveVolumeSteps(Workflow workflow, ProtectionSystem rpSystem, URI exportGroupID, List<URI> boIDs) throws InternalException {
    ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupID);
    String exportStep = workflow.createStepId();
    initTaskStatus(exportGroup, exportStep, Operation.Status.pending, "export remove volumes (that contain RP snapshots)");
    Map<URI, List<URI>> deviceToBlockObjects = new HashMap<URI, List<URI>>();
    for (URI snapshotID : boIDs) {
        BlockSnapshot snapshot = _dbClient.queryObject(BlockSnapshot.class, snapshotID);
        // Get the export objects corresponding to this snapshot
        List<BlockObject> objectsToRemove = getExportObjectsForBookmark(snapshot);
        for (BlockObject blockObject : objectsToRemove) {
            List<URI> blockObjects = deviceToBlockObjects.get(blockObject.getStorageController());
            if (blockObjects == null) {
                blockObjects = new ArrayList<URI>();
                deviceToBlockObjects.put(blockObject.getStorageController(), blockObjects);
            }
            blockObjects.add(blockObject.getId());
        }
    }
    for (Map.Entry<URI, List<URI>> deviceEntry : deviceToBlockObjects.entrySet()) {
        _log.info(String.format("Adding workflow step to remove RP bookmarks and associated target volumes from export. ExportGroup: %s, Storage System: %s, BlockObjects: %s", exportGroup.getId(), deviceEntry.getKey(), deviceEntry.getValue()));
        _exportWfUtils.generateExportGroupRemoveVolumes(workflow, STEP_EXPORT_REMOVE_SNAPSHOT, STEP_EXPORT_GROUP_DISABLE, deviceEntry.getKey(), exportGroupID, deviceEntry.getValue());
    }
    _log.info(String.format("Created export group remove snapshot steps in workflow: %s", exportGroup.getId()));
}
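The device-grouping loop above uses the pre-Java-8 get/null-check/put idiom. On Java 8 and later, Map.computeIfAbsent expresses the same grouping more compactly; behavior is identical (shown as an illustrative alternative, not a change to the CoprHD source):

// Equivalent grouping with Map.computeIfAbsent (Java 8+); illustrative alternative
// to the explicit null check in the method above.
for (BlockObject blockObject : objectsToRemove) {
    deviceToBlockObjects
            .computeIfAbsent(blockObject.getStorageController(), k -> new ArrayList<URI>())
            .add(blockObject.getId());
}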