use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class RPDeviceController method exportOrchestrationSteps.
/**
* @param volumeDescriptors
* - Volume descriptors
* @param rpSystemId
* - RP system
* @param taskId
* - task ID
* @return - True on success, false otherwise
* @throws InternalException
*/
public boolean exportOrchestrationSteps(List<VolumeDescriptor> volumeDescriptors, URI rpSystemId, String taskId) throws InternalException {
List<URI> volUris = VolumeDescriptor.getVolumeURIs(volumeDescriptors);
RPCGExportOrchestrationCompleter completer = new RPCGExportOrchestrationCompleter(volUris, taskId);
Workflow workflow = null;
boolean lockException = false;
Map<URI, Set<URI>> exportGroupVolumesAdded = new HashMap<URI, Set<URI>>();
exportGroupsCreated = new ArrayList<URI>();
final String COMPUTE_RESOURCE_CLUSTER = "cluster";
try {
final String workflowKey = "rpExportOrchestration";
if (!WorkflowService.getInstance().hasWorkflowBeenCreated(taskId, workflowKey)) {
// Generate the Workflow.
workflow = _workflowService.getNewWorkflow(this, EXPORT_ORCHESTRATOR_WF_NAME, true, taskId);
// The waitFor key returned by the previous step-generation call; null until the first step is added
String waitFor = null;
ProtectionSystem rpSystem = _dbClient.queryObject(ProtectionSystem.class, rpSystemId);
// Get the CG Params based on the volume descriptors
CGRequestParams params = this.getCGRequestParams(volumeDescriptors, rpSystem);
updateCGParams(params);
_log.info("Start adding RP Export Volumes steps....");
// Get the RP Exports from the CGRequestParams object
Collection<RPExport> rpExports = generateStorageSystemExportMaps(params, volumeDescriptors);
Map<String, Set<URI>> rpSiteInitiatorsMap = getRPSiteInitiators(rpSystem, rpExports);
// Acquire all the RP lock keys needed for export before we start assembling the export groups.
acquireRPLockKeysForExport(taskId, rpExports, rpSiteInitiatorsMap);
// For each RP export, reuse the existing export group if there is one, or create a new one.
for (RPExport rpExport : rpExports) {
URI storageSystemURI = rpExport.getStorageSystem();
String internalSiteName = rpExport.getRpSite();
URI varrayURI = rpExport.getVarray();
List<URI> volumes = rpExport.getVolumes();
List<URI> initiatorSet = new ArrayList<URI>();
String rpSiteName = (rpSystem.getRpSiteNames() != null) ? rpSystem.getRpSiteNames().get(internalSiteName) : internalSiteName;
StorageSystem storageSystem = _dbClient.queryObject(StorageSystem.class, storageSystemURI);
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, varrayURI);
_log.info("--------------------");
_log.info(String.format("RP Export: StorageSystem = [%s] RPSite = [%s] VirtualArray = [%s]", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
boolean isJournalExport = rpExport.getIsJournalExport();
String exportGroupGeneratedName = RPHelper.generateExportGroupName(rpSystem, storageSystem, internalSiteName, varray, isJournalExport);
// Setup the export group - we may or may not need to create it, but we need to have everything ready in case we do
ExportGroup exportGroup = RPHelper.createRPExportGroup(exportGroupGeneratedName, varray, _dbClient.queryObject(Project.class, params.getProject()), 0, isJournalExport);
// Get the initiators of the RP Cluster (all of the RPAs on one side of a configuration)
Map<String, Map<String, String>> rpaWWNs = RPHelper.getRecoverPointClient(rpSystem).getInitiatorWWNs(internalSiteName);
if (rpaWWNs == null || rpaWWNs.isEmpty()) {
throw DeviceControllerExceptions.recoverpoint.noInitiatorsFoundOnRPAs();
}
// Convert to initiator object
List<Initiator> initiators = new ArrayList<Initiator>();
for (String rpaId : rpaWWNs.keySet()) {
for (Map.Entry<String, String> rpaWWN : rpaWWNs.get(rpaId).entrySet()) {
Initiator initiator = ExportUtils.getInitiator(rpaWWN.getKey(), _dbClient);
initiators.add(initiator);
}
}
// We need to find and distill only those RP initiators that correspond to the network of the storage
// system, and that network must have front-end ports from the storage system.
// In certain lab environments, it's quite possible that there are two networks: one for the storage
// system FE ports and one for the BE ports. In such configs, RP initiators will be spread across those
// two networks. The RP controller does not care about storage system back-end ports, so we will ignore
// initiators that are connected to a network that has only storage system back-end port connectivity.
Map<URI, Set<Initiator>> rpNetworkToInitiatorsMap = new HashMap<URI, Set<Initiator>>();
Set<URI> rpSiteInitiatorUris = rpSiteInitiatorsMap.get(internalSiteName);
if (rpSiteInitiatorUris != null) {
for (URI rpSiteInitiatorUri : rpSiteInitiatorUris) {
Initiator rpSiteInitiator = _dbClient.queryObject(Initiator.class, rpSiteInitiatorUri);
URI rpInitiatorNetworkURI = getInitiatorNetwork(exportGroup, rpSiteInitiator);
if (rpInitiatorNetworkURI != null) {
if (rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI) == null) {
rpNetworkToInitiatorsMap.put(rpInitiatorNetworkURI, new HashSet<Initiator>());
}
rpNetworkToInitiatorsMap.get(rpInitiatorNetworkURI).add(rpSiteInitiator);
_log.info(String.format("RP Initiator [%s] found on network: [%s]", rpSiteInitiator.getInitiatorPort(), rpInitiatorNetworkURI.toASCIIString()));
} else {
_log.info(String.format("RP Initiator [%s] was not found on any network. Excluding from automated exports", rpSiteInitiator.getInitiatorPort()));
}
}
}
// Compute numPaths. This is how it's done:
// We know the RP site and the Network/TransportZone it is on.
// Determine all the storage ports for the storage array for all the networks they are on.
// Next, if we find the network for the RP site in the above list, return all the storage ports
// corresponding to that.
// For RP we will try and use as many Storage ports as possible.
Map<URI, List<StoragePort>> initiatorPortMap = getInitiatorPortsForArray(rpNetworkToInitiatorsMap, storageSystemURI, varrayURI, rpSiteName);
for (URI networkURI : initiatorPortMap.keySet()) {
for (StoragePort storagePort : initiatorPortMap.get(networkURI)) {
_log.info(String.format("Network : [%s] - Port : [%s]", networkURI.toString(), storagePort.getLabel()));
}
}
int numPaths = computeNumPaths(initiatorPortMap, varrayURI, storageSystem);
_log.info("Total paths = " + numPaths);
// Stems from the comment above: keep only those RP initiators whose network also has storage ports from the array.
List<Initiator> initiatorList = new ArrayList<Initiator>();
for (URI rpNetworkURI : rpNetworkToInitiatorsMap.keySet()) {
if (initiatorPortMap.containsKey(rpNetworkURI)) {
initiatorList.addAll(rpNetworkToInitiatorsMap.get(rpNetworkURI));
}
}
for (Initiator initiator : initiatorList) {
initiatorSet.add(initiator.getId());
}
// See if the export group already exists
ExportGroup exportGroupInDB = exportGroupExistsInDB(exportGroup);
boolean addExportGroupToDB = false;
if (exportGroupInDB != null) {
exportGroup = exportGroupInDB;
// If the export already exists, check to see if any of the volumes have already been exported.
// No need to re-export volumes.
List<URI> volumesToRemove = new ArrayList<URI>();
for (URI volumeURI : volumes) {
if (exportGroup.getVolumes() != null && !exportGroup.getVolumes().isEmpty() && exportGroup.getVolumes().containsKey(volumeURI.toString())) {
_log.info(String.format("Volume [%s] already exported to export group [%s], " + "it will be not be re-exported", volumeURI.toString(), exportGroup.getGeneratedName()));
volumesToRemove.add(volumeURI);
}
}
// Remove volumes if they have already been exported
if (!volumesToRemove.isEmpty()) {
volumes.removeAll(volumesToRemove);
}
// If no volumes are left to export to this export group, nothing else needs to be done here.
if (volumes.isEmpty()) {
_log.info(String.format("No volumes needed to be exported to export group [%s], continue", exportGroup.getGeneratedName()));
continue;
}
} else {
addExportGroupToDB = true;
}
// Add volumes to the export group
Map<URI, Integer> volumesToAdd = new HashMap<URI, Integer>();
for (URI volumeID : volumes) {
exportGroup.addVolume(volumeID, ExportGroup.LUN_UNASSIGNED);
volumesToAdd.put(volumeID, ExportGroup.LUN_UNASSIGNED);
}
// Keep track of volumes added to export group
if (!volumesToAdd.isEmpty()) {
exportGroupVolumesAdded.put(exportGroup.getId(), volumesToAdd.keySet());
}
// Add the compute resource (host or cluster), if one was specified, to the export group for this volume.
if (rpExport.getComputeResource() != null) {
URI computeResource = rpExport.getComputeResource();
_log.info(String.format("RP Export: ComputeResource : %s", computeResource.toString()));
if (computeResource.toString().toLowerCase().contains(COMPUTE_RESOURCE_CLUSTER)) {
Cluster cluster = _dbClient.queryObject(Cluster.class, computeResource);
exportGroup.addCluster(cluster);
} else {
Host host = _dbClient.queryObject(Host.class, rpExport.getComputeResource());
exportGroup.addHost(host);
}
}
// Persist the export group
if (addExportGroupToDB) {
exportGroup.addInitiators(initiatorSet);
exportGroup.setNumPaths(numPaths);
_dbClient.createObject(exportGroup);
// Keep track of newly created EGs in case of rollback
exportGroupsCreated.add(exportGroup.getId());
} else {
_dbClient.updateObject(exportGroup);
}
// If the export group already exists, add the volumes to it; otherwise create a brand new export group.
StringBuilder buffer = new StringBuilder();
buffer.append(String.format(DASHED_NEWLINE));
if (!addExportGroupToDB) {
buffer.append(String.format("Adding volumes to existing Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
buffer.append(String.format("Export Group name is : [%s]%n", exportGroup.getGeneratedName()));
buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
buffer.append(String.format(DASHED_NEWLINE));
_log.info(buffer.toString());
waitFor = _exportWfUtils.generateExportGroupAddVolumes(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd);
_log.info("Added Export Group add volumes step in workflow");
} else {
buffer.append(String.format("Creating new Export Group for Storage System [%s], RP Site [%s], Virtual Array [%s]%n", storageSystem.getLabel(), rpSiteName, varray.getLabel()));
buffer.append(String.format("Export Group name is: [%s]%n", exportGroup.getGeneratedName()));
buffer.append(String.format("Export Group will have these initiators: [%s]%n", Joiner.on(',').join(initiatorSet)));
buffer.append(String.format("Export Group will have these volumes added: [%s]%n", Joiner.on(',').join(volumes)));
buffer.append(String.format(DASHED_NEWLINE));
_log.info(buffer.toString());
String exportStep = workflow.createStepId();
initTaskStatus(exportGroup, exportStep, Operation.Status.pending, "create export");
waitFor = _exportWfUtils.generateExportGroupCreateWorkflow(workflow, STEP_EXPORT_GROUP, waitFor, storageSystemURI, exportGroup.getId(), volumesToAdd, initiatorSet);
_log.info("Added Export Group create step in workflow. New Export Group Id: " + exportGroup.getId());
}
}
String successMessage = "Export orchestration completed successfully";
// Finish up and execute the plan.
// The Workflow will handle the TaskCompleter
Object[] callbackArgs = new Object[] { volUris };
workflow.executePlan(completer, successMessage, new WorkflowCallback(), callbackArgs, null, null);
// Mark this workflow as created/executed so we don't do it again on retry/resume
WorkflowService.getInstance().markWorkflowBeenCreated(taskId, workflowKey);
}
} catch (LockRetryException ex) {
/**
 * This catch block marks the current workflow as completed so that the lock retry will not hit an
 * exception while creating a new workflow with the same task id.
 */
_log.warn(String.format("Lock retry exception key: %s remaining time %d", ex.getLockIdentifier(), ex.getRemainingWaitTimeSeconds()));
if (workflow != null && !NullColumnValueGetter.isNullURI(workflow.getWorkflowURI()) && workflow.getWorkflowState() == WorkflowState.CREATED) {
com.emc.storageos.db.client.model.Workflow wf = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow.getWorkflowURI());
if (!wf.getCompleted()) {
_log.error("Marking the status to completed for the newly created workflow {}", wf.getId());
wf.setCompleted(true);
_dbClient.updateObject(wf);
}
}
throw ex;
} catch (Exception ex) {
_log.error("Could not create volumes: " + volUris, ex);
// Rollback ViPR level RP export group changes
rpExportGroupRollback();
if (workflow != null) {
_workflowService.releaseAllWorkflowLocks(workflow);
}
String opName = ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME.getName();
ServiceError serviceError = null;
if (lockException) {
serviceError = DeviceControllerException.errors.createVolumesAborted(volUris.toString(), ex);
} else {
serviceError = DeviceControllerException.errors.createVolumesFailed(volUris.toString(), opName, ex);
}
completer.error(_dbClient, _locker, serviceError);
return false;
}
_log.info("End adding RP Export Volumes steps.");
return true;
}
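The exportGroupsCreated list is populated above purely as bookkeeping for the catch block, which undoes ViPR-level changes through rpExportGroupRollback(). That helper is not shown on this page; below is a minimal, hypothetical sketch of what it could do with that list, using only the DbClient calls that appear in these snippets plus a markForDeletion call assumed to be available. It is illustrative, not the project's actual implementation.

// Hypothetical sketch only; the real rpExportGroupRollback() in RPDeviceController is not shown above.
// Assumes exportGroupsCreated is the same instance field populated by exportOrchestrationSteps(), and
// that DbClient exposes markForDeletion(...) in addition to the queryObject/updateObject calls used above.
private void rpExportGroupRollback() {
    if (exportGroupsCreated == null) {
        return;
    }
    for (URI exportGroupURI : exportGroupsCreated) {
        ExportGroup exportGroup = _dbClient.queryObject(ExportGroup.class, exportGroupURI);
        // Only newly created, still-active export groups need to be cleaned up.
        if (exportGroup != null && !exportGroup.getInactive()) {
            _log.info(String.format("Rolling back newly created export group [%s]", exportGroup.getGeneratedName()));
            _dbClient.markForDeletion(exportGroup);
        }
    }
}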
use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class VcenterControllerImpl method createOrUpdateVcenterCluster.
private void createOrUpdateVcenterCluster(boolean createCluster, AsyncTask task, URI clusterUri, URI[] addHostUris, URI[] removeHostUris, URI[] volumeUris) throws InternalException {
TaskCompleter completer = null;
try {
_log.info("createOrUpdateVcenterCluster " + createCluster + " " + task + " " + clusterUri + " " + addHostUris + " " + removeHostUris);
if (task == null) {
_log.error("AsyncTask is null");
throw new Exception("AsyncTask is null");
}
URI vcenterDataCenterId = task._id;
VcenterDataCenter vcenterDataCenter = _dbClient.queryObject(VcenterDataCenter.class, vcenterDataCenterId);
if (clusterUri == null) {
_log.error("Cluster URI is null");
throw new Exception("Cluster URI is null");
}
Cluster cluster = _dbClient.queryObject(Cluster.class, clusterUri);
Vcenter vcenter = _dbClient.queryObject(Vcenter.class, vcenterDataCenter.getVcenter());
_log.info("Request to create or update cluster " + vcenter.getIpAddress() + "/" + vcenterDataCenter.getLabel() + "/" + cluster.getLabel());
Collection<Host> addHosts = new ArrayList<Host>();
if (addHostUris == null || addHostUris.length == 0) {
_log.info("Add host URIs is null or empty - Cluster will be created without hosts");
} else {
for (URI hostUri : addHostUris) {
_log.info("createOrUpdateVcenterCluster " + clusterUri + " with add host " + hostUri);
}
addHosts = _dbClient.queryObject(Host.class, addHostUris);
}
Collection<Host> removeHosts = new ArrayList<Host>();
if (removeHostUris == null || removeHostUris.length == 0) {
_log.info("Remove host URIs is null or empty - Cluster will have no removed hosts");
} else {
for (URI hostUri : removeHostUris) {
_log.info("createOrUpdateVcenterCluster " + clusterUri + " with remove host " + hostUri);
}
removeHosts = _dbClient.queryObject(Host.class, removeHostUris);
}
Collection<Volume> volumes = new ArrayList<Volume>();
if (volumeUris == null || volumeUris.length == 0) {
_log.info("Volume URIs is null or empty - Cluster will be created without datastores");
} else {
for (URI volumeUri : volumeUris) {
_log.info("createOrUpdateVcenterCluster " + clusterUri + " with volume " + volumeUri);
}
volumes = _dbClient.queryObject(Volume.class, volumeUris);
}
completer = new VcenterClusterCompleter(vcenterDataCenterId, task._opId, OperationTypeEnum.CREATE_UPDATE_VCENTER_CLUSTER, "VCENTER_CONTROLLER");
Workflow workflow = _workflowService.getNewWorkflow(this, "CREATE_UPDATE_VCENTER_CLUSTER_WORKFLOW", true, task._opId);
String clusterStep = workflow.createStep("CREATE_UPDATE_VCENTER_CLUSTER_STEP", String.format("vCenter cluster operation in vCenter datacenter %s", vcenterDataCenterId), null, vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(), new Workflow.Method("createUpdateVcenterClusterOperation", createCluster, vcenter.getId(), vcenterDataCenter.getId(), cluster.getId()), null, null);
String lastStep = clusterStep;
if (!removeHosts.isEmpty()) {
for (Host host : removeHosts) {
String hostStep = workflow.createStep("VCENTER_CLUSTER_REMOVE_HOST", String.format("vCenter cluster remove host operation %s", host.getId()), clusterStep, vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(), new Workflow.Method("vcenterClusterRemoveHostOperation", vcenter.getId(), vcenterDataCenter.getId(), cluster.getId(), host.getId()), null, null);
// add-host steps will wait on the last of these remove-host steps
lastStep = hostStep;
}
}
if (!addHosts.isEmpty()) {
for (Host host : addHosts) {
String hostStep = workflow.createStep("VCENTER_CLUSTER_ADD_HOST", String.format("vCenter cluster add host operation %s", host.getId()), lastStep, vcenterDataCenterId, vcenterDataCenterId.toString(), this.getClass(), new Workflow.Method("vcenterClusterAddHostOperation", vcenter.getId(), vcenterDataCenter.getId(), cluster.getId(), host.getId()), null, null);
}
}
workflow.executePlan(completer, "Success");
} catch (Exception e) {
_log.error("createOrUpdateVcenterCluster caught an exception.", e);
ServiceError serviceError = DeviceControllerException.errors.jobFailed(e);
completer.error(_dbClient, serviceError);
}
}
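Each Workflow.Method above only names its step handler by string; the Workflow engine later dispatches that call back onto this controller. A skeleton of what the add-host handler could look like is shown below, under the assumption that the engine appends the step id as a final argument and that the handler mirrors the VcenterApiClient setup/teardown pattern visible in removeVcenterCluster() and exitMaintenanceMode() below. The actual client call for adding a host is not shown on this page, so it is left as a commented placeholder, and reporting step completion back to the Workflow service is omitted.

// Illustrative skeleton, not the project's actual implementation. Assumes the Workflow engine invokes
// "vcenterClusterAddHostOperation" reflectively with the arguments listed in Workflow.Method plus an
// appended step id, and reuses only the VcenterApiClient calls already shown on this page.
public void vcenterClusterAddHostOperation(URI vcenterId, URI vcenterDataCenterId, URI clusterId, URI hostId, String stepId) {
    VcenterApiClient vcenterApiClient = null;
    try {
        Vcenter vcenter = _dbClient.queryObject(Vcenter.class, vcenterId);
        VcenterDataCenter vcenterDataCenter = _dbClient.queryObject(VcenterDataCenter.class, vcenterDataCenterId);
        Cluster cluster = _dbClient.queryObject(Cluster.class, clusterId);
        Host host = _dbClient.queryObject(Host.class, hostId);
        vcenterApiClient = new VcenterApiClient(_coordinator.getPropertyInfo());
        vcenterApiClient.setup(vcenter.getIpAddress(), vcenter.getUsername(), vcenter.getPassword(), vcenter.getPortNumber());
        // Placeholder: the VcenterApiClient method that adds a host to a cluster is not part of these snippets.
        // vcenterApiClient.addHost(vcenterDataCenter.getLabel(), cluster.getExternalId(), host.getHostName());
        // A real step handler would mark the workflow step as succeeded here.
    } catch (Exception e) {
        _log.error("vcenterClusterAddHostOperation exception " + e);
        // A real step handler would mark the workflow step as failed instead of simply rethrowing.
        throw VcenterControllerException.exceptions.unexpectedException(e.getLocalizedMessage(), e);
    } finally {
        if (vcenterApiClient != null) {
            vcenterApiClient.destroy();
        }
    }
}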
use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class VcenterControllerImpl method removeVcenterCluster.
@Override
public void removeVcenterCluster(URI datacenterUri, URI clusterUri) throws InternalException {
VcenterApiClient vcenterApiClient = null;
try {
VcenterDataCenter vcenterDataCenter = _dbClient.queryObject(VcenterDataCenter.class, datacenterUri);
Cluster cluster = _dbClient.queryObject(Cluster.class, clusterUri);
Vcenter vcenter = _dbClient.queryObject(Vcenter.class, vcenterDataCenter.getVcenter());
_log.info("Request to remove cluster " + vcenter.getLabel() + "/" + vcenterDataCenter.getLabel() + "/" + cluster.getLabel());
vcenterApiClient = new VcenterApiClient(_coordinator.getPropertyInfo());
vcenterApiClient.setup(vcenter.getIpAddress(), vcenter.getUsername(), vcenter.getPassword(), vcenter.getPortNumber());
vcenterApiClient.removeCluster(vcenterDataCenter.getLabel(), cluster.getExternalId());
} catch (VcenterObjectConnectionException e) {
throw VcenterControllerException.exceptions.objectConnectionException(e.getLocalizedMessage(), e);
} catch (VcenterObjectNotFoundException e) {
throw VcenterControllerException.exceptions.objectNotFoundException(e.getLocalizedMessage(), e);
} catch (VcenterServerConnectionException e) {
throw VcenterControllerException.exceptions.serverConnectionException(e.getLocalizedMessage(), e);
} catch (Exception e) {
_log.error("removeVcenterCluster exception " + e);
throw VcenterControllerException.exceptions.unexpectedException(e.getLocalizedMessage(), e);
} finally {
if (vcenterApiClient != null) {
vcenterApiClient.destroy();
}
}
}
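removeVcenterCluster() above and exitMaintenanceMode() below share the same client lifecycle: construct a VcenterApiClient from coordinator properties, setup() against the vCenter, make one call, and destroy() in a finally block. If many such operations were needed, that boilerplate could be pulled into a small helper; the sketch below is one possible shape, assuming nothing beyond the VcenterApiClient calls already shown. It is a refactoring illustration, not code from the project.

// Refactoring sketch only, not project code. Uses only the VcenterApiClient lifecycle calls shown above.
private interface VcenterClientCallable<T> {
    T call(VcenterApiClient client) throws Exception;
}

private <T> T withVcenterClient(Vcenter vcenter, VcenterClientCallable<T> callable) throws Exception {
    VcenterApiClient vcenterApiClient = null;
    try {
        vcenterApiClient = new VcenterApiClient(_coordinator.getPropertyInfo());
        vcenterApiClient.setup(vcenter.getIpAddress(), vcenter.getUsername(), vcenter.getPassword(), vcenter.getPortNumber());
        return callable.call(vcenterApiClient);
    } finally {
        // Always release the client, mirroring the finally blocks in the methods above and below.
        if (vcenterApiClient != null) {
            vcenterApiClient.destroy();
        }
    }
}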
use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class VcenterControllerImpl method exitMaintenanceMode.
@Override
public void exitMaintenanceMode(URI datacenterUri, URI clusterUri, URI hostUri) throws InternalException {
VcenterApiClient vcenterApiClient = null;
try {
Host host = _dbClient.queryObject(Host.class, hostUri);
VcenterDataCenter vcenterDataCenter = _dbClient.queryObject(VcenterDataCenter.class, datacenterUri);
Cluster cluster = _dbClient.queryObject(Cluster.class, clusterUri);
Vcenter vcenter = _dbClient.queryObject(Vcenter.class, vcenterDataCenter.getVcenter());
_log.info("Request to exit maintenance mode for " + vcenter.getLabel() + "/" + vcenterDataCenter.getLabel() + "/" + cluster.getLabel() + "/" + host.getHostName());
vcenterApiClient = new VcenterApiClient(_coordinator.getPropertyInfo());
vcenterApiClient.setup(vcenter.getIpAddress(), vcenter.getUsername(), vcenter.getPassword(), vcenter.getPortNumber());
vcenterApiClient.exitMaintenanceMode(vcenterDataCenter.getLabel(), cluster.getExternalId(), host.getHostName());
} catch (VcenterObjectConnectionException e) {
throw VcenterControllerException.exceptions.objectConnectionException(e.getLocalizedMessage(), e);
} catch (VcenterObjectNotFoundException e) {
throw VcenterControllerException.exceptions.objectNotFoundException(e.getLocalizedMessage(), e);
} catch (VcenterServerConnectionException e) {
throw VcenterControllerException.exceptions.serverConnectionException(e.getLocalizedMessage(), e);
} catch (Exception e) {
_log.error("exitMaintenanceMode exception " + e);
throw VcenterControllerException.exceptions.unexpectedException(e.getLocalizedMessage(), e);
} finally {
if (vcenterApiClient != null) {
vcenterApiClient.destroy();
}
}
}
use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class VcenterDiscoveryAdapter method deleteDatacenters.
private void deleteDatacenters(Iterable<VcenterDataCenter> datacenters, List<URI> deletedHosts, List<URI> deletedClusters) {
for (VcenterDataCenter datacenter : datacenters) {
boolean containsHosts = false;
boolean clustersInUse = false;
for (Cluster cluster : getClusters(datacenter)) {
deletedClusters.add(cluster.getId());
}
for (Host host : getHosts(datacenter)) {
deletedHosts.add(host.getId());
containsHosts = true;
}
for (Cluster cluster : getClusters(datacenter)) {
URI clusterId = cluster.getId();
List<URI> hostUris = ComputeSystemHelper.getChildrenUris(dbClient, clusterId, Host.class, "cluster");
if (hostUris.isEmpty() && !ComputeSystemHelper.isClusterInExport(dbClient, clusterId) && EventUtils.findAffectedResourcePendingEvents(dbClient, clusterId).isEmpty()) {
info("Deactivating Cluster: " + clusterId);
ComputeSystemHelper.doDeactivateCluster(dbClient, cluster);
} else {
info("Unable to delete cluster " + clusterId);
clustersInUse = true;
}
}
// delete datacenters that don't contain any clusters or hosts, don't have any exports, and don't have any pending events
if (!containsHosts && !clustersInUse && !ComputeSystemHelper.isDataCenterInUse(dbClient, datacenter.getId()) && EventUtils.findAffectedResourcePendingEvents(dbClient, datacenter.getId()).isEmpty()) {
info("Deactivating Datacenter: " + datacenter.getId());
ComputeSystemHelper.doDeactivateVcenterDataCenter(dbClient, datacenter);
} else {
info("Unable to delete datacenter " + datacenter.getId());
}
}
}
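The decision to deactivate a cluster in deleteDatacenters() hinges on three conditions: the cluster has no child hosts, is not referenced by any export, and has no pending events. A small predicate that isolates that test, using only the helper calls already shown, might look like the sketch below (illustrative, not the project's code):

// Sketch only: extracts the cluster-deactivation test from deleteDatacenters() into a helper predicate.
// Relies only on the ComputeSystemHelper and EventUtils calls already used above.
private boolean canDeactivateCluster(URI clusterId) {
    List<URI> hostUris = ComputeSystemHelper.getChildrenUris(dbClient, clusterId, Host.class, "cluster");
    return hostUris.isEmpty()
            && !ComputeSystemHelper.isClusterInExport(dbClient, clusterId)
            && EventUtils.findAffectedResourcePendingEvents(dbClient, clusterId).isEmpty();
}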