use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class VirtualArrayService method getOtherSearchResults.
/**
* Finds the virtual arrays for the initiator port with the passed
* identifier and returns the id, name, and self link for those virtual
* arrays. This API only supports Fibre Channel and iSCSI initiator ports,
* and the passed port identifier must be the WWN or IQN of the port.
*
* Note that in order for an initiator to be associated with any virtual
* arrays, it must be in an active network. The virtual arrays for the passed
* initiator are those active virtual arrays associated with the storage
* ports in the initiator's active network. If the initiator is not in a
* network, an empty list is returned.
*
* Parameter 'initiator_port': The identifier of the initiator port.
*
* @param parameters The search parameters.
* @param authorized Whether or not the caller is authorized.
*
* @return The search results specifying the virtual arrays for the
* initiator identified in the passed search parameters.
*/
@Override
protected SearchResults getOtherSearchResults(Map<String, List<String>> parameters, boolean authorized) {
SearchResults result = new SearchResults();
String[] searchCriteria = { SEARCH_INITIATOR_PORT, SEARCH_HOST, SEARCH_CLUSTER };
validateSearchParameters(parameters, searchCriteria);
Set<String> varrayIds = new HashSet<String>();
for (Map.Entry<String, List<String>> entry : parameters.entrySet()) {
if (entry.getKey().equals(SEARCH_INITIATOR_PORT)) {
String initiatorId = parameters.get(SEARCH_INITIATOR_PORT).get(0);
// Validate the user passed a value for the initiator port.
ArgValidator.checkFieldNotEmpty(initiatorId, SEARCH_INITIATOR_PORT);
// Validate the format of the passed initiator port.
if (!EndpointUtility.isValidEndpoint(initiatorId, EndpointType.ANY)) {
throw APIException.badRequests.initiatorPortNotValid();
}
_log.info("Searching for virtual arrays for initiator {}", initiatorId);
varrayIds.addAll(ConnectivityUtil.getInitiatorVarrays(initiatorId, _dbClient));
break;
} else if (entry.getKey().equals(SEARCH_HOST)) {
// find and validate host
String hostId = parameters.get(SEARCH_HOST).get(0);
URI hostUri = URI.create(hostId);
ArgValidator.checkFieldNotEmpty(hostId, SEARCH_HOST);
Host host = queryObject(Host.class, hostUri, false);
verifyAuthorizedInTenantOrg(host.getTenant(), getUserFromContext());
_log.info("looking for virtual arrays connected to host " + host.getHostName());
varrayIds.addAll(getVarraysForHost(hostUri));
break;
} else if (entry.getKey().equals(SEARCH_CLUSTER)) {
// find and validate cluster
String clusterId = parameters.get(SEARCH_CLUSTER).get(0);
URI clusterUri = URI.create(clusterId);
ArgValidator.checkFieldNotEmpty(clusterId, SEARCH_CLUSTER);
Cluster cluster = queryObject(Cluster.class, clusterUri, false);
verifyAuthorizedInTenantOrg(cluster.getTenant(), getUserFromContext());
_log.info("looking for virtual arrays connected to cluster " + cluster.getLabel());
List<Set<String>> hostVarraySets = new ArrayList<Set<String>>();
List<NamedElementQueryResultList.NamedElement> dataObjects = listChildren(clusterUri, Host.class, "label", "cluster");
for (NamedElementQueryResultList.NamedElement dataObject : dataObjects) {
Set<String> hostVarrays = getVarraysForHost(dataObject.getId());
hostVarraySets.add(hostVarrays);
}
boolean first = true;
for (Set<String> varrays : hostVarraySets) {
if (first) {
varrayIds.addAll(varrays);
first = false;
} else {
varrayIds.retainAll(varrays);
}
}
break;
}
}
// For each virtual array in the set create a search result
// and add it to the search results list.
List<SearchResultResourceRep> searchResultList = new ArrayList<SearchResultResourceRep>();
if (!varrayIds.isEmpty()) {
for (String varrayId : varrayIds) {
URI varrayURI = URI.create(varrayId);
VirtualArray varray = _dbClient.queryObject(VirtualArray.class, varrayURI);
// Filter out those that are inactive or not accessible to the user.
if (varray == null || varray.getInactive()) {
_log.info("Could not find virtual array {} in the database, or " + "the virtual array is inactive", varrayURI);
continue;
}
if (!authorized) {
if (!_permissionsHelper.tenantHasUsageACL(URI.create(getUserFromContext().getTenantId()), varray)) {
_log.info("Virtual array {} is not accessible.", varrayURI);
continue;
}
}
RestLinkRep selfLink = new RestLinkRep("self", RestLinkFactory.newLink(getResourceType(), varrayURI));
SearchResultResourceRep searchResult = new SearchResultResourceRep(varrayURI, selfLink, varray.getLabel());
searchResultList.add(searchResult);
}
}
result.setResource(searchResultList);
return result;
}
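The SEARCH_CLUSTER branch above connects a cluster only to the virtual arrays shared by every one of its hosts, by intersecting the per-host virtual array sets. The helper below isolates that intersection step as a minimal, self-contained sketch; it uses only java.util types, and the class and method names are illustrative rather than part of CoprHD.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public final class VarraySetIntersection {

    /**
     * Returns the intersection of the per-host virtual array id sets.
     * An empty input list yields an empty result, matching the behavior
     * of the loop in the cluster branch above.
     */
    static Set<String> intersect(List<Set<String>> hostVarraySets) {
        Set<String> result = new HashSet<String>();
        boolean first = true;
        for (Set<String> varrays : hostVarraySets) {
            if (first) {
                result.addAll(varrays);
                first = false;
            } else {
                result.retainAll(varrays);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<Set<String>> perHost = new ArrayList<Set<String>>();
        perHost.add(new HashSet<String>(Arrays.asList("varray-A", "varray-B")));
        perHost.add(new HashSet<String>(Arrays.asList("varray-B", "varray-C")));
        // Prints [varray-B]: the only virtual array visible to both hosts.
        System.out.println(intersect(perHost));
    }
}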
use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class VmaxMaskingOrchestrator method mapInitiatorsToComputeResource.
/**
* This function processes the initiatorURIs and returns a mapping of String
* host or cluster resource reference to a list of Initiator URIs.
*
* This is the default implementation; it groups the initiators by their
* host reference.
*
* @param exportGroup [in] - ExportGroup object to examine
* @param initiatorURIs [in] - Initiator URIs
* @return Map of String:computeResourceName to List of Initiator URIs
*/
@Override
protected Map<String, List<URI>> mapInitiatorsToComputeResource(ExportGroup exportGroup, Collection<URI> initiatorURIs) {
Map<String, List<URI>> result = new HashMap<String, List<URI>>();
if (exportGroup.forCluster()) {
Cluster singleCluster = null;
if (exportGroup.getClusters() != null && exportGroup.getClusters().size() == 1) {
String clusterUriString = exportGroup.getClusters().iterator().next();
singleCluster = _dbClient.queryObject(Cluster.class, URI.create(clusterUriString));
}
for (URI newExportMaskInitiator : initiatorURIs) {
Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
if (initiator != null) {
String clusterName = getClusterName(singleCluster, initiator);
List<URI> initiatorSet = result.get(clusterName);
if (initiatorSet == null) {
initiatorSet = new ArrayList<URI>();
result.put(clusterName, initiatorSet);
}
initiatorSet.add(newExportMaskInitiator);
_log.info(String.format("cluster = %s, initiators to add to map: %s, ", clusterName, newExportMaskInitiator.toString()));
}
}
} else {
// Bogus URI for those initiators without a host object; helps maintain a well-formed map.
// We want to bunch up the non-host initiators together.
URI fillerHostURI = NullColumnValueGetter.getNullURI();
for (URI newExportMaskInitiator : initiatorURIs) {
Initiator initiator = _dbClient.queryObject(Initiator.class, newExportMaskInitiator);
// Not all initiators have hosts, be sure to handle either case.
URI hostURI = initiator.getHost();
if (hostURI == null) {
hostURI = fillerHostURI;
}
List<URI> initiatorSet = result.get(hostURI.toString());
if (initiatorSet == null) {
initiatorSet = new ArrayList<URI>();
result.put(hostURI.toString(), initiatorSet);
}
initiatorSet.add(initiator.getId());
_log.info(String.format("host = %s, initiators to add to map: %d, ", hostURI, result.get(hostURI.toString()).size()));
}
}
return result;
}
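The grouping above relies on the classic get-then-put idiom to build a list per host or cluster key. On Java 8 and later the same grouping can be written with Map.computeIfAbsent; the sketch below is a standalone illustration with plain string keys standing in for the CoprHD Initiator model.

import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class InitiatorGroupingSketch {

    /** Groups initiator URIs by an arbitrary compute-resource key (host or cluster name). */
    static Map<String, List<URI>> groupByKey(Map<URI, String> initiatorToKey) {
        Map<String, List<URI>> result = new HashMap<>();
        for (Map.Entry<URI, String> entry : initiatorToKey.entrySet()) {
            // computeIfAbsent replaces the explicit get / null-check / put sequence used above.
            result.computeIfAbsent(entry.getValue(), k -> new ArrayList<>()).add(entry.getKey());
        }
        return result;
    }

    public static void main(String[] args) {
        Map<URI, String> initiatorToKey = new HashMap<>();
        initiatorToKey.put(URI.create("urn:example:initiator:1"), "host-1");
        initiatorToKey.put(URI.create("urn:example:initiator:2"), "host-1");
        initiatorToKey.put(URI.create("urn:example:initiator:3"), "host-2");
        // Prints a map with host-1 mapped to two URIs and host-2 mapped to one.
        System.out.println(groupByKey(initiatorToKey));
    }
}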
use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class VPlexDeviceController method createVirtualVolumes.
/**
* Do the creation of a VPlex Virtual Volume. This is called as a Workflow Step.
* NOTE: The parameters here must match createVirtualVolumesMethod above (except stepId).
*
* @param vplexURI
* -- URI of the VPlex StorageSystem
* @param vplexVolumeURIs
* -- URI of the VPlex volumes to be created. They must contain
* associatedVolumes (URI of the underlying Storage Volumes).
* @param computeResourceMap
* A Map of the compute resource for each volume.
* @param stepId
* - The stepId used for completion.
* @throws WorkflowException
*/
public void createVirtualVolumes(URI vplexURI, List<URI> vplexVolumeURIs, Map<URI, URI> computeResourceMap, String stepId) throws WorkflowException {
List<List<VolumeInfo>> rollbackData = new ArrayList<List<VolumeInfo>>();
List<URI> createdVplexVolumeURIs = new ArrayList<URI>();
try {
WorkflowStepCompleter.stepExecuting(stepId);
// Get the API client.
StorageSystem vplex = getDataObject(StorageSystem.class, vplexURI, _dbClient);
VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplex, _dbClient);
// Make a map of StorageSystem ids to Storage System
Map<URI, StorageSystem> storageMap = new HashMap<URI, StorageSystem>();
// Make a map of Virtual Volumes to Storage Volumes.
Map<Volume, List<Volume>> volumeMap = new HashMap<Volume, List<Volume>>();
// Make a string buffer for volume labels
StringBuffer volumeLabels = new StringBuffer();
// List of storage system Guids
List<String> storageSystemGuids = new ArrayList<String>();
Boolean isDistributedVolume = false;
Map<String, Set<URI>> clusterVarrayMap = new HashMap<>();
for (URI vplexVolumeURI : vplexVolumeURIs) {
Volume vplexVolume = getDataObject(Volume.class, vplexVolumeURI, _dbClient);
URI vplexVolumeVarrayURI = vplexVolume.getVirtualArray();
String clusterId = ConnectivityUtil.getVplexClusterForVarray(vplexVolumeVarrayURI, vplexVolume.getStorageController(), _dbClient);
if (clusterVarrayMap.containsKey(clusterId)) {
clusterVarrayMap.get(clusterId).add(vplexVolumeVarrayURI);
} else {
Set<URI> varraysForCluster = new HashSet<>();
varraysForCluster.add(vplexVolumeVarrayURI);
clusterVarrayMap.put(clusterId, varraysForCluster);
}
volumeLabels.append(vplexVolume.getLabel()).append(" ");
volumeMap.put(vplexVolume, new ArrayList<Volume>());
// Find the underlying Storage Volumes
StringSet associatedVolumes = vplexVolume.getAssociatedVolumes();
if (associatedVolumes.size() > 1) {
isDistributedVolume = true;
}
for (String associatedVolume : associatedVolumes) {
Volume storageVolume = getDataObject(Volume.class, new URI(associatedVolume), _dbClient);
URI storageSystemId = storageVolume.getStorageController();
if (storageMap.containsKey(storageSystemId) == false) {
StorageSystem storage = _dbClient.queryObject(StorageSystem.class, storageSystemId);
storageMap.put(storageSystemId, storage);
if (!storageSystemGuids.contains(storage.getNativeGuid())) {
storageSystemGuids.add(storage.getNativeGuid());
}
}
volumeMap.get(vplexVolume).add(storageVolume);
}
}
_log.info(String.format("Request to create: %s virtual volume(s) %s", volumeMap.size(), volumeLabels));
long startTime = System.currentTimeMillis();
// If a new backend system is connected to a VPLEX and the VPLEX does not
// yet know about the system i.e., the system does not show up in the path
// /clusters/cluster-x/storage-elements/storage-arrays, and a user attempts
// to create a virtual volume, the request may fail because we cannot find
// the storage system. When the backend volume on the new system is created
// and exported to the VPLEX, the VPLEX will recognize the new system. However,
// this may not occur immediately. So, when we go to create the vplex volume
// using that backend volume, we may not find that system and volume on the
// first try. We saw this in development. As such there was a retry loop
// added when finding the backend volumes in the discovery that is performed
// in the method to create the virtual volume.
//
// However, changes for CTRL-12826 were merged on 7/31/2015 that circumvented
// that retry code. Changes were made to do the array re-discover here prior
// to virtual volume creation, rather than during virtual volume creation and
// false was passed to the create virtual volume routine for the discovery
// required flag. The newly added call does not do any kind of retry if the
// system is not found and so a failure will occur in the scenario described
// above. If a system is not found an exception is thrown. Now we will catch
// that exception and re-enable discovery in the volume creation routine.
// Essentially we revert to what was happening before the 12826 changes if there
// is an issue discovering the systems on the initial try here.
boolean discoveryRequired = false;
try {
client.rediscoverStorageSystems(storageSystemGuids);
} catch (Exception e) {
String warnMsg = String.format("Initial discovery of one or more of these backend systems %s failed: %s. " + "Discovery is required during virtual volume creation", storageSystemGuids, e.getMessage());
_log.warn(warnMsg);
discoveryRequired = true;
}
// Now make a call to the VPlexAPIClient.createVirtualVolume for each vplex volume.
StringBuilder buf = new StringBuilder();
buf.append("Vplex: " + vplexURI + " created virtual volume(s): ");
boolean thinEnabled = false;
boolean searchAllClustersForStorageVolumes = (clusterVarrayMap.keySet().size() > 1 || isDistributedVolume);
List<VPlexVirtualVolumeInfo> virtualVolumeInfos = new ArrayList<VPlexVirtualVolumeInfo>();
Map<String, Volume> vplexVolumeNameMap = new HashMap<String, Volume>();
List<VPlexClusterInfo> clusterInfoList = null;
for (Volume vplexVolume : volumeMap.keySet()) {
URI vplexVolumeId = vplexVolume.getId();
_log.info(String.format("Creating virtual volume: %s (%s)", vplexVolume.getLabel(), vplexVolumeId));
URI vplexVolumeVarrayURI = vplexVolume.getVirtualArray();
String clusterId = null;
for (Entry<String, Set<URI>> clusterEntry : clusterVarrayMap.entrySet()) {
if (clusterEntry.getValue().contains(vplexVolumeVarrayURI)) {
clusterId = clusterEntry.getKey();
}
}
List<VolumeInfo> vinfos = new ArrayList<VolumeInfo>();
for (Volume storageVolume : volumeMap.get(vplexVolume)) {
StorageSystem storage = storageMap.get(storageVolume.getStorageController());
List<String> itls = VPlexControllerUtils.getVolumeITLs(storageVolume);
VolumeInfo info = new VolumeInfo(storage.getNativeGuid(), storage.getSystemType(), storageVolume.getWWN().toUpperCase().replaceAll(":", ""), storageVolume.getNativeId(), storageVolume.getThinlyProvisioned().booleanValue(), itls);
if (storageVolume.getVirtualArray().equals(vplexVolumeVarrayURI)) {
// We always want the source backend volume identified first. It
// may not be first in the map as the map is derived from the
// VPLEX volume's associated volumes list which is an unordered
// StringSet.
vinfos.add(0, info);
} else {
vinfos.add(info);
}
if (info.getIsThinProvisioned()) {
// if either or both legs of distributed is thin, try for thin-enabled
// (or if local and the single backend volume is thin, try as well)
thinEnabled = true;
}
}
// Update rollback information.
rollbackData.add(vinfos);
_workflowService.storeStepData(stepId, rollbackData);
InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_045);
// Make a call to get cluster info
if (null == clusterInfoList) {
if (searchAllClustersForStorageVolumes) {
clusterInfoList = client.getClusterInfoDetails();
} else {
clusterInfoList = new ArrayList<VPlexClusterInfo>();
}
}
// Make the call to create a virtual volume. It is distributed if there are
// two or more physical volumes.
boolean isDistributed = (vinfos.size() >= 2);
thinEnabled = thinEnabled && verifyVplexSupportsThinProvisioning(vplex);
VPlexVirtualVolumeInfo vvInfo = client.createVirtualVolume(vinfos, isDistributed, discoveryRequired, false, clusterId, clusterInfoList, false, thinEnabled, searchAllClustersForStorageVolumes);
// Note: according to client.createVirtualVolume, this will never be the case.
if (vvInfo == null) {
VPlexApiException ex = VPlexApiException.exceptions.cantFindRequestedVolume(vplexVolume.getLabel());
throw ex;
}
vplexVolumeNameMap.put(vvInfo.getName(), vplexVolume);
virtualVolumeInfos.add(vvInfo);
}
InvokeTestFailure.internalOnlyInvokeTestFailure(InvokeTestFailure.ARTIFICIAL_FAILURE_046);
Map<String, VPlexVirtualVolumeInfo> foundVirtualVolumes = client.findVirtualVolumes(clusterInfoList, virtualVolumeInfos);
if (!foundVirtualVolumes.isEmpty()) {
for (Entry<String, Volume> entry : vplexVolumeNameMap.entrySet()) {
Volume vplexVolume = entry.getValue();
VPlexVirtualVolumeInfo vvInfo = foundVirtualVolumes.get(entry.getKey());
try {
// Now we try and rename the volume to the customized name. Note that if custom naming
// is disabled the custom name will not be generated and will be null.
// Create the VPLEX volume name custom configuration datasource and generate the
// custom volume name based on whether the volume is a local or distributed volume.
String hostOrClusterName = null;
URI computeResourceURI = computeResourceMap.get(vplexVolume.getId());
if (computeResourceURI != null) {
DataObject hostOrCluster = null;
if (URIUtil.isType(computeResourceURI, Cluster.class)) {
hostOrCluster = getDataObject(Cluster.class, computeResourceURI, _dbClient);
} else if (URIUtil.isType(computeResourceURI, Host.class)) {
hostOrCluster = getDataObject(Host.class, computeResourceURI, _dbClient);
}
if ((hostOrCluster != null) && ((vplexVolume.getPersonality() == null) || (vplexVolume.checkPersonality(Volume.PersonalityTypes.SOURCE)))) {
hostOrClusterName = hostOrCluster.getLabel();
}
}
if (CustomVolumeNamingUtils.isCustomVolumeNamingEnabled(customConfigHandler, vplex.getSystemType())) {
String customConfigName = CustomVolumeNamingUtils.getCustomConfigName(hostOrClusterName != null);
Project project = getDataObject(Project.class, vplexVolume.getProject().getURI(), _dbClient);
TenantOrg tenant = getDataObject(TenantOrg.class, vplexVolume.getTenant().getURI(), _dbClient);
DataSource customNameDataSource = CustomVolumeNamingUtils.getCustomConfigDataSource(project, tenant, vplexVolume.getLabel(), vvInfo.getWwn(), hostOrClusterName, dataSourceFactory, customConfigName, _dbClient);
if (customNameDataSource != null) {
String customVolumeName = CustomVolumeNamingUtils.getCustomName(customConfigHandler, customConfigName, customNameDataSource, vplex.getSystemType());
vvInfo = CustomVolumeNamingUtils.renameVolumeOnVPlex(vvInfo, customVolumeName, client);
// Update the label to match the custom name.
vplexVolume.setLabel(vvInfo.getName());
// Also, we update the name portion of the project and tenant URIs
// to reflect the custom name. This is necessary because the API
// to search for volumes by project, extracts the name portion of the
// project URI to get the volume name.
NamedURI namedURI = vplexVolume.getProject();
namedURI.setName(vvInfo.getName());
vplexVolume.setProject(namedURI);
namedURI = vplexVolume.getTenant();
namedURI.setName(vvInfo.getName());
vplexVolume.setTenant(namedURI);
}
}
} catch (Exception e) {
_log.warn(String.format("Error renaming newly created VPLEX volume %s:%s", vplexVolume.getId(), vplexVolume.getLabel()), e);
}
buf.append(vvInfo.getName() + " ");
_log.info(String.format("Created virtual volume: %s path: %s size: %s", vvInfo.getName(), vvInfo.getPath(), vvInfo.getCapacityBytes()));
vplexVolume.setNativeId(vvInfo.getPath());
vplexVolume.setNativeGuid(vvInfo.getPath());
vplexVolume.setDeviceLabel(vvInfo.getName());
vplexVolume.setThinlyProvisioned(vvInfo.isThinEnabled());
checkThinEnabledResult(vvInfo, thinEnabled, _workflowService.getWorkflowFromStepId(stepId).getOrchTaskId());
vplexVolume.setWWN(vvInfo.getWwn());
// For Vplex virtual volumes set allocated capacity to 0 (cop-18608)
vplexVolume.setAllocatedCapacity(0L);
vplexVolume.setProvisionedCapacity(vvInfo.getCapacityBytes());
_dbClient.updateObject(vplexVolume);
// Record VPLEX volume created event.
createdVplexVolumeURIs.add(vplexVolume.getId());
recordBourneVolumeEvent(vplexVolume.getId(), OperationTypeEnum.CREATE_BLOCK_VOLUME.getEvType(true), Operation.Status.ready, OperationTypeEnum.CREATE_BLOCK_VOLUME.getDescription());
}
}
if (foundVirtualVolumes.size() != vplexVolumeNameMap.size()) {
VPlexApiException ex = VPlexApiException.exceptions.cantFindAllRequestedVolume();
throw ex;
}
long elapsed = System.currentTimeMillis() - startTime;
_log.info(String.format("TIMER: %s virtual volume(s) %s create took %f seconds", volumeMap.size(), volumeLabels.toString(), (double) elapsed / (double) 1000));
WorkflowStepCompleter.stepSucceded(stepId);
} catch (VPlexApiException vae) {
_log.error("Exception creating Vplex Virtual Volume: " + vae.getMessage(), vae);
// Record a failure event for each volume that was not created.
for (URI vplexVolumeURI : vplexVolumeURIs) {
if (!createdVplexVolumeURIs.contains(vplexVolumeURI)) {
recordBourneVolumeEvent(vplexVolumeURI, OperationTypeEnum.CREATE_BLOCK_VOLUME.getEvType(false), Operation.Status.error, OperationTypeEnum.CREATE_BLOCK_VOLUME.getDescription());
}
}
WorkflowStepCompleter.stepFailed(stepId, vae);
} catch (Exception ex) {
_log.error("Exception creating Vplex Virtual Volume: " + ex.getMessage(), ex);
// Record a failure event for each volume that was not created.
for (URI vplexVolumeURI : vplexVolumeURIs) {
if (!createdVplexVolumeURIs.contains(vplexVolumeURI)) {
recordBourneVolumeEvent(vplexVolumeURI, OperationTypeEnum.CREATE_BLOCK_VOLUME.getEvType(false), Operation.Status.error, OperationTypeEnum.CREATE_BLOCK_VOLUME.getDescription());
}
}
String opName = ResourceOperationTypeEnum.CREATE_VIRTUAL_VOLUME.getName();
ServiceError serviceError = VPlexApiException.errors.createVirtualVolumesFailed(opName, ex);
WorkflowStepCompleter.stepFailed(stepId, serviceError);
}
}
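The comment block in the method above describes a two-stage discovery strategy: rediscover the backend arrays eagerly before creating the virtual volumes, and if that fails, set a flag so the create call performs discovery (with its own retries) instead. The sketch below isolates that control flow behind a generic Runnable; it is an illustration of the pattern only, not the actual VPlexApiClient API.

import java.util.function.Consumer;

public final class DiscoveryFallbackSketch {

    /**
     * Attempts the eager rediscovery action. Returns false on success, or true when
     * the action fails and discovery must be deferred to the volume-creation call,
     * mirroring the discoveryRequired flag in the method above.
     */
    static boolean rediscoverOrDefer(Runnable eagerRediscovery, Consumer<String> warnLog) {
        try {
            eagerRediscovery.run();
            return false;
        } catch (RuntimeException e) {
            warnLog.accept("Initial discovery failed; discovery is required during virtual volume creation: " + e.getMessage());
            return true;
        }
    }

    public static void main(String[] args) {
        boolean discoveryRequired = rediscoverOrDefer(() -> {
            // Simulate a backend array that the VPLEX does not yet know about.
            throw new RuntimeException("storage array not found");
        }, System.err::println);
        // The flag would then be passed to the create call so it can retry discovery itself.
        System.out.println("discoveryRequired = " + discoveryRequired);
    }
}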
use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class UpdateVcenterClusterService method precheck.
@Override
public void precheck() throws Exception {
StringBuilder preCheckErrors = new StringBuilder();
Cluster cluster = BlockStorageUtils.getCluster(clusterId);
if (cluster == null) {
preCheckErrors.append(ExecutionUtils.getMessage("compute.vcenter.cluster.does.not.exist.update", clusterId) + " ");
} else {
BlockStorageUtils.checkEvents(cluster);
acquireClusterLock(cluster);
}
if (preCheckErrors.length() > 0) {
throw new IllegalStateException(preCheckErrors.toString() + ComputeUtils.getContextErrors(getModelClient()));
}
}
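precheck above accumulates every validation failure in a StringBuilder and throws a single IllegalStateException at the end, so the caller sees all problems at once rather than only the first. A stripped-down sketch of that accumulate-then-throw pattern, with generic check messages standing in for the BlockStorageUtils and ComputeUtils helpers:

import java.util.Arrays;
import java.util.List;

public final class PrecheckSketch {

    /** Runs all checks, collects their messages, and throws once if anything failed. */
    static void precheck(List<String> failureMessages) {
        StringBuilder preCheckErrors = new StringBuilder();
        for (String message : failureMessages) {
            preCheckErrors.append(message).append(" ");
        }
        if (preCheckErrors.length() > 0) {
            throw new IllegalStateException(preCheckErrors.toString().trim());
        }
    }

    public static void main(String[] args) {
        // Throws an IllegalStateException listing both failures in one message.
        precheck(Arrays.asList(
                "Cluster cluster-1 does not exist.",
                "Datacenter datacenter-1 is not registered."));
    }
}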
use of com.emc.storageos.db.client.model.Cluster in project coprhd-controller by CoprHD.
the class UpdateVcenterClusterService method execute.
@Override
public void execute() throws Exception {
Cluster cluster = BlockStorageUtils.getCluster(clusterId);
VcenterDataCenter datacenter = ComputeUtils.getVcenterDataCenter(datacenterId);
// If the cluster already has a datacenter associated with it,
// it needs to be updated; otherwise it is created.
URI existingDatacenterId = cluster.getVcenterDataCenter();
boolean status = false;
if (existingDatacenterId == null) {
logInfo("vcenter.cluster.create", cluster.getLabel());
if (datacenter == null) {
status = ComputeUtils.createVcenterCluster(cluster, datacenterId);
} else {
status = ComputeUtils.createVcenterCluster(cluster, datacenter);
}
if (!status) {
throw new IllegalStateException(ExecutionUtils.getMessage("vcenter.cluster.create.failed", cluster.getLabel() + " "));
}
} else {
logInfo("vcenter.cluster.update", cluster.getLabel());
if (datacenter == null) {
status = ComputeUtils.updateVcenterCluster(cluster, datacenterId);
} else {
status = ComputeUtils.updateVcenterCluster(cluster, datacenter);
}
if (!status) {
throw new IllegalStateException(ExecutionUtils.getMessage("vcenter.cluster.update.failed", cluster.getLabel() + " "));
}
}
}