Use of com.emc.storageos.model.block.BlockObjectRestRep in project coprhd-controller by CoprHD.
Example: the exportBlockResources method of the ExportBlockVolumeHelper class.
/**
 * Exports the block resources identified by URIs in the given resource id list.
 * Resources that already belong to a usable export group are added to it (or are
 * skipped if already present); all remaining resources are placed into a single
 * newly created export group named after the cluster label or host name.
 *
 * @param resourceIds the list of URIs which identify the block resources that need to be exported
 * @param parentId the parent URI for the list of resourceIds
 * @return The list of export groups which have been created/updated
 */
public List<ExportGroupRestRep> exportBlockResources(List<URI> resourceIds, URI parentId) {
    // the list of exports to return
    List<ExportGroupRestRep> exports = Lists.newArrayList();
    // resources with no usable existing export; they all go into one new export group at the end
    List<URI> newVolumes = new ArrayList<URI>();
    // existing export group id -> set of volume ids to add to that group
    Map<URI, Set<URI>> addVolumeExports = Maps.newHashMap();
    // empty export group id -> host/cluster id that must be added to it along with the volumes
    Map<URI, URI> addComputeResourceToExports = Maps.newHashMap();
    // we will need to keep track of the current HLU number; it is advanced as volumes are
    // assigned to existing exports so the new export (if any) continues from the right value
    Integer currentHlu = hlu;
    // get a list of all block resources using the id list provided
    List<BlockObjectRestRep> blockResources = BlockStorageUtils.getBlockResources(resourceIds, parentId);
    URI virtualArrayId = null;
    // exports are named after the cluster when exporting to a cluster, otherwise after the host
    String exportName = cluster != null ? cluster.getLabel() : host.getHostName();
    // Flag to indicate an empty ExportGroup object in ViPR, i.e., ExportGroup without any volumes and initiators in it.
    boolean isEmptyExport = true;
    ExportGroupRestRep export = null;
    // Phase 1: classify each resource as "add to an existing export" or "needs a new export"
    for (BlockObjectRestRep blockResource : blockResources) {
        virtualArrayId = getVirtualArrayId(blockResource);
        // see if we can find an export that uses this block resource
        export = findExistingExportGroup(blockResource, virtualArrayId);
        boolean createExport = export == null;
        isEmptyExport = export != null && BlockStorageUtils.isEmptyExport(export);
        // fall back to looking the export up by host/cluster name; an empty export with a
        // matching name can be reused, a non-empty one cannot
        if (export == null) {
            export = BlockStorageUtils.findExportsByName(exportName, projectId, virtualArrayId);
            isEmptyExport = export != null && BlockStorageUtils.isEmptyExport(export);
            createExport = export == null || !isEmptyExport;
        }
        if (createExport) {
            newVolumes.add(blockResource.getId());
        } else {
            // Export exists, check if volume belongs to it
            if (BlockStorageUtils.isVolumeInExportGroup(export, blockResource.getId())) {
                logInfo("export.block.volume.contains.volume", export.getId(), blockResource.getId());
            } else {
                updateExportVolumes(export, blockResource, addVolumeExports);
            }
            // an empty export also needs the compute resource (host/cluster) added to it
            if (isEmptyExport) {
                URI computeResource = cluster != null ? cluster.getId() : host.getId();
                addComputeResourceToExports.put(export.getId(), computeResource);
            }
            exports.add(export);
        }
    }
    // If there is an existing non-empty export with the same name, append a time stamp to the
    // name to make it unique. NOTE: 'export'/'isEmptyExport' reflect the last loop iteration.
    // 'HH' (24-hour) is used so afternoon timestamps do not collide with morning ones.
    if (export != null && !isEmptyExport) {
        exportName = exportName + BlockStorageUtils.UNDERSCORE + new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    }
    // Phase 2: bulk update multiple volumes to single export
    List<URI> volumeIds = Lists.newArrayList();
    for (Map.Entry<URI, Set<URI>> entry : addVolumeExports.entrySet()) {
        volumeIds.addAll(entry.getValue());
    }
    Map<URI, Integer> volumeHlus = getVolumeHLUs(volumeIds);
    for (Map.Entry<URI, Set<URI>> entry : addVolumeExports.entrySet()) {
        BlockStorageUtils.addVolumesToExport(entry.getValue(), currentHlu, entry.getKey(), volumeHlus, minPaths, maxPaths, pathsPerInitiator, portGroup);
        logInfo("export.block.volume.add.existing", entry.getValue(), entry.getKey());
        // advance the HLU counter only when an explicit starting HLU was requested
        // (null / -1 presumably means auto-assign -- TODO confirm)
        if ((currentHlu != null) && (currentHlu > -1)) {
            currentHlu += entry.getValue().size();
        }
    }
    // Phase 3: attach the host or cluster to the previously-empty export groups
    for (Map.Entry<URI, URI> entry : addComputeResourceToExports.entrySet()) {
        if (cluster != null) {
            BlockStorageUtils.addClusterToExport(entry.getKey(), cluster.getId(), minPaths, maxPaths, pathsPerInitiator, portGroup);
            logInfo("export.cluster.add.existing", entry.getValue(), entry.getKey());
        } else {
            BlockStorageUtils.addHostToExport(entry.getKey(), host.getId(), minPaths, maxPaths, pathsPerInitiator, portGroup);
            logInfo("export.host.add.existing", entry.getValue(), entry.getKey());
        }
    }
    // Phase 4: create new export with multiple volumes that don't belong to an export
    if (!newVolumes.isEmpty()) {
        volumeHlus = getVolumeHLUs(newVolumes);
        URI exportId = null;
        if (cluster != null) {
            exportId = BlockStorageUtils.createClusterExport(projectId, virtualArrayId, newVolumes, currentHlu, cluster, volumeHlus, minPaths, maxPaths, pathsPerInitiator, portGroup);
        } else {
            exportId = BlockStorageUtils.createHostExport(projectId, virtualArrayId, newVolumes, currentHlu, host, volumeHlus, minPaths, maxPaths, pathsPerInitiator, portGroup);
        }
        ExportGroupRestRep exportGroup = BlockStorageUtils.getExport(exportId);
        // add this export to the list of exports we will return to the caller
        exports.add(exportGroup);
    }
    // add host or cluster to the affected resources
    if (host != null) {
        ExecutionUtils.addAffectedResource(host.getId().toString());
    } else if (cluster != null) {
        ExecutionUtils.addAffectedResource(cluster.getId().toString());
    }
    // Clear the rollback at this point so later errors won't undo the exports
    ExecutionUtils.clearRollback();
    return exports;
}
Use of com.emc.storageos.model.block.BlockObjectRestRep in project coprhd-controller by CoprHD.
Example: the precheck method of the FailoverBlockVolumeService class.
@Override
public void precheck() {
    // Resolve source/target ids and display names for the selected storage type;
    // both branches also set the service-level failover 'type'.
    final String srcId;
    final String srcName;
    final String tgtId;
    final String tgtName;
    if (ConsistencyUtils.isVolumeStorageType(storageType)) {
        // The type selected is volume
        BlockObjectRestRep target = BlockStorageUtils.getVolume(protectionTarget);
        BlockObjectRestRep source = BlockStorageUtils.getVolume(protectionSource);
        type = BlockStorageUtils.getFailoverType(target);
        tgtId = stringId(target);
        tgtName = target.getName();
        srcId = stringId(source);
        srcName = source.getName();
    } else {
        // The type selected is consistency group
        BlockConsistencyGroupRestRep consistencyGroup = BlockStorageUtils.getBlockConsistencyGroup(protectionSource);
        VirtualArrayRestRep targetVarray = BlockStorageUtils.getVirtualArray(protectionTarget);
        type = ConsistencyUtils.getFailoverType(consistencyGroup);
        tgtId = stringId(targetVarray);
        tgtName = targetVarray.getName();
        srcId = stringId(consistencyGroup);
        srcName = consistencyGroup.getName();
    }
    // No recognizable failover type -> the task cannot proceed.
    if (type == null) {
        ExecutionUtils.fail("failTask.FailoverBlockVolumeService", args(srcId, tgtId), args());
    }
    // RecoverPoint point-in-time failover requires an explicit point in time.
    if (type.equals(RECOVER_POINT) && BlockProvider.PIT_IMAGE_OPTION_KEY.equals(imageToAccess) && pointInTime == null) {
        ExecutionUtils.fail("failTask.FailoverBlockVolumeService.pit", new Object[] {}, new Object[] {});
    }
    // TODO: Add new fields
    logInfo("fail.over.block.volume.service", type.toUpperCase(), srcName, tgtName);
}
Use of com.emc.storageos.model.block.BlockObjectRestRep in project coprhd-controller by CoprHD.
Example: the checkAndPurgeObsoleteCopies method of the CreateFullCopyService class.
/**
 * Enforces the full-copy retention policy: when retention is required, deletes every
 * obsolete full copy associated with the given volume or consistency group, then
 * removes the bookkeeping record for each purged replica set.
 *
 * @param volumeOrCgId - volume id or CG id
 */
private void checkAndPurgeObsoleteCopies(String volumeOrCgId) {
    // Nothing to purge when no retention policy applies.
    if (!isRetentionRequired()) {
        return;
    }
    for (RetainedReplica retained : findObsoleteReplica(volumeOrCgId)) {
        for (String copyId : retained.getAssociatedReplicaIds()) {
            BlockObjectRestRep copy = BlockStorageUtils.getVolume(uri(copyId));
            info("Delete full copy %s (%s) since it exceeds max number of copies allowed", copyId, copy.getName());
            // Volume-level copies are fully deleted; CG-level copies are detached from the group.
            if (ConsistencyUtils.isVolumeStorageType(storageType)) {
                BlockStorageUtils.removeFullCopy(uri(copyId), VolumeDeleteTypeEnum.FULL);
            } else {
                ConsistencyUtils.removeFullCopy(uri(volumeOrCgId), uri(copyId));
            }
        }
        // Drop the retention record once its copies are gone.
        getModelClient().delete(retained);
    }
}
Use of com.emc.storageos.model.block.BlockObjectRestRep in project coprhd-controller by CoprHD.
Example: the execute method of the CreateVolumeAndVmfsDatastoreService class.
@Override
public void execute() throws Exception {
    // Each datastore needs exactly one backing volume helper.
    if (datastoreNames.size() != createBlockVolumeHelpers.size()) {
        throw new IllegalStateException(ExecutionUtils.getMessage("CreateVolumeAndVmfsDatastoreService.datastore.volume.mismatch"));
    }
    List<URI> volumes = BlockStorageUtils.createMultipleVolumes(createBlockVolumeHelpers);
    if (volumes.isEmpty()) {
        ExecutionUtils.fail("CreateVolumeAndVmfsDatastoreService.illegalState.noVolumesCreated", args(), args());
    }
    // use one of the helpers to export all volumes to the host or cluster
    createBlockVolumeHelpers.get(0).exportVolumes(volumes);
    // Pair each datastore name with its created volume, in order.
    Map<String, BlockObjectRestRep> datastoreVolumeMap = Maps.newHashMap();
    int position = 0;
    for (String datastoreName : datastoreNames) {
        datastoreVolumeMap.put(datastoreName, BlockStorageUtils.getBlockResource(volumes.get(position)));
        position++;
    }
    connectAndInitializeHost();
    vmware.refreshStorage(host, cluster);
    // Create a VMFS datastore on each volume and apply the requested policies.
    for (Entry<String, BlockObjectRestRep> entry : datastoreVolumeMap.entrySet()) {
        BlockObjectRestRep backingVolume = entry.getValue();
        Datastore datastore = vmware.createVmfsDatastore(host, cluster, hostId, vmfsVersion, backingVolume, entry.getKey());
        vmware.setMultipathPolicy(host, cluster, multipathPolicy, backingVolume);
        vmware.setStorageIOControl(datastore, storageIOControl, true);
    }
    vmware.disconnect();
}
Use of com.emc.storageos.model.block.BlockObjectRestRep in project coprhd-controller by CoprHD.
Example: the execute method of the UnexportVMwareVolumeService class.
@Override
public void execute() throws Exception {
    // Phase 1: unmount the VMFS datastore backed by each volume, preserving and
    // restoring the Storage I/O Control setting where the datastore survives.
    for (BlockObjectRestRep volume : volumes) {
        String datastoreName = KnownMachineTags.getBlockVolumeVMFSDatastore(hostId, volume);
        if (!StringUtils.isEmpty(datastoreName)) {
            Datastore datastore = vmware.getDatastore(datacenter.getLabel(), datastoreName);
            if (datastore != null) {
                // Capture whether SIOC was enabled before the unmount.
                boolean storageIOControlEnabled = datastore.getIormConfiguration() != null ? datastore.getIormConfiguration().isEnabled() : false;
                vmware.unmountVmfsDatastore(host, cluster, datastore);
                // Re-fetch: the unmount may have removed or invalidated the datastore.
                datastore = vmware.getDatastore(datacenter.getLabel(), datastoreName);
                // Re-enable SIOC only if the datastore still exists and is accessible.
                if (storageIOControlEnabled && datastore != null && datastore.getSummary() != null && datastore.getSummary().isAccessible()) {
                    vmware.setStorageIOControl(datastore, true);
                }
            }
        }
    }
    // Phase 2: detach the LUNs from the host/cluster once all datastores are unmounted.
    for (BlockObjectRestRep volume : volumes) {
        vmware.detachLuns(host, cluster, volume);
    }
    vmware.disconnect();
    // Past this point the vCenter-side work is done; don't roll it back on later errors.
    ExecutionUtils.clearRollback();
    // Phase 3: strip the VMFS datastore tag from each volume that carries one.
    for (BlockObjectRestRep volume : volumes) {
        if (volume.getTags() != null) {
            vmware.removeVmfsDatastoreTag(volume, hostId);
        }
    }
    // Phase 4: remove the volumes from every export group that contains them.
    for (ExportGroupRestRep export : filteredExportGroups) {
        URI exportId = ResourceUtils.id(export);
        String exportName = ResourceUtils.name(export);
        // Check each volume to see if it is in this export
        Set<URI> exportedVolumeIds = Sets.newHashSet();
        for (BlockObjectRestRep volume : volumes) {
            URI volumeId = ResourceUtils.id(volume);
            String volumeName = ResourceUtils.name(volume);
            if (BlockStorageUtils.isVolumeInExportGroup(export, volumeId)) {
                logInfo("unexport.host.service.volume.in.export", volumeName, exportName);
                exportedVolumeIds.add(volumeId);
            }
        }
        if (!exportedVolumeIds.isEmpty()) {
            logInfo("unexport.host.service.volume.remove", exportedVolumeIds.size(), exportName);
            BlockStorageUtils.removeBlockResourcesFromExport(exportedVolumeIds, exportId);
        } else {
            logDebug("unexport.host.service.volume.skip", exportName);
        }
        // NOTE(review): this lookup is loop-invariant (depends only on hostId) and records
        // the same affected resource once per export group -- consider hoisting it above
        // the loop; TODO confirm addAffectedResource tolerates duplicates.
        String hostOrClusterId = BlockStorageUtils.getHostOrClusterId(hostId);
        if (hostOrClusterId != null) {
            ExecutionUtils.addAffectedResource(hostOrClusterId.toString());
        }
    }
    // Phase 5: reconnect and rescan so the host no longer sees the removed storage.
    connectAndInitializeHost();
    vmware.refreshStorage(host, cluster);
}
Aggregations