use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.
the class ExportUtilsTestUtils method createExportGroup.
public static ExportGroup createExportGroup(DbClientImpl _dbClient, List<Initiator> initiators, List<Volume> volumes, VirtualArray varray, int i) {
    ExportGroup eg = new ExportGroup();
    String label = "eg" + i;
    eg.setId(URI.create(label));
    eg.setLabel(label);
    eg.setVirtualArray(varray.getId());
    for (Initiator initiator : initiators) {
        eg.addInitiator(initiator);
    }
    for (Volume volume : volumes) {
        eg.addVolume(volume.getId(), i);
    }
    _dbClient.createObject(eg);
    return eg;
}
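For orientation, here is a minimal, hypothetical usage sketch of this helper. It assumes it is added to the same test class as the other ExportUtilsTestUtils calls on this page, so the _dbClient field and the model imports are already in scope; the helper names and signatures are taken from the snippets shown here.

// Hypothetical usage sketch: build one export group from one initiator and one volume.
// Assumes this method lives alongside the other ExportUtilsTestUtils-based tests.
public ExportGroup createSampleExportGroup() {
    VirtualArray varray = ExportUtilsTestUtils.createVirtualArray(_dbClient, "varray1");
    Network network = ExportUtilsTestUtils.createNetwork(_dbClient,
            new String[] { "50:FE:FE:FE:FE:FE:FE:00" }, "VSANFE", "FC+BROCADE+FE", null);

    List<Initiator> initiators = new ArrayList<Initiator>();
    initiators.add(ExportUtilsTestUtils.createInitiator(_dbClient, network, 0));

    List<Volume> volumes = new ArrayList<Volume>();
    volumes.add(ExportUtilsTestUtils.createVolume(_dbClient, varray, 0));

    // The index argument becomes both the label suffix ("eg0") and the HLU passed to addVolume().
    return ExportUtilsTestUtils.createExportGroup(_dbClient, initiators, volumes, varray, 0);
}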
use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.
the class ExportUtilsTests method populateDb.
/**
 * Populate the database with ITL (initiator/target/LUN) components.
 */
public void populateDb() {
    String[] vmaxFE = { "50:FE:FE:FE:FE:FE:FE:00", "50:FE:FE:FE:FE:FE:FE:01", "50:FE:FE:FE:FE:FE:FE:02", "50:FE:FE:FE:FE:FE:FE:03" };
    // Create a network
    Network network = ExportUtilsTestUtils.createNetwork(_dbClient, vmaxFE, "VSANFE", "FC+BROCADE+FE", null);
    // Create a virtual array
    VirtualArray varray = ExportUtilsTestUtils.createVirtualArray(_dbClient, "varray1");
    // Create a storage system
    StorageSystem storageSystem = ExportUtilsTestUtils.createStorageSystem(_dbClient, "vmax", "vmax1");
    // Create one VMAX front-end storage port per WWN defined above
    List<StoragePort> vmaxPorts = new ArrayList<StoragePort>();
    for (int i = 0; i < vmaxFE.length; i++) {
        vmaxPorts.add(ExportUtilsTestUtils.createStoragePort(_dbClient, storageSystem, network, vmaxFE[i], varray,
                StoragePort.PortType.frontend.name(), "portGroupvmax" + i, "C0+FC0" + i));
    }
    // Create initiators
    List<Initiator> initiators = new ArrayList<Initiator>();
    for (int i = 0; i < NUM_INITIATORS; i++) {
        initiators.add(ExportUtilsTestUtils.createInitiator(_dbClient, network, i));
    }
    // Create volumes
    List<Volume> volumes = new ArrayList<Volume>();
    for (int i = 0; i < NUM_VOLUMES; i++) {
        Volume volume = ExportUtilsTestUtils.createVolume(_dbClient, varray, i);
        volumes.add(volume);
        _volumeIds.add(volume.getId());
    }
    // Create export groups
    List<ExportGroup> egs = new ArrayList<ExportGroup>();
    for (int i = 0; i < NUM_EXPORT_GROUPS; i++) {
        egs.add(ExportUtilsTestUtils.createExportGroup(_dbClient, initiators, volumes, varray, i));
    }
    // Create export masks
    List<ExportMask> ems = new ArrayList<ExportMask>();
    for (int i = 0; i < NUM_EXPORT_MASKS; i++) {
        ems.add(ExportUtilsTestUtils.createExportMask(_dbClient, egs, initiators, volumes, vmaxPorts, i));
    }
}
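As a hypothetical follow-up, a test in the same class could confirm that the ITL objects were actually persisted. The sketch below reuses the _dbClient and _volumeIds fields assumed by populateDb(); the method name is illustrative only.

// Hypothetical sanity check run after populateDb() in the same test class:
// each recorded volume URI should resolve to a persisted Volume object.
public void verifyVolumesPersisted() {
    for (URI volumeId : _volumeIds) {
        Volume volume = _dbClient.queryObject(Volume.class, volumeId);
        org.junit.Assert.assertNotNull("volume " + volumeId + " was not persisted", volume);
    }
}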
use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.
the class ComputeSystemControllerImpl method addStepsForRemoveHostFromExport.
/**
 * Assembles steps to remove hosts from an export group.
 *
 * @param workflow The current workflow
 * @param waitFor The current waitFor
 * @param hostIds List of hosts to remove
 * @param exportId The ID of the export group to remove the hosts from
 * @return Next step
 */
public String addStepsForRemoveHostFromExport(Workflow workflow, String waitFor, List<URI> hostIds, URI exportId) {
    ExportGroup export = _dbClient.queryObject(ExportGroup.class, exportId);
    String newWaitFor = waitFor;
    Set<URI> addedClusters = new HashSet<>();
    Set<URI> removedClusters = new HashSet<>();
    Set<URI> addedHosts = new HashSet<>();
    Set<URI> removedHosts = new HashSet<>(hostIds);
    Set<URI> addedInitiators = new HashSet<>();
    Set<URI> removedInitiators = new HashSet<>();
    if (export != null) {
        List<URI> updatedHosts = StringSetUtil.stringSetToUriList(export.getHosts());
        Map<URI, Integer> updatedVolumesMap = StringMapUtil.stringMapToVolumeMap(export.getVolumes());
        for (URI hostId : hostIds) {
            updatedHosts.remove(hostId);
            List<Initiator> hostInitiators = ComputeSystemHelper.queryInitiators(_dbClient, hostId);
            for (Initiator initiator : hostInitiators) {
                removedInitiators.add(initiator.getId());
            }
        }
        // host. We can raise an enhancement and fix this later.
        if (updatedHosts.isEmpty()) {
            newWaitFor = workflow.createStep(DELETE_EXPORT_GROUP_STEP,
                    String.format("Deleting export group %s", export.getId()),
                    newWaitFor, export.getId(), export.getId().toString(), this.getClass(),
                    deleteExportGroupMethod(export.getId()), rollbackMethodNullMethod(), null);
        } else {
            newWaitFor = workflow.createStep(UPDATE_EXPORT_GROUP_STEP,
                    String.format("Updating export group %s", export.getId()),
                    newWaitFor, export.getId(), export.getId().toString(), this.getClass(),
                    updateExportGroupMethod(export.getId(),
                            CollectionUtils.isEmpty(export.getInitiators()) ? new HashMap<URI, Integer>() : updatedVolumesMap,
                            addedClusters, removedClusters, addedHosts, removedHosts, addedInitiators, removedInitiators),
                    updateExportGroupRollbackMethod(export.getId()), null);
        }
    }
    return newWaitFor;
}
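The host-to-initiator expansion performed in the loop above also appears in the two cluster-oriented methods below. A hypothetical extraction of that loop (assuming the controller's _dbClient field is in scope) could look like this; it is a refactoring sketch, not code from the project.

// Hypothetical helper mirroring the loop above: expand a list of hosts into the URIs of
// their initiators so they can be passed to updateExportGroupMethod() as removals.
private Set<URI> collectInitiatorUris(List<URI> hostIds) {
    Set<URI> initiatorUris = new HashSet<>();
    for (URI hostId : hostIds) {
        for (Initiator initiator : ComputeSystemHelper.queryInitiators(_dbClient, hostId)) {
            initiatorUris.add(initiator.getId());
        }
    }
    return initiatorUris;
}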
use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.
the class ComputeSystemControllerImpl method addStepsForSynchronizeClusterExport.
/**
 * Synchronizes a cluster's export groups by performing the following steps:
 * - Add all hosts in the cluster that are not in the cluster's export groups
 * - Remove all hosts in the cluster's export groups that don't belong to the cluster
 *
 * @param workflow
 *            the workflow
 * @param waitFor
 *            the waitFor step
 * @param clusterHostIds
 *            hosts that belong to the cluster
 * @param clusterId
 *            cluster id
 * @return the waitFor step that subsequent steps should depend on
 */
public String addStepsForSynchronizeClusterExport(Workflow workflow, String waitFor, List<URI> clusterHostIds, URI clusterId) {
    for (ExportGroup export : getSharedExports(_dbClient, clusterId)) {
        List<URI> existingInitiators = StringSetUtil.stringSetToUriList(export.getInitiators());
        List<URI> existingHosts = StringSetUtil.stringSetToUriList(export.getHosts());
        List<URI> updatedClusters = StringSetUtil.stringSetToUriList(export.getClusters());
        Map<URI, Integer> updatedVolumesMap = StringMapUtil.stringMapToVolumeMap(export.getVolumes());
        Set<URI> addedClusters = new HashSet<>();
        Set<URI> removedClusters = new HashSet<>();
        Set<URI> addedHosts = new HashSet<>();
        Set<URI> removedHosts = new HashSet<>();
        Set<URI> addedInitiators = new HashSet<>();
        Set<URI> removedInitiators = new HashSet<>();
        // 1. Add all hosts in the cluster that are not in the cluster's export groups
        for (URI clusterHost : clusterHostIds) {
            if (!existingHosts.contains(clusterHost)) {
                _log.info("Adding host " + clusterHost + " to cluster export group " + export.getId());
                addedHosts.add(clusterHost);
                List<Initiator> hostInitiators = ComputeSystemHelper.queryInitiators(_dbClient, clusterHost);
                for (Initiator initiator : hostInitiators) {
                    addedInitiators.add(initiator.getId());
                }
            }
        }
        // 2. Remove all hosts in the cluster's export groups that don't belong to the cluster
        Iterator<URI> existingHostsIterator = existingHosts.iterator();
        while (existingHostsIterator.hasNext()) {
            URI hostId = existingHostsIterator.next();
            if (!clusterHostIds.contains(hostId)) {
                removedHosts.add(hostId);
                _log.info("Removing host " + hostId + " from shared export group " + export.getId()
                        + " because this host does not belong to the cluster");
                List<Initiator> hostInitiators = ComputeSystemHelper.queryInitiators(_dbClient, hostId);
                for (Initiator initiator : hostInitiators) {
                    removedInitiators.add(initiator.getId());
                }
            }
        }
        waitFor = workflow.createStep(UPDATE_EXPORT_GROUP_STEP,
                String.format("Updating export group %s", export.getId()),
                waitFor, export.getId(), export.getId().toString(), this.getClass(),
                updateExportGroupMethod(export.getId(), updatedVolumesMap,
                        addedClusters, removedClusters, addedHosts, removedHosts, addedInitiators, removedInitiators),
                updateExportGroupRollbackMethod(export.getId()), null);
    }
    return waitFor;
}
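The two numbered loops compute a membership diff between the cluster and the export group. Expressed as plain set operations, the same logic reads as follows; this is a hypothetical standalone helper, not part of ComputeSystemControllerImpl.

import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

// Hypothetical helper expressing steps 1 and 2 above as set differences.
final class ClusterExportDiff {

    // Cluster hosts that are not yet in the export group (step 1).
    static Set<URI> hostsToAdd(Collection<URI> clusterHostIds, Collection<URI> existingHosts) {
        Set<URI> toAdd = new HashSet<URI>(clusterHostIds);
        toAdd.removeAll(existingHosts);
        return toAdd;
    }

    // Export group hosts that no longer belong to the cluster (step 2).
    static Set<URI> hostsToRemove(Collection<URI> clusterHostIds, Collection<URI> existingHosts) {
        Set<URI> toRemove = new HashSet<URI>(existingHosts);
        toRemove.removeAll(clusterHostIds);
        return toRemove;
    }
}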
use of com.emc.storageos.db.client.model.Initiator in project coprhd-controller by CoprHD.
the class ComputeSystemControllerImpl method addStepsForClusterExportGroups.
public String addStepsForClusterExportGroups(Workflow workflow, String waitFor, URI clusterId) {
    List<ExportGroup> exportGroups = CustomQueryUtility.queryActiveResourcesByConstraint(_dbClient, ExportGroup.class,
            AlternateIdConstraint.Factory.getConstraint(ExportGroup.class, "clusters", clusterId.toString()));
    for (ExportGroup export : exportGroups) {
        Set<URI> addedClusters = new HashSet<>();
        Set<URI> removedClusters = new HashSet<>();
        Set<URI> addedHosts = new HashSet<>();
        Set<URI> removedHosts = new HashSet<>();
        Set<URI> addedInitiators = new HashSet<>();
        Set<URI> removedInitiators = new HashSet<>();
        Map<URI, Integer> updatedVolumesMap = StringMapUtil.stringMapToVolumeMap(export.getVolumes());
        removedClusters.add(clusterId);
        List<URI> hostUris = ComputeSystemHelper.getChildrenUris(_dbClient, clusterId, Host.class, "cluster");
        for (URI hosturi : hostUris) {
            removedHosts.add(hosturi);
            removedInitiators.addAll(ComputeSystemHelper.getChildrenUris(_dbClient, hosturi, Initiator.class, "host"));
        }
        // VBDU [DONE]: COP-28452: This doesn't look that dangerous, as we might see more than one cluster in the
        // export group. Deleting an export group in the controller means unexporting all volumes in that group.
        // This call's intention is to remove a host; if for some reason one of the export groups doesn't have the
        // right set of initiators, we could end up unexporting all volumes from all hosts rather than just
        // removing the host.
        // Fixed to only perform an export update instead of a delete.
        waitFor = workflow.createStep(UPDATE_EXPORT_GROUP_STEP,
                String.format("Updating export group %s", export.getId()),
                waitFor, export.getId(), export.getId().toString(), this.getClass(),
                updateExportGroupMethod(export.getId(), updatedVolumesMap,
                        addedClusters, removedClusters, addedHosts, removedHosts, addedInitiators, removedInitiators),
                updateExportGroupRollbackMethod(export.getId()), null);
    }
    return waitFor;
}
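The cluster-to-host-to-initiator expansion in the loop above could also be factored out. A hypothetical helper, assuming the same ComputeSystemHelper calls and the controller's _dbClient field, is sketched below.

// Hypothetical extraction of the expansion above: resolve the cluster's hosts, record them
// as removals, and collect the URIs of every initiator owned by those hosts.
private Set<URI> collectClusterInitiatorUris(URI clusterId, Set<URI> removedHosts) {
    Set<URI> removedInitiators = new HashSet<>();
    for (URI hostUri : ComputeSystemHelper.getChildrenUris(_dbClient, clusterId, Host.class, "cluster")) {
        removedHosts.add(hostUri);
        removedInitiators.addAll(ComputeSystemHelper.getChildrenUris(_dbClient, hostUri, Initiator.class, "host"));
    }
    return removedInitiators;
}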