Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
Class LinstorPrimaryDataStoreLifeCycleImpl, method initialize.
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    String tags = (String) dsInfos.get("tags");
    @SuppressWarnings("unchecked")
    Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    final String resourceGroup = details.get(LinstorUtil.RSC_GROUP);
    final String uuid = UUID.randomUUID().toString();
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    // cluster-wide scope: pod and zone must be given, and the cluster's
    // hypervisor type must be supported
    if (clusterId != null) {
        if (podId == null) {
            throw new CloudRuntimeException("The Pod ID must be specified.");
        }
        if (zoneId == null) {
            throw new CloudRuntimeException("The Zone ID must be specified.");
        }
        ClusterVO cluster = clusterDao.findById(clusterId);
        s_logger.info("Linstor: Setting Linstor cluster-wide primary storage uuid to " + uuid);
        parameters.setPodId(podId);
        parameters.setClusterId(clusterId);
        HypervisorType hypervisorType = cluster.getHypervisorType();
        if (!isSupportedHypervisorType(hypervisorType)) {
            throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
        }
    }
    if (!url.contains("://")) {
        url = "http://" + url;
    }
    URL controllerURL;
    int port = 3370;
    try {
        controllerURL = new URL(url);
        if (!controllerURL.getProtocol().startsWith("http")) {
            throw new IllegalArgumentException("Linstor controller URL wrong protocol: " + url);
        }
        if (!controllerURL.getPath().isEmpty()) {
            throw new IllegalArgumentException("Linstor controller URL shouldn't have a path: " + url);
        }
        if (controllerURL.getPort() == -1) {
            port = controllerURL.getProtocol().equals("https") ? 3371 : 3370;
            url += ":" + port;
        }
    } catch (MalformedURLException e) {
        throw new IllegalArgumentException("Linstor controller URL is not valid: " + e);
    }
    long capacityBytes = LinstorUtil.getCapacityBytes(url, resourceGroup);
    if (capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    parameters.setHost(url);
    parameters.setPort(port);
    parameters.setPath(resourceGroup);
    parameters.setType(Storage.StoragePoolType.Linstor);
    parameters.setUuid(uuid);
    parameters.setZoneId(zoneId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(false);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(0L);
    parameters.setHypervisorType(HypervisorType.KVM);
    parameters.setTags(tags);
    parameters.setDetails(details);
    parameters.setUserInfo(resourceGroup);
    return dataStoreHelper.createPrimaryDataStore(parameters);
}
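For illustration, the controller-URL handling above can be read in isolation. The following is a minimal sketch of that normalization as a standalone helper; the class and method names are hypothetical, not part of the Linstor plugin, but the rules match the code above: accept only http/https, reject a path component, and default the port to 3370 (http) or 3371 (https).

import java.net.MalformedURLException;
import java.net.URL;

public final class LinstorUrlExample {
    // Mirrors the validation in initialize(): accept only http/https,
    // forbid a path component, and append the default port when absent.
    static String normalizeControllerUrl(String url) {
        if (!url.contains("://")) {
            url = "http://" + url;
        }
        try {
            URL controllerURL = new URL(url);
            if (!controllerURL.getProtocol().startsWith("http")) {
                throw new IllegalArgumentException("Linstor controller URL wrong protocol: " + url);
            }
            if (!controllerURL.getPath().isEmpty()) {
                throw new IllegalArgumentException("Linstor controller URL shouldn't have a path: " + url);
            }
            if (controllerURL.getPort() == -1) {
                int port = controllerURL.getProtocol().equals("https") ? 3371 : 3370;
                url += ":" + port;
            }
            return url;
        } catch (MalformedURLException e) {
            throw new IllegalArgumentException("Linstor controller URL is not valid: " + e);
        }
    }

    public static void main(String[] args) {
        System.out.println(normalizeControllerUrl("linstor.example.com"));      // http://linstor.example.com:3370
        System.out.println(normalizeControllerUrl("https://linstor.example.com")); // https://linstor.example.com:3371
    }
}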
Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
Class SolidFireSharedPrimaryDataStoreLifeCycle, method initialize.
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    final String CAPACITY_IOPS = "capacityIops";
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get(CAPACITY_IOPS);
    String tags = (String) dsInfos.get("tags");
    @SuppressWarnings("unchecked")
    Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    if (podId == null) {
        throw new CloudRuntimeException("The Pod ID must be specified.");
    }
    if (clusterId == null) {
        throw new CloudRuntimeException("The Cluster ID must be specified.");
    }
    String storageVip = SolidFireUtil.getStorageVip(url);
    int storagePort = SolidFireUtil.getStoragePort(url);
    if (capacityBytes == null || capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    if (capacityIops == null || capacityIops <= 0) {
        throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
    }
    HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
    if (!isSupportedHypervisorType(hypervisorType)) {
        throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
    }
    String datacenter = SolidFireUtil.getValue(SolidFireUtil.DATACENTER, url, false);
    if (HypervisorType.VMware.equals(hypervisorType) && datacenter == null) {
        throw new CloudRuntimeException("'Datacenter' must be set for hypervisor type of " + HypervisorType.VMware);
    }
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    parameters.setType(getStorageType(hypervisorType));
    parameters.setZoneId(zoneId);
    parameters.setPodId(podId);
    parameters.setClusterId(clusterId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(false);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(capacityIops);
    parameters.setHypervisorType(hypervisorType);
    parameters.setTags(tags);
    parameters.setDetails(details);
    String managementVip = SolidFireUtil.getManagementVip(url);
    int managementPort = SolidFireUtil.getManagementPort(url);
    details.put(SolidFireUtil.MANAGEMENT_VIP, managementVip);
    details.put(SolidFireUtil.MANAGEMENT_PORT, String.valueOf(managementPort));
    String clusterAdminUsername = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_USERNAME, url);
    String clusterAdminPassword = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, url);
    details.put(SolidFireUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
    details.put(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);
    if (capacityBytes < SolidFireUtil.MIN_VOLUME_SIZE) {
        capacityBytes = SolidFireUtil.MIN_VOLUME_SIZE;
    }
    long lMinIops = 100;
    long lMaxIops = 15000;
    long lBurstIops = 15000;
    try {
        String minIops = SolidFireUtil.getValue(SolidFireUtil.MIN_IOPS, url);
        if (minIops != null && minIops.trim().length() > 0) {
            lMinIops = Long.parseLong(minIops);
        }
    } catch (Exception ex) {
        LOGGER.info("[ignored] error getting Min IOPS: " + ex.getLocalizedMessage());
    }
    try {
        String maxIops = SolidFireUtil.getValue(SolidFireUtil.MAX_IOPS, url);
        if (maxIops != null && maxIops.trim().length() > 0) {
            lMaxIops = Long.parseLong(maxIops);
        }
    } catch (Exception ex) {
        LOGGER.info("[ignored] error getting Max IOPS: " + ex.getLocalizedMessage());
    }
    try {
        String burstIops = SolidFireUtil.getValue(SolidFireUtil.BURST_IOPS, url);
        if (burstIops != null && burstIops.trim().length() > 0) {
            lBurstIops = Long.parseLong(burstIops);
        }
    } catch (Exception ex) {
        LOGGER.info("[ignored] error getting Burst IOPS: " + ex.getLocalizedMessage());
    }
    if (lMinIops > lMaxIops) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.MIN_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.MAX_IOPS + "'.");
    }
    if (lMaxIops > lBurstIops) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.MAX_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.BURST_IOPS + "'.");
    }
    if (lMinIops != capacityIops) {
        throw new CloudRuntimeException("The parameter '" + CAPACITY_IOPS + "' must be equal to the parameter '" + SolidFireUtil.MIN_IOPS + "'.");
    }
    if (lMinIops > SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("This volume's Min IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) + " IOPS.");
    }
    if (lMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("This volume's Max IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
    }
    if (lBurstIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("This volume's Burst IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
    }
    details.put(SolidFireUtil.MIN_IOPS, String.valueOf(lMinIops));
    details.put(SolidFireUtil.MAX_IOPS, String.valueOf(lMaxIops));
    details.put(SolidFireUtil.BURST_IOPS, String.valueOf(lBurstIops));
    SolidFireUtil.SolidFireConnection sfConnection = new SolidFireUtil.SolidFireConnection(managementVip, managementPort, clusterAdminUsername, clusterAdminPassword);
    SolidFireCreateVolume sfCreateVolume = createSolidFireVolume(sfConnection, storagePoolName, capacityBytes, lMinIops, lMaxIops, lBurstIops);
    SolidFireUtil.SolidFireVolume sfVolume = sfCreateVolume.getVolume();
    String iqn = sfVolume.getIqn();
    details.put(SolidFireUtil.VOLUME_ID, String.valueOf(sfVolume.getId()));
    parameters.setUuid(iqn);
    if (HypervisorType.VMware.equals(hypervisorType)) {
        String datastore = iqn.replace("/", "_");
        String path = "/" + datacenter + "/" + datastore;
        parameters.setHost("VMFS datastore: " + path);
        parameters.setPort(0);
        parameters.setPath(path);
        details.put(SolidFireUtil.DATASTORE_NAME, datastore);
        details.put(SolidFireUtil.IQN, iqn);
        details.put(SolidFireUtil.STORAGE_VIP, storageVip);
        details.put(SolidFireUtil.STORAGE_PORT, String.valueOf(storagePort));
    } else {
        parameters.setHost(storageVip);
        parameters.setPort(storagePort);
        parameters.setPath(iqn);
    }
    ClusterVO cluster = clusterDao.findById(clusterId);
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(SolidFireUtil.LOCK_TIME_IN_SECONDS)) {
        String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
        LOGGER.debug(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    DataStore dataStore = null;
    try {
        // this adds a row in the cloud.storage_pool table for this SolidFire volume
        dataStore = primaryDataStoreHelper.createPrimaryDataStore(parameters);
        // now that we have a DataStore (we need the id from the DataStore instance),
        // we can create a Volume Access Group, if need be, and place the newly
        // created volume in the Volume Access Group
        List<HostVO> hosts = hostDao.findByClusterId(clusterId);
        String clusterUuId = clusterDao.findById(clusterId).getUuid();
        SolidFireUtil.placeVolumeInVolumeAccessGroups(sfConnection, sfVolume.getId(), hosts, clusterUuId);
        SolidFireUtil.SolidFireAccount sfAccount = sfCreateVolume.getAccount();
        Account csAccount = CallContext.current().getCallingAccount();
        SolidFireUtil.updateCsDbWithSolidFireAccountInfo(csAccount.getId(), sfAccount, dataStore.getId(), accountDetailsDao);
    } catch (Exception ex) {
        if (dataStore != null) {
            primaryDataStoreDao.expunge(dataStore.getId());
        }
        throw new CloudRuntimeException(ex.getMessage());
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
    return dataStore;
}
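The chain of IOPS checks above enforces a strict ordering: Min IOPS must not exceed Max IOPS, Max IOPS must not exceed Burst IOPS, Min IOPS must equal the requested capacityIops, and every value must stay within the per-volume limits. A condensed sketch of just that validation follows; the class name and the numeric limits are placeholders for illustration, not the real SolidFireUtil constants.

import java.text.NumberFormat;

public final class SolidFireIopsExample {
    // Placeholder limits standing in for SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME
    // and SolidFireUtil.MAX_IOPS_PER_VOLUME; the real values live in the plugin.
    static final long MAX_MIN_IOPS_PER_VOLUME = 15000;
    static final long MAX_IOPS_PER_VOLUME = 100000;

    // Mirrors the ordering checks in initialize(): min <= max <= burst,
    // min must equal the requested capacityIops, and each value must
    // respect the per-volume limits.
    static void validateIops(long min, long max, long burst, long capacityIops) {
        if (min > max) {
            throw new IllegalArgumentException("minIops must be <= maxIops");
        }
        if (max > burst) {
            throw new IllegalArgumentException("maxIops must be <= burstIops");
        }
        if (min != capacityIops) {
            throw new IllegalArgumentException("capacityIops must equal minIops");
        }
        if (min > MAX_MIN_IOPS_PER_VOLUME || max > MAX_IOPS_PER_VOLUME || burst > MAX_IOPS_PER_VOLUME) {
            throw new IllegalArgumentException("per-volume IOPS limit exceeded: "
                    + NumberFormat.getInstance().format(max));
        }
    }
}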
Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
Class ScaleIOPrimaryDataStoreLifeCycle, method attachCluster.
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
    final ClusterVO cluster = clusterDao.findById(scope.getScopeId());
    if (!isSupportedHypervisorType(cluster.getHypervisorType())) {
        throw new CloudRuntimeException("Unsupported hypervisor type: " + cluster.getHypervisorType().toString());
    }
    List<String> connectedSdcIps = null;
    try {
        ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStore.getId(), storagePoolDetailsDao);
        connectedSdcIps = client.listConnectedSdcIps();
    } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) {
        LOGGER.error("Failed to create storage pool", e);
        throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to create storage pool");
    }
    if (connectedSdcIps == null || connectedSdcIps.isEmpty()) {
        LOGGER.debug("No connected SDCs found for the PowerFlex storage pool");
        throw new CloudRuntimeException("Failed to create storage pool as connected SDCs not found");
    }
    PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore;
    List<HostVO> hostsInCluster = resourceManager.listAllUpAndEnabledHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId());
    if (hostsInCluster.isEmpty()) {
        primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
        throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + primaryDataStoreInfo.getClusterId());
    }
    LOGGER.debug("Attaching the pool to each of the hosts in the cluster: " + primaryDataStoreInfo.getClusterId());
    List<HostVO> poolHosts = new ArrayList<HostVO>();
    for (HostVO host : hostsInCluster) {
        try {
            if (connectedSdcIps.contains(host.getPrivateIpAddress())) {
                storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId());
                poolHosts.add(host);
            }
        } catch (Exception e) {
            LOGGER.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e);
        }
    }
    if (poolHosts.isEmpty()) {
        LOGGER.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'.");
        primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
        throw new CloudRuntimeException("Failed to create storage pool in the cluster: " + primaryDataStoreInfo.getClusterId() + " as it is not accessible to hosts");
    }
    dataStoreHelper.attachCluster(dataStore);
    return true;
}
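Note that only hosts whose private IP address appears in the gateway's connected-SDC list are connected to the pool. For illustration, the selection step can equivalently be expressed with streams; the sketch below assumes HostVO exposes getPrivateIpAddress() as used above and is not a proposed change to the plugin (per-host error handling around connectHostToSharedPool would still be needed as in the loop above).

import java.util.List;
import java.util.stream.Collectors;

final class SdcFilterExample {
    // Stream-based form of the SDC filter above: keep only the hosts whose
    // private IP the PowerFlex gateway reports as a connected SDC.
    static List<HostVO> filterConnectedHosts(List<HostVO> hostsInCluster, List<String> connectedSdcIps) {
        return hostsInCluster.stream()
                .filter(h -> connectedSdcIps.contains(h.getPrivateIpAddress()))
                .collect(Collectors.toList());
    }
}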
Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
Class SolidFireUtil, method hostAddedToCluster.
public static void hostAddedToCluster(long hostId, long clusterId, String storageProvider, ClusterDao clusterDao, HostDao hostDao, PrimaryDataStoreDao storagePoolDao, StoragePoolDetailsDao storagePoolDetailsDao) {
    HostVO hostVO = hostDao.findById(hostId);
    Preconditions.checkArgument(hostVO != null, "Could not locate host for ID: " + hostId);
    ClusterVO cluster = clusterDao.findById(clusterId);
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(LOCK_TIME_IN_SECONDS)) {
        String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
        LOGGER.warn(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    try {
        List<StoragePoolVO> storagePools = storagePoolDao.findPoolsByProvider(storageProvider);
        if (storagePools != null && storagePools.size() > 0) {
            List<SolidFireUtil.SolidFireConnection> sfConnections = new ArrayList<>();
            for (StoragePoolVO storagePool : storagePools) {
                if (!isStorageApplicableToZoneOrCluster(storagePool, clusterId, clusterDao)) {
                    continue;
                }
                SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePool.getId(), storagePoolDetailsDao);
                if (!sfConnections.contains(sfConnection)) {
                    sfConnections.add(sfConnection);
                    List<SolidFireUtil.SolidFireVag> sfVags = SolidFireUtil.getAllVags(sfConnection);
                    SolidFireVag sfVag = getVolumeAccessGroup(hostVO.getStorageUrl(), sfVags);
                    if (sfVag != null) {
                        placeVolumeIdsInVag(sfConnection, sfVags, sfVag, hostVO, hostDao);
                    } else {
                        handleVagForHost(sfConnection, sfVags, hostVO, hostDao, clusterDao);
                    }
                }
            }
        }
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
}
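Both this method and the SolidFire initialize() above serialize per-cluster work with the same GlobalLock idiom: intern a lock keyed by the cluster UUID, fail fast if it cannot be acquired, and release both the lock and its reference in a finally block. A minimal sketch of that pattern follows; withClusterLock and runCriticalSection are hypothetical names introduced for illustration, not CloudStack APIs.

// Minimal sketch of the GlobalLock pattern used above: serialize per-cluster
// work on an interned lock keyed by the cluster UUID, and always release both
// the lock and its reference.
static void withClusterLock(ClusterVO cluster, int lockTimeInSeconds, Runnable runCriticalSection) {
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(lockTimeInSeconds)) {
        throw new CloudRuntimeException("Couldn't lock the DB on the following string: " + cluster.getUuid());
    }
    try {
        runCriticalSection.run(); // placeholder for the guarded per-cluster work
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
}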
Use of com.cloud.dc.ClusterVO in project cloudstack by apache.
Class VolumeTest, method setUp.
@Before
public void setUp() {
    // create data center
    DataCenterVO dc = new DataCenterVO(UUID.randomUUID().toString(), "test", "8.8.8.8", null, "10.0.0.1", null, "10.0.0.1/24", null, null, NetworkType.Basic, null, null, true, true, null, null);
    dc = dcDao.persist(dc);
    dcId = dc.getId();
    // create pod
    HostPodVO pod = new HostPodVO(UUID.randomUUID().toString(), dc.getId(), "192.168.56.1", "192.168.56.0/24", 8, "test");
    pod = podDao.persist(pod);
    // create xen cluster
    ClusterVO cluster = new ClusterVO(dc.getId(), pod.getId(), "devcloud cluster");
    cluster.setHypervisorType(HypervisorType.XenServer.toString());
    cluster.setClusterType(ClusterType.CloudManaged);
    cluster.setManagedState(ManagedState.Managed);
    cluster = clusterDao.persist(cluster);
    clusterId = cluster.getId();
    // create xen host
    HostVO host = new HostVO(UUID.randomUUID().toString());
    host.setName("devcloud xen host");
    host.setType(Host.Type.Routing);
    host.setPrivateIpAddress("192.168.56.2");
    host.setDataCenterId(dc.getId());
    host.setVersion("6.0.1");
    host.setAvailable(true);
    host.setSetup(true);
    host.setLastPinged(0);
    host.setResourceState(ResourceState.Enabled);
    host.setClusterId(cluster.getId());
    host = hostDao.persist(host);
    List<HostVO> results = new ArrayList<HostVO>();
    results.add(host);
    Mockito.when(hostDao.listAll()).thenReturn(results);
    Mockito.when(hostDao.findHypervisorHostInCluster(Matchers.anyLong())).thenReturn(results);
    // CreateObjectAnswer createVolumeFromImageAnswer = new
    //     CreateObjectAnswer(null, UUID.randomUUID().toString(), null);
    // Mockito.when(primaryStoreDao.findById(Mockito.anyLong())).thenReturn(primaryStore);
}
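A hedged sketch of how a test method might consume the stubs configured in setUp(); the test below is illustrative only and not part of VolumeTest.

@Test
public void testMockedHostLookup() {
    // The stubs from setUp() make the DAO return the single persisted devcloud host.
    List<HostVO> hosts = hostDao.listAll();
    Assert.assertEquals(1, hosts.size());
    Assert.assertEquals("devcloud xen host", hosts.get(0).getName());
}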