Use of org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters in project cloudstack by apache.
The class SolidFirePrimaryDataStoreLifeCycle, method initialize.
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get("capacityIops");
    String tags = (String) dsInfos.get("tags");
    @SuppressWarnings("unchecked") Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    if (podId != null && clusterId == null) {
        throw new CloudRuntimeException("If the Pod ID is specified, the Cluster ID must also be specified.");
    }
    if (podId == null && clusterId != null) {
        throw new CloudRuntimeException("If the Pod ID is not specified, the Cluster ID must also not be specified.");
    }
    String storageVip = SolidFireUtil.getStorageVip(url);
    int storagePort = SolidFireUtil.getStoragePort(url);
    if (capacityBytes == null || capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    if (capacityIops == null || capacityIops <= 0) {
        throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
    }
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    parameters.setHost(storageVip);
    parameters.setPort(storagePort);
    parameters.setPath(SolidFireUtil.getModifiedUrl(url));
    parameters.setType(StoragePoolType.Iscsi);
    parameters.setUuid(UUID.randomUUID().toString());
    parameters.setZoneId(zoneId);
    parameters.setPodId(podId);
    parameters.setClusterId(clusterId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(true);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(capacityIops);
    if (clusterId != null) {
        ClusterVO clusterVO = _clusterDao.findById(clusterId);
        Preconditions.checkNotNull(clusterVO, "Unable to locate the specified cluster");
        parameters.setHypervisorType(clusterVO.getHypervisorType());
    } else {
        parameters.setHypervisorType(HypervisorType.Any);
    }
    parameters.setTags(tags);
    parameters.setDetails(details);
    String managementVip = SolidFireUtil.getManagementVip(url);
    int managementPort = SolidFireUtil.getManagementPort(url);
    details.put(SolidFireUtil.MANAGEMENT_VIP, managementVip);
    details.put(SolidFireUtil.MANAGEMENT_PORT, String.valueOf(managementPort));
    String clusterAdminUsername = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_USERNAME, url);
    String clusterAdminPassword = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, url);
    details.put(SolidFireUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
    details.put(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);
    long lClusterDefaultMinIops = 100;
    long lClusterDefaultMaxIops = 15000;
    float fClusterDefaultBurstIopsPercentOfMaxIops = 1.5f;
    try {
        String clusterDefaultMinIops = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS, url);
        if (clusterDefaultMinIops != null && clusterDefaultMinIops.trim().length() > 0) {
            lClusterDefaultMinIops = Long.parseLong(clusterDefaultMinIops);
        }
    } catch (NumberFormatException ex) {
        s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + ", using default value: " + lClusterDefaultMinIops + ". Exception: " + ex);
    }
    try {
        String clusterDefaultMaxIops = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS, url);
        if (clusterDefaultMaxIops != null && clusterDefaultMaxIops.trim().length() > 0) {
            lClusterDefaultMaxIops = Long.parseLong(clusterDefaultMaxIops);
        }
    } catch (NumberFormatException ex) {
        s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + ", using default value: " + lClusterDefaultMaxIops + ". Exception: " + ex);
    }
    try {
        String clusterDefaultBurstIopsPercentOfMaxIops = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS, url);
        if (clusterDefaultBurstIopsPercentOfMaxIops != null && clusterDefaultBurstIopsPercentOfMaxIops.trim().length() > 0) {
            fClusterDefaultBurstIopsPercentOfMaxIops = Float.parseFloat(clusterDefaultBurstIopsPercentOfMaxIops);
        }
    } catch (NumberFormatException ex) {
        s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS + ", using default value: " + fClusterDefaultBurstIopsPercentOfMaxIops + ". Exception: " + ex);
    }
    if (lClusterDefaultMinIops < SolidFireUtil.MIN_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be greater than or equal to " + SolidFireUtil.MIN_IOPS_PER_VOLUME + ".");
    }
    if (lClusterDefaultMinIops > SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be less than or equal to " + SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME + ".");
    }
    if (lClusterDefaultMinIops > lClusterDefaultMaxIops) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + "'.");
    }
    if (lClusterDefaultMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + "' must be less than or equal to " + SolidFireUtil.MAX_IOPS_PER_VOLUME + ".");
    }
    if (Float.compare(fClusterDefaultBurstIopsPercentOfMaxIops, 1.0f) < 0) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS + "' must be greater than or equal to 1.");
    }
    details.put(SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS, String.valueOf(lClusterDefaultMinIops));
    details.put(SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS, String.valueOf(lClusterDefaultMaxIops));
    details.put(SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS, String.valueOf(fClusterDefaultBurstIopsPercentOfMaxIops));
    // this adds a row in the cloud.storage_pool table for this SolidFire cluster
    return _dataStoreHelper.createPrimaryDataStore(parameters);
}
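For orientation, here is a rough caller-side sketch of the dsInfos map this method consumes. The map keys match the reads above; the semicolon-delimited url layout follows the SolidFire plug-in's documented form, but every concrete value and the lifeCycle variable are illustrative assumptions.

// Hypothetical caller-side sketch; all values are placeholders.
Map<String, Object> dsInfos = new HashMap<>();
dsInfos.put("url", "MVIP=10.10.0.10;SVIP=10.10.1.10;" +
        "clusterAdminUsername=admin;clusterAdminPassword=secret;" +
        "clusterDefaultMinIops=200;clusterDefaultMaxIops=20000");
dsInfos.put("zoneId", 1L);
// podId and clusterId omitted for zone-wide storage (specifying only one of them fails above)
dsInfos.put("name", "SolidFire-Primary");
dsInfos.put("providerName", "SolidFire");
dsInfos.put("capacityBytes", 1_099_511_627_776L); // 1 TiB
dsInfos.put("capacityIops", 100_000L);
dsInfos.put("details", new HashMap<String, String>());
DataStore store = lifeCycle.initialize(dsInfos);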
Use of org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters in project cloudstack by apache.
The class LinstorPrimaryDataStoreLifeCycleImpl, method initialize.
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    String tags = (String) dsInfos.get("tags");
    @SuppressWarnings("unchecked") Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    final String resourceGroup = details.get(LinstorUtil.RSC_GROUP);
    final String uuid = UUID.randomUUID().toString();
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    // cluster-wide primary storage: validate the scope, then set the podId and clusterId parameters
    if (clusterId != null) {
        if (podId == null) {
            throw new CloudRuntimeException("The Pod ID must be specified.");
        }
        if (zoneId == null) {
            throw new CloudRuntimeException("The Zone ID must be specified.");
        }
        ClusterVO cluster = clusterDao.findById(clusterId);
        if (cluster == null) {
            throw new CloudRuntimeException("Unable to locate the specified cluster: " + clusterId);
        }
        s_logger.info("Linstor: Setting Linstor cluster-wide primary storage uuid to " + uuid);
        parameters.setPodId(podId);
        parameters.setClusterId(clusterId);
        HypervisorType hypervisorType = cluster.getHypervisorType();
        if (!isSupportedHypervisorType(hypervisorType)) {
            throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
        }
    }
    if (!url.contains("://")) {
        url = "http://" + url;
    }
    URL controllerURL;
    int port = 3370;
    try {
        controllerURL = new URL(url);
        if (!controllerURL.getProtocol().startsWith("http")) {
            throw new IllegalArgumentException("Linstor controller URL wrong protocol: " + url);
        }
        if (!controllerURL.getPath().isEmpty()) {
            throw new IllegalArgumentException("Linstor controller URL shouldn't have a path: " + url);
        }
        if (controllerURL.getPort() == -1) {
            port = controllerURL.getProtocol().equals("https") ? 3371 : 3370;
            url += ":" + port;
        } else {
            // record the explicitly supplied port instead of the 3370 default
            port = controllerURL.getPort();
        }
    } catch (MalformedURLException e) {
        throw new IllegalArgumentException("Linstor controller URL is not valid: " + e);
    }
    long capacityBytes = LinstorUtil.getCapacityBytes(url, resourceGroup);
    if (capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    parameters.setHost(url);
    parameters.setPort(port);
    parameters.setPath(resourceGroup);
    parameters.setType(Storage.StoragePoolType.Linstor);
    parameters.setUuid(uuid);
    parameters.setZoneId(zoneId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(false);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(0L);
    parameters.setHypervisorType(HypervisorType.KVM);
    parameters.setTags(tags);
    parameters.setDetails(details);
    parameters.setUserInfo(resourceGroup);
    return dataStoreHelper.createPrimaryDataStore(parameters);
}
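The controller-URL normalization above is easiest to read in isolation. A minimal sketch under the same defaults (scheme falls back to http, port to 3370, or 3371 for https); the helper name is hypothetical.

// Hypothetical helper mirroring the normalization in initialize().
static String normalizeControllerUrl(String url) throws MalformedURLException {
    if (!url.contains("://")) {
        url = "http://" + url; // default scheme
    }
    URL u = new URL(url);
    if (u.getPort() == -1) {
        url += ":" + (u.getProtocol().equals("https") ? 3371 : 3370); // default ports
    }
    return url;
}
// normalizeControllerUrl("linstor.example.org") -> "http://linstor.example.org:3370"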
Use of org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters in project cloudstack by apache.
The class SolidFireSharedPrimaryDataStoreLifeCycle, method initialize.
// invoked to add primary storage that is based on the SolidFire plug-in
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    final String CAPACITY_IOPS = "capacityIops";
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get(CAPACITY_IOPS);
    String tags = (String) dsInfos.get("tags");
    @SuppressWarnings("unchecked") Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    if (podId == null) {
        throw new CloudRuntimeException("The Pod ID must be specified.");
    }
    if (clusterId == null) {
        throw new CloudRuntimeException("The Cluster ID must be specified.");
    }
    String storageVip = SolidFireUtil.getStorageVip(url);
    int storagePort = SolidFireUtil.getStoragePort(url);
    if (capacityBytes == null || capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    if (capacityIops == null || capacityIops <= 0) {
        throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
    }
    HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
    if (!isSupportedHypervisorType(hypervisorType)) {
        throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
    }
    String datacenter = SolidFireUtil.getValue(SolidFireUtil.DATACENTER, url, false);
    if (HypervisorType.VMware.equals(hypervisorType) && datacenter == null) {
        throw new CloudRuntimeException("'Datacenter' must be set for hypervisor type of " + HypervisorType.VMware);
    }
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    parameters.setType(getStorageType(hypervisorType));
    parameters.setZoneId(zoneId);
    parameters.setPodId(podId);
    parameters.setClusterId(clusterId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(false);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(capacityIops);
    parameters.setHypervisorType(hypervisorType);
    parameters.setTags(tags);
    parameters.setDetails(details);
    String managementVip = SolidFireUtil.getManagementVip(url);
    int managementPort = SolidFireUtil.getManagementPort(url);
    details.put(SolidFireUtil.MANAGEMENT_VIP, managementVip);
    details.put(SolidFireUtil.MANAGEMENT_PORT, String.valueOf(managementPort));
    String clusterAdminUsername = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_USERNAME, url);
    String clusterAdminPassword = SolidFireUtil.getValue(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, url);
    details.put(SolidFireUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
    details.put(SolidFireUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);
    if (capacityBytes < SolidFireUtil.MIN_VOLUME_SIZE) {
        capacityBytes = SolidFireUtil.MIN_VOLUME_SIZE;
    }
    long lMinIops = 100;
    long lMaxIops = 15000;
    long lBurstIops = 15000;
    try {
        String minIops = SolidFireUtil.getValue(SolidFireUtil.MIN_IOPS, url);
        if (minIops != null && minIops.trim().length() > 0) {
            lMinIops = Long.parseLong(minIops);
        }
    } catch (Exception ex) {
        LOGGER.info("[ignored] error getting Min IOPS: " + ex.getLocalizedMessage());
    }
    try {
        String maxIops = SolidFireUtil.getValue(SolidFireUtil.MAX_IOPS, url);
        if (maxIops != null && maxIops.trim().length() > 0) {
            lMaxIops = Long.parseLong(maxIops);
        }
    } catch (Exception ex) {
        LOGGER.info("[ignored] error getting Max IOPS: " + ex.getLocalizedMessage());
    }
    try {
        String burstIops = SolidFireUtil.getValue(SolidFireUtil.BURST_IOPS, url);
        if (burstIops != null && burstIops.trim().length() > 0) {
            lBurstIops = Long.parseLong(burstIops);
        }
    } catch (Exception ex) {
        LOGGER.info("[ignored] error getting Burst IOPS: " + ex.getLocalizedMessage());
    }
    if (lMinIops > lMaxIops) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.MIN_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.MAX_IOPS + "'.");
    }
    if (lMaxIops > lBurstIops) {
        throw new CloudRuntimeException("The parameter '" + SolidFireUtil.MAX_IOPS + "' must be less than or equal to the parameter '" + SolidFireUtil.BURST_IOPS + "'.");
    }
    if (lMinIops != capacityIops) {
        throw new CloudRuntimeException("The parameter '" + CAPACITY_IOPS + "' must be equal to the parameter '" + SolidFireUtil.MIN_IOPS + "'.");
    }
    if (lMinIops > SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("This volume's Min IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_MIN_IOPS_PER_VOLUME) + " IOPS.");
    }
    if (lMaxIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("This volume's Max IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
    }
    if (lBurstIops > SolidFireUtil.MAX_IOPS_PER_VOLUME) {
        throw new CloudRuntimeException("This volume's Burst IOPS cannot exceed " + NumberFormat.getInstance().format(SolidFireUtil.MAX_IOPS_PER_VOLUME) + " IOPS.");
    }
    details.put(SolidFireUtil.MIN_IOPS, String.valueOf(lMinIops));
    details.put(SolidFireUtil.MAX_IOPS, String.valueOf(lMaxIops));
    details.put(SolidFireUtil.BURST_IOPS, String.valueOf(lBurstIops));
    SolidFireUtil.SolidFireConnection sfConnection = new SolidFireUtil.SolidFireConnection(managementVip, managementPort, clusterAdminUsername, clusterAdminPassword);
    SolidFireCreateVolume sfCreateVolume = createSolidFireVolume(sfConnection, storagePoolName, capacityBytes, lMinIops, lMaxIops, lBurstIops);
    SolidFireUtil.SolidFireVolume sfVolume = sfCreateVolume.getVolume();
    String iqn = sfVolume.getIqn();
    details.put(SolidFireUtil.VOLUME_ID, String.valueOf(sfVolume.getId()));
    parameters.setUuid(iqn);
    if (HypervisorType.VMware.equals(hypervisorType)) {
        String datastore = iqn.replace("/", "_");
        String path = "/" + datacenter + "/" + datastore;
        parameters.setHost("VMFS datastore: " + path);
        parameters.setPort(0);
        parameters.setPath(path);
        details.put(SolidFireUtil.DATASTORE_NAME, datastore);
        details.put(SolidFireUtil.IQN, iqn);
        details.put(SolidFireUtil.STORAGE_VIP, storageVip);
        details.put(SolidFireUtil.STORAGE_PORT, String.valueOf(storagePort));
    } else {
        parameters.setHost(storageVip);
        parameters.setPort(storagePort);
        parameters.setPath(iqn);
    }
    ClusterVO cluster = clusterDao.findById(clusterId);
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(SolidFireUtil.LOCK_TIME_IN_SECONDS)) {
        String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
        LOGGER.debug(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    DataStore dataStore = null;
    try {
        // this adds a row in the cloud.storage_pool table for this SolidFire volume
        dataStore = primaryDataStoreHelper.createPrimaryDataStore(parameters);
        // now that we have a DataStore (we need the id from the DataStore instance), we can create a Volume Access Group, if need be, and
        // place the newly created volume in the Volume Access Group
        List<HostVO> hosts = hostDao.findByClusterId(clusterId);
        String clusterUuId = cluster.getUuid();
        SolidFireUtil.placeVolumeInVolumeAccessGroups(sfConnection, sfVolume.getId(), hosts, clusterUuId);
        SolidFireUtil.SolidFireAccount sfAccount = sfCreateVolume.getAccount();
        Account csAccount = CallContext.current().getCallingAccount();
        SolidFireUtil.updateCsDbWithSolidFireAccountInfo(csAccount.getId(), sfAccount, dataStore.getId(), accountDetailsDao);
    } catch (Exception ex) {
        if (dataStore != null) {
            primaryDataStoreDao.expunge(dataStore.getId());
        }
        throw new CloudRuntimeException(ex.getMessage());
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
    return dataStore;
}
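The create step is serialized per cluster through CloudStack's GlobalLock. Reduced to its skeleton (cluster and the timeout constant as in the method above), the pattern is:

// Per-cluster serialization skeleton of the code above.
GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
if (!lock.lock(SolidFireUtil.LOCK_TIME_IN_SECONDS)) {
    throw new CloudRuntimeException("Couldn't lock the DB on the following string: " + cluster.getUuid());
}
try {
    // critical section: create the storage_pool row, set up the Volume Access Group
} finally {
    lock.unlock();
    lock.releaseRef();
}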
Use of org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreLifeCycle, method initialize.
@SuppressWarnings("unchecked")
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String dataStoreName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get("capacityIops");
    String tags = (String) dsInfos.get("tags");
    Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    if (zoneId == null) {
        throw new CloudRuntimeException("Zone Id must be specified.");
    }
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    if (clusterId != null) {
        // Primary datastore is cluster-wide, check and set the podId and clusterId parameters
        if (podId == null) {
            throw new CloudRuntimeException("Pod Id must also be specified when the Cluster Id is specified for Cluster-wide primary storage.");
        }
        Hypervisor.HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
        if (!isSupportedHypervisorType(hypervisorType)) {
            throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString());
        }
        parameters.setPodId(podId);
        parameters.setClusterId(clusterId);
    } else if (podId != null) {
        throw new CloudRuntimeException("Cluster Id must also be specified when the Pod Id is specified for Cluster-wide primary storage.");
    }
    URI uri = null;
    try {
        uri = new URI(UriUtils.encodeURIComponent(url));
        if (uri.getScheme() == null || !uri.getScheme().equalsIgnoreCase("powerflex")) {
            throw new InvalidParameterValueException("scheme is invalid for url: " + url + ", should be powerflex://username:password@gatewayhost/pool");
        }
    } catch (Exception ignored) {
        throw new InvalidParameterValueException(url + " is not a valid uri");
    }
    String storagePoolName = null;
    try {
        storagePoolName = URLDecoder.decode(uri.getPath(), "UTF-8");
    } catch (UnsupportedEncodingException e) {
        LOGGER.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e);
    }
    if (storagePoolName == null) {
        // if decoding fails, use getPath() anyway
        storagePoolName = uri.getPath();
    }
    storagePoolName = storagePoolName.replaceFirst("/", "");
    final String storageHost = uri.getHost();
    final int port = uri.getPort();
    String gatewayApiURL = null;
    if (port == -1) {
        gatewayApiURL = String.format("https://%s/api", storageHost);
    } else {
        gatewayApiURL = String.format("https://%s:%d/api", storageHost, port);
    }
    final String userInfo = uri.getUserInfo();
    if (userInfo == null || !userInfo.contains(":")) {
        throw new InvalidParameterValueException("Gateway credentials are missing; the url should be powerflex://username:password@gatewayhost/pool");
    }
    final String gatewayUsername = userInfo.split(":")[0];
    final String gatewayPassword = userInfo.split(":")[1];
    List<StoragePoolVO> storagePoolVO = primaryDataStoreDao.findPoolsByProvider(ScaleIOUtil.PROVIDER_NAME);
    if (CollectionUtils.isNotEmpty(storagePoolVO)) {
        for (StoragePoolVO poolVO : storagePoolVO) {
            Map<String, String> poolDetails = primaryDataStoreDao.getDetails(poolVO.getId());
            String poolUrl = poolDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT);
            String poolName = poolDetails.get(ScaleIOGatewayClient.STORAGE_POOL_NAME);
            if (gatewayApiURL.equals(poolUrl) && storagePoolName.equals(poolName)) {
                throw new IllegalArgumentException("PowerFlex storage pool: " + storagePoolName + " already exists, please specify other storage pool.");
            }
        }
    }
    final org.apache.cloudstack.storage.datastore.api.StoragePool scaleIOPool = this.findStoragePool(gatewayApiURL, gatewayUsername, gatewayPassword, storagePoolName);
    parameters.setZoneId(zoneId);
    parameters.setName(dataStoreName);
    parameters.setProviderName(providerName);
    parameters.setManaged(true);
    parameters.setHost(storageHost);
    parameters.setPath(scaleIOPool.getId());
    parameters.setUserInfo(userInfo);
    parameters.setType(Storage.StoragePoolType.PowerFlex);
    parameters.setHypervisorType(Hypervisor.HypervisorType.KVM);
    parameters.setUuid(UUID.randomUUID().toString());
    parameters.setTags(tags);
    StoragePoolStatistics poolStatistics = scaleIOPool.getStatistics();
    if (poolStatistics != null) {
        if (capacityBytes == null) {
            parameters.setCapacityBytes(poolStatistics.getNetMaxCapacityInBytes());
        }
        parameters.setUsedBytes(poolStatistics.getNetUsedCapacityInBytes());
    }
    if (capacityBytes != null) {
        parameters.setCapacityBytes(capacityBytes);
    }
    if (capacityIops != null) {
        parameters.setCapacityIops(capacityIops);
    }
    details.put(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT, gatewayApiURL);
    details.put(ScaleIOGatewayClient.GATEWAY_API_USERNAME, DBEncryptionUtil.encrypt(gatewayUsername));
    details.put(ScaleIOGatewayClient.GATEWAY_API_PASSWORD, DBEncryptionUtil.encrypt(gatewayPassword));
    details.put(ScaleIOGatewayClient.STORAGE_POOL_NAME, storagePoolName);
    details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, scaleIOPool.getSystemId());
    parameters.setDetails(details);
    return dataStoreHelper.createPrimaryDataStore(parameters);
}
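To make the URI handling above concrete, a worked example against the documented form powerflex://username:password@gatewayhost/pool; host, credentials, and pool name are illustrative.

// Worked example (new URI(...) throws the checked URISyntaxException).
URI uri = new URI("powerflex://admin:secret@gateway.example.com/cloudstack-pool");
String userInfo = uri.getUserInfo();                          // "admin:secret"
String gatewayUsername = userInfo.split(":")[0];              // "admin"
String gatewayPassword = userInfo.split(":")[1];              // "secret"
String storagePoolName = uri.getPath().replaceFirst("/", ""); // "cloudstack-pool"
String gatewayApiURL = uri.getPort() == -1
        ? String.format("https://%s/api", uri.getHost())      // "https://gateway.example.com/api"
        : String.format("https://%s:%d/api", uri.getHost(), uri.getPort());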
Use of org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters in project cloudstack by apache.
The class ElastistorPrimaryDataStoreLifeCycle, method initialize.
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get("capacityIops");
    String tags = (String) dsInfos.get("tags");
    boolean managed = (Boolean) dsInfos.get("managed");
    Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    String domainName = details.get("domainname");
    String storageIp;
    int storagePort = 0;
    StoragePoolType storagetype = null;
    String accesspath = null;
    String protocoltype = null;
    String mountpoint = null;
    if (!managed) {
        storageIp = getStorageIp(url);
        storagePort = getDefaultStoragePort(url);
        storagetype = getStorageType(url);
        accesspath = getAccessPath(url);
        protocoltype = getProtocolType(url);
        String[] mp = accesspath.split("/");
        mountpoint = mp[1];
    } else if ("KVM".equals(details.get("hypervisortype"))) {
        storageIp = url;
        storagePort = 3260;
        storagetype = StoragePoolType.Iscsi;
        accesspath = storageIp + ":/" + storagePoolName;
    } else {
        storageIp = url;
        storagePort = 2049;
        storagetype = StoragePoolType.NetworkFilesystem;
        accesspath = storageIp + ":/" + storagePoolName;
    }
    /**
     * if the elastistor params which are required for plugin configuration
     * are not injected through spring-storage-volume-cloudbyte-context.xml,
     * they can be set from the details map.
     */
    if (details.get("esaccountid") != null)
        ElastistorUtil.setElastistorAccountId(details.get("esaccountid"));
    if (details.get("esdefaultgateway") != null)
        ElastistorUtil.setElastistorGateway(details.get("esdefaultgateway"));
    if (details.get("estntinterface") != null)
        ElastistorUtil.setElastistorInterface(details.get("estntinterface"));
    if (details.get("espoolid") != null)
        ElastistorUtil.setElastistorPoolId(details.get("espoolid"));
    if (details.get("essubnet") != null)
        ElastistorUtil.setElastistorSubnet(details.get("essubnet"));
    s_logger.info("Elastistor details were set successfully.");
    if (capacityBytes == null || capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    if (capacityIops == null || capacityIops <= 0) {
        throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
    }
    if (domainName == null) {
        domainName = "ROOT";
        s_logger.debug("setting the domain to ROOT");
    }
    // elastistor does not allow pools with a duplicate name or ip.
    List<StoragePoolVO> storagePoolVO = _storagePoolDao.listAll();
    for (StoragePoolVO poolVO : storagePoolVO) {
        if (storagePoolName.equals(poolVO.getName())) {
            throw new IllegalArgumentException("Storage pool with this name already exists in elastistor, please specify a unique name. [name:" + storagePoolName + "]");
        }
        if (storageIp.equals(poolVO.getHostAddress())) {
            throw new IllegalArgumentException("Storage pool with this ip already exists in elastistor, please specify a unique ip. [ip:" + storageIp + "]");
        }
    }
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    parameters.setHost(storageIp);
    parameters.setPort(storagePort);
    parameters.setPath(accesspath);
    parameters.setType(storagetype);
    parameters.setZoneId(zoneId);
    parameters.setPodId(podId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(managed);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(capacityIops);
    parameters.setHypervisorType(HypervisorType.Any);
    parameters.setTags(tags);
    parameters.setDetails(details);
    parameters.setClusterId(clusterId);
    Tsm tsm = null;
    if (managed) {
        // creates the TSM in elastistor
        tsm = createElastistorTSM(storagePoolName, storageIp, capacityBytes, capacityIops, domainName);
    } else {
        // creates the TSM & Volume in elastistor
        tsm = createElastistorTSM(storagePoolName, storageIp, capacityBytes, capacityIops, domainName);
        parameters = createElastistorVolume(parameters, tsm, storagePoolName, capacityBytes, capacityIops, protocoltype, mountpoint);
    }
    // setting tsm's uuid as storagepool's uuid
    parameters.setUuid(tsm.getUuid());
    return _dataStoreHelper.createPrimaryDataStore(parameters);
}
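For reference, a sketch of how the Elastistor settings read above might be supplied through the details map. The key names are the ones the method consumes; the values, and the readings in the comments, are illustrative assumptions.

// Placeholder values; key names as read by initialize().
Map<String, String> details = new HashMap<>();
details.put("esaccountid", "es-account-1");   // Elastistor account id
details.put("esdefaultgateway", "10.0.0.1");  // gateway handed to ElastistorUtil
details.put("estntinterface", "em0");         // tenant network interface
details.put("espoolid", "es-pool-1");         // backing Elastistor pool id
details.put("essubnet", "24");                // subnet value passed to ElastistorUtil
details.put("domainname", "ROOT");            // defaults to ROOT when omitted
details.put("hypervisortype", "KVM");         // managed + KVM selects iSCSI (3260) over NFS (2049)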