Use of org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters in the Apache CloudStack project.
The class DateraPrimaryDataStoreLifeCycle, method initialize.
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    // Registers a Datera-backed primary storage pool. Builds the
    // PrimaryDataStoreParameters from the caller-supplied dsInfos map (the
    // Datera connection settings are packed into the "url" value) and
    // persists the pool via dataStoreHelper.
    //
    // Throws CloudRuntimeException / IllegalArgumentException when required
    // ids or capacity values are missing or out of range.
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    Long podId = (Long) dsInfos.get("podId");
    Long clusterId = (Long) dsInfos.get("clusterId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get("capacityIops");
    String tags = (String) dsInfos.get("tags");
    @SuppressWarnings("unchecked") Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    String domainName = details.get("domainname");

    // Datera connection settings are encoded inside the storage url.
    String storageVip = DateraUtil.getStorageVip(url);
    int storagePort = DateraUtil.getStoragePort(url);
    int numReplicas = DateraUtil.getNumReplicas(url);
    String volPlacement = DateraUtil.getVolPlacement(url);
    String clusterAdminUsername = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_USERNAME, url);
    String clusterAdminPassword = DateraUtil.getValue(DateraUtil.CLUSTER_ADMIN_PASSWORD, url);
    String uuid;
    String randomString;
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();

    // Builds the uuid and then sets the podId and clusterId parameters.
    if (clusterId != null) {
        // Cluster-scoped pool: pod and zone ids are mandatory and the pool
        // uuid embeds the cluster uuid.
        if (podId == null) {
            throw new CloudRuntimeException("The Pod ID must be specified.");
        }
        if (zoneId == null) {
            throw new CloudRuntimeException("The Zone ID must be specified.");
        }
        ClusterVO cluster = _clusterDao.findById(clusterId);
        String clusterUuid = cluster.getUuid();
        randomString = DateraUtil.generateUUID(clusterUuid);
        uuid = DateraUtil.PROVIDER_NAME + "_" + clusterUuid + "_" + randomString;
        s_logger.debug("Datera - Setting Datera cluster-wide primary storage uuid to " + uuid);
        parameters.setPodId(podId);
        parameters.setClusterId(clusterId);
        HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId);
        if (!isSupportedHypervisorType(hypervisorType)) {
            throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
        }
    } else {
        // Zone-wide pool: the uuid embeds the zone uuid. Fail fast if the
        // zone id is absent instead of NPE-ing inside the DAO lookup.
        if (zoneId == null) {
            throw new CloudRuntimeException("The Zone ID must be specified.");
        }
        DataCenterVO zone = zoneDao.findById(zoneId);
        String zoneUuid = zone.getUuid();
        randomString = DateraUtil.generateUUID(zoneUuid);
        uuid = DateraUtil.PROVIDER_NAME + "_" + zoneUuid + "_" + randomString;
        s_logger.debug("Datera - Setting Datera zone-wide primary storage uuid to " + uuid);
    }

    if (capacityBytes == null || capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    if (capacityIops == null || capacityIops <= 0) {
        throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
    }
    if (domainName == null) {
        // Fall back to the Datera root domain when none was supplied.
        domainName = "ROOT";
        s_logger.debug("setting the domain to ROOT");
    }
    s_logger.debug("Datera - domainName: " + domainName);

    parameters.setHost(storageVip);
    parameters.setPort(storagePort);
    parameters.setPath(DateraUtil.getModifiedUrl(url));
    parameters.setType(StoragePoolType.Iscsi);
    parameters.setUuid(uuid);
    parameters.setZoneId(zoneId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(true);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(capacityIops);
    parameters.setHypervisorType(HypervisorType.Any);
    parameters.setTags(tags);
    parameters.setDetails(details);

    // Stash the management endpoint and credentials in the pool details so
    // the driver can reach the Datera cluster later.
    String managementVip = DateraUtil.getManagementVip(url);
    int managementPort = DateraUtil.getManagementPort(url);
    details.put(DateraUtil.MANAGEMENT_VIP, managementVip);
    details.put(DateraUtil.MANAGEMENT_PORT, String.valueOf(managementPort));
    details.put(DateraUtil.CLUSTER_ADMIN_USERNAME, clusterAdminUsername);
    details.put(DateraUtil.CLUSTER_ADMIN_PASSWORD, clusterAdminPassword);

    // Optional per-cluster IOPS defaults; keep the hard-coded defaults on
    // parse failure rather than aborting pool creation.
    long lClusterDefaultMinIops = 100;
    long lClusterDefaultMaxIops = 15000;
    try {
        String clusterDefaultMinIops = DateraUtil.getValue(DateraUtil.CLUSTER_DEFAULT_MIN_IOPS, url);
        if (clusterDefaultMinIops != null && clusterDefaultMinIops.trim().length() > 0) {
            lClusterDefaultMinIops = Long.parseLong(clusterDefaultMinIops);
        }
    } catch (NumberFormatException ex) {
        s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS + ", using default value: " + lClusterDefaultMinIops + ". Exception: " + ex);
    }
    try {
        String clusterDefaultMaxIops = DateraUtil.getValue(DateraUtil.CLUSTER_DEFAULT_MAX_IOPS, url);
        if (clusterDefaultMaxIops != null && clusterDefaultMaxIops.trim().length() > 0) {
            lClusterDefaultMaxIops = Long.parseLong(clusterDefaultMaxIops);
        }
    } catch (NumberFormatException ex) {
        s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + ", using default value: " + lClusterDefaultMaxIops + ". Exception: " + ex);
    }
    if (lClusterDefaultMinIops > lClusterDefaultMaxIops) {
        throw new CloudRuntimeException("The parameter '" + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS + "' must be less than or equal to the parameter '" + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + "'.");
    }
    // Bug fix: the message previously interpolated CLUSTER_DEFAULT_MAX_IOPS
    // (and a stray quote) instead of the actual replica bounds.
    if (numReplicas < DateraUtil.MIN_NUM_REPLICAS || numReplicas > DateraUtil.MAX_NUM_REPLICAS) {
        throw new CloudRuntimeException("The parameter '" + DateraUtil.NUM_REPLICAS + "' must be between " + DateraUtil.MIN_NUM_REPLICAS + " and " + DateraUtil.MAX_NUM_REPLICAS);
    }

    details.put(DateraUtil.CLUSTER_DEFAULT_MIN_IOPS, String.valueOf(lClusterDefaultMinIops));
    details.put(DateraUtil.CLUSTER_DEFAULT_MAX_IOPS, String.valueOf(lClusterDefaultMaxIops));
    // Reuse the values already parsed from the url above.
    details.put(DateraUtil.NUM_REPLICAS, String.valueOf(numReplicas));
    details.put(DateraUtil.VOL_PLACEMENT, String.valueOf(volPlacement));
    details.put(DateraUtil.IP_POOL, String.valueOf(DateraUtil.getIpPool(url)));

    // Adds the row in cloud.storage_pool for this pool.
    return dataStoreHelper.createPrimaryDataStore(parameters);
}
Use of org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters in the Apache CloudStack project.
The class CloudStackPrimaryDataStoreLifeCycleImpl, method initialize.
@SuppressWarnings("unchecked")
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    // Registers a primary storage pool for the default (non-managed)
    // provider. Parses the storage url, validates it per scheme, maps the
    // scheme to a StoragePoolType, derives/validates the pool uuid, and
    // persists the pool via dataStoreHelper.
    Long clusterId = (Long) dsInfos.get("clusterId");
    Long podId = (Long) dsInfos.get("podId");
    Long zoneId = (Long) dsInfos.get("zoneId");
    String url = (String) dsInfos.get("url");
    String providerName = (String) dsInfos.get("providerName");
    HypervisorType hypervisorType = (HypervisorType) dsInfos.get("hypervisorType");
    if (clusterId != null && podId == null) {
        throw new InvalidParameterValueException("Cluster id requires pod id");
    }
    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    URI uri = null;
    try {
        // Per-scheme sanity checks before any DB work.
        uri = new URI(UriUtils.encodeURIComponent(url));
        if (uri.getScheme() == null) {
            throw new InvalidParameterValueException("scheme is null " + url + ", add nfs:// (or cifs://) as a prefix");
        } else if (uri.getScheme().equalsIgnoreCase("nfs")) {
            String uriHost = uri.getHost();
            String uriPath = uri.getPath();
            if (uriHost == null || uriPath == null || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) {
                throw new InvalidParameterValueException("host or path is null, should be nfs://hostname/path");
            }
        } else if (uri.getScheme().equalsIgnoreCase("cifs")) {
            // Don't validate against a URI encoded URI.
            URI cifsUri = new URI(url);
            String warnMsg = UriUtils.getCifsUriParametersProblems(cifsUri);
            if (warnMsg != null) {
                throw new InvalidParameterValueException(warnMsg);
            }
        } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) {
            String uriPath = uri.getPath();
            if (uriPath == null) {
                throw new InvalidParameterValueException("host or path is null, should be sharedmountpoint://localhost/path");
            }
        } else if (uri.getScheme().equalsIgnoreCase("rbd")) {
            String uriPath = uri.getPath();
            if (uriPath == null) {
                throw new InvalidParameterValueException("host or path is null, should be rbd://hostname/pool");
            }
        } else if (uri.getScheme().equalsIgnoreCase("gluster")) {
            String uriHost = uri.getHost();
            String uriPath = uri.getPath();
            if (uriHost == null || uriPath == null || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) {
                throw new InvalidParameterValueException("host or path is null, should be gluster://hostname/volume");
            }
        }
    } catch (URISyntaxException e) {
        throw new InvalidParameterValueException(url + " is not a valid uri");
    }

    String tags = (String) dsInfos.get("tags");
    Map<String, String> details = (Map<String, String>) dsInfos.get("details");
    parameters.setTags(tags);
    parameters.setDetails(details);

    String scheme = uri.getScheme();
    String storageHost = uri.getHost();
    String hostPath = null;
    try {
        hostPath = URLDecoder.decode(uri.getPath(), "UTF-8");
    } catch (UnsupportedEncodingException e) {
        s_logger.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e);
    }
    if (hostPath == null) {
        // if decoding fails, use getPath() anyway
        hostPath = uri.getPath();
    }
    Object localStorage = dsInfos.get("localStorage");
    if (localStorage != null) {
        // Local-storage paths arrive with a leading '/' and '+'-encoded
        // spaces; normalize both.
        hostPath = hostPath.replaceFirst("/", "");
        hostPath = hostPath.replace("+", " ");
    }
    String userInfo = uri.getUserInfo();
    int port = uri.getPort();
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("createPool Params @ scheme - " + scheme + " storageHost - " + storageHost + " hostPath - " + hostPath + " port - " + port);
    }

    // Map the scheme to a StoragePoolType, filling in default ports where
    // the url omitted them.
    if (scheme.equalsIgnoreCase("nfs")) {
        if (port == -1) {
            port = 2049;
        }
        parameters.setType(StoragePoolType.NetworkFilesystem);
        parameters.setHost(storageHost);
        parameters.setPort(port);
        parameters.setPath(hostPath);
    } else if (scheme.equalsIgnoreCase("cifs")) {
        if (port == -1) {
            port = 445;
        }
        parameters.setType(StoragePoolType.SMB);
        parameters.setHost(storageHost);
        parameters.setPort(port);
        parameters.setPath(hostPath);
    } else if (scheme.equalsIgnoreCase("file")) {
        // Local filesystem pool; port is not meaningful here.
        parameters.setType(StoragePoolType.Filesystem);
        parameters.setHost("localhost");
        parameters.setPort(0);
        parameters.setPath(hostPath);
    } else if (scheme.equalsIgnoreCase("sharedMountPoint")) {
        parameters.setType(StoragePoolType.SharedMountPoint);
        parameters.setHost(storageHost);
        parameters.setPort(0);
        parameters.setPath(hostPath);
    } else if (scheme.equalsIgnoreCase("clvm")) {
        parameters.setType(StoragePoolType.CLVM);
        parameters.setHost(storageHost);
        parameters.setPort(0);
        parameters.setPath(hostPath.replaceFirst("/", ""));
    } else if (scheme.equalsIgnoreCase("rbd")) {
        if (port == -1) {
            port = 0;
        }
        parameters.setType(StoragePoolType.RBD);
        parameters.setHost(storageHost);
        parameters.setPort(port);
        parameters.setPath(hostPath.replaceFirst("/", ""));
        parameters.setUserInfo(userInfo);
    } else if (scheme.equalsIgnoreCase("PreSetup")) {
        if (HypervisorType.VMware.equals(hypervisorType)) {
            validateVcenterDetails(zoneId, podId, clusterId, storageHost);
        }
        parameters.setType(StoragePoolType.PreSetup);
        parameters.setHost(storageHost);
        parameters.setPort(0);
        parameters.setPath(hostPath);
    } else if (scheme.equalsIgnoreCase("DatastoreCluster")) {
        if (HypervisorType.VMware.equals(hypervisorType)) {
            validateVcenterDetails(zoneId, podId, clusterId, storageHost);
        }
        parameters.setType(StoragePoolType.DatastoreCluster);
        parameters.setHost(storageHost);
        parameters.setPort(0);
        parameters.setPath(hostPath);
    } else if (scheme.equalsIgnoreCase("iscsi")) {
        // The LUN number is the last path segment.
        String[] tokens = hostPath.split("/");
        int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1);
        if (port == -1) {
            port = 3260;
        }
        if (lun != -1) {
            if (clusterId == null) {
                throw new IllegalArgumentException("IscsiLUN need to have clusters specified");
            }
            parameters.setType(StoragePoolType.IscsiLUN);
            parameters.setHost(storageHost);
            parameters.setPort(port);
            parameters.setPath(hostPath);
        } else {
            throw new IllegalArgumentException("iSCSI needs to have LUN number");
        }
    } else if (scheme.equalsIgnoreCase("iso")) {
        if (port == -1) {
            port = 2049;
        }
        parameters.setType(StoragePoolType.ISO);
        parameters.setHost(storageHost);
        parameters.setPort(port);
        parameters.setPath(hostPath);
    } else if (scheme.equalsIgnoreCase("vmfs")) {
        parameters.setType(StoragePoolType.VMFS);
        parameters.setHost("VMFS datastore: " + hostPath);
        parameters.setPort(0);
        parameters.setPath(hostPath);
    } else if (scheme.equalsIgnoreCase("ocfs2")) {
        port = 7777;
        parameters.setType(StoragePoolType.OCFS2);
        parameters.setHost("clustered");
        parameters.setPort(port);
        parameters.setPath(hostPath);
    } else if (scheme.equalsIgnoreCase("gluster")) {
        if (port == -1) {
            port = 24007;
        }
        parameters.setType(StoragePoolType.Gluster);
        parameters.setHost(storageHost);
        parameters.setPort(port);
        parameters.setPath(hostPath);
    } else {
        // Bug fix: Enum.valueOf never returns null - it throws
        // IllegalArgumentException for an unknown constant - so the old
        // null check was dead code and the helpful error below was
        // unreachable. Catch the exception instead.
        StoragePoolType type = null;
        try {
            type = Enum.valueOf(StoragePoolType.class, scheme);
        } catch (IllegalArgumentException ignored) {
            // Unknown scheme; handled below with a clearer message.
        }
        if (type != null) {
            parameters.setType(type);
            parameters.setHost(storageHost);
            parameters.setPort(0);
            parameters.setPath(hostPath);
        } else {
            s_logger.warn("Unable to figure out the scheme for URI: " + uri);
            throw new IllegalArgumentException("Unable to figure out the scheme for URI: " + uri);
        }
    }

    // Refuse to reuse a host/path combination already claimed by another
    // pod (sharedmountpoint pools may legitimately be shared).
    if (localStorage == null) {
        List<StoragePoolVO> pools = primaryDataStoreDao.listPoolByHostPath(storageHost, hostPath);
        if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) {
            Long oldPodId = pools.get(0).getPodId();
            throw new CloudRuntimeException("Storage pool " + uri + " already in use by another pod (id=" + oldPodId + ")");
        }
    }

    // Derive the pool uuid: caller-supplied wins; sharedmountpoint/clvm get
    // a random one; non-VMware PreSetup uses the path; everything else is a
    // stable hash of host+path.
    Object existingUuid = dsInfos.get("uuid");
    String uuid = null;
    if (existingUuid != null) {
        uuid = (String) existingUuid;
    } else if (scheme.equalsIgnoreCase("sharedmountpoint") || scheme.equalsIgnoreCase("clvm")) {
        uuid = UUID.randomUUID().toString();
    } else if ("PreSetup".equalsIgnoreCase(scheme) && !HypervisorType.VMware.equals(hypervisorType)) {
        uuid = hostPath.replace("/", "");
    } else {
        uuid = UUID.nameUUIDFromBytes((storageHost + hostPath).getBytes()).toString();
    }

    List<StoragePoolVO> spHandles = primaryDataStoreDao.findIfDuplicatePoolsExistByUUID(uuid);
    if ((spHandles != null) && (spHandles.size() > 0)) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Another active pool with the same uuid already exists");
        }
        throw new CloudRuntimeException("Another active pool with the same uuid already exists");
    }

    String poolName = (String) dsInfos.get("name");
    parameters.setUuid(uuid);
    parameters.setZoneId(zoneId);
    parameters.setPodId(podId);
    parameters.setName(poolName);
    parameters.setClusterId(clusterId);
    parameters.setProviderName(providerName);
    parameters.setHypervisorType(hypervisorType);
    return dataStoreHelper.createPrimaryDataStore(parameters);
}
Use of org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters in the Apache CloudStack project.
The class NexentaPrimaryDataStoreLifeCycle, method initialize.
@SuppressWarnings("unchecked")
@Override
public DataStore initialize(Map<String, Object> dsInfos) {
    // Registers a Nexenta-backed, zone-wide managed primary storage pool.
    // The Nexenta connection settings are packed into the "url" value and
    // parsed by NexentaUtil; the pool is persisted via dataStoreHelper.
    //
    // Throws IllegalArgumentException when capacityBytes/capacityIops are
    // missing or non-positive.
    String url = (String) dsInfos.get("url");
    Long zoneId = (Long) dsInfos.get("zoneId");
    String storagePoolName = (String) dsInfos.get("name");
    String providerName = (String) dsInfos.get("providerName");
    Long capacityBytes = (Long) dsInfos.get("capacityBytes");
    Long capacityIops = (Long) dsInfos.get("capacityIops");
    String tags = (String) dsInfos.get("tags");
    Map<String, String> details = (Map<String, String>) dsInfos.get("details");

    // Validate the cheap arguments before touching the database.
    if (capacityBytes == null || capacityBytes <= 0) {
        throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
    }
    if (capacityIops == null || capacityIops <= 0) {
        throw new IllegalArgumentException("'capacityIops' must be present and greater than 0.");
    }

    NexentaUtil.NexentaPluginParameters params = NexentaUtil.parseNexentaPluginUrl(url);
    DataCenterVO zone = zoneDao.findById(zoneId);
    // Stable pool uuid derived from provider, zone and NMS host.
    String uuid = String.format("%s_%s_%s", NexentaUtil.PROVIDER_NAME, zone.getUuid(), params.getNmsUrl().getHost());

    PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
    parameters.setHost(params.getStorageHost());
    parameters.setPort(params.getStoragePort());
    parameters.setPath(params.getStoragePath());
    parameters.setType(params.getStorageType());
    parameters.setUuid(uuid);
    parameters.setZoneId(zoneId);
    parameters.setName(storagePoolName);
    parameters.setProviderName(providerName);
    parameters.setManaged(true);
    parameters.setCapacityBytes(capacityBytes);
    parameters.setUsedBytes(0);
    parameters.setCapacityIops(capacityIops);
    parameters.setHypervisorType(Hypervisor.HypervisorType.Any);
    parameters.setTags(tags);

    // Stash the Nexenta connection settings in the pool details so the
    // driver can reach the appliance later.
    details.put(NexentaUtil.NMS_URL, params.getNmsUrl().toString());
    details.put(NexentaUtil.VOLUME, params.getVolume());
    details.put(NexentaUtil.SPARSE_VOLUMES, params.isSparseVolumes().toString());
    details.put(NexentaUtil.STORAGE_TYPE, params.getStorageType().toString());
    details.put(NexentaUtil.STORAGE_HOST, params.getStorageHost());
    details.put(NexentaUtil.STORAGE_PORT, params.getStoragePort().toString());
    details.put(NexentaUtil.STORAGE_PATH, params.getStoragePath());
    parameters.setDetails(details);

    // This adds a row in the cloud.storage_pool table for this Nexenta pool.
    return dataStoreHelper.createPrimaryDataStore(parameters);
}
Aggregations