Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreDriver, method getStorageStats.
@Override
public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
    Preconditions.checkArgument(storagePool != null, "storagePool cannot be null");
    try {
        final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId());
        StoragePoolStatistics poolStatistics = client.getStoragePoolStatistics(storagePool.getPath());
        if (poolStatistics != null && poolStatistics.getNetMaxCapacityInBytes() != null && poolStatistics.getNetUsedCapacityInBytes() != null) {
            Long capacityBytes = poolStatistics.getNetMaxCapacityInBytes();
            Long usedBytes = poolStatistics.getNetUsedCapacityInBytes();
            return new Pair<Long, Long>(capacityBytes, usedBytes);
        }
    } catch (Exception e) {
        String errMsg = "Unable to get storage stats for the pool: " + storagePool.getId() + " due to " + e.getMessage();
        LOGGER.warn(errMsg);
        throw new CloudRuntimeException(errMsg, e);
    }
    return null;
}
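The returned pair is (capacity bytes, used bytes), and the method returns null when the gateway reports incomplete statistics. As a hedged illustration of consuming it, the helper below computes a used-capacity fraction; the method is a sketch, not part of CloudStack, and its name is assumed.

// Hypothetical helper (name assumed, not CloudStack code) built on getStorageStats().
private double getUsedFraction(StoragePool pool) {
    Pair<Long, Long> stats = getStorageStats(pool); // (capacity bytes, used bytes)
    if (stats == null || stats.first() == null || stats.first() == 0L) {
        return 0.0; // stats unavailable, or zero-capacity pool
    }
    return stats.second().doubleValue() / stats.first().doubleValue();
}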
Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreDriver, method migrateVolume.
private Answer migrateVolume(DataObject srcData, DataObject destData) {
    // Volume migration within the same PowerFlex/ScaleIO cluster (with the same System ID)
    DataStore srcStore = srcData.getDataStore();
    DataStore destStore = destData.getDataStore();
    Answer answer = null;
    try {
        long srcPoolId = srcStore.getId();
        long destPoolId = destStore.getId();
        final ScaleIOGatewayClient client = getScaleIOClient(srcPoolId);
        final String srcVolumePath = ((VolumeInfo) srcData).getPath();
        final String srcVolumeId = ScaleIOUtil.getVolumePath(srcVolumePath);
        final StoragePoolVO destStoragePool = storagePoolDao.findById(destPoolId);
        final String destStoragePoolId = destStoragePool.getPath();
        int migrationTimeout = StorageManager.KvmStorageOfflineMigrationWait.value();
        boolean migrateStatus = client.migrateVolume(srcVolumeId, destStoragePoolId, migrationTimeout);
        if (migrateStatus) {
            String newVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.VOLUME_PREFIX, destData.getId(), destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value());
            boolean renamed = client.renameVolume(srcVolumeId, newVolumeName);
            if (srcData.getId() != destData.getId()) {
                // Offline migration: the destination is a separate volume record
                VolumeVO destVolume = volumeDao.findById(destData.getId());
                // Update the PowerFlex volume name only after it is renamed, to maintain consistency
                if (renamed) {
                    String newVolumePath = ScaleIOUtil.updatedPathWithVolumeName(srcVolumeId, newVolumeName);
                    destVolume.set_iScsiName(newVolumePath);
                    destVolume.setPath(newVolumePath);
                } else {
                    destVolume.set_iScsiName(srcVolumePath);
                    destVolume.setPath(srcVolumePath);
                }
                volumeDao.update(destData.getId(), destVolume);
                // Clear path details on the source record, which no longer backs the volume
                VolumeVO srcVolume = volumeDao.findById(srcData.getId());
                srcVolume.set_iScsiName(null);
                srcVolume.setPath(null);
                srcVolume.setFolder(null);
                volumeDao.update(srcData.getId(), srcVolume);
            } else {
                // Live migration: same volume record, only the pool assignment changes
                VolumeVO volume = volumeDao.findById(srcData.getId());
                Long oldPoolId = volume.getPoolId();
                volume.setPoolId(destPoolId);
                volume.setLastPoolId(oldPoolId);
                volumeDao.update(srcData.getId(), volume);
            }
            // Snapshots migrate along with the volume; repoint and rename each one
            List<SnapshotVO> snapshots = snapshotDao.listByVolumeId(srcData.getId());
            if (CollectionUtils.isNotEmpty(snapshots)) {
                for (SnapshotVO snapshot : snapshots) {
                    SnapshotDataStoreVO snapshotStore = snapshotDataStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary);
                    if (snapshotStore == null) {
                        continue;
                    }
                    String snapshotVolumeId = ScaleIOUtil.getVolumePath(snapshotStore.getInstallPath());
                    String newSnapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.SNAPSHOT_PREFIX, snapshot.getId(), destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value());
                    renamed = client.renameVolume(snapshotVolumeId, newSnapshotName);
                    snapshotStore.setDataStoreId(destPoolId);
                    // Update the PowerFlex snapshot name only after it is renamed, to maintain consistency
                    if (renamed) {
                        snapshotStore.setInstallPath(ScaleIOUtil.updatedPathWithVolumeName(snapshotVolumeId, newSnapshotName));
                    }
                    snapshotDataStoreDao.update(snapshotStore.getId(), snapshotStore);
                }
            }
            answer = new Answer(null, true, null);
        } else {
            String errorMsg = "Failed to migrate PowerFlex volume: " + srcData.getId() + " to storage pool " + destPoolId;
            LOGGER.debug(errorMsg);
            answer = new Answer(null, false, errorMsg);
        }
    } catch (Exception e) {
        LOGGER.error("Failed to migrate PowerFlex volume: " + srcData.getId() + " due to: " + e.getMessage());
        answer = new Answer(null, false, e.getMessage());
    }
    return answer;
}
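Both the volume rename and the snapshot renames above derive the new PowerFlex name from the same four-part pattern: prefix, CloudStack entity id, a fragment of the destination pool UUID, and the custom CloudStack identifier. A hedged sketch factoring that convention into a helper (the helper itself is an assumption, not CloudStack code):

// Hypothetical helper (an assumption, not CloudStack code) capturing the naming pattern
// shared by the volume and snapshot branches above.
private String buildScaleIOName(String prefix, long entityId, StoragePoolVO destStoragePool) {
    // First UUID group with its leading four characters dropped, e.g. "a1b2c3d4" -> "c3d4"
    String poolFragment = destStoragePool.getUuid().split("-")[0].substring(4);
    return String.format("%s-%s-%s-%s", prefix, entityId, poolFragment, ManagementServerImpl.customCsIdentifier.value());
}

With such a helper, the two call sites would read buildScaleIOName(ScaleIOUtil.VOLUME_PREFIX, destData.getId(), destStoragePool) and buildScaleIOName(ScaleIOUtil.SNAPSHOT_PREFIX, snapshot.getId(), destStoragePool).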
Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreLifeCycle, method attachZone.
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
    if (!isSupportedHypervisorType(hypervisorType)) {
        throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString());
    }
    List<String> connectedSdcIps = null;
    try {
        ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStore.getId(), storagePoolDetailsDao);
        connectedSdcIps = client.listConnectedSdcIps();
    } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) {
        LOGGER.error("Failed to create storage pool", e);
        throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to create storage pool");
    }
    if (connectedSdcIps == null || connectedSdcIps.isEmpty()) {
        LOGGER.debug("No connected SDCs found for the PowerFlex storage pool");
        throw new CloudRuntimeException("Failed to create storage pool as no connected SDCs were found");
    }
    LOGGER.debug("Attaching the pool to each of the hosts in the zone: " + scope.getScopeId());
    List<HostVO> hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId());
    List<HostVO> poolHosts = new ArrayList<HostVO>();
    for (HostVO host : hosts) {
        try {
            // Only hosts whose private IP is a connected SDC can reach the PowerFlex pool
            if (connectedSdcIps.contains(host.getPrivateIpAddress())) {
                storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
                poolHosts.add(host);
            }
        } catch (Exception e) {
            LOGGER.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
        }
    }
    if (poolHosts.isEmpty()) {
        LOGGER.warn("No host can access storage pool " + dataStore + " in this zone.");
        primaryDataStoreDao.expunge(dataStore.getId());
        throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts.");
    }
    dataStoreHelper.attachZone(dataStore);
    return true;
}
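The connectivity probe at the top of attachZone can be useful on its own; a minimal hedged sketch under the same class wiring (the method name and its extraction are assumptions, not CloudStack code):

// Hypothetical standalone probe (name assumed): true when at least one SDC is connected
// to the PowerFlex cluster backing the given datastore. Error handling mirrors attachZone.
private boolean hasConnectedSdcs(long dataStoreId) {
    try {
        ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStoreId, storagePoolDetailsDao);
        List<String> sdcIps = client.listConnectedSdcIps();
        return sdcIps != null && !sdcIps.isEmpty();
    } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) {
        LOGGER.error("Failed to reach the PowerFlex Gateway for datastore " + dataStoreId, e);
        return false;
    }
}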
Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreLifeCycle, method findStoragePool.
private org.apache.cloudstack.storage.datastore.api.StoragePool findStoragePool(String url, String username, String password, String storagePoolName) {
    try {
        final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.value();
        final int clientMaxConnections = StorageManager.STORAGE_POOL_CLIENT_MAX_CONNECTIONS.value();
        ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout, clientMaxConnections);
        List<org.apache.cloudstack.storage.datastore.api.StoragePool> storagePools = client.listStoragePools();
        for (org.apache.cloudstack.storage.datastore.api.StoragePool pool : storagePools) {
            if (pool.getName().equals(storagePoolName)) {
                LOGGER.info("Found PowerFlex storage pool: " + storagePoolName);
                final org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics poolStatistics = client.getStoragePoolStatistics(pool.getId());
                pool.setStatistics(poolStatistics);
                String systemId = client.getSystemId(pool.getProtectionDomainId());
                pool.setSystemId(systemId);
                return pool;
            }
        }
    } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) {
        LOGGER.error("Failed to add storage pool", e);
        throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to find and validate storage pool: " + storagePoolName);
    }
    throw new CloudRuntimeException("Failed to find the provided storage pool name: " + storagePoolName + " in the discovered PowerFlex storage pools");
}
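A hedged example of invoking this lookup; the gateway URL, credentials, and pool name below are placeholders, and wiring it into pool creation is an assumption:

// Hypothetical call site (URL/credentials are placeholders): validate the admin-supplied
// pool name against the gateway before the datastore is persisted.
org.apache.cloudstack.storage.datastore.api.StoragePool pool =
        findStoragePool("https://10.1.1.2:443/api", "admin", "password", "cspool");
LOGGER.info("Validated PowerFlex storage pool: " + pool.getName());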
Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreDriver, method createVolume.
private String createVolume(VolumeInfo volumeInfo, long storagePoolId) {
    LOGGER.debug("Creating PowerFlex volume");
    Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null");
    Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0");
    StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
    Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null");
    try {
        final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId);
        final String scaleIOStoragePoolId = storagePool.getPath();
        final Long sizeInBytes = volumeInfo.getSize();
        // PowerFlex allocates in whole GiB, so round the requested size up
        final long sizeInGb = (long) Math.ceil(sizeInBytes / (1024.0 * 1024.0 * 1024.0));
        final String scaleIOVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.VOLUME_PREFIX, volumeInfo.getId(), storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value());
        org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = client.createVolume(scaleIOVolumeName, scaleIOStoragePoolId, (int) sizeInGb, volumeInfo.getProvisioningType());
        if (scaleIOVolume == null) {
            throw new CloudRuntimeException("Failed to create volume on PowerFlex cluster");
        }
        // Record the PowerFlex path, size, and format on the CloudStack volume
        VolumeVO volume = volumeDao.findById(volumeInfo.getId());
        String volumePath = ScaleIOUtil.updatedPathWithVolumeName(scaleIOVolume.getId(), scaleIOVolumeName);
        volume.set_iScsiName(volumePath);
        volume.setPath(volumePath);
        volume.setFolder(scaleIOVolume.getVtreeId());
        volume.setSize(scaleIOVolume.getSizeInKb() * 1024);
        volume.setPoolType(Storage.StoragePoolType.PowerFlex);
        volume.setFormat(Storage.ImageFormat.RAW);
        volume.setPoolId(storagePoolId);
        volumeDao.update(volume.getId(), volume);
        // Bump the pool's used-bytes accounting, capped at the pool capacity
        long capacityBytes = storagePool.getCapacityBytes();
        long usedBytes = storagePool.getUsedBytes() + volume.getSize();
        storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);
        storagePoolDao.update(storagePoolId, storagePool);
        return volumePath;
    } catch (Exception e) {
        String errMsg = "Unable to create PowerFlex Volume due to " + e.getMessage();
        LOGGER.warn(errMsg);
        throw new CloudRuntimeException(errMsg, e);
    }
}
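The size handling is worth noting: the requested byte size is rounded up to whole GiB before the PowerFlex call, while the size recorded back in CloudStack comes from the created volume's KiB size. A small hedged sketch isolating that rounding (the helper name is assumed, not CloudStack code):

// Hypothetical helper isolating the GiB rounding used above.
// e.g. 5 GiB + 1 byte -> 6 GiB; exactly 5 GiB -> 5 GiB.
private static long bytesToGibRoundedUp(long sizeInBytes) {
    return (long) Math.ceil(sizeInBytes / (1024.0 * 1024.0 * 1024.0));
}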