Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreLifeCycle, method attachCluster.
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
    final ClusterVO cluster = clusterDao.findById(scope.getScopeId());
    if (!isSupportedHypervisorType(cluster.getHypervisorType())) {
        throw new CloudRuntimeException("Unsupported hypervisor type: " + cluster.getHypervisorType().toString());
    }
    List<String> connectedSdcIps = null;
    try {
        ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStore.getId(), storagePoolDetailsDao);
        connectedSdcIps = client.listConnectedSdcIps();
    } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) {
        LOGGER.error("Failed to create storage pool", e);
        throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to create storage pool");
    }
    if (connectedSdcIps == null || connectedSdcIps.isEmpty()) {
        LOGGER.debug("No connected SDCs found for the PowerFlex storage pool");
        throw new CloudRuntimeException("Failed to create storage pool as connected SDCs not found");
    }
    PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore;
    List<HostVO> hostsInCluster = resourceManager.listAllUpAndEnabledHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId());
    if (hostsInCluster.isEmpty()) {
        primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
        throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + primaryDataStoreInfo.getClusterId());
    }
    LOGGER.debug("Attaching the pool to each of the hosts in the cluster: " + primaryDataStoreInfo.getClusterId());
    List<HostVO> poolHosts = new ArrayList<HostVO>();
    for (HostVO host : hostsInCluster) {
        try {
            if (connectedSdcIps.contains(host.getPrivateIpAddress())) {
                storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId());
                poolHosts.add(host);
            }
        } catch (Exception e) {
            LOGGER.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e);
        }
    }
    if (poolHosts.isEmpty()) {
        LOGGER.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'.");
        primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
        throw new CloudRuntimeException("Failed to create storage pool in the cluster: " + primaryDataStoreInfo.getClusterId() + " as it is not accessible to hosts");
    }
    dataStoreHelper.attachCluster(dataStore);
    return true;
}
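The key decision in attachCluster is that a host only joins the pool when its private IP appears in the SDC list returned by the PowerFlex Gateway. A minimal sketch of that filter, pulled out as a standalone helper, is shown below; the method name selectSdcConnectedHosts is hypothetical and not part of the CloudStack class, and it uses only the calls already visible above.

// Hypothetical helper (illustration only): keep the hosts whose private IP is
// registered as a connected SDC, mirroring the membership check in attachCluster.
private List<HostVO> selectSdcConnectedHosts(List<HostVO> hostsInCluster, List<String> connectedSdcIps) {
    List<HostVO> sdcConnectedHosts = new ArrayList<>();
    for (HostVO host : hostsInCluster) {
        if (connectedSdcIps.contains(host.getPrivateIpAddress())) {
            sdcConnectedHosts.add(host);
        }
    }
    return sdcConnectedHosts;
}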
Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreDriver, method revokeAccess.
@Override
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
    try {
        if (DataObjectType.VOLUME.equals(dataObject.getType())) {
            final VolumeVO volume = volumeDao.findById(dataObject.getId());
            LOGGER.debug("Revoking access for PowerFlex volume: " + volume.getPath());
            final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
            final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress());
            if (sdc == null) {
                throw new CloudRuntimeException("Unable to revoke access for volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
            }
            client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volume.getPath()), sdc.getId());
        } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
            final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null);
            LOGGER.debug("Revoking access for PowerFlex template volume: " + templatePoolRef.getInstallPath());
            final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
            final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress());
            if (sdc == null) {
                throw new CloudRuntimeException("Unable to revoke access for template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
            }
            client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdc.getId());
        } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) {
            SnapshotInfo snapshot = (SnapshotInfo) dataObject;
            LOGGER.debug("Revoking access for PowerFlex volume snapshot: " + snapshot.getPath());
            final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
            final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress());
            if (sdc == null) {
                throw new CloudRuntimeException("Unable to revoke access for snapshot: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
            }
            client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdc.getId());
        }
    } catch (Exception e) {
        LOGGER.warn("Failed to revoke access due to: " + e.getMessage(), e);
    }
}
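All three branches of revokeAccess repeat the same sequence: resolve the stored path to a PowerFlex volume id, look up the SDC registered for the host's private IP, and unmap. A hedged sketch of that shared step as a single helper is below; unmapFromHost is a hypothetical name, and the broad throws Exception is only there because the gateway calls are wrapped in catch (Exception) in the original method.

// Hypothetical consolidation (illustration only) of the unmap step shared by the
// volume, template, and snapshot branches of revokeAccess.
private void unmapFromHost(String storedPath, Host host, long storagePoolId) throws Exception {
    final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId);
    final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress());
    if (sdc == null) {
        throw new CloudRuntimeException("No Sdc connected with host ip: " + host.getPrivateIpAddress());
    }
    client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(storedPath), sdc.getId());
}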
Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreDriver, method revertSnapshot.
@Override
public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback) {
    LOGGER.debug("Reverting to PowerFlex volume snapshot");
    Preconditions.checkArgument(snapshot != null, "snapshotInfo cannot be null");
    VolumeInfo volumeInfo = snapshot.getBaseVolume();
    Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null");
    VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId());
    try {
        if (volumeVO == null || volumeVO.getRemoved() != null) {
            String errMsg = "The volume that the snapshot belongs to no longer exists.";
            CommandResult commandResult = new CommandResult();
            commandResult.setResult(errMsg);
            callback.complete(commandResult);
            return;
        }
        long storagePoolId = volumeVO.getPoolId();
        final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId);
        String snapshotVolumeId = ScaleIOUtil.getVolumePath(snapshot.getPath());
        final String destVolumeId = ScaleIOUtil.getVolumePath(volumeVO.getPath());
        client.revertSnapshot(snapshotVolumeId, destVolumeId);
        CommandResult commandResult = new CommandResult();
        callback.complete(commandResult);
    } catch (Exception ex) {
        LOGGER.debug("Unable to revert to PowerFlex snapshot: " + snapshot.getId(), ex);
        throw new CloudRuntimeException(ex.getMessage());
    }
}
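revertSnapshot only talks to the gateway after confirming the snapshot's base volume still exists in the database. That guard can be read in isolation; the sketch below extracts it as a hypothetical helper, using only the DAO calls already present in the method.

// Hypothetical guard (illustration only): return the backing VolumeVO of a snapshot,
// or null if the volume record is missing or already marked removed.
private VolumeVO findLiveBaseVolume(SnapshotInfo snapshot) {
    VolumeInfo volumeInfo = snapshot.getBaseVolume();
    if (volumeInfo == null) {
        return null;
    }
    VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId());
    if (volumeVO == null || volumeVO.getRemoved() != null) {
        return null;
    }
    return volumeVO;
}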
Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreDriver, method deleteAsync.
@Override
public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CommandResult> callback) {
    Preconditions.checkArgument(dataObject != null, "dataObject cannot be null");
    long storagePoolId = dataStore.getId();
    StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
    Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0");
    Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null");
    String errMsg = null;
    String scaleIOVolumePath = null;
    try {
        boolean deleteResult = false;
        if (dataObject.getType() == DataObjectType.VOLUME) {
            LOGGER.debug("deleteAsync - deleting volume");
            scaleIOVolumePath = ((VolumeInfo) dataObject).getPath();
        } else if (dataObject.getType() == DataObjectType.SNAPSHOT) {
            LOGGER.debug("deleteAsync - deleting snapshot");
            scaleIOVolumePath = ((SnapshotInfo) dataObject).getPath();
        } else if (dataObject.getType() == DataObjectType.TEMPLATE) {
            LOGGER.debug("deleteAsync - deleting template");
            scaleIOVolumePath = ((TemplateInfo) dataObject).getInstallPath();
        } else {
            errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync";
            LOGGER.error(errMsg);
            throw new CloudRuntimeException(errMsg);
        }
        try {
            String scaleIOVolumeId = ScaleIOUtil.getVolumePath(scaleIOVolumePath);
            final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId);
            deleteResult = client.deleteVolume(scaleIOVolumeId);
            if (!deleteResult) {
                errMsg = "Failed to delete PowerFlex volume with id: " + scaleIOVolumeId;
            }
            long usedBytes = storagePool.getUsedBytes();
            usedBytes -= dataObject.getSize();
            storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);
            storagePoolDao.update(storagePoolId, storagePool);
        } catch (Exception e) {
            errMsg = "Unable to delete PowerFlex volume: " + scaleIOVolumePath + " due to " + e.getMessage();
            LOGGER.warn(errMsg);
            throw new CloudRuntimeException(errMsg, e);
        }
    } catch (Exception ex) {
        errMsg = ex.getMessage();
        LOGGER.error(errMsg);
        if (callback == null) {
            throw ex;
        }
    }
    if (callback != null) {
        CommandResult result = new CommandResult();
        result.setResult(errMsg);
        callback.complete(result);
    }
}
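Besides issuing deleteVolume against the gateway, deleteAsync also reduces the pool's used-bytes accounting, clamping at zero so a stale size value can never drive the counter negative. A minimal sketch of that bookkeeping step is shown below; releaseUsedBytes is a hypothetical name used for illustration only.

// Hypothetical helper (illustration only) mirroring the capacity bookkeeping in deleteAsync:
// subtract the deleted object's size from the pool's used bytes, never going below zero.
private void releaseUsedBytes(StoragePoolVO storagePool, long releasedBytes) {
    long usedBytes = storagePool.getUsedBytes() - releasedBytes;
    storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);
    storagePoolDao.update(storagePool.getId(), storagePool);
}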
Use of org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient in project cloudstack by apache.
The class ScaleIOPrimaryDataStoreDriver, method grantAccess.
@Override
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
    try {
        if (DataObjectType.VOLUME.equals(dataObject.getType())) {
            final VolumeVO volume = volumeDao.findById(dataObject.getId());
            LOGGER.debug("Granting access for PowerFlex volume: " + volume.getPath());
            // Unlimited
            Long bandwidthLimitInKbps = Long.valueOf(0);
            // Check Bandwidth Limit parameter in volume details
            final VolumeDetailVO bandwidthVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
            if (bandwidthVolumeDetail != null && bandwidthVolumeDetail.getValue() != null) {
                bandwidthLimitInKbps = Long.parseLong(bandwidthVolumeDetail.getValue()) * 1024;
            }
            // Unlimited
            Long iopsLimit = Long.valueOf(0);
            // Check IOPS Limit parameter in volume details, else try MaxIOPS
            final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.IOPS_LIMIT);
            if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) {
                iopsLimit = Long.parseLong(iopsVolumeDetail.getValue());
            } else if (volume.getMaxIops() != null) {
                iopsLimit = volume.getMaxIops();
            }
            if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) {
                iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT;
            }
            final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
            final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress());
            if (sdc == null) {
                alertHostSdcDisconnection(host);
                throw new CloudRuntimeException("Unable to grant access to volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
            }
            return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdc.getId(), iopsLimit, bandwidthLimitInKbps);
        } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
            final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null);
            LOGGER.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath());
            final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
            final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress());
            if (sdc == null) {
                alertHostSdcDisconnection(host);
                throw new CloudRuntimeException("Unable to grant access to template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
            }
            return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdc.getId());
        } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) {
            SnapshotInfo snapshot = (SnapshotInfo) dataObject;
            LOGGER.debug("Granting access for PowerFlex volume snapshot: " + snapshot.getPath());
            final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
            final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress());
            if (sdc == null) {
                alertHostSdcDisconnection(host);
                throw new CloudRuntimeException("Unable to grant access to snapshot: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
            }
            return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdc.getId());
        }
        return false;
    } catch (Exception e) {
        throw new CloudRuntimeException(e);
    }
}
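For volumes, grantAccess derives QoS limits before mapping: a bandwidth limit stored in MBps is converted to Kbps, and an IOPS limit is taken from the volume details or the volume's max IOPS, with non-zero values raised to the PowerFlex minimum (zero means unlimited). The sketch below isolates the IOPS resolution; resolveIopsLimit is a hypothetical name, and the lookup keys are the ones used in the method above.

// Hypothetical helper (illustration only) mirroring the IOPS limit resolution in grantAccess:
// prefer the per-volume detail, fall back to the volume's max IOPS, and raise any
// non-zero value to ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT; 0 means unlimited.
private long resolveIopsLimit(VolumeVO volume) {
    long iopsLimit = 0;
    final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.IOPS_LIMIT);
    if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) {
        iopsLimit = Long.parseLong(iopsVolumeDetail.getValue());
    } else if (volume.getMaxIops() != null) {
        iopsLimit = volume.getMaxIops();
    }
    if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) {
        iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT;
    }
    return iopsLimit;
}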